diff options
author | Pablo Neira Ayuso <pablo@netfilter.org> | 2019-07-19 12:20:13 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2019-07-20 00:27:45 -0400 |
commit | aef833c58d321f09ae4ce4467723542842ba9faf (patch) | |
tree | 2d2567ee29b74b37e128ce44bb8c7147e175db56 /net/openvswitch | |
parent | 9a2f97bb8ddddbf655ce1fcdf688dcec19deb59f (diff) |
net: openvswitch: rename flow_stats to sw_flow_stats
There is a flow_stats structure defined in include/net/flow_offload.h,
and a follow-up patch adds #include <net/flow_offload.h> to
net/sch_generic.h.
This breaks compilation since OVS codebase includes net/sock.h which
pulls in linux/filter.h which includes net/sch_generic.h.
In file included from ./include/net/sch_generic.h:18:0,
from ./include/linux/filter.h:25,
from ./include/net/sock.h:59,
from ./include/linux/tcp.h:19,
from net/openvswitch/datapath.c:24
The core definition takes precedence over the OVS one since it is placed
in the networking core, so rename flow_stats in OVS to sw_flow_stats,
since this structure is contained in struct sw_flow.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/openvswitch')
-rw-r--r-- | net/openvswitch/flow.c | 8 | ||||
-rw-r--r-- | net/openvswitch/flow.h | 4 | ||||
-rw-r--r-- | net/openvswitch/flow_table.c | 8 |
3 files changed, 10 insertions, 10 deletions
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index dca3b1e2acf0..bc89e16e0505 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -59,7 +59,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies) | |||
59 | void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, | 59 | void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, |
60 | const struct sk_buff *skb) | 60 | const struct sk_buff *skb) |
61 | { | 61 | { |
62 | struct flow_stats *stats; | 62 | struct sw_flow_stats *stats; |
63 | unsigned int cpu = smp_processor_id(); | 63 | unsigned int cpu = smp_processor_id(); |
64 | int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); | 64 | int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); |
65 | 65 | ||
@@ -87,7 +87,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, | |||
87 | if (likely(flow->stats_last_writer != -1) && | 87 | if (likely(flow->stats_last_writer != -1) && |
88 | likely(!rcu_access_pointer(flow->stats[cpu]))) { | 88 | likely(!rcu_access_pointer(flow->stats[cpu]))) { |
89 | /* Try to allocate CPU-specific stats. */ | 89 | /* Try to allocate CPU-specific stats. */ |
90 | struct flow_stats *new_stats; | 90 | struct sw_flow_stats *new_stats; |
91 | 91 | ||
92 | new_stats = | 92 | new_stats = |
93 | kmem_cache_alloc_node(flow_stats_cache, | 93 | kmem_cache_alloc_node(flow_stats_cache, |
@@ -134,7 +134,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow, | |||
134 | 134 | ||
135 | /* We open code this to make sure cpu 0 is always considered */ | 135 | /* We open code this to make sure cpu 0 is always considered */ |
136 | for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { | 136 | for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { |
137 | struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); | 137 | struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); |
138 | 138 | ||
139 | if (stats) { | 139 | if (stats) { |
140 | /* Local CPU may write on non-local stats, so we must | 140 | /* Local CPU may write on non-local stats, so we must |
@@ -158,7 +158,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow) | |||
158 | 158 | ||
159 | /* We open code this to make sure cpu 0 is always considered */ | 159 | /* We open code this to make sure cpu 0 is always considered */ |
160 | for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { | 160 | for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { |
161 | struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]); | 161 | struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]); |
162 | 162 | ||
163 | if (stats) { | 163 | if (stats) { |
164 | spin_lock_bh(&stats->lock); | 164 | spin_lock_bh(&stats->lock); |
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 3e2cc2202d66..a5506e2d4b7a 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h | |||
@@ -194,7 +194,7 @@ struct sw_flow_actions { | |||
194 | struct nlattr actions[]; | 194 | struct nlattr actions[]; |
195 | }; | 195 | }; |
196 | 196 | ||
197 | struct flow_stats { | 197 | struct sw_flow_stats { |
198 | u64 packet_count; /* Number of packets matched. */ | 198 | u64 packet_count; /* Number of packets matched. */ |
199 | u64 byte_count; /* Number of bytes matched. */ | 199 | u64 byte_count; /* Number of bytes matched. */ |
200 | unsigned long used; /* Last used time (in jiffies). */ | 200 | unsigned long used; /* Last used time (in jiffies). */ |
@@ -216,7 +216,7 @@ struct sw_flow { | |||
216 | struct cpumask cpu_used_mask; | 216 | struct cpumask cpu_used_mask; |
217 | struct sw_flow_mask *mask; | 217 | struct sw_flow_mask *mask; |
218 | struct sw_flow_actions __rcu *sf_acts; | 218 | struct sw_flow_actions __rcu *sf_acts; |
219 | struct flow_stats __rcu *stats[]; /* One for each CPU. First one | 219 | struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one |
220 | * is allocated at flow creation time, | 220 | * is allocated at flow creation time, |
221 | * the rest are allocated on demand | 221 | * the rest are allocated on demand |
222 | * while holding the 'stats[0].lock'. | 222 | * while holding the 'stats[0].lock'. |
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index 988fd8a94e43..cf3582c5ed70 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c | |||
@@ -66,7 +66,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | |||
66 | struct sw_flow *ovs_flow_alloc(void) | 66 | struct sw_flow *ovs_flow_alloc(void) |
67 | { | 67 | { |
68 | struct sw_flow *flow; | 68 | struct sw_flow *flow; |
69 | struct flow_stats *stats; | 69 | struct sw_flow_stats *stats; |
70 | 70 | ||
71 | flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); | 71 | flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); |
72 | if (!flow) | 72 | if (!flow) |
@@ -110,7 +110,7 @@ static void flow_free(struct sw_flow *flow) | |||
110 | for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) | 110 | for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) |
111 | if (flow->stats[cpu]) | 111 | if (flow->stats[cpu]) |
112 | kmem_cache_free(flow_stats_cache, | 112 | kmem_cache_free(flow_stats_cache, |
113 | (struct flow_stats __force *)flow->stats[cpu]); | 113 | (struct sw_flow_stats __force *)flow->stats[cpu]); |
114 | kmem_cache_free(flow_cache, flow); | 114 | kmem_cache_free(flow_cache, flow); |
115 | } | 115 | } |
116 | 116 | ||
@@ -712,13 +712,13 @@ int ovs_flow_init(void) | |||
712 | 712 | ||
713 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) | 713 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) |
714 | + (nr_cpu_ids | 714 | + (nr_cpu_ids |
715 | * sizeof(struct flow_stats *)), | 715 | * sizeof(struct sw_flow_stats *)), |
716 | 0, 0, NULL); | 716 | 0, 0, NULL); |
717 | if (flow_cache == NULL) | 717 | if (flow_cache == NULL) |
718 | return -ENOMEM; | 718 | return -ENOMEM; |
719 | 719 | ||
720 | flow_stats_cache | 720 | flow_stats_cache |
721 | = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats), | 721 | = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats), |
722 | 0, SLAB_HWCACHE_ALIGN, NULL); | 722 | 0, SLAB_HWCACHE_ALIGN, NULL); |
723 | if (flow_stats_cache == NULL) { | 723 | if (flow_stats_cache == NULL) { |
724 | kmem_cache_destroy(flow_cache); | 724 | kmem_cache_destroy(flow_cache); |