author		Jarno Rajahalme <jrajahalme@nicira.com>	2014-03-27 15:35:23 -0400
committer	Jesse Gross <jesse@nicira.com>	2014-05-16 16:40:29 -0400
commit		23dabf88abb48a866fdb19ee08ebcf1ddd9b1840
tree		cc835b9d88c36d7b3b0c296fb2617f9971c5bd7e	/net/openvswitch/flow_table.c
parent		8c63ff09bddf944ab0033fea97aacfadfffa76de
openvswitch: Remove 5-tuple optimization.
The 5-tuple optimization becomes unnecessary with a later per-NUMA node
stats patch. Remove it first to make the changes easier to grasp.

Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
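For reference, the change collapses a dual stats representation (a shared
entry or a per-CPU array, chosen by the percpu_stats flag) into a single
always-per-CPU field. A rough before/after sketch of the flow stats layout,
inferred from the diff below (the wrapper struct and union shape here are an
assumption, not copied from the OVS headers):

	/* Before (assumed shape): one flag selects which member is valid. */
	struct sw_flow_stats {
		bool is_percpu;
		union {
			struct flow_stats *stat;		/* shared, kzalloc'd */
			struct flow_stats __percpu *cpu_stats;	/* per-CPU array */
		};
	} stats;

	/* After: no flag, no union; stats are always per-CPU. */
	struct flow_stats __percpu *stats;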
Diffstat (limited to 'net/openvswitch/flow_table.c')
-rw-r--r--	net/openvswitch/flow_table.c	31
1 file changed, 9 insertions(+), 22 deletions(-)
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 1ba1e0b8ade5..aa92da23053d 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -72,7 +72,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		*d++ = *s++ & *m++;
 }
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats)
+struct sw_flow *ovs_flow_alloc(void)
 {
 	struct sw_flow *flow;
 	int cpu;
@@ -84,25 +84,15 @@ struct sw_flow *ovs_flow_alloc(bool percpu_stats)
 	flow->sf_acts = NULL;
 	flow->mask = NULL;
 
-	flow->stats.is_percpu = percpu_stats;
+	flow->stats = alloc_percpu(struct flow_stats);
+	if (!flow->stats)
+		goto err;
 
-	if (!percpu_stats) {
-		flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
-		if (!flow->stats.stat)
-			goto err;
+	for_each_possible_cpu(cpu) {
+		struct flow_stats *cpu_stats;
 
-		spin_lock_init(&flow->stats.stat->lock);
-	} else {
-		flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
-		if (!flow->stats.cpu_stats)
-			goto err;
-
-		for_each_possible_cpu(cpu) {
-			struct flow_stats *cpu_stats;
-
-			cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-			spin_lock_init(&cpu_stats->lock);
-		}
+		cpu_stats = per_cpu_ptr(flow->stats, cpu);
+		spin_lock_init(&cpu_stats->lock);
 	}
 	return flow;
 err:
@@ -141,10 +131,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 static void flow_free(struct sw_flow *flow)
 {
 	kfree((struct sf_flow_acts __force *)flow->sf_acts);
-	if (flow->stats.is_percpu)
-		free_percpu(flow->stats.cpu_stats);
-	else
-		kfree(flow->stats.stat);
+	free_percpu(flow->stats);
 	kmem_cache_free(flow_cache, flow);
 }
 
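The new allocation path is the stock kernel per-CPU pattern: alloc_percpu()
for the array, for_each_possible_cpu() plus per_cpu_ptr() to initialize each
CPU's lock, and free_percpu() on teardown. A self-contained sketch of that
pattern under illustrative names (example_stats and the two helpers are
hypothetical, not from the OVS source):

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct example_stats {
		spinlock_t lock;
		u64 packets;
		u64 bytes;
	};

	/* Allocate one zeroed stats instance per possible CPU and init each
	 * lock, mirroring what ovs_flow_alloc() does after this commit.
	 */
	static struct example_stats __percpu *example_stats_alloc(void)
	{
		struct example_stats __percpu *stats;
		int cpu;

		stats = alloc_percpu(struct example_stats);
		if (!stats)
			return NULL;

		for_each_possible_cpu(cpu)
			spin_lock_init(&per_cpu_ptr(stats, cpu)->lock);

		return stats;
	}

	/* Mirror of flow_free(): free_percpu() handles a NULL pointer. */
	static void example_stats_free(struct example_stats __percpu *stats)
	{
		free_percpu(stats);
	}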