author		David S. Miller <davem@davemloft.net>	2014-01-06 19:48:38 -0500
committer	David S. Miller <davem@davemloft.net>	2014-01-06 19:48:38 -0500
commit		39b6b2992f9dc65d1de5c66e7ec2271b8a5fac33 (patch)
tree		c0fc4e2be0429bb4d7643e6b6f8f5a56212f9284 /net/openvswitch/flow_table.c
parent		56a4342dfe3145cd66f766adccb28fd9b571606d (diff)
parent		443cd88c8a31379e95326428bbbd40af25c1d440 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch

Jesse Gross says:

====================
[GIT net-next] Open vSwitch

Open vSwitch changes for net-next/3.14. Highlights are:

 * Performance improvements in the mechanism to get packets to userspace
   using memory mapped netlink and skb zero copy where appropriate.

 * Per-cpu flow stats in situations where flows are likely to be shared
   across CPUs. Standard flow stats are used in other situations to save
   memory and allocation time.

 * A handful of code cleanups and rationalization.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/openvswitch/flow_table.c')
-rw-r--r--	net/openvswitch/flow_table.c	60
1 file changed, 41 insertions(+), 19 deletions(-)
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 0e720c316070..b430d42b2d0f 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -44,8 +44,6 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 
-#include "datapath.h"
-
 #define TBL_MIN_BUCKETS 1024
 #define REHASH_INTERVAL (10 * 60 * HZ)
 
@@ -72,19 +70,42 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		*d++ = *s++ & *m++;
 }
 
-struct sw_flow *ovs_flow_alloc(void)
+struct sw_flow *ovs_flow_alloc(bool percpu_stats)
 {
 	struct sw_flow *flow;
+	int cpu;
 
 	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
 	if (!flow)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&flow->lock);
 	flow->sf_acts = NULL;
 	flow->mask = NULL;
 
+	flow->stats.is_percpu = percpu_stats;
+
+	if (!percpu_stats) {
+		flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
+		if (!flow->stats.stat)
+			goto err;
+
+		spin_lock_init(&flow->stats.stat->lock);
+	} else {
+		flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
+		if (!flow->stats.cpu_stats)
+			goto err;
+
+		for_each_possible_cpu(cpu) {
+			struct flow_stats *cpu_stats;
+
+			cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
+			spin_lock_init(&cpu_stats->lock);
+		}
+	}
 	return flow;
+err:
+	kfree(flow);
+	return ERR_PTR(-ENOMEM);
 }
 
 int ovs_flow_tbl_count(struct flow_table *table)
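[Annotation] The new allocator trades memory for update-path locality: with percpu_stats, every possible CPU gets its own counter block and lock, so the datapath only ever touches a CPU-local cache line, and the cost falls on readers, which must walk all CPUs and sum. Below is a minimal sketch of such a reader, assuming flow_stats carries packet_count and byte_count fields guarded by its lock (the real aggregation lives in ovs_flow_stats_get() in flow.c; flow_stats_fold() here is hypothetical). Worth noting in passing: the error path above frees a kmem_cache allocation with plain kfree() rather than kmem_cache_free(flow_cache, flow); a follow-up upstream patch corrected this.

/* Sketch only: fold one flow's stats into a single total.
 * ASSUMPTIONS: flow_stats has packet_count/byte_count/lock members as
 * in flow.h of this era; flow_stats_fold() itself is illustrative.
 */
static void flow_stats_fold(const struct sw_flow *flow,
			    u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;

	if (!flow->stats.is_percpu) {
		/* Shared stats: a single block, a single lock. */
		spin_lock(&flow->stats.stat->lock);
		*packets = flow->stats.stat->packet_count;
		*bytes = flow->stats.stat->byte_count;
		spin_unlock(&flow->stats.stat->lock);
		return;
	}

	/* Per-CPU stats: visit every possible CPU and sum its share. */
	for_each_possible_cpu(cpu) {
		struct flow_stats *s = per_cpu_ptr(flow->stats.cpu_stats, cpu);

		spin_lock(&s->lock);
		*packets += s->packet_count;
		*bytes += s->byte_count;
		spin_unlock(&s->lock);
	}
}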
@@ -118,6 +139,10 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 static void flow_free(struct sw_flow *flow)
 {
 	kfree((struct sf_flow_acts __force *)flow->sf_acts);
+	if (flow->stats.is_percpu)
+		free_percpu(flow->stats.cpu_stats);
+	else
+		kfree(flow->stats.stat);
 	kmem_cache_free(flow_cache, flow);
 }
 
@@ -128,13 +153,6 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
 	flow_free(flow);
 }
 
-static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
-{
-	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
-
-	kfree(mask);
-}
-
 static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
 {
 	if (!mask)
@@ -146,7 +164,7 @@ static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
 	if (!mask->ref_count) {
 		list_del_rcu(&mask->list);
 		if (deferred)
-			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
+			kfree_rcu(mask, rcu);
 		else
 			kfree(mask);
 	}
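[Annotation] kfree_rcu() is shorthand for a call_rcu() whose callback would only do container_of() plus kfree(), which is exactly what the deleted rcu_free_sw_flow_mask_cb() did; the second argument names the struct's embedded rcu_head member. The deferral matters because lookups traverse the mask list locklessly. A sketch of the reader side this protects, assuming the mask_list member of struct flow_table and the usual RCU list walk (illustrative, not the actual lookup code):

/* Sketch only: why the free must wait for a grace period. Readers
 * walk the mask list under rcu_read_lock(); kfree_rcu() keeps the
 * memory of an unlinked mask alive until all such readers are done.
 */
static void example_mask_walk(struct flow_table *tbl)
{
	struct sw_flow_mask *mask;

	rcu_read_lock();
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		/* 'mask' may already be list_del_rcu()'d by a writer,
		 * but its memory cannot be recycled under our feet.
		 */
	}
	rcu_read_unlock();
}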
@@ -429,11 +447,11 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 	return NULL;
 }
 
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 				    const struct sw_flow_key *key,
 				    u32 *n_mask_hit)
 {
-	struct table_instance *ti = rcu_dereference(tbl->ti);
+	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct sw_flow_mask *mask;
 	struct sw_flow *flow;
 
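[Annotation] rcu_dereference_ovsl() is the Open vSwitch flavor of rcu_dereference_check(): it tells lockdep that the pointer may legitimately be read either inside an RCU read-side critical section or with ovs_mutex held, which lets this lookup serve both the packet path and mutex-holding control paths. In datapath.h of this era it is defined roughly as follows (shown for orientation; consult the tree for the exact form):

/* Approximate definition, from net/openvswitch/datapath.h: */
#define rcu_dereference_ovsl(p)					\
	rcu_dereference_check(p, lockdep_ovsl_is_held())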
@@ -447,6 +465,14 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	return NULL;
 }
 
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+				    const struct sw_flow_key *key)
+{
+	u32 __always_unused n_mask_hit;
+
+	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
+}
+
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
 	struct sw_flow_mask *mask;
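[Annotation] The rename leaves two entry points: ovs_flow_tbl_lookup_stats() for callers that want n_mask_hit, the count of masks probed before a match (useful for megaflow accounting), and the plain ovs_flow_tbl_lookup() wrapper for callers that only need the flow. A hypothetical caller showing both side by side (example_lookup() and the want_stats flag are made up for illustration):

/* Sketch only: choosing between the two lookup entry points. */
struct sw_flow *example_lookup(struct flow_table *tbl,
			       const struct sw_flow_key *key,
			       bool want_stats, u32 *n_mask_hit)
{
	if (want_stats)
		return ovs_flow_tbl_lookup_stats(tbl, key, n_mask_hit);

	/* Wrapper discards the hit count via __always_unused. */
	return ovs_flow_tbl_lookup(tbl, key);
}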
@@ -514,11 +540,7 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
 	return NULL;
 }
 
-/**
- * add a new mask into the mask list.
- * The caller needs to make sure that 'mask' is not the same
- * as any masks that are already on the list.
- */
+/* Add 'mask' into the mask list, if it is not already there. */
 static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
 			    struct sw_flow_mask *new)
 {