aboutsummaryrefslogtreecommitdiffstats
path: root/net/openvswitch/flow_table.c
diff options
context:
space:
mode:
authorPravin B Shelar <pshelar@nicira.com>2013-10-04 03:14:23 -0400
committerJesse Gross <jesse@nicira.com>2013-10-04 03:18:26 -0400
commitb637e4988c2d689bb43f943a5af0e684a4981159 (patch)
tree36fbf2ee83323b2441861de650c6dabe6d5cc663 /net/openvswitch/flow_table.c
parente64457191a259537bbbfaebeba9a8043786af96f (diff)
openvswitch: Move mega-flow list out of rehashing struct.
ovs-flow rehash does not touch the mega-flow list. The following patch moves it to struct datapath. This avoids one extra indirection for accessing the mega-flow list head on every packet receive. Signed-off-by: Pravin B Shelar <pshelar@nicira.com> Signed-off-by: Jesse Gross <jesse@nicira.com>
Diffstat (limited to 'net/openvswitch/flow_table.c')
-rw-r--r--net/openvswitch/flow_table.c205
1 file changed, 123 insertions, 82 deletions
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index dcadb75bb173..1c7e7732ed4c 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -44,6 +44,11 @@
44#include <net/ipv6.h> 44#include <net/ipv6.h>
45#include <net/ndisc.h> 45#include <net/ndisc.h>
46 46
47#include "datapath.h"
48
49#define TBL_MIN_BUCKETS 1024
50#define REHASH_INTERVAL (10 * 60 * HZ)
51
47static struct kmem_cache *flow_cache; 52static struct kmem_cache *flow_cache;
48 53
49static u16 range_n_bytes(const struct sw_flow_key_range *range) 54static u16 range_n_bytes(const struct sw_flow_key_range *range)
@@ -82,6 +87,11 @@ struct sw_flow *ovs_flow_alloc(void)
82 return flow; 87 return flow;
83} 88}
84 89
90int ovs_flow_tbl_count(struct flow_table *table)
91{
92 return table->count;
93}
94
85static struct flex_array *alloc_buckets(unsigned int n_buckets) 95static struct flex_array *alloc_buckets(unsigned int n_buckets)
86{ 96{
87 struct flex_array *buckets; 97 struct flex_array *buckets;
@@ -136,18 +146,18 @@ static void free_buckets(struct flex_array *buckets)
136 flex_array_free(buckets); 146 flex_array_free(buckets);
137} 147}
138 148
139static void __flow_tbl_destroy(struct flow_table *table) 149static void __table_instance_destroy(struct table_instance *ti)
140{ 150{
141 int i; 151 int i;
142 152
143 if (table->keep_flows) 153 if (ti->keep_flows)
144 goto skip_flows; 154 goto skip_flows;
145 155
146 for (i = 0; i < table->n_buckets; i++) { 156 for (i = 0; i < ti->n_buckets; i++) {
147 struct sw_flow *flow; 157 struct sw_flow *flow;
148 struct hlist_head *head = flex_array_get(table->buckets, i); 158 struct hlist_head *head = flex_array_get(ti->buckets, i);
149 struct hlist_node *n; 159 struct hlist_node *n;
150 int ver = table->node_ver; 160 int ver = ti->node_ver;
151 161
152 hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { 162 hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
153 hlist_del(&flow->hash_node[ver]); 163 hlist_del(&flow->hash_node[ver]);
@@ -155,74 +165,74 @@ static void __flow_tbl_destroy(struct flow_table *table)
155 } 165 }
156 } 166 }
157 167
158 BUG_ON(!list_empty(table->mask_list));
159 kfree(table->mask_list);
160
161skip_flows: 168skip_flows:
162 free_buckets(table->buckets); 169 free_buckets(ti->buckets);
163 kfree(table); 170 kfree(ti);
164} 171}
165 172
166static struct flow_table *__flow_tbl_alloc(int new_size) 173static struct table_instance *table_instance_alloc(int new_size)
167{ 174{
168 struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); 175 struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
169 176
170 if (!table) 177 if (!ti)
171 return NULL; 178 return NULL;
172 179
173 table->buckets = alloc_buckets(new_size); 180 ti->buckets = alloc_buckets(new_size);
174 181
175 if (!table->buckets) { 182 if (!ti->buckets) {
176 kfree(table); 183 kfree(ti);
177 return NULL; 184 return NULL;
178 } 185 }
179 table->n_buckets = new_size; 186 ti->n_buckets = new_size;
180 table->count = 0; 187 ti->node_ver = 0;
181 table->node_ver = 0; 188 ti->keep_flows = false;
182 table->keep_flows = false; 189 get_random_bytes(&ti->hash_seed, sizeof(u32));
183 get_random_bytes(&table->hash_seed, sizeof(u32));
184 table->mask_list = NULL;
185 190
186 return table; 191 return ti;
187} 192}
188 193
189struct flow_table *ovs_flow_tbl_alloc(int new_size) 194int ovs_flow_tbl_init(struct flow_table *table)
190{ 195{
191 struct flow_table *table = __flow_tbl_alloc(new_size); 196 struct table_instance *ti;
192 197
193 if (!table) 198 ti = table_instance_alloc(TBL_MIN_BUCKETS);
194 return NULL;
195 199
196 table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); 200 if (!ti)
197 if (!table->mask_list) { 201 return -ENOMEM;
198 table->keep_flows = true;
199 __flow_tbl_destroy(table);
200 return NULL;
201 }
202 INIT_LIST_HEAD(table->mask_list);
203 202
204 return table; 203 rcu_assign_pointer(table->ti, ti);
204 INIT_LIST_HEAD(&table->mask_list);
205 table->last_rehash = jiffies;
206 table->count = 0;
207 return 0;
205} 208}
206 209
207static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) 210static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
208{ 211{
209 struct flow_table *table = container_of(rcu, struct flow_table, rcu); 212 struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
210 213
211 __flow_tbl_destroy(table); 214 __table_instance_destroy(ti);
212} 215}
213 216
214void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) 217static void table_instance_destroy(struct table_instance *ti, bool deferred)
215{ 218{
216 if (!table) 219 if (!ti)
217 return; 220 return;
218 221
219 if (deferred) 222 if (deferred)
220 call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); 223 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
221 else 224 else
222 __flow_tbl_destroy(table); 225 __table_instance_destroy(ti);
226}
227
228void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
229{
230 struct table_instance *ti = ovsl_dereference(table->ti);
231
232 table_instance_destroy(ti, deferred);
223} 233}
224 234
225struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table, 235struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
226 u32 *bucket, u32 *last) 236 u32 *bucket, u32 *last)
227{ 237{
228 struct sw_flow *flow; 238 struct sw_flow *flow;
@@ -230,10 +240,10 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
230 int ver; 240 int ver;
231 int i; 241 int i;
232 242
233 ver = table->node_ver; 243 ver = ti->node_ver;
234 while (*bucket < table->n_buckets) { 244 while (*bucket < ti->n_buckets) {
235 i = 0; 245 i = 0;
236 head = flex_array_get(table->buckets, *bucket); 246 head = flex_array_get(ti->buckets, *bucket);
237 hlist_for_each_entry_rcu(flow, head, hash_node[ver]) { 247 hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
238 if (i < *last) { 248 if (i < *last) {
239 i++; 249 i++;
@@ -249,25 +259,23 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
249 return NULL; 259 return NULL;
250} 260}
251 261
252static struct hlist_head *find_bucket(struct flow_table *table, u32 hash) 262static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
253{ 263{
254 hash = jhash_1word(hash, table->hash_seed); 264 hash = jhash_1word(hash, ti->hash_seed);
255 return flex_array_get(table->buckets, 265 return flex_array_get(ti->buckets,
256 (hash & (table->n_buckets - 1))); 266 (hash & (ti->n_buckets - 1)));
257} 267}
258 268
259static void __tbl_insert(struct flow_table *table, struct sw_flow *flow) 269static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
260{ 270{
261 struct hlist_head *head; 271 struct hlist_head *head;
262 272
263 head = find_bucket(table, flow->hash); 273 head = find_bucket(ti, flow->hash);
264 hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); 274 hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
265
266 table->count++;
267} 275}
268 276
269static void flow_table_copy_flows(struct flow_table *old, 277static void flow_table_copy_flows(struct table_instance *old,
270 struct flow_table *new) 278 struct table_instance *new)
271{ 279{
272 int old_ver; 280 int old_ver;
273 int i; 281 int i;
@@ -283,35 +291,42 @@ static void flow_table_copy_flows(struct flow_table *old,
283 head = flex_array_get(old->buckets, i); 291 head = flex_array_get(old->buckets, i);
284 292
285 hlist_for_each_entry(flow, head, hash_node[old_ver]) 293 hlist_for_each_entry(flow, head, hash_node[old_ver])
286 __tbl_insert(new, flow); 294 table_instance_insert(new, flow);
287 } 295 }
288 296
289 new->mask_list = old->mask_list;
290 old->keep_flows = true; 297 old->keep_flows = true;
291} 298}
292 299
293static struct flow_table *__flow_tbl_rehash(struct flow_table *table, 300static struct table_instance *table_instance_rehash(struct table_instance *ti,
294 int n_buckets) 301 int n_buckets)
295{ 302{
296 struct flow_table *new_table; 303 struct table_instance *new_ti;
297 304
298 new_table = __flow_tbl_alloc(n_buckets); 305 new_ti = table_instance_alloc(n_buckets);
299 if (!new_table) 306 if (!new_ti)
300 return ERR_PTR(-ENOMEM); 307 return ERR_PTR(-ENOMEM);
301 308
302 flow_table_copy_flows(table, new_table); 309 flow_table_copy_flows(ti, new_ti);
303 310
304 return new_table; 311 return new_ti;
305} 312}
306 313
307struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table) 314int ovs_flow_tbl_flush(struct flow_table *flow_table)
308{ 315{
309 return __flow_tbl_rehash(table, table->n_buckets); 316 struct table_instance *old_ti;
310} 317 struct table_instance *new_ti;
311 318
312struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) 319 old_ti = ovsl_dereference(flow_table->ti);
313{ 320 new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
314 return __flow_tbl_rehash(table, table->n_buckets * 2); 321 if (!new_ti)
322 return -ENOMEM;
323
324 rcu_assign_pointer(flow_table->ti, new_ti);
325 flow_table->last_rehash = jiffies;
326 flow_table->count = 0;
327
328 table_instance_destroy(old_ti, true);
329 return 0;
315} 330}
316 331
317static u32 flow_hash(const struct sw_flow_key *key, int key_start, 332static u32 flow_hash(const struct sw_flow_key *key, int key_start,
@@ -367,7 +382,7 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
367 return cmp_key(&flow->unmasked_key, key, key_start, key_end); 382 return cmp_key(&flow->unmasked_key, key, key_start, key_end);
368} 383}
369 384
370static struct sw_flow *masked_flow_lookup(struct flow_table *table, 385static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
371 const struct sw_flow_key *unmasked, 386 const struct sw_flow_key *unmasked,
372 struct sw_flow_mask *mask) 387 struct sw_flow_mask *mask)
373{ 388{
@@ -380,8 +395,8 @@ static struct sw_flow *masked_flow_lookup(struct flow_table *table,
380 395
381 ovs_flow_mask_key(&masked_key, unmasked, mask); 396 ovs_flow_mask_key(&masked_key, unmasked, mask);
382 hash = flow_hash(&masked_key, key_start, key_end); 397 hash = flow_hash(&masked_key, key_start, key_end);
383 head = find_bucket(table, hash); 398 head = find_bucket(ti, hash);
384 hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) { 399 hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
385 if (flow->mask == mask && 400 if (flow->mask == mask &&
386 flow_cmp_masked_key(flow, &masked_key, 401 flow_cmp_masked_key(flow, &masked_key,
387 key_start, key_end)) 402 key_start, key_end))
@@ -393,29 +408,55 @@ static struct sw_flow *masked_flow_lookup(struct flow_table *table,
393struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, 408struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
394 const struct sw_flow_key *key) 409 const struct sw_flow_key *key)
395{ 410{
396 struct sw_flow *flow = NULL; 411 struct table_instance *ti = rcu_dereference(tbl->ti);
397 struct sw_flow_mask *mask; 412 struct sw_flow_mask *mask;
413 struct sw_flow *flow;
398 414
399 list_for_each_entry_rcu(mask, tbl->mask_list, list) { 415 list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
400 flow = masked_flow_lookup(tbl, key, mask); 416 flow = masked_flow_lookup(ti, key, mask);
401 if (flow) /* Found */ 417 if (flow) /* Found */
402 break; 418 return flow;
403 } 419 }
420 return NULL;
421}
404 422
405 return flow; 423static struct table_instance *table_instance_expand(struct table_instance *ti)
424{
425 return table_instance_rehash(ti, ti->n_buckets * 2);
406} 426}
407 427
408void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) 428void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
409{ 429{
430 struct table_instance *ti = NULL;
431 struct table_instance *new_ti = NULL;
432
433 ti = ovsl_dereference(table->ti);
434
435 /* Expand table, if necessary, to make room. */
436 if (table->count > ti->n_buckets)
437 new_ti = table_instance_expand(ti);
438 else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
439 new_ti = table_instance_rehash(ti, ti->n_buckets);
440
441 if (new_ti && !IS_ERR(new_ti)) {
442 rcu_assign_pointer(table->ti, new_ti);
443 ovs_flow_tbl_destroy(table, true);
444 ti = ovsl_dereference(table->ti);
445 table->last_rehash = jiffies;
446 }
447
410 flow->hash = flow_hash(&flow->key, flow->mask->range.start, 448 flow->hash = flow_hash(&flow->key, flow->mask->range.start,
411 flow->mask->range.end); 449 flow->mask->range.end);
412 __tbl_insert(table, flow); 450 table_instance_insert(ti, flow);
451 table->count++;
413} 452}
414 453
415void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) 454void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
416{ 455{
456 struct table_instance *ti = ovsl_dereference(table->ti);
457
417 BUG_ON(table->count == 0); 458 BUG_ON(table->count == 0);
418 hlist_del_rcu(&flow->hash_node[table->node_ver]); 459 hlist_del_rcu(&flow->hash_node[ti->node_ver]);
419 table->count--; 460 table->count--;
420} 461}
421 462
@@ -475,7 +516,7 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
475{ 516{
476 struct list_head *ml; 517 struct list_head *ml;
477 518
478 list_for_each(ml, tbl->mask_list) { 519 list_for_each(ml, &tbl->mask_list) {
479 struct sw_flow_mask *m; 520 struct sw_flow_mask *m;
480 m = container_of(ml, struct sw_flow_mask, list); 521 m = container_of(ml, struct sw_flow_mask, list);
481 if (mask_equal(mask, m)) 522 if (mask_equal(mask, m))
@@ -492,7 +533,7 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
492 */ 533 */
493void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask) 534void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
494{ 535{
495 list_add_rcu(&mask->list, tbl->mask_list); 536 list_add_rcu(&mask->list, &tbl->mask_list);
496} 537}
497 538
498/* Initializes the flow module. 539/* Initializes the flow module.