aboutsummaryrefslogtreecommitdiffstats
path: root/net/openvswitch/flow_table.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/openvswitch/flow_table.c')
-rw-r--r--net/openvswitch/flow_table.c138
1 files changed, 79 insertions, 59 deletions
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 1c7e7732ed4c..036e019f8c3c 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -128,12 +128,36 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
128 flow_free(flow); 128 flow_free(flow);
129} 129}
130 130
131static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
132{
133 struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
134
135 kfree(mask);
136}
137
138static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
139{
140 if (!mask)
141 return;
142
143 BUG_ON(!mask->ref_count);
144 mask->ref_count--;
145
146 if (!mask->ref_count) {
147 list_del_rcu(&mask->list);
148 if (deferred)
149 call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
150 else
151 kfree(mask);
152 }
153}
154
131void ovs_flow_free(struct sw_flow *flow, bool deferred) 155void ovs_flow_free(struct sw_flow *flow, bool deferred)
132{ 156{
133 if (!flow) 157 if (!flow)
134 return; 158 return;
135 159
136 ovs_sw_flow_mask_del_ref(flow->mask, deferred); 160 flow_mask_del_ref(flow->mask, deferred);
137 161
138 if (deferred) 162 if (deferred)
139 call_rcu(&flow->rcu, rcu_free_flow_callback); 163 call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -225,11 +249,11 @@ static void table_instance_destroy(struct table_instance *ti, bool deferred)
225 __table_instance_destroy(ti); 249 __table_instance_destroy(ti);
226} 250}
227 251
228void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) 252void ovs_flow_tbl_destroy(struct flow_table *table)
229{ 253{
230 struct table_instance *ti = ovsl_dereference(table->ti); 254 struct table_instance *ti = ovsl_dereference(table->ti);
231 255
232 table_instance_destroy(ti, deferred); 256 table_instance_destroy(ti, false);
233} 257}
234 258
235struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, 259struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -304,7 +328,7 @@ static struct table_instance *table_instance_rehash(struct table_instance *ti,
304 328
305 new_ti = table_instance_alloc(n_buckets); 329 new_ti = table_instance_alloc(n_buckets);
306 if (!new_ti) 330 if (!new_ti)
307 return ERR_PTR(-ENOMEM); 331 return NULL;
308 332
309 flow_table_copy_flows(ti, new_ti); 333 flow_table_copy_flows(ti, new_ti);
310 334
@@ -425,32 +449,6 @@ static struct table_instance *table_instance_expand(struct table_instance *ti)
425 return table_instance_rehash(ti, ti->n_buckets * 2); 449 return table_instance_rehash(ti, ti->n_buckets * 2);
426} 450}
427 451
428void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
429{
430 struct table_instance *ti = NULL;
431 struct table_instance *new_ti = NULL;
432
433 ti = ovsl_dereference(table->ti);
434
435 /* Expand table, if necessary, to make room. */
436 if (table->count > ti->n_buckets)
437 new_ti = table_instance_expand(ti);
438 else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
439 new_ti = table_instance_rehash(ti, ti->n_buckets);
440
441 if (new_ti && !IS_ERR(new_ti)) {
442 rcu_assign_pointer(table->ti, new_ti);
443 ovs_flow_tbl_destroy(table, true);
444 ti = ovsl_dereference(table->ti);
445 table->last_rehash = jiffies;
446 }
447
448 flow->hash = flow_hash(&flow->key, flow->mask->range.start,
449 flow->mask->range.end);
450 table_instance_insert(ti, flow);
451 table->count++;
452}
453
454void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) 452void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
455{ 453{
456 struct table_instance *ti = ovsl_dereference(table->ti); 454 struct table_instance *ti = ovsl_dereference(table->ti);
@@ -460,7 +458,7 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
460 table->count--; 458 table->count--;
461} 459}
462 460
463struct sw_flow_mask *ovs_sw_flow_mask_alloc(void) 461static struct sw_flow_mask *mask_alloc(void)
464{ 462{
465 struct sw_flow_mask *mask; 463 struct sw_flow_mask *mask;
466 464
@@ -471,35 +469,11 @@ struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
471 return mask; 469 return mask;
472} 470}
473 471
474void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask) 472static void mask_add_ref(struct sw_flow_mask *mask)
475{ 473{
476 mask->ref_count++; 474 mask->ref_count++;
477} 475}
478 476
479static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
480{
481 struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
482
483 kfree(mask);
484}
485
486void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
487{
488 if (!mask)
489 return;
490
491 BUG_ON(!mask->ref_count);
492 mask->ref_count--;
493
494 if (!mask->ref_count) {
495 list_del_rcu(&mask->list);
496 if (deferred)
497 call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
498 else
499 kfree(mask);
500 }
501}
502
503static bool mask_equal(const struct sw_flow_mask *a, 477static bool mask_equal(const struct sw_flow_mask *a,
504 const struct sw_flow_mask *b) 478 const struct sw_flow_mask *b)
505{ 479{
@@ -511,7 +485,7 @@ static bool mask_equal(const struct sw_flow_mask *a,
511 && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0); 485 && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
512} 486}
513 487
514struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl, 488static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
515 const struct sw_flow_mask *mask) 489 const struct sw_flow_mask *mask)
516{ 490{
517 struct list_head *ml; 491 struct list_head *ml;
@@ -531,9 +505,55 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
531 * The caller needs to make sure that 'mask' is not the same 505 * The caller needs to make sure that 'mask' is not the same
532 * as any masks that are already on the list. 506 * as any masks that are already on the list.
533 */ 507 */
534void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask) 508static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
509 struct sw_flow_mask *new)
510{
511 struct sw_flow_mask *mask;
512 mask = flow_mask_find(tbl, new);
513 if (!mask) {
 514 /* Allocate a new mask if none exists. */
515 mask = mask_alloc();
516 if (!mask)
517 return -ENOMEM;
518 mask->key = new->key;
519 mask->range = new->range;
520 list_add_rcu(&mask->list, &tbl->mask_list);
521 }
522
523 mask_add_ref(mask);
524 flow->mask = mask;
525 return 0;
526}
527
528int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
529 struct sw_flow_mask *mask)
535{ 530{
536 list_add_rcu(&mask->list, &tbl->mask_list); 531 struct table_instance *new_ti = NULL;
532 struct table_instance *ti;
533 int err;
534
535 err = flow_mask_insert(table, flow, mask);
536 if (err)
537 return err;
538
539 flow->hash = flow_hash(&flow->key, flow->mask->range.start,
540 flow->mask->range.end);
541 ti = ovsl_dereference(table->ti);
542 table_instance_insert(ti, flow);
543 table->count++;
544
545 /* Expand table, if necessary, to make room. */
546 if (table->count > ti->n_buckets)
547 new_ti = table_instance_expand(ti);
548 else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
549 new_ti = table_instance_rehash(ti, ti->n_buckets);
550
551 if (new_ti) {
552 rcu_assign_pointer(table->ti, new_ti);
553 table_instance_destroy(ti, true);
554 table->last_rehash = jiffies;
555 }
556 return 0;
537} 557}
538 558
539/* Initializes the flow module. 559/* Initializes the flow module.