author     Pravin B Shelar <pshelar@nicira.com>      2013-10-04 03:17:42 -0400
committer  Jesse Gross <jesse@nicira.com>            2013-10-04 03:18:30 -0400
commit     618ed0c805b64c820279f50732110ab873221c3b
tree       28af2a6cbc7d81cbc19b02fb4b9c79079a03a8ce  /net/openvswitch
parent     b637e4988c2d689bb43f943a5af0e684a4981159
openvswitch: Simplify mega-flow APIs.
Hides mega-flow implementation in flow_table.c rather than
datapath.c.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
Diffstat (limited to 'net/openvswitch')
-rw-r--r--   net/openvswitch/datapath.c   |  27
-rw-r--r--   net/openvswitch/flow_table.c | 138
-rw-r--r--   net/openvswitch/flow_table.h |  12
3 files changed, 89 insertions, 88 deletions
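In short: the mask find/allocate/refcount bookkeeping that ovs_flow_cmd_new_or_set() used to do by hand now happens inside ovs_flow_tbl_insert(), which takes the candidate mask and returns an error code. Below is a rough, userspace-only sketch of that find-or-create-plus-refcount pattern, with simplified stand-in types and names (not the kernel's structures), just to show the shape of the logic the flow table now hides:

/* Userspace sketch of the mask dedup + refcount pattern that
 * ovs_flow_tbl_insert() now keeps behind the flow-table API.
 * Types and names are simplified stand-ins, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct mask {
	unsigned long key;      /* stand-in for the real masked key + range */
	int ref_count;
	struct mask *next;      /* stand-in for the RCU mask list */
};

struct table {
	struct mask *mask_list;
};

/* Find an existing identical mask; otherwise allocate and link a new one.
 * Either way the caller gets back a shared, reference-counted mask. */
static struct mask *mask_find_or_insert(struct table *tbl, unsigned long key)
{
	struct mask *m;

	for (m = tbl->mask_list; m; m = m->next)
		if (m->key == key)
			break;

	if (!m) {
		m = calloc(1, sizeof(*m));
		if (!m)
			return NULL;    /* kernel version returns -ENOMEM */
		m->key = key;
		m->next = tbl->mask_list;
		tbl->mask_list = m;
	}

	m->ref_count++;
	return m;
}

int main(void)
{
	struct table tbl = { .mask_list = NULL };

	struct mask *a = mask_find_or_insert(&tbl, 0xff00);
	struct mask *b = mask_find_or_insert(&tbl, 0xff00); /* deduplicated */
	struct mask *c = mask_find_or_insert(&tbl, 0xffff);

	printf("a == b: %d, a->ref_count = %d, c->ref_count = %d\n",
	       a == b, a->ref_count, c->ref_count);
	return 0;
}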
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 60b9be3b9477..cf270973095d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -161,7 +161,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 {
 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
 
-	ovs_flow_tbl_destroy(&dp->table, false);
+	ovs_flow_tbl_destroy(&dp->table);
 	free_percpu(dp->stats_percpu);
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp->ports);
@@ -795,8 +795,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 	/* Check if this is a duplicate flow */
 	flow = ovs_flow_tbl_lookup(&dp->table, &key);
 	if (!flow) {
-		struct sw_flow_mask *mask_p;
-
 		/* Bail out if we're not allowed to create a new flow. */
 		error = -ENOENT;
 		if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
@@ -812,25 +810,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
 
 		flow->key = masked_key;
 		flow->unmasked_key = key;
-
-		/* Make sure mask is unique in the system */
-		mask_p = ovs_sw_flow_mask_find(&dp->table, &mask);
-		if (!mask_p) {
-			/* Allocate a new mask if none exsits. */
-			mask_p = ovs_sw_flow_mask_alloc();
-			if (!mask_p)
-				goto err_flow_free;
-			mask_p->key = mask.key;
-			mask_p->range = mask.range;
-			ovs_sw_flow_mask_insert(&dp->table, mask_p);
-		}
-
-		ovs_sw_flow_mask_add_ref(mask_p);
-		flow->mask = mask_p;
 		rcu_assign_pointer(flow->sf_acts, acts);
 
 		/* Put flow in bucket. */
-		ovs_flow_tbl_insert(&dp->table, flow);
+		error = ovs_flow_tbl_insert(&dp->table, flow, &mask);
+		if (error) {
+			acts = NULL;
+			goto err_flow_free;
+		}
 
 		reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
 						info->snd_seq, OVS_FLOW_CMD_NEW);
@@ -1236,7 +1223,7 @@ err_destroy_ports_array:
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(&dp->table, false);
+	ovs_flow_tbl_destroy(&dp->table);
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
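One caller-side detail worth noting in the hunk above: since ovs_flow_tbl_insert() can now fail (for example when mask allocation fails), datapath.c clears 'acts' before jumping to err_flow_free. The actions have already been attached to the flow via rcu_assign_pointer(), so this presumably avoids freeing them twice on the error path.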
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 1c7e7732ed4c..036e019f8c3c 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -128,12 +128,36 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
 	flow_free(flow);
 }
 
+static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
+{
+	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
+
+	kfree(mask);
+}
+
+static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
+{
+	if (!mask)
+		return;
+
+	BUG_ON(!mask->ref_count);
+	mask->ref_count--;
+
+	if (!mask->ref_count) {
+		list_del_rcu(&mask->list);
+		if (deferred)
+			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
+		else
+			kfree(mask);
+	}
+}
+
 void ovs_flow_free(struct sw_flow *flow, bool deferred)
 {
 	if (!flow)
 		return;
 
-	ovs_sw_flow_mask_del_ref(flow->mask, deferred);
+	flow_mask_del_ref(flow->mask, deferred);
 
 	if (deferred)
 		call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -225,11 +249,11 @@ static void table_instance_destroy(struct table_instance *ti, bool deferred)
 	__table_instance_destroy(ti);
 }
 
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
+void ovs_flow_tbl_destroy(struct flow_table *table)
 {
 	struct table_instance *ti = ovsl_dereference(table->ti);
 
-	table_instance_destroy(ti, deferred);
+	table_instance_destroy(ti, false);
 }
 
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -304,7 +328,7 @@ static struct table_instance *table_instance_rehash(struct table_instance *ti,
 
 	new_ti = table_instance_alloc(n_buckets);
 	if (!new_ti)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	flow_table_copy_flows(ti, new_ti);
 
@@ -425,32 +449,6 @@ static struct table_instance *table_instance_expand(struct table_instance *ti)
 	return table_instance_rehash(ti, ti->n_buckets * 2);
 }
 
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
-{
-	struct table_instance *ti = NULL;
-	struct table_instance *new_ti = NULL;
-
-	ti = ovsl_dereference(table->ti);
-
-	/* Expand table, if necessary, to make room. */
-	if (table->count > ti->n_buckets)
-		new_ti = table_instance_expand(ti);
-	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
-		new_ti = table_instance_rehash(ti, ti->n_buckets);
-
-	if (new_ti && !IS_ERR(new_ti)) {
-		rcu_assign_pointer(table->ti, new_ti);
-		ovs_flow_tbl_destroy(table, true);
-		ti = ovsl_dereference(table->ti);
-		table->last_rehash = jiffies;
-	}
-
-	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
-			flow->mask->range.end);
-	table_instance_insert(ti, flow);
-	table->count++;
-}
-
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
 	struct table_instance *ti = ovsl_dereference(table->ti);
@@ -460,7 +458,7 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 	table->count--;
 }
 
-struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
+static struct sw_flow_mask *mask_alloc(void)
 {
 	struct sw_flow_mask *mask;
 
@@ -471,35 +469,11 @@ struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
 	return mask;
 }
 
-void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
+static void mask_add_ref(struct sw_flow_mask *mask)
 {
 	mask->ref_count++;
 }
 
-static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
-{
-	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
-
-	kfree(mask);
-}
-
-void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
-{
-	if (!mask)
-		return;
-
-	BUG_ON(!mask->ref_count);
-	mask->ref_count--;
-
-	if (!mask->ref_count) {
-		list_del_rcu(&mask->list);
-		if (deferred)
-			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
-		else
-			kfree(mask);
-	}
-}
-
 static bool mask_equal(const struct sw_flow_mask *a,
 		const struct sw_flow_mask *b)
 {
@@ -511,7 +485,7 @@ static bool mask_equal(const struct sw_flow_mask *a,
 		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
 }
 
-struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
+static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
 		const struct sw_flow_mask *mask)
 {
 	struct list_head *ml;
@@ -531,9 +505,55 @@ struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
  * The caller needs to make sure that 'mask' is not the same
  * as any masks that are already on the list.
  */
-void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
+static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
+			    struct sw_flow_mask *new)
+{
+	struct sw_flow_mask *mask;
+	mask = flow_mask_find(tbl, new);
+	if (!mask) {
+		/* Allocate a new mask if none exsits. */
+		mask = mask_alloc();
+		if (!mask)
+			return -ENOMEM;
+		mask->key = new->key;
+		mask->range = new->range;
+		list_add_rcu(&mask->list, &tbl->mask_list);
+	}
+
+	mask_add_ref(mask);
+	flow->mask = mask;
+	return 0;
+}
+
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+			struct sw_flow_mask *mask)
 {
-	list_add_rcu(&mask->list, &tbl->mask_list);
+	struct table_instance *new_ti = NULL;
+	struct table_instance *ti;
+	int err;
+
+	err = flow_mask_insert(table, flow, mask);
+	if (err)
+		return err;
+
+	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
+			flow->mask->range.end);
+	ti = ovsl_dereference(table->ti);
+	table_instance_insert(ti, flow);
+	table->count++;
+
+	/* Expand table, if necessary, to make room. */
+	if (table->count > ti->n_buckets)
+		new_ti = table_instance_expand(ti);
+	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
+		new_ti = table_instance_rehash(ti, ti->n_buckets);
+
+	if (new_ti) {
+		rcu_assign_pointer(table->ti, new_ti);
+		table_instance_destroy(ti, true);
+		table->last_rehash = jiffies;
+	}
+	return 0;
 }
 
 /* Initializes the flow module.
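Two side effects of pulling the insert path into flow_table.c are visible above: table_instance_rehash() now returns NULL on allocation failure instead of ERR_PTR(-ENOMEM), and expansion or the periodic rehash now runs after the flow has been added, with the old table instance destroyed (deferred) only when a new instance was actually built.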
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 5d1abe566c46..4db5f78b6f81 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -60,10 +60,11 @@ void ovs_flow_free(struct sw_flow *, bool deferred);
 
 int ovs_flow_tbl_init(struct flow_table *);
 int ovs_flow_tbl_count(struct flow_table *table);
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
+void ovs_flow_tbl_destroy(struct flow_table *table);
 int ovs_flow_tbl_flush(struct flow_table *flow_table);
 
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+			struct sw_flow_mask *mask);
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
 				       u32 *bucket, u32 *idx);
@@ -73,13 +74,6 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
 bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
 			       struct sw_flow_match *match);
 
-struct sw_flow_mask *ovs_sw_flow_mask_alloc(void);
-void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *);
-void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred);
-void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *);
-struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
-		const struct sw_flow_mask *);
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		const struct sw_flow_mask *mask);
-
 #endif /* flow_table.h */