diff options
author | Andy Zhou <azhou@nicira.com> | 2014-01-21 12:31:04 -0500 |
---|---|---|
committer | Jesse Gross <jesse@nicira.com> | 2014-02-05 01:21:17 -0500 |
commit | e80857cce82da31e41a6599fc888dfc92e0167cc (patch) | |
tree | 307e621f4780b302065c4a922cab850c864ceab6 /net/openvswitch/flow_table.c | |
parent | aea0bb4f8ee513537ad84b9f3f609f96e272d98e (diff) |
openvswitch: Fix kernel panic on ovs_flow_free
Both the megaflow mask's reference counter and the per-flow-table mask list
should only be accessed while holding the ovs_mutex lock. However,
this is not true with ovs_flow_table_flush(). The patch fixes this bug.
Reported-by: Joe Stringer <joestringer@nicira.com>
Signed-off-by: Andy Zhou <azhou@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
Diffstat (limited to 'net/openvswitch/flow_table.c')
-rw-r--r-- | net/openvswitch/flow_table.c | 84 |
1 file changed, 40 insertions, 44 deletions
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index c58a0fe3c889..bd14052ed342 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c | |||
@@ -153,29 +153,27 @@ static void rcu_free_flow_callback(struct rcu_head *rcu) | |||
153 | flow_free(flow); | 153 | flow_free(flow); |
154 | } | 154 | } |
155 | 155 | ||
156 | static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred) | 156 | void ovs_flow_free(struct sw_flow *flow, bool deferred) |
157 | { | 157 | { |
158 | if (!mask) | 158 | if (!flow) |
159 | return; | 159 | return; |
160 | 160 | ||
161 | BUG_ON(!mask->ref_count); | 161 | ASSERT_OVSL(); |
162 | mask->ref_count--; | ||
163 | 162 | ||
164 | if (!mask->ref_count) { | 163 | if (flow->mask) { |
165 | list_del_rcu(&mask->list); | 164 | struct sw_flow_mask *mask = flow->mask; |
166 | if (deferred) | ||
167 | kfree_rcu(mask, rcu); | ||
168 | else | ||
169 | kfree(mask); | ||
170 | } | ||
171 | } | ||
172 | 165 | ||
173 | void ovs_flow_free(struct sw_flow *flow, bool deferred) | 166 | BUG_ON(!mask->ref_count); |
174 | { | 167 | mask->ref_count--; |
175 | if (!flow) | ||
176 | return; | ||
177 | 168 | ||
178 | flow_mask_del_ref(flow->mask, deferred); | 169 | if (!mask->ref_count) { |
170 | list_del_rcu(&mask->list); | ||
171 | if (deferred) | ||
172 | kfree_rcu(mask, rcu); | ||
173 | else | ||
174 | kfree(mask); | ||
175 | } | ||
176 | } | ||
179 | 177 | ||
180 | if (deferred) | 178 | if (deferred) |
181 | call_rcu(&flow->rcu, rcu_free_flow_callback); | 179 | call_rcu(&flow->rcu, rcu_free_flow_callback); |
@@ -188,26 +186,9 @@ static void free_buckets(struct flex_array *buckets) | |||
188 | flex_array_free(buckets); | 186 | flex_array_free(buckets); |
189 | } | 187 | } |
190 | 188 | ||
189 | |||
191 | static void __table_instance_destroy(struct table_instance *ti) | 190 | static void __table_instance_destroy(struct table_instance *ti) |
192 | { | 191 | { |
193 | int i; | ||
194 | |||
195 | if (ti->keep_flows) | ||
196 | goto skip_flows; | ||
197 | |||
198 | for (i = 0; i < ti->n_buckets; i++) { | ||
199 | struct sw_flow *flow; | ||
200 | struct hlist_head *head = flex_array_get(ti->buckets, i); | ||
201 | struct hlist_node *n; | ||
202 | int ver = ti->node_ver; | ||
203 | |||
204 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | ||
205 | hlist_del(&flow->hash_node[ver]); | ||
206 | ovs_flow_free(flow, false); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | skip_flows: | ||
211 | free_buckets(ti->buckets); | 192 | free_buckets(ti->buckets); |
212 | kfree(ti); | 193 | kfree(ti); |
213 | } | 194 | } |
@@ -258,20 +239,38 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | |||
258 | 239 | ||
259 | static void table_instance_destroy(struct table_instance *ti, bool deferred) | 240 | static void table_instance_destroy(struct table_instance *ti, bool deferred) |
260 | { | 241 | { |
242 | int i; | ||
243 | |||
261 | if (!ti) | 244 | if (!ti) |
262 | return; | 245 | return; |
263 | 246 | ||
247 | if (ti->keep_flows) | ||
248 | goto skip_flows; | ||
249 | |||
250 | for (i = 0; i < ti->n_buckets; i++) { | ||
251 | struct sw_flow *flow; | ||
252 | struct hlist_head *head = flex_array_get(ti->buckets, i); | ||
253 | struct hlist_node *n; | ||
254 | int ver = ti->node_ver; | ||
255 | |||
256 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | ||
257 | hlist_del_rcu(&flow->hash_node[ver]); | ||
258 | ovs_flow_free(flow, deferred); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | skip_flows: | ||
264 | if (deferred) | 263 | if (deferred) |
265 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); | 264 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); |
266 | else | 265 | else |
267 | __table_instance_destroy(ti); | 266 | __table_instance_destroy(ti); |
268 | } | 267 | } |
269 | 268 | ||
270 | void ovs_flow_tbl_destroy(struct flow_table *table) | 269 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) |
271 | { | 270 | { |
272 | struct table_instance *ti = ovsl_dereference(table->ti); | 271 | struct table_instance *ti = ovsl_dereference(table->ti); |
273 | 272 | ||
274 | table_instance_destroy(ti, false); | 273 | table_instance_destroy(ti, deferred); |
275 | } | 274 | } |
276 | 275 | ||
277 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, | 276 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, |
@@ -504,16 +503,11 @@ static struct sw_flow_mask *mask_alloc(void) | |||
504 | 503 | ||
505 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | 504 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); |
506 | if (mask) | 505 | if (mask) |
507 | mask->ref_count = 0; | 506 | mask->ref_count = 1; |
508 | 507 | ||
509 | return mask; | 508 | return mask; |
510 | } | 509 | } |
511 | 510 | ||
512 | static void mask_add_ref(struct sw_flow_mask *mask) | ||
513 | { | ||
514 | mask->ref_count++; | ||
515 | } | ||
516 | |||
517 | static bool mask_equal(const struct sw_flow_mask *a, | 511 | static bool mask_equal(const struct sw_flow_mask *a, |
518 | const struct sw_flow_mask *b) | 512 | const struct sw_flow_mask *b) |
519 | { | 513 | { |
@@ -554,9 +548,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, | |||
554 | mask->key = new->key; | 548 | mask->key = new->key; |
555 | mask->range = new->range; | 549 | mask->range = new->range; |
556 | list_add_rcu(&mask->list, &tbl->mask_list); | 550 | list_add_rcu(&mask->list, &tbl->mask_list); |
551 | } else { | ||
552 | BUG_ON(!mask->ref_count); | ||
553 | mask->ref_count++; | ||
557 | } | 554 | } |
558 | 555 | ||
559 | mask_add_ref(mask); | ||
560 | flow->mask = mask; | 556 | flow->mask = mask; |
561 | return 0; | 557 | return 0; |
562 | } | 558 | } |