path: root/net
author		Andy Zhou <azhou@nicira.com>	2014-01-21 12:31:04 -0500
committer	Jesse Gross <jesse@nicira.com>	2014-02-05 01:21:17 -0500
commit		e80857cce82da31e41a6599fc888dfc92e0167cc (patch)
tree		307e621f4780b302065c4a922cab850c864ceab6 /net
parent		aea0bb4f8ee513537ad84b9f3f609f96e272d98e (diff)
openvswitch: Fix kernel panic on ovs_flow_free
Both the mega flow mask's reference counter and the per flow table mask list
should only be accessed while holding the ovs_mutex() lock. However, this is
not true with ovs_flow_table_flush(). This patch fixes that bug.

Reported-by: Joe Stringer <joestringer@nicira.com>
Signed-off-by: Andy Zhou <azhou@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
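(Illustration only, not part of the patch: the locking rule the fix enforces,
condensed into a hypothetical helper mask_put() with the same semantics as the
patched ovs_flow_free() below. ASSERT_OVSL() is the existing OVS check that
ovs_mutex is held.)

/* Illustration: the invariant behind the fix. A mask's ref_count and its
 * linkage on the flow table's mask_list are only safe to touch with
 * ovs_mutex held, even when the memory itself is freed later via RCU.
 */
static void mask_put(struct sw_flow_mask *mask, bool deferred)
{
	ASSERT_OVSL();			/* caller must hold ovs_mutex */

	BUG_ON(!mask->ref_count);
	if (--mask->ref_count)
		return;			/* still referenced by other flows */

	list_del_rcu(&mask->list);	/* unlink under the same lock */
	if (deferred)
		kfree_rcu(mask, rcu);	/* RCU readers may still see it */
	else
		kfree(mask);		/* no concurrent readers possible */
}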
Diffstat (limited to 'net')
-rw-r--r--	net/openvswitch/datapath.c	 9
-rw-r--r--	net/openvswitch/flow_table.c	84
-rw-r--r--	net/openvswitch/flow_table.h	 2
3 files changed, 47 insertions(+), 48 deletions(-)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index d1a73a6102f8..e1b337e0bf4d 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,6 +55,7 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "flow_table.h"
 #include "flow_netlink.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
@@ -160,7 +161,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
 {
 	struct datapath *dp = container_of(rcu, struct datapath, rcu);
 
-	ovs_flow_tbl_destroy(&dp->table);
 	free_percpu(dp->stats_percpu);
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp->ports);
@@ -1287,7 +1287,7 @@ err_destroy_ports_array:
 err_destroy_percpu:
 	free_percpu(dp->stats_percpu);
 err_destroy_table:
-	ovs_flow_tbl_destroy(&dp->table);
+	ovs_flow_tbl_destroy(&dp->table, false);
 err_free_dp:
 	release_net(ovs_dp_get_net(dp));
 	kfree(dp);
@@ -1314,10 +1314,13 @@ static void __dp_destroy(struct datapath *dp)
 	list_del_rcu(&dp->list_node);
 
 	/* OVSP_LOCAL is datapath internal port. We need to make sure that
 	 * all ports in datapath are destroyed first before freeing datapath.
 	 */
 	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
 
+	/* RCU destroy the flow table */
+	ovs_flow_tbl_destroy(&dp->table, true);
+
 	call_rcu(&dp->rcu, destroy_dp_rcu);
 }
 
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c58a0fe3c889..bd14052ed342 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -153,29 +153,27 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
 	flow_free(flow);
 }
 
-static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
+void ovs_flow_free(struct sw_flow *flow, bool deferred)
 {
-	if (!mask)
+	if (!flow)
 		return;
 
-	BUG_ON(!mask->ref_count);
-	mask->ref_count--;
+	ASSERT_OVSL();
 
-	if (!mask->ref_count) {
-		list_del_rcu(&mask->list);
-		if (deferred)
-			kfree_rcu(mask, rcu);
-		else
-			kfree(mask);
-	}
-}
+	if (flow->mask) {
+		struct sw_flow_mask *mask = flow->mask;
 
-void ovs_flow_free(struct sw_flow *flow, bool deferred)
-{
-	if (!flow)
-		return;
+		BUG_ON(!mask->ref_count);
+		mask->ref_count--;
 
-	flow_mask_del_ref(flow->mask, deferred);
+		if (!mask->ref_count) {
+			list_del_rcu(&mask->list);
+			if (deferred)
+				kfree_rcu(mask, rcu);
+			else
+				kfree(mask);
+		}
+	}
 
 	if (deferred)
 		call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -188,26 +186,9 @@ static void free_buckets(struct flex_array *buckets)
 	flex_array_free(buckets);
 }
 
+
 static void __table_instance_destroy(struct table_instance *ti)
 {
-	int i;
-
-	if (ti->keep_flows)
-		goto skip_flows;
-
-	for (i = 0; i < ti->n_buckets; i++) {
-		struct sw_flow *flow;
-		struct hlist_head *head = flex_array_get(ti->buckets, i);
-		struct hlist_node *n;
-		int ver = ti->node_ver;
-
-		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
-			hlist_del(&flow->hash_node[ver]);
-			ovs_flow_free(flow, false);
-		}
-	}
-
-skip_flows:
 	free_buckets(ti->buckets);
 	kfree(ti);
 }
@@ -258,20 +239,38 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
 
 static void table_instance_destroy(struct table_instance *ti, bool deferred)
 {
+	int i;
+
 	if (!ti)
 		return;
 
+	if (ti->keep_flows)
+		goto skip_flows;
+
+	for (i = 0; i < ti->n_buckets; i++) {
+		struct sw_flow *flow;
+		struct hlist_head *head = flex_array_get(ti->buckets, i);
+		struct hlist_node *n;
+		int ver = ti->node_ver;
+
+		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
+			hlist_del_rcu(&flow->hash_node[ver]);
+			ovs_flow_free(flow, deferred);
+		}
+	}
+
+skip_flows:
 	if (deferred)
 		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
 	else
 		__table_instance_destroy(ti);
 }
 
-void ovs_flow_tbl_destroy(struct flow_table *table)
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
 {
 	struct table_instance *ti = ovsl_dereference(table->ti);
 
-	table_instance_destroy(ti, false);
+	table_instance_destroy(ti, deferred);
 }
 
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -504,16 +503,11 @@ static struct sw_flow_mask *mask_alloc(void)
 
 	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
 	if (mask)
-		mask->ref_count = 0;
+		mask->ref_count = 1;
 
 	return mask;
 }
 
-static void mask_add_ref(struct sw_flow_mask *mask)
-{
-	mask->ref_count++;
-}
-
 static bool mask_equal(const struct sw_flow_mask *a,
 		       const struct sw_flow_mask *b)
 {
@@ -554,9 +548,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
 		mask->key = new->key;
 		mask->range = new->range;
 		list_add_rcu(&mask->list, &tbl->mask_list);
+	} else {
+		BUG_ON(!mask->ref_count);
+		mask->ref_count++;
 	}
 
-	mask_add_ref(mask);
 	flow->mask = mask;
 	return 0;
 }
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1996e34c0fd8..baaeb101924d 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -60,7 +60,7 @@ void ovs_flow_free(struct sw_flow *, bool deferred);
 
 int ovs_flow_tbl_init(struct flow_table *);
 int ovs_flow_tbl_count(struct flow_table *table);
-void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
 int ovs_flow_tbl_flush(struct flow_table *flow_table);
 
 int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
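(For reference, how the two call sites choose the new deferred flag,
condensed from the patched datapath.c above.)

/* ovs_dp_cmd_new() error path: the table was never published to readers,
 * so it can be torn down synchronously. */
ovs_flow_tbl_destroy(&dp->table, false);

/* __dp_destroy(): RCU readers may still be walking the table, so flows,
 * masks and buckets must only be freed after a grace period. */
ovs_flow_tbl_destroy(&dp->table, true);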