aboutsummaryrefslogtreecommitdiffstats
path: root/net/openvswitch/datapath.c
diff options
context:
space:
mode:
authorPravin B Shelar <pshelar@nicira.com>2013-09-17 12:38:23 -0400
committerJesse Gross <jesse@nicira.com>2013-09-17 12:38:23 -0400
commite7f133290660d976da8cb20e9bc7310d0cd19341 (patch)
treec4eb7687f7aeb2c20a3e23006d811a7495ca9ed4 /net/openvswitch/datapath.c
parent272b98c6455f00884f0350f775c5342358ebb73f (diff)
openvswitch: Move flow table rehashing to flow install.
Rehashing in ovs-workqueue can cause ovs-mutex lock contentions in case of heavy flow setups where both need ovs-mutex. So by moving rehashing to flow-setup we can eliminate contention. This also simplifies ovs locking and reduces dependence on workqueue. Signed-off-by: Pravin B Shelar <pshelar@nicira.com> Signed-off-by: Jesse Gross <jesse@nicira.com>
Diffstat (limited to 'net/openvswitch/datapath.c')
-rw-r--r--net/openvswitch/datapath.c50
1 files changed, 11 insertions, 39 deletions
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2aa13bd7f2b2..2e1a9c24e380 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -60,8 +60,6 @@
60 60
61 61
62#define REHASH_FLOW_INTERVAL (10 * 60 * HZ) 62#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
63static void rehash_flow_table(struct work_struct *work);
64static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
65 63
66int ovs_net_id __read_mostly; 64int ovs_net_id __read_mostly;
67 65
@@ -1289,22 +1287,25 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
1289 /* Check if this is a duplicate flow */ 1287 /* Check if this is a duplicate flow */
1290 flow = ovs_flow_lookup(table, &key); 1288 flow = ovs_flow_lookup(table, &key);
1291 if (!flow) { 1289 if (!flow) {
1290 struct flow_table *new_table = NULL;
1292 struct sw_flow_mask *mask_p; 1291 struct sw_flow_mask *mask_p;
1292
1293 /* Bail out if we're not allowed to create a new flow. */ 1293 /* Bail out if we're not allowed to create a new flow. */
1294 error = -ENOENT; 1294 error = -ENOENT;
1295 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET) 1295 if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
1296 goto err_unlock_ovs; 1296 goto err_unlock_ovs;
1297 1297
1298 /* Expand table, if necessary, to make room. */ 1298 /* Expand table, if necessary, to make room. */
1299 if (ovs_flow_tbl_need_to_expand(table)) { 1299 if (ovs_flow_tbl_need_to_expand(table))
1300 struct flow_table *new_table;
1301
1302 new_table = ovs_flow_tbl_expand(table); 1300 new_table = ovs_flow_tbl_expand(table);
1303 if (!IS_ERR(new_table)) { 1301 else if (time_after(jiffies, dp->last_rehash + REHASH_FLOW_INTERVAL))
1304 rcu_assign_pointer(dp->table, new_table); 1302 new_table = ovs_flow_tbl_rehash(table);
1305 ovs_flow_tbl_destroy(table, true); 1303
1306 table = ovsl_dereference(dp->table); 1304 if (new_table && !IS_ERR(new_table)) {
1307 } 1305 rcu_assign_pointer(dp->table, new_table);
1306 ovs_flow_tbl_destroy(table, true);
1307 table = ovsl_dereference(dp->table);
1308 dp->last_rehash = jiffies;
1308 } 1309 }
1309 1310
1310 /* Allocate flow. */ 1311 /* Allocate flow. */
@@ -2336,32 +2337,6 @@ error:
2336 return err; 2337 return err;
2337} 2338}
2338 2339
2339static void rehash_flow_table(struct work_struct *work)
2340{
2341 struct datapath *dp;
2342 struct net *net;
2343
2344 ovs_lock();
2345 rtnl_lock();
2346 for_each_net(net) {
2347 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2348
2349 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2350 struct flow_table *old_table = ovsl_dereference(dp->table);
2351 struct flow_table *new_table;
2352
2353 new_table = ovs_flow_tbl_rehash(old_table);
2354 if (!IS_ERR(new_table)) {
2355 rcu_assign_pointer(dp->table, new_table);
2356 ovs_flow_tbl_destroy(old_table, true);
2357 }
2358 }
2359 }
2360 rtnl_unlock();
2361 ovs_unlock();
2362 schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2363}
2364
2365static int __net_init ovs_init_net(struct net *net) 2340static int __net_init ovs_init_net(struct net *net)
2366{ 2341{
2367 struct ovs_net *ovs_net = net_generic(net, ovs_net_id); 2342 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
@@ -2419,8 +2394,6 @@ static int __init dp_init(void)
2419 if (err < 0) 2394 if (err < 0)
2420 goto error_unreg_notifier; 2395 goto error_unreg_notifier;
2421 2396
2422 schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
2423
2424 return 0; 2397 return 0;
2425 2398
2426error_unreg_notifier: 2399error_unreg_notifier:
@@ -2437,7 +2410,6 @@ error:
2437 2410
2438static void dp_cleanup(void) 2411static void dp_cleanup(void)
2439{ 2412{
2440 cancel_delayed_work_sync(&rehash_flow_wq);
2441 dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); 2413 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2442 unregister_netdevice_notifier(&ovs_dp_device_notifier); 2414 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2443 unregister_pernet_device(&ovs_net_ops); 2415 unregister_pernet_device(&ovs_net_ops);