author	Octavian Purdila <opurdila@ixiacom.com>	2010-12-13 07:44:07 -0500
committer	David S. Miller <davem@davemloft.net>	2010-12-16 17:04:44 -0500
commit	443457242beb6716b43db4d62fe148eab5515505
tree	0dbcf7dbaa7c6be6ca84631f3e865cde3d6b59f2
parent	c6c8fea29769d998d94fcec9b9f14d4b52b349d3
net: factorize sync-rcu call in unregister_netdevice_many
Add dev_close_many and dev_deactivate_many to factorize another
sync-rcu operation on the netdevice unregister path.

    $ modprobe dummy numdummies=10000
    $ ip link set dev dummy* up
    $ time rmmod dummy

    Without the patch        With the patch

    real    0m 24.63s        real    0m 5.15s
    user    0m  0.00s        user    0m 0.00s
    sys     0m  6.05s        sys     0m 5.14s

Signed-off-by: Octavian Purdila <opurdila@ixiacom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
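The win comes from paying for one RCU grace period per batch instead of one
per device: tearing down N devices one at a time waits out N grace periods,
while the *_many() variants wait once for the whole list. A rough userspace
sketch of that cost model (hypothetical names throughout; usleep() merely
stands in for synchronize_rcu()):

/* Toy model of the batching win.  Illustrative only: fake_synchronize_rcu()
 * stands in for a real RCU grace period, and the "devices" are just loop
 * indices.  None of these names are kernel API.
 */
#include <stdio.h>
#include <unistd.h>

#define NDEV     10000
#define GRACE_US 100		/* pretend one grace period costs 100us */

static void fake_synchronize_rcu(void) { usleep(GRACE_US); }

static void close_one(void)		/* old path: per-device wait */
{
	fake_synchronize_rcu();
}

static void close_many(int ndev)	/* new path: one shared wait */
{
	(void)ndev;			/* per-device teardown elided */
	fake_synchronize_rcu();
}

int main(void)
{
	int i;

	for (i = 0; i < NDEV; i++)	/* NDEV grace periods */
		close_one();
	close_many(NDEV);		/* a single grace period */
	printf("old: %d waits, new: 1 wait\n", NDEV);
	return 0;
}

The first loop costs roughly NDEV * GRACE_US, the batched call roughly one
GRACE_US, which is the same shape as the rmmod numbers above.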
-rw-r--r--	include/net/sch_generic.h	  1
-rw-r--r--	net/core/dev.c	118
-rw-r--r--	net/sched/sch_generic.c	 29
3 files changed, 99 insertions, 49 deletions
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index ea1f8a83160d..786cc396cb4a 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -321,6 +321,7 @@ extern void dev_init_scheduler(struct net_device *dev);
 extern void dev_shutdown(struct net_device *dev);
 extern void dev_activate(struct net_device *dev);
 extern void dev_deactivate(struct net_device *dev);
+extern void dev_deactivate_many(struct list_head *head);
 extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 				     struct Qdisc *qdisc);
 extern void qdisc_reset(struct Qdisc *qdisc);
diff --git a/net/core/dev.c b/net/core/dev.c
index 7ac26d2b9722..794b20de5d44 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1222,52 +1222,90 @@ int dev_open(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_open);
 
-static int __dev_close(struct net_device *dev)
+static int __dev_close_many(struct list_head *head)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
+	struct net_device *dev;
 
 	ASSERT_RTNL();
 	might_sleep();
 
-	/*
-	 *	Tell people we are going down, so that they can
-	 *	prepare to death, when device is still operating.
-	 */
-	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		/*
+		 *	Tell people we are going down, so that they can
+		 *	prepare to death, when device is still operating.
+		 */
+		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
-	clear_bit(__LINK_STATE_START, &dev->state);
+		clear_bit(__LINK_STATE_START, &dev->state);
 
-	/* Synchronize to scheduled poll. We cannot touch poll list,
-	 * it can be even on different cpu. So just clear netif_running().
-	 *
-	 * dev->stop() will invoke napi_disable() on all of it's
-	 * napi_struct instances on this device.
-	 */
-	smp_mb__after_clear_bit(); /* Commit netif_running(). */
+		/* Synchronize to scheduled poll. We cannot touch poll list, it
+		 * can be even on different cpu. So just clear netif_running().
+		 *
+		 * dev->stop() will invoke napi_disable() on all of it's
+		 * napi_struct instances on this device.
+		 */
+		smp_mb__after_clear_bit(); /* Commit netif_running(). */
+	}
 
-	dev_deactivate(dev);
+	dev_deactivate_many(head);
 
-	/*
-	 *	Call the device specific close. This cannot fail.
-	 *	Only if device is UP
-	 *
-	 *	We allow it to be called even after a DETACH hot-plug
-	 *	event.
-	 */
-	if (ops->ndo_stop)
-		ops->ndo_stop(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		const struct net_device_ops *ops = dev->netdev_ops;
 
-	/*
-	 *	Device is now down.
-	 */
+		/*
+		 *	Call the device specific close. This cannot fail.
+		 *	Only if device is UP
+		 *
+		 *	We allow it to be called even after a DETACH hot-plug
+		 *	event.
+		 */
+		if (ops->ndo_stop)
+			ops->ndo_stop(dev);
+
+		/*
+		 *	Device is now down.
+		 */
+
+		dev->flags &= ~IFF_UP;
+
+		/*
+		 *	Shutdown NET_DMA
+		 */
+		net_dmaengine_put();
+	}
 
-	dev->flags &= ~IFF_UP;
+	return 0;
+}
+
+static int __dev_close(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	return __dev_close_many(&single);
+}
+
+int dev_close_many(struct list_head *head)
+{
+	struct net_device *dev, *tmp;
+	LIST_HEAD(tmp_list);
+
+	list_for_each_entry_safe(dev, tmp, head, unreg_list)
+		if (!(dev->flags & IFF_UP))
+			list_move(&dev->unreg_list, &tmp_list);
+
+	__dev_close_many(head);
 
 	/*
-	 *	Shutdown NET_DMA
+	 * Tell people we are down
 	 */
-	net_dmaengine_put();
+	list_for_each_entry(dev, head, unreg_list) {
+		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+		call_netdevice_notifiers(NETDEV_DOWN, dev);
+	}
 
+	/* rollback_registered_many needs the complete original list */
+	list_splice(&tmp_list, head);
 	return 0;
 }
 
@@ -1282,16 +1320,10 @@ static int __dev_close(struct net_device *dev)
  */
 int dev_close(struct net_device *dev)
 {
-	if (!(dev->flags & IFF_UP))
-		return 0;
-
-	__dev_close(dev);
+	LIST_HEAD(single);
 
-	/*
-	 *	Tell people we are down
-	 */
-	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
-	call_netdevice_notifiers(NETDEV_DOWN, dev);
+	list_add(&dev->unreg_list, &single);
+	dev_close_many(&single);
 
 	return 0;
 }
@@ -4963,10 +4995,12 @@ static void rollback_registered_many(struct list_head *head)
 		}
 
 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
+	}
 
-		/* If device is running, close it first. */
-		dev_close(dev);
+	/* If device is running, close it first. */
+	dev_close_many(head);
 
+	list_for_each_entry(dev, head, unreg_list) {
 		/* And unlink it from device chain. */
 		unlist_netdevice(dev);
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 0918834ee4a1..34dc598440a2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -810,20 +810,35 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
-void dev_deactivate(struct net_device *dev)
+void dev_deactivate_many(struct list_head *head)
 {
-	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	if (dev_ingress_queue(dev))
-		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+	struct net_device *dev;
 
-	dev_watchdog_down(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+					 &noop_qdisc);
+		if (dev_ingress_queue(dev))
+			dev_deactivate_queue(dev, dev_ingress_queue(dev),
+					     &noop_qdisc);
+
+		dev_watchdog_down(dev);
+	}
 
 	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	while (some_qdisc_is_busy(dev))
-		yield();
+	list_for_each_entry(dev, head, unreg_list)
+		while (some_qdisc_is_busy(dev))
+			yield();
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	dev_deactivate_many(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
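A note on the shape of the refactor: each single-device entry point
(__dev_close(), dev_close(), dev_deactivate()) is kept as a thin wrapper that
builds a one-element list on the stack and calls the batched variant, so the
teardown logic exists exactly once. A self-contained userspace sketch of that
wrapper pattern (the minimal intrusive list below only mimics <linux/list.h>;
all names are illustrative, not kernel API):

/* One-element-batch wrapper pattern, modeled after the patch above. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct net_device {
	const char *name;
	struct list_head unreg_list;	/* links devices into a batch */
};

static void dev_close_many(struct list_head *head)
{
	struct list_head *pos;

	for (pos = head->next; pos != head; pos = pos->next)
		printf("closing %s\n",
		       list_entry(pos, struct net_device, unreg_list)->name);
	/* ... one expensive synchronization here, paid once per batch */
}

static void dev_close(struct net_device *dev)
{
	LIST_HEAD(single);		/* a batch of exactly one */

	list_add(&dev->unreg_list, &single);
	dev_close_many(&single);
}

int main(void)
{
	struct net_device d = { .name = "dummy0" };

	dev_close(&d);			/* prints "closing dummy0" */
	return 0;
}

The wrappers keep single-device callers working unchanged while batch callers
such as rollback_registered_many() pass thousands of devices in one list.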