-rw-r--r--	include/linux/netpoll.h	11
-rw-r--r--	net/core/dev.c	27
-rw-r--r--	net/core/netpoll.c	40
3 files changed, 76 insertions, 2 deletions
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index f54c3bb6a22b..ab856d507b7e 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -38,8 +38,9 @@ struct netpoll {
 struct netpoll_info {
 	atomic_t refcnt;
 
-	int rx_flags;
+	unsigned long rx_flags;
 	spinlock_t rx_lock;
+	struct mutex dev_lock;
 	struct list_head rx_np; /* netpolls that registered an rx_hook */
 
 	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
@@ -51,6 +52,14 @@ struct netpoll_info {
 	struct rcu_head rcu;
 };
 
+#ifdef CONFIG_NETPOLL
+extern int netpoll_rx_disable(struct net_device *dev);
+extern void netpoll_rx_enable(struct net_device *dev);
+#else
+static inline int netpoll_rx_disable(struct net_device *dev) { return 0; }
+static inline void netpoll_rx_enable(struct net_device *dev) { return; }
+#endif
+
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
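
For context: the pair declared above is meant to bracket any device state
transition that must not race with netpoll's rx servicing, and the
!CONFIG_NETPOLL stubs let callers invoke it unconditionally. A minimal
caller sketch, assuming only the API above (example_reconfigure() is a
hypothetical illustration, not part of this patch):

	/* Hypothetical caller, mirroring what __dev_open()/__dev_close()
	 * do below: hold netpoll rx off across a device state change.
	 */
	static int example_reconfigure(struct net_device *dev)
	{
		int err;

		/* May sleep; a no-op stub when CONFIG_NETPOLL is unset */
		err = netpoll_rx_disable(dev);
		if (err)
			return err;

		/* ... change device state; netpoll_poll_dev() backs off
		 * because it cannot take npinfo->dev_lock ...
		 */

		netpoll_rx_enable(dev);
		return 0;
	}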
diff --git a/net/core/dev.c b/net/core/dev.c
index e04bfdc9e3e4..2b275a7b8677 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1266,6 +1266,14 @@ static int __dev_open(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return -ENODEV;
 
+	/* Block netpoll from trying to do any rx path servicing.
+	 * If we don't do this there is a chance ndo_poll_controller
+	 * or ndo_poll may be running while we open the device
+	 */
+	ret = netpoll_rx_disable(dev);
+	if (ret)
+		return ret;
+
 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
 	ret = notifier_to_errno(ret);
 	if (ret)
@@ -1279,6 +1287,8 @@ static int __dev_open(struct net_device *dev)
 	if (!ret && ops->ndo_open)
 		ret = ops->ndo_open(dev);
 
+	netpoll_rx_enable(dev);
+
 	if (ret)
 		clear_bit(__LINK_STATE_START, &dev->state);
 	else {
@@ -1370,9 +1380,16 @@ static int __dev_close(struct net_device *dev)
 	int retval;
 	LIST_HEAD(single);
 
+	/* Temporarily disable netpoll until the interface is down */
+	retval = netpoll_rx_disable(dev);
+	if (retval)
+		return retval;
+
 	list_add(&dev->unreg_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
+
+	netpoll_rx_enable(dev);
 	return retval;
 }
 
@@ -1408,14 +1425,22 @@ static int dev_close_many(struct list_head *head)
  */
 int dev_close(struct net_device *dev)
 {
+	int ret = 0;
 	if (dev->flags & IFF_UP) {
 		LIST_HEAD(single);
 
+		/* Block netpoll rx while the interface is going down */
+		ret = netpoll_rx_disable(dev);
+		if (ret)
+			return ret;
+
 		list_add(&dev->unreg_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
+
+		netpoll_rx_enable(dev);
 	}
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(dev_close);
 
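
The dev.c hunks above are the writer half of the scheme: the open/close
paths take npinfo->dev_lock via netpoll_rx_disable() and release it via
netpoll_rx_enable() once the transition completes (note that __dev_open()
re-enables before checking ret, so the mutex is released on both the
success and failure paths). The poller half, in net/core/netpoll.c below,
only ever trylocks. A runnable userspace analogy of that gate, with
pthreads standing in for the kernel mutex (all names illustrative):

	/* Userspace analogy of the dev_lock gate (illustrative only).
	 * A "poller" thread trylocks and skips its pass whenever the
	 * "control" thread holds the lock across a simulated open/close.
	 * Build: cc -pthread -o gate gate.c
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
	static int running = 1;

	static void *poller(void *arg)
	{
		(void)arg;
		while (__atomic_load_n(&running, __ATOMIC_RELAXED)) {
			if (pthread_mutex_trylock(&dev_lock) != 0) {
				/* control path is mid-transition: back off,
				 * like netpoll_poll_dev() returning early */
				usleep(1000);
				continue;
			}
			/* safe to "service rx" here */
			pthread_mutex_unlock(&dev_lock);
			usleep(1000);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, poller, NULL);

		pthread_mutex_lock(&dev_lock);   /* netpoll_rx_disable() analogue */
		puts("state change: poller locked out");
		usleep(10000);
		pthread_mutex_unlock(&dev_lock); /* netpoll_rx_enable() analogue */

		__atomic_store_n(&running, 0, __ATOMIC_RELAXED);
		pthread_join(t, NULL);
		return 0;
	}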
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 331ccb90f915..edcd9ad95304 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -47,6 +47,8 @@ static struct sk_buff_head skb_pool;
 
 static atomic_t trapped;
 
+static struct srcu_struct netpoll_srcu;
+
 #define USEC_PER_POLL 50
 #define NETPOLL_RX_ENABLED 1
 #define NETPOLL_RX_DROP 2
@@ -199,6 +201,13 @@ static void netpoll_poll_dev(struct net_device *dev)
 	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
 
+	/* Don't do any rx activity if the dev_lock mutex is held
+	 * the dev_open/close paths use this to block netpoll activity
+	 * while changing device state
+	 */
+	if (!mutex_trylock(&dev->npinfo->dev_lock))
+		return;
+
 	if (!dev || !netif_running(dev))
 		return;
 
@@ -211,6 +220,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	poll_napi(dev);
 
+	mutex_unlock(&dev->npinfo->dev_lock);
+
 	if (dev->flags & IFF_SLAVE) {
 		if (ni) {
 			struct net_device *bond_dev;
@@ -231,6 +242,31 @@ static void netpoll_poll_dev(struct net_device *dev)
 	zap_completion_queue();
 }
 
+int netpoll_rx_disable(struct net_device *dev)
+{
+	struct netpoll_info *ni;
+	int idx;
+	might_sleep();
+	idx = srcu_read_lock(&netpoll_srcu);
+	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
+	if (ni)
+		mutex_lock(&ni->dev_lock);
+	srcu_read_unlock(&netpoll_srcu, idx);
+	return 0;
+}
+EXPORT_SYMBOL(netpoll_rx_disable);
+
+void netpoll_rx_enable(struct net_device *dev)
+{
+	struct netpoll_info *ni;
+	rcu_read_lock();
+	ni = rcu_dereference(dev->npinfo);
+	if (ni)
+		mutex_unlock(&ni->dev_lock);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(netpoll_rx_enable);
+
 static void refill_skbs(void)
 {
 	struct sk_buff *skb;
@@ -1004,6 +1040,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 		INIT_LIST_HEAD(&npinfo->rx_np);
 
 		spin_lock_init(&npinfo->rx_lock);
+		mutex_init(&npinfo->dev_lock);
 		skb_queue_head_init(&npinfo->neigh_tx);
 		skb_queue_head_init(&npinfo->txq);
 		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
@@ -1169,6 +1206,7 @@ EXPORT_SYMBOL(netpoll_setup);
 static int __init netpoll_init(void)
 {
 	skb_queue_head_init(&skb_pool);
+	init_srcu_struct(&netpoll_srcu);
 	return 0;
 }
 core_initcall(netpoll_init);
@@ -1208,6 +1246,8 @@ void __netpoll_cleanup(struct netpoll *np)
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
 
+	synchronize_srcu(&netpoll_srcu);
+
 	if (atomic_dec_and_test(&npinfo->refcnt)) {
 		const struct net_device_ops *ops;
 
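
A note on the two RCU flavors used above: netpoll_rx_disable() must sleep
in mutex_lock() while it still holds a reference to npinfo, which plain
RCU read sections forbid, so the lookup runs under SRCU, whose readers may
block. netpoll_rx_enable() only calls mutex_unlock(), which never sleeps,
so ordinary rcu_read_lock() suffices there. The synchronize_srcu() added
to __netpoll_cleanup() then guarantees that no disable() is still
dereferencing npinfo before the final reference is dropped. Schematically
(a sketch of the protocol, not literal kernel code):

	/* Reader side (netpoll_rx_disable): sleeping inside the read
	 * section is legal with SRCU, so the mutex is taken within it.
	 */
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		mutex_lock(&ni->dev_lock);	/* may sleep */
	srcu_read_unlock(&netpoll_srcu, idx);

	/* Updater side (__netpoll_cleanup): wait out every reader that
	 * might still see the old npinfo before dropping the last ref.
	 */
	synchronize_srcu(&netpoll_srcu);
	/* ... atomic_dec_and_test(&npinfo->refcnt) and free as before ... */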