author     Neil Horman <nhorman@tuxdriver.com>      2013-02-11 05:25:30 -0500
committer  David S. Miller <davem@davemloft.net>    2013-02-11 19:19:33 -0500
commit     2cde6acd49daca58b96f1fbc697492825511ad31 (patch)
tree       77353244a904fbb41e8658cceedb4b5120c5552a
parent     f05de73bf82fbbc00265c06d12efb7273f7dc54a (diff)
netpoll: Fix __netpoll_free_rcu so that it can hold the rtnl lock
__netpoll_free_rcu is used to free netpoll structures when the rtnl_lock is already held. The mechanism is used to call __netpoll_cleanup asynchronously, outside of the holding of the rtnl_lock, so as to avoid deadlock. Unfortunately, __netpoll_cleanup modifies pointers (dev->np), which means the rtnl_lock must be held while calling it. It cannot be held there, however, because RCU callbacks may run in softirq context, which cannot sleep.

Fix this by converting the RCU callback to a work queue item that is guaranteed to be scheduled in process context, so that the rtnl lock can be held properly while calling __netpoll_cleanup.

Tested successfully by myself.

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Cong Wang <amwang@redhat.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
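For readers less familiar with the pattern, the sketch below illustrates the general shape of the conversion described above; it is not taken from the patch, and the names my_ctx, my_alloc, my_release and my_cleanup_work are invented for illustration. Instead of queueing the teardown as an RCU callback, which would run in softirq context where a sleeping lock such as the rtnl lock cannot be taken, the object carries a work_struct and the teardown is deferred to a work item, which runs in process context and may therefore call rtnl_lock().

/*
 * Minimal sketch (not from the patch) of deferring cleanup to process
 * context via a work item so that a sleeping lock can be taken.
 * All names here are illustrative only.
 */
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct my_ctx {
	struct work_struct cleanup_work;
	/* ... state that must be torn down under rtnl_lock() ... */
};

static void my_cleanup_work(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, cleanup_work);

	rtnl_lock();		/* safe: work items run in process context */
	/* tear down state reachable from the net_device here */
	rtnl_unlock();
	kfree(ctx);
}

/*
 * May be called from contexts that cannot sleep; it only queues the
 * work item and returns immediately.
 */
static void my_release(struct my_ctx *ctx)
{
	schedule_work(&ctx->cleanup_work);
}

static struct my_ctx *my_alloc(void)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		INIT_WORK(&ctx->cleanup_work, my_cleanup_work);
	return ctx;
}

This mirrors what the patch does to struct netpoll in the diff below: the rcu_head member is replaced by a work_struct named cleanup_work, INIT_WORK() wires it to netpoll_async_cleanup() in __netpoll_setup(), and the renamed __netpoll_free_async() simply calls schedule_work().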
-rw-r--r--  drivers/net/bonding/bond_main.c |  2
-rw-r--r--  include/linux/netpoll.h         |  4
-rw-r--r--  net/8021q/vlan_dev.c            |  2
-rw-r--r--  net/bridge/br_device.c          |  2
-rw-r--r--  net/core/netpoll.c              | 16
5 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 22399374b1e1..94c1534dd578 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1249,7 +1249,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
 		return;
 
 	slave->np = NULL;
-	__netpoll_free_rcu(np);
+	__netpoll_free_async(np);
 }
 static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
 {
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index ab856d507b7e..9d7d8c64f7c8 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -32,7 +32,7 @@ struct netpoll {
 	u8 remote_mac[ETH_ALEN];
 
 	struct list_head rx; /* rx_np list element */
-	struct rcu_head rcu;
+	struct work_struct cleanup_work;
 };
 
 struct netpoll_info {
@@ -68,7 +68,7 @@ int netpoll_setup(struct netpoll *np);
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
 void __netpoll_cleanup(struct netpoll *np);
-void __netpoll_free_rcu(struct netpoll *np);
+void __netpoll_free_async(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 34df5b3c9b75..19cf81bf9f69 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -733,7 +733,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 
 	vlan->netpoll = NULL;
 
-	__netpoll_free_rcu(netpoll);
+	__netpoll_free_async(netpoll);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ba6fb2d60940..ca98fa5b2c78 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -265,7 +265,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
 
 	p->np = NULL;
 
-	__netpoll_free_rcu(np);
+	__netpoll_free_async(np);
 }
 
 #endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index edcd9ad95304..c536474e2260 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -61,6 +61,7 @@ static struct srcu_struct netpoll_srcu;
 
 static void zap_completion_queue(void);
 static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
+static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -1020,6 +1021,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
 	np->dev = ndev;
 	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
+	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
 	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
 	    !ndev->netdev_ops->ndo_poll_controller) {
@@ -1255,25 +1257,27 @@ void __netpoll_cleanup(struct netpoll *np)
 		if (ops->ndo_netpoll_cleanup)
 			ops->ndo_netpoll_cleanup(np->dev);
 
-		RCU_INIT_POINTER(np->dev->npinfo, NULL);
+		rcu_assign_pointer(np->dev->npinfo, NULL);
 		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
 	}
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+static void netpoll_async_cleanup(struct work_struct *work)
 {
-	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
+	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
 
+	rtnl_lock();
 	__netpoll_cleanup(np);
+	rtnl_unlock();
 	kfree(np);
 }
 
-void __netpoll_free_rcu(struct netpoll *np)
+void __netpoll_free_async(struct netpoll *np)
 {
-	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
+	schedule_work(&np->cleanup_work);
 }
-EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
+EXPORT_SYMBOL_GPL(__netpoll_free_async);
 
 void netpoll_cleanup(struct netpoll *np)
 {