author     Amerigo Wang <amwang@redhat.com>        2012-08-09 21:24:38 -0400
committer  David S. Miller <davem@davemloft.net>   2012-08-14 17:33:30 -0400
commit     38e6bc185d9544dfad1774b3f8902a0b061aea25
tree       5dc41a28f9dc48095c998dfc0dcf8b970db6e1b5 /net
parent     47be03a28cc6c80e3aa2b3e8ed6d960ff0c5c0af
netpoll: make __netpoll_cleanup non-block
Like the previous patch, slave_disable_netpoll() and __netpoll_cleanup()
may be called with read_lock() held too, so we should make them
non-blocking by moving the cleanup and kfree() into call_rcu_bh() callbacks.
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
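The idiom the patch switches to is the usual RCU deferred-free pattern: rather than blocking in synchronize_rcu_bh() until all BH read-side critical sections have drained, the object carries an embedded rcu_head, call_rcu_bh() queues it, and the teardown plus kfree() run later from softirq context once every in-flight rcu_read_lock_bh() reader has finished. A minimal sketch of that pattern, using a hypothetical struct foo rather than the netpoll structures themselves:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
        struct rcu_head rcu;    /* lets call_rcu_bh() find the object again */
};

/* Runs later in softirq context, after every rcu_read_lock_bh() reader
 * that could still see the object has finished; must not sleep.
 */
static void foo_free_rcu(struct rcu_head *head)
{
        struct foo *f = container_of(head, struct foo, rcu);

        kfree(f);
}

static void foo_release(struct foo *f)
{
        /* Safe with read_lock()/rcu_read_lock_bh() held: only queues the
         * callback and returns, without waiting for a grace period.
         */
        call_rcu_bh(&f->rcu, foo_free_rcu);
}

The trade-off is that the memory is not guaranteed to be gone when the release function returns, so anything that must happen before the free (purging queues, cancelling delayed work) has to move into the callback too, and may only use non-sleeping variants there.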
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c   |  6 +-----
-rw-r--r--  net/bridge/br_device.c |  6 +-----
-rw-r--r--  net/core/netpoll.c     | 42 ++++++++++++++++++++++++++++++++----------
3 files changed, 34 insertions, 20 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ee4ae0944cef..b65623f90660 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -704,11 +704,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 
 	info->netpoll = NULL;
 
-	/* Wait for transmitting packets to finish before freeing. */
-	synchronize_rcu_bh();
-
-	__netpoll_cleanup(netpoll);
-	kfree(netpoll);
+	__netpoll_free_rcu(netpoll);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ed0e0f9dc788..f41ba4048c9a 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
 
 	p->np = NULL;
 
-	/* Wait for transmitting packets to finish before freeing. */
-	synchronize_rcu_bh();
-
-	__netpoll_cleanup(np);
-	kfree(np);
+	__netpoll_free_rcu(np);
 }
 
 #endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 37cc854774a4..dc17f1db1479 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -878,6 +878,24 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+	struct netpoll_info *npinfo =
+			container_of(rcu_head, struct netpoll_info, rcu);
+
+	skb_queue_purge(&npinfo->arp_tx);
+	skb_queue_purge(&npinfo->txq);
+
+	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
+	cancel_delayed_work(&npinfo->tx_work);
+
+	/* clean after last, unfinished work */
+	__skb_queue_purge(&npinfo->txq);
+	/* now cancel it again */
+	cancel_delayed_work(&npinfo->tx_work);
+	kfree(npinfo);
+}
+
 void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
@@ -903,20 +921,24 @@ void __netpoll_cleanup(struct netpoll *np)
 			ops->ndo_netpoll_cleanup(np->dev);
 
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
+		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+	}
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-		/* avoid racing with NAPI reading npinfo */
-		synchronize_rcu_bh();
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
 
-		skb_queue_purge(&npinfo->arp_tx);
-		skb_queue_purge(&npinfo->txq);
-		cancel_delayed_work_sync(&npinfo->tx_work);
+	__netpoll_cleanup(np);
+	kfree(np);
+}
 
-		/* clean after last, unfinished work */
-		__skb_queue_purge(&npinfo->txq);
-		kfree(npinfo);
-	}
+void __netpoll_free_rcu(struct netpoll *np)
+{
+	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
 }
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
 
 void netpoll_cleanup(struct netpoll *np)
 {
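Because the diffstat above is limited to 'net', the companion change to include/linux/netpoll.h is not shown in this view. From the use of np->rcu and the export of __netpoll_free_rcu() in the hunks above, the header presumably gains an rcu_head member in struct netpoll plus a declaration for the new helper; the sketch below is an assumption about that file, not text from this diff, and elides the fields it does not show.

/* include/linux/netpoll.h -- sketch only; the real file is outside this diffstat */
struct netpoll {
        struct net_device *dev;
        /* ... existing fields elided ... */
        struct rcu_head rcu;            /* assumed: target of call_rcu_bh() in __netpoll_free_rcu() */
};

void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_rcu(struct netpoll *np);    /* new non-blocking variant added by this patch */
void netpoll_cleanup(struct netpoll *np);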