author     Amerigo Wang <amwang@redhat.com>       2012-08-09 21:24:38 -0400
committer  David S. Miller <davem@davemloft.net>  2012-08-14 17:33:30 -0400
commit     38e6bc185d9544dfad1774b3f8902a0b061aea25
tree       5dc41a28f9dc48095c998dfc0dcf8b970db6e1b5 /net/core/netpoll.c
parent     47be03a28cc6c80e3aa2b3e8ed6d960ff0c5c0af
netpoll: make __netpoll_cleanup non-block
Like the previous patch, slave_disable_netpoll() and __netpoll_cleanup() may be
called with read_lock() held too, so we should make them non-blocking as well,
by moving the cleanup and kfree() into call_rcu_bh() callbacks.

Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
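For context, the sketch below is illustrative only and not part of the patch (struct foo, foo_release() and foo_rcu_free() are made-up names). It shows the general deferred-free pattern the patch adopts: rather than blocking in synchronize_rcu_bh() and freeing inline, the object embeds a struct rcu_head, the published pointer is cleared, and the actual reclamation runs in a call_rcu_bh() callback after all softirq RCU readers have finished, so the release path never sleeps and may be called with read_lock() held.

/*
 * Illustrative sketch only -- hypothetical names, not netpoll code.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
        struct rcu_head rcu;            /* used to defer the kfree() */
};

static void foo_rcu_free(struct rcu_head *head)
{
        struct foo *f = container_of(head, struct foo, rcu);

        /* runs in softirq context: no sleeping primitives allowed here */
        kfree(f);
}

static void foo_release(struct foo __rcu **slot)
{
        struct foo *f = rcu_dereference_protected(*slot, 1);

        RCU_INIT_POINTER(*slot, NULL);
        /* non-blocking, unlike synchronize_rcu_bh() followed by kfree() */
        call_rcu_bh(&f->rcu, foo_rcu_free);
}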
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--  net/core/netpoll.c  42
1 file changed, 32 insertions, 10 deletions
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 37cc854774a4..dc17f1db1479 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -878,6 +878,24 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+        struct netpoll_info *npinfo =
+                        container_of(rcu_head, struct netpoll_info, rcu);
+
+        skb_queue_purge(&npinfo->arp_tx);
+        skb_queue_purge(&npinfo->txq);
+
+        /* we can't call cancel_delayed_work_sync here, as we are in softirq */
+        cancel_delayed_work(&npinfo->tx_work);
+
+        /* clean after last, unfinished work */
+        __skb_queue_purge(&npinfo->txq);
+        /* now cancel it again */
+        cancel_delayed_work(&npinfo->tx_work);
+        kfree(npinfo);
+}
+
 void __netpoll_cleanup(struct netpoll *np)
 {
         struct netpoll_info *npinfo;
@@ -903,20 +921,24 @@ void __netpoll_cleanup(struct netpoll *np)
                         ops->ndo_netpoll_cleanup(np->dev);
 
                 RCU_INIT_POINTER(np->dev->npinfo, NULL);
+                call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+        }
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-                /* avoid racing with NAPI reading npinfo */
-                synchronize_rcu_bh();
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+        struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
 
-                skb_queue_purge(&npinfo->arp_tx);
-                skb_queue_purge(&npinfo->txq);
-                cancel_delayed_work_sync(&npinfo->tx_work);
+        __netpoll_cleanup(np);
+        kfree(np);
+}
 
-                /* clean after last, unfinished work */
-                __skb_queue_purge(&npinfo->txq);
-                kfree(npinfo);
-        }
+void __netpoll_free_rcu(struct netpoll *np)
+{
+        call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
 }
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
 
 void netpoll_cleanup(struct netpoll *np)
 {