about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2010-06-10 12:12:44 -0400
committerDavid S. Miller <davem@davemloft.net>2010-06-15 13:58:38 -0400
commitde85d99eb7b595f6751550184b94c1e2f74a828b (patch)
tree950eea4329bfb2b5d94f2e242a86c36cf1fd49fc
parent36655042f9873efc2a90d251b9aef9b6b79d75d8 (diff)
netpoll: Fix RCU usage
The use of RCU in netpoll is incorrect in a number of places:

1) The initial setting is lacking a write barrier.
2) The synchronize_rcu is in the wrong place.
3) Read barriers are missing.
4) Some places are even missing rcu_read_lock.
5) npinfo is zeroed after freeing.

This patch fixes those issues. As most users are in BH context, this also converts the RCU usage to the BH variant.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/netpoll.h13
-rw-r--r--net/core/netpoll.c20
2 files changed, 20 insertions, 13 deletions
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e9e231215865..95c9f7e16776 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -57,12 +57,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
57#ifdef CONFIG_NETPOLL 57#ifdef CONFIG_NETPOLL
58static inline bool netpoll_rx(struct sk_buff *skb) 58static inline bool netpoll_rx(struct sk_buff *skb)
59{ 59{
60 struct netpoll_info *npinfo = skb->dev->npinfo; 60 struct netpoll_info *npinfo;
61 unsigned long flags; 61 unsigned long flags;
62 bool ret = false; 62 bool ret = false;
63 63
64 rcu_read_lock_bh();
65 npinfo = rcu_dereference(skb->dev->npinfo);
66
64 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) 67 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
65 return false; 68 goto out;
66 69
67 spin_lock_irqsave(&npinfo->rx_lock, flags); 70 spin_lock_irqsave(&npinfo->rx_lock, flags);
68 /* check rx_flags again with the lock held */ 71 /* check rx_flags again with the lock held */
@@ -70,12 +73,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)
70 ret = true; 73 ret = true;
71 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 74 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
72 75
76out:
77 rcu_read_unlock_bh();
73 return ret; 78 return ret;
74} 79}
75 80
76static inline int netpoll_rx_on(struct sk_buff *skb) 81static inline int netpoll_rx_on(struct sk_buff *skb)
77{ 82{
78 struct netpoll_info *npinfo = skb->dev->npinfo; 83 struct netpoll_info *npinfo = rcu_dereference(skb->dev->npinfo);
79 84
80 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); 85 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
81} 86}
@@ -91,7 +96,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
91{ 96{
92 struct net_device *dev = napi->dev; 97 struct net_device *dev = napi->dev;
93 98
94 rcu_read_lock(); /* deal with race on ->npinfo */
95 if (dev && dev->npinfo) { 99 if (dev && dev->npinfo) {
96 spin_lock(&napi->poll_lock); 100 spin_lock(&napi->poll_lock);
97 napi->poll_owner = smp_processor_id(); 101 napi->poll_owner = smp_processor_id();
@@ -108,7 +112,6 @@ static inline void netpoll_poll_unlock(void *have)
108 napi->poll_owner = -1; 112 napi->poll_owner = -1;
109 spin_unlock(&napi->poll_lock); 113 spin_unlock(&napi->poll_lock);
110 } 114 }
111 rcu_read_unlock();
112} 115}
113 116
114#else 117#else
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 19ff66079f76..e9ab4f0c454c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -261,6 +261,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
261 unsigned long tries; 261 unsigned long tries;
262 struct net_device *dev = np->dev; 262 struct net_device *dev = np->dev;
263 const struct net_device_ops *ops = dev->netdev_ops; 263 const struct net_device_ops *ops = dev->netdev_ops;
264 /* It is up to the caller to keep npinfo alive. */
264 struct netpoll_info *npinfo = np->dev->npinfo; 265 struct netpoll_info *npinfo = np->dev->npinfo;
265 266
266 if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { 267 if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -810,10 +811,7 @@ int netpoll_setup(struct netpoll *np)
810 refill_skbs(); 811 refill_skbs();
811 812
812 /* last thing to do is link it to the net device structure */ 813 /* last thing to do is link it to the net device structure */
813 ndev->npinfo = npinfo; 814 rcu_assign_pointer(ndev->npinfo, npinfo);
814
815 /* avoid racing with NAPI reading npinfo */
816 synchronize_rcu();
817 815
818 return 0; 816 return 0;
819 817
@@ -857,6 +855,16 @@ void netpoll_cleanup(struct netpoll *np)
857 855
858 if (atomic_dec_and_test(&npinfo->refcnt)) { 856 if (atomic_dec_and_test(&npinfo->refcnt)) {
859 const struct net_device_ops *ops; 857 const struct net_device_ops *ops;
858
859 ops = np->dev->netdev_ops;
860 if (ops->ndo_netpoll_cleanup)
861 ops->ndo_netpoll_cleanup(np->dev);
862
863 rcu_assign_pointer(np->dev->npinfo, NULL);
864
865 /* avoid racing with NAPI reading npinfo */
866 synchronize_rcu_bh();
867
860 skb_queue_purge(&npinfo->arp_tx); 868 skb_queue_purge(&npinfo->arp_tx);
861 skb_queue_purge(&npinfo->txq); 869 skb_queue_purge(&npinfo->txq);
862 cancel_rearming_delayed_work(&npinfo->tx_work); 870 cancel_rearming_delayed_work(&npinfo->tx_work);
@@ -864,10 +872,6 @@ void netpoll_cleanup(struct netpoll *np)
864 /* clean after last, unfinished work */ 872 /* clean after last, unfinished work */
865 __skb_queue_purge(&npinfo->txq); 873 __skb_queue_purge(&npinfo->txq);
866 kfree(npinfo); 874 kfree(npinfo);
867 ops = np->dev->netdev_ops;
868 if (ops->ndo_netpoll_cleanup)
869 ops->ndo_netpoll_cleanup(np->dev);
870 np->dev->npinfo = NULL;
871 } 875 }
872 } 876 }
873 877