diff options
author | Herbert Xu <herbert@gondor.apana.org.au> | 2010-06-10 12:12:44 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-06-15 13:58:38 -0400 |
commit | de85d99eb7b595f6751550184b94c1e2f74a828b (patch) | |
tree | 950eea4329bfb2b5d94f2e242a86c36cf1fd49fc /include | |
parent | 36655042f9873efc2a90d251b9aef9b6b79d75d8 (diff) |
netpoll: Fix RCU usage
The use of RCU in netpoll is incorrect in a number of places:
1) The initial setting is lacking a write barrier.
2) The synchronize_rcu is in the wrong place.
3) Read barriers are missing.
4) Some places are even missing rcu_read_lock.
5) npinfo is zeroed after freeing.
This patch fixes those issues. As most users are in BH context,
this also converts the RCU usage to the BH variant.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/netpoll.h | 13 |
1 file changed, 8 insertions, 5 deletions
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index e9e231215865..95c9f7e16776 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
@@ -57,12 +57,15 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); | |||
57 | #ifdef CONFIG_NETPOLL | 57 | #ifdef CONFIG_NETPOLL |
58 | static inline bool netpoll_rx(struct sk_buff *skb) | 58 | static inline bool netpoll_rx(struct sk_buff *skb) |
59 | { | 59 | { |
60 | struct netpoll_info *npinfo = skb->dev->npinfo; | 60 | struct netpoll_info *npinfo; |
61 | unsigned long flags; | 61 | unsigned long flags; |
62 | bool ret = false; | 62 | bool ret = false; |
63 | 63 | ||
64 | rcu_read_lock_bh(); | ||
65 | npinfo = rcu_dereference(skb->dev->npinfo); | ||
66 | |||
64 | if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) | 67 | if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) |
65 | return false; | 68 | goto out; |
66 | 69 | ||
67 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 70 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
68 | /* check rx_flags again with the lock held */ | 71 | /* check rx_flags again with the lock held */ |
@@ -70,12 +73,14 @@ static inline bool netpoll_rx(struct sk_buff *skb) | |||
70 | ret = true; | 73 | ret = true; |
71 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 74 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); |
72 | 75 | ||
76 | out: | ||
77 | rcu_read_unlock_bh(); | ||
73 | return ret; | 78 | return ret; |
74 | } | 79 | } |
75 | 80 | ||
76 | static inline int netpoll_rx_on(struct sk_buff *skb) | 81 | static inline int netpoll_rx_on(struct sk_buff *skb) |
77 | { | 82 | { |
78 | struct netpoll_info *npinfo = skb->dev->npinfo; | 83 | struct netpoll_info *npinfo = rcu_dereference(skb->dev->npinfo); |
79 | 84 | ||
80 | return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); | 85 | return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); |
81 | } | 86 | } |
@@ -91,7 +96,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) | |||
91 | { | 96 | { |
92 | struct net_device *dev = napi->dev; | 97 | struct net_device *dev = napi->dev; |
93 | 98 | ||
94 | rcu_read_lock(); /* deal with race on ->npinfo */ | ||
95 | if (dev && dev->npinfo) { | 99 | if (dev && dev->npinfo) { |
96 | spin_lock(&napi->poll_lock); | 100 | spin_lock(&napi->poll_lock); |
97 | napi->poll_owner = smp_processor_id(); | 101 | napi->poll_owner = smp_processor_id(); |
@@ -108,7 +112,6 @@ static inline void netpoll_poll_unlock(void *have) | |||
108 | napi->poll_owner = -1; | 112 | napi->poll_owner = -1; |
109 | spin_unlock(&napi->poll_lock); | 113 | spin_unlock(&napi->poll_lock); |
110 | } | 114 | } |
111 | rcu_read_unlock(); | ||
112 | } | 115 | } |
113 | 116 | ||
114 | #else | 117 | #else |