author    Amerigo Wang <amwang@redhat.com>    2012-08-09 21:24:42 -0400
committer David S. Miller <davem@davemloft.net>    2012-08-14 17:33:31 -0400
commit    2899656b494dcd118123af1126826b115c8ea6f9 (patch)
tree      969d2b4362c2237cb38259f909774dc0a00a7e4d
parent    91fe4a4b9e490a24f6702dd8afe72d8afab6fcdb (diff)
netpoll: take rcu_read_lock_bh() in netpoll_send_skb_on_dev()
This patch fixes several problems in the call path of
netpoll_send_skb_on_dev():

1. Disable IRQs before calling netpoll_send_skb_on_dev().

2. All the callees of netpoll_send_skb_on_dev() should use
   rcu_dereference_bh() to dereference ->npinfo.

3. Rename arp_reply() to netpoll_arp_reply(); the former is too generic.

Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
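As context for reviewers, here is a minimal sketch (not part of the patch) of the RCU-bh rule the commit title refers to. The helper name example_touch_npinfo is hypothetical; rcu_read_lock_bh(), rcu_dereference_bh() and ->npinfo are the real kernel symbols involved:

#include <linux/netdevice.h>	/* struct net_device, ->npinfo */
#include <linux/rcupdate.h>	/* rcu_read_lock_bh(), rcu_dereference_bh() */

/*
 * Illustrative only: a reader of ->npinfo must run inside a BH
 * read-side critical section.  rcu_read_lock_bh() provides one
 * explicitly; netpoll_send_skb() gets the same guarantee by disabling
 * hard IRQs around netpoll_send_skb_on_dev().
 */
static void example_touch_npinfo(struct net_device *dev)
{
        struct netpoll_info *npinfo;

        rcu_read_lock_bh();                       /* enter BH read-side section */
        npinfo = rcu_dereference_bh(dev->npinfo); /* checked dereference */
        if (npinfo)
                pr_info("%s: netpoll attached\n", dev->name);
        rcu_read_unlock_bh();                     /* leave read-side section */
}

With that rule in place, every callee of netpoll_send_skb_on_dev() may use rcu_dereference_bh() on ->npinfo, which is exactly what the hunks below change.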
-rw-r--r--    include/linux/netpoll.h    3
-rw-r--r--    net/core/netpoll.c         31
2 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 2d178baa49df..61aee86cf21d 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -57,7 +57,10 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                              struct net_device *dev);
 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
+        unsigned long flags;
+        local_irq_save(flags);
         netpoll_send_skb_on_dev(np, skb, np->dev);
+        local_irq_restore(flags);
 }
 
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d055bb01328b..174346ac15a0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -54,7 +54,7 @@ static atomic_t trapped;
          MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void arp_reply(struct sk_buff *skb);
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -170,7 +170,8 @@ static void poll_napi(struct net_device *dev)
         list_for_each_entry(napi, &dev->napi_list, dev_list) {
                 if (napi->poll_owner != smp_processor_id() &&
                     spin_trylock(&napi->poll_lock)) {
-                        budget = poll_one_napi(dev->npinfo, napi, budget);
+                        budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
+                                               napi, budget);
                         spin_unlock(&napi->poll_lock);
 
                         if (!budget)
@@ -185,13 +186,14 @@ static void service_arp_queue(struct netpoll_info *npi)
                 struct sk_buff *skb;
 
                 while ((skb = skb_dequeue(&npi->arp_tx)))
-                        arp_reply(skb);
+                        netpoll_arp_reply(skb, npi);
         }
 }
 
 static void netpoll_poll_dev(struct net_device *dev)
 {
         const struct net_device_ops *ops;
+        struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
 
         if (!dev || !netif_running(dev))
                 return;
@@ -206,17 +208,18 @@ static void netpoll_poll_dev(struct net_device *dev)
         poll_napi(dev);
 
         if (dev->flags & IFF_SLAVE) {
-                if (dev->npinfo) {
+                if (ni) {
                         struct net_device *bond_dev = dev->master;
                         struct sk_buff *skb;
-                        while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+                        struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
+                        while ((skb = skb_dequeue(&ni->arp_tx))) {
                                 skb->dev = bond_dev;
-                                skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+                                skb_queue_tail(&bond_ni->arp_tx, skb);
                         }
                 }
         }
 
-        service_arp_queue(dev->npinfo);
+        service_arp_queue(ni);
 
         zap_completion_queue();
 }
@@ -302,6 +305,7 @@ static int netpoll_owner_active(struct net_device *dev)
         return 0;
 }
 
+/* call with IRQ disabled */
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                              struct net_device *dev)
 {
@@ -309,8 +313,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
         unsigned long tries;
         const struct net_device_ops *ops = dev->netdev_ops;
         /* It is up to the caller to keep npinfo alive. */
-        struct netpoll_info *npinfo = np->dev->npinfo;
+        struct netpoll_info *npinfo;
+
+        WARN_ON_ONCE(!irqs_disabled());
 
+        npinfo = rcu_dereference_bh(np->dev->npinfo);
         if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                 __kfree_skb(skb);
                 return;
@@ -319,11 +326,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
         /* don't get messages out of order, and no recursion */
         if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                 struct netdev_queue *txq;
-                unsigned long flags;
 
                 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
-                local_irq_save(flags);
                 /* try until next clock tick */
                 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                      tries > 0; --tries) {
@@ -347,10 +352,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                 }
 
                 WARN_ONCE(!irqs_disabled(),
-                        "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+                        "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
                         dev->name, ops->ndo_start_xmit);
 
-                local_irq_restore(flags);
         }
 
         if (status != NETDEV_TX_OK) {
@@ -423,9 +427,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
-static void arp_reply(struct sk_buff *skb)
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
-        struct netpoll_info *npinfo = skb->dev->npinfo;
         struct arphdr *arp;
         unsigned char *arp_ptr;
         int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;