Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--	net/core/netpoll.c	99
1 file changed, 69 insertions(+), 30 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b4c90e42b443..e4ba3e70c174 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -26,6 +26,7 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/if_vlan.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <asm/unaligned.h>
@@ -54,7 +55,7 @@ static atomic_t trapped;
 	 MAX_UDP_CHUNK)

 static void zap_completion_queue(void);
-static void arp_reply(struct sk_buff *skb);
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);

 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -170,7 +171,8 @@ static void poll_napi(struct net_device *dev)
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
 		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			budget = poll_one_napi(dev->npinfo, napi, budget);
+			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
+					       napi, budget);
 			spin_unlock(&napi->poll_lock);

 			if (!budget)
@@ -185,13 +187,14 @@ static void service_arp_queue(struct netpoll_info *npi)
 		struct sk_buff *skb;

 		while ((skb = skb_dequeue(&npi->arp_tx)))
-			arp_reply(skb);
+			netpoll_arp_reply(skb, npi);
 	}
 }

 static void netpoll_poll_dev(struct net_device *dev)
 {
 	const struct net_device_ops *ops;
+	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

 	if (!dev || !netif_running(dev))
 		return;
@@ -206,17 +209,18 @@ static void netpoll_poll_dev(struct net_device *dev)
 	poll_napi(dev);

 	if (dev->flags & IFF_SLAVE) {
-		if (dev->npinfo) {
+		if (ni) {
 			struct net_device *bond_dev = dev->master;
 			struct sk_buff *skb;
-			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+			struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
+			while ((skb = skb_dequeue(&ni->arp_tx))) {
 				skb->dev = bond_dev;
-				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+				skb_queue_tail(&bond_ni->arp_tx, skb);
 			}
 		}
 	}

-	service_arp_queue(dev->npinfo);
+	service_arp_queue(ni);

 	zap_completion_queue();
 }
@@ -302,6 +306,7 @@ static int netpoll_owner_active(struct net_device *dev)
 	return 0;
 }

+/* call with IRQ disabled */
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 			     struct net_device *dev)
 {
@@ -309,8 +314,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	unsigned long tries;
 	const struct net_device_ops *ops = dev->netdev_ops;
 	/* It is up to the caller to keep npinfo alive. */
-	struct netpoll_info *npinfo = np->dev->npinfo;
+	struct netpoll_info *npinfo;
+
+	WARN_ON_ONCE(!irqs_disabled());

+	npinfo = rcu_dereference_bh(np->dev->npinfo);
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		__kfree_skb(skb);
 		return;
@@ -319,16 +327,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
-		unsigned long flags;

 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

-		local_irq_save(flags);
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
 			if (__netif_tx_trylock(txq)) {
 				if (!netif_xmit_stopped(txq)) {
+					if (vlan_tx_tag_present(skb) &&
+					    !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
+						skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+						if (unlikely(!skb))
+							break;
+						skb->vlan_tci = 0;
+					}
+
 					status = ops->ndo_start_xmit(skb, dev);
 					if (status == NETDEV_TX_OK)
 						txq_trans_update(txq);
@@ -347,10 +361,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		}

 		WARN_ONCE(!irqs_disabled(),
-			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
 			dev->name, ops->ndo_start_xmit);

-		local_irq_restore(flags);
 	}

 	if (status != NETDEV_TX_OK) {
@@ -423,9 +436,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);

-static void arp_reply(struct sk_buff *skb)
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
-	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
@@ -543,13 +555,12 @@ static void arp_reply(struct sk_buff *skb)
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 }

-int __netpoll_rx(struct sk_buff *skb)
+int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
 	int proto, len, ulen;
 	int hits = 0;
 	const struct iphdr *iph;
 	struct udphdr *uh;
-	struct netpoll_info *npinfo = skb->dev->npinfo;
 	struct netpoll *np, *tmp;

 	if (list_empty(&npinfo->rx_np))
@@ -565,6 +576,12 @@ int __netpoll_rx(struct sk_buff *skb)
 		return 1;
 	}

+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+		skb = vlan_untag(skb);
+		if (unlikely(!skb))
+			goto out;
+	}
+
 	proto = ntohs(eth_hdr(skb)->h_proto);
 	if (proto != ETH_P_IP)
 		goto out;
@@ -715,7 +732,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);

-int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 {
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
@@ -734,7 +751,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 	}

 	if (!ndev->npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+		npinfo = kmalloc(sizeof(*npinfo), gfp);
 		if (!npinfo) {
 			err = -ENOMEM;
 			goto out;
@@ -752,7 +769,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)

 	ops = np->dev->netdev_ops;
 	if (ops->ndo_netpoll_setup) {
-		err = ops->ndo_netpoll_setup(ndev, npinfo);
+		err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
 		if (err)
 			goto free_npinfo;
 	}
@@ -857,7 +874,7 @@ int netpoll_setup(struct netpoll *np)
 	refill_skbs();

 	rtnl_lock();
-	err = __netpoll_setup(np, ndev);
+	err = __netpoll_setup(np, ndev, GFP_KERNEL);
 	rtnl_unlock();

 	if (err)
@@ -878,6 +895,24 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);

+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+	struct netpoll_info *npinfo =
+			container_of(rcu_head, struct netpoll_info, rcu);
+
+	skb_queue_purge(&npinfo->arp_tx);
+	skb_queue_purge(&npinfo->txq);
+
+	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
+	cancel_delayed_work(&npinfo->tx_work);
+
+	/* clean after last, unfinished work */
+	__skb_queue_purge(&npinfo->txq);
+	/* now cancel it again */
+	cancel_delayed_work(&npinfo->tx_work);
+	kfree(npinfo);
+}
+
 void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
@@ -903,20 +938,24 @@ void __netpoll_cleanup(struct netpoll *np)
 			ops->ndo_netpoll_cleanup(np->dev);

 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
+		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+	}
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);

-		/* avoid racing with NAPI reading npinfo */
-		synchronize_rcu_bh();
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+	struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);

-		skb_queue_purge(&npinfo->arp_tx);
-		skb_queue_purge(&npinfo->txq);
-		cancel_delayed_work_sync(&npinfo->tx_work);
+	__netpoll_cleanup(np);
+	kfree(np);
+}

-		/* clean after last, unfinished work */
-		__skb_queue_purge(&npinfo->txq);
-		kfree(npinfo);
-	}
+void __netpoll_free_rcu(struct netpoll *np)
+{
+	call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
 }
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);

 void netpoll_cleanup(struct netpoll *np)
 {
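
The hunks above change the netpoll API contract for stacked drivers: __netpoll_setup() now takes a gfp_t so it can run in atomic context, npinfo is read via rcu_dereference_bh() under rcu_read_lock_bh(), and a struct netpoll is released through __netpoll_free_rcu(), which defers __netpoll_cleanup() plus kfree() to an RCU-bh callback. The sketch below is illustrative only and is not part of this patch; it shows how a stacked driver such as bonding would be expected to use the reworked calls. "struct example_slave" and the two helper names are hypothetical.

#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/slab.h>

struct example_slave {
	struct net_device *dev;
	struct netpoll *np;
};

static int example_enable_netpoll(struct example_slave *slave, gfp_t gfp)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), gfp);
	if (!np)
		return -ENOMEM;

	/* gfp is propagated down to the kmalloc() of npinfo */
	err = __netpoll_setup(np, slave->dev, gfp);
	if (err) {
		kfree(np);
		return err;
	}
	slave->np = np;
	return 0;
}

static void example_disable_netpoll(struct example_slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;
	/* cleanup and free are deferred through call_rcu_bh() */
	__netpoll_free_rcu(np);
}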