author		Eric W. Biederman <ebiederm@xmission.com>	2014-03-14 23:51:52 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-17 15:48:12 -0400
commit		9c62a68d13119a1ca9718381d97b0cb415ff4e9d (patch)
tree		aae7c0ffaa451dc44afbbba9c37780e9c12bd1fc /net/core
parent		e1bd4d3d7dd2a4a0e731ffe07c439927c23f16ea (diff)
netpoll: Remove dead packet receive code (CONFIG_NETPOLL_TRAP)
The netpoll packet receive code only becomes active if the netpoll
rx_skb_hook is implemented, and there is not a single implementation
of the netpoll rx_skb_hook in the kernel.

All of the out of tree implementations I have found call netpoll_poll,
which was removed from the kernel in 2011, so this change should not
add any additional breakage.

There are problems with the netpoll packet receive code.
__netpoll_rx does not call dev_kfree_skb_irq or dev_kfree_skb_any in
hard irq context.  netpoll_neigh_reply leaks every skb it receives.
Reception of packets does not work successfully on stacked devices
(aka bonding, team, bridge, and vlans).

Given that the netpoll packet receive code is buggy, there are no out
of tree users that will be merged soon, and the code has not been used
in tree for a decade, let's just remove it.

Reverting this commit can serve as a starting point for anyone who
wants to resurrect netpoll packet reception support.

Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
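For reference, the interface removed here delivered matched UDP payloads to a
per-netpoll callback. The sketch below is a minimal, hypothetical illustration
of what an out-of-tree consumer would have looked like; the hook signature is
inferred from the call site deleted in this patch,
np->rx_skb_hook(np, source, skb, offset, data_len), and the function name and
registration flow are illustrative assumptions, not taken from any real user:

	/* Hypothetical rx_skb_hook; the signature is inferred from the
	 * call site removed below.  No in-tree implementation existed.
	 */
	static void example_rx_skb_hook(struct netpoll *np, int source,
					struct sk_buff *skb, int offset,
					int data_len)
	{
		/* source is the sender's UDP port in host byte order;
		 * the payload starts at skb->data + offset and runs for
		 * data_len bytes.
		 */
		pr_info("netpoll rx: %d bytes from port %d\n",
			data_len, source);
	}

	/* Assumed registration: the hook is set before netpoll_setup(),
	 * so that the (removed) netpoll_trap_setup() links np into the
	 * npinfo->rx_np list that __netpoll_rx() walks.
	 */
	np->rx_skb_hook = example_rx_skb_hook;

The bugs called out above are visible in the removed code below:
netpoll_neigh_reply() never frees the skb it is handed, and __netpoll_rx()
frees with plain kfree_skb() even when invoked from hard irq context.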
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	 11
-rw-r--r--	net/core/netpoll.c	520
2 files changed, 2 insertions(+), 529 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 587f9fb85d73..55f8e64c03a2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3231,10 +3231,6 @@ static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
 
-	/* if netpoll wants it, pretend we never saw it */
-	if (netpoll_rx(skb))
-		return NET_RX_DROP;
-
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	trace_netif_rx(skb);
@@ -3520,10 +3516,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
 	trace_netif_receive_skb(skb);
 
-	/* if we've gotten here through NAPI, check netpoll */
-	if (netpoll_receive_skb(skb))
-		goto out;
-
 	orig_dev = skb->dev;
 
 	skb_reset_network_header(skb);
@@ -3650,7 +3642,6 @@ drop:
 
 unlock:
 	rcu_read_unlock();
-out:
 	return ret;
 }
 
@@ -3875,7 +3866,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	int same_flow;
 	enum gro_result ret;
 
-	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
+	if (!(skb->dev->features & NETIF_F_GRO))
 		goto normal;
 
 	if (skb_is_gso(skb) || skb_has_frag_list(skb))
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index eed8b1d2d302..7291dde93469 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -46,11 +46,6 @@
 
 static struct sk_buff_head skb_pool;
 
-#ifdef CONFIG_NETPOLL_TRAP
-static atomic_t trapped;
-static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
-#endif
-
 DEFINE_STATIC_SRCU(netpoll_srcu);
 
 #define USEC_PER_POLL	50
@@ -109,27 +104,6 @@ static void queue_process(struct work_struct *work)
 	}
 }
 
-#ifdef CONFIG_NETPOLL_TRAP
-static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
-			    unsigned short ulen, __be32 saddr, __be32 daddr)
-{
-	__wsum psum;
-
-	if (uh->check == 0 || skb_csum_unnecessary(skb))
-		return 0;
-
-	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
-
-	if (skb->ip_summed == CHECKSUM_COMPLETE &&
-	    !csum_fold(csum_add(psum, skb->csum)))
-		return 0;
-
-	skb->csum = psum;
-
-	return __skb_checksum_complete(skb);
-}
-#endif /* CONFIG_NETPOLL_TRAP */
-
 /*
  * Check whether delayed processing was scheduled for our NIC. If so,
  * we attempt to grab the poll lock and use ->poll() to pump the card.
@@ -140,11 +114,6 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  * trylock here and interrupts are already disabled in the softirq
  * case. Further, we test the poll_owner to avoid recursion on UP
  * systems where the lock doesn't exist.
- *
- * In cases where there is bi-directional communications, reading only
- * one message at a time can lead to packets being dropped by the
- * network adapter, forcing superfluous retries and possibly timeouts.
- * Thus, we set our budget to greater than 1.
  */
 static int poll_one_napi(struct napi_struct *napi, int budget)
 {
@@ -181,38 +150,11 @@ static void poll_napi(struct net_device *dev, int budget)
 	}
 }
 
-#ifdef CONFIG_NETPOLL_TRAP
-static void service_neigh_queue(struct net_device *dev,
-				struct netpoll_info *npi)
-{
-	struct sk_buff *skb;
-	if (dev->flags & IFF_SLAVE) {
-		struct net_device *bond_dev;
-		struct netpoll_info *bond_ni;
-
-		bond_dev = netdev_master_upper_dev_get_rcu(dev);
-		bond_ni = rcu_dereference_bh(bond_dev->npinfo);
-		while ((skb = skb_dequeue(&npi->neigh_tx))) {
-			skb->dev = bond_dev;
-			skb_queue_tail(&bond_ni->neigh_tx, skb);
-		}
-	}
-	while ((skb = skb_dequeue(&npi->neigh_tx)))
-		netpoll_neigh_reply(skb, npi);
-}
-#else /* !CONFIG_NETPOLL_TRAP */
-static inline void service_neigh_queue(struct net_device *dev,
-				       struct netpoll_info *npi)
-{
-}
-#endif /* CONFIG_NETPOLL_TRAP */
-
 static void netpoll_poll_dev(struct net_device *dev)
 {
 	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
-	bool rx_processing = netpoll_rx_processing(ni);
-	int budget = rx_processing? 16 : 0;
+	int budget = 0;
 
 	/* Don't do any rx activity if the dev_lock mutex is held
 	 * the dev_open/close paths use this to block netpoll activity
@@ -226,9 +168,6 @@ static void netpoll_poll_dev(struct net_device *dev)
 		return;
 	}
 
-	if (rx_processing)
-		netpoll_set_trap(1);
-
 	ops = dev->netdev_ops;
 	if (!ops->ndo_poll_controller) {
 		up(&ni->dev_lock);
@@ -240,13 +179,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	poll_napi(dev, budget);
 
-	if (rx_processing)
-		netpoll_set_trap(0);
-
 	up(&ni->dev_lock);
 
-	service_neigh_queue(dev, ni);
-
 	zap_completion_queue();
 }
 
@@ -531,434 +465,6 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
-#ifdef CONFIG_NETPOLL_TRAP
-static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
-{
-	int size, type = ARPOP_REPLY;
-	__be32 sip, tip;
-	unsigned char *sha;
-	struct sk_buff *send_skb;
-	struct netpoll *np, *tmp;
-	unsigned long flags;
-	int hlen, tlen;
-	int hits = 0, proto;
-
-	if (!netpoll_rx_processing(npinfo))
-		return;
-
-	/* Before checking the packet, we do some early
-	   inspection whether this is interesting at all */
-	spin_lock_irqsave(&npinfo->rx_lock, flags);
-	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-		if (np->dev == skb->dev)
-			hits++;
-	}
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
-	/* No netpoll struct is using this dev */
-	if (!hits)
-		return;
-
-	proto = ntohs(eth_hdr(skb)->h_proto);
-	if (proto == ETH_P_ARP) {
-		struct arphdr *arp;
-		unsigned char *arp_ptr;
-		/* No arp on this interface */
-		if (skb->dev->flags & IFF_NOARP)
-			return;
-
-		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
-			return;
-
-		skb_reset_network_header(skb);
-		skb_reset_transport_header(skb);
-		arp = arp_hdr(skb);
-
-		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
-		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
-		    arp->ar_pro != htons(ETH_P_IP) ||
-		    arp->ar_op != htons(ARPOP_REQUEST))
-			return;
-
-		arp_ptr = (unsigned char *)(arp+1);
-		/* save the location of the src hw addr */
-		sha = arp_ptr;
-		arp_ptr += skb->dev->addr_len;
-		memcpy(&sip, arp_ptr, 4);
-		arp_ptr += 4;
-		/* If we actually cared about dst hw addr,
-		   it would get copied here */
-		arp_ptr += skb->dev->addr_len;
-		memcpy(&tip, arp_ptr, 4);
-
-		/* Should we ignore arp? */
-		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
-			return;
-
-		size = arp_hdr_len(skb->dev);
-
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-			if (tip != np->local_ip.ip)
-				continue;
-
-			hlen = LL_RESERVED_SPACE(np->dev);
-			tlen = np->dev->needed_tailroom;
-			send_skb = find_skb(np, size + hlen + tlen, hlen);
-			if (!send_skb)
-				continue;
-
-			skb_reset_network_header(send_skb);
-			arp = (struct arphdr *) skb_put(send_skb, size);
-			send_skb->dev = skb->dev;
-			send_skb->protocol = htons(ETH_P_ARP);
-
-			/* Fill the device header for the ARP frame */
-			if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
-					    sha, np->dev->dev_addr,
-					    send_skb->len) < 0) {
-				kfree_skb(send_skb);
-				continue;
-			}
-
-			/*
-			 * Fill out the arp protocol part.
-			 *
-			 * we only support ethernet device type,
-			 * which (according to RFC 1390) should
-			 * always equal 1 (Ethernet).
-			 */
-
-			arp->ar_hrd = htons(np->dev->type);
-			arp->ar_pro = htons(ETH_P_IP);
-			arp->ar_hln = np->dev->addr_len;
-			arp->ar_pln = 4;
-			arp->ar_op = htons(type);
-
-			arp_ptr = (unsigned char *)(arp + 1);
-			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
-			arp_ptr += np->dev->addr_len;
-			memcpy(arp_ptr, &tip, 4);
-			arp_ptr += 4;
-			memcpy(arp_ptr, sha, np->dev->addr_len);
-			arp_ptr += np->dev->addr_len;
-			memcpy(arp_ptr, &sip, 4);
-
-			netpoll_send_skb(np, send_skb);
-
-			/* If there are several rx_skb_hooks for the same
-			 * address we're fine by sending a single reply
-			 */
-			break;
-		}
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	} else if( proto == ETH_P_IPV6) {
-#if IS_ENABLED(CONFIG_IPV6)
-		struct nd_msg *msg;
-		u8 *lladdr = NULL;
-		struct ipv6hdr *hdr;
-		struct icmp6hdr *icmp6h;
-		const struct in6_addr *saddr;
-		const struct in6_addr *daddr;
-		struct inet6_dev *in6_dev = NULL;
-		struct in6_addr *target;
-
-		in6_dev = in6_dev_get(skb->dev);
-		if (!in6_dev || !in6_dev->cnf.accept_ra)
-			return;
-
-		if (!pskb_may_pull(skb, skb->len))
-			return;
-
-		msg = (struct nd_msg *)skb_transport_header(skb);
-
-		__skb_push(skb, skb->data - skb_transport_header(skb));
-
-		if (ipv6_hdr(skb)->hop_limit != 255)
-			return;
-		if (msg->icmph.icmp6_code != 0)
-			return;
-		if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
-			return;
-
-		saddr = &ipv6_hdr(skb)->saddr;
-		daddr = &ipv6_hdr(skb)->daddr;
-
-		size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
-
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-			if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
-				continue;
-
-			hlen = LL_RESERVED_SPACE(np->dev);
-			tlen = np->dev->needed_tailroom;
-			send_skb = find_skb(np, size + hlen + tlen, hlen);
-			if (!send_skb)
-				continue;
-
-			send_skb->protocol = htons(ETH_P_IPV6);
-			send_skb->dev = skb->dev;
-
-			skb_reset_network_header(send_skb);
-			hdr = (struct ipv6hdr *) skb_put(send_skb, sizeof(struct ipv6hdr));
-			*(__be32*)hdr = htonl(0x60000000);
-			hdr->payload_len = htons(size);
-			hdr->nexthdr = IPPROTO_ICMPV6;
-			hdr->hop_limit = 255;
-			hdr->saddr = *saddr;
-			hdr->daddr = *daddr;
-
-			icmp6h = (struct icmp6hdr *) skb_put(send_skb, sizeof(struct icmp6hdr));
-			icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
-			icmp6h->icmp6_router = 0;
-			icmp6h->icmp6_solicited = 1;
-
-			target = (struct in6_addr *) skb_put(send_skb, sizeof(struct in6_addr));
-			*target = msg->target;
-			icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
-							      IPPROTO_ICMPV6,
-							      csum_partial(icmp6h,
-									   size, 0));
-
-			if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
-					    lladdr, np->dev->dev_addr,
-					    send_skb->len) < 0) {
-				kfree_skb(send_skb);
-				continue;
-			}
-
-			netpoll_send_skb(np, send_skb);
-
-			/* If there are several rx_skb_hooks for the same
-			 * address, we're fine by sending a single reply
-			 */
-			break;
-		}
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-#endif
-	}
-}
-
-static bool pkt_is_ns(struct sk_buff *skb)
-{
-	struct nd_msg *msg;
-	struct ipv6hdr *hdr;
-
-	if (skb->protocol != htons(ETH_P_ARP))
-		return false;
-	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
-		return false;
-
-	msg = (struct nd_msg *)skb_transport_header(skb);
-	__skb_push(skb, skb->data - skb_transport_header(skb));
-	hdr = ipv6_hdr(skb);
-
-	if (hdr->nexthdr != IPPROTO_ICMPV6)
-		return false;
-	if (hdr->hop_limit != 255)
-		return false;
-	if (msg->icmph.icmp6_code != 0)
-		return false;
-	if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
-		return false;
-
-	return true;
-}
-
-int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
-{
-	int proto, len, ulen, data_len;
-	int hits = 0, offset;
-	const struct iphdr *iph;
-	struct udphdr *uh;
-	struct netpoll *np, *tmp;
-	uint16_t source;
-
-	if (!netpoll_rx_processing(npinfo))
-		goto out;
-
-	if (skb->dev->type != ARPHRD_ETHER)
-		goto out;
-
-	/* check if netpoll clients need ARP */
-	if (skb->protocol == htons(ETH_P_ARP) && netpoll_trap()) {
-		skb_queue_tail(&npinfo->neigh_tx, skb);
-		return 1;
-	} else if (pkt_is_ns(skb) && netpoll_trap()) {
-		skb_queue_tail(&npinfo->neigh_tx, skb);
-		return 1;
-	}
-
-	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
-		skb = vlan_untag(skb);
-		if (unlikely(!skb))
-			goto out;
-	}
-
-	proto = ntohs(eth_hdr(skb)->h_proto);
-	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
-		goto out;
-	if (skb->pkt_type == PACKET_OTHERHOST)
-		goto out;
-	if (skb_shared(skb))
-		goto out;
-
-	if (proto == ETH_P_IP) {
-		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
-			goto out;
-		iph = (struct iphdr *)skb->data;
-		if (iph->ihl < 5 || iph->version != 4)
-			goto out;
-		if (!pskb_may_pull(skb, iph->ihl*4))
-			goto out;
-		iph = (struct iphdr *)skb->data;
-		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
-			goto out;
-
-		len = ntohs(iph->tot_len);
-		if (skb->len < len || len < iph->ihl*4)
-			goto out;
-
-		/*
-		 * Our transport medium may have padded the buffer out.
-		 * Now We trim to the true length of the frame.
-		 */
-		if (pskb_trim_rcsum(skb, len))
-			goto out;
-
-		iph = (struct iphdr *)skb->data;
-		if (iph->protocol != IPPROTO_UDP)
-			goto out;
-
-		len -= iph->ihl*4;
-		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
-		offset = (unsigned char *)(uh + 1) - skb->data;
-		ulen = ntohs(uh->len);
-		data_len = skb->len - offset;
-		source = ntohs(uh->source);
-
-		if (ulen != len)
-			goto out;
-		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
-			goto out;
-		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
-				continue;
-			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
-				continue;
-			if (np->local_port && np->local_port != ntohs(uh->dest))
-				continue;
-
-			np->rx_skb_hook(np, source, skb, offset, data_len);
-			hits++;
-		}
-	} else {
-#if IS_ENABLED(CONFIG_IPV6)
-		const struct ipv6hdr *ip6h;
-
-		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
-			goto out;
-		ip6h = (struct ipv6hdr *)skb->data;
-		if (ip6h->version != 6)
-			goto out;
-		len = ntohs(ip6h->payload_len);
-		if (!len)
-			goto out;
-		if (len + sizeof(struct ipv6hdr) > skb->len)
-			goto out;
-		if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
-			goto out;
-		ip6h = ipv6_hdr(skb);
-		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
-			goto out;
-		uh = udp_hdr(skb);
-		offset = (unsigned char *)(uh + 1) - skb->data;
-		ulen = ntohs(uh->len);
-		data_len = skb->len - offset;
-		source = ntohs(uh->source);
-		if (ulen != skb->len)
-			goto out;
-		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
-			goto out;
-		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-			if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
-				continue;
-			if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
-				continue;
-			if (np->local_port && np->local_port != ntohs(uh->dest))
-				continue;
-
-			np->rx_skb_hook(np, source, skb, offset, data_len);
-			hits++;
-		}
-#endif
-	}
-
-	if (!hits)
-		goto out;
-
-	kfree_skb(skb);
-	return 1;
-
-out:
-	if (netpoll_trap()) {
-		kfree_skb(skb);
-		return 1;
-	}
-
-	return 0;
-}
-
-static void netpoll_trap_setup_info(struct netpoll_info *npinfo)
-{
-	INIT_LIST_HEAD(&npinfo->rx_np);
-	spin_lock_init(&npinfo->rx_lock);
-	skb_queue_head_init(&npinfo->neigh_tx);
-}
-
-static void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
-{
-	skb_queue_purge(&npinfo->neigh_tx);
-}
-
-static void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
-{
-	unsigned long flags;
-	if (np->rx_skb_hook) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_add_tail(&np->rx, &npinfo->rx_np);
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	}
-}
-
-static void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
-{
-	unsigned long flags;
-	if (!list_empty(&npinfo->rx_np)) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_del(&np->rx);
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	}
-}
-
-#else /* !CONFIG_NETPOLL_TRAP */
-static inline void netpoll_trap_setup_info(struct netpoll_info *npinfo)
-{
-}
-static inline void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
-{
-}
-static inline
-void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
-{
-}
-static inline
-void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
-{
-}
-#endif /* CONFIG_NETPOLL_TRAP */
-
 void netpoll_print_options(struct netpoll *np)
 {
 	np_info(np, "local port %d\n", np->local_port);
@@ -1103,8 +609,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 		goto out;
 	}
 
-	netpoll_trap_setup_info(npinfo);
-
 	sema_init(&npinfo->dev_lock, 1);
 	skb_queue_head_init(&npinfo->txq);
 	INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
@@ -1124,8 +628,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
 	npinfo->netpoll = np;
 
-	netpoll_trap_setup(np, npinfo);
-
 	/* last thing to do is link it to the net device structure */
 	rcu_assign_pointer(ndev->npinfo, npinfo);
 
@@ -1274,7 +776,6 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 	struct netpoll_info *npinfo =
 		container_of(rcu_head, struct netpoll_info, rcu);
 
-	netpoll_trap_cleanup_info(npinfo);
 	skb_queue_purge(&npinfo->txq);
 
 	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
@@ -1299,8 +800,6 @@ void __netpoll_cleanup(struct netpoll *np)
 	if (!npinfo)
 		return;
 
-	netpoll_trap_cleanup(np, npinfo);
-
 	synchronize_srcu(&netpoll_srcu);
 
 	if (atomic_dec_and_test(&npinfo->refcnt)) {
@@ -1344,20 +843,3 @@ out:
 	rtnl_unlock();
 }
 EXPORT_SYMBOL(netpoll_cleanup);
-
-#ifdef CONFIG_NETPOLL_TRAP
-int netpoll_trap(void)
-{
-	return atomic_read(&trapped);
-}
-EXPORT_SYMBOL(netpoll_trap);
-
-void netpoll_set_trap(int trap)
-{
-	if (trap)
-		atomic_inc(&trapped);
-	else
-		atomic_dec(&trapped);
-}
-EXPORT_SYMBOL(netpoll_set_trap);
-#endif /* CONFIG_NETPOLL_TRAP */