 include/linux/netpoll.h |  7 ++++---
 net/core/netpoll.c      | 12 ++++++++----
 2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index a0525a1f4715..e3d79593fb3a 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -25,6 +25,7 @@ struct netpoll {
 
 struct netpoll_info {
	atomic_t refcnt;
+	int rx_flags;
	spinlock_t rx_lock;
	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
@@ -50,12 +51,12 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	unsigned long flags;
 	int ret = 0;
 
-	if (!npinfo || !npinfo->rx_np)
+	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
 		return 0;
 
 	spin_lock_irqsave(&npinfo->rx_lock, flags);
-	/* check rx_np again with the lock held */
-	if (npinfo->rx_np && __netpoll_rx(skb))
+	/* check rx_flags again with the lock held */
+	if (npinfo->rx_flags && __netpoll_rx(skb))
 		ret = 1;
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
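(For reference, with this hunk applied the netpoll_rx() fast path reads roughly as below. This is a sketch reconstructed from the hunk above; the npinfo initialization and the trailing return and brace lie outside the hunk and are assumed from surrounding context.)

static inline int netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;	/* assumed: outside the hunk */
	unsigned long flags;
	int ret = 0;

	/* Fast path: skip unless an rx_hook is registered (rx_np) or
	 * rx_flags marks receives as enabled/being trapped. */
	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
		return 0;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;	/* assumed: outside the hunk */
}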
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6faa128a4c8e..4b7e756181c9 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -39,6 +39,8 @@ static struct sk_buff_head skb_pool;
 static atomic_t trapped;
 
 #define USEC_PER_POLL	50
+#define NETPOLL_RX_ENABLED  1
+#define NETPOLL_RX_DROP     2
 
 #define MAX_SKB_SIZE \
 	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
@@ -126,11 +128,13 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
 		return budget;
 
+	npinfo->rx_flags |= NETPOLL_RX_DROP;
 	atomic_inc(&trapped);
 
 	work = napi->poll(napi, budget);
 
 	atomic_dec(&trapped);
+	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
 	return budget - work;
 }
@@ -472,7 +476,7 @@ int __netpoll_rx(struct sk_buff *skb)
 	if (skb->dev->type != ARPHRD_ETHER)
 		goto out;
 
-	/* if receive ARP during middle of NAPI poll, then queue */
+	/* check if netpoll clients need ARP */
 	if (skb->protocol == htons(ETH_P_ARP) &&
 	    atomic_read(&trapped)) {
 		skb_queue_tail(&npi->arp_tx, skb);
@@ -534,9 +538,6 @@ int __netpoll_rx(struct sk_buff *skb)
 	return 1;
 
 out:
-	/* If packet received while already in poll then just
-	 * silently drop.
-	 */
 	if (atomic_read(&trapped)) {
 		kfree_skb(skb);
 		return 1;
@@ -675,6 +676,7 @@ int netpoll_setup(struct netpoll *np)
 		goto release;
 	}
 
+	npinfo->rx_flags = 0;
 	npinfo->rx_np = NULL;
 
 	spin_lock_init(&npinfo->rx_lock);
@@ -756,6 +758,7 @@ int netpoll_setup(struct netpoll *np)
 
 	if (np->rx_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
@@ -797,6 +800,7 @@ void netpoll_cleanup(struct netpoll *np)
 			if (npinfo->rx_np == np) {
 				spin_lock_irqsave(&npinfo->rx_lock, flags);
 				npinfo->rx_np = NULL;
+				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
 				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 			}
 
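Taken together, rx_flags has a small lifecycle: netpoll_setup() sets NETPOLL_RX_ENABLED under rx_lock alongside rx_np when an rx_hook is registered, netpoll_cleanup() clears it, and poll_one_napi() holds NETPOLL_RX_DROP around napi->poll() in step with the trapped counter, so packets arriving while netpoll drives the NAPI poll still reach __netpoll_rx(), where trapped causes ARPs to be queued and everything else to be dropped. The standalone userspace model below is not kernel code: only rx_flags and the two flag values come from the patch, while the struct, have_rx_np, and function names are invented for illustration of the fast-path check in netpoll_rx().

/*
 * Standalone model (not kernel code) of the rx_flags gating added by
 * this patch.  NETPOLL_RX_ENABLED and NETPOLL_RX_DROP mirror the
 * defines in net/core/netpoll.c; everything else is a simplification.
 */
#include <stdio.h>

#define NETPOLL_RX_ENABLED 1
#define NETPOLL_RX_DROP    2

struct npinfo_model {
	int rx_flags;
	int have_rx_np;	/* stands in for npinfo->rx_np != NULL */
};

/* Mirrors the fast-path test in the patched netpoll_rx(). */
static int netpoll_rx_would_intercept(const struct npinfo_model *npinfo)
{
	if (!npinfo || (!npinfo->have_rx_np && !npinfo->rx_flags))
		return 0;
	return 1;
}

int main(void)
{
	struct npinfo_model npinfo = { 0, 0 };

	printf("fresh npinfo:        %d\n", netpoll_rx_would_intercept(&npinfo));

	/* netpoll_setup() with an rx_hook registered */
	npinfo.rx_flags |= NETPOLL_RX_ENABLED;
	npinfo.have_rx_np = 1;
	printf("rx_hook registered:  %d\n", netpoll_rx_would_intercept(&npinfo));

	/* poll_one_napi(): DROP held for the duration of napi->poll() */
	npinfo.rx_flags |= NETPOLL_RX_DROP;
	printf("during trapped poll: %d\n", netpoll_rx_would_intercept(&npinfo));
	npinfo.rx_flags &= ~NETPOLL_RX_DROP;

	/* netpoll_cleanup() */
	npinfo.rx_flags &= ~NETPOLL_RX_ENABLED;
	npinfo.have_rx_np = 0;
	printf("after cleanup:       %d\n", netpoll_rx_would_intercept(&npinfo));

	return 0;
}

Run as-is, the model prints 0, 1, 1, 0 for the four states.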