-rw-r--r--	include/linux/netpoll.h	2
-rw-r--r--	net/core/netpoll.c	50
2 files changed, 17 insertions, 35 deletions
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 39845fc975f9..93a8b7664423 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -33,6 +33,8 @@ struct netpoll_info {
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
+	struct sk_buff_head txq;
+	struct work_struct tx_work;
 };
 
 void netpoll_poll(struct netpoll *np);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c66df2f45d26..ac4e8b8f57d1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -38,10 +38,6 @@
 
 static struct sk_buff_head skb_pool;
 
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
-
 static atomic_t trapped;
 
 #define NETPOLL_RX_ENABLED 1
@@ -56,46 +52,25 @@ static void arp_reply(struct sk_buff *skb);
 
 static void queue_process(void *p)
 {
-	unsigned long flags;
+	struct netpoll_info *npinfo = p;
 	struct sk_buff *skb;
 
-	while (queue_head) {
-		spin_lock_irqsave(&queue_lock, flags);
-
-		skb = queue_head;
-		queue_head = skb->next;
-		if (skb == queue_tail)
-			queue_head = NULL;
-
-		queue_depth--;
-
-		spin_unlock_irqrestore(&queue_lock, flags);
-
+	while ((skb = skb_dequeue(&npinfo->txq)))
 		dev_queue_xmit(skb);
-	}
-}
 
-static DECLARE_WORK(send_queue, queue_process, NULL);
+}
 
 void netpoll_queue(struct sk_buff *skb)
 {
-	unsigned long flags;
+	struct net_device *dev = skb->dev;
+	struct netpoll_info *npinfo = dev->npinfo;
 
-	if (queue_depth == MAX_QUEUE_DEPTH) {
-		__kfree_skb(skb);
-		return;
+	if (!npinfo)
+		kfree_skb(skb);
+	else {
+		skb_queue_tail(&npinfo->txq, skb);
+		schedule_work(&npinfo->tx_work);
 	}
-
-	spin_lock_irqsave(&queue_lock, flags);
-	if (!queue_head)
-		queue_head = skb;
-	else
-		queue_tail->next = skb;
-	queue_tail = skb;
-	queue_depth++;
-	spin_unlock_irqrestore(&queue_lock, flags);
-
-	schedule_work(&send_queue);
 }
 
 static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
@@ -658,6 +633,9 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->tries = MAX_RETRIES;
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
+		skb_queue_head_init(&npinfo->txq);
+		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+
 		atomic_set(&npinfo->refcnt, 1);
 	} else {
 		npinfo = ndev->npinfo;
@@ -780,6 +758,8 @@ void netpoll_cleanup(struct netpoll *np)
 			np->dev->npinfo = NULL;
 			if (atomic_dec_and_test(&npinfo->refcnt)) {
 				skb_queue_purge(&npinfo->arp_tx);
+				skb_queue_purge(&npinfo->txq);
+				flush_scheduled_work();
 
 				kfree(npinfo);
 			}
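
The change replaces netpoll's single global, hand-rolled skb list (queue_head/queue_tail/queue_depth under queue_lock) with a per-device sk_buff_head plus a work_struct in struct netpoll_info, so one clogged device no longer backs up deferred transmits for every other device. Below is a minimal stand-alone sketch of that pattern, for illustration only: the my_* names are invented here, not part of the patch, and it assumes the same three-argument INIT_WORK() workqueue API (pre-2.6.20) that the diff itself uses.

#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>

/* Hypothetical per-device private state, mirroring the txq/tx_work
 * fields this patch adds to struct netpoll_info. */
struct my_dev_priv {
	struct sk_buff_head txq;	/* backlog; has its own internal lock */
	struct work_struct tx_work;	/* drains txq from the shared workqueue */
};

/* Work handler: skb_dequeue() takes the queue's internal lock, so no
 * open-coded queue_lock/irqsave bookkeeping is needed. */
static void my_tx_process(void *p)
{
	struct my_dev_priv *priv = p;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&priv->txq)))
		dev_queue_xmit(skb);
}

/* Producer side: stash the skb and kick the shared workqueue. */
static void my_tx_defer(struct my_dev_priv *priv, struct sk_buff *skb)
{
	skb_queue_tail(&priv->txq, skb);
	schedule_work(&priv->tx_work);
}

/* Setup, as in netpoll_setup() above. */
static void my_tx_init(struct my_dev_priv *priv)
{
	skb_queue_head_init(&priv->txq);
	INIT_WORK(&priv->tx_work, my_tx_process, priv);
}

/* Teardown, as in netpoll_cleanup() above: drop anything still queued,
 * then make sure the work handler has finished before freeing priv. */
static void my_tx_fini(struct my_dev_priv *priv)
{
	skb_queue_purge(&priv->txq);
	flush_scheduled_work();
}

Note the teardown ordering: flush_scheduled_work() must complete before the containing structure is freed, which is why the patch adds it ahead of kfree(npinfo) in netpoll_cleanup().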