author     Stephen Hemminger <shemminger@osdl.org>          2006-10-26 18:46:51 -0400
committer  David S. Miller <davem@sunset.davemloft.net>     2006-12-03 00:22:33 -0500
commit     b6cd27ed33886a5ffaf0925a6d98e13e18e8a1af
tree       9657515409d89b8b43134e0a29f7c3730901277c /net/core/netpoll.c
parent     93ec2c723e3f8a216dde2899aeb85c648672bc6b
netpoll per device txq
When the netpoll beast got really busy, it tended to clog things, so it stored them for later. But the beast was putting all its skbs in one basket. This was bad because maybe some pipes were clogged and others were not.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
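The diffstat below is limited to net/core/netpoll.c, so the companion header change does not appear here. For orientation, this is a sketch of the two struct netpoll_info fields the new code relies on, inferred from their use in the hunks; the remaining fields and exact placement are assumed, not taken from this patch:

/* include/linux/netpoll.h (outside this file-limited diff) -- sketch only */
struct netpoll_info {
	/* ... existing fields (refcnt, rx_flags, rx_lock, rx_np, arp_tx, tries, ...) ... */
	struct sk_buff_head txq;	/* per-device queue of deferred-transmit skbs */
	struct work_struct tx_work;	/* work item that drains txq via queue_process() */
};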
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--   net/core/netpoll.c   50
1 file changed, 15 insertions(+), 35 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c66df2f45d26..ac4e8b8f57d1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -38,10 +38,6 @@
 
 static struct sk_buff_head skb_pool;
 
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
-
 static atomic_t trapped;
 
 #define NETPOLL_RX_ENABLED 1
@@ -56,46 +52,25 @@ static void arp_reply(struct sk_buff *skb);
 
 static void queue_process(void *p)
 {
-	unsigned long flags;
+	struct netpoll_info *npinfo = p;
 	struct sk_buff *skb;
 
-	while (queue_head) {
-		spin_lock_irqsave(&queue_lock, flags);
-
-		skb = queue_head;
-		queue_head = skb->next;
-		if (skb == queue_tail)
-			queue_head = NULL;
-
-		queue_depth--;
-
-		spin_unlock_irqrestore(&queue_lock, flags);
-
+	while ((skb = skb_dequeue(&npinfo->txq)))
 		dev_queue_xmit(skb);
-	}
-}
 
-static DECLARE_WORK(send_queue, queue_process, NULL);
+}
 
 void netpoll_queue(struct sk_buff *skb)
 {
-	unsigned long flags;
+	struct net_device *dev = skb->dev;
+	struct netpoll_info *npinfo = dev->npinfo;
 
-	if (queue_depth == MAX_QUEUE_DEPTH) {
-		__kfree_skb(skb);
-		return;
+	if (!npinfo)
+		kfree_skb(skb);
+	else {
+		skb_queue_tail(&npinfo->txq, skb);
+		schedule_work(&npinfo->tx_work);
 	}
-
-	spin_lock_irqsave(&queue_lock, flags);
-	if (!queue_head)
-		queue_head = skb;
-	else
-		queue_tail->next = skb;
-	queue_tail = skb;
-	queue_depth++;
-	spin_unlock_irqrestore(&queue_lock, flags);
-
-	schedule_work(&send_queue);
 }
 
 static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
@@ -658,6 +633,9 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->tries = MAX_RETRIES;
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
+		skb_queue_head_init(&npinfo->txq);
+		INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+
 		atomic_set(&npinfo->refcnt, 1);
 	} else {
 		npinfo = ndev->npinfo;
@@ -780,6 +758,8 @@ void netpoll_cleanup(struct netpoll *np)
 			np->dev->npinfo = NULL;
 			if (atomic_dec_and_test(&npinfo->refcnt)) {
 				skb_queue_purge(&npinfo->arp_tx);
+				skb_queue_purge(&npinfo->txq);
+				flush_scheduled_work();
 
 				kfree(npinfo);
 			}
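A side note on the API in play: this patch predates the 2.6.20 workqueue rework, so INIT_WORK() still takes a data pointer and the work function receives it as its argument, which is how queue_process() gets its npinfo. A minimal standalone illustration of that older convention follows; the names my_ctx, my_work_fn and my_setup are hypothetical and not part of this patch:

/* Pre-2.6.20 three-argument INIT_WORK() convention: the data pointer
 * handed to INIT_WORK() arrives as the work function's void * argument. */
#include <linux/workqueue.h>

struct my_ctx {
	struct work_struct work;
	int value;
};

static void my_work_fn(void *data)
{
	struct my_ctx *ctx = data;	/* same pointer passed to INIT_WORK() */
	/* ... drain a queue, etc. ... */
	(void)ctx;
}

static void my_setup(struct my_ctx *ctx)
{
	INIT_WORK(&ctx->work, my_work_fn, ctx);
	schedule_work(&ctx->work);	/* runs my_work_fn(ctx) from keventd */
}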