Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--	net/core/netpoll.c | 327
1 file changed, 153 insertions(+), 174 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 63f24c914ddb..b3c559b9ac35 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -34,18 +34,12 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
-#define MAX_RETRIES 20000
 
-static DEFINE_SPINLOCK(skb_list_lock);
-static int nr_skbs;
-static struct sk_buff *skbs;
-
-static DEFINE_SPINLOCK(queue_lock);
-static int queue_depth;
-static struct sk_buff *queue_head, *queue_tail;
+static struct sk_buff_head skb_pool;
 
 static atomic_t trapped;
 
+#define USEC_PER_POLL	50
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
 
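The hand-rolled pool (a singly-linked list, a spinlock and a counter) and the private tx queue each collapse into one struct sk_buff_head, which already bundles head, tail, length and lock. A rough userspace model of why that removes state that had to be kept consistent by hand (names here are illustrative, not kernel API):

#include <pthread.h>

struct node {
	struct node *next, *prev;
};

struct queue_head {
	struct node list;	/* circular doubly-linked sentinel */
	unsigned int qlen;	/* replaces nr_skbs / queue_depth */
	pthread_mutex_t lock;	/* replaces skb_list_lock / queue_lock */
};

static void queue_init(struct queue_head *q)
{
	q->list.next = q->list.prev = &q->list;
	q->qlen = 0;
	pthread_mutex_init(&q->lock, NULL);
}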
@@ -58,52 +52,34 @@ static void arp_reply(struct sk_buff *skb);
 
 static void queue_process(struct work_struct *work)
 {
-	unsigned long flags;
+	struct netpoll_info *npinfo =
+		container_of(work, struct netpoll_info, tx_work.work);
 	struct sk_buff *skb;
 
-	while (queue_head) {
-		spin_lock_irqsave(&queue_lock, flags);
-
-		skb = queue_head;
-		queue_head = skb->next;
-		if (skb == queue_tail)
-			queue_head = NULL;
-
-		queue_depth--;
-
-		spin_unlock_irqrestore(&queue_lock, flags);
-
-		dev_queue_xmit(skb);
-	}
-}
+	while ((skb = skb_dequeue(&npinfo->txq))) {
+		struct net_device *dev = skb->dev;
 
-static DECLARE_WORK(send_queue, queue_process);
+		if (!netif_device_present(dev) || !netif_running(dev)) {
+			__kfree_skb(skb);
+			continue;
+		}
 
-void netpoll_queue(struct sk_buff *skb)
-{
-	unsigned long flags;
+		netif_tx_lock_bh(dev);
+		if (netif_queue_stopped(dev) ||
+		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+			skb_queue_head(&npinfo->txq, skb);
+			netif_tx_unlock_bh(dev);
 
-	if (queue_depth == MAX_QUEUE_DEPTH) {
-		__kfree_skb(skb);
-		return;
-	}
-
-	spin_lock_irqsave(&queue_lock, flags);
-	if (!queue_head)
-		queue_head = skb;
-	else
-		queue_tail->next = skb;
-	queue_tail = skb;
-	queue_depth++;
-	spin_unlock_irqrestore(&queue_lock, flags);
-
-	schedule_work(&send_queue);
+			schedule_delayed_work(&npinfo->tx_work, HZ/10);
+			return;
+		}
+	}
 }
 
-static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
-			unsigned short ulen, u32 saddr, u32 daddr)
+static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
+			    unsigned short ulen, __be32 saddr, __be32 daddr)
 {
-	unsigned int psum;
+	__wsum psum;
 
 	if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
 		return 0;
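The new queue_process() establishes the deferred-transmit pattern: drain the device's per-device queue, and when the driver reports busy, put the packet back at the head (so ordering is preserved) and reschedule the work instead of spinning. A control-flow sketch with hypothetical hooks standing in for hard_start_xmit() and schedule_delayed_work():

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical hooks; only the control flow mirrors queue_process(). */
void *txq_dequeue(void);
void txq_requeue_head(void *pkt);
bool device_xmit(void *pkt);		/* false == NETDEV_TX_BUSY */
void reschedule_drain(unsigned int ms);

static void drain_txq(void)
{
	void *pkt;

	while ((pkt = txq_dequeue()) != NULL) {
		if (!device_xmit(pkt)) {
			txq_requeue_head(pkt);	/* failed packet goes back first */
			reschedule_drain(100);	/* roughly HZ/10 in the patch */
			return;
		}
	}
}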
@@ -111,7 +87,7 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE &&
-	    !(u16)csum_fold(csum_add(psum, skb->csum)))
+	    !csum_fold(csum_add(psum, skb->csum)))
 		return 0;
 
 	skb->csum = psum;
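The type changes here (__wsum for the 32-bit one's-complement accumulator, __sum16 for the folded result) let sparse catch partial sums that are compared before being folded, which is what the open-coded (u16) cast was hiding. What csum_fold() does, as a standalone userspace demo:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement accumulator into 16 bits and invert,
 * mirroring the kernel's csum_fold() (userspace sketch, not kernel code). */
static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* at most one more carry */
	return (uint16_t)~sum;
}

int main(void)
{
	/* the patched test is "fold(pseudo-header sum + data sum) == 0" */
	printf("0x%04x\n", fold(0x1fffe));	/* -> 0x0000: checksum verifies */
	printf("0x%04x\n", fold(0x12345));	/* -> 0xdcb9: corrupt packet */
	return 0;
}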
@@ -167,12 +143,11 @@ static void service_arp_queue(struct netpoll_info *npi)
 		arp_reply(skb);
 		skb = skb_dequeue(&npi->arp_tx);
 	}
-	return;
 }
 
 void netpoll_poll(struct netpoll *np)
 {
-	if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
+	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
 		return;
 
 	/* Process pending work on NIC */
@@ -190,17 +165,15 @@ static void refill_skbs(void)
 	struct sk_buff *skb;
 	unsigned long flags;
 
-	spin_lock_irqsave(&skb_list_lock, flags);
-	while (nr_skbs < MAX_SKBS) {
+	spin_lock_irqsave(&skb_pool.lock, flags);
+	while (skb_pool.qlen < MAX_SKBS) {
 		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
 		if (!skb)
 			break;
 
-		skb->next = skbs;
-		skbs = skb;
-		nr_skbs++;
+		__skb_queue_tail(&skb_pool, skb);
 	}
-	spin_unlock_irqrestore(&skb_list_lock, flags);
+	spin_unlock_irqrestore(&skb_pool.lock, flags);
 }
 
 static void zap_completion_queue(void)
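The refill loop takes the queue's own skb_pool.lock and then uses the unlocked, __-prefixed helper inside the critical section. The same lock-plus-unlocked-helper idiom, compressed into the queue_head model from the first sketch (illustrative only, assumes <stdlib.h> and the earlier definitions):

static void refill(struct queue_head *q, unsigned int max)
{
	pthread_mutex_lock(&q->lock);
	while (q->qlen < max) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			break;
		/* insert before the sentinel == tail enqueue, no extra locking */
		n->prev = q->list.prev;
		n->next = &q->list;
		n->prev->next = n;
		q->list.prev = n;
		q->qlen++;
	}
	pthread_mutex_unlock(&q->lock);
}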
@@ -219,7 +192,7 @@ static void zap_completion_queue(void)
 		while (clist != NULL) {
 			struct sk_buff *skb = clist;
 			clist = clist->next;
-			if(skb->destructor)
+			if (skb->destructor)
 				dev_kfree_skb_any(skb); /* put this one back */
 			else
 				__kfree_skb(skb);
@@ -229,38 +202,25 @@ static void zap_completion_queue(void)
 	put_cpu_var(softnet_data);
 }
 
-static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
+static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
-	int once = 1, count = 0;
-	unsigned long flags;
-	struct sk_buff *skb = NULL;
+	int count = 0;
+	struct sk_buff *skb;
 
 	zap_completion_queue();
+	refill_skbs();
 repeat:
-	if (nr_skbs < MAX_SKBS)
-		refill_skbs();
 
 	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		skb = skb_dequeue(&skb_pool);
 
 	if (!skb) {
-		spin_lock_irqsave(&skb_list_lock, flags);
-		skb = skbs;
-		if (skb) {
-			skbs = skb->next;
-			skb->next = NULL;
-			nr_skbs--;
-		}
-		spin_unlock_irqrestore(&skb_list_lock, flags);
-	}
-
-	if(!skb) {
-		count++;
-		if (once && (count == 1000000)) {
-			printk("out of netpoll skbs!\n");
-			once = 0;
-		}
-		netpoll_poll(np);
-		goto repeat;
-	}
+		if (++count < 10) {
+			netpoll_poll(np);
+			goto repeat;
+		}
+		return NULL;
+	}
 
 	atomic_set(&skb->users, 1);
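find_skb() now refills the pool up front, prefers the regular allocator, falls back to the pre-filled pool, and gives up after ten polls instead of the old million-iteration spin with a printk. The control flow, as a hedged userspace sketch (pool_get() and poll_sources() are hypothetical stand-ins):

#include <stdlib.h>

void *pool_get(void);		/* reserve pool, may return NULL */
void poll_sources(void);	/* let pending completions free memory */

static void *alloc_bounded(size_t len)
{
	int count = 0;
	void *p;
repeat:
	p = malloc(len);
	if (!p)
		p = pool_get();
	if (!p) {
		if (++count < 10) {	/* same bound the patch picks */
			poll_sources();
			goto repeat;
		}
		return NULL;		/* bounded: caller sees the failure */
	}
	return p;
}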
@@ -270,50 +230,40 @@ repeat:
 
 static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
-	int status;
-	struct netpoll_info *npinfo;
-
-	if (!np || !np->dev || !netif_running(np->dev)) {
-		__kfree_skb(skb);
-		return;
-	}
-
-	npinfo = np->dev->npinfo;
-
-	/* avoid recursion */
-	if (npinfo->poll_owner == smp_processor_id() ||
-	    np->dev->xmit_lock_owner == smp_processor_id()) {
-		if (np->drop)
-			np->drop(skb);
-		else
-			__kfree_skb(skb);
-		return;
-	}
-
-	do {
-		npinfo->tries--;
-		netif_tx_lock(np->dev);
+	int status = NETDEV_TX_BUSY;
+	unsigned long tries;
+	struct net_device *dev = np->dev;
+	struct netpoll_info *npinfo = np->dev->npinfo;
+
+	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+		__kfree_skb(skb);
+		return;
+	}
+
+	/* don't get messages out of order, and no recursion */
+	if (skb_queue_len(&npinfo->txq) == 0 &&
+	    npinfo->poll_owner != smp_processor_id() &&
+	    netif_tx_trylock(dev)) {
+		/* try until next clock tick */
+		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
+			if (!netif_queue_stopped(dev))
+				status = dev->hard_start_xmit(skb, dev);
 
-		/*
-		 * network drivers do not expect to be called if the queue is
-		 * stopped.
-		 */
-		status = NETDEV_TX_BUSY;
-		if (!netif_queue_stopped(np->dev))
-			status = np->dev->hard_start_xmit(skb, np->dev);
+			if (status == NETDEV_TX_OK)
+				break;
 
-		netif_tx_unlock(np->dev);
+			/* tickle device maybe there is some cleanup */
+			netpoll_poll(np);
 
-		/* success */
-		if(!status) {
-			npinfo->tries = MAX_RETRIES; /* reset */
-			return;
-		}
+			udelay(USEC_PER_POLL);
+		}
+		netif_tx_unlock(dev);
+	}
 
-		/* transmit busy */
-		netpoll_poll(np);
-		udelay(50);
-	} while (npinfo->tries > 0);
+	if (status != NETDEV_TX_OK) {
+		skb_queue_tail(&npinfo->txq, skb);
+		schedule_delayed_work(&npinfo->tx_work,0);
+	}
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
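The retry budget is now time-based rather than the old fixed MAX_RETRIES of 20000: the sender polls at most once every USEC_PER_POLL microseconds until the next clock tick, then falls back to the deferred queue. The arithmetic, checkable in userspace:

#include <stdio.h>

#define USEC_PER_POLL 50

static unsigned long tries_per_tick(unsigned int hz)
{
	unsigned long usec_per_tick = 1000000UL / hz;	/* jiffies_to_usecs(1) */
	return usec_per_tick / USEC_PER_POLL;
}

int main(void)
{
	printf("HZ=1000: %lu tries\n", tries_per_tick(1000));	/* 20 */
	printf("HZ=250:  %lu tries\n", tries_per_tick(250));	/* 80 */
	return 0;
}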
@@ -345,7 +295,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 					udp_len, IPPROTO_UDP,
 					csum_partial((unsigned char *)udph, udp_len, 0));
 	if (udph->check == 0)
-		udph->check = -1;
+		udph->check = CSUM_MANGLED_0;
 
 	skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
 
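On the CSUM_MANGLED_0 change: in UDP over IPv4 a checksum field of 0 on the wire means "no checksum", so a checksum that genuinely computes to 0 must be transmitted as all-ones (the same value in one's-complement arithmetic). The open-coded -1 truncated to the same bits; the named constant is simply the sparse-clean spelling. A two-line demo of the rule:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t check = 0;		/* computed checksum happened to be 0 */
	if (check == 0)
		check = 0xffff;		/* the value CSUM_MANGLED_0 carries */
	printf("0x%04x\n", check);	/* prints 0xffff */
	return 0;
}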
@@ -379,7 +329,7 @@ static void arp_reply(struct sk_buff *skb)
 	struct arphdr *arp;
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
-	u32 sip, tip;
+	__be32 sip, tip;
 	struct sk_buff *send_skb;
 	struct netpoll *np = NULL;
 
@@ -431,8 +381,8 @@ static void arp_reply(struct sk_buff *skb)
 
 	if (np->dev->hard_header &&
 	    np->dev->hard_header(send_skb, skb->dev, ptype,
-				       np->remote_mac, np->local_mac,
-				       send_skb->len) < 0) {
+				 np->remote_mac, np->local_mac,
+				 send_skb->len) < 0) {
 		kfree_skb(send_skb);
 		return;
 	}
@@ -470,7 +420,6 @@ int __netpoll_rx(struct sk_buff *skb)
 	struct netpoll_info *npi = skb->dev->npinfo;
 	struct netpoll *np = npi->rx_np;
 
-
 	if (!np)
 		goto out;
 	if (skb->dev->type != ARPHRD_ETHER)
@@ -543,47 +492,47 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 {
 	char *cur=opt, *delim;
 
-	if(*cur != '@') {
+	if (*cur != '@') {
 		if ((delim = strchr(cur, '@')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->local_port=simple_strtol(cur, NULL, 10);
-		cur=delim;
+		*delim = 0;
+		np->local_port = simple_strtol(cur, NULL, 10);
+		cur = delim;
 	}
 	cur++;
 	printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);
 
-	if(*cur != '/') {
+	if (*cur != '/') {
 		if ((delim = strchr(cur, '/')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->local_ip=ntohl(in_aton(cur));
-		cur=delim;
+		*delim = 0;
+		np->local_ip = ntohl(in_aton(cur));
+		cur = delim;
 
 		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
 		       np->name, HIPQUAD(np->local_ip));
 	}
 	cur++;
 
-	if ( *cur != ',') {
+	if (*cur != ',') {
 		/* parse out dev name */
 		if ((delim = strchr(cur, ',')) == NULL)
 			goto parse_failed;
-		*delim=0;
+		*delim = 0;
 		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
-		cur=delim;
+		cur = delim;
 	}
 	cur++;
 
 	printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);
 
-	if ( *cur != '@' ) {
+	if (*cur != '@') {
 		/* dst port */
 		if ((delim = strchr(cur, '@')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_port=simple_strtol(cur, NULL, 10);
-		cur=delim;
+		*delim = 0;
+		np->remote_port = simple_strtol(cur, NULL, 10);
+		cur = delim;
 	}
 	cur++;
 	printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);
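For orientation, the string this function parses (used by netconsole and similar clients) has the shape local_port@local_ip/dev,remote_port@remote_ip/remote_mac, with most fields optional; an illustrative example with made-up addresses:

	netconsole=6665@192.168.0.1/eth1,6666@192.168.0.2/00:0e:0c:12:34:56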
@@ -591,42 +540,41 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 	/* dst ip */
 	if ((delim = strchr(cur, '/')) == NULL)
 		goto parse_failed;
-	*delim=0;
-	np->remote_ip=ntohl(in_aton(cur));
-	cur=delim+1;
+	*delim = 0;
+	np->remote_ip = ntohl(in_aton(cur));
+	cur = delim + 1;
 
 	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
 	       np->name, HIPQUAD(np->remote_ip));
 
-	if( *cur != 0 )
-	{
+	if (*cur != 0) {
 		/* MAC address */
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[0]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
+		*delim = 0;
+		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[1]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
+		*delim = 0;
+		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[2]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
+		*delim = 0;
+		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[3]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
+		*delim = 0;
+		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
 		if ((delim = strchr(cur, ':')) == NULL)
 			goto parse_failed;
-		*delim=0;
-		np->remote_mac[4]=simple_strtol(cur, NULL, 16);
-		cur=delim+1;
-		np->remote_mac[5]=simple_strtol(cur, NULL, 16);
+		*delim = 0;
+		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
+		cur = delim + 1;
+		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
 	}
 
 	printk(KERN_INFO "%s: remote ethernet address "
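The unrolled remote_mac[0..5] parsing above is the classic strchr/strtol split; the same idiom as a compact, compilable userspace loop (not the kernel's code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_mac(char *cur, unsigned char mac[6])
{
	char *delim;
	int i;

	for (i = 0; i < 5; i++) {
		if ((delim = strchr(cur, ':')) == NULL)
			return -1;	/* malformed: fewer than 6 bytes */
		*delim = 0;
		mac[i] = strtol(cur, NULL, 16);
		cur = delim + 1;
	}
	mac[5] = strtol(cur, NULL, 16);
	return 0;
}

int main(void)
{
	char s[] = "00:0e:0c:12:34:56";
	unsigned char mac[6];

	if (parse_mac(s, mac) == 0)
		printf("%02x...%02x\n", mac[0], mac[5]);	/* 00...56 */
	return 0;
}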
@@ -653,34 +601,44 @@ int netpoll_setup(struct netpoll *np)
 	struct in_device *in_dev;
 	struct netpoll_info *npinfo;
 	unsigned long flags;
+	int err;
 
 	if (np->dev_name)
 		ndev = dev_get_by_name(np->dev_name);
 	if (!ndev) {
 		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
 		       np->name, np->dev_name);
-		return -1;
+		return -ENODEV;
 	}
 
 	np->dev = ndev;
 	if (!ndev->npinfo) {
 		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
-		if (!npinfo)
+		if (!npinfo) {
+			err = -ENOMEM;
 			goto release;
+		}
 
 		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 		spin_lock_init(&npinfo->poll_lock);
 		npinfo->poll_owner = -1;
-		npinfo->tries = MAX_RETRIES;
+
 		spin_lock_init(&npinfo->rx_lock);
 		skb_queue_head_init(&npinfo->arp_tx);
-	} else
+		skb_queue_head_init(&npinfo->txq);
+		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+
+		atomic_set(&npinfo->refcnt, 1);
+	} else {
 		npinfo = ndev->npinfo;
+		atomic_inc(&npinfo->refcnt);
+	}
 
 	if (!ndev->poll_controller) {
 		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
 		       np->name, np->dev_name);
+		err = -ENOTSUPP;
 		goto release;
 	}
 
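npinfo is now shared per device and reference-counted: the first netpoll client allocates it with refcnt = 1, later clients only take a reference. A minimal single-threaded userspace model of that get-or-create idiom (names are illustrative, not kernel API):

#include <stdlib.h>

struct shared_info {
	int refcnt;
	/* ... per-device state ... */
};

static struct shared_info *get_info(struct shared_info **slot)
{
	struct shared_info *info = *slot;

	if (!info) {
		info = calloc(1, sizeof(*info));
		if (!info)
			return NULL;	/* caller maps this to -ENOMEM */
		info->refcnt = 1;	/* first user owns the initial ref */
		*slot = info;
	} else {
		info->refcnt++;		/* later users just take a reference */
	}
	return info;
}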
@@ -691,13 +649,14 @@ int netpoll_setup(struct netpoll *np)
 		       np->name, np->dev_name);
 
 		rtnl_lock();
-		if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) {
+		err = dev_open(ndev);
+		rtnl_unlock();
+
+		if (err) {
 			printk(KERN_ERR "%s: failed to open %s\n",
-			       np->name, np->dev_name);
-			rtnl_unlock();
+			       np->name, ndev->name);
 			goto release;
 		}
-		rtnl_unlock();
 
 		atleast = jiffies + HZ/10;
 		atmost = jiffies + 4*HZ;
@@ -735,6 +694,7 @@ int netpoll_setup(struct netpoll *np)
 		rcu_read_unlock();
 		printk(KERN_ERR "%s: no IP address for %s, aborting\n",
 		       np->name, np->dev_name);
+		err = -EDESTADDRREQ;
 		goto release;
 	}
 
@@ -767,9 +727,16 @@ int netpoll_setup(struct netpoll *np)
 		kfree(npinfo);
 	np->dev = NULL;
 	dev_put(ndev);
-	return -1;
+	return err;
 }
 
+static int __init netpoll_init(void)
+{
+	skb_queue_head_init(&skb_pool);
+	return 0;
+}
+core_initcall(netpoll_init);
+
 void netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
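The new netpoll_init() runs from core_initcall(), so skb_pool is a valid (empty) queue before any netpoll client can touch it. A userspace analogue of that ordering guarantee, reusing the queue_head model from the first sketch (the GCC constructor attribute standing in for the initcall machinery):

static struct queue_head pool;

/* runs before main(), as core_initcall() runs before device users */
static void __attribute__((constructor)) pool_init(void)
{
	queue_init(&pool);
}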
@@ -777,12 +744,25 @@ void netpoll_cleanup(struct netpoll *np)
 
 	if (np->dev) {
 		npinfo = np->dev->npinfo;
-		if (npinfo && npinfo->rx_np == np) {
-			spin_lock_irqsave(&npinfo->rx_lock, flags);
-			npinfo->rx_np = NULL;
-			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-			spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+		if (npinfo) {
+			if (npinfo->rx_np == np) {
+				spin_lock_irqsave(&npinfo->rx_lock, flags);
+				npinfo->rx_np = NULL;
+				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+			}
+
+			np->dev->npinfo = NULL;
+			if (atomic_dec_and_test(&npinfo->refcnt)) {
+				skb_queue_purge(&npinfo->arp_tx);
+				skb_queue_purge(&npinfo->txq);
+				cancel_rearming_delayed_work(&npinfo->tx_work);
+				flush_scheduled_work();
+
+				kfree(npinfo);
+			}
 		}
+
 		dev_put(np->dev);
 	}
 
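netpoll_cleanup() becomes the teardown half of that refcount pattern: only the last user purges the queues, cancels the delayed work and frees npinfo. Continuing the earlier get_info() sketch (userspace model, single-threaded):

/* Mirror of get_info(): the last reference frees the shared state; the
 * kernel side additionally purges both queues and flushes pending work
 * before kfree(), as the hunk above shows. */
static void put_info(struct shared_info **slot)
{
	struct shared_info *info = *slot;

	if (!info)
		return;
	*slot = NULL;
	if (--info->refcnt == 0)
		free(info);
}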
@@ -809,4 +789,3 @@ EXPORT_SYMBOL(netpoll_setup);
 EXPORT_SYMBOL(netpoll_cleanup);
 EXPORT_SYMBOL(netpoll_send_udp);
 EXPORT_SYMBOL(netpoll_poll);
-EXPORT_SYMBOL(netpoll_queue);