Diffstat (limited to 'drivers/block/aoe/aoecmd.c')
-rw-r--r--	drivers/block/aoe/aoecmd.c	85
1 file changed, 32 insertions(+), 53 deletions(-)
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 2f1746295d06..e33da30be4c4 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -114,29 +114,22 @@ ifrotate(struct aoetgt *t)
 static void
 skb_pool_put(struct aoedev *d, struct sk_buff *skb)
 {
-	if (!d->skbpool_hd)
-		d->skbpool_hd = skb;
-	else
-		d->skbpool_tl->next = skb;
-	d->skbpool_tl = skb;
+	__skb_queue_tail(&d->skbpool, skb);
 }
 
 static struct sk_buff *
 skb_pool_get(struct aoedev *d)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek(&d->skbpool);
 
-	skb = d->skbpool_hd;
 	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
-		d->skbpool_hd = skb->next;
-		skb->next = NULL;
+		__skb_unlink(skb, &d->skbpool);
 		return skb;
 	}
-	if (d->nskbpool < NSKBPOOLMAX
-	&& (skb = new_skb(ETH_ZLEN))) {
-		d->nskbpool++;
+	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
+	    (skb = new_skb(ETH_ZLEN)))
 		return skb;
-	}
+
 	return NULL;
 }
 
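Note: the hunk above replaces a hand-rolled singly linked list (skbpool_hd, skbpool_tl, and skb->next surgery) with the kernel's standard struct sk_buff_head. A minimal sketch of the same pattern outside the driver follows; the names pool_put/pool_get are illustrative, and the __skb_* variants deliberately take no lock, so the caller must hold whatever lock protects the queue (d->lock in aoe).

	#include <linux/skbuff.h>

	/* Append to the pool; replaces the old head/tail pointer surgery.
	 * Caller holds the lock protecting the queue. */
	static void pool_put(struct sk_buff_head *pool, struct sk_buff *skb)
	{
		__skb_queue_tail(pool, skb);
	}

	/* Look at the head without removing it, then detach it if usable.
	 * skb_queue_len() gives the count the old code tracked by hand
	 * in d->nskbpool. */
	static struct sk_buff *pool_get(struct sk_buff_head *pool)
	{
		struct sk_buff *skb = skb_peek(pool);

		if (skb)
			__skb_unlink(skb, pool);
		return skb;
	}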
@@ -293,29 +286,22 @@ aoecmd_ata_rw(struct aoedev *d)
 
 	skb->dev = t->ifp->nd;
 	skb = skb_clone(skb, GFP_ATOMIC);
-	if (skb) {
-		if (d->sendq_hd)
-			d->sendq_tl->next = skb;
-		else
-			d->sendq_hd = skb;
-		d->sendq_tl = skb;
-	}
+	if (skb)
+		__skb_queue_tail(&d->sendq, skb);
 	return 1;
 }
 
 /* some callers cannot sleep, and they can call this function,
  * transmitting the packets later, when interrupts are on
  */
-static struct sk_buff *
-aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
+static void
+aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
 {
 	struct aoe_hdr *h;
 	struct aoe_cfghdr *ch;
-	struct sk_buff *skb, *sl, *sl_tail;
+	struct sk_buff *skb;
 	struct net_device *ifp;
 
-	sl = sl_tail = NULL;
-
 	read_lock(&dev_base_lock);
 	for_each_netdev(&init_net, ifp) {
 		dev_hold(ifp);
@@ -329,8 +315,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
 		}
 		skb_put(skb, sizeof *h + sizeof *ch);
 		skb->dev = ifp;
-		if (sl_tail == NULL)
-			sl_tail = skb;
+		__skb_queue_tail(queue, skb);
 		h = (struct aoe_hdr *) skb_mac_header(skb);
 		memset(h, 0, sizeof *h + sizeof *ch);
 
@@ -342,16 +327,10 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
 		h->minor = aoeminor;
 		h->cmd = AOECMD_CFG;
 
-		skb->next = sl;
-		sl = skb;
 cont:
 		dev_put(ifp);
 	}
 	read_unlock(&dev_base_lock);
-
-	if (tail != NULL)
-		*tail = sl_tail;
-	return sl;
 }
 
 static void
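Note: aoecmd_cfg_pkts() no longer returns a list head and writes a tail pointer through **tail; it appends to a caller-owned struct sk_buff_head. A rough sketch of the new calling convention, where build_pkts() stands in for aoecmd_cfg_pkts() and make_one_pkt() is a hypothetical packet constructor:

	/* Producer: append each packet to the caller's queue. */
	static void build_pkts(struct sk_buff_head *queue)
	{
		struct sk_buff *skb;

		while ((skb = make_one_pkt()) != NULL)	/* hypothetical helper */
			__skb_queue_tail(queue, skb);
	}

	/* Consumer: an on-stack queue is private to this thread, so the
	 * unlocked init and queue operations are safe here. */
	static void send_cfg(void)
	{
		struct sk_buff_head queue;

		__skb_queue_head_init(&queue);
		build_pkts(&queue);
		aoenet_xmit(&queue);
	}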
@@ -406,11 +385,7 @@ resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
 	skb = skb_clone(skb, GFP_ATOMIC);
 	if (skb == NULL)
 		return;
-	if (d->sendq_hd)
-		d->sendq_tl->next = skb;
-	else
-		d->sendq_hd = skb;
-	d->sendq_tl = skb;
+	__skb_queue_tail(&d->sendq, skb);
 }
 
 static int
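Note: resend() is called with d->lock already held, which is what makes the unlocked __skb_queue_tail() correct here. For reference, the locked and unlocked queue primitives differ only in who takes the queue's spinlock; a sketch, assuming the surrounding aoe locking:

	/* skb_queue_tail() takes the sk_buff_head's own lock internally: */
	skb_queue_tail(&d->sendq, skb);

	/* __skb_queue_tail() does not; the caller serializes, as aoe
	 * does with the per-device d->lock: */
	spin_lock_irqsave(&d->lock, flags);
	__skb_queue_tail(&d->sendq, skb);
	spin_unlock_irqrestore(&d->lock, flags);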
@@ -508,16 +483,15 @@ ata_scnt(unsigned char *packet) {
 static void
 rexmit_timer(ulong vp)
 {
+	struct sk_buff_head queue;
 	struct aoedev *d;
 	struct aoetgt *t, **tt, **te;
 	struct aoeif *ifp;
 	struct frame *f, *e;
-	struct sk_buff *sl;
 	register long timeout;
 	ulong flags, n;
 
 	d = (struct aoedev *) vp;
-	sl = NULL;
 
 	/* timeout is always ~150% of the moving average */
 	timeout = d->rttavg;
@@ -589,7 +563,7 @@ rexmit_timer(ulong vp)
 		}
 	}
 
-	if (d->sendq_hd) {
+	if (!skb_queue_empty(&d->sendq)) {
 		n = d->rttavg <<= 1;
 		if (n > MAXTIMER)
 			d->rttavg = MAXTIMER;
@@ -600,15 +574,15 @@ rexmit_timer(ulong vp)
 		aoecmd_work(d);
 	}
 
-	sl = d->sendq_hd;
-	d->sendq_hd = d->sendq_tl = NULL;
+	__skb_queue_head_init(&queue);
+	skb_queue_splice_init(&d->sendq, &queue);
 
 	d->timer.expires = jiffies + TIMERTICK;
 	add_timer(&d->timer);
 
 	spin_unlock_irqrestore(&d->lock, flags);
 
-	aoenet_xmit(sl);
+	aoenet_xmit(&queue);
 }
 
 /* enters with d->lock held */
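Note: this hunk shows the central idiom of the conversion: splice the device's send queue into an on-stack queue while d->lock is held, then transmit after dropping the lock. skb_queue_splice_init() moves all queued skbs in O(1) and leaves d->sendq empty. The shape of the pattern, reduced to its essentials:

	struct sk_buff_head queue;
	ulong flags;

	__skb_queue_head_init(&queue);		/* private, on-stack queue */

	spin_lock_irqsave(&d->lock, flags);
	skb_queue_splice_init(&d->sendq, &queue);	/* O(1) drain */
	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(&queue);	/* no device lock held during transmit */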
@@ -767,12 +741,12 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
 void
 aoecmd_ata_rsp(struct sk_buff *skb)
 {
+	struct sk_buff_head queue;
 	struct aoedev *d;
 	struct aoe_hdr *hin, *hout;
 	struct aoe_atahdr *ahin, *ahout;
 	struct frame *f;
 	struct buf *buf;
-	struct sk_buff *sl;
 	struct aoetgt *t;
 	struct aoeif *ifp;
 	register long n;
@@ -893,21 +867,21 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 
 	aoecmd_work(d);
 xmit:
-	sl = d->sendq_hd;
-	d->sendq_hd = d->sendq_tl = NULL;
+	__skb_queue_head_init(&queue);
+	skb_queue_splice_init(&d->sendq, &queue);
 
 	spin_unlock_irqrestore(&d->lock, flags);
-	aoenet_xmit(sl);
+	aoenet_xmit(&queue);
 }
 
 void
 aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
 {
-	struct sk_buff *sl;
-
-	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
+	struct sk_buff_head queue;
 
-	aoenet_xmit(sl);
+	__skb_queue_head_init(&queue);
+	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
+	aoenet_xmit(&queue);
 }
 
 struct sk_buff *
@@ -1076,7 +1050,12 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
 
 	spin_unlock_irqrestore(&d->lock, flags);
 
-	aoenet_xmit(sl);
+	if (sl) {
+		struct sk_buff_head queue;
+		__skb_queue_head_init(&queue);
+		__skb_queue_tail(&queue, sl);
+		aoenet_xmit(&queue);
+	}
 }
 
 void
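Note: aoenet_xmit() now takes a struct sk_buff_head rather than a bare skb chain, so even the single-packet path above wraps its one skb in an on-stack queue. The matching consumer lives in aoenet.c and is not part of this diff; presumably it walks and empties the queue, along these lines:

	/* Assumed shape of the updated consumer (aoenet.c, not shown here). */
	void aoenet_xmit(struct sk_buff_head *queue)
	{
		struct sk_buff *skb, *tmp;

		skb_queue_walk_safe(queue, skb, tmp) {
			__skb_unlink(skb, queue);
			dev_queue_xmit(skb);
		}
	}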