Diffstat (limited to 'drivers/block/aoe/aoecmd.c')
 drivers/block/aoe/aoecmd.c | 104 ++++++++++++++++++-------------------------
 1 file changed, 44 insertions(+), 60 deletions(-)
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 2f1746295d0..71ff78c9e4d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -114,29 +114,22 @@ ifrotate(struct aoetgt *t)
 static void
 skb_pool_put(struct aoedev *d, struct sk_buff *skb)
 {
-	if (!d->skbpool_hd)
-		d->skbpool_hd = skb;
-	else
-		d->skbpool_tl->next = skb;
-	d->skbpool_tl = skb;
+	__skb_queue_tail(&d->skbpool, skb);
 }
 
 static struct sk_buff *
 skb_pool_get(struct aoedev *d)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek(&d->skbpool);
 
-	skb = d->skbpool_hd;
 	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
-		d->skbpool_hd = skb->next;
-		skb->next = NULL;
+		__skb_unlink(skb, &d->skbpool);
 		return skb;
 	}
-	if (d->nskbpool < NSKBPOOLMAX
-	&& (skb = new_skb(ETH_ZLEN))) {
-		d->nskbpool++;
+	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
+	    (skb = new_skb(ETH_ZLEN)))
 		return skb;
-	}
+
 	return NULL;
 }
 
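The pool conversion above trades three hand-maintained fields (skbpool_hd, skbpool_tl, nskbpool) for a single struct sk_buff_head, whose qlen member replaces the manual counter. A minimal kernel-context sketch of the same idiom (pool_demo is a made-up name; not a standalone program, and the double-underscore helpers do no locking of their own — here the driver relies on d->lock):

	#include <linux/skbuff.h>

	static void pool_demo(struct sk_buff_head *pool, struct sk_buff *skb)
	{
		__skb_queue_head_init(pool);	/* head, tail and qlen in one struct */
		__skb_queue_tail(pool, skb);	/* was: skbpool_hd/skbpool_tl juggling */

		skb = skb_peek(pool);		/* NULL when the pool is empty */
		if (skb)
			__skb_unlink(skb, pool);	/* was: skb->next surgery by hand */
	}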
@@ -293,29 +286,22 @@ aoecmd_ata_rw(struct aoedev *d)
 
 	skb->dev = t->ifp->nd;
 	skb = skb_clone(skb, GFP_ATOMIC);
-	if (skb) {
-		if (d->sendq_hd)
-			d->sendq_tl->next = skb;
-		else
-			d->sendq_hd = skb;
-		d->sendq_tl = skb;
-	}
+	if (skb)
+		__skb_queue_tail(&d->sendq, skb);
 	return 1;
 }
 
 /* some callers cannot sleep, and they can call this function,
  * transmitting the packets later, when interrupts are on
  */
-static struct sk_buff *
-aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
+static void
+aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
 {
 	struct aoe_hdr *h;
 	struct aoe_cfghdr *ch;
-	struct sk_buff *skb, *sl, *sl_tail;
+	struct sk_buff *skb;
 	struct net_device *ifp;
 
-	sl = sl_tail = NULL;
-
 	read_lock(&dev_base_lock);
 	for_each_netdev(&init_net, ifp) {
 		dev_hold(ifp);
@@ -329,8 +315,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
 		}
 		skb_put(skb, sizeof *h + sizeof *ch);
 		skb->dev = ifp;
-		if (sl_tail == NULL)
-			sl_tail = skb;
+		__skb_queue_tail(queue, skb);
 		h = (struct aoe_hdr *) skb_mac_header(skb);
 		memset(h, 0, sizeof *h + sizeof *ch);
 
@@ -342,16 +327,10 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
 		h->minor = aoeminor;
 		h->cmd = AOECMD_CFG;
 
-		skb->next = sl;
-		sl = skb;
 cont:
 		dev_put(ifp);
 	}
 	read_unlock(&dev_base_lock);
-
-	if (tail != NULL)
-		*tail = sl_tail;
-	return sl;
 }
 
 static void
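With a caller-provided queue, aoecmd_cfg_pkts() no longer has to hand back both the head and (through the **tail out-parameter) the tail of a hand-linked chain; __skb_queue_tail(queue, skb) tracks both ends for free. The calling convention becomes (a sketch — aoecmd_cfg() later in this patch does exactly this):

	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);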
@@ -406,11 +385,7 @@ resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
 	skb = skb_clone(skb, GFP_ATOMIC);
 	if (skb == NULL)
 		return;
-	if (d->sendq_hd)
-		d->sendq_tl->next = skb;
-	else
-		d->sendq_hd = skb;
-	d->sendq_tl = skb;
+	__skb_queue_tail(&d->sendq, skb);
 }
 
 static int
@@ -508,16 +483,15 @@ ata_scnt(unsigned char *packet) {
 static void
 rexmit_timer(ulong vp)
 {
+	struct sk_buff_head queue;
 	struct aoedev *d;
 	struct aoetgt *t, **tt, **te;
 	struct aoeif *ifp;
 	struct frame *f, *e;
-	struct sk_buff *sl;
 	register long timeout;
 	ulong flags, n;
 
 	d = (struct aoedev *) vp;
-	sl = NULL;
 
 	/* timeout is always ~150% of the moving average */
 	timeout = d->rttavg;
@@ -589,7 +563,7 @@ rexmit_timer(ulong vp)
 		}
 	}
 
-	if (d->sendq_hd) {
+	if (!skb_queue_empty(&d->sendq)) {
 		n = d->rttavg <<= 1;
 		if (n > MAXTIMER)
 			d->rttavg = MAXTIMER;
@@ -600,15 +574,15 @@ rexmit_timer(ulong vp)
 		aoecmd_work(d);
 	}
 
-	sl = d->sendq_hd;
-	d->sendq_hd = d->sendq_tl = NULL;
+	__skb_queue_head_init(&queue);
+	skb_queue_splice_init(&d->sendq, &queue);
 
 	d->timer.expires = jiffies + TIMERTICK;
 	add_timer(&d->timer);
 
 	spin_unlock_irqrestore(&d->lock, flags);
 
-	aoenet_xmit(sl);
+	aoenet_xmit(&queue);
 }
 
 /* enters with d->lock held */
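The rexmit_timer() change is the drain-under-lock idiom: pending skbs are spliced onto a stack-local queue while d->lock is held, then transmitted after the lock is dropped, so aoenet_xmit() never runs under the spinlock. The shape of it, as a sketch:

	struct sk_buff_head queue;
	unsigned long flags;

	__skb_queue_head_init(&queue);
	spin_lock_irqsave(&d->lock, flags);
	skb_queue_splice_init(&d->sendq, &queue);	/* d->sendq is left empty */
	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);				/* no lock held while sending */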
@@ -645,7 +619,7 @@ aoecmd_sleepwork(struct work_struct *work)
 	unsigned long flags;
 	u64 ssize;
 
-	ssize = d->gd->capacity;
+	ssize = get_capacity(d->gd);
 	bd = bdget_disk(d->gd, 0);
 
 	if (bd) {
@@ -707,7 +681,7 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
 	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
 		return;
 	if (d->gd != NULL) {
-		d->gd->capacity = ssize;
+		set_capacity(d->gd, ssize);
 		d->flags |= DEVFL_NEWSIZE;
 	} else
 		d->flags |= DEVFL_GDALLOC;
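These two hunks replace direct pokes at gd->capacity with the <linux/genhd.h> accessors, letting the capacity field become private to the block layer; both sides count 512-byte sectors. A kernel-context fragment showing the pair (resize_demo is a made-up name for illustration):

	#include <linux/genhd.h>

	static void resize_demo(struct gendisk *gd, sector_t sectors)
	{
		set_capacity(gd, sectors);	/* was: gd->capacity = ssize */
		pr_info("now %llu sectors\n",
			(unsigned long long) get_capacity(gd));
	}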
@@ -756,23 +730,28 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
 	unsigned long n_sect = bio->bi_size >> 9;
 	const int rw = bio_data_dir(bio);
 	struct hd_struct *part;
+	int cpu;
+
+	cpu = part_stat_lock();
+	part = disk_map_sector_rcu(disk, sector);
+
+	part_stat_inc(cpu, part, ios[rw]);
+	part_stat_add(cpu, part, ticks[rw], duration);
+	part_stat_add(cpu, part, sectors[rw], n_sect);
+	part_stat_add(cpu, part, io_ticks, duration);
 
-	part = get_part(disk, sector);
-	all_stat_inc(disk, part, ios[rw], sector);
-	all_stat_add(disk, part, ticks[rw], duration, sector);
-	all_stat_add(disk, part, sectors[rw], n_sect, sector);
-	all_stat_add(disk, part, io_ticks, duration, sector);
+	part_stat_unlock();
 }
 
 void
 aoecmd_ata_rsp(struct sk_buff *skb)
 {
+	struct sk_buff_head queue;
 	struct aoedev *d;
 	struct aoe_hdr *hin, *hout;
 	struct aoe_atahdr *ahin, *ahout;
 	struct frame *f;
 	struct buf *buf;
-	struct sk_buff *sl;
 	struct aoetgt *t;
 	struct aoeif *ifp;
 	register long n;
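The diskstats() hunk moves from the old all_stat_* helpers to the per-cpu partition statistics API: part_stat_lock() disables preemption and returns the current cpu, disk_map_sector_rcu() resolves the sector to its hd_struct under RCU (which the stat lock also pins), and part_stat_unlock() undoes both. Annotated, the bracket looks like this:

	int cpu;

	cpu = part_stat_lock();			/* preempt off + smp_processor_id() */
	part = disk_map_sector_rcu(disk, sector);
	part_stat_inc(cpu, part, ios[rw]);	/* indexes the per-cpu counter array */
	part_stat_unlock();			/* ends the RCU/preempt critical section */

Because every update between the two calls touches only this cpu's counters, no shared cacheline bounces on the I/O completion path.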
@@ -893,21 +872,21 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 
 	aoecmd_work(d);
 xmit:
-	sl = d->sendq_hd;
-	d->sendq_hd = d->sendq_tl = NULL;
+	__skb_queue_head_init(&queue);
+	skb_queue_splice_init(&d->sendq, &queue);
 
 	spin_unlock_irqrestore(&d->lock, flags);
-	aoenet_xmit(sl);
+	aoenet_xmit(&queue);
 }
 
 void
 aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
 {
-	struct sk_buff *sl;
+	struct sk_buff_head queue;
 
-	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
-
-	aoenet_xmit(sl);
+	__skb_queue_head_init(&queue);
+	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
+	aoenet_xmit(&queue);
 }
 
 struct sk_buff *
@@ -1076,7 +1055,12 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
 
 	spin_unlock_irqrestore(&d->lock, flags);
 
-	aoenet_xmit(sl);
+	if (sl) {
+		struct sk_buff_head queue;
+		__skb_queue_head_init(&queue);
+		__skb_queue_tail(&queue, sl);
+		aoenet_xmit(&queue);
+	}
 }
 
 void
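aoecmd_cfg_rsp() builds at most one response skb (sl), so rather than keeping a second calling convention around it wraps the lone skb in a temporary queue. The consumer side, aoenet_xmit() in aoenet.c, is not part of this diff; under this patch it presumably drains the queue along these lines (a sketch under that assumption, not the actual aoenet.c code):

	void
	aoenet_xmit(struct sk_buff_head *queue)
	{
		struct sk_buff *skb, *tmp;

		skb_queue_walk_safe(queue, skb, tmp) {
			__skb_unlink(skb, queue);
			dev_queue_xmit(skb);
		}
	}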