Diffstat (limited to 'drivers/block/aoe/aoecmd.c')
-rw-r--r--	drivers/block/aoe/aoecmd.c	177
1 files changed, 119 insertions, 58 deletions
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 4ab01ce5cf36..150eb78cd5a9 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/genhd.h>
 #include <asm/unaligned.h>
 #include "aoe.h"
 
@@ -189,12 +190,67 @@ aoecmd_ata_rw(struct aoedev *d, struct frame *f)
 	}
 }
 
+/* some callers cannot sleep, and they can call this function,
+ * transmitting the packets later, when interrupts are on
+ */
+static struct sk_buff *
+aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
+{
+	struct aoe_hdr *h;
+	struct aoe_cfghdr *ch;
+	struct sk_buff *skb, *sl, *sl_tail;
+	struct net_device *ifp;
+
+	sl = sl_tail = NULL;
+
+	read_lock(&dev_base_lock);
+	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
+		dev_hold(ifp);
+		if (!is_aoe_netif(ifp))
+			continue;
+
+		skb = new_skb(ifp, sizeof *h + sizeof *ch);
+		if (skb == NULL) {
+			printk(KERN_INFO "aoe: aoecmd_cfg: skb alloc failure\n");
+			continue;
+		}
+		if (sl_tail == NULL)
+			sl_tail = skb;
+		h = (struct aoe_hdr *) skb->mac.raw;
+		memset(h, 0, sizeof *h + sizeof *ch);
+
+		memset(h->dst, 0xff, sizeof h->dst);
+		memcpy(h->src, ifp->dev_addr, sizeof h->src);
+		h->type = __constant_cpu_to_be16(ETH_P_AOE);
+		h->verfl = AOE_HVER;
+		h->major = cpu_to_be16(aoemajor);
+		h->minor = aoeminor;
+		h->cmd = AOECMD_CFG;
+
+		skb->next = sl;
+		sl = skb;
+	}
+	read_unlock(&dev_base_lock);
+
+	if (tail != NULL)
+		*tail = sl_tail;
+	return sl;
+}
+
 /* enters with d->lock held */
 void
 aoecmd_work(struct aoedev *d)
 {
 	struct frame *f;
 	struct buf *buf;
+
+	if (d->flags & DEVFL_PAUSE) {
+		if (!aoedev_isbusy(d))
+			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
+					d->aoeminor, &d->sendq_tl);
+		return;
+	}
+
 loop:
 	f = getframe(d, FREETAG);
 	if (f == NULL)
@@ -306,6 +362,37 @@ tdie: spin_unlock_irqrestore(&d->lock, flags);
 	aoenet_xmit(sl);
 }
 
+/* this function performs work that has been deferred until sleeping is OK
+ */
+void
+aoecmd_sleepwork(void *vp)
+{
+	struct aoedev *d = (struct aoedev *) vp;
+
+	if (d->flags & DEVFL_GDALLOC)
+		aoeblk_gdalloc(d);
+
+	if (d->flags & DEVFL_NEWSIZE) {
+		struct block_device *bd;
+		unsigned long flags;
+		u64 ssize;
+
+		ssize = d->gd->capacity;
+		bd = bdget_disk(d->gd, 0);
+
+		if (bd) {
+			mutex_lock(&bd->bd_inode->i_mutex);
+			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
+			mutex_unlock(&bd->bd_inode->i_mutex);
+			bdput(bd);
+		}
+		spin_lock_irqsave(&d->lock, flags);
+		d->flags |= DEVFL_UP;
+		d->flags &= ~DEVFL_NEWSIZE;
+		spin_unlock_irqrestore(&d->lock, flags);
+	}
+}
+
 static void
 ataid_complete(struct aoedev *d, unsigned char *id)
 {
@@ -340,21 +427,29 @@ ataid_complete(struct aoedev *d, unsigned char *id)
 		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
 		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
 	}
+
+	if (d->ssize != ssize)
+		printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu "
+			"sectors\n", (unsigned long long)mac_addr(d->addr),
+			d->aoemajor, d->aoeminor,
+			d->fw_ver, (long long)ssize);
 	d->ssize = ssize;
 	d->geo.start = 0;
 	if (d->gd != NULL) {
 		d->gd->capacity = ssize;
-		d->flags |= DEVFL_UP;
-		return;
-	}
-	if (d->flags & DEVFL_WORKON) {
-		printk(KERN_INFO "aoe: ataid_complete: can't schedule work, it's already on! "
-			"(This really shouldn't happen).\n");
-		return;
+		d->flags |= DEVFL_NEWSIZE;
+	} else {
+		if (d->flags & DEVFL_GDALLOC) {
+			printk(KERN_INFO "aoe: %s: %s e%lu.%lu, %s\n",
+				__FUNCTION__,
+				"can't schedule work for",
+				d->aoemajor, d->aoeminor,
+				"it's already on! (This really shouldn't happen).\n");
+			return;
+		}
+		d->flags |= DEVFL_GDALLOC;
 	}
-	INIT_WORK(&d->work, aoeblk_gdalloc, d);
 	schedule_work(&d->work);
-	d->flags |= DEVFL_WORKON;
 }
 
 static void
@@ -452,7 +547,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 			return;
 		}
 		ataid_complete(d, (char *) (ahin+1));
-		/* d->flags |= DEVFL_WC_UPDATE; */
+		d->flags &= ~DEVFL_PAUSE;
 		break;
 	default:
 		printk(KERN_INFO "aoe: aoecmd_ata_rsp: unrecognized "
@@ -485,51 +580,19 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 	f->tag = FREETAG;
 
 	aoecmd_work(d);
-
 	sl = d->sendq_hd;
 	d->sendq_hd = d->sendq_tl = NULL;
 
 	spin_unlock_irqrestore(&d->lock, flags);
-
 	aoenet_xmit(sl);
 }
 
 void
 aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
 {
-	struct aoe_hdr *h;
-	struct aoe_cfghdr *ch;
-	struct sk_buff *skb, *sl;
-	struct net_device *ifp;
-
-	sl = NULL;
-
-	read_lock(&dev_base_lock);
-	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
-		dev_hold(ifp);
-		if (!is_aoe_netif(ifp))
-			continue;
-
-		skb = new_skb(ifp, sizeof *h + sizeof *ch);
-		if (skb == NULL) {
-			printk(KERN_INFO "aoe: aoecmd_cfg: skb alloc failure\n");
-			continue;
-		}
-		h = (struct aoe_hdr *) skb->mac.raw;
-		memset(h, 0, sizeof *h + sizeof *ch);
-
-		memset(h->dst, 0xff, sizeof h->dst);
-		memcpy(h->src, ifp->dev_addr, sizeof h->src);
-		h->type = __constant_cpu_to_be16(ETH_P_AOE);
-		h->verfl = AOE_HVER;
-		h->major = cpu_to_be16(aoemajor);
-		h->minor = aoeminor;
-		h->cmd = AOECMD_CFG;
+	struct sk_buff *sl;
 
-		skb->next = sl;
-		sl = skb;
-	}
-	read_unlock(&dev_base_lock);
+	sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
 
 	aoenet_xmit(sl);
 }
@@ -562,9 +625,6 @@ aoecmd_ata_id(struct aoedev *d)
 	f->waited = 0;
 	f->writedatalen = 0;
 
-	/* this message initializes the device, so we reset the rttavg */
-	d->rttavg = MAXTIMER;
-
 	/* set up ata header */
 	ah->scnt = 1;
 	ah->cmdstat = WIN_IDENTIFY;
@@ -572,12 +632,8 @@
 
 	skb = skb_prepare(d, f);
 
-	/* we now want to start the rexmit tracking */
-	d->flags &= ~DEVFL_TKILL;
-	d->timer.data = (ulong) d;
+	d->rttavg = MAXTIMER;
 	d->timer.function = rexmit_timer;
-	d->timer.expires = jiffies + TIMERTICK;
-	add_timer(&d->timer);
 
 	return skb;
 }
@@ -619,23 +675,28 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
 	if (bufcnt > MAXFRAMES) /* keep it reasonable */
 		bufcnt = MAXFRAMES;
 
-	d = aoedev_set(sysminor, h->src, skb->dev, bufcnt);
+	d = aoedev_by_sysminor_m(sysminor, bufcnt);
 	if (d == NULL) {
-		printk(KERN_INFO "aoe: aoecmd_cfg_rsp: device set failure\n");
+		printk(KERN_INFO "aoe: aoecmd_cfg_rsp: device sysminor_m failure\n");
 		return;
 	}
 
 	spin_lock_irqsave(&d->lock, flags);
 
-	if (d->flags & (DEVFL_UP | DEVFL_CLOSEWAIT)) {
+	/* permit device to migrate mac and network interface */
+	d->ifp = skb->dev;
+	memcpy(d->addr, h->src, sizeof d->addr);
+
+	/* don't change users' perspective */
+	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
 		spin_unlock_irqrestore(&d->lock, flags);
 		return;
 	}
-
+	d->flags |= DEVFL_PAUSE; /* force pause */
 	d->fw_ver = be16_to_cpu(ch->fwver);
 
-	/* we get here only if the device is new */
-	sl = aoecmd_ata_id(d);
+	/* check for already outstanding ataid */
+	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;
 
 	spin_unlock_irqrestore(&d->lock, flags);
 