path: root/drivers/scsi/lpfc/lpfc_sli.c
author    James Smart <James.Smart@Emulex.Com>           2007-06-17 20:56:38 -0400
committer James Bottomley <jejb@mulgrave.il.steeleye.com> 2007-06-17 23:27:39 -0400
commit    92d7f7b0cde3ad2260e7462b40867b57efd49851
tree      fadb1d8f1a817c2f85937b5e9c3b830bdecb5555 /drivers/scsi/lpfc/lpfc_sli.c
parent    ed957684294618602b48f1950b0c9bbcb036583f
[SCSI] lpfc: NPIV: add NPIV support on top of SLI-3
NPIV support is added to the driver. It utilizes the interfaces of the fc
transport for the creation and deletion of vports. Within the driver, a new
Scsi_Host is created for each NPIV instance, and is paired with a new instance
of a FC port. This allows N FC Port elements to share a single Adapter.

Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--    drivers/scsi/lpfc/lpfc_sli.c    1034
1 file changed, 590 insertions(+), 444 deletions(-)
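To make the vport model described in the commit message concrete, the sketch
below shows the general shape of an fc transport vport-create callback: a
separate Scsi_Host is allocated for each virtual port while the physical
adapter stays shared. This is an illustrative sketch only; the demo_* names,
the private-data layout, and the error handling are assumptions, not the lpfc
code added by this patch.

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

/* Hypothetical per-vport private data; not the lpfc_vport structure. */
struct demo_vport_priv {
	struct fc_vport  *fc_vport;  /* transport-level virtual port      */
	struct Scsi_Host *shost;     /* Scsi_Host owned by this vport     */
	void             *phys_hba;  /* shared physical adapter (elided)  */
};

static struct scsi_host_template demo_vport_template;  /* assumed template */

/*
 * Sketch of an fc_function_template .vport_create callback: each NPIV
 * instance gets its own Scsi_Host, and all instances share one adapter.
 */
static int demo_vport_create(struct fc_vport *fc_vport, bool disable)
{
	struct Scsi_Host *shost;
	struct demo_vport_priv *vpriv;
	int err;

	/* One Scsi_Host per virtual N_Port. */
	shost = scsi_host_alloc(&demo_vport_template, sizeof(*vpriv));
	if (!shost)
		return -ENOMEM;

	vpriv = shost_priv(shost);
	vpriv->fc_vport = fc_vport;
	vpriv->shost = shost;
	/* A real driver would recover the shared adapter from the parent
	 * Scsi_Host of fc_vport and do its vpi registration here. */

	err = scsi_add_host(shost, &fc_vport->dev);
	if (err) {
		scsi_host_put(shost);
		return err;
	}

	if (disable)
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);

	return 0;
}

In the patch below, the same idea surfaces as the new vpi-aware paths: the
REG_VPI/UNREG_VPI mailbox commands and the "pmbox->vport ? pmbox->vport->vpi : 0"
log decorations that identify which virtual port issued a given command.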
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6e0b42bcebe7..a2927dc3161f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -44,14 +44,15 @@
44 * This allows multiple uses of lpfc_msgBlk0311 44 * This allows multiple uses of lpfc_msgBlk0311
45 * w/o perturbing log msg utility. 45 * w/o perturbing log msg utility.
46 */ 46 */
47#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \ 47#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
48 lpfc_printf_log(phba, \ 48 lpfc_printf_log(phba, \
49 KERN_INFO, \ 49 KERN_INFO, \
50 LOG_MBOX | LOG_SLI, \ 50 LOG_MBOX | LOG_SLI, \
51 "%d:0311 Mailbox command x%x cannot issue " \ 51 "%d (%d):0311 Mailbox command x%x cannot " \
52 "Data: x%x x%x x%x\n", \ 52 "issue Data: x%x x%x x%x\n", \
53 phba->brd_no, \ 53 phba->brd_no, \
54 mb->mbxCommand, \ 54 pmbox->vport ? pmbox->vport->vpi : 0, \
55 pmbox->mb.mbxCommand, \
55 phba->pport->port_state, \ 56 phba->pport->port_state, \
56 psli->sli_flag, \ 57 psli->sli_flag, \
57 flag) 58 flag)
@@ -65,11 +66,10 @@ typedef enum _lpfc_iocb_type {
65 LPFC_ABORT_IOCB 66 LPFC_ABORT_IOCB
66} lpfc_iocb_type; 67} lpfc_iocb_type;
67 68
68/* 69 /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
69 * SLI-2/SLI-3 provide different sized iocbs. Given a pointer to the start of 70 * to the start of the ring, and the slot number of the
70 * the ring, and the slot number of the desired iocb entry, calc a pointer to 71 * desired iocb entry, calc a pointer to that entry.
71 * that entry. 72 */
72 */
73static inline IOCB_t * 73static inline IOCB_t *
74lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 74lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
75{ 75{
@@ -229,13 +229,11 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
229 lpfc_config_ring(phba, i, pmb); 229 lpfc_config_ring(phba, i, pmb);
230 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 230 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
231 if (rc != MBX_SUCCESS) { 231 if (rc != MBX_SUCCESS) {
232 lpfc_printf_log(phba, 232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
233 KERN_ERR, 233 "%d:0446 Adapter failed to init (%d), "
234 LOG_INIT,
235 "%d:0446 Adapter failed to init, "
236 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 234 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
237 "ring %d\n", 235 "ring %d\n",
238 phba->brd_no, 236 phba->brd_no, rc,
239 pmbox->mbxCommand, 237 pmbox->mbxCommand,
240 pmbox->mbxStatus, 238 pmbox->mbxStatus,
241 i); 239 i);
@@ -254,9 +252,16 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
254{ 252{
255 list_add_tail(&piocb->list, &pring->txcmplq); 253 list_add_tail(&piocb->list, &pring->txcmplq);
256 pring->txcmplq_cnt++; 254 pring->txcmplq_cnt++;
257 if (unlikely(pring->ringno == LPFC_ELS_RING)) 255 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
258 mod_timer(&piocb->vport->els_tmofunc, 256 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
259 jiffies + HZ * (phba->fc_ratov << 1)); 257 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
258 if (!piocb->vport)
259 BUG();
260 else
261 mod_timer(&piocb->vport->els_tmofunc,
262 jiffies + HZ * (phba->fc_ratov << 1));
263 }
264
260 265
261 return 0; 266 return 0;
262} 267}
@@ -311,8 +316,10 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
311 */ 316 */
312 phba->work_ha |= HA_ERATT; 317 phba->work_ha |= HA_ERATT;
313 phba->work_hs = HS_FFER3; 318 phba->work_hs = HS_FFER3;
319
320 /* hbalock should already be held */
314 if (phba->work_wait) 321 if (phba->work_wait)
315 wake_up(phba->work_wait); 322 lpfc_worker_wake_up(phba);
316 323
317 return NULL; 324 return NULL;
318 } 325 }
@@ -399,7 +406,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
399 /* 406 /*
400 * Issue iocb command to adapter 407 * Issue iocb command to adapter
401 */ 408 */
402 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t)); 409 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
403 wmb(); 410 wmb();
404 pring->stats.iocb_cmd++; 411 pring->stats.iocb_cmd++;
405 412
@@ -520,14 +527,14 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
520 hbqp->next_hbqPutIdx = 0; 527 hbqp->next_hbqPutIdx = 0;
521 528
522 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 529 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
523 uint32_t raw_index = readl(&phba->hbq_get[hbqno]); 530 uint32_t raw_index = phba->hbq_get[hbqno];
524 uint32_t getidx = le32_to_cpu(raw_index); 531 uint32_t getidx = le32_to_cpu(raw_index);
525 532
526 hbqp->local_hbqGetIdx = getidx; 533 hbqp->local_hbqGetIdx = getidx;
527 534
528 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 535 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
529 lpfc_printf_log(phba, KERN_ERR, 536 lpfc_printf_log(phba, KERN_ERR,
530 LOG_SLI, 537 LOG_SLI | LOG_VPORT,
531 "%d:1802 HBQ %d: local_hbqGetIdx " 538 "%d:1802 HBQ %d: local_hbqGetIdx "
532 "%u is > than hbqp->entry_count %u\n", 539 "%u is > than hbqp->entry_count %u\n",
533 phba->brd_no, hbqno, 540 phba->brd_no, hbqno,
@@ -548,117 +555,121 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
548void 555void
549lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 556lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
550{ 557{
551 uint32_t i; 558 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
559 struct hbq_dmabuf *hbq_buf;
552 560
553 if (!phba->hbq_buffer_pool)
554 return;
555 /* Return all memory used by all HBQs */ 561 /* Return all memory used by all HBQs */
556 for (i = 0; i < phba->hbq_buffer_count; i++) { 562 list_for_each_entry_safe(dmabuf, next_dmabuf,
557 lpfc_hbq_free(phba, phba->hbq_buffer_pool[i].dbuf.virt, 563 &phba->hbq_buffer_list, list) {
558 phba->hbq_buffer_pool[i].dbuf.phys); 564 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
565 list_del(&hbq_buf->dbuf.list);
566 lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
567 kfree(hbq_buf);
559 } 568 }
560 kfree(phba->hbq_buffer_pool);
561 phba->hbq_buffer_pool = NULL;
562} 569}
563 570
564static void 571static void
565lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 572lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
566 struct hbq_dmabuf *hbq_buf_desc) 573 struct hbq_dmabuf *hbq_buf)
567{ 574{
568 struct lpfc_hbq_entry *hbqe; 575 struct lpfc_hbq_entry *hbqe;
576 dma_addr_t physaddr = hbq_buf->dbuf.phys;
569 577
570 /* Get next HBQ entry slot to use */ 578 /* Get next HBQ entry slot to use */
571 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 579 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
572 if (hbqe) { 580 if (hbqe) {
573 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 581 struct hbq_s *hbqp = &phba->hbqs[hbqno];
574 582
575 hbqe->bde.addrHigh = putPaddrHigh(hbq_buf_desc->dbuf.phys); 583 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
576 hbqe->bde.addrLow = putPaddrLow(hbq_buf_desc->dbuf.phys); 584 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
577 hbqe->bde.tus.f.bdeSize = FCELSSIZE; 585 hbqe->bde.tus.f.bdeSize = FCELSSIZE;
578 hbqe->bde.tus.f.bdeFlags = 0; 586 hbqe->bde.tus.f.bdeFlags = 0;
579 hbqe->buffer_tag = hbq_buf_desc->tag; 587 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
580 /* Sync SLIM */ 588 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
589 /* Sync SLIM */
581 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 590 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
582 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 591 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
583 /* flush */ 592 /* flush */
584 readl(phba->hbq_put + hbqno); 593 readl(phba->hbq_put + hbqno);
585 phba->hbq_buff_count++; 594 list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
586 } 595 }
587} 596}
588 597
589static void 598static struct lpfc_hbq_init lpfc_els_hbq = {
590lpfc_sli_fill_hbq(struct lpfc_hba *phba, uint32_t hbqno, uint32_t buffer_index) 599 .rn = 1,
591{ 600 .entry_count = 200,
592 struct hbq_dmabuf *hbq_buf_desc; 601 .mask_count = 0,
593 uint32_t i; 602 .profile = 0,
603 .ring_mask = 1 << LPFC_ELS_RING,
604 .buffer_count = 0,
605 .init_count = 20,
606 .add_count = 5,
607};
594 608
595 for (i = 0; i < phba->hbqs[hbqno].entry_count; i++) { 609static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
596 /* Search hbqbufq, from the begining, 610 &lpfc_els_hbq,
597 * looking for an unused entry 611};
598 */
599 phba->hbq_buffer_pool[buffer_index + i].tag |= hbqno << 16;
600 hbq_buf_desc = phba->hbq_buffer_pool + buffer_index + i;
601 lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf_desc);
602 }
603}
604 612
605int 613int
606lpfc_sli_hbqbuf_fill_hbq(struct lpfc_hba *phba) 614lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
607{ 615{
608 return 0; 616 uint32_t i, start, end;
609} 617 struct hbq_dmabuf *hbq_buffer;
610 618
611static int 619 start = lpfc_hbq_defs[hbqno]->buffer_count;
612lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba) 620 end = count + lpfc_hbq_defs[hbqno]->buffer_count;
613{ 621 if (end > lpfc_hbq_defs[hbqno]->entry_count) {
614 uint32_t buffer_index = 0; 622 end = lpfc_hbq_defs[hbqno]->entry_count;
615 uint32_t hbqno; 623 }
616 624
617 /* Populate HBQ entries */ 625 /* Populate HBQ entries */
618 for (hbqno = 0; hbqno < phba->hbq_count; ++hbqno) { 626 for (i = start; i < end; i++) {
619 /* Find ring associated with HBQ */ 627 hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
620 628 GFP_KERNEL);
621 lpfc_sli_fill_hbq(phba, hbqno, buffer_index); 629 if (!hbq_buffer)
622 buffer_index += phba->hbqs[hbqno].entry_count; 630 return 1;
631 hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
632 &hbq_buffer->dbuf.phys);
633 if (hbq_buffer->dbuf.virt == NULL)
634 return 1;
635 hbq_buffer->tag = (i | (hbqno << 16));
636 lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
637 lpfc_hbq_defs[hbqno]->buffer_count++;
623 } 638 }
624 return 0; 639 return 0;
625} 640}
626 641
627struct hbq_dmabuf * 642int
628lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 643lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
629{ 644{
630 if ((tag & 0xffff) < phba->hbq_buffer_count) 645 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
631 return phba->hbq_buffer_pool + (tag & 0xffff); 646 lpfc_hbq_defs[qno]->add_count));
647}
632 648
633 lpfc_printf_log(phba, KERN_ERR, 649int
634 LOG_SLI, 650lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
635 "%d:1803 Bad hbq tag. Data: x%x x%x\n", 651{
636 phba->brd_no, tag, 652 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
637 phba->hbq_buffer_count); 653 lpfc_hbq_defs[qno]->init_count));
638 return NULL;
639} 654}
640 655
641void 656struct hbq_dmabuf *
642lpfc_sli_hbqbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t phys) 657lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
643{ 658{
644 uint32_t i, hbqno; 659 struct lpfc_dmabuf *d_buf;
660 struct hbq_dmabuf *hbq_buf;
645 661
646 for (i = 0; i < phba->hbq_buffer_count; i++) { 662 list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
647 /* Search hbqbufq, from the begining, looking for a match on 663 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
648 phys */ 664 if ((hbq_buf->tag & 0xffff) == tag) {
649 if (phba->hbq_buffer_pool[i].dbuf.phys == phys) { 665 return hbq_buf;
650 hbqno = phba->hbq_buffer_pool[i].tag >> 16;
651 lpfc_sli_hbq_to_firmware(phba, hbqno,
652 phba->hbq_buffer_pool + i);
653 return;
654 } 666 }
655 } 667 }
656 668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
657 lpfc_printf_log(phba, KERN_ERR, 669 "%d:1803 Bad hbq tag. Data: x%x x%x\n",
658 LOG_SLI, 670 phba->brd_no, tag,
659 "%d:1804 Cannot find virtual addr for " 671 lpfc_hbq_defs[tag >> 16]->buffer_count);
660 "mapped buf. Data x%llx\n", 672 return NULL;
661 phba->brd_no, (unsigned long long) phys);
662} 673}
663 674
664void 675void
@@ -723,6 +734,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
723 case MBX_FLASH_WR_ULA: 734 case MBX_FLASH_WR_ULA:
724 case MBX_SET_DEBUG: 735 case MBX_SET_DEBUG:
725 case MBX_LOAD_EXP_ROM: 736 case MBX_LOAD_EXP_ROM:
737 case MBX_REG_VPI:
738 case MBX_UNREG_VPI:
726 ret = mbxCommand; 739 ret = mbxCommand;
727 break; 740 break;
728 default: 741 default:
@@ -770,8 +783,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
770 !pmb->mb.mbxStatus) { 783 !pmb->mb.mbxStatus) {
771 784
772 rpi = pmb->mb.un.varWords[0]; 785 rpi = pmb->mb.un.varWords[0];
773 lpfc_unreg_login(phba, rpi, pmb); 786 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
774 pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 787 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
775 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 788 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
776 if (rc != MBX_NOT_FINISHED) 789 if (rc != MBX_NOT_FINISHED)
777 return; 790 return;
@@ -784,60 +797,25 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
784int 797int
785lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 798lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
786{ 799{
787 MAILBOX_t *mbox, *pmbox; 800 MAILBOX_t *pmbox;
788 LPFC_MBOXQ_t *pmb; 801 LPFC_MBOXQ_t *pmb;
789 int i, rc; 802 int rc;
790 uint32_t process_next; 803 LIST_HEAD(cmplq);
791 unsigned long iflags;
792
793 /* We should only get here if we are in SLI2 mode */
794 if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
795 return 1;
796 }
797 804
798 phba->sli.slistat.mbox_event++; 805 phba->sli.slistat.mbox_event++;
799 806
800 /* Get a Mailbox buffer to setup mailbox commands for callback */ 807 /* Get all completed mailboxe buffers into the cmplq */
801 if ((pmb = phba->sli.mbox_active)) { 808 spin_lock_irq(&phba->hbalock);
802 pmbox = &pmb->mb; 809 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
803 mbox = &phba->slim2p->mbx; 810 spin_unlock_irq(&phba->hbalock);
804
805 /* First check out the status word */
806 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
807
808 /* Sanity check to ensure the host owns the mailbox */
809 if (pmbox->mbxOwner != OWN_HOST) {
810 /* Lets try for a while */
811 for (i = 0; i < 10240; i++) {
812 /* First copy command data */
813 lpfc_sli_pcimem_bcopy(mbox, pmbox,
814 sizeof (uint32_t));
815 if (pmbox->mbxOwner == OWN_HOST)
816 goto mbout;
817 }
818 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
819 <status> */
820 lpfc_printf_log(phba,
821 KERN_WARNING,
822 LOG_MBOX | LOG_SLI,
823 "%d:0304 Stray Mailbox Interrupt "
824 "mbxCommand x%x mbxStatus x%x\n",
825 phba->brd_no,
826 pmbox->mbxCommand,
827 pmbox->mbxStatus);
828
829 spin_lock_irq(&phba->hbalock);
830 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
831 spin_unlock_irq(&phba->hbalock);
832 return 1;
833 }
834 811
835 mbout: 812 /* Get a Mailbox buffer to setup mailbox commands for callback */
836 del_timer_sync(&phba->sli.mbox_tmo); 813 do {
814 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
815 if (pmb == NULL)
816 break;
837 817
838 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 818 pmbox = &pmb->mb;
839 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
840 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
841 819
842 /* 820 /*
843 * It is a fatal error if unknown mbox command completion. 821 * It is a fatal error if unknown mbox command completion.
@@ -846,33 +824,33 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
846 MBX_SHUTDOWN) { 824 MBX_SHUTDOWN) {
847 825
848 /* Unknow mailbox command compl */ 826 /* Unknow mailbox command compl */
849 lpfc_printf_log(phba, 827 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
850 KERN_ERR, 828 "%d (%d):0323 Unknown Mailbox command "
851 LOG_MBOX | LOG_SLI, 829 "%x Cmpl\n",
852 "%d:0323 Unknown Mailbox command %x Cmpl\n", 830 phba->brd_no,
853 phba->brd_no, 831 pmb->vport ? pmb->vport->vpi : 0,
854 pmbox->mbxCommand); 832 pmbox->mbxCommand);
855 phba->link_state = LPFC_HBA_ERROR; 833 phba->link_state = LPFC_HBA_ERROR;
856 phba->work_hs = HS_FFER3; 834 phba->work_hs = HS_FFER3;
857 lpfc_handle_eratt(phba); 835 lpfc_handle_eratt(phba);
858 return 0; 836 continue;
859 } 837 }
860 838
861 phba->sli.mbox_active = NULL;
862 if (pmbox->mbxStatus) { 839 if (pmbox->mbxStatus) {
863 phba->sli.slistat.mbox_stat_err++; 840 phba->sli.slistat.mbox_stat_err++;
864 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 841 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
865 /* Mbox cmd cmpl error - RETRYing */ 842 /* Mbox cmd cmpl error - RETRYing */
866 lpfc_printf_log(phba, 843 lpfc_printf_log(phba, KERN_INFO,
867 KERN_INFO, 844 LOG_MBOX | LOG_SLI,
868 LOG_MBOX | LOG_SLI, 845 "%d (%d):0305 Mbox cmd cmpl "
869 "%d:0305 Mbox cmd cmpl error - " 846 "error - RETRYing Data: x%x "
870 "RETRYing Data: x%x x%x x%x x%x\n", 847 "x%x x%x x%x\n",
871 phba->brd_no, 848 phba->brd_no,
872 pmbox->mbxCommand, 849 pmb->vport ? pmb->vport->vpi :0,
873 pmbox->mbxStatus, 850 pmbox->mbxCommand,
874 pmbox->un.varWords[0], 851 pmbox->mbxStatus,
875 phba->pport->port_state); 852 pmbox->un.varWords[0],
853 pmb->vport->port_state);
876 pmbox->mbxStatus = 0; 854 pmbox->mbxStatus = 0;
877 pmbox->mbxOwner = OWN_HOST; 855 pmbox->mbxOwner = OWN_HOST;
878 spin_lock_irq(&phba->hbalock); 856 spin_lock_irq(&phba->hbalock);
@@ -880,17 +858,16 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
880 spin_unlock_irq(&phba->hbalock); 858 spin_unlock_irq(&phba->hbalock);
881 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 859 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
882 if (rc == MBX_SUCCESS) 860 if (rc == MBX_SUCCESS)
883 return 0; 861 continue;
884 } 862 }
885 } 863 }
886 864
887 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 865 /* Mailbox cmd <cmd> Cmpl <cmpl> */
888 lpfc_printf_log(phba, 866 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
889 KERN_INFO, 867 "%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
890 LOG_MBOX | LOG_SLI,
891 "%d:0307 Mailbox cmd x%x Cmpl x%p "
892 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 868 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
893 phba->brd_no, 869 phba->brd_no,
870 pmb->vport ? pmb->vport->vpi : 0,
894 pmbox->mbxCommand, 871 pmbox->mbxCommand,
895 pmb->mbox_cmpl, 872 pmb->mbox_cmpl,
896 *((uint32_t *) pmbox), 873 *((uint32_t *) pmbox),
@@ -903,39 +880,35 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
903 pmbox->un.varWords[6], 880 pmbox->un.varWords[6],
904 pmbox->un.varWords[7]); 881 pmbox->un.varWords[7]);
905 882
906 if (pmb->mbox_cmpl) { 883 if (pmb->mbox_cmpl)
907 lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
908 pmb->mbox_cmpl(phba,pmb); 884 pmb->mbox_cmpl(phba,pmb);
909 } 885 } while (1);
910 } 886 return 0;
911 887}
912
913 do {
914 process_next = 0; /* by default don't loop */
915 spin_lock_irq(&phba->hbalock);
916 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
917
918 /* Process next mailbox command if there is one */
919 if ((pmb = lpfc_mbox_get(phba))) {
920 spin_unlock_irq(&phba->hbalock);
921 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
922 if (rc == MBX_NOT_FINISHED) {
923 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
924 pmb->mbox_cmpl(phba,pmb);
925 process_next = 1;
926 continue; /* loop back */
927 }
928 } else {
929 spin_unlock_irq(&phba->hbalock);
930 /* Turn on IOCB processing */
931 for (i = 0; i < phba->sli.num_rings; i++)
932 lpfc_sli_turn_on_ring(phba, i);
933 }
934 888
935 } while (process_next); 889static struct lpfc_dmabuf *
890lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
891{
892 struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
936 893
937 return 0; 894 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
895 if (hbq_entry == NULL)
896 return NULL;
897 list_del(&hbq_entry->dbuf.list);
898 new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
899 if (new_hbq_entry == NULL)
900 return &hbq_entry->dbuf;
901 new_hbq_entry->dbuf = hbq_entry->dbuf;
902 new_hbq_entry->tag = -1;
903 hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
904 if (hbq_entry->dbuf.virt == NULL) {
905 kfree(new_hbq_entry);
906 return &hbq_entry->dbuf;
907 }
908 lpfc_sli_free_hbq(phba, hbq_entry);
909 return &new_hbq_entry->dbuf;
938} 910}
911
939static int 912static int
940lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 913lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
941 struct lpfc_iocbq *saveq) 914 struct lpfc_iocbq *saveq)
@@ -962,14 +935,24 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
962 935
963 /* Firmware Workaround */ 936 /* Firmware Workaround */
964 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 937 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
965 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 938 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
966 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 939 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
967 Rctl = FC_ELS_REQ; 940 Rctl = FC_ELS_REQ;
968 Type = FC_ELS_DATA; 941 Type = FC_ELS_DATA;
969 w5p->hcsw.Rctl = Rctl; 942 w5p->hcsw.Rctl = Rctl;
970 w5p->hcsw.Type = Type; 943 w5p->hcsw.Type = Type;
971 } 944 }
972 } 945 }
946
947 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
948 if (irsp->ulpBdeCount != 0)
949 saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
950 irsp->un.ulpWord[3]);
951 if (irsp->ulpBdeCount == 2)
952 saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
953 irsp->un.ulpWord[15]);
954 }
955
973 /* unSolicited Responses */ 956 /* unSolicited Responses */
974 if (pring->prt[0].profile) { 957 if (pring->prt[0].profile) {
975 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 958 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -997,17 +980,15 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
997 /* Unexpected Rctl / Type received */ 980 /* Unexpected Rctl / Type received */
998 /* Ring <ringno> handler: unexpected 981 /* Ring <ringno> handler: unexpected
999 Rctl <Rctl> Type <Type> received */ 982 Rctl <Rctl> Type <Type> received */
1000 lpfc_printf_log(phba, 983 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1001 KERN_WARNING,
1002 LOG_SLI,
1003 "%d:0313 Ring %d handler: unexpected Rctl x%x " 984 "%d:0313 Ring %d handler: unexpected Rctl x%x "
1004 "Type x%x received \n", 985 "Type x%x received\n",
1005 phba->brd_no, 986 phba->brd_no,
1006 pring->ringno, 987 pring->ringno,
1007 Rctl, 988 Rctl,
1008 Type); 989 Type);
1009 } 990 }
1010 return(1); 991 return 1;
1011} 992}
1012 993
1013static struct lpfc_iocbq * 994static struct lpfc_iocbq *
@@ -1022,7 +1003,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1022 1003
1023 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 1004 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
1024 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 1005 cmd_iocb = phba->sli.iocbq_lookup[iotag];
1025 list_del(&cmd_iocb->list); 1006 list_del_init(&cmd_iocb->list);
1026 pring->txcmplq_cnt--; 1007 pring->txcmplq_cnt--;
1027 return cmd_iocb; 1008 return cmd_iocb;
1028 } 1009 }
@@ -1079,18 +1060,18 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1079 * Ring <ringno> handler: unexpected completion IoTag 1060 * Ring <ringno> handler: unexpected completion IoTag
1080 * <IoTag> 1061 * <IoTag>
1081 */ 1062 */
1082 lpfc_printf_log(phba, 1063 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1083 KERN_WARNING, 1064 "%d (%d):0322 Ring %d handler: "
1084 LOG_SLI, 1065 "unexpected completion IoTag x%x "
1085 "%d:0322 Ring %d handler: unexpected " 1066 "Data: x%x x%x x%x x%x\n",
1086 "completion IoTag x%x Data: x%x x%x x%x x%x\n", 1067 phba->brd_no,
1087 phba->brd_no, 1068 cmdiocbp->vport->vpi,
1088 pring->ringno, 1069 pring->ringno,
1089 saveq->iocb.ulpIoTag, 1070 saveq->iocb.ulpIoTag,
1090 saveq->iocb.ulpStatus, 1071 saveq->iocb.ulpStatus,
1091 saveq->iocb.un.ulpWord[4], 1072 saveq->iocb.un.ulpWord[4],
1092 saveq->iocb.ulpCommand, 1073 saveq->iocb.ulpCommand,
1093 saveq->iocb.ulpContext); 1074 saveq->iocb.ulpContext);
1094 } 1075 }
1095 } 1076 }
1096 1077
@@ -1103,7 +1084,6 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1103 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 1084 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1104 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : 1085 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1105 &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1086 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1106
1107 /* 1087 /*
1108 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 1088 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
1109 * rsp ring <portRspMax> 1089 * rsp ring <portRspMax>
@@ -1123,8 +1103,10 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1123 */ 1103 */
1124 phba->work_ha |= HA_ERATT; 1104 phba->work_ha |= HA_ERATT;
1125 phba->work_hs = HS_FFER3; 1105 phba->work_hs = HS_FFER3;
1106
1107 /* hbalock should already be held */
1126 if (phba->work_wait) 1108 if (phba->work_wait)
1127 wake_up(phba->work_wait); 1109 lpfc_worker_wake_up(phba);
1128 1110
1129 return; 1111 return;
1130} 1112}
@@ -1171,7 +1153,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1171 1153
1172 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1154 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1173 (uint32_t *) &rspiocbq.iocb, 1155 (uint32_t *) &rspiocbq.iocb,
1174 sizeof(IOCB_t)); 1156 phba->iocb_rsp_size);
1175 irsp = &rspiocbq.iocb; 1157 irsp = &rspiocbq.iocb;
1176 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1158 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1177 pring->stats.iocb_rsp++; 1159 pring->stats.iocb_rsp++;
@@ -1342,16 +1324,30 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1342 rsp_cmpl++; 1324 rsp_cmpl++;
1343 1325
1344 if (unlikely(irsp->ulpStatus)) { 1326 if (unlikely(irsp->ulpStatus)) {
1327 /*
1328 * If resource errors reported from HBA, reduce
1329 * queuedepths of the SCSI device.
1330 */
1331 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1332 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1333 spin_unlock_irqrestore(&phba->hbalock, iflag);
1334 lpfc_adjust_queue_depth(phba);
1335 spin_lock_irqsave(&phba->hbalock, iflag);
1336 }
1337
1345 /* Rsp ring <ringno> error: IOCB */ 1338 /* Rsp ring <ringno> error: IOCB */
1346 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1339 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1347 "%d:0336 Rsp Ring %d error: IOCB Data: " 1340 "%d:0336 Rsp Ring %d error: IOCB Data: "
1348 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1341 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1349 phba->brd_no, pring->ringno, 1342 phba->brd_no, pring->ringno,
1350 irsp->un.ulpWord[0], irsp->un.ulpWord[1], 1343 irsp->un.ulpWord[0],
1351 irsp->un.ulpWord[2], irsp->un.ulpWord[3], 1344 irsp->un.ulpWord[1],
1352 irsp->un.ulpWord[4], irsp->un.ulpWord[5], 1345 irsp->un.ulpWord[2],
1353 *(((uint32_t *) irsp) + 6), 1346 irsp->un.ulpWord[3],
1354 *(((uint32_t *) irsp) + 7)); 1347 irsp->un.ulpWord[4],
1348 irsp->un.ulpWord[5],
1349 *(((uint32_t *) irsp) + 6),
1350 *(((uint32_t *) irsp) + 7));
1355 } 1351 }
1356 1352
1357 switch (type) { 1353 switch (type) {
@@ -1365,7 +1361,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1365 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1361 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1366 "%d:0333 IOCB cmd 0x%x" 1362 "%d:0333 IOCB cmd 0x%x"
1367 " processed. Skipping" 1363 " processed. Skipping"
1368 " completion\n", phba->brd_no, 1364 " completion\n",
1365 phba->brd_no,
1369 irsp->ulpCommand); 1366 irsp->ulpCommand);
1370 break; 1367 break;
1371 } 1368 }
@@ -1402,11 +1399,13 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1402 } else { 1399 } else {
1403 /* Unknown IOCB command */ 1400 /* Unknown IOCB command */
1404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1401 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1405 "%d:0334 Unknown IOCB command " 1402 "%d:0334 Unknown IOCB command "
1406 "Data: x%x, x%x x%x x%x x%x\n", 1403 "Data: x%x, x%x x%x x%x x%x\n",
1407 phba->brd_no, type, irsp->ulpCommand, 1404 phba->brd_no, type,
1408 irsp->ulpStatus, irsp->ulpIoTag, 1405 irsp->ulpCommand,
1409 irsp->ulpContext); 1406 irsp->ulpStatus,
1407 irsp->ulpIoTag,
1408 irsp->ulpContext);
1410 } 1409 }
1411 break; 1410 break;
1412 } 1411 }
@@ -1446,7 +1445,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1446 return rc; 1445 return rc;
1447} 1446}
1448 1447
1449
1450int 1448int
1451lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 1449lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1452 struct lpfc_sli_ring *pring, uint32_t mask) 1450 struct lpfc_sli_ring *pring, uint32_t mask)
@@ -1484,8 +1482,8 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1484 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1482 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1485 "%d:0303 Ring %d handler: portRspPut %d " 1483 "%d:0303 Ring %d handler: portRspPut %d "
1486 "is bigger then rsp ring %d\n", 1484 "is bigger then rsp ring %d\n",
1487 phba->brd_no, 1485 phba->brd_no, pring->ringno, portRspPut,
1488 pring->ringno, portRspPut, portRspMax); 1486 portRspMax);
1489 1487
1490 phba->link_state = LPFC_HBA_ERROR; 1488 phba->link_state = LPFC_HBA_ERROR;
1491 spin_unlock_irqrestore(&phba->hbalock, iflag); 1489 spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -1551,6 +1549,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1551 1549
1552 pring->stats.iocb_rsp++; 1550 pring->stats.iocb_rsp++;
1553 1551
1552 /*
1553 * If resource errors reported from HBA, reduce
1554 * queuedepths of the SCSI device.
1555 */
1556 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1557 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1558 spin_unlock_irqrestore(&phba->hbalock, iflag);
1559 lpfc_adjust_queue_depth(phba);
1560 spin_lock_irqsave(&phba->hbalock, iflag);
1561 }
1562
1554 if (irsp->ulpStatus) { 1563 if (irsp->ulpStatus) {
1555 /* Rsp ring <ringno> error: IOCB */ 1564 /* Rsp ring <ringno> error: IOCB */
1556 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1565 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -1634,16 +1643,15 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1634 phba->brd_no, adaptermsg); 1643 phba->brd_no, adaptermsg);
1635 } else { 1644 } else {
1636 /* Unknown IOCB command */ 1645 /* Unknown IOCB command */
1637 lpfc_printf_log(phba, 1646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1638 KERN_ERR, 1647 "%d:0335 Unknown IOCB "
1639 LOG_SLI, 1648 "command Data: x%x "
1640 "%d:0335 Unknown IOCB command " 1649 "x%x x%x x%x\n",
1641 "Data: x%x x%x x%x x%x\n", 1650 phba->brd_no,
1642 phba->brd_no, 1651 irsp->ulpCommand,
1643 irsp->ulpCommand, 1652 irsp->ulpStatus,
1644 irsp->ulpStatus, 1653 irsp->ulpIoTag,
1645 irsp->ulpIoTag, 1654 irsp->ulpContext);
1646 irsp->ulpContext);
1647 } 1655 }
1648 } 1656 }
1649 1657
@@ -1656,6 +1664,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1656 } 1664 }
1657 __lpfc_sli_release_iocbq(phba, saveq); 1665 __lpfc_sli_release_iocbq(phba, saveq);
1658 } 1666 }
1667 rspiocbp = NULL;
1659 } 1668 }
1660 1669
1661 /* 1670 /*
@@ -1668,7 +1677,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1668 } 1677 }
1669 } /* while (pring->rspidx != portRspPut) */ 1678 } /* while (pring->rspidx != portRspPut) */
1670 1679
1671 if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) { 1680 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1672 /* At least one response entry has been freed */ 1681 /* At least one response entry has been freed */
1673 pring->stats.iocb_rsp_full++; 1682 pring->stats.iocb_rsp_full++;
1674 /* SET RxRE_RSP in Chip Att register */ 1683 /* SET RxRE_RSP in Chip Att register */
@@ -1700,6 +1709,10 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1700 struct lpfc_iocbq *iocb, *next_iocb; 1709 struct lpfc_iocbq *iocb, *next_iocb;
1701 IOCB_t *cmd = NULL; 1710 IOCB_t *cmd = NULL;
1702 1711
1712 if (pring->ringno == LPFC_ELS_RING) {
1713 lpfc_fabric_abort_hba(phba);
1714 }
1715
1703 /* Error everything on txq and txcmplq 1716 /* Error everything on txq and txcmplq
1704 * First do the txq. 1717 * First do the txq.
1705 */ 1718 */
@@ -1716,7 +1729,7 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1716 while (!list_empty(&completions)) { 1729 while (!list_empty(&completions)) {
1717 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 1730 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1718 cmd = &iocb->iocb; 1731 cmd = &iocb->iocb;
1719 list_del(&iocb->list); 1732 list_del_init(&iocb->list);
1720 1733
1721 if (!iocb->iocb_cmpl) 1734 if (!iocb->iocb_cmpl)
1722 lpfc_sli_release_iocbq(phba, iocb); 1735 lpfc_sli_release_iocbq(phba, iocb);
@@ -1757,7 +1770,7 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1757 1770
1758 if (i == 15) { 1771 if (i == 15) {
1759 /* Do post */ 1772 /* Do post */
1760 phba->pport->port_state = LPFC_STATE_UNKNOWN; 1773 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1761 lpfc_sli_brdrestart(phba); 1774 lpfc_sli_brdrestart(phba);
1762 } 1775 }
1763 /* Read the HBA Host Status Register */ 1776 /* Read the HBA Host Status Register */
@@ -1862,8 +1875,8 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
1862 1875
1863 /* Kill HBA */ 1876 /* Kill HBA */
1864 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1877 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1865 "%d:0329 Kill HBA Data: x%x x%x\n", 1878 "%d:0329 Kill HBA Data: x%x x%x\n",
1866 phba->brd_no, phba->pport->port_state, psli->sli_flag); 1879 phba->brd_no, phba->pport->port_state, psli->sli_flag);
1867 1880
1868 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 1881 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1869 GFP_KERNEL)) == 0) 1882 GFP_KERNEL)) == 0)
@@ -2087,7 +2100,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
2087 2100
2088 if (i == 15) { 2101 if (i == 15) {
2089 /* Do post */ 2102 /* Do post */
2090 phba->pport->port_state = LPFC_STATE_UNKNOWN; 2103 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2091 lpfc_sli_brdrestart(phba); 2104 lpfc_sli_brdrestart(phba);
2092 } 2105 }
2093 /* Read the HBA Host Status Register */ 2106 /* Read the HBA Host Status Register */
@@ -2117,55 +2130,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
2117 return 0; 2130 return 0;
2118} 2131}
2119 2132
2120static struct hbq_dmabuf *
2121lpfc_alloc_hbq_buffers(struct lpfc_hba *phba, int count)
2122{
2123 struct hbq_dmabuf *hbq_buffer_pool;
2124 int i;
2125
2126 hbq_buffer_pool = kmalloc(count * sizeof(struct hbq_dmabuf),
2127 GFP_KERNEL);
2128 if (!hbq_buffer_pool)
2129 goto out;
2130
2131 for (i = 0; i < count; ++i) {
2132 hbq_buffer_pool[i].dbuf.virt =
2133 lpfc_hbq_alloc(phba, MEM_PRI,
2134 &hbq_buffer_pool[i].dbuf.phys);
2135 if (hbq_buffer_pool[i].dbuf.virt == NULL)
2136 goto alloc_failed;
2137 hbq_buffer_pool[i].tag = i;
2138 }
2139 goto out;
2140
2141alloc_failed:
2142 while (--i >= 0)
2143 lpfc_hbq_free(phba, hbq_buffer_pool[i].dbuf.virt,
2144 hbq_buffer_pool[i].dbuf.phys);
2145 kfree(hbq_buffer_pool);
2146 hbq_buffer_pool = NULL;
2147
2148out:
2149 phba->hbq_buffer_pool = hbq_buffer_pool;
2150 return hbq_buffer_pool;
2151}
2152
2153static struct lpfc_hbq_init lpfc_els_hbq = {
2154 .rn = 1,
2155 .entry_count = 1200,
2156 .mask_count = 0,
2157 .profile = 0,
2158 .ring_mask = 1 << LPFC_ELS_RING,
2159};
2160
2161static struct lpfc_hbq_init *lpfc_hbq_definitions[] = {
2162 &lpfc_els_hbq,
2163};
2164
2165static int 2133static int
2166lpfc_sli_hbq_count(void) 2134lpfc_sli_hbq_count(void)
2167{ 2135{
2168 return ARRAY_SIZE(lpfc_hbq_definitions); 2136 return ARRAY_SIZE(lpfc_hbq_defs);
2169} 2137}
2170 2138
2171static int 2139static int
@@ -2176,7 +2144,7 @@ lpfc_sli_hbq_entry_count(void)
2176 int i; 2144 int i;
2177 2145
2178 for (i = 0; i < hbq_count; ++i) 2146 for (i = 0; i < hbq_count; ++i)
2179 count += lpfc_hbq_definitions[i]->entry_count; 2147 count += lpfc_hbq_defs[i]->entry_count;
2180 return count; 2148 return count;
2181} 2149}
2182 2150
@@ -2194,18 +2162,10 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2194 MAILBOX_t *pmbox; 2162 MAILBOX_t *pmbox;
2195 uint32_t hbqno; 2163 uint32_t hbqno;
2196 uint32_t hbq_entry_index; 2164 uint32_t hbq_entry_index;
2197 uint32_t hbq_buffer_count;
2198
2199 /* count hbq buffers */
2200 hbq_buffer_count = lpfc_sli_hbq_entry_count();
2201 if (!lpfc_alloc_hbq_buffers(phba, hbq_buffer_count))
2202 return -ENOMEM;
2203 2165
2204 phba->hbq_buffer_count = hbq_buffer_count; 2166 /* Get a Mailbox buffer to setup mailbox
2205 2167 * commands for HBA initialization
2206 /* Get a Mailbox buffer to setup mailbox 2168 */
2207 * commands for HBA initialization
2208 */
2209 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2169 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2210 2170
2211 if (!pmb) 2171 if (!pmb)
@@ -2222,9 +2182,9 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2222 phba->hbqs[hbqno].hbqPutIdx = 0; 2182 phba->hbqs[hbqno].hbqPutIdx = 0;
2223 phba->hbqs[hbqno].local_hbqGetIdx = 0; 2183 phba->hbqs[hbqno].local_hbqGetIdx = 0;
2224 phba->hbqs[hbqno].entry_count = 2184 phba->hbqs[hbqno].entry_count =
2225 lpfc_hbq_definitions[hbqno]->entry_count; 2185 lpfc_hbq_defs[hbqno]->entry_count;
2226 lpfc_config_hbq(phba, lpfc_hbq_definitions[hbqno], 2186 lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
2227 hbq_entry_index, pmb); 2187 pmb);
2228 hbq_entry_index += phba->hbqs[hbqno].entry_count; 2188 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2229 2189
2230 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 2190 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
@@ -2232,7 +2192,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2232 mbxStatus <status>, ring <num> */ 2192 mbxStatus <status>, ring <num> */
2233 2193
2234 lpfc_printf_log(phba, KERN_ERR, 2194 lpfc_printf_log(phba, KERN_ERR,
2235 LOG_SLI, 2195 LOG_SLI | LOG_VPORT,
2236 "%d:1805 Adapter failed to init. " 2196 "%d:1805 Adapter failed to init. "
2237 "Data: x%x x%x x%x\n", 2197 "Data: x%x x%x x%x\n",
2238 phba->brd_no, pmbox->mbxCommand, 2198 phba->brd_no, pmbox->mbxCommand,
@@ -2240,17 +2200,18 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2240 2200
2241 phba->link_state = LPFC_HBA_ERROR; 2201 phba->link_state = LPFC_HBA_ERROR;
2242 mempool_free(pmb, phba->mbox_mem_pool); 2202 mempool_free(pmb, phba->mbox_mem_pool);
2243 /* Free all HBQ memory */
2244 lpfc_sli_hbqbuf_free_all(phba);
2245 return ENXIO; 2203 return ENXIO;
2246 } 2204 }
2247 } 2205 }
2248 phba->hbq_count = hbq_count; 2206 phba->hbq_count = hbq_count;
2249 2207
2250 /* Initially populate or replenish the HBQs */
2251 lpfc_sli_hbqbuf_fill_hbqs(phba);
2252 mempool_free(pmb, phba->mbox_mem_pool); 2208 mempool_free(pmb, phba->mbox_mem_pool);
2253 2209
2210 /* Initially populate or replenish the HBQs */
2211 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2212 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2213 return -ENOMEM;
2214 }
2254 return 0; 2215 return 0;
2255} 2216}
2256 2217
@@ -2271,7 +2232,7 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2271 spin_lock_irq(&phba->hbalock); 2232 spin_lock_irq(&phba->hbalock);
2272 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2233 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2273 spin_unlock_irq(&phba->hbalock); 2234 spin_unlock_irq(&phba->hbalock);
2274 phba->pport->port_state = LPFC_STATE_UNKNOWN; 2235 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2275 lpfc_sli_brdrestart(phba); 2236 lpfc_sli_brdrestart(phba);
2276 msleep(2500); 2237 msleep(2500);
2277 rc = lpfc_sli_chipset_init(phba); 2238 rc = lpfc_sli_chipset_init(phba);
@@ -2301,20 +2262,20 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2301 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 2262 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2302 if (rc != MBX_SUCCESS) { 2263 if (rc != MBX_SUCCESS) {
2303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2304 "%d:0442 Adapter failed to init, " 2265 "%d:0442 Adapter failed to init, mbxCmd x%x "
2305 "mbxCmd x%x CONFIG_PORT, mbxStatus " 2266 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
2306 "x%x Data: x%x\n", 2267 phba->brd_no, pmb->mb.mbxCommand,
2307 phba->brd_no, pmb->mb.mbxCommand, 2268 pmb->mb.mbxStatus, 0);
2308 pmb->mb.mbxStatus, 0);
2309 spin_lock_irq(&phba->hbalock); 2269 spin_lock_irq(&phba->hbalock);
2310 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 2270 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2311 spin_unlock_irq(&phba->hbalock); 2271 spin_unlock_irq(&phba->hbalock);
2312 rc = -ENXIO; 2272 rc = -ENXIO;
2313 } else { 2273 } else {
2314 done = 1; 2274 done = 1;
2315 /* DBG: Do we need max_vpi, reg_vpi for that matter 2275 phba->max_vpi = (phba->max_vpi &&
2316 phba->max_vpi = 0; 2276 pmb->mb.un.varCfgPort.gmv) != 0
2317 */ 2277 ? pmb->mb.un.varCfgPort.max_vpi
2278 : 0;
2318 } 2279 }
2319 } 2280 }
2320 2281
@@ -2324,13 +2285,13 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2324 } 2285 }
2325 2286
2326 if ((pmb->mb.un.varCfgPort.sli_mode == 3) && 2287 if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2327 (!pmb->mb.un.varCfgPort.cMA)) { 2288 (!pmb->mb.un.varCfgPort.cMA)) {
2328 rc = -ENXIO; 2289 rc = -ENXIO;
2329 goto do_prep_failed; 2290 goto do_prep_failed;
2330 } 2291 }
2331 return rc; 2292 return rc;
2332 2293
2333 do_prep_failed: 2294do_prep_failed:
2334 mempool_free(pmb, phba->mbox_mem_pool); 2295 mempool_free(pmb, phba->mbox_mem_pool);
2335 return rc; 2296 return rc;
2336} 2297}
@@ -2339,17 +2300,24 @@ int
2339lpfc_sli_hba_setup(struct lpfc_hba *phba) 2300lpfc_sli_hba_setup(struct lpfc_hba *phba)
2340{ 2301{
2341 uint32_t rc; 2302 uint32_t rc;
2342 int mode = 3; 2303 int mode = 3;
2343 2304
2344 switch (lpfc_sli_mode) { 2305 switch (lpfc_sli_mode) {
2345 case 2: 2306 case 2:
2307 if (lpfc_npiv_enable) {
2308 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2309 "%d:1824 NPIV enabled: Override lpfc_sli_mode "
2310 "parameter (%d) to auto (0).\n",
2311 phba->brd_no, lpfc_sli_mode);
2312 break;
2313 }
2346 mode = 2; 2314 mode = 2;
2347 break; 2315 break;
2348 case 0: 2316 case 0:
2349 case 3: 2317 case 3:
2350 break; 2318 break;
2351 default: 2319 default:
2352 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2353 "%d:1819 Unrecognized lpfc_sli_mode " 2321 "%d:1819 Unrecognized lpfc_sli_mode "
2354 "parameter: %d.\n", 2322 "parameter: %d.\n",
2355 phba->brd_no, lpfc_sli_mode); 2323 phba->brd_no, lpfc_sli_mode);
@@ -2359,7 +2327,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2359 2327
2360 rc = lpfc_do_config_port(phba, mode); 2328 rc = lpfc_do_config_port(phba, mode);
2361 if (rc && lpfc_sli_mode == 3) 2329 if (rc && lpfc_sli_mode == 3)
2362 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2363 "%d:1820 Unable to select SLI-3. " 2331 "%d:1820 Unable to select SLI-3. "
2364 "Not supported by adapter.\n", 2332 "Not supported by adapter.\n",
2365 phba->brd_no); 2333 phba->brd_no);
@@ -2377,18 +2345,18 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2377 } else { 2345 } else {
2378 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 2346 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2379 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 2347 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2380 phba->sli3_options = 0x0; 2348 phba->sli3_options = 0;
2381 } 2349 }
2382 2350
2383 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2351 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2384 "%d:0444 Firmware in SLI %x mode.\n", 2352 "%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
2385 phba->brd_no, phba->sli_rev); 2353 phba->brd_no, phba->sli_rev, phba->max_vpi);
2386 rc = lpfc_sli_ring_map(phba); 2354 rc = lpfc_sli_ring_map(phba);
2387 2355
2388 if (rc) 2356 if (rc)
2389 goto lpfc_sli_hba_setup_error; 2357 goto lpfc_sli_hba_setup_error;
2390 2358
2391 /* Init HBQs */ 2359 /* Init HBQs */
2392 2360
2393 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2361 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2394 rc = lpfc_sli_hbq_setup(phba); 2362 rc = lpfc_sli_hbq_setup(phba);
@@ -2404,7 +2372,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2404 2372
2405 return rc; 2373 return rc;
2406 2374
2407 lpfc_sli_hba_setup_error: 2375lpfc_sli_hba_setup_error:
2408 phba->link_state = LPFC_HBA_ERROR; 2376 phba->link_state = LPFC_HBA_ERROR;
2409 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2377 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2410 "%d:0445 Firmware initialization failed\n", 2378 "%d:0445 Firmware initialization failed\n",
@@ -2428,19 +2396,21 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2428void 2396void
2429lpfc_mbox_timeout(unsigned long ptr) 2397lpfc_mbox_timeout(unsigned long ptr)
2430{ 2398{
2431 struct lpfc_hba *phba = (struct lpfc_hba *) phba; 2399 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2432 unsigned long iflag; 2400 unsigned long iflag;
2433 uint32_t tmo_posted; 2401 uint32_t tmo_posted;
2434 2402
2435 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 2403 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2436 tmo_posted = (phba->pport->work_port_events & WORKER_MBOX_TMO); 2404 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2437 if (!tmo_posted) 2405 if (!tmo_posted)
2438 phba->pport->work_port_events |= WORKER_MBOX_TMO; 2406 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2439 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 2407 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2440 2408
2441 if (!tmo_posted) { 2409 if (!tmo_posted) {
2410 spin_lock_irqsave(&phba->hbalock, iflag);
2442 if (phba->work_wait) 2411 if (phba->work_wait)
2443 wake_up(phba->work_wait); 2412 lpfc_worker_wake_up(phba);
2413 spin_unlock_irqrestore(&phba->hbalock, iflag);
2444 } 2414 }
2445} 2415}
2446 2416
@@ -2458,12 +2428,13 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2458 2428
2459 /* Mbox cmd <mbxCommand> timeout */ 2429 /* Mbox cmd <mbxCommand> timeout */
2460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2430 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2461 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 2431 "%d:0310 Mailbox command x%x timeout Data: x%x x%x "
2462 phba->brd_no, 2432 "x%p\n",
2463 mb->mbxCommand, 2433 phba->brd_no,
2464 phba->pport->port_state, 2434 mb->mbxCommand,
2465 phba->sli.sli_flag, 2435 phba->pport->port_state,
2466 phba->sli.mbox_active); 2436 phba->sli.sli_flag,
2437 phba->sli.mbox_active);
2467 2438
2468 /* Setting state unknown so lpfc_sli_abort_iocb_ring 2439 /* Setting state unknown so lpfc_sli_abort_iocb_ring
2469 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 2440 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
@@ -2510,10 +2481,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2510 void __iomem *to_slim; 2481 void __iomem *to_slim;
2511 2482
2512 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 2483 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2513 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 2484 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2514 if(!pmbox->vport) { 2485 if(!pmbox->vport) {
2515 lpfc_printf_log(phba, KERN_ERR, 2486 lpfc_printf_log(phba, KERN_ERR,
2516 LOG_MBOX, 2487 LOG_MBOX | LOG_VPORT,
2517 "%d:1806 Mbox x%x failed. No vport\n", 2488 "%d:1806 Mbox x%x failed. No vport\n",
2518 phba->brd_no, 2489 phba->brd_no,
2519 pmbox->mb.mbxCommand); 2490 pmbox->mb.mbxCommand);
@@ -2522,12 +2493,15 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2522 } 2493 }
2523 } 2494 }
2524 2495
2496
2525 /* If the PCI channel is in offline state, do not post mbox. */ 2497 /* If the PCI channel is in offline state, do not post mbox. */
2526 if (unlikely(pci_channel_offline(phba->pcidev))) 2498 if (unlikely(pci_channel_offline(phba->pcidev)))
2527 return MBX_NOT_FINISHED; 2499 return MBX_NOT_FINISHED;
2528 2500
2529 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2501 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2530 psli = &phba->sli; 2502 psli = &phba->sli;
2503
2504
2531 mb = &pmbox->mb; 2505 mb = &pmbox->mb;
2532 status = MBX_SUCCESS; 2506 status = MBX_SUCCESS;
2533 2507
@@ -2535,14 +2509,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2535 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2509 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2536 2510
2537 /* Mbox command <mbxCommand> cannot issue */ 2511 /* Mbox command <mbxCommand> cannot issue */
2538 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2512 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2539 return MBX_NOT_FINISHED; 2513 return MBX_NOT_FINISHED;
2540 } 2514 }
2541 2515
2542 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2516 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2543 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2517 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2544 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2518 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2545 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2519 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2546 return MBX_NOT_FINISHED; 2520 return MBX_NOT_FINISHED;
2547 } 2521 }
2548 2522
@@ -2556,14 +2530,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2556 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2530 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2557 2531
2558 /* Mbox command <mbxCommand> cannot issue */ 2532 /* Mbox command <mbxCommand> cannot issue */
2559 LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag); 2533 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2560 return MBX_NOT_FINISHED; 2534 return MBX_NOT_FINISHED;
2561 } 2535 }
2562 2536
2563 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 2537 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2564 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2538 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2565 /* Mbox command <mbxCommand> cannot issue */ 2539 /* Mbox command <mbxCommand> cannot issue */
2566 LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag); 2540 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2567 return MBX_NOT_FINISHED; 2541 return MBX_NOT_FINISHED;
2568 } 2542 }
2569 2543
@@ -2589,10 +2563,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2589 2563
2590 /* Mbox cmd issue - BUSY */ 2564 /* Mbox cmd issue - BUSY */
2591 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2565 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2592 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n", 2566 "%d (%d):0308 Mbox cmd issue - BUSY Data: "
2593 phba->brd_no, 2567 "x%x x%x x%x x%x\n",
2594 mb->mbxCommand, phba->pport->port_state, 2568 phba->brd_no,
2595 psli->sli_flag, flag); 2569 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2570 mb->mbxCommand, phba->pport->port_state,
2571 psli->sli_flag, flag);
2596 2572
2597 psli->slistat.mbox_busy++; 2573 psli->slistat.mbox_busy++;
2598 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2574 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -2626,7 +2602,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2626 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2602 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2627 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2603 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2628 /* Mbox command <mbxCommand> cannot issue */ 2604 /* Mbox command <mbxCommand> cannot issue */
2629 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag); 2605 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2630 return MBX_NOT_FINISHED; 2606 return MBX_NOT_FINISHED;
2631 } 2607 }
2632 /* timeout active mbox command */ 2608 /* timeout active mbox command */
@@ -2636,10 +2612,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2636 2612
2637 /* Mailbox cmd <cmd> issue */ 2613 /* Mailbox cmd <cmd> issue */
2638 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2614 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2639 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n", 2615 "%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2640 phba->brd_no, 2616 "x%x\n",
2641 mb->mbxCommand, phba->pport->port_state, 2617 phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
2642 psli->sli_flag, flag); 2618 mb->mbxCommand, phba->pport->port_state,
2619 psli->sli_flag, flag);
2643 2620
2644 psli->slistat.mbox_cmd++; 2621 psli->slistat.mbox_cmd++;
2645 evtctr = psli->slistat.mbox_event; 2622 evtctr = psli->slistat.mbox_event;
@@ -2654,7 +2631,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2654 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2631 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2655 /* copy command data into host mbox for cmpl */ 2632 /* copy command data into host mbox for cmpl */
2656 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2633 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2657 MAILBOX_CMD_SIZE); 2634 MAILBOX_CMD_SIZE);
2658 } 2635 }
2659 2636
2660 /* First copy mbox command data to HBA SLIM, skip past first 2637 /* First copy mbox command data to HBA SLIM, skip past first
@@ -2756,14 +2733,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2756 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2733 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2757 /* copy results back to user */ 2734 /* copy results back to user */
2758 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, 2735 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2759 MAILBOX_CMD_SIZE); 2736 MAILBOX_CMD_SIZE);
2760 } else { 2737 } else {
2761 /* First copy command data */ 2738 /* First copy command data */
2762 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 2739 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2763 MAILBOX_CMD_SIZE); 2740 MAILBOX_CMD_SIZE);
2764 if ((mb->mbxCommand == MBX_DUMP_MEMORY) && 2741 if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2765 pmbox->context2) { 2742 pmbox->context2) {
2766 lpfc_memcpy_from_slim((void *) pmbox->context2, 2743 lpfc_memcpy_from_slim((void *)pmbox->context2,
2767 phba->MBslimaddr + DMP_RSP_OFFSET, 2744 phba->MBslimaddr + DMP_RSP_OFFSET,
2768 mb->un.varDmp.word_cnt); 2745 mb->un.varDmp.word_cnt);
2769 } 2746 }
@@ -2780,17 +2757,16 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2780 return status; 2757 return status;
2781} 2758}
2782 2759
2760/*
2761 * Caller needs to hold lock.
2762 */
2783static int 2763static int
2784lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2764__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2785 struct lpfc_iocbq *piocb) 2765 struct lpfc_iocbq *piocb)
2786{ 2766{
2787 unsigned long iflags;
2788
2789 /* Insert the caller's iocb in the txq tail for later processing. */ 2767 /* Insert the caller's iocb in the txq tail for later processing. */
2790 spin_lock_irqsave(&phba->hbalock, iflags);
2791 list_add_tail(&piocb->list, &pring->txq); 2768 list_add_tail(&piocb->list, &pring->txq);
2792 pring->txq_cnt++; 2769 pring->txq_cnt++;
2793 spin_unlock_irqrestore(&phba->hbalock, iflags);
2794 return 0; 2770 return 0;
2795} 2771}
2796 2772
@@ -2809,14 +2785,29 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2809 return nextiocb; 2785 return nextiocb;
2810} 2786}
2811 2787
2788/*
2789 * Lockless version of lpfc_sli_issue_iocb.
2790 */
2812int 2791int
2813lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2792__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2814 struct lpfc_iocbq *piocb, uint32_t flag) 2793 struct lpfc_iocbq *piocb, uint32_t flag)
2815{ 2794{
2816 struct lpfc_iocbq *nextiocb; 2795 struct lpfc_iocbq *nextiocb;
2817 unsigned long iflags;
2818 IOCB_t *iocb; 2796 IOCB_t *iocb;
2819 2797
2798 if (piocb->iocb_cmpl && (!piocb->vport) &&
2799 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2800 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2801 lpfc_printf_log(phba, KERN_ERR,
2802 LOG_SLI | LOG_VPORT,
2803 "%d:1807 IOCB x%x failed. No vport\n",
2804 phba->brd_no,
2805 piocb->iocb.ulpCommand);
2806 dump_stack();
2807 return IOCB_ERROR;
2808 }
2809
2810
2820 /* If the PCI channel is in offline state, do not post iocbs. */ 2811 /* If the PCI channel is in offline state, do not post iocbs. */
2821 if (unlikely(pci_channel_offline(phba->pcidev))) 2812 if (unlikely(pci_channel_offline(phba->pcidev)))
2822 return IOCB_ERROR; 2813 return IOCB_ERROR;
@@ -2862,10 +2853,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2862 * attention events. 2853 * attention events.
2863 */ 2854 */
2864 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 2855 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2865 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) 2856 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2866 goto iocb_busy; 2857 goto iocb_busy;
2858 }
2867 2859
2868 spin_lock_irqsave(&phba->hbalock, iflags);
2869 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 2860 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2870 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 2861 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2871 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 2862 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
@@ -2874,7 +2865,6 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2874 lpfc_sli_update_ring(phba, pring); 2865 lpfc_sli_update_ring(phba, pring);
2875 else 2866 else
2876 lpfc_sli_update_full_ring(phba, pring); 2867 lpfc_sli_update_full_ring(phba, pring);
2877 spin_unlock_irqrestore(&phba->hbalock, iflags);
2878 2868
2879 if (!piocb) 2869 if (!piocb)
2880 return IOCB_SUCCESS; 2870 return IOCB_SUCCESS;
@@ -2882,20 +2872,33 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2882 goto out_busy; 2872 goto out_busy;
2883 2873
2884 iocb_busy: 2874 iocb_busy:
2885 spin_lock_irqsave(&phba->hbalock, iflags);
2886 pring->stats.iocb_cmd_delay++; 2875 pring->stats.iocb_cmd_delay++;
2887 spin_unlock_irqrestore(&phba->hbalock, iflags);
2888 2876
2889 out_busy: 2877 out_busy:
2890 2878
2891 if (!(flag & SLI_IOCB_RET_IOCB)) { 2879 if (!(flag & SLI_IOCB_RET_IOCB)) {
2892 lpfc_sli_ringtx_put(phba, pring, piocb); 2880 __lpfc_sli_ringtx_put(phba, pring, piocb);
2893 return IOCB_SUCCESS; 2881 return IOCB_SUCCESS;
2894 } 2882 }
2895 2883
2896 return IOCB_BUSY; 2884 return IOCB_BUSY;
2897} 2885}
2898 2886
2887
2888int
2889lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2890 struct lpfc_iocbq *piocb, uint32_t flag)
2891{
2892 unsigned long iflags;
2893 int rc;
2894
2895 spin_lock_irqsave(&phba->hbalock, iflags);
2896 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2897 spin_unlock_irqrestore(&phba->hbalock, iflags);
2898
2899 return rc;
2900}
2901
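The same split is applied to the iocb submit path: __lpfc_sli_issue_iocb is the lockless body (which now also rejects completion-bearing iocbs that carry no vport, logging message 1807), while the exported lpfc_sli_issue_iocb shown above simply takes hbalock, calls the __ variant, and drops the lock. Callers that already hold hbalock, such as lpfc_sli_issue_abort_iotag later in this patch, call the __ form directly and avoid recursive locking. A minimal, self-contained sketch of the wrapper pattern, again using a pthread mutex as a stand-in:

	#include <pthread.h>

	struct ring_state {
		pthread_mutex_t lock;      /* stands in for phba->hbalock */
		unsigned int queued;
	};

	/* Lockless variant: the double-underscore prefix documents that the
	 * caller already holds ring->lock. */
	static int __ring_issue(struct ring_state *ring)
	{
		ring->queued++;            /* queue or submit the request; details elided */
		return 0;
	}

	/* Public variant: identical behaviour, but takes and drops the lock itself. */
	static int ring_issue(struct ring_state *ring)
	{
		int rc;

		pthread_mutex_lock(&ring->lock);
		rc = __ring_issue(ring);
		pthread_mutex_unlock(&ring->lock);
		return rc;
	}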
2899static int 2902static int
2900lpfc_extra_ring_setup( struct lpfc_hba *phba) 2903lpfc_extra_ring_setup( struct lpfc_hba *phba)
2901{ 2904{
@@ -2960,14 +2963,14 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2960 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2963 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2961 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2964 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2962 pring->sizeCiocb = (phba->sli_rev == 3) ? 2965 pring->sizeCiocb = (phba->sli_rev == 3) ?
2963 SLI3_IOCB_CMD_SIZE : 2966 SLI3_IOCB_CMD_SIZE :
2964 SLI2_IOCB_CMD_SIZE; 2967 SLI2_IOCB_CMD_SIZE;
2965 pring->sizeRiocb = (phba->sli_rev == 3) ? 2968 pring->sizeRiocb = (phba->sli_rev == 3) ?
2966 SLI3_IOCB_RSP_SIZE : 2969 SLI3_IOCB_RSP_SIZE :
2967 SLI2_IOCB_RSP_SIZE; 2970 SLI2_IOCB_RSP_SIZE;
2968 pring->iotag_ctr = 0; 2971 pring->iotag_ctr = 0;
2969 pring->iotag_max = 2972 pring->iotag_max =
2970 (phba->cfg_hba_queue_depth * 2); 2973 (phba->cfg_hba_queue_depth * 2);
2971 pring->fast_iotag = pring->iotag_max; 2974 pring->fast_iotag = pring->iotag_max;
2972 pring->num_mask = 0; 2975 pring->num_mask = 0;
2973 break; 2976 break;
@@ -2976,11 +2979,11 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2976 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 2979 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2977 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 2980 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
2978 pring->sizeCiocb = (phba->sli_rev == 3) ? 2981 pring->sizeCiocb = (phba->sli_rev == 3) ?
2979 SLI3_IOCB_CMD_SIZE : 2982 SLI3_IOCB_CMD_SIZE :
2980 SLI2_IOCB_CMD_SIZE; 2983 SLI2_IOCB_CMD_SIZE;
2981 pring->sizeRiocb = (phba->sli_rev == 3) ? 2984 pring->sizeRiocb = (phba->sli_rev == 3) ?
2982 SLI3_IOCB_RSP_SIZE : 2985 SLI3_IOCB_RSP_SIZE :
2983 SLI2_IOCB_RSP_SIZE; 2986 SLI2_IOCB_RSP_SIZE;
2984 pring->iotag_max = phba->cfg_hba_queue_depth; 2987 pring->iotag_max = phba->cfg_hba_queue_depth;
2985 pring->num_mask = 0; 2988 pring->num_mask = 0;
2986 break; 2989 break;
@@ -2989,11 +2992,11 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2989 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 2992 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2990 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 2993 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
2991 pring->sizeCiocb = (phba->sli_rev == 3) ? 2994 pring->sizeCiocb = (phba->sli_rev == 3) ?
2992 SLI3_IOCB_CMD_SIZE : 2995 SLI3_IOCB_CMD_SIZE :
2993 SLI2_IOCB_CMD_SIZE; 2996 SLI2_IOCB_CMD_SIZE;
2994 pring->sizeRiocb = (phba->sli_rev == 3) ? 2997 pring->sizeRiocb = (phba->sli_rev == 3) ?
2995 SLI3_IOCB_RSP_SIZE : 2998 SLI3_IOCB_RSP_SIZE :
2996 SLI2_IOCB_RSP_SIZE; 2999 SLI2_IOCB_RSP_SIZE;
2997 pring->fast_iotag = 0; 3000 pring->fast_iotag = 0;
2998 pring->iotag_ctr = 0; 3001 pring->iotag_ctr = 0;
2999 pring->iotag_max = 4096; 3002 pring->iotag_max = 4096;
@@ -3002,30 +3005,30 @@ lpfc_sli_setup(struct lpfc_hba *phba)
3002 pring->prt[0].rctl = FC_ELS_REQ; 3005 pring->prt[0].rctl = FC_ELS_REQ;
3003 pring->prt[0].type = FC_ELS_DATA; 3006 pring->prt[0].type = FC_ELS_DATA;
3004 pring->prt[0].lpfc_sli_rcv_unsol_event = 3007 pring->prt[0].lpfc_sli_rcv_unsol_event =
3005 lpfc_els_unsol_event; 3008 lpfc_els_unsol_event;
3006 pring->prt[1].profile = 0; /* Mask 1 */ 3009 pring->prt[1].profile = 0; /* Mask 1 */
3007 pring->prt[1].rctl = FC_ELS_RSP; 3010 pring->prt[1].rctl = FC_ELS_RSP;
3008 pring->prt[1].type = FC_ELS_DATA; 3011 pring->prt[1].type = FC_ELS_DATA;
3009 pring->prt[1].lpfc_sli_rcv_unsol_event = 3012 pring->prt[1].lpfc_sli_rcv_unsol_event =
3010 lpfc_els_unsol_event; 3013 lpfc_els_unsol_event;
3011 pring->prt[2].profile = 0; /* Mask 2 */ 3014 pring->prt[2].profile = 0; /* Mask 2 */
3012 /* NameServer Inquiry */ 3015 /* NameServer Inquiry */
3013 pring->prt[2].rctl = FC_UNSOL_CTL; 3016 pring->prt[2].rctl = FC_UNSOL_CTL;
3014 /* NameServer */ 3017 /* NameServer */
3015 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 3018 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
3016 pring->prt[2].lpfc_sli_rcv_unsol_event = 3019 pring->prt[2].lpfc_sli_rcv_unsol_event =
3017 lpfc_ct_unsol_event; 3020 lpfc_ct_unsol_event;
3018 pring->prt[3].profile = 0; /* Mask 3 */ 3021 pring->prt[3].profile = 0; /* Mask 3 */
3019 /* NameServer response */ 3022 /* NameServer response */
3020 pring->prt[3].rctl = FC_SOL_CTL; 3023 pring->prt[3].rctl = FC_SOL_CTL;
3021 /* NameServer */ 3024 /* NameServer */
3022 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 3025 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
3023 pring->prt[3].lpfc_sli_rcv_unsol_event = 3026 pring->prt[3].lpfc_sli_rcv_unsol_event =
3024 lpfc_ct_unsol_event; 3027 lpfc_ct_unsol_event;
3025 break; 3028 break;
3026 } 3029 }
3027 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 3030 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3028 (pring->numRiocb * pring->sizeRiocb); 3031 (pring->numRiocb * pring->sizeRiocb);
3029 } 3032 }
3030 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 3033 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
3031 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 3034 /* Too many cmd / rsp ring entries in SLI2 SLIM */
@@ -3051,6 +3054,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
3051 psli = &phba->sli; 3054 psli = &phba->sli;
3052 spin_lock_irq(&phba->hbalock); 3055 spin_lock_irq(&phba->hbalock);
3053 INIT_LIST_HEAD(&psli->mboxq); 3056 INIT_LIST_HEAD(&psli->mboxq);
3057 INIT_LIST_HEAD(&psli->mboxq_cmpl);
3054 /* Initialize list headers for txq and txcmplq as double linked lists */ 3058 /* Initialize list headers for txq and txcmplq as double linked lists */
3055 for (i = 0; i < psli->num_rings; i++) { 3059 for (i = 0; i < psli->num_rings; i++) {
3056 pring = &psli->ring[i]; 3060 pring = &psli->ring[i];
@@ -3068,6 +3072,64 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
3068} 3072}
3069 3073
3070int 3074int
3075lpfc_sli_host_down(struct lpfc_vport *vport)
3076{
3077 struct lpfc_hba *phba = vport->phba;
3078 struct lpfc_sli *psli = &phba->sli;
3079 struct lpfc_sli_ring *pring;
3080 struct lpfc_iocbq *iocb, *next_iocb;
3081 IOCB_t *icmd = NULL;
3082 int i;
3083 unsigned long flags = 0;
3084 uint16_t prev_pring_flag;
3085
3086 lpfc_cleanup_discovery_resources(vport);
3087
3088 spin_lock_irqsave(&phba->hbalock, flags);
3089
3090 for (i = 0; i < psli->num_rings; i++) {
3091 pring = &psli->ring[i];
3092 prev_pring_flag = pring->flag;
3093 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3094
3095 /*
3096 * Error everything on the txq since these iocbs have not been
3097 * given to the FW yet.
3098 */
3099
3100 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3101 if (iocb->vport != vport)
3102 continue;
3103 list_del_init(&iocb->list);
3104 pring->txq_cnt--;
3105 if (iocb->iocb_cmpl) {
3106 icmd = &iocb->iocb;
3107 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3108 icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3109 spin_unlock_irqrestore(&phba->hbalock, flags);
3110 (iocb->iocb_cmpl) (phba, iocb, iocb);
3111 spin_lock_irqsave(&phba->hbalock, flags);
3112 } else
3113 lpfc_sli_release_iocbq(phba, iocb);
3114 }
3115
3116 /* Next issue ABTS for everything on the txcmplq */
3117 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3118 list) {
3119 if (iocb->vport != vport)
3120 continue;
3121 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3122 }
3123
3124 pring->flag = prev_pring_flag;
3125 }
3126
3127 spin_unlock_irqrestore(&phba->hbalock, flags);
3128
3129 return 1;
3130}
3131
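lpfc_sli_host_down above is new with NPIV: it tears down a single vport while other vports keep using the shared rings. Under hbalock it walks each ring's txq, removes only the iocbs whose vport matches, and fails them with IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN; because the completion handler may do real work, the lock is dropped around each callback and retaken afterwards, while entries with no handler are simply released. Commands already handed to the firmware (txcmplq) are aborted through lpfc_sli_issue_abort_iotag instead. A minimal sketch of the drop-the-lock-around-the-callback walk, with hypothetical req/ring types and a pthread mutex as the lock stand-in:

	#include <pthread.h>
	#include <stddef.h>

	struct req {
		struct req *next;
		void *owner;                              /* vport analogue */
		void (*done)(struct req *, int status);   /* completion callback, may be NULL */
	};

	struct ring {
		pthread_mutex_t lock;
		struct req *txq;                          /* singly linked for brevity */
	};

	/* Fail every queued request belonging to 'owner'; callbacks run unlocked. */
	static void ring_fail_owner(struct ring *r, void *owner, int status)
	{
		struct req **pp, *req;

		pthread_mutex_lock(&r->lock);
		pp = &r->txq;
		while ((req = *pp) != NULL) {
			if (req->owner != owner) {
				pp = &req->next;
				continue;
			}
			*pp = req->next;                  /* unlink before leaving the lock */
			if (req->done) {
				pthread_mutex_unlock(&r->lock);
				req->done(req, status);       /* callback runs without the lock */
				pthread_mutex_lock(&r->lock);
				pp = &r->txq;                 /* list may have changed; rescan */
			}
			/* entries without a callback are simply dropped, as with
			 * lpfc_sli_release_iocbq in the driver */
		}
		pthread_mutex_unlock(&r->lock);
	}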
3132int
3071lpfc_sli_hba_down(struct lpfc_hba *phba) 3133lpfc_sli_hba_down(struct lpfc_hba *phba)
3072{ 3134{
3073 LIST_HEAD(completions); 3135 LIST_HEAD(completions);
@@ -3081,6 +3143,8 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3081 3143
3082 lpfc_hba_down_prep(phba); 3144 lpfc_hba_down_prep(phba);
3083 3145
3146 lpfc_fabric_abort_hba(phba);
3147
3084 spin_lock_irqsave(&phba->hbalock, flags); 3148 spin_lock_irqsave(&phba->hbalock, flags);
3085 for (i = 0; i < psli->num_rings; i++) { 3149 for (i = 0; i < psli->num_rings; i++) {
3086 pring = &psli->ring[i]; 3150 pring = &psli->ring[i];
@@ -3097,9 +3161,8 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3097 spin_unlock_irqrestore(&phba->hbalock, flags); 3161 spin_unlock_irqrestore(&phba->hbalock, flags);
3098 3162
3099 while (!list_empty(&completions)) { 3163 while (!list_empty(&completions)) {
3100 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 3164 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3101 cmd = &iocb->iocb; 3165 cmd = &iocb->iocb;
3102 list_del(&iocb->list);
3103 3166
3104 if (!iocb->iocb_cmpl) 3167 if (!iocb->iocb_cmpl)
3105 lpfc_sli_release_iocbq(phba, iocb); 3168 lpfc_sli_release_iocbq(phba, iocb);
@@ -3112,34 +3175,33 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3112 3175
3113 /* Return any active mbox cmds */ 3176 /* Return any active mbox cmds */
3114 del_timer_sync(&psli->mbox_tmo); 3177 del_timer_sync(&psli->mbox_tmo);
3178 spin_lock_irqsave(&phba->hbalock, flags);
3115 3179
3116 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 3180 spin_lock(&phba->pport->work_port_lock);
3117 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 3181 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3118 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 3182 spin_unlock(&phba->pport->work_port_lock);
3119 3183
3120 spin_lock_irqsave(&phba->hbalock, flags); 3184 if (psli->mbox_active) {
3121 pmb = psli->mbox_active; 3185 list_add_tail(&psli->mbox_active->list, &completions);
3122 if (pmb) {
3123 psli->mbox_active = NULL; 3186 psli->mbox_active = NULL;
3124 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3125 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3187 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3126 if (pmb->mbox_cmpl) {
3127 pmb->mbox_cmpl(phba,pmb);
3128 }
3129 } 3188 }
3130 3189
3131 /* Return any pending mbox cmds */ 3190 /* Return any pending or completed mbox cmds */
3132 while ((pmb = lpfc_mbox_get(phba)) != NULL) { 3191 list_splice_init(&phba->sli.mboxq, &completions);
3192 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3193 INIT_LIST_HEAD(&psli->mboxq);
3194 INIT_LIST_HEAD(&psli->mboxq_cmpl);
3195
3196 spin_unlock_irqrestore(&phba->hbalock, flags);
3197
3198 while (!list_empty(&completions)) {
3199 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
3133 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 3200 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3134 if (pmb->mbox_cmpl) { 3201 if (pmb->mbox_cmpl) {
3135 pmb->mbox_cmpl(phba,pmb); 3202 pmb->mbox_cmpl(phba,pmb);
3136 } 3203 }
3137 } 3204 }
3138 INIT_LIST_HEAD(&psli->mboxq);
3139
3140 /* Free all HBQ memory */
3141 lpfc_sli_hbqbuf_free_all(phba);
3142
3143 return 1; 3205 return 1;
3144} 3206}
3145 3207
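The mailbox teardown in lpfc_sli_hba_down is reworked above into a splice-then-complete pattern: under hbalock the active command (if any) and both queues, mboxq and the new mboxq_cmpl, are moved onto a local completions list and the queue heads reinitialised; only after the lock is released are the commands stamped MBX_NOT_FINISHED and their completion handlers run. That keeps callback execution off the lock without losing any queued command. A minimal sketch of the idea, with a hypothetical singly linked queue in place of the kernel list helpers:

	#include <pthread.h>
	#include <stddef.h>

	struct cmd {
		struct cmd *next;
		int status;
		void (*done)(struct cmd *);
	};

	struct mbox_state {
		pthread_mutex_t lock;
		struct cmd *pending;       /* mboxq analogue */
		struct cmd *completed;     /* mboxq_cmpl analogue */
	};

	#define CMD_NOT_FINISHED (-1)

	static void mbox_flush(struct mbox_state *s)
	{
		struct cmd *list, *c;

		pthread_mutex_lock(&s->lock);
		/* splice both queues onto a private list and reset the heads */
		list = s->pending;
		if (list) {
			for (c = list; c->next; c = c->next)
				;
			c->next = s->completed;
		} else {
			list = s->completed;
		}
		s->pending = NULL;
		s->completed = NULL;
		pthread_mutex_unlock(&s->lock);

		/* complete everything with the lock dropped */
		while ((c = list) != NULL) {
			list = c->next;
			c->status = CMD_NOT_FINISHED;
			if (c->done)
				c->done(c);
		}
	}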
@@ -3196,7 +3258,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3197 "%d:0410 Cannot find virtual addr for mapped buf on " 3259 "%d:0410 Cannot find virtual addr for mapped buf on "
3198 "ring %d Data x%llx x%p x%p x%x\n", 3260 "ring %d Data x%llx x%p x%p x%x\n",
3199 phba->brd_no, pring->ringno, (unsigned long long) phys, 3261 phba->brd_no, pring->ringno, (unsigned long long)phys,
3200 slp->next, slp->prev, pring->postbufq_cnt); 3262 slp->next, slp->prev, pring->postbufq_cnt);
3201 return NULL; 3263 return NULL;
3202} 3264}
@@ -3207,7 +3269,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3207{ 3269{
3208 IOCB_t *irsp = &rspiocb->iocb; 3270 IOCB_t *irsp = &rspiocb->iocb;
3209 uint16_t abort_iotag, abort_context; 3271 uint16_t abort_iotag, abort_context;
3210 struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb; 3272 struct lpfc_iocbq *abort_iocb;
3211 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3273 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3212 3274
3213 abort_iocb = NULL; 3275 abort_iocb = NULL;
@@ -3220,11 +3282,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3220 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) 3282 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
3221 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 3283 abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
3222 3284
3223 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3285 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
3224 "%d:0327 Cannot abort els iocb %p" 3286 "%d:0327 Cannot abort els iocb %p "
3225 " with tag %x context %x\n", 3287 "with tag %x context %x, abort status %x, "
3226 phba->brd_no, abort_iocb, 3288 "abort code %x\n",
3227 abort_iotag, abort_context); 3289 phba->brd_no, abort_iocb, abort_iotag,
3290 abort_context, irsp->ulpStatus,
3291 irsp->un.ulpWord[4]);
3228 3292
3229 /* 3293 /*
3230 * make sure we have the right iocbq before taking it 3294 * make sure we have the right iocbq before taking it
@@ -3235,23 +3299,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3235 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 3299 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
3236 spin_unlock_irq(&phba->hbalock); 3300 spin_unlock_irq(&phba->hbalock);
3237 else { 3301 else {
3238 list_del(&abort_iocb->list); 3302 list_del_init(&abort_iocb->list);
3239 pring->txcmplq_cnt--; 3303 pring->txcmplq_cnt--;
3240 spin_unlock_irq(&phba->hbalock); 3304 spin_unlock_irq(&phba->hbalock);
3241 3305
3242 rsp_ab_iocb = lpfc_sli_get_iocbq(phba); 3306 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3243 if (rsp_ab_iocb == NULL) 3307 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3244 lpfc_sli_release_iocbq(phba, abort_iocb); 3308 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
3245 else { 3309 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
3246 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3247 rsp_ab_iocb->iocb.ulpStatus =
3248 IOSTAT_LOCAL_REJECT;
3249 rsp_ab_iocb->iocb.un.ulpWord[4] =
3250 IOERR_SLI_ABORTED;
3251 (abort_iocb->iocb_cmpl)(phba, abort_iocb,
3252 rsp_ab_iocb);
3253 lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
3254 }
3255 } 3310 }
3256 } 3311 }
3257 3312
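In lpfc_sli_abort_els_cmpl above, the old code allocated a second iocb (rsp_ab_iocb) just to carry the synthetic error status into the completion handler, and had to cope with that allocation failing. The patch instead writes IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED into the aborted iocb itself and completes it with itself as both command and response, removing the allocation and its failure path. A minimal sketch of that idea, with hypothetical names and an illustrative error code:

	#include <stddef.h>

	enum { STAT_OK = 0, STAT_LOCAL_REJECT = 1 };
	enum { ERR_ABORTED = 0x1234 };            /* illustrative value, not the driver's */

	struct xfer {
		int status;
		int reason;
		void (*done)(struct xfer *cmd, struct xfer *rsp);
	};

	/* Complete an aborted transfer without allocating a separate response object:
	 * stamp the error into the command itself and pass it as its own response. */
	static void complete_aborted(struct xfer *cmd)
	{
		cmd->status = STAT_LOCAL_REJECT;
		cmd->reason = ERR_ABORTED;
		if (cmd->done)
			cmd->done(cmd, cmd);
	}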
@@ -3259,6 +3314,23 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3259 return; 3314 return;
3260} 3315}
3261 3316
3317static void
3318lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3319 struct lpfc_iocbq *rspiocb)
3320{
3321 IOCB_t *irsp = &rspiocb->iocb;
3322
3323 /* ELS cmd tag <ulpIoTag> completes */
3324 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3325 "%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
3326 "x%x x%x x%x\n",
3327 phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
3328 irsp->un.ulpWord[4], irsp->ulpTimeout);
3329
3330 lpfc_els_free_iocb(phba, cmdiocb);
3331 return;
3332}
3333
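lpfc_ignore_els_cmpl above is a new completion handler that only logs the completion and frees the ELS iocb. It exists so that lpfc_sli_issue_abort_iotag (next) can handle the FC_UNLOADING case by swapping the outstanding iocb's completion pointer (iocb_cmpl, or fabric_iocb_cmpl for LPFC_IO_FABRIC iocbs) to this no-op instead of issuing an abort: when the firmware eventually completes the command, nothing further happens. A minimal sketch of the handler-swap technique, with hypothetical names:

	#include <stdio.h>
	#include <stdlib.h>

	struct els_cmd {
		int tag;
		void (*done)(struct els_cmd *);
	};

	/* Normal completion: would resume the ELS state machine here. */
	static void els_done(struct els_cmd *c)
	{
		printf("ELS tag %d completed, continuing discovery\n", c->tag);
		free(c);
	}

	/* Teardown completion: just log and release, take no further action. */
	static void els_ignore(struct els_cmd *c)
	{
		printf("ELS tag %d completed during unload, ignored\n", c->tag);
		free(c);
	}

	/* During unload, outstanding commands are not aborted; their callback is
	 * redirected so a late completion becomes harmless. */
	static void quiesce(struct els_cmd *outstanding)
	{
		outstanding->done = els_ignore;
	}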
3262int 3334int
3263lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3335lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3264 struct lpfc_iocbq *cmdiocb) 3336 struct lpfc_iocbq *cmdiocb)
@@ -3269,22 +3341,30 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3269 IOCB_t *iabt = NULL; 3341 IOCB_t *iabt = NULL;
3270 int retval = IOCB_ERROR; 3342 int retval = IOCB_ERROR;
3271 3343
3272 /* There are certain command types we don't want 3344 /*
3273 * to abort. 3345 * There are certain command types we don't want to abort. And we
3346 * don't want to abort commands that are already in the process of
3347 * being aborted.
3274 */ 3348 */
3275 icmd = &cmdiocb->iocb; 3349 icmd = &cmdiocb->iocb;
3276 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 3350 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
3277 icmd->ulpCommand == CMD_CLOSE_XRI_CN) 3351 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3352 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
3278 return 0; 3353 return 0;
3279 3354
3280 /* If we're unloading, interrupts are disabled so we 3355 /* If we're unloading, don't abort the iocb, but change the callback so
3281 * need to cleanup the iocb here. 3356 * that nothing happens when it finishes.
3282 */ 3357 */
3283 if (vport->load_flag & FC_UNLOADING) 3358 if (vport->load_flag & FC_UNLOADING) {
3359 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3360 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3361 else
3362 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
3284 goto abort_iotag_exit; 3363 goto abort_iotag_exit;
3364 }
3285 3365
3286 /* issue ABTS for this IOCB based on iotag */ 3366 /* issue ABTS for this IOCB based on iotag */
3287 abtsiocbp = lpfc_sli_get_iocbq(phba); 3367 abtsiocbp = __lpfc_sli_get_iocbq(phba);
3288 if (abtsiocbp == NULL) 3368 if (abtsiocbp == NULL)
3289 return 0; 3369 return 0;
3290 3370
@@ -3308,11 +3388,12 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3308 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 3388 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
3309 3389
3310 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3390 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3311 "%d:0339 Abort xri x%x, original iotag x%x, abort " 3391 "%d (%d):0339 Abort xri x%x, original iotag x%x, "
3312 "cmd iotag x%x\n", 3392 "abort cmd iotag x%x\n",
3313 phba->brd_no, iabt->un.acxri.abortContextTag, 3393 phba->brd_no, vport->vpi,
3394 iabt->un.acxri.abortContextTag,
3314 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 3395 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
3315 retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 3396 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
3316 3397
3317abort_iotag_exit: 3398abort_iotag_exit:
3318 /* 3399 /*
@@ -3471,6 +3552,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3471 * lpfc_sli_issue_call since the wake routine sets a unique value and by 3552 * lpfc_sli_issue_call since the wake routine sets a unique value and by
3472 * definition this is a wait function. 3553 * definition this is a wait function.
3473 */ 3554 */
3555
3474int 3556int
3475lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 3557lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3476 struct lpfc_sli_ring *pring, 3558 struct lpfc_sli_ring *pring,
@@ -3558,9 +3640,8 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3558 int retval; 3640 int retval;
3559 3641
3560 /* The caller must leave context1 empty. */ 3642 /* The caller must leave context1 empty. */
3561 if (pmboxq->context1 != 0) { 3643 if (pmboxq->context1 != 0)
3562 return MBX_NOT_FINISHED; 3644 return MBX_NOT_FINISHED;
3563 }
3564 3645
3565 /* setup wake call as IOCB callback */ 3646 /* setup wake call as IOCB callback */
3566 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 3647 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
@@ -3630,6 +3711,10 @@ lpfc_intr_handler(int irq, void *dev_id)
3630 int i; 3711 int i;
3631 uint32_t control; 3712 uint32_t control;
3632 3713
3714 MAILBOX_t *mbox, *pmbox;
3715 LPFC_MBOXQ_t *pmb;
3716 int rc;
3717
3633 /* 3718 /*
3634 * Get the driver's phba structure from the dev_id and 3719 * Get the driver's phba structure from the dev_id and
3635 * assume the HBA is not interrupting. 3720 * assume the HBA is not interrupting.
@@ -3729,10 +3814,71 @@ lpfc_intr_handler(int irq, void *dev_id)
3729 phba->pport->stopped = 1; 3814 phba->pport->stopped = 1;
3730 } 3815 }
3731 3816
3817 if ((work_ha_copy & HA_MBATT) &&
3818 (phba->sli.mbox_active)) {
3819 pmb = phba->sli.mbox_active;
3820 pmbox = &pmb->mb;
3821 mbox = &phba->slim2p->mbx;
3822
3823 /* First check out the status word */
3824 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3825 if (pmbox->mbxOwner != OWN_HOST) {
3826 /*
3827 * Stray Mailbox Interrupt, mbxCommand <cmd>
3828 * mbxStatus <status>
3829 */
3830 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3831 LOG_SLI,
3832 "%d (%d):0304 Stray Mailbox "
3833 "Interrupt mbxCommand x%x "
3834 "mbxStatus x%x\n",
3835 phba->brd_no,
3836 (pmb->vport
3837 ? pmb->vport->vpi
3838 : 0),
3839 pmbox->mbxCommand,
3840 pmbox->mbxStatus);
3841 }
3842 del_timer_sync(&phba->sli.mbox_tmo);
3843
3844 spin_lock(&phba->pport->work_port_lock);
3845 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3846 spin_unlock(&phba->pport->work_port_lock);
3847 phba->sli.mbox_active = NULL;
3848 if (pmb->mbox_cmpl) {
3849 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3850 MAILBOX_CMD_SIZE);
3851 }
3852 lpfc_mbox_cmpl_put(phba, pmb);
3853 }
3854 if ((work_ha_copy & HA_MBATT) &&
3855 (phba->sli.mbox_active == NULL)) {
3856send_next_mbox:
3857 spin_lock(&phba->hbalock);
3858 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3859 pmb = lpfc_mbox_get(phba);
3860 spin_unlock(&phba->hbalock);
3861
3862 /* Process next mailbox command if there is one */
3863 if (pmb != NULL) {
3864 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3865 if (rc == MBX_NOT_FINISHED) {
3866 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3867 lpfc_mbox_cmpl_put(phba, pmb);
3868 goto send_next_mbox;
3869 }
3870 } else {
3871 /* Turn on IOCB processing */
3872 for (i = 0; i < phba->sli.num_rings; i++)
3873 lpfc_sli_turn_on_ring(phba, i);
3874 }
3875
3876 }
3877
3732 spin_lock(&phba->hbalock); 3878 spin_lock(&phba->hbalock);
3733 phba->work_ha |= work_ha_copy; 3879 phba->work_ha |= work_ha_copy;
3734 if (phba->work_wait) 3880 if (phba->work_wait)
3735 wake_up(phba->work_wait); 3881 lpfc_worker_wake_up(phba);
3736 spin_unlock(&phba->hbalock); 3882 spin_unlock(&phba->hbalock);
3737 } 3883 }
3738 3884
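The interrupt-handler hunk above completes mailbox commands directly instead of deferring them all to the worker thread. On an HA_MBATT attention the handler first copies a single word from the host-memory mailbox to check mbxOwner (logging the stray-interrupt message 0304 if ownership has not returned to the host), then copies the full command back, hands it to lpfc_mbox_cmpl_put, and immediately tries to launch the next queued mailbox, looping through the send_next_mbox label while submissions fail. A minimal sketch of the check-ownership-then-copy step, with an illustrative mailbox layout in which word 0 carries the owner bit:

	#include <stdint.h>
	#include <stdbool.h>

	#define OWN_HOST    1
	#define MBOX_WORDS 32                        /* illustrative size */

	struct hw_mbox {
		volatile uint32_t word[MBOX_WORDS];      /* word 0 holds the owner bit in this sketch */
	};

	/* Copy the command out only if the hardware has handed the mailbox back to
	 * the host; otherwise treat the attention as stray and leave it alone. */
	static bool mbox_try_complete(struct hw_mbox *hw, uint32_t *out)
	{
		uint32_t status = hw->word[0];           /* peek the status word first */

		if ((status & OWN_HOST) == 0)
			return false;                    /* stray attention: caller just logs it */

		for (int i = 0; i < MBOX_WORDS; i++)     /* now copy the whole command back */
			out[i] = hw->word[i];
		return true;
	}

	/* In the driver, a successful copy is followed by queuing the command on
	 * mboxq_cmpl and kicking the next pending mailbox, retrying until one is
	 * accepted or the queue is empty. */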