Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  429
1 file changed, 318 insertions(+), 111 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ecebdfa00470..3205f7488d1c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -212,6 +212,18 @@ out_free_mbox:
 	return 0;
 }
 
+/* Completion handler for config async event mailbox command. */
+static void
+lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+	if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
+		phba->temp_sensor_support = 1;
+	else
+		phba->temp_sensor_support = 0;
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	return;
+}
+
 /************************************************************************/
 /*                                                                      */
 /*    lpfc_config_port_post                                             */
@@ -234,6 +246,15 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	int i, j;
 	int rc;
 
+	spin_lock_irq(&phba->hbalock);
+	/*
+	 * If the Config port completed correctly the HBA is not
+	 * over heated any more.
+	 */
+	if (phba->over_temp_state == HBA_OVER_TEMP)
+		phba->over_temp_state = HBA_NORMAL_TEMP;
+	spin_unlock_irq(&phba->hbalock);
+
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmb) {
 		phba->link_state = LPFC_HBA_ERROR;
@@ -343,7 +364,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
 	phba->link_state = LPFC_LINK_DOWN;
 
-	/* Only process IOCBs on ring 0 till hba_state is READY */
+	/* Only process IOCBs on ELS ring till hba_state is READY */
 	if (psli->ring[psli->extra_ring].cmdringaddr)
 		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
 	if (psli->ring[psli->fcp_ring].cmdringaddr)
@@ -409,7 +430,21 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 		return -EIO;
 	}
 	/* MBOX buffer will be freed in mbox compl */
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
+	pmb->mbox_cmpl = lpfc_config_async_cmpl;
+	pmb->vport = phba->pport;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+		lpfc_printf_log(phba,
+				KERN_ERR,
+				LOG_INIT,
+				"0456 Adapter failed to issue "
+				"ASYNCEVT_ENABLE mbox status x%x \n.",
+				rc);
+		mempool_free(pmb, phba->mbox_mem_pool);
+	}
 	return (0);
 }
 
@@ -449,6 +484,9 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 	struct lpfc_dmabuf *mp, *next_mp;
+	struct lpfc_iocbq *iocb;
+	IOCB_t *cmd = NULL;
+	LIST_HEAD(completions);
 	int i;
 
 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
@@ -464,16 +502,42 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
 		}
 	}
 
+	spin_lock_irq(&phba->hbalock);
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
+
+		/* At this point in time the HBA is either reset or DOA. Either
+		 * way, nothing should be on txcmplq as it will NEVER complete.
+		 */
+		list_splice_init(&pring->txcmplq, &completions);
+		pring->txcmplq_cnt = 0;
+		spin_unlock_irq(&phba->hbalock);
+
+		while (!list_empty(&completions)) {
+			iocb = list_get_first(&completions, struct lpfc_iocbq,
+				list);
+			cmd = &iocb->iocb;
+			list_del_init(&iocb->list);
+
+			if (!iocb->iocb_cmpl)
+				lpfc_sli_release_iocbq(phba, iocb);
+			else {
+				cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+				cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+				(iocb->iocb_cmpl) (phba, iocb, iocb);
+			}
+		}
+
 		lpfc_sli_abort_iocb_ring(phba, pring);
+		spin_lock_irq(&phba->hbalock);
 	}
+	spin_unlock_irq(&phba->hbalock);
 
 	return 0;
 }
 
 /* HBA heart beat timeout handler */
-void
+static void
 lpfc_hb_timeout(unsigned long ptr)
 {
 	struct lpfc_hba *phba;
@@ -512,8 +576,10 @@ void
 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *pmboxq;
+	struct lpfc_dmabuf *buf_ptr;
 	int retval;
 	struct lpfc_sli *psli = &phba->sli;
+	LIST_HEAD(completions);
 
 	if ((phba->link_state == LPFC_HBA_ERROR) ||
 		(phba->pport->load_flag & FC_UNLOADING) ||
@@ -540,49 +606,88 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	}
 	spin_unlock_irq(&phba->pport->work_port_lock);
 
-	/* If there is no heart beat outstanding, issue a heartbeat command */
-	if (!phba->hb_outstanding) {
-		pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
-		if (!pmboxq) {
-			mod_timer(&phba->hb_tmofunc,
-				  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
-			return;
+	if (phba->elsbuf_cnt &&
+		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
+		spin_lock_irq(&phba->hbalock);
+		list_splice_init(&phba->elsbuf, &completions);
+		phba->elsbuf_cnt = 0;
+		phba->elsbuf_prev_cnt = 0;
+		spin_unlock_irq(&phba->hbalock);
+
+		while (!list_empty(&completions)) {
+			list_remove_head(&completions, buf_ptr,
+				struct lpfc_dmabuf, list);
+			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+			kfree(buf_ptr);
 		}
+	}
+	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
 
-		lpfc_heart_beat(phba, pmboxq);
-		pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
-		pmboxq->vport = phba->pport;
-		retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+	/* If there is no heart beat outstanding, issue a heartbeat command */
+	if (phba->cfg_enable_hba_heartbeat) {
+		if (!phba->hb_outstanding) {
+			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
+			if (!pmboxq) {
+				mod_timer(&phba->hb_tmofunc,
+					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+				return;
+			}
 
-		if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
-			mempool_free(pmboxq, phba->mbox_mem_pool);
+			lpfc_heart_beat(phba, pmboxq);
+			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+			pmboxq->vport = phba->pport;
+			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+
+			if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
+				mempool_free(pmboxq, phba->mbox_mem_pool);
+				mod_timer(&phba->hb_tmofunc,
					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+				return;
+			}
 			mod_timer(&phba->hb_tmofunc,
-				  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+			phba->hb_outstanding = 1;
 			return;
+		} else {
+			/*
+			 * If heart beat timeout called with hb_outstanding set
+			 * we need to take the HBA offline.
+			 */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0459 Adapter heartbeat failure, "
+					"taking this port offline.\n");
+
+			spin_lock_irq(&phba->hbalock);
+			psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+			spin_unlock_irq(&phba->hbalock);
+
+			lpfc_offline_prep(phba);
+			lpfc_offline(phba);
+			lpfc_unblock_mgmt_io(phba);
+			phba->link_state = LPFC_HBA_ERROR;
+			lpfc_hba_down_post(phba);
 		}
-		mod_timer(&phba->hb_tmofunc,
-			  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
-		phba->hb_outstanding = 1;
-		return;
-	} else {
-		/*
-		 * If heart beat timeout called with hb_outstanding set we
-		 * need to take the HBA offline.
-		 */
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0459 Adapter heartbeat failure, taking "
-				"this port offline.\n");
+	}
+}
 
-		spin_lock_irq(&phba->hbalock);
-		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
-		spin_unlock_irq(&phba->hbalock);
+static void
+lpfc_offline_eratt(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
 
-		lpfc_offline_prep(phba);
-		lpfc_offline(phba);
-		lpfc_unblock_mgmt_io(phba);
-		phba->link_state = LPFC_HBA_ERROR;
-		lpfc_hba_down_post(phba);
-	}
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_offline_prep(phba);
+
+	lpfc_offline(phba);
+	lpfc_reset_barrier(phba);
+	lpfc_sli_brdreset(phba);
+	lpfc_hba_down_post(phba);
+	lpfc_sli_brdready(phba, HS_MBRDY);
+	lpfc_unblock_mgmt_io(phba);
+	phba->link_state = LPFC_HBA_ERROR;
+	return;
 }
 
 /************************************************************************/
@@ -601,6 +706,8 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
 	struct lpfc_sli_ring *pring;
 	struct lpfc_vport **vports;
 	uint32_t event_data;
+	unsigned long temperature;
+	struct temp_event temp_event_data;
 	struct Scsi_Host *shost;
 	int i;
 
@@ -608,6 +715,9 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
 	 * since we cannot communicate with the pci card anyway. */
 	if (pci_channel_offline(phba->pcidev))
 		return;
+	/* If resets are disabled then leave the HBA alone and return */
+	if (!phba->cfg_enable_hba_reset)
+		return;
 
 	if (phba->work_hs & HS_FFER6 ||
 	    phba->work_hs & HS_FFER5) {
@@ -620,14 +730,14 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
 		vports = lpfc_create_vport_work_array(phba);
 		if (vports != NULL)
 			for(i = 0;
-			    i < LPFC_MAX_VPORTS && vports[i] != NULL;
+			    i <= phba->max_vpi && vports[i] != NULL;
 			    i++){
 				shost = lpfc_shost_from_vport(vports[i]);
 				spin_lock_irq(shost->host_lock);
 				vports[i]->fc_flag |= FC_ESTABLISH_LINK;
 				spin_unlock_irq(shost->host_lock);
 			}
-		lpfc_destroy_vport_work_array(vports);
+		lpfc_destroy_vport_work_array(phba, vports);
 		spin_lock_irq(&phba->hbalock);
 		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
 		spin_unlock_irq(&phba->hbalock);
@@ -655,6 +765,31 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
 			return;
 		}
 		lpfc_unblock_mgmt_io(phba);
+	} else if (phba->work_hs & HS_CRIT_TEMP) {
+		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
+		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+		temp_event_data.event_code = LPFC_CRIT_TEMP;
+		temp_event_data.data = (uint32_t)temperature;
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0459 Adapter maximum temperature exceeded "
+				"(%ld), taking this port offline "
+				"Data: x%x x%x x%x\n",
+				temperature, phba->work_hs,
+				phba->work_status[0], phba->work_status[1]);
+
+		shost = lpfc_shost_from_vport(phba->pport);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+					  sizeof(temp_event_data),
+					  (char *) &temp_event_data,
+					  SCSI_NL_VID_TYPE_PCI
+					  | PCI_VENDOR_ID_EMULEX);
+
+		spin_lock_irq(&phba->hbalock);
+		phba->over_temp_state = HBA_OVER_TEMP;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_offline_eratt(phba);
+
 	} else {
 		/* The if clause above forces this code path when the status
 		 * failure is a value other than FFER6. Do not call the offline
@@ -672,14 +807,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
 				  sizeof(event_data), (char *) &event_data,
 				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 
-		spin_lock_irq(&phba->hbalock);
-		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
-		spin_unlock_irq(&phba->hbalock);
-		lpfc_offline_prep(phba);
-		lpfc_offline(phba);
-		lpfc_unblock_mgmt_io(phba);
-		phba->link_state = LPFC_HBA_ERROR;
-		lpfc_hba_down_post(phba);
+		lpfc_offline_eratt(phba);
 	}
 }
 
@@ -699,21 +827,25 @@ lpfc_handle_latt(struct lpfc_hba *phba)
 	LPFC_MBOXQ_t *pmb;
 	volatile uint32_t control;
 	struct lpfc_dmabuf *mp;
-	int rc = -ENOMEM;
+	int rc = 0;
 
 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!pmb)
+	if (!pmb) {
+		rc = 1;
 		goto lpfc_handle_latt_err_exit;
+	}
 
 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-	if (!mp)
+	if (!mp) {
+		rc = 2;
 		goto lpfc_handle_latt_free_pmb;
+	}
 
 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-	if (!mp->virt)
+	if (!mp->virt) {
+		rc = 3;
 		goto lpfc_handle_latt_free_mp;
-
-	rc = -EIO;
+	}
 
 	/* Cleanup any outstanding ELS commands */
 	lpfc_els_flush_all_cmd(phba);
@@ -722,9 +854,11 @@ lpfc_handle_latt(struct lpfc_hba *phba)
 	lpfc_read_la(phba, pmb, mp);
 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
 	pmb->vport = vport;
-	rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
-	if (rc == MBX_NOT_FINISHED)
+	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		rc = 4;
 		goto lpfc_handle_latt_free_mbuf;
+	}
 
 	/* Clear Link Attention in HA REG */
 	spin_lock_irq(&phba->hbalock);
@@ -756,10 +890,8 @@ lpfc_handle_latt_err_exit:
 	lpfc_linkdown(phba);
 	phba->link_state = LPFC_HBA_ERROR;
 
-	/* The other case is an error from issue_mbox */
-	if (rc == -ENOMEM)
-		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-				"0300 READ_LA: no buffers\n");
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
 
 	return;
 }
@@ -1088,9 +1220,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
 		/* Allocate buffer to post */
 		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
 		if (mp1)
-		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
-						&mp1->phys);
-		if (mp1 == 0 || mp1->virt == 0) {
+		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
+		if (!mp1 || !mp1->virt) {
 			kfree(mp1);
 			lpfc_sli_release_iocbq(phba, iocb);
 			pring->missbufcnt = cnt;
@@ -1104,7 +1235,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
 		if (mp2)
 			mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
 						    &mp2->phys);
-		if (mp2 == 0 || mp2->virt == 0) {
+		if (!mp2 || !mp2->virt) {
 			kfree(mp2);
 			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
 			kfree(mp1);
@@ -1280,15 +1411,39 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
 	kfree(HashWorking);
 }
 
-static void
+void
 lpfc_cleanup(struct lpfc_vport *vport)
 {
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nodelist *ndlp, *next_ndlp;
+	int i = 0;
 
-	/* clean up phba - lpfc specific */
-	lpfc_can_disctmo(vport);
-	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
-		lpfc_nlp_put(ndlp);
+	if (phba->link_state > LPFC_LINK_DOWN)
+		lpfc_port_link_failure(vport);
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (ndlp->nlp_type & NLP_FABRIC)
+			lpfc_disc_state_machine(vport, ndlp, NULL,
+					NLP_EVT_DEVICE_RECOVERY);
+		lpfc_disc_state_machine(vport, ndlp, NULL,
+				NLP_EVT_DEVICE_RM);
+	}
+
+	/* At this point, ALL ndlp's should be gone
+	 * because of the previous NLP_EVT_DEVICE_RM.
+	 * Lets wait for this to happen, if needed.
+	 */
+	while (!list_empty(&vport->fc_nodes)) {
+
+		if (i++ > 3000) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				"0233 Nodelist not empty\n");
+			break;
+		}
+
+		/* Wait for any activity on ndlps to settle */
+		msleep(10);
+	}
 	return;
 }
 
@@ -1307,14 +1462,14 @@ lpfc_establish_link_tmo(unsigned long ptr)
 		phba->pport->fc_flag, phba->pport->port_state);
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
 			struct Scsi_Host *shost;
 			shost = lpfc_shost_from_vport(vports[i]);
 			spin_lock_irqsave(shost->host_lock, iflag);
 			vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
 			spin_unlock_irqrestore(shost->host_lock, iflag);
 		}
-	lpfc_destroy_vport_work_array(vports);
+	lpfc_destroy_vport_work_array(phba, vports);
 }
 
 void
@@ -1339,6 +1494,16 @@ lpfc_stop_phba_timers(struct lpfc_hba *phba)
 	return;
 }
 
+static void
+lpfc_block_mgmt_io(struct lpfc_hba * phba)
+{
+	unsigned long iflag;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
 int
 lpfc_online(struct lpfc_hba *phba)
 {
@@ -1369,7 +1534,7 @@ lpfc_online(struct lpfc_hba *phba)
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
 			struct Scsi_Host *shost;
 			shost = lpfc_shost_from_vport(vports[i]);
 			spin_lock_irq(shost->host_lock);
@@ -1378,23 +1543,13 @@ lpfc_online(struct lpfc_hba *phba)
 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 			spin_unlock_irq(shost->host_lock);
 		}
-	lpfc_destroy_vport_work_array(vports);
+	lpfc_destroy_vport_work_array(phba, vports);
 
 	lpfc_unblock_mgmt_io(phba);
 	return 0;
 }
 
 void
-lpfc_block_mgmt_io(struct lpfc_hba * phba)
-{
-	unsigned long iflag;
-
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
-}
-
-void
 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
 {
 	unsigned long iflag;
@@ -1409,6 +1564,8 @@ lpfc_offline_prep(struct lpfc_hba * phba)
 {
 	struct lpfc_vport *vport = phba->pport;
 	struct lpfc_nodelist *ndlp, *next_ndlp;
+	struct lpfc_vport **vports;
+	int i;
 
 	if (vport->fc_flag & FC_OFFLINE_MODE)
 		return;
@@ -1417,10 +1574,34 @@ lpfc_offline_prep(struct lpfc_hba * phba)
 
 	lpfc_linkdown(phba);
 
-	/* Issue an unreg_login to all nodes */
-	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
-		if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
-			lpfc_unreg_rpi(vport, ndlp);
+	/* Issue an unreg_login to all nodes on all vports */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			struct Scsi_Host *shost;
+
+			if (vports[i]->load_flag & FC_UNLOADING)
+				continue;
+			shost = lpfc_shost_from_vport(vports[i]);
+			list_for_each_entry_safe(ndlp, next_ndlp,
+						 &vports[i]->fc_nodes,
+						 nlp_listp) {
+				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+					continue;
+				if (ndlp->nlp_type & NLP_FABRIC) {
+					lpfc_disc_state_machine(vports[i], ndlp,
+						NULL, NLP_EVT_DEVICE_RECOVERY);
+					lpfc_disc_state_machine(vports[i], ndlp,
+						NULL, NLP_EVT_DEVICE_RM);
+				}
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_unreg_rpi(vports[i], ndlp);
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
 
 	lpfc_sli_flush_mbox_queue(phba);
 }
@@ -1439,9 +1620,9 @@ lpfc_offline(struct lpfc_hba *phba)
 	lpfc_stop_phba_timers(phba);
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
+		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
 			lpfc_stop_vport_timers(vports[i]);
-	lpfc_destroy_vport_work_array(vports);
+	lpfc_destroy_vport_work_array(phba, vports);
 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 			"0460 Bring Adapter offline\n");
 	/* Bring down the SLI Layer and cleanup.  The HBA is offline
@@ -1452,15 +1633,14 @@ lpfc_offline(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
 			shost = lpfc_shost_from_vport(vports[i]);
-			lpfc_cleanup(vports[i]);
 			spin_lock_irq(shost->host_lock);
 			vports[i]->work_port_events = 0;
 			vports[i]->fc_flag |= FC_OFFLINE_MODE;
 			spin_unlock_irq(shost->host_lock);
 		}
-	lpfc_destroy_vport_work_array(vports);
+	lpfc_destroy_vport_work_array(phba, vports);
 }
 
 /******************************************************************************
@@ -1674,6 +1854,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
 	fc_host_supported_speeds(shost) = 0;
 	if (phba->lmt & LMT_10Gb)
 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
+	if (phba->lmt & LMT_8Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
 	if (phba->lmt & LMT_4Gb)
 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
 	if (phba->lmt & LMT_2Gb)
@@ -1707,13 +1889,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct Scsi_Host *shost = NULL;
 	void *ptr;
 	unsigned long bar0map_len, bar2map_len;
-	int error = -ENODEV;
+	int error = -ENODEV, retval;
 	int i, hbq_count;
 	uint16_t iotag;
+	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 
-	if (pci_enable_device(pdev))
+	if (pci_enable_device_bars(pdev, bars))
 		goto out;
-	if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
+	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
 		goto out_disable_device;
 
 	phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
@@ -1823,9 +2006,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	lpfc_sli_setup(phba);
 	lpfc_sli_queue_setup(phba);
 
-	error = lpfc_mem_alloc(phba);
-	if (error)
+	retval = lpfc_mem_alloc(phba);
+	if (retval) {
+		error = retval;
 		goto out_free_hbqslimp;
+	}
 
 	/* Initialize and populate the iocb list per host.  */
 	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
@@ -1880,6 +2065,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	/* Initialize list of fabric iocbs */
 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
 
+	/* Initialize list to save ELS buffers */
+	INIT_LIST_HEAD(&phba->elsbuf);
+
 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 	if (!vport)
 		goto out_kthread_stop;
@@ -1891,8 +2079,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	pci_set_drvdata(pdev, shost);
 
 	if (phba->cfg_use_msi) {
-		error = pci_enable_msi(phba->pcidev);
-		if (!error)
+		retval = pci_enable_msi(phba->pcidev);
+		if (!retval)
 			phba->using_msi = 1;
 		else
 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -1900,11 +2088,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 				"with IRQ\n");
 	}
 
-	error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
+	retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
 			    LPFC_DRIVER_NAME, phba);
-	if (error) {
+	if (retval) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0451 Enable interrupt handler failed\n");
+		error = retval;
 		goto out_disable_msi;
 	}
 
@@ -1914,11 +2103,15 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
 
-	if (lpfc_alloc_sysfs_attr(vport))
+	if (lpfc_alloc_sysfs_attr(vport)) {
+		error = -ENOMEM;
 		goto out_free_irq;
+	}
 
-	if (lpfc_sli_hba_setup(phba))
+	if (lpfc_sli_hba_setup(phba)) {
+		error = -ENODEV;
 		goto out_remove_device;
+	}
 
 	/*
 	 * hba setup may have changed the hba_queue_depth so we need to adjust
@@ -1975,7 +2168,7 @@ out_idr_remove:
 out_free_phba:
 	kfree(phba);
 out_release_regions:
-	pci_release_regions(pdev);
+	pci_release_selected_regions(pdev, bars);
 out_disable_device:
 	pci_disable_device(pdev);
 out:
@@ -1991,6 +2184,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
+	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
 	spin_lock_irq(&phba->hbalock);
 	vport->load_flag |= FC_UNLOADING;
 	spin_unlock_irq(&phba->hbalock);
@@ -1998,8 +2193,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	kfree(vport->vname);
 	lpfc_free_sysfs_attr(vport);
 
+	kthread_stop(phba->worker_thread);
+
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
+	lpfc_cleanup(vport);
+
 	/*
 	 * Bring down the SLI Layer.  This step disable all interrupts,
 	 * clears the rings, discards all mailbox commands, and resets
@@ -2014,9 +2213,6 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 	spin_unlock_irq(&phba->hbalock);
 
 	lpfc_debugfs_terminate(vport);
-	lpfc_cleanup(vport);
-
-	kthread_stop(phba->worker_thread);
 
 	/* Release the irq reservation */
 	free_irq(phba->pcidev->irq, phba);
@@ -2048,7 +2244,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 
 	kfree(phba);
 
-	pci_release_regions(pdev);
+	pci_release_selected_regions(pdev, bars);
 	pci_disable_device(pdev);
 }
 
@@ -2239,12 +2435,22 @@ lpfc_init(void)
 	printk(LPFC_MODULE_DESC "\n");
 	printk(LPFC_COPYRIGHT "\n");
 
+	if (lpfc_enable_npiv) {
+		lpfc_transport_functions.vport_create = lpfc_vport_create;
+		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
+	}
 	lpfc_transport_template =
 		fc_attach_transport(&lpfc_transport_functions);
-	lpfc_vport_transport_template =
-		fc_attach_transport(&lpfc_vport_transport_functions);
-	if (!lpfc_transport_template || !lpfc_vport_transport_template)
+	if (lpfc_transport_template == NULL)
 		return -ENOMEM;
+	if (lpfc_enable_npiv) {
+		lpfc_vport_transport_template =
+			fc_attach_transport(&lpfc_vport_transport_functions);
+		if (lpfc_vport_transport_template == NULL) {
+			fc_release_transport(lpfc_transport_template);
+			return -ENOMEM;
+		}
+	}
 	error = pci_register_driver(&lpfc_driver);
 	if (error) {
 		fc_release_transport(lpfc_transport_template);
@@ -2259,7 +2465,8 @@ lpfc_exit(void)
 {
 	pci_unregister_driver(&lpfc_driver);
 	fc_release_transport(lpfc_transport_template);
-	fc_release_transport(lpfc_vport_transport_template);
+	if (lpfc_enable_npiv)
+		fc_release_transport(lpfc_vport_transport_template);
 }
 
 module_init(lpfc_init);