path: root/drivers/scsi/lpfc
author	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-18 19:54:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-18 19:54:31 -0400
commit	961cde93dee2658000ead32abffb8ddf0727abe0 (patch)
tree	2419e204132abe2ec2bb7f08bd20042573cc9bd6 /drivers/scsi/lpfc
parent	f82c37e7bb4c4d9b6a476c642d5c2d2efbd6f240 (diff)
parent	0d9dc7c8b9b7fa0f53647423b41056ee1beed735 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (69 commits)
  [SCSI] scsi_transport_fc: Fix synchronization issue while deleting vport
  [SCSI] bfa: Update the driver version to 2.1.2.1.
  [SCSI] bfa: Remove unused header files and did some cleanup.
  [SCSI] bfa: Handle SCSI IO underrun case.
  [SCSI] bfa: FCS and include file changes.
  [SCSI] bfa: Modified the portstats get/clear logic
  [SCSI] bfa: Replace bfa_get_attr() with specific APIs
  [SCSI] bfa: New portlog entries for events (FIP/FLOGI/FDISC/LOGO).
  [SCSI] bfa: Rename pport to fcport in BFA FCS.
  [SCSI] bfa: IOC fixes, check for IOC down condition.
  [SCSI] bfa: In MSIX mode, ignore spurious RME interrupts when FCoE ports are in FW mismatch state.
  [SCSI] bfa: Fix Command Queue (CPE) full condition check and ack CPE interrupt.
  [SCSI] bfa: IOC recovery fix in fcmode.
  [SCSI] bfa: AEN and byte alignment fixes.
  [SCSI] bfa: Introduce a link notification state machine.
  [SCSI] bfa: Added firmware save clear feature for BFA driver.
  [SCSI] bfa: FCS authentication related changes.
  [SCSI] bfa: PCI VPD, FIP and include file changes.
  [SCSI] bfa: Fix to copy fpma MAC when requested by user space application.
  [SCSI] bfa: RPORT state machine: direct attach mode fix.
  ...
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h           10
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       7
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c      332
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h       12
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h       7
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c      142
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  527
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     277
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h     1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c       8
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      49
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      413
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h        1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h      38
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h    2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c      7
16 files changed, 1556 insertions(+), 277 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 84b696463a58..565e16dd74fc 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -37,6 +37,9 @@ struct lpfc_sli2_slim;
 	the NameServer before giving up. */
 #define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
 #define LPFC_DEFAULT_SG_SEG_CNT	64	/* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128	/* sg element count per scsi
+		cmnd for menlo needs nearly twice as for firmware
+		downloads using bsg */
 #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
 #define LPFC_MAX_PROT_SG_SEG_CNT 4096	/* prot sg element count per scsi cmd*/
@@ -509,7 +512,6 @@ struct lpfc_hba {
 	int (*lpfc_hba_down_link)
 			(struct lpfc_hba *);
 
-
 	/* SLI4 specific HBA data structure */
 	struct lpfc_sli4_hba sli4_hba;
 
@@ -623,6 +625,9 @@ struct lpfc_hba {
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_aer_support;
 	uint32_t cfg_suppress_link_up;
+#define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
+#define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
+#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -804,6 +809,9 @@ struct lpfc_hba {
 	struct list_head ct_ev_waiters;
 	struct unsol_rcv_ct_ctx ct_ctx[64];
 	uint32_t ctx_idx;
+
+	uint8_t menlo_flag;	/* menlo generic flags */
+#define HBA_MENLO_SUPPORT	0x1 /* HBA supports menlo commands */
 };
 
 static inline struct Scsi_Host *
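The three LPFC_*_LINK values above give lpfc_suppress_link_up symbolic states in place of the bare 0/1/2 used before. Their actual consumer is the init-link path in lpfc_init.c (changed by this merge but not excerpted here); the fragment below is only an illustrative sketch of how such a knob is typically branched on, and example_hba_init_link() and issue_init_link_mbox() are hypothetical names, not driver code.

/* Illustrative sketch only, not from this commit.  Shows how an
 * init-link path might branch on the new cfg_suppress_link_up states.
 * issue_init_link_mbox() is a hypothetical helper standing in for the
 * real INIT_LINK mailbox submission.
 */
static int issue_init_link_mbox(struct lpfc_hba *phba);	/* hypothetical */

static int example_hba_init_link(struct lpfc_hba *phba)
{
	switch (phba->cfg_suppress_link_up) {
	case LPFC_INITIALIZE_LINK:
		/* do normal init_link mbox: bring the link up right away */
		return issue_init_link_mbox(phba);
	case LPFC_DELAY_INIT_LINK:
		/* layered driver hold off: defer link-up until requested */
		return 0;
	case LPFC_DELAY_INIT_LINK_INDEFINITELY:
		/* wait for manual intervention before touching the link */
		return 0;
	default:
		return -EINVAL;
	}
}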
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c992e8328f9e..64cd17eedb64 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1939,7 +1939,9 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
 # 0x2  = never bring up link
 # Default value is 0.
 */
-LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization");
+LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
+		LPFC_DELAY_INIT_LINK_INDEFINITELY,
+		"Suppress Link Up at initialization");
 
 /*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -1966,8 +1968,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
 {
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	int val = 0;
-	val = vport->cfg_devloss_tmo;
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f3f1bf1a0a71..692c29f6048e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -83,15 +83,28 @@ struct lpfc_bsg_mbox {
 	struct fc_bsg_job *set_job;
 };
 
+#define MENLO_DID 0x0000FC0E
+
+struct lpfc_bsg_menlo {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *bmp;
+
+	/* job waiting for this iocb to finish */
+	struct fc_bsg_job *set_job;
+};
+
 #define TYPE_EVT 	1
 #define TYPE_IOCB	2
 #define TYPE_MBOX	3
+#define TYPE_MENLO	4
 struct bsg_job_data {
 	uint32_t type;
 	union {
 		struct lpfc_bsg_event *evt;
 		struct lpfc_bsg_iocb iocb;
 		struct lpfc_bsg_mbox mbox;
+		struct lpfc_bsg_menlo menlo;
 	} context_un;
 };
 
@@ -2456,6 +2469,18 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 	case MBX_PORT_IOV_CONTROL:
 		break;
 	case MBX_SET_VARIABLE:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"1226 mbox: set_variable 0x%x, 0x%x\n",
+			mb->un.varWords[0],
+			mb->un.varWords[1]);
+		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
+			&& (mb->un.varWords[1] == 1)) {
+			phba->wait_4_mlo_maint_flg = 1;
+		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+			phba->link_flag &= ~LS_LOOPBACK_MODE;
+			phba->fc_topology = TOPOLOGY_PT_PT;
+		}
+		break;
 	case MBX_RUN_BIU_DIAG64:
 	case MBX_READ_EVENT_LOG:
 	case MBX_READ_SPARM64:
@@ -2638,6 +2663,297 @@ job_error:
 }
 
 /**
+ * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_menlo_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_bsg_menlo *menlo;
+	unsigned long flags;
+	struct menlo_response *menlo_resp;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	menlo = &dd_data->context_un.menlo;
+	job = menlo->set_job;
+	job->dd_data = NULL; /* so timeout handler does not reply */
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	bmp = menlo->bmp;
+	rspiocbq = menlo->rspiocbq;
+	rsp = &rspiocbq->iocb;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	/* always return the xri, this would be used in the case
+	 * of a menlo download to allow the data to be sent as a continuation
+	 * of the exchange.
+	 */
+	menlo_resp = (struct menlo_response *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+	menlo_resp->xri = rsp->ulpContext;
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	kfree(bmp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
+/**
+ * lpfc_menlo_cmd - send an ioctl for menlo hardware
+ * @job: fc_bsg_job to handle
+ *
+ * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
+ * all the command completions will return the xri for the command.
+ * For menlo data requests a gen request 64 CX is used to continue the exchange
+ * supplied in the menlo request header xri field.
+ **/
+static int
+lpfc_menlo_cmd(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+	IOCB_t *cmd, *rsp;
+	int rc = 0;
+	struct menlo_command *menlo_cmd;
+	struct menlo_response *menlo_resp;
+	struct lpfc_dmabuf *bmp = NULL;
+	int request_nseg;
+	int reply_nseg;
+	struct scatterlist *sgel = NULL;
+	int numbde;
+	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	struct ulp_bde64 *bpl = NULL;
+
+	/* in case no data is returned return just the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+		sizeof(struct menlo_command)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2784 Received MENLO_CMD request below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2785 Received MENLO_CMD reply below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2786 Adapter does not support menlo "
+				"commands\n");
+		rc = -EPERM;
+		goto no_dd_data;
+	}
+
+	menlo_cmd = (struct menlo_command *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	menlo_resp = (struct menlo_response *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2787 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_dd;
+	}
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!rspiocbq) {
+		rc = -ENOMEM;
+		goto free_cmdiocbq;
+	}
+
+	rsp = &rspiocbq->iocb;
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto free_rspiocbq;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.genreq64.bdl.bdeSize =
+	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
+	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
+	cmd->ulpBdeCount = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpOwner = OWN_CHIP;
+	cmd->ulpLe = 1; /* Limited Edition */
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+	/* We want the firmware to timeout before we do */
+	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
+	cmdiocbq->context3 = bmp;
+	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = rspiocbq;
+	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+		cmd->ulpPU = MENLO_PU; /* 3 */
+		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
+		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
+	} else {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
+		cmd->ulpPU = 1;
+		cmd->un.ulpWord[4] = 0;
+		cmd->ulpContext = menlo_cmd->xri;
+	}
+
+	dd_data->type = TYPE_MENLO;
+	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
+	dd_data->context_un.menlo.rspiocbq = rspiocbq;
+	dd_data->context_un.menlo.set_job = job;
+	dd_data->context_un.menlo.bmp = bmp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+		MENLO_TIMEOUT - 5);
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+	/* iocb failed so cleanup */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+free_rspiocbq:
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+free_cmdiocbq:
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_bmp:
+	kfree(bmp);
+free_dd:
+	kfree(dd_data);
+no_dd_data:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+/**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
  **/
@@ -2669,6 +2985,10 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
 	case LPFC_BSG_VENDOR_MBOX:
 		rc = lpfc_bsg_mbox_cmd(job);
 		break;
+	case LPFC_BSG_VENDOR_MENLO_CMD:
+	case LPFC_BSG_VENDOR_MENLO_DATA:
+		rc = lpfc_menlo_cmd(job);
+		break;
 	default:
 		rc = -EINVAL;
 		job->reply->reply_payload_rcv_len = 0;
@@ -2728,6 +3048,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 	struct lpfc_bsg_event *evt;
 	struct lpfc_bsg_iocb *iocb;
 	struct lpfc_bsg_mbox *mbox;
+	struct lpfc_bsg_menlo *menlo;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	struct bsg_job_data *dd_data;
 	unsigned long flags;
@@ -2775,6 +3096,17 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		job->job_done(job);
 		break;
+	case TYPE_MENLO:
+		menlo = &dd_data->context_un.menlo;
+		cmdiocb = menlo->cmdiocbq;
+		/* hint to completion handler that the job timed out */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		/* this will call our completion handler */
+		spin_lock_irq(&phba->hbalock);
+		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irq(&phba->hbalock);
+		break;
 	default:
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 6c8f87e39b98..5bc630819b9e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -31,6 +31,8 @@
 #define LPFC_BSG_VENDOR_DIAG_TEST	5
 #define LPFC_BSG_VENDOR_GET_MGMT_REV	6
 #define LPFC_BSG_VENDOR_MBOX		7
+#define LPFC_BSG_VENDOR_MENLO_CMD	8
+#define LPFC_BSG_VENDOR_MENLO_DATA	9
 
 struct set_ct_event {
 	uint32_t command;
@@ -96,3 +98,13 @@ struct dfc_mbox_req {
 	uint8_t mbOffset;
 };
 
+/* Used for menlo command or menlo data. The xri is only used for menlo data */
+struct menlo_command {
+	uint32_t cmd;
+	uint32_t xri;
+};
+
+struct menlo_response {
+	uint32_t xri; /* return the xri of the iocb exchange */
+};
+
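The two structs above are the user-visible vendor ABI for the new menlo requests: vendor_cmd[] carries a struct menlo_command (with cmd doubling as the LPFC_BSG_VENDOR_* code, as lpfc_menlo_cmd() checks), and the reply's vendor_rsp carries a struct menlo_response with the xri of the exchange. Below is a hedged userspace sketch of driving it through the FC transport bsg node; the device path, the Emulex vendor_id value, and the reply buffer sizing are assumptions, not part of this commit.

/* Hypothetical userspace sketch (not from this commit).  Assumes the
 * <scsi/scsi_bsg_fc.h> layout is visible to userspace and that the FC
 * transport exposes the host bsg node as e.g. /dev/bsg/fc_host0.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>		/* struct sg_io_v4 */
#include <scsi/sg.h>		/* SG_IO */
#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */

#define LPFC_BSG_VENDOR_MENLO_CMD 8	/* from lpfc_bsg.h above */

struct menlo_command { uint32_t cmd; uint32_t xri; };	/* from lpfc_bsg.h */

static int menlo_ioctl(const char *dev, void *out, uint32_t outlen,
		       void *in, uint32_t inlen)
{
	uint8_t req[sizeof(struct fc_bsg_request) + sizeof(struct menlo_command)];
	uint8_t rsp[1024];	/* generous; driver checks a minimum reply_len */
	struct fc_bsg_request *breq = (struct fc_bsg_request *)req;
	struct menlo_command *mcmd =
	    (struct menlo_command *)breq->rqst_data.h_vendor.vendor_cmd;
	struct sg_io_v4 io;
	int fd, rc;

	memset(req, 0, sizeof(req));
	memset(rsp, 0, sizeof(rsp));
	breq->msgcode = FC_BSG_HST_VENDOR;
	/* SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX (assumed value) */
	breq->rqst_data.h_vendor.vendor_id = (0x01ULL << 56) | 0x10df;
	mcmd->cmd = LPFC_BSG_VENDOR_MENLO_CMD;	/* vendor_cmd[0] */
	mcmd->xri = 0;				/* only used for MENLO_DATA */

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)req;
	io.request_len = sizeof(req);
	io.dout_xferp = (uintptr_t)out;		/* menlo command payload */
	io.dout_xfer_len = outlen;
	io.din_xferp = (uintptr_t)in;		/* menlo response payload */
	io.din_xfer_len = inlen;
	io.response = (uintptr_t)rsp;		/* fc_bsg_reply + menlo xri */
	io.max_response_len = sizeof(rsp);

	fd = open(dev, O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, SG_IO, &io);
	close(fd);
	return rc;
}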
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 6f0fb51eb461..5087c4211b43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -63,6 +63,7 @@ void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -221,6 +222,10 @@ void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
 void lpfc_unregister_unused_fcf(struct lpfc_hba *);
 int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
 void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
+void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
+uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
+void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
@@ -385,7 +390,7 @@ void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
 int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
 void lpfc_start_fdiscs(struct lpfc_hba *phba);
 struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
-
+struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN 5
 #define HBA_EVENT_LINK_UP 2
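The four lpfc_sli4_fcf_rr_* prototypes added above are the round robin FCF failover bookkeeping used by the new FLOGI failure path in lpfc_els.c and lpfc_hbadisc.c below. Their implementation lands in lpfc_sli.c (see the diffstat) and is not excerpted here; the standalone model below is only a sketch of plausible set/clear/next semantics over an eligibility bitmask, with the table size and the wrap-around behavior assumed.

/* Illustrative model only: an eligibility bitmask over FCF table indices,
 * with "next" meaning the first set bit strictly after the registered
 * index, wrapping once.  Sizes and names are assumptions, not driver code.
 */
#include <stdint.h>

#define FCF_TBL_INDX_MAX	32		/* assumed table size */
#define FCF_NEXT_NONE		0xFFFF		/* no eligible FCF left */
#define BITS_PER_LONG_EX	(8 * sizeof(long))

struct fcf_rr_bmask {
	unsigned long bmask[(FCF_TBL_INDX_MAX + 8 * sizeof(long) - 1) /
			    (8 * sizeof(long))];
	uint16_t reg_indx;			/* currently registered index */
};

static int fcf_rr_index_set(struct fcf_rr_bmask *p, uint16_t indx)
{
	if (indx >= FCF_TBL_INDX_MAX)
		return -1;			/* out of range: reject */
	p->bmask[indx / BITS_PER_LONG_EX] |= 1UL << (indx % BITS_PER_LONG_EX);
	return 0;
}

static void fcf_rr_index_clear(struct fcf_rr_bmask *p, uint16_t indx)
{
	p->bmask[indx / BITS_PER_LONG_EX] &= ~(1UL << (indx % BITS_PER_LONG_EX));
}

static uint16_t fcf_rr_next_index_get(const struct fcf_rr_bmask *p)
{
	uint16_t i, indx;

	for (i = 1; i <= FCF_TBL_INDX_MAX; i++) {
		indx = (uint16_t)((p->reg_indx + i) % FCF_TBL_INDX_MAX);
		if (p->bmask[indx / BITS_PER_LONG_EX] &
		    (1UL << (indx % BITS_PER_LONG_EX)))
			return indx;
	}
	return FCF_NEXT_NONE;	/* list exhausted: retry the current FCF */
}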
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2a40a6eabf4d..ee980bd66869 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -771,6 +771,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
 	struct serv_parm *sp;
+	uint16_t fcf_index;
 	int rc;
 
 	/* Check to see if link went down during discovery */
@@ -788,6 +789,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				 vport->port_state);
 
 	if (irsp->ulpStatus) {
+		/*
+		 * In case of FIP mode, perform round robin FCF failover
+		 * due to new FCF discovery
+		 */
+		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
+		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+					"2611 FLOGI failed on registered "
+					"FCF record fcf_index:%d, trying "
+					"to perform round robin failover\n",
+					phba->fcf.current_rec.fcf_indx);
+			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+			if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+				/*
+				 * Exhausted the eligible FCF record list,
+				 * fail through to retry FLOGI on current
+				 * FCF record.
+				 */
+				lpfc_printf_log(phba, KERN_WARNING,
+						LOG_FIP | LOG_ELS,
+						"2760 FLOGI exhausted FCF "
+						"round robin failover list, "
+						"retry FLOGI on the current "
+						"registered FCF index:%d\n",
+						phba->fcf.current_rec.fcf_indx);
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+				spin_unlock_irq(&phba->hbalock);
+			} else {
+				rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
+								   fcf_index);
+				if (rc) {
+					lpfc_printf_log(phba, KERN_WARNING,
+							LOG_FIP | LOG_ELS,
+							"2761 FLOGI round "
+							"robin FCF failover "
+							"read FCF failed "
+							"rc:x%x, fcf_index:"
+							"%d\n", rc,
+							phba->fcf.current_rec.fcf_indx);
+					spin_lock_irq(&phba->hbalock);
+					phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+					spin_unlock_irq(&phba->hbalock);
+				} else
+					goto out;
+			}
+		}
+
 		/* Check for retry */
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 			goto out;
@@ -806,9 +855,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	}
 
 	/* FLOGI failure */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "0100 FLOGI failure Data: x%x x%x "
-			 "x%x\n",
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
 			 irsp->ulpStatus, irsp->un.ulpWord[4],
 			 irsp->ulpTimeout);
 	goto flogifail;
@@ -842,8 +890,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		else
 			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
 
-		if (!rc)
+		if (!rc) {
+			/* Mark the FCF discovery process done */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
+					"2769 FLOGI successful on FCF record: "
+					"current_fcf_index:x%x, terminate FCF "
+					"round robin failover process\n",
+					phba->fcf.current_rec.fcf_indx);
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			spin_unlock_irq(&phba->hbalock);
 			goto out;
+		}
 	}
 
 flogifail:
@@ -1409,6 +1467,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		goto out;
 	}
 	/* PLOGI failed */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
 	/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 	if (lpfc_error_lost_link(irsp))
 		rc = NLP_STE_FREED_NODE;
@@ -1577,6 +1639,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		goto out;
 	}
 	/* PRLI failed */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
 	/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 	if (lpfc_error_lost_link(irsp))
 		goto out;
@@ -1860,6 +1926,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		goto out;
 	}
 	/* ADISC failed */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
 	/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 	if (!lpfc_error_lost_link(irsp))
 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -2009,6 +2079,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		/* ELS command is being retried */
 			goto out;
 	/* LOGO failed */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
 	/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 	if (lpfc_error_lost_link(irsp))
 		goto out;
@@ -5989,7 +6063,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	if (phba->sli_rev < LPFC_SLI_REV4)
 		lpfc_issue_fabric_reglogin(vport);
 	else {
-		lpfc_start_fdiscs(phba);
+		/*
+		 * If the physical port is instantiated using
+		 * FDISC, do not start vport discovery.
+		 */
+		if (vport->port_state != LPFC_FDISC)
+			lpfc_start_fdiscs(phba);
 		lpfc_do_scr_ns_plogi(phba, vport);
 		}
 	} else
@@ -6055,21 +6134,18 @@ mbox_err_exit:
 }
 
 /**
- * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine abort all pending discovery commands and
- * start a timer to retry FLOGI for the physical port
- * discovery.
+ * This routine cancels the retry delay timers to all the vports.
  **/
 void
-lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
 {
 	struct lpfc_vport **vports;
 	struct lpfc_nodelist *ndlp;
-	struct Scsi_Host *shost;
-	int i;
 	uint32_t link_state;
+	int i;
 
 	/* Treat this failure as linkdown for all vports */
 	link_state = phba->link_state;
@@ -6087,13 +6163,30 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
 	}
+}
+
+/**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine abort all pending discovery commands and
+ * start a timer to retry FLOGI for the physical port
+ * discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+
+	/* Cancel the all vports retry delay retry timers */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
 
 	/* If fabric require FLOGI, then re-instantiate physical login */
 	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
 	if (!ndlp)
 		return;
 
-
 	shost = lpfc_shost_from_vport(phba->pport);
 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
 	spin_lock_irq(shost->host_lock);
@@ -6219,7 +6312,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		lpfc_mbx_unreg_vpi(vport);
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
 		spin_unlock_irq(shost->host_lock);
 	}
 
@@ -6797,21 +6891,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	unsigned long iflag = 0;
 
-	spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
 	list_for_each_entry_safe(sglq_entry, sglq_next,
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->sli4_xritag == xri) {
 			list_del(&sglq_entry->list);
-			spin_unlock_irqrestore(
-					&phba->sli4_hba.abts_sgl_list_lock,
-					 iflag);
-			spin_lock_irqsave(&phba->hbalock, iflag);
-
 			list_add_tail(&sglq_entry->list,
 				&phba->sli4_hba.lpfc_sgl_list);
+			sglq_entry->state = SGL_FREED;
+			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			return;
 		}
 	}
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+	sglq_entry = __lpfc_get_active_sglq(phba, xri);
+	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	sglq_entry->state = SGL_XRI_ABORTED;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
 }
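The lpfc_sli4_els_xri_aborted() rework above depends on a new per-sglq state field, set to SGL_FREED or SGL_XRI_ABORTED in the hunk. The enum itself is added in lpfc_sli4.h, which is in this merge's diffstat but not excerpted here; the sketch below is an assumed reading of the lifecycle, with SGL_ALLOCATED a guessed intermediate state.

/* Assumed sglq lifecycle implied by the hunk above; the real enum lives in
 * lpfc_sli4.h (part of this merge, not shown).  SGL_ALLOCATED is a guess.
 */
enum example_sgl_state {
	SGL_FREED,		/* on lpfc_sgl_list, safe to reuse */
	SGL_ALLOCATED,		/* guessed: attached to an active ELS iocb */
	SGL_XRI_ABORTED,	/* XRI abort seen first; reuse deferred */
};

Tracking this state under phba->hbalock, which the new code now takes outside abts_sgl_list_lock, closes the window where an XRI-aborted CQ event and an iocb completion could both try to recycle the same sglq.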
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2359d0bfb734..c555e3b7f202 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1481,8 +1481,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 int
 lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 {
-	LPFC_MBOXQ_t *mbox;
-	int rc;
 	/*
 	 * If the Link is up and no FCoE events while in the
 	 * FCF discovery, no need to restart FCF discovery.
@@ -1491,86 +1489,70 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
 		return 0;
 
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2768 Pending link or FCF event during current "
+			"handling of the previous event: link_state:x%x, "
+			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
+			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
+			phba->fcoe_eventtag);
+
 	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
 	spin_unlock_irq(&phba->hbalock);
 
-	if (phba->link_state >= LPFC_LINK_UP)
-		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
-	else {
+	if (phba->link_state >= LPFC_LINK_UP) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2780 Restart FCF table scan due to "
+				"pending FCF event:evt_tag_at_scan:x%x, "
+				"evt_tag_current:x%x\n",
+				phba->fcoe_eventtag_at_fcf_scan,
+				phba->fcoe_eventtag);
+		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+	} else {
 		/*
 		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
 		 * flag
 		 */
 		spin_lock_irq(&phba->hbalock);
 		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-		phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
 		spin_unlock_irq(&phba->hbalock);
 	}
 
+	/* Unregister the currently registered FCF if required */
 	if (unreg_fcf) {
 		spin_lock_irq(&phba->hbalock);
 		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
 		spin_unlock_irq(&phba->hbalock);
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (!mbox) {
-			lpfc_printf_log(phba, KERN_ERR,
-					LOG_DISCOVERY|LOG_MBOX,
-					"2610 UNREG_FCFI mbox allocation failed\n");
-			return 1;
-		}
-		lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
-		mbox->vport = phba->pport;
-		mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
-		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-		if (rc == MBX_NOT_FINISHED) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-				"2611 UNREG_FCFI issue mbox failed\n");
-			mempool_free(mbox, phba->mbox_mem_pool);
-		}
+		lpfc_sli4_unregister_fcf(phba);
 	}
-
 	return 1;
 }
 
 /**
- * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
+ * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
  * @phba: pointer to lpfc hba data structure.
  * @mboxq: pointer to mailbox object.
+ * @next_fcf_index: pointer to holder of next fcf index.
  *
- * This function iterate through all the fcf records available in
- * HBA and choose the optimal FCF record for discovery. After finding
- * the FCF for discovery it register the FCF record and kick start
- * discovery.
- * If FCF_IN_USE flag is set in currently used FCF, the routine try to
- * use a FCF record which match fabric name and mac address of the
- * currently used FCF record.
- * If the driver support only one FCF, it will try to use the FCF record
- * used by BOOT_BIOS.
+ * This routine parses the non-embedded fcf mailbox command by performing the
+ * necessarily error checking, non-embedded read FCF record mailbox command
+ * SGE parsing, and endianness swapping.
+ *
+ * Returns the pointer to the new FCF record in the non-embedded mailbox
+ * command DMA memory if successfully, other NULL.
  */
-void
-lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+static struct fcf_record *
+lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+			     uint16_t *next_fcf_index)
 {
 	void *virt_addr;
 	dma_addr_t phys_addr;
-	uint8_t *bytep;
 	struct lpfc_mbx_sge sge;
 	struct lpfc_mbx_read_fcf_tbl *read_fcf;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 	struct fcf_record *new_fcf_record;
-	uint32_t boot_flag, addr_mode;
-	uint32_t next_fcf_index;
-	struct lpfc_fcf_rec *fcf_rec = NULL;
-	unsigned long iflags;
-	uint16_t vlan_id;
-	int rc;
-
-	/* If there is pending FCoE event restart FCF table scan */
-	if (lpfc_check_pending_fcoe_event(phba, 0)) {
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return;
-	}
 
 	/* Get the first SGE entry from the non-embedded DMA memory. This
 	 * routine only uses a single SGE.
@@ -1581,59 +1563,183 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 				"2524 Failed to get the non-embedded SGE "
 				"virtual address\n");
-		goto out;
+		return NULL;
 	}
 	virt_addr = mboxq->sge_array->addr[0];
 
 	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
-				 &shdr->response);
-	/*
-	 * The FCF Record was read and there is no reason for the driver
-	 * to maintain the FCF record data or memory. Instead, just need
-	 * to book keeping the FCFIs can be used.
-	 */
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status) {
-		if (shdr_status == STATUS_FCF_TABLE_EMPTY) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 					"2726 READ_FCF_RECORD Indicates empty "
 					"FCF table.\n");
-		} else {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		else
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 					"2521 READ_FCF_RECORD mailbox failed "
-					"with status x%x add_status x%x, mbx\n",
-					shdr_status, shdr_add_status);
-		}
-		goto out;
+					"with status x%x add_status x%x, "
+					"mbx\n", shdr_status, shdr_add_status);
+		return NULL;
 	}
-	/* Interpreting the returned information of FCF records */
+
+	/* Interpreting the returned information of the FCF record */
 	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
 	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
 			      sizeof(struct lpfc_mbx_read_fcf_tbl));
-	next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
-
+	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
 	new_fcf_record = (struct fcf_record *)(virt_addr +
 			  sizeof(struct lpfc_mbx_read_fcf_tbl));
 	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
 			      sizeof(struct fcf_record));
-	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
 
+	return new_fcf_record;
+}
+
+/**
+ * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record.
+ * @vlan_id: the lowest vlan identifier associated to this fcf record.
+ * @next_fcf_index: the index to the next fcf record in hba's fcf table.
+ *
+ * This routine logs the detailed FCF record if the LOG_FIP loggin is
+ * enabled.
+ **/
+static void
+lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
+			      struct fcf_record *fcf_record,
+			      uint16_t vlan_id,
+			      uint16_t next_fcf_index)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2764 READ_FCF_RECORD:\n"
+			"\tFCF_Index     : x%x\n"
+			"\tFCF_Avail     : x%x\n"
+			"\tFCF_Valid     : x%x\n"
+			"\tFIP_Priority  : x%x\n"
+			"\tMAC_Provider  : x%x\n"
+			"\tLowest VLANID : x%x\n"
+			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
+			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tNext_FCF_Index: x%x\n",
+			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
+			fcf_record->fip_priority,
+			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
+			vlan_id,
+			bf_get(lpfc_fcf_record_mac_0, fcf_record),
+			bf_get(lpfc_fcf_record_mac_1, fcf_record),
+			bf_get(lpfc_fcf_record_mac_2, fcf_record),
+			bf_get(lpfc_fcf_record_mac_3, fcf_record),
+			bf_get(lpfc_fcf_record_mac_4, fcf_record),
+			bf_get(lpfc_fcf_record_mac_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
+			next_fcf_index);
+}
+
+/**
+ * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the fcf records available in
+ * HBA and chooses the optimal FCF record for discovery. After finding
+ * the FCF for discovery it registers the FCF record and kicks start
+ * discovery.
+ * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
+ * use an FCF record which matches fabric name and mac address of the
+ * currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF record
+ * used by BOOT_BIOS.
+ */
+void
+lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t fcf_index, next_fcf_index;
+	struct lpfc_fcf_rec *fcf_rec = NULL;
+	uint16_t vlan_id;
+	int rc;
+
+	/* If there is pending FCoE event restart FCF table scan */
+	if (lpfc_check_pending_fcoe_event(phba, 0)) {
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2765 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		/* Let next new FCF event trigger fast failover */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Check the FCF record against the connection list */
 	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
 				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
 	/*
 	 * If the fcf record does not match with connect list entries
-	 * read the next entry.
+	 * read the next entry; otherwise, this is an eligible FCF
+	 * record for round robin FCF failover.
 	 */
-	if (!rc)
+	if (!rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2781 FCF record fcf_index:x%x failed FCF "
+				"connection list check, fcf_avail:x%x, "
+				"fcf_valid:x%x\n",
+				bf_get(lpfc_fcf_record_fcf_index,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_avail,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_valid,
+				       new_fcf_record));
 		goto read_next_fcf;
+	} else {
+		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+		rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+		if (rc)
+			goto read_next_fcf;
+	}
+
 	/*
 	 * If this is not the first FCF discovery of the HBA, use last
 	 * FCF record for the discovery. The condition that a rescan
 	 * matches the in-use FCF record: fabric name, switch name, mac
 	 * address, and vlan_id.
 	 */
-	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock_irq(&phba->hbalock);
 	if (phba->fcf.fcf_flag & FCF_IN_USE) {
 		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
 		    new_fcf_record) &&
@@ -1649,8 +1755,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 			__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 		else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
 			/* If in fast failover, mark it's completed */
-			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
+			phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
+						FCF_DISCOVERY);
+		spin_unlock_irq(&phba->hbalock);
 		goto out;
 	}
 	/*
@@ -1661,7 +1768,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	 * next candidate.
 	 */
 	if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		spin_unlock_irq(&phba->hbalock);
 		goto read_next_fcf;
 	}
 	}
@@ -1669,14 +1776,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	 * Update on failover FCF record only if it's in FCF fast-failover
 	 * period; otherwise, update on current FCF record.
 	 */
-	if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
-		/* Fast FCF failover only to the same fabric name */
-		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
-					new_fcf_record))
-			fcf_rec = &phba->fcf.failover_rec;
-		else
-			goto read_next_fcf;
-	} else
+	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+		fcf_rec = &phba->fcf.failover_rec;
+	else
 		fcf_rec = &phba->fcf.current_rec;
 
 	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
@@ -1689,7 +1791,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 			/* Choose this FCF record */
 			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
 					addr_mode, vlan_id, BOOT_ENABLE);
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
 			goto read_next_fcf;
 		}
 		/*
@@ -1698,20 +1800,19 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		 * the next FCF record.
 		 */
 		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
 			goto read_next_fcf;
 		}
 		/*
 		 * If the new hba FCF record has lower priority value
 		 * than the driver FCF record, use the new record.
 		 */
-		if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) &&
-		    (new_fcf_record->fip_priority < fcf_rec->priority)) {
+		if (new_fcf_record->fip_priority < fcf_rec->priority) {
 			/* Choose this FCF record */
 			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
 					addr_mode, vlan_id, 0);
 		}
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		spin_unlock_irq(&phba->hbalock);
 		goto read_next_fcf;
 	}
 	/*
@@ -1724,7 +1825,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 				BOOT_ENABLE : 0));
 		phba->fcf.fcf_flag |= FCF_AVAILABLE;
 	}
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irq(&phba->hbalock);
 	goto read_next_fcf;
 
 read_next_fcf:
@@ -1740,9 +1841,22 @@ read_next_fcf:
 			 * FCF scan inprogress, and do nothing
 			 */
 			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
-				spin_lock_irqsave(&phba->hbalock, iflags);
+				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+					       "2782 No suitable FCF record "
+					       "found during this round of "
+					       "post FCF rediscovery scan: "
+					       "fcf_evt_tag:x%x, fcf_index: "
+					       "x%x\n",
+					       phba->fcoe_eventtag_at_fcf_scan,
+					       bf_get(lpfc_fcf_record_fcf_index,
+						      new_fcf_record));
+				/*
+				 * Let next new FCF event trigger fast
+				 * failover
+				 */
+				spin_lock_irq(&phba->hbalock);
 				phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-				spin_unlock_irqrestore(&phba->hbalock, iflags);
+				spin_unlock_irq(&phba->hbalock);
 				return;
 			}
 			/*
@@ -1754,16 +1868,23 @@ read_next_fcf:
 			 * record.
 			 */
 
-			/* unregister the current in-use FCF record */
+			/* Unregister the current in-use FCF record */
 			lpfc_unregister_fcf(phba);
-			/* replace in-use record with the new record */
+
+			/* Replace in-use record with the new record */
 			memcpy(&phba->fcf.current_rec,
 			       &phba->fcf.failover_rec,
 			       sizeof(struct lpfc_fcf_rec));
 			/* mark the FCF fast failover completed */
-			spin_lock_irqsave(&phba->hbalock, iflags);
+			spin_lock_irq(&phba->hbalock);
 			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * Set up the initial registered FCF index for FLOGI
+			 * round robin FCF failover.
+			 */
+			phba->fcf.fcf_rr_init_indx =
+				phba->fcf.failover_rec.fcf_indx;
 			/* Register to the new FCF record */
 			lpfc_register_fcf(phba);
 		} else {
@@ -1776,13 +1897,25 @@ read_next_fcf:
1776 return; 1897 return;
1777 /* 1898 /*
1778 * Otherwise, initial scan or post linkdown rescan, 1899 * Otherwise, initial scan or post linkdown rescan,
1779 * register with the best fit FCF record found so 1900 * register with the best FCF record found so far
1780 * far through the scanning process. 1901 * through the FCF scanning process.
1902 */
1903
1904 /* mark the initial FCF discovery completed */
1905 spin_lock_irq(&phba->hbalock);
1906 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
1907 spin_unlock_irq(&phba->hbalock);
1908 /*
1909 * Set up the initial registered FCF index for FLOGI
1910 * round robin FCF failover
1781 */ 1911 */
1912 phba->fcf.fcf_rr_init_indx =
1913 phba->fcf.current_rec.fcf_indx;
1914 /* Register to the new FCF record */
1782 lpfc_register_fcf(phba); 1915 lpfc_register_fcf(phba);
1783 } 1916 }
1784 } else 1917 } else
1785 lpfc_sli4_read_fcf_record(phba, next_fcf_index); 1918 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
1786 return; 1919 return;
1787 1920
1788out: 1921out:
@@ -1793,6 +1926,141 @@ out:
1793} 1926}
1794 1927
1795/** 1928/**
1929 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
1930 * @phba: pointer to lpfc hba data structure.
1931 * @mboxq: pointer to mailbox object.
1932 *
1933 * This is the callback function for FLOGI failure round robin FCF failover
1934 * read FCF record mailbox command from the eligible FCF record bmask for
1935 * performing the failover. If the FCF read back is not valid/available, it
1936 * fails through to retrying FLOGI to the currently registered FCF again.
1937 * Otherwise, if the FCF read back is valid and available, it will set the
1938 * newly read FCF record to the failover FCF record, unregister currently
1939 * registered FCF record, copy the failover FCF record to the current
1940 * FCF record, and then register the current FCF record before proceeding
1941 * to trying FLOGI on the new failover FCF.
1942 */
1943void
1944lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1945{
1946 struct fcf_record *new_fcf_record;
1947 uint32_t boot_flag, addr_mode;
1948 uint16_t next_fcf_index;
1949 uint16_t current_fcf_index;
1950 uint16_t vlan_id;
1951
1952 /* If link state is not up, stop the round robin failover process */
1953 if (phba->link_state < LPFC_LINK_UP) {
1954 spin_lock_irq(&phba->hbalock);
1955 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1956 spin_unlock_irq(&phba->hbalock);
1957 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1958 return;
1959 }
1960
1961 /* Parse the FCF record from the non-embedded mailbox command */
1962 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1963 &next_fcf_index);
1964 if (!new_fcf_record) {
1965 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1966 "2766 Mailbox command READ_FCF_RECORD "
1967			"failed to retrieve an FCF record.\n");
1968 goto out;
1969 }
1970
1971 /* Get the needed parameters from FCF record */
1972 lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
1973 &addr_mode, &vlan_id);
1974
1975 /* Log the FCF record information if turned on */
1976 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
1977 next_fcf_index);
1978
1979 /* Upload new FCF record to the failover FCF record */
1980 spin_lock_irq(&phba->hbalock);
1981 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
1982 new_fcf_record, addr_mode, vlan_id,
1983 (boot_flag ? BOOT_ENABLE : 0));
1984 spin_unlock_irq(&phba->hbalock);
1985
1986 current_fcf_index = phba->fcf.current_rec.fcf_indx;
1987
1988 /* Unregister the current in-use FCF record */
1989 lpfc_unregister_fcf(phba);
1990
1991 /* Replace in-use record with the new record */
1992 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
1993 sizeof(struct lpfc_fcf_rec));
1994
1995 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1996 "2783 FLOGI round robin FCF failover from FCF "
1997 "(index:x%x) to FCF (index:x%x).\n",
1998 current_fcf_index,
1999 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2000
2001out:
2002 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2003 lpfc_register_fcf(phba);
2004}
2005
2006/**
2007 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2008 * @phba: pointer to lpfc hba data structure.
2009 * @mboxq: pointer to mailbox object.
2010 *
2011 * This is the callback function of read FCF record mailbox command for
2012 * updating the eligible FCF bmask for FLOGI failure round robin FCF
2013 * failover when a new FCF event happened. If the FCF read back is
2014 * valid/available and it passes the connection list check, it updates
2015 * the bmask for the eligible FCF record for round robin failover.
2016 */
2017void
2018lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2019{
2020 struct fcf_record *new_fcf_record;
2021 uint32_t boot_flag, addr_mode;
2022 uint16_t fcf_index, next_fcf_index;
2023 uint16_t vlan_id;
2024 int rc;
2025
2026 /* If link state is not up, no need to proceed */
2027 if (phba->link_state < LPFC_LINK_UP)
2028 goto out;
2029
2030 /* If FCF discovery period is over, no need to proceed */
2031 if (phba->fcf.fcf_flag & FCF_DISCOVERY)
2032 goto out;
2033
2034 /* Parse the FCF record from the non-embedded mailbox command */
2035 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2036 &next_fcf_index);
2037 if (!new_fcf_record) {
2038 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2039 "2767 Mailbox command READ_FCF_RECORD "
2040			"failed to retrieve an FCF record.\n");
2041 goto out;
2042 }
2043
2044 /* Check the connection list for eligibility */
2045 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2046 &addr_mode, &vlan_id);
2047
2048 /* Log the FCF record information if turned on */
2049 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2050 next_fcf_index);
2051
2052 if (!rc)
2053 goto out;
2054
2055 /* Update the eligible FCF record index bmask */
2056 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2057 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
2058
2059out:
2060 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2061}
2062
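Both completion handlers above funnel eligible indexes into phba->fcf.fcf_rr_bmask. As a rough sketch of what lpfc_sli4_fcf_rr_index_set()/lpfc_sli4_fcf_rr_index_clear() plausibly reduce to, plus a wrap-around next-index lookup (helper name lpfc_sli4_fcf_rr_next is invented here; the real helpers live in lpfc_sli.c and add logging), built on the standard kernel bitmap primitives:

/* Illustrative sketch only, not the driver's exact code. Assumes
 * fcf_rr_bmask was sized for LPFC_SLI4_FCF_TBL_INDX_MAX bits, as in
 * the allocation hunk in lpfc_init.c below. */
int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
		return -EINVAL;		/* index outside the FCF table */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);	/* mark eligible */
	return 0;
}

void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
		return;
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);	/* drop candidate */
}

/* Hypothetical wrap-around search for the next eligible FCF. */
uint16_t lpfc_sli4_fcf_rr_next(struct lpfc_hba *phba, uint16_t fcf_index)
{
	uint16_t next = find_next_bit(phba->fcf.fcf_rr_bmask,
				      LPFC_SLI4_FCF_TBL_INDX_MAX,
				      fcf_index + 1);
	if (next >= LPFC_SLI4_FCF_TBL_INDX_MAX)
		next = find_first_bit(phba->fcf.fcf_rr_bmask,
				      LPFC_SLI4_FCF_TBL_INDX_MAX);
	return next;	/* == INDX_MAX when the mask is empty */
}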
2063/**
1796 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 2064 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
1797 * @phba: pointer to lpfc hba data structure. 2065 * @phba: pointer to lpfc hba data structure.
1798 * @mboxq: pointer to mailbox data structure. 2066 * @mboxq: pointer to mailbox data structure.
@@ -2024,8 +2292,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2024 int rc; 2292 int rc;
2025 struct fcf_record *fcf_record; 2293 struct fcf_record *fcf_record;
2026 2294
2027 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2028
2029 spin_lock_irq(&phba->hbalock); 2295 spin_lock_irq(&phba->hbalock);
2030 switch (la->UlnkSpeed) { 2296 switch (la->UlnkSpeed) {
2031 case LA_1GHZ_LINK: 2297 case LA_1GHZ_LINK:
@@ -2117,18 +2383,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2117 spin_unlock_irq(&phba->hbalock); 2383 spin_unlock_irq(&phba->hbalock);
2118 2384
2119 lpfc_linkup(phba); 2385 lpfc_linkup(phba);
2120 if (sparam_mbox) { 2386 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2121 lpfc_read_sparam(phba, sparam_mbox, 0); 2387 if (!sparam_mbox)
2122 sparam_mbox->vport = vport; 2388 goto out;
2123 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 2389
2124 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 2390 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
2125 if (rc == MBX_NOT_FINISHED) { 2391 if (rc) {
2126 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 2392 mempool_free(sparam_mbox, phba->mbox_mem_pool);
2127 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2393 goto out;
2128 kfree(mp); 2394 }
2129 mempool_free(sparam_mbox, phba->mbox_mem_pool); 2395 sparam_mbox->vport = vport;
2130 goto out; 2396 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
2131 } 2397 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
2398 if (rc == MBX_NOT_FINISHED) {
2399 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
2400 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2401 kfree(mp);
2402 mempool_free(sparam_mbox, phba->mbox_mem_pool);
2403 goto out;
2132 } 2404 }
2133 2405
2134 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { 2406 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
@@ -2186,10 +2458,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2186 spin_unlock_irq(&phba->hbalock); 2458 spin_unlock_irq(&phba->hbalock);
2187 return; 2459 return;
2188 } 2460 }
2461 /* This is the initial FCF discovery scan */
2462 phba->fcf.fcf_flag |= FCF_INIT_DISC;
2189 spin_unlock_irq(&phba->hbalock); 2463 spin_unlock_irq(&phba->hbalock);
2190 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 2464 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
2191 if (rc) 2465 "2778 Start FCF table scan at linkup\n");
2466
2467 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2468 LPFC_FCOE_FCF_GET_FIRST);
2469 if (rc) {
2470 spin_lock_irq(&phba->hbalock);
2471 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
2472 spin_unlock_irq(&phba->hbalock);
2192 goto out; 2473 goto out;
2474 }
2193 } 2475 }
2194 2476
2195 return; 2477 return;
@@ -3379,8 +3661,12 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
3379 shost = lpfc_shost_from_vport(vports[i]); 3661 shost = lpfc_shost_from_vport(vports[i]);
3380 spin_lock_irq(shost->host_lock); 3662 spin_lock_irq(shost->host_lock);
3381 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 3663 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
3382 if (ndlp->nlp_flag & NLP_RPI_VALID) 3664 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3665 /* The mempool_alloc might sleep */
3666 spin_unlock_irq(shost->host_lock);
3383 lpfc_unreg_rpi(vports[i], ndlp); 3667 lpfc_unreg_rpi(vports[i], ndlp);
3668 spin_lock_irq(shost->host_lock);
3669 }
3384 } 3670 }
3385 spin_unlock_irq(shost->host_lock); 3671 spin_unlock_irq(shost->host_lock);
3386 } 3672 }
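The unlock/relock added around lpfc_unreg_rpi() above is the standard drop-the-lock-before-sleeping pattern: mempool_alloc(GFP_KERNEL) inside lpfc_unreg_rpi() may sleep, which is not allowed while holding host_lock with interrupts disabled. A minimal generic sketch of the pattern (names invented, not lpfc code; as in the hunk, it assumes the list is not modified while the lock is dropped):

/* Generic sketch: never call a sleeping allocator under a spinlock. */
spin_lock_irq(&lock);
list_for_each_entry(node, &node_list, list) {
	if (!node_needs_work(node))
		continue;
	spin_unlock_irq(&lock);	/* do_sleeping_work() may block */
	do_sleeping_work(node);	/* e.g. mempool_alloc(pool, GFP_KERNEL) */
	spin_lock_irq(&lock);	/* re-take before touching the list again */
}
spin_unlock_irq(&lock);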
@@ -4756,6 +5042,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
4756 return; 5042 return;
4757 /* Reset HBA FCF states after successful unregister FCF */ 5043 /* Reset HBA FCF states after successful unregister FCF */
4758 phba->fcf.fcf_flag = 0; 5044 phba->fcf.fcf_flag = 0;
5045 phba->fcf.current_rec.flag = 0;
4759 5046
4760 /* 5047 /*
4761 * If driver is not unloading, check if there is any other 5048 * If driver is not unloading, check if there is any other
@@ -4765,13 +5052,21 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
4765 (phba->link_state < LPFC_LINK_UP)) 5052 (phba->link_state < LPFC_LINK_UP))
4766 return; 5053 return;
4767 5054
4768 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 5055 /* This is considered as the initial FCF discovery scan */
5056 spin_lock_irq(&phba->hbalock);
5057 phba->fcf.fcf_flag |= FCF_INIT_DISC;
5058 spin_unlock_irq(&phba->hbalock);
5059 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4769 5060
4770 if (rc) 5061 if (rc) {
5062 spin_lock_irq(&phba->hbalock);
5063 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
5064 spin_unlock_irq(&phba->hbalock);
4771 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5065 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4772 "2553 lpfc_unregister_unused_fcf failed " 5066 "2553 lpfc_unregister_unused_fcf failed "
4773 "to read FCF record HBA state x%x\n", 5067 "to read FCF record HBA state x%x\n",
4774 phba->pport->port_state); 5068 phba->pport->port_state);
5069 }
4775} 5070}
4776 5071
4777/** 5072/**
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d29ac7c317d9..ea44239eeb33 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -350,7 +350,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
350 mb = &pmb->u.mb; 350 mb = &pmb->u.mb;
351 351
352 /* Get login parameters for NID. */ 352 /* Get login parameters for NID. */
353 lpfc_read_sparam(phba, pmb, 0); 353 rc = lpfc_read_sparam(phba, pmb, 0);
354 if (rc) {
355 mempool_free(pmb, phba->mbox_mem_pool);
356 return -ENOMEM;
357 }
358
354 pmb->vport = vport; 359 pmb->vport = vport;
355 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 360 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -359,7 +364,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
359 mb->mbxCommand, mb->mbxStatus); 364 mb->mbxCommand, mb->mbxStatus);
360 phba->link_state = LPFC_HBA_ERROR; 365 phba->link_state = LPFC_HBA_ERROR;
361 mp = (struct lpfc_dmabuf *) pmb->context1; 366 mp = (struct lpfc_dmabuf *) pmb->context1;
362 mempool_free( pmb, phba->mbox_mem_pool); 367 mempool_free(pmb, phba->mbox_mem_pool);
363 lpfc_mbuf_free(phba, mp->virt, mp->phys); 368 lpfc_mbuf_free(phba, mp->virt, mp->phys);
364 kfree(mp); 369 kfree(mp);
365 return -EIO; 370 return -EIO;
@@ -544,7 +549,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
544 mempool_free(pmb, phba->mbox_mem_pool); 549 mempool_free(pmb, phba->mbox_mem_pool);
545 return -EIO; 550 return -EIO;
546 } 551 }
547 } else if (phba->cfg_suppress_link_up == 0) { 552 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
548 lpfc_init_link(phba, pmb, phba->cfg_topology, 553 lpfc_init_link(phba, pmb, phba->cfg_topology,
549 phba->cfg_link_speed); 554 phba->cfg_link_speed);
550 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 555 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -571,6 +576,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
571 } 576 }
572 /* MBOX buffer will be freed in mbox compl */ 577 /* MBOX buffer will be freed in mbox compl */
573 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 578 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
579 if (!pmb) {
580 phba->link_state = LPFC_HBA_ERROR;
581 return -ENOMEM;
582 }
583
574 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 584 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
575 pmb->mbox_cmpl = lpfc_config_async_cmpl; 585 pmb->mbox_cmpl = lpfc_config_async_cmpl;
576 pmb->vport = phba->pport; 586 pmb->vport = phba->pport;
@@ -588,6 +598,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
588 598
589 /* Get Option rom version */ 599 /* Get Option rom version */
590 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 600 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
601 if (!pmb) {
602 phba->link_state = LPFC_HBA_ERROR;
603 return -ENOMEM;
604 }
605
591 lpfc_dump_wakeup_param(phba, pmb); 606 lpfc_dump_wakeup_param(phba, pmb);
592 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 607 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
593 pmb->vport = phba->pport; 608 pmb->vport = phba->pport;
@@ -652,7 +667,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
652 mempool_free(pmb, phba->mbox_mem_pool); 667 mempool_free(pmb, phba->mbox_mem_pool);
653 return -EIO; 668 return -EIO;
654 } 669 }
655 phba->cfg_suppress_link_up = 0; 670 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
656 671
657 return 0; 672 return 0;
658} 673}
@@ -807,6 +822,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
807 LIST_HEAD(aborts); 822 LIST_HEAD(aborts);
808 int ret; 823 int ret;
809 unsigned long iflag = 0; 824 unsigned long iflag = 0;
825 struct lpfc_sglq *sglq_entry = NULL;
826
810 ret = lpfc_hba_down_post_s3(phba); 827 ret = lpfc_hba_down_post_s3(phba);
811 if (ret) 828 if (ret)
812 return ret; 829 return ret;
@@ -822,6 +839,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
822 * list. 839 * list.
823 */ 840 */
824 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 841 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
842 list_for_each_entry(sglq_entry,
843 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
844 sglq_entry->state = SGL_FREED;
845
825 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 846 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
826 &phba->sli4_hba.lpfc_sgl_list); 847 &phba->sli4_hba.lpfc_sgl_list);
827 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 848 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -2178,8 +2199,10 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
2178void 2199void
2179__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2200__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2180{ 2201{
2181 /* Clear pending FCF rediscovery wait timer */ 2202 /* Clear pending FCF rediscovery wait and failover in progress flags */
2182 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2203 phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2204 FCF_DEAD_DISC |
2205 FCF_ACVL_DISC);
2183 /* Now, try to stop the timer */ 2206 /* Now, try to stop the timer */
2184 del_timer(&phba->fcf.redisc_wait); 2207 del_timer(&phba->fcf.redisc_wait);
2185} 2208}
@@ -2576,6 +2599,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2576 init_timer(&vport->els_tmofunc); 2599 init_timer(&vport->els_tmofunc);
2577 vport->els_tmofunc.function = lpfc_els_timeout; 2600 vport->els_tmofunc.function = lpfc_els_timeout;
2578 vport->els_tmofunc.data = (unsigned long)vport; 2601 vport->els_tmofunc.data = (unsigned long)vport;
2602 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
2603 phba->menlo_flag |= HBA_MENLO_SUPPORT;
2604 /* check for menlo minimum sg count */
2605 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
2606 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
2607 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2608 }
2609 }
2579 2610
2580 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2611 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2581 if (error) 2612 if (error)
@@ -2912,6 +2943,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2912 /* FCF rediscovery event to worker thread */ 2943 /* FCF rediscovery event to worker thread */
2913 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2944 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2914 spin_unlock_irq(&phba->hbalock); 2945 spin_unlock_irq(&phba->hbalock);
2946 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2947 "2776 FCF rediscover wait timer expired, post "
2948 "a worker thread event for FCF table scan\n");
2915 /* wake up worker thread */ 2949 /* wake up worker thread */
2916 lpfc_worker_wake_up(phba); 2950 lpfc_worker_wake_up(phba);
2917} 2951}
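The timer handler above follows the usual interrupt-context handoff: it only records the event under the lock and wakes the worker thread, deferring the FCF table scan (which issues mailbox commands and may block) to process context. In outline, with generic shape but the same calls:

/* Sketch of the timer -> worker handoff used above. */
static void redisc_wait_tmo(unsigned long ptr)	/* timer/softirq context */
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;	/* publish the event */
	spin_unlock_irq(&phba->hbalock);
	lpfc_worker_wake_up(phba);	/* scan runs later in process context */
}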
@@ -3183,6 +3217,68 @@ out_free_pmb:
3183} 3217}
3184 3218
3185/** 3219/**
3220 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3221 * @vport: pointer to vport data structure.
3222 *
3223 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3224 * response to a CVL event.
3225 *
3226 * Return the pointer to the ndlp with the vport if successful, otherwise
3227 * return NULL.
3228 **/
3229static struct lpfc_nodelist *
3230lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3231{
3232 struct lpfc_nodelist *ndlp;
3233 struct Scsi_Host *shost;
3234 struct lpfc_hba *phba;
3235
3236 if (!vport)
3237 return NULL;
3238 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3239 if (!ndlp)
3240 return NULL;
3241 phba = vport->phba;
3242 if (!phba)
3243 return NULL;
3244 if (phba->pport->port_state <= LPFC_FLOGI)
3245 return NULL;
3246 /* If virtual link is not yet instantiated ignore CVL */
3247 if (vport->port_state <= LPFC_FDISC)
3248 return NULL;
3249 shost = lpfc_shost_from_vport(vport);
3250 if (!shost)
3251 return NULL;
3252 lpfc_linkdown_port(vport);
3253 lpfc_cleanup_pending_mbox(vport);
3254 spin_lock_irq(shost->host_lock);
3255 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3256 spin_unlock_irq(shost->host_lock);
3257
3258 return ndlp;
3259}
3260
3261/**
3262 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3263 * @vport: pointer to lpfc hba data structure.
3264 *
3265 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3266 * response to a FCF dead event.
3267 **/
3268static void
3269lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3270{
3271 struct lpfc_vport **vports;
3272 int i;
3273
3274 vports = lpfc_create_vport_work_array(phba);
3275 if (vports)
3276 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3277 lpfc_sli4_perform_vport_cvl(vports[i]);
3278 lpfc_destroy_vport_work_array(phba, vports);
3279}
3280
3281/**
3186 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3282 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3187 * @phba: pointer to lpfc hba data structure. 3283 * @phba: pointer to lpfc hba data structure.
3188 * @acqe_link: pointer to the async fcoe completion queue entry. 3284 * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -3198,7 +3294,6 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3198 struct lpfc_vport *vport; 3294 struct lpfc_vport *vport;
3199 struct lpfc_nodelist *ndlp; 3295 struct lpfc_nodelist *ndlp;
3200 struct Scsi_Host *shost; 3296 struct Scsi_Host *shost;
3201 uint32_t link_state;
3202 int active_vlink_present; 3297 int active_vlink_present;
3203 struct lpfc_vport **vports; 3298 struct lpfc_vport **vports;
3204 int i; 3299 int i;
@@ -3208,10 +3303,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3208 switch (event_type) { 3303 switch (event_type) {
3209 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3304 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3210 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3305 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3211 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3306 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3212 "2546 New FCF found index 0x%x tag 0x%x\n", 3307 "2546 New FCF found/FCF parameter modified event: "
3213 acqe_fcoe->index, 3308 "evt_tag:x%x, fcf_index:x%x\n",
3214 acqe_fcoe->event_tag); 3309 acqe_fcoe->event_tag, acqe_fcoe->index);
3310
3215 spin_lock_irq(&phba->hbalock); 3311 spin_lock_irq(&phba->hbalock);
3216 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || 3312 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3217 (phba->hba_flag & FCF_DISC_INPROGRESS)) { 3313 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3222,6 +3318,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3222 spin_unlock_irq(&phba->hbalock); 3318 spin_unlock_irq(&phba->hbalock);
3223 break; 3319 break;
3224 } 3320 }
3321
3225 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3322 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3226 /* 3323 /*
3227 * If fast FCF failover rescan event is pending, 3324 * If fast FCF failover rescan event is pending,
@@ -3232,12 +3329,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3232 } 3329 }
3233 spin_unlock_irq(&phba->hbalock); 3330 spin_unlock_irq(&phba->hbalock);
3234 3331
3235 /* Read the FCF table and re-discover SAN. */ 3332 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3236 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3333 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3334 /*
3335 * During period of FCF discovery, read the FCF
3336 * table record indexed by the event to update
3337 * FCF round robin failover eligible FCF bmask.
3338 */
3339 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3340 LOG_DISCOVERY,
3341 "2779 Read new FCF record with "
3342 "fcf_index:x%x for updating FCF "
3343 "round robin failover bmask\n",
3344 acqe_fcoe->index);
3345 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3346 }
3347
3348 /* Otherwise, scan the entire FCF table and re-discover SAN */
3349 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3350 "2770 Start FCF table scan due to new FCF "
3351 "event: evt_tag:x%x, fcf_index:x%x\n",
3352 acqe_fcoe->event_tag, acqe_fcoe->index);
3353 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3354 LPFC_FCOE_FCF_GET_FIRST);
3237 if (rc) 3355 if (rc)
3238 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3356 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3239 "2547 Read FCF record failed 0x%x\n", 3357 "2547 Issue FCF scan read FCF mailbox "
3240 rc); 3358 "command failed 0x%x\n", rc);
3241 break; 3359 break;
3242 3360
3243 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3361 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3248,47 +3366,63 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3248 break; 3366 break;
3249 3367
3250 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3368 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3251 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3369 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3252 "2549 FCF disconnected from network index 0x%x" 3370 "2549 FCF disconnected from network index 0x%x"
3253 " tag 0x%x\n", acqe_fcoe->index, 3371 " tag 0x%x\n", acqe_fcoe->index,
3254 acqe_fcoe->event_tag); 3372 acqe_fcoe->event_tag);
3255 /* If the event is not for currently used fcf do nothing */ 3373 /* If the event is not for currently used fcf do nothing */
3256 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3374 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3257 break; 3375 break;
3258 /* 3376 /* We request port to rediscover the entire FCF table for
3259	 * Currently, driver support only one FCF - so treat this as	3377	 * a fast recovery from the case that the current FCF record
3260 * a link down, but save the link state because we don't want 3378 * is no longer valid if we are not in the middle of FCF
3261 * it to be changed to Link Down unless it is already down. 3379 * failover process already.
3262 */ 3380 */
3263 link_state = phba->link_state; 3381 spin_lock_irq(&phba->hbalock);
3264 lpfc_linkdown(phba); 3382 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3265 phba->link_state = link_state; 3383 spin_unlock_irq(&phba->hbalock);
3266 /* Unregister FCF if no devices connected to it */ 3384 /* Update FLOGI FCF failover eligible FCF bmask */
3267 lpfc_unregister_unused_fcf(phba); 3385 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3386 break;
3387 }
3388 /* Mark the fast failover process in progress */
3389 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3390 spin_unlock_irq(&phba->hbalock);
3391 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3392 "2771 Start FCF fast failover process due to "
3393 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3394 "\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3395 rc = lpfc_sli4_redisc_fcf_table(phba);
3396 if (rc) {
3397 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3398 LOG_DISCOVERY,
3399				"2772 Issue FCF rediscover mailbox "
3400 "command failed, fail through to FCF "
3401 "dead event\n");
3402 spin_lock_irq(&phba->hbalock);
3403 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3404 spin_unlock_irq(&phba->hbalock);
3405 /*
3406 * Last resort will fail over by treating this
3407 * as a link down to FCF registration.
3408 */
3409 lpfc_sli4_fcf_dead_failthrough(phba);
3410 } else
3411 /* Handling fast FCF failover to a DEAD FCF event
3412			 * is considered equivalent to receiving CVL to all
3413 * vports.
3414 */
3415 lpfc_sli4_perform_all_vport_cvl(phba);
3268 break; 3416 break;
3269 case LPFC_FCOE_EVENT_TYPE_CVL: 3417 case LPFC_FCOE_EVENT_TYPE_CVL:
3270 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3418 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3271 "2718 Clear Virtual Link Received for VPI 0x%x" 3419 "2718 Clear Virtual Link Received for VPI 0x%x"
3272 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3420 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3273 vport = lpfc_find_vport_by_vpid(phba, 3421 vport = lpfc_find_vport_by_vpid(phba,
3274 acqe_fcoe->index - phba->vpi_base); 3422 acqe_fcoe->index - phba->vpi_base);
3275 if (!vport) 3423 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3276 break;
3277 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3278 if (!ndlp) 3424 if (!ndlp)
3279 break; 3425 break;
3280 shost = lpfc_shost_from_vport(vport);
3281 if (phba->pport->port_state <= LPFC_FLOGI)
3282 break;
3283 /* If virtual link is not yet instantiated ignore CVL */
3284 if (vport->port_state <= LPFC_FDISC)
3285 break;
3286
3287 lpfc_linkdown_port(vport);
3288 lpfc_cleanup_pending_mbox(vport);
3289 spin_lock_irq(shost->host_lock);
3290 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3291 spin_unlock_irq(shost->host_lock);
3292 active_vlink_present = 0; 3426 active_vlink_present = 0;
3293 3427
3294 vports = lpfc_create_vport_work_array(phba); 3428 vports = lpfc_create_vport_work_array(phba);
@@ -3311,6 +3445,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3311 * re-instantiate the Vlink using FDISC. 3445 * re-instantiate the Vlink using FDISC.
3312 */ 3446 */
3313 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3447 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3448 shost = lpfc_shost_from_vport(vport);
3314 spin_lock_irq(shost->host_lock); 3449 spin_lock_irq(shost->host_lock);
3315 ndlp->nlp_flag |= NLP_DELAY_TMO; 3450 ndlp->nlp_flag |= NLP_DELAY_TMO;
3316 spin_unlock_irq(shost->host_lock); 3451 spin_unlock_irq(shost->host_lock);
@@ -3321,15 +3456,38 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3321 * Otherwise, we request port to rediscover 3456 * Otherwise, we request port to rediscover
3322 * the entire FCF table for a fast recovery 3457 * the entire FCF table for a fast recovery
3323				 * from the possible case that the current FCF	3458	 * from the possible case that the current FCF
3324 * is no longer valid. 3459 * is no longer valid if we are not already
3460 * in the FCF failover process.
3325 */ 3461 */
3462 spin_lock_irq(&phba->hbalock);
3463 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3464 spin_unlock_irq(&phba->hbalock);
3465 break;
3466 }
3467 /* Mark the fast failover process in progress */
3468 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3469 spin_unlock_irq(&phba->hbalock);
3470 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3471 LOG_DISCOVERY,
3472 "2773 Start FCF fast failover due "
3473 "to CVL event: evt_tag:x%x\n",
3474 acqe_fcoe->event_tag);
3326 rc = lpfc_sli4_redisc_fcf_table(phba); 3475 rc = lpfc_sli4_redisc_fcf_table(phba);
3327 if (rc) 3476 if (rc) {
3477 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3478 LOG_DISCOVERY,
3479 "2774 Issue FCF rediscover "
3480					"mailbox command failed, fail "
3481					"through to CVL event\n");
3482 spin_lock_irq(&phba->hbalock);
3483 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3484 spin_unlock_irq(&phba->hbalock);
3328 /* 3485 /*
3329 * Last resort will be re-try on the 3486 * Last resort will be re-try on the
3330 * the current registered FCF entry. 3487 * the current registered FCF entry.
3331 */ 3488 */
3332 lpfc_retry_pport_discovery(phba); 3489 lpfc_retry_pport_discovery(phba);
3490 }
3333 } 3491 }
3334 break; 3492 break;
3335 default: 3493 default:
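The DEAD and CVL paths above each set their own discovery bit (FCF_DEAD_DISC, FCF_ACVL_DISC) and test the composite FCF_DISCOVERY to detect a failover already in flight. The relationship presumably mirrors the lpfc_sli4.h definitions added by this series; roughly, with the bit values shown here being illustrative:

/* Illustrative layout of the fcf_flag discovery bits (see lpfc_sli4.h). */
#define FCF_INIT_DISC	0x20	/* initial-scan FCF discovery in progress */
#define FCF_DEAD_DISC	0x40	/* fast failover after an FCF DEAD event */
#define FCF_ACVL_DISC	0x80	/* fast failover after all-vport CVL */
#define FCF_DISCOVERY	(FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)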
@@ -3426,11 +3584,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3426 spin_unlock_irq(&phba->hbalock); 3584 spin_unlock_irq(&phba->hbalock);
3427 3585
3428 /* Scan FCF table from the first entry to re-discover SAN */ 3586 /* Scan FCF table from the first entry to re-discover SAN */
3429 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3587 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3588 "2777 Start FCF table scan after FCF "
3589 "rediscovery quiescent period over\n");
3590 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3430 if (rc) 3591 if (rc)
3431 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3592 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3432 "2747 Post FCF rediscovery read FCF record " 3593 "2747 Issue FCF scan read FCF mailbox "
3433 "failed 0x%x\n", rc); 3594 "command failed 0x%x\n", rc);
3434} 3595}
3435 3596
3436/** 3597/**
@@ -3722,6 +3883,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3722 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 3883 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3723 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 3884 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3724 struct lpfc_mqe *mqe; 3885 struct lpfc_mqe *mqe;
3886 int longs;
3725 3887
3726 /* Before proceed, wait for POST done and device ready */ 3888 /* Before proceed, wait for POST done and device ready */
3727 rc = lpfc_sli4_post_status_check(phba); 3889 rc = lpfc_sli4_post_status_check(phba);
@@ -3898,13 +4060,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3898 goto out_free_active_sgl; 4060 goto out_free_active_sgl;
3899 } 4061 }
3900 4062
4063 /* Allocate eligible FCF bmask memory for FCF round robin failover */
4064 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4065 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4066 GFP_KERNEL);
4067 if (!phba->fcf.fcf_rr_bmask) {
4068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4069 "2759 Failed allocate memory for FCF round "
4070 "robin failover bmask\n");
4071 goto out_remove_rpi_hdrs;
4072 }
4073
3901 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4074 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3902 phba->cfg_fcp_eq_count), GFP_KERNEL); 4075 phba->cfg_fcp_eq_count), GFP_KERNEL);
3903 if (!phba->sli4_hba.fcp_eq_hdl) { 4076 if (!phba->sli4_hba.fcp_eq_hdl) {
3904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3905 "2572 Failed allocate memory for fast-path " 4078 "2572 Failed allocate memory for fast-path "
3906 "per-EQ handle array\n"); 4079 "per-EQ handle array\n");
3907 goto out_remove_rpi_hdrs; 4080 goto out_free_fcf_rr_bmask;
3908 } 4081 }
3909 4082
3910 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4083 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -3957,6 +4130,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3957 4130
3958out_free_fcp_eq_hdl: 4131out_free_fcp_eq_hdl:
3959 kfree(phba->sli4_hba.fcp_eq_hdl); 4132 kfree(phba->sli4_hba.fcp_eq_hdl);
4133out_free_fcf_rr_bmask:
4134 kfree(phba->fcf.fcf_rr_bmask);
3960out_remove_rpi_hdrs: 4135out_remove_rpi_hdrs:
3961 lpfc_sli4_remove_rpi_hdrs(phba); 4136 lpfc_sli4_remove_rpi_hdrs(phba);
3962out_free_active_sgl: 4137out_free_active_sgl:
@@ -4002,6 +4177,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4002 lpfc_sli4_remove_rpi_hdrs(phba); 4177 lpfc_sli4_remove_rpi_hdrs(phba);
4003 lpfc_sli4_remove_rpis(phba); 4178 lpfc_sli4_remove_rpis(phba);
4004 4179
4180 /* Free eligible FCF index bmask */
4181 kfree(phba->fcf.fcf_rr_bmask);
4182
4005 /* Free the ELS sgl list */ 4183 /* Free the ELS sgl list */
4006 lpfc_free_active_sgl(phba); 4184 lpfc_free_active_sgl(phba);
4007 lpfc_free_sgl_list(phba); 4185 lpfc_free_sgl_list(phba);
@@ -4397,6 +4575,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4397 4575
4398		/* The list order is used by later block SGL registration */	4576		/* The list order is used by later block SGL registration */
4399 spin_lock_irq(&phba->hbalock); 4577 spin_lock_irq(&phba->hbalock);
4578 sglq_entry->state = SGL_FREED;
4400 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4579 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4401 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4580 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4402 phba->sli4_hba.total_sglq_bufs++; 4581 phba->sli4_hba.total_sglq_bufs++;
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 954ba57970a3..bb59e9273126 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -35,6 +35,7 @@
35#define LOG_VPORT 0x00004000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOF_SECURITY 0x00008000 /* Security events */ 36#define LOF_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 39#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
39 40
40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 41#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 6c4dce1a30ca..1e61ae3bc4eb 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1748,7 +1748,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1748} 1748}
1749 1749
1750/** 1750/**
1751 * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd 1751 * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
1752 * @phba: pointer to lpfc hba data structure. 1752 * @phba: pointer to lpfc hba data structure.
1753 * @fcf_index: index to fcf table. 1753 * @fcf_index: index to fcf table.
1754 * 1754 *
@@ -1759,9 +1759,9 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1759 * NULL. 1759 * NULL.
1760 **/ 1760 **/
1761int 1761int
1762lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba, 1762lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
1763 struct lpfcMboxq *mboxq, 1763 struct lpfcMboxq *mboxq,
1764 uint16_t fcf_index) 1764 uint16_t fcf_index)
1765{ 1765{
1766 void *virt_addr; 1766 void *virt_addr;
1767 dma_addr_t phys_addr; 1767 dma_addr_t phys_addr;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 483fb74bc592..b16bb2c9978b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -620,23 +620,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
620 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 620 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
621 struct lpfc_scsi_buf *psb, *next_psb; 621 struct lpfc_scsi_buf *psb, *next_psb;
622 unsigned long iflag = 0; 622 unsigned long iflag = 0;
623 struct lpfc_iocbq *iocbq;
624 int i;
623 625
624 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); 626 spin_lock_irqsave(&phba->hbalock, iflag);
627 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
625 list_for_each_entry_safe(psb, next_psb, 628 list_for_each_entry_safe(psb, next_psb,
626 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 629 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
627 if (psb->cur_iocbq.sli4_xritag == xri) { 630 if (psb->cur_iocbq.sli4_xritag == xri) {
628 list_del(&psb->list); 631 list_del(&psb->list);
629 psb->exch_busy = 0; 632 psb->exch_busy = 0;
630 psb->status = IOSTAT_SUCCESS; 633 psb->status = IOSTAT_SUCCESS;
631 spin_unlock_irqrestore( 634 spin_unlock(
632 &phba->sli4_hba.abts_scsi_buf_list_lock, 635 &phba->sli4_hba.abts_scsi_buf_list_lock);
633 iflag); 636 spin_unlock_irqrestore(&phba->hbalock, iflag);
634 lpfc_release_scsi_buf_s4(phba, psb); 637 lpfc_release_scsi_buf_s4(phba, psb);
635 return; 638 return;
636 } 639 }
637 } 640 }
638 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 641 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
639 iflag); 642 for (i = 1; i <= phba->sli.last_iotag; i++) {
643 iocbq = phba->sli.iocbq_lookup[i];
644
645 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
646 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
647 continue;
648 if (iocbq->sli4_xritag != xri)
649 continue;
650 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
651 psb->exch_busy = 0;
652 spin_unlock_irqrestore(&phba->hbalock, iflag);
653 return;
654
655 }
656 spin_unlock_irqrestore(&phba->hbalock, iflag);
640} 657}
641 658
642/** 659/**
@@ -1006,6 +1023,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1006 struct scatterlist *sgel = NULL; 1023 struct scatterlist *sgel = NULL;
1007 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1024 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1008 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 1025 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1026 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
1009 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1027 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1010 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 1028 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1011 dma_addr_t physaddr; 1029 dma_addr_t physaddr;
@@ -1056,6 +1074,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1056 physaddr = sg_dma_address(sgel); 1074 physaddr = sg_dma_address(sgel);
1057 if (phba->sli_rev == 3 && 1075 if (phba->sli_rev == 3 &&
1058 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1076 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1077 !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1059 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 1078 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1060 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1079 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1061 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 1080 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -1082,7 +1101,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1082 * explicitly reinitialized since all iocb memory resources are reused. 1101 * explicitly reinitialized since all iocb memory resources are reused.
1083 */ 1102 */
1084 if (phba->sli_rev == 3 && 1103 if (phba->sli_rev == 3 &&
1085 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 1104 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1105 !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1086 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 1106 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1087 /* 1107 /*
1088 * The extended IOCB format can only fit 3 BDE or a BPL. 1108 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -1107,6 +1127,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1107 } else { 1127 } else {
1108 iocb_cmd->un.fcpi64.bdl.bdeSize = 1128 iocb_cmd->un.fcpi64.bdl.bdeSize =
1109 ((num_bde + 2) * sizeof(struct ulp_bde64)); 1129 ((num_bde + 2) * sizeof(struct ulp_bde64));
1130 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1110 } 1131 }
1111 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 1132 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1112 1133
@@ -2079,8 +2100,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2079 2100
2080 if (resp_info & RSP_LEN_VALID) { 2101 if (resp_info & RSP_LEN_VALID) {
2081 rsplen = be32_to_cpu(fcprsp->rspRspLen); 2102 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2082 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || 2103 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
2083 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
2084 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2104 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2085 "2719 Invalid response length: " 2105 "2719 Invalid response length: "
2086 "tgt x%x lun x%x cmnd x%x rsplen x%x\n", 2106 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
@@ -2090,6 +2110,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2090 host_status = DID_ERROR; 2110 host_status = DID_ERROR;
2091 goto out; 2111 goto out;
2092 } 2112 }
2113 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
2114 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2115 "2757 Protocol failure detected during "
2116 "processing of FCP I/O op: "
2117 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
2118 cmnd->device->id,
2119 cmnd->device->lun, cmnd->cmnd[0],
2120 fcprsp->rspInfo3);
2121 host_status = DID_ERROR;
2122 goto out;
2123 }
2093 } 2124 }
2094 2125
2095 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 2126 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35e3b96d4e07..fe6660ca6452 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -494,7 +494,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
494 * 494 *
495	 * Returns sglq pointer = success, NULL = Failure.	495	 * Returns sglq pointer = success, NULL = Failure.
496 **/ 496 **/
497static struct lpfc_sglq * 497struct lpfc_sglq *
498__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 498__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
499{ 499{
500 uint16_t adj_xri; 500 uint16_t adj_xri;
@@ -526,6 +526,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
526 return NULL; 526 return NULL;
527 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; 527 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
528 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 528 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
529 sglq->state = SGL_ALLOCATED;
529 return sglq; 530 return sglq;
530} 531}
531 532
@@ -580,15 +581,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
580 else 581 else
581 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 582 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
582 if (sglq) { 583 if (sglq) {
583 if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) { 584 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
585 (sglq->state != SGL_XRI_ABORTED)) {
584 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 586 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
585 iflag); 587 iflag);
586 list_add(&sglq->list, 588 list_add(&sglq->list,
587 &phba->sli4_hba.lpfc_abts_els_sgl_list); 589 &phba->sli4_hba.lpfc_abts_els_sgl_list);
588 spin_unlock_irqrestore( 590 spin_unlock_irqrestore(
589 &phba->sli4_hba.abts_sgl_list_lock, iflag); 591 &phba->sli4_hba.abts_sgl_list_lock, iflag);
590 } else 592 } else {
593 sglq->state = SGL_FREED;
591 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 594 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
595 }
592 } 596 }
593 597
594 598
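Taken together, the sglq hunks in this file and in lpfc_init.c implement a small state machine for ELS SGL entries; condensed as a sketch (the real enum lives in lpfc_sli4.h, per the diffstat):

/* Condensed sglq lifecycle implied by the hunks above (sketch). */
enum lpfc_sgl_state {
	SGL_FREED,	/* on lpfc_sgl_list; set at init, hba-down, release */
	SGL_ALLOCATED,	/* handed out by __lpfc_sli_get_sglq() */
	SGL_XRI_ABORTED,	/* XRI abort completed; bypass the abts list */
};
/*
 * SGL_FREED -> SGL_ALLOCATED: __lpfc_sli_get_sglq()
 * SGL_ALLOCATED -> abts list: released with LPFC_EXCHANGE_BUSY still set
 *                             and state != SGL_XRI_ABORTED
 * any state -> SGL_FREED:     normal release, or lpfc_hba_down_post_s4()
 */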
@@ -2258,41 +2262,56 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2258 spin_unlock_irqrestore(&phba->hbalock, 2262 spin_unlock_irqrestore(&phba->hbalock,
2259 iflag); 2263 iflag);
2260 } 2264 }
2261 if ((phba->sli_rev == LPFC_SLI_REV4) && 2265 if (phba->sli_rev == LPFC_SLI_REV4) {
2262 (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) { 2266 if (saveq->iocb_flag &
2263 /* Set cmdiocb flag for the exchange 2267 LPFC_EXCHANGE_BUSY) {
2264 * busy so sgl (xri) will not be 2268 /* Set cmdiocb flag for the
2265 * released until the abort xri is 2269 * exchange busy so sgl (xri)
2266 * received from hba, clear the 2270 * will not be released until
2267 * LPFC_DRIVER_ABORTED bit in case 2271 * the abort xri is received
2268 * it was driver initiated abort. 2272 * from hba.
2269 */ 2273 */
2270 spin_lock_irqsave(&phba->hbalock, 2274 spin_lock_irqsave(
2271 iflag); 2275 &phba->hbalock, iflag);
2272 cmdiocbp->iocb_flag &= 2276 cmdiocbp->iocb_flag |=
2273 ~LPFC_DRIVER_ABORTED; 2277 LPFC_EXCHANGE_BUSY;
2274 cmdiocbp->iocb_flag |= 2278 spin_unlock_irqrestore(
2275 LPFC_EXCHANGE_BUSY; 2279 &phba->hbalock, iflag);
2276 spin_unlock_irqrestore(&phba->hbalock, 2280 }
2277 iflag); 2281 if (cmdiocbp->iocb_flag &
2278 cmdiocbp->iocb.ulpStatus = 2282 LPFC_DRIVER_ABORTED) {
2279 IOSTAT_LOCAL_REJECT; 2283 /*
2280 cmdiocbp->iocb.un.ulpWord[4] = 2284 * Clear LPFC_DRIVER_ABORTED
2281 IOERR_ABORT_REQUESTED; 2285 * bit in case it was driver
2282 /* 2286 * initiated abort.
2283 * For SLI4, irsiocb contains NO_XRI 2287 */
2284 * in sli_xritag, it shall not affect 2288 spin_lock_irqsave(
2285 * releasing sgl (xri) process. 2289 &phba->hbalock, iflag);
2286 */ 2290 cmdiocbp->iocb_flag &=
2287 saveq->iocb.ulpStatus = 2291 ~LPFC_DRIVER_ABORTED;
2288 IOSTAT_LOCAL_REJECT; 2292 spin_unlock_irqrestore(
2289 saveq->iocb.un.ulpWord[4] = 2293 &phba->hbalock, iflag);
2290 IOERR_SLI_ABORTED; 2294 cmdiocbp->iocb.ulpStatus =
2291 spin_lock_irqsave(&phba->hbalock, 2295 IOSTAT_LOCAL_REJECT;
2292 iflag); 2296 cmdiocbp->iocb.un.ulpWord[4] =
2293 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2297 IOERR_ABORT_REQUESTED;
2294 spin_unlock_irqrestore(&phba->hbalock, 2298 /*
2295 iflag); 2299 * For SLI4, irsiocb contains
2300 * NO_XRI in sli_xritag, it
2301 * shall not affect releasing
2302 * sgl (xri) process.
2303 */
2304 saveq->iocb.ulpStatus =
2305 IOSTAT_LOCAL_REJECT;
2306 saveq->iocb.un.ulpWord[4] =
2307 IOERR_SLI_ABORTED;
2308 spin_lock_irqsave(
2309 &phba->hbalock, iflag);
2310 saveq->iocb_flag |=
2311 LPFC_DELAY_MEM_FREE;
2312 spin_unlock_irqrestore(
2313 &phba->hbalock, iflag);
2314 }
2296 } 2315 }
2297 } 2316 }
2298 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2317 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -2515,14 +2534,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2515 2534
2516 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2535 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2517 &rspiocbq); 2536 &rspiocbq);
2518 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 2537 if (unlikely(!cmdiocbq))
2519 spin_unlock_irqrestore(&phba->hbalock, 2538 break;
2520 iflag); 2539 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2521 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2540 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2522 &rspiocbq); 2541 if (cmdiocbq->iocb_cmpl) {
2523 spin_lock_irqsave(&phba->hbalock, 2542 spin_unlock_irqrestore(&phba->hbalock, iflag);
2524 iflag); 2543 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2525 } 2544 &rspiocbq);
2545 spin_lock_irqsave(&phba->hbalock, iflag);
2546 }
2526 break; 2547 break;
2527 case LPFC_UNSOL_IOCB: 2548 case LPFC_UNSOL_IOCB:
2528 spin_unlock_irqrestore(&phba->hbalock, iflag); 2549 spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -3091,6 +3112,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3091 3112
3092 /* Check to see if any errors occurred during init */ 3113 /* Check to see if any errors occurred during init */
3093 if ((status & HS_FFERM) || (i >= 20)) { 3114 if ((status & HS_FFERM) || (i >= 20)) {
3115 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3116 "2751 Adapter failed to restart, "
3117 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3118 status,
3119 readl(phba->MBslimaddr + 0xa8),
3120 readl(phba->MBslimaddr + 0xac));
3094 phba->link_state = LPFC_HBA_ERROR; 3121 phba->link_state = LPFC_HBA_ERROR;
3095 retval = 1; 3122 retval = 1;
3096 } 3123 }
@@ -3278,6 +3305,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
3278 if (retval != MBX_SUCCESS) { 3305 if (retval != MBX_SUCCESS) {
3279 if (retval != MBX_BUSY) 3306 if (retval != MBX_BUSY)
3280 mempool_free(pmb, phba->mbox_mem_pool); 3307 mempool_free(pmb, phba->mbox_mem_pool);
3308 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3309 "2752 KILL_BOARD command failed retval %d\n",
3310 retval);
3281 spin_lock_irq(&phba->hbalock); 3311 spin_lock_irq(&phba->hbalock);
3282 phba->link_flag &= ~LS_IGNORE_ERATT; 3312 phba->link_flag &= ~LS_IGNORE_ERATT;
3283 spin_unlock_irq(&phba->hbalock); 3313 spin_unlock_irq(&phba->hbalock);
@@ -4035,7 +4065,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4035 4065
4036lpfc_sli_hba_setup_error: 4066lpfc_sli_hba_setup_error:
4037 phba->link_state = LPFC_HBA_ERROR; 4067 phba->link_state = LPFC_HBA_ERROR;
4038 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4039 "0445 Firmware initialization failed\n"); 4069 "0445 Firmware initialization failed\n");
4040 return rc; 4070 return rc;
4041} 4071}
@@ -4388,7 +4418,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4388 spin_unlock_irq(&phba->hbalock); 4418 spin_unlock_irq(&phba->hbalock);
4389 4419
4390 /* Read the port's service parameters. */ 4420 /* Read the port's service parameters. */
4391 lpfc_read_sparam(phba, mboxq, vport->vpi); 4421 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
4422 if (rc) {
4423 phba->link_state = LPFC_HBA_ERROR;
4424 rc = -ENOMEM;
4425 goto out_free_vpd;
4426 }
4427
4392 mboxq->vport = vport; 4428 mboxq->vport = vport;
4393 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4429 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4394 mp = (struct lpfc_dmabuf *) mboxq->context1; 4430 mp = (struct lpfc_dmabuf *) mboxq->context1;
@@ -4483,6 +4519,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4483 /* Post receive buffers to the device */ 4519 /* Post receive buffers to the device */
4484 lpfc_sli4_rb_setup(phba); 4520 lpfc_sli4_rb_setup(phba);
4485 4521
4522 /* Reset HBA FCF states after HBA reset */
4523 phba->fcf.fcf_flag = 0;
4524 phba->fcf.current_rec.flag = 0;
4525
4486 /* Start the ELS watchdog timer */ 4526 /* Start the ELS watchdog timer */
4487 mod_timer(&vport->els_tmofunc, 4527 mod_timer(&vport->els_tmofunc,
4488 jiffies + HZ * (phba->fc_ratov * 2)); 4528 jiffies + HZ * (phba->fc_ratov * 2));
@@ -7436,6 +7476,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7436{ 7476{
7437 wait_queue_head_t *pdone_q; 7477 wait_queue_head_t *pdone_q;
7438 unsigned long iflags; 7478 unsigned long iflags;
7479 struct lpfc_scsi_buf *lpfc_cmd;
7439 7480
7440 spin_lock_irqsave(&phba->hbalock, iflags); 7481 spin_lock_irqsave(&phba->hbalock, iflags);
7441 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 7482 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7443,6 +7484,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7443 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 7484 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
7444 &rspiocbq->iocb, sizeof(IOCB_t)); 7485 &rspiocbq->iocb, sizeof(IOCB_t));
7445 7486
7487 /* Set the exchange busy flag for task management commands */
7488 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
7489 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
7490 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
7491 cur_iocbq);
7492 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
7493 }
7494
7446 pdone_q = cmdiocbq->context_un.wait_queue; 7495 pdone_q = cmdiocbq->context_un.wait_queue;
7447 if (pdone_q) 7496 if (pdone_q)
7448 wake_up(pdone_q); 7497 wake_up(pdone_q);
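The new exchange-busy propagation relies on cur_iocbq being embedded inside struct lpfc_scsi_buf, so container_of() can walk back from the iocb to its owning SCSI buffer. A minimal, self-contained illustration of that idiom (generic types, not lpfc's):

#include <stddef.h>

/* container_of(): recover the enclosing struct from a member pointer.
 * (The kernel macro, spelled out here for illustration.) */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct scsi_buf {
	int exch_busy;
	struct iocbq { int flags; } cur_iocbq;	/* embedded, as in lpfc */
};

static void mark_busy(struct iocbq *iocb, int busy)
{
	/* walk from the embedded iocb back to its scsi_buf owner */
	struct scsi_buf *buf = container_of(iocb, struct scsi_buf, cur_iocbq);
	buf->exch_busy = busy;
}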
@@ -9061,6 +9110,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
9061 /* Fake the irspiocb and copy necessary response information */ 9110 /* Fake the irspiocb and copy necessary response information */
9062 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 9111 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
9063 9112
9113 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
9114 spin_lock_irqsave(&phba->hbalock, iflags);
9115 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
9116 spin_unlock_irqrestore(&phba->hbalock, iflags);
9117 }
9118
9064 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9119 /* Pass the cmd_iocb and the rsp state to the upper layer */
9065 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9120 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
9066} 9121}
@@ -11941,15 +11996,19 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11941} 11996}
11942 11997
11943/** 11998/**
11944 * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. 11999 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
11945 * @phba: pointer to lpfc hba data structure. 12000 * @phba: pointer to lpfc hba data structure.
11946 * @fcf_index: FCF table entry offset. 12001 * @fcf_index: FCF table entry offset.
11947 * 12002 *
11948 * This routine is invoked to read up to @fcf_num of FCF record from the 12003 * This routine is invoked to scan the entire FCF table by reading FCF
11949	 * device starting with the given @fcf_index.	12004	 * records and processing them one at a time starting from the @fcf_index
12005 * for initial FCF discovery or fast FCF failover rediscovery.
12006 *
12007	 * Return 0 if the mailbox command is submitted successfully, non-zero
12008 * otherwise.
11950 **/ 12009 **/
11951int 12010int
11952lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) 12011lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
11953{ 12012{
11954 int rc = 0, error; 12013 int rc = 0, error;
11955 LPFC_MBOXQ_t *mboxq; 12014 LPFC_MBOXQ_t *mboxq;
@@ -11961,17 +12020,17 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11961 "2000 Failed to allocate mbox for " 12020 "2000 Failed to allocate mbox for "
11962 "READ_FCF cmd\n"); 12021 "READ_FCF cmd\n");
11963 error = -ENOMEM; 12022 error = -ENOMEM;
11964 goto fail_fcfscan; 12023 goto fail_fcf_scan;
11965 } 12024 }
11966 /* Construct the read FCF record mailbox command */ 12025 /* Construct the read FCF record mailbox command */
11967 rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index); 12026 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
11968 if (rc) { 12027 if (rc) {
11969 error = -EINVAL; 12028 error = -EINVAL;
11970 goto fail_fcfscan; 12029 goto fail_fcf_scan;
11971 } 12030 }
11972 /* Issue the mailbox command asynchronously */ 12031 /* Issue the mailbox command asynchronously */
11973 mboxq->vport = phba->pport; 12032 mboxq->vport = phba->pport;
11974 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; 12033 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
11975 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12034 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11976 if (rc == MBX_NOT_FINISHED) 12035 if (rc == MBX_NOT_FINISHED)
11977 error = -EIO; 12036 error = -EIO;
@@ -11979,9 +12038,13 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11979 spin_lock_irq(&phba->hbalock); 12038 spin_lock_irq(&phba->hbalock);
11980 phba->hba_flag |= FCF_DISC_INPROGRESS; 12039 phba->hba_flag |= FCF_DISC_INPROGRESS;
11981 spin_unlock_irq(&phba->hbalock); 12040 spin_unlock_irq(&phba->hbalock);
12041 /* Reset FCF round robin index bmask for new scan */
12042 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
12043 memset(phba->fcf.fcf_rr_bmask, 0,
12044 sizeof(*phba->fcf.fcf_rr_bmask));
11982 error = 0; 12045 error = 0;
11983 } 12046 }
11984fail_fcfscan: 12047fail_fcf_scan:
11985 if (error) { 12048 if (error) {
11986 if (mboxq) 12049 if (mboxq)
11987 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12050 lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -11994,6 +12057,181 @@ fail_fcfscan:
11994} 12057}
11995 12058
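The renamed lpfc_sli4_fcf_scan_read_fcf_rec() and the two new routines below it share one shape: allocate a mailbox, build the READ_FCF command, issue it asynchronously, and free the mailbox only on the error path, since on success the completion handler owns it. A compilable userspace model of that single-exit idiom; every name here is an illustrative stand-in, not the lpfc API:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct req { int fcf_index; };

static struct req *req_alloc(void) { return malloc(sizeof(struct req)); }
static void req_free(struct req *r) { free(r); }
static int req_build(struct req *r, int index) { r->fcf_index = index; return 0; }
static int req_issue(struct req *r) { printf("read fcf %d\n", r->fcf_index); return 0; }

static int submit_read_fcf(int index)
{
        struct req *r;
        int error = 0;

        r = req_alloc();
        if (!r) {
                error = -ENOMEM;
                goto fail;
        }
        if (req_build(r, index)) {
                error = -EINVAL;
                goto fail;
        }
        if (req_issue(r)) {
                error = -EIO;
                goto fail;
        }
        return 0;       /* success: the completion path now owns r */
fail:
        if (r)          /* free only on failure, mirroring fail_fcf_scan */
                req_free(r);
        return error;
}

int main(void)
{
        return submit_read_fcf(0) ? 1 : 0;
}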
11996/** 12059/**
12060 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
12061 * @phba: pointer to lpfc hba data structure.
12062 * @fcf_index: FCF table entry offset.
12063 *
12064 * This routine is invoked to read an FCF record indicated by @fcf_index
12065 * and to use it for FLOGI round robin FCF failover.
12066 *
12067 * Return 0 if the mailbox command is submitted successfully, non-zero
12068 * otherwise.
12069 **/
12070int
12071lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12072{
12073 int rc = 0, error;
12074 LPFC_MBOXQ_t *mboxq;
12075
12076 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12077 if (!mboxq) {
12078 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12079 "2763 Failed to allocate mbox for "
12080 "READ_FCF cmd\n");
12081 error = -ENOMEM;
12082 goto fail_fcf_read;
12083 }
12084 /* Construct the read FCF record mailbox command */
12085 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12086 if (rc) {
12087 error = -EINVAL;
12088 goto fail_fcf_read;
12089 }
12090 /* Issue the mailbox command asynchronously */
12091 mboxq->vport = phba->pport;
12092 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
12093 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12094 if (rc == MBX_NOT_FINISHED)
12095 error = -EIO;
12096 else
12097 error = 0;
12098
12099fail_fcf_read:
12100 if (error && mboxq)
12101 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12102 return error;
12103}
12104
12105/**
12106 * lpfc_sli4_read_fcf_rec - Read hba fcf record to update eligible fcf bmask.
12107 * @phba: pointer to lpfc hba data structure.
12108 * @fcf_index: FCF table entry offset.
12109 *
12110 * This routine is invoked to read an FCF record indicated by @fcf_index to
12111 * determine whether it's eligible for the FLOGI round robin failover list.
12112 *
12113 * Return 0 if the mailbox command is submitted successfully, non-zero
12114 * otherwise.
12115 **/
12116int
12117lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12118{
12119 int rc = 0, error;
12120 LPFC_MBOXQ_t *mboxq;
12121
12122 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12123 if (!mboxq) {
12124 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12125 "2758 Failed to allocate mbox for "
12126 "READ_FCF cmd\n");
12127 error = -ENOMEM;
12128 goto fail_fcf_read;
12129 }
12130 /* Construct the read FCF record mailbox command */
12131 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12132 if (rc) {
12133 error = -EINVAL;
12134 goto fail_fcf_read;
12135 }
12136 /* Issue the mailbox command asynchronously */
12137 mboxq->vport = phba->pport;
12138 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
12139 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12140 if (rc == MBX_NOT_FINISHED)
12141 error = -EIO;
12142 else
12143 error = 0;
12144
12145fail_fcf_read:
12146 if (error && mboxq)
12147 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12148 return error;
12149}
12150
12151/**
12152 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
12153 * @phba: pointer to lpfc hba data structure.
12154 *
12155 * This routine gets the next eligible FCF record index in a round
12156 * robin fashion. If the next eligible FCF record index equals the
12157 * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
12158 * is returned; otherwise, the next eligible FCF record's index is
12159 * returned.
12160 **/
12161uint16_t
12162lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12163{
12164 uint16_t next_fcf_index;
12165
12166 /* Search from the currently registered FCF index */
12167 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12168 LPFC_SLI4_FCF_TBL_INDX_MAX,
12169 phba->fcf.current_rec.fcf_indx);
12170 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
12171 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
12172 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12173 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
12174 /* Round robin failover stop condition */
12175 if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
12176 return LPFC_FCOE_FCF_NEXT_NONE;
12177
12178 return next_fcf_index;
12179}
12180
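For readers following the new round robin failover, here is a standalone model of the walk implemented above: find the next set bit at or after the current index, wrap to bit 0 when the search runs off the end of the table, and stop once the walk comes back to the index it started from. The toy next_bit() mimics the semantics of the kernel's find_next_bit(), which the driver applies to phba->fcf.fcf_rr_bmask:

#include <stdio.h>

#define TBL_MAX   32            /* stand-in for LPFC_SLI4_FCF_TBL_INDX_MAX */
#define NEXT_NONE 0xFFFF        /* stand-in for LPFC_FCOE_FCF_NEXT_NONE */

/* first set bit >= offset, or size if none: find_next_bit() semantics */
static unsigned int next_bit(unsigned long mask, unsigned int size,
                             unsigned int offset)
{
        for (; offset < size; offset++)
                if (mask & (1UL << offset))
                        return offset;
        return size;
}

static unsigned int rr_next(unsigned long mask, unsigned int cur,
                            unsigned int init)
{
        unsigned int next = next_bit(mask, TBL_MAX, cur);

        if (next >= TBL_MAX)                    /* wrap around to bit 0 */
                next = next_bit(mask, TBL_MAX, 0);
        if (next == init)                       /* full circle: stop */
                return NEXT_NONE;
        return next;
}

int main(void)
{
        unsigned long eligible = (1UL << 3) | (1UL << 9) | (1UL << 20);

        printf("%u\n", rr_next(eligible, 10, 3)); /* prints 20 */
        printf("%u\n", rr_next(eligible, 21, 3)); /* wraps to 3 == init: 65535 */
        return 0;
}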
12181/**
12182 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
12183 * @phba: pointer to lpfc hba data structure.
12184 *
12185 * This routine sets the FCF record index into the eligible bmask for
12186 * round robin failover search. It checks to make sure that the index
12187 * does not go beyond the range of the driver-allocated bmask dimension
12188 * before setting the bit.
12189 *
12190 * Returns 0 if the index bit is successfully set; otherwise, it returns
12191 * -EINVAL.
12192 **/
12193int
12194lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12195{
12196 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12197 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12198 "2610 HBA FCF index reached driver's "
12199 "book keeping dimension: fcf_index:%d, "
12200 "driver_bmask_max:%d\n",
12201 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12202 return -EINVAL;
12203 }
12204 /* Set the eligible FCF record index bmask */
12205 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12206
12207 return 0;
12208}
12209
12210/**
12211 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
12212 * @phba: pointer to lpfc hba data structure.
12213 *
12214 * This routine clears the FCF record index from the eligible bmask for
12215 * round robin failover search. It checks to make sure that the index
12216 * does not go beyond the range of the driver-allocated bmask dimension
12217 * before clearing the bit.
12218 **/
12219void
12220lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12221{
12222 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12223 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12224 "2762 HBA FCF index goes beyond driver's "
12225 "book keeping dimension: fcf_index:%d, "
12226 "driver_bmask_max:%d\n",
12227 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12228 return;
12229 }
12230 /* Clear the eligible FCF record index bmask */
12231 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12232}
12233
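Both bmask helpers above validate the index before touching the bitmap, because a set_bit() or clear_bit() past the end of the driver-allocated words would corrupt adjacent memory. A compilable single-word model of the pair; the real routines also log the rejected index, and LPFC_SLI4_FCF_TBL_INDX_MAX bounds the table:

#include <errno.h>
#include <stdio.h>

#define TBL_MAX 32      /* stand-in for LPFC_SLI4_FCF_TBL_INDX_MAX */

static int rr_index_set(unsigned long *mask, unsigned int index)
{
        if (index >= TBL_MAX)   /* beyond the bookkeeping dimension */
                return -EINVAL;
        *mask |= 1UL << index;
        return 0;
}

static void rr_index_clear(unsigned long *mask, unsigned int index)
{
        if (index >= TBL_MAX)
                return;
        *mask &= ~(1UL << index);
}

int main(void)
{
        unsigned long mask = 0;

        rr_index_set(&mask, 5);
        rr_index_clear(&mask, 5);
        printf("%d\n", rr_index_set(&mask, 40));        /* prints -EINVAL */
        return 0;
}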
12234/**
11997 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 12235 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
11998 * @phba: pointer to lpfc hba data structure. 12236 * @phba: pointer to lpfc hba data structure.
11999 * 12237 *
@@ -12014,21 +12252,40 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12014 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 12252 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12015 &redisc_fcf->header.cfg_shdr.response); 12253 &redisc_fcf->header.cfg_shdr.response);
12016 if (shdr_status || shdr_add_status) { 12254 if (shdr_status || shdr_add_status) {
12017 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12255 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12018 "2746 Requesting for FCF rediscovery failed " 12256 "2746 Requesting for FCF rediscovery failed "
12019 "status x%x add_status x%x\n", 12257 "status x%x add_status x%x\n",
12020 shdr_status, shdr_add_status); 12258 shdr_status, shdr_add_status);
12021 /* 12259 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
12022 * Request failed, last resort to re-try current 12260 spin_lock_irq(&phba->hbalock);
12023 * registered FCF entry 12261 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
12024 */ 12262 spin_unlock_irq(&phba->hbalock);
12025 lpfc_retry_pport_discovery(phba); 12263 /*
12026 } else 12264 * CVL event triggered FCF rediscover request failed,
12265 * last resort is to retry the currently registered FCF entry.
12266 */
12267 lpfc_retry_pport_discovery(phba);
12268 } else {
12269 spin_lock_irq(&phba->hbalock);
12270 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
12271 spin_unlock_irq(&phba->hbalock);
12272 /*
12273 * DEAD FCF event triggered FCF rediscover request
12274 * failed; the last resort is to fail over as a link down
12275 * to FCF registration.
12276 */
12277 lpfc_sli4_fcf_dead_failthrough(phba);
12278 }
12279 } else {
12280 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12281 "2775 Start FCF rediscovery quiescent period "
12282 "wait timer before scaning FCF table\n");
12027 /* 12283 /*
12028 * Start FCF rediscovery wait timer for pending FCF 12284 * Start FCF rediscovery wait timer for pending FCF
12029 * before rescanning the FCF record table. 12285 * before rescanning the FCF record table.
12030 */ 12286 */
12031 lpfc_fcf_redisc_wait_start_timer(phba); 12287 lpfc_fcf_redisc_wait_start_timer(phba);
12288 }
12032 12289
12033 mempool_free(mbox, phba->mbox_mem_pool); 12290 mempool_free(mbox, phba->mbox_mem_pool);
12034} 12291}
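The reworked failure branch above chooses its last resort by which event requested the rediscovery: a CVL-triggered request falls back to retrying the currently registered FCF entry, while a DEAD-triggered one fails through as a link down. A simplified compilable model of that selection, with locking and the mailbox plumbing omitted and the flag values copied from lpfc_sli4.h below:

#include <stdio.h>

#define FCF_DEAD_DISC 0x20
#define FCF_ACVL_DISC 0x40

static void retry_pport_discovery(void) { puts("retry current FCF"); }
static void fcf_dead_failthrough(void)  { puts("fail through as link down"); }

static void redisc_request_failed(unsigned int *fcf_flag)
{
        if (*fcf_flag & FCF_ACVL_DISC) {
                *fcf_flag &= ~FCF_ACVL_DISC;    /* CVL-triggered request */
                retry_pport_discovery();
        } else {
                *fcf_flag &= ~FCF_DEAD_DISC;    /* DEAD-triggered request */
                fcf_dead_failthrough();
        }
}

int main(void)
{
        unsigned int flag = FCF_DEAD_DISC;

        redisc_request_failed(&flag);   /* prints "fail through as link down" */
        return 0;
}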
@@ -12047,6 +12304,9 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12047 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 12304 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12048 int rc, length; 12305 int rc, length;
12049 12306
12307 /* Cancel the retry delay timers on all vports before FCF rediscovery */
12308 lpfc_cancel_all_vport_retry_delay_timer(phba);
12309
12050 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12310 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12051 if (!mbox) { 12311 if (!mbox) {
12052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12078,6 +12338,31 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12078} 12338}
12079 12339
12080/** 12340/**
12341 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
12342 * @phba: pointer to lpfc hba data structure.
12343 *
12344 * This function is the last-resort failover routine for the FCF DEAD
12345 * event, used when the driver fails to perform fast FCF failover.
12346 **/
12347void
12348lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
12349{
12350 uint32_t link_state;
12351
12352 /*
12353 * As a last resort, the FCF DEAD event failover is treated as
12354 * a link down; save the link state first so it is not changed
12355 * to Link Down unless it is already down.
12356 */
12357 link_state = phba->link_state;
12358 lpfc_linkdown(phba);
12359 phba->link_state = link_state;
12360
12361 /* Unregister FCF if no devices connected to it */
12362 lpfc_unregister_unused_fcf(phba);
12363}
12364
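The save/restore step in lpfc_sli4_fcf_dead_failthrough() is worth isolating: the link-down teardown runs for its cleanup side effects, then the recorded state is written back so the port is not left marked down unless it already was. A toy version with stand-in types:

#include <stdio.h>

enum link_state { LINK_DOWN, LINK_UP };

static enum link_state hba_link = LINK_UP;

static void linkdown_teardown(void)
{
        hba_link = LINK_DOWN;   /* teardown normally forces the state down */
}

static void dead_failthrough(void)
{
        enum link_state saved = hba_link;

        linkdown_teardown();
        hba_link = saved;       /* keep the cleanup, undo the state change */
}

int main(void)
{
        dead_failthrough();
        printf("link is %s\n", hba_link == LINK_UP ? "up" : "down");
        return 0;
}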
12365/**
12081 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 12366 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
12082 * @phba: pointer to lpfc hba data structure. 12367 * @phba: pointer to lpfc hba data structure.
12083 * 12368 *
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index dfcf5437d1f5..b4a639c47616 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -62,6 +62,7 @@ struct lpfc_iocbq {
62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
63#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ 63#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
64#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ 64#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
65#define DSS_SECURITY_OP 0x100 /* security IO */
65 66
66#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 67#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
67#define LPFC_FIP_ELS_ID_SHIFT 14 68#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 86308836600f..4a35e7b9bc5b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -153,15 +153,27 @@ struct lpfc_fcf {
153#define FCF_REGISTERED 0x02 /* FCF registered with FW */ 153#define FCF_REGISTERED 0x02 /* FCF registered with FW */
154#define FCF_SCAN_DONE 0x04 /* FCF table scan done */ 154#define FCF_SCAN_DONE 0x04 /* FCF table scan done */
155#define FCF_IN_USE 0x08 /* At least one discovery completed */ 155#define FCF_IN_USE 0x08 /* At least one discovery completed */
156#define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */ 156#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
157#define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */ 157#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */
158#define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */ 158#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */
159#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
160#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
161#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
162#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
159 uint32_t addr_mode; 163 uint32_t addr_mode;
164 uint16_t fcf_rr_init_indx;
160 struct lpfc_fcf_rec current_rec; 165 struct lpfc_fcf_rec current_rec;
161 struct lpfc_fcf_rec failover_rec; 166 struct lpfc_fcf_rec failover_rec;
162 struct timer_list redisc_wait; 167 struct timer_list redisc_wait;
168 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
163}; 169};
164 170
171/*
172 * Maximum FCF table index; it is for driver internal bookkeeping and
173 * just needs to be no less than the supported HBA's FCF table size.
174 */
175#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
176
165#define LPFC_REGION23_SIGNATURE "RG23" 177#define LPFC_REGION23_SIGNATURE "RG23"
166#define LPFC_REGION23_VERSION 1 178#define LPFC_REGION23_VERSION 1
167#define LPFC_REGION23_LAST_REC 0xff 179#define LPFC_REGION23_LAST_REC 0xff
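The reshuffled flag block above adds three discovery-source bits and a composite FCF_DISCOVERY mask, so a single test covers "any discovery in progress" while the individual bits still record which event started it. A quick compilable illustration; the flag values are copied from the hunk, the surrounding code is a stand-in:

#include <stdio.h>

#define FCF_INIT_DISC 0x10
#define FCF_DEAD_DISC 0x20
#define FCF_ACVL_DISC 0x40
#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)

int main(void)
{
        unsigned int fcf_flag = FCF_DEAD_DISC;

        /* one test for all three discovery sources */
        if (fcf_flag & FCF_DISCOVERY)
                printf("discovery in progress, flags 0x%x\n", fcf_flag);
        return 0;
}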
@@ -431,11 +443,18 @@ enum lpfc_sge_type {
431 SCSI_BUFF_TYPE 443 SCSI_BUFF_TYPE
432}; 444};
433 445
446enum lpfc_sgl_state {
447 SGL_FREED,
448 SGL_ALLOCATED,
449 SGL_XRI_ABORTED
450};
451
434struct lpfc_sglq { 452struct lpfc_sglq {
435 /* lpfc_sglqs are used in double linked lists */ 453 /* lpfc_sglqs are used in double linked lists */
436 struct list_head list; 454 struct list_head list;
437 struct list_head clist; 455 struct list_head clist;
438 enum lpfc_sge_type buff_type; /* is this a scsi sgl */ 456 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
457 enum lpfc_sgl_state state;
439 uint16_t iotag; /* pre-assigned IO tag */ 458 uint16_t iotag; /* pre-assigned IO tag */
440 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 459 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
441 struct sli4_sge *sgl; /* pre-assigned SGL */ 460 struct sli4_sge *sgl; /* pre-assigned SGL */
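The new lpfc_sgl_state enum gives each SGL an explicit lifecycle: FREED to ALLOCATED when it is handed out, then either straight back to FREED on a normal completion or via XRI_ABORTED while the hardware still owns the exchange and the abort event has not yet arrived. A minimal sketch of that intent, not driver code:

#include <stdio.h>

enum sgl_state { SGL_FREED, SGL_ALLOCATED, SGL_XRI_ABORTED };

/* release an allocated SGL; park it if the XRI is still busy */
static enum sgl_state sgl_release(enum sgl_state s, int xri_busy)
{
        if (s != SGL_ALLOCATED)
                return s;
        return xri_busy ? SGL_XRI_ABORTED : SGL_FREED;
}

int main(void)
{
        enum sgl_state s = SGL_ALLOCATED;

        s = sgl_release(s, 1);                  /* -> SGL_XRI_ABORTED */
        if (s == SGL_XRI_ABORTED)
                s = SGL_FREED;                  /* abort event finally lands */
        printf("%s\n", s == SGL_FREED ? "freed" : "busy");
        return 0;
}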
@@ -463,8 +482,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
463void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); 482void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
464void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, 483void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
465 struct lpfc_mbx_sge *); 484 struct lpfc_mbx_sge *);
466int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *, 485int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
467 uint16_t); 486 uint16_t);
468 487
469void lpfc_sli4_hba_reset(struct lpfc_hba *); 488void lpfc_sli4_hba_reset(struct lpfc_hba *);
470struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, 489struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -523,8 +542,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
523uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); 542uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
524uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); 543uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
525void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); 544void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
526int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); 545int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
527void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); 546int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
547int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
548void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
549void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
550void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
551int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
528int lpfc_sli4_post_status_check(struct lpfc_hba *); 552int lpfc_sli4_post_status_check(struct lpfc_hba *);
529uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); 553uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
530 554
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ac276aa46fba..013deec5dae8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.9" 21#define LPFC_DRIVER_VERSION "8.3.10"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index dc86e873102a..869f76cbc58a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -123,7 +123,12 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
123 } 123 }
124 mb = &pmb->u.mb; 124 mb = &pmb->u.mb;
125 125
126 lpfc_read_sparam(phba, pmb, vport->vpi); 126 rc = lpfc_read_sparam(phba, pmb, vport->vpi);
127 if (rc) {
128 mempool_free(pmb, phba->mbox_mem_pool);
129 return -ENOMEM;
130 }
131
127 /* 132 /*
128 * Grab buffer pointer and clear context1 so we can use 133 * Grab buffer pointer and clear context1 so we can use
129 * lpfc_sli_issue_box_wait 134 * lpfc_sli_issue_box_wait
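The lpfc_vport.c hunk above makes lpfc_read_sparam()'s return value matter: if building the command fails, the mailbox is freed instead of leaking. The shape of that fix, modeled in plain C with illustrative names only:

#include <errno.h>
#include <stdlib.h>

struct mbox { int cmd; };

/* stand-in for lpfc_read_sparam(): may fail to set up the command */
static int build_read_sparam(struct mbox *m, int vpi)
{
        if (vpi < 0)
                return -ENOMEM;
        m->cmd = vpi;
        return 0;
}

static int get_sparam(int vpi)
{
        struct mbox *m = malloc(sizeof(*m));
        int rc;

        if (!m)
                return -ENOMEM;
        rc = build_read_sparam(m, vpi);
        if (rc) {
                free(m);        /* the fix: release the mailbox on failure */
                return rc;
        }
        /* ... issue the command and wait, then release ... */
        free(m);
        return 0;
}

int main(void)
{
        return get_sparam(0) ? 1 : 0;
}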