author    Jiri Kosina <jkosina@suse.cz>  2010-04-22 20:08:44 -0400
committer Jiri Kosina <jkosina@suse.cz>  2010-04-22 20:08:44 -0400
commit    6c9468e9eb1252eaefd94ce7f06e1be9b0b641b1 (patch)
tree      797676a336b050bfa1ef879377c07e541b9075d6 /drivers/scsi/lpfc
parent    4cb3ca7cd7e2cae8d1daf5345ec99a1e8502cf3f (diff)
parent    c81eddb0e3728661d1585fbc564449c94165cc36 (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'drivers/scsi/lpfc')
 -rw-r--r-- drivers/scsi/lpfc/lpfc.h           |  10
 -rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c      |   8
 -rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c       | 337
 -rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.h       |  12
 -rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h      |   7
 -rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c        |   1
 -rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c   |   1
 -rw-r--r-- drivers/scsi/lpfc/lpfc_els.c       | 143
 -rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c   | 528
 -rw-r--r-- drivers/scsi/lpfc/lpfc_init.c      | 278
 -rw-r--r-- drivers/scsi/lpfc/lpfc_logmsg.h    |   1
 -rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c      |   9
 -rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c       |   1
 -rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c |   1
 -rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c      |  50
 -rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c       | 414
 -rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h       |   1
 -rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h      |  38
 -rw-r--r-- drivers/scsi/lpfc/lpfc_version.h   |   2
 -rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c     |   8
 20 files changed, 1571 insertions(+), 279 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 84b696463a58..565e16dd74fc 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -37,6 +37,9 @@ struct lpfc_sli2_slim;
 	   the NameServer before giving up. */
 #define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
 #define LPFC_DEFAULT_SG_SEG_CNT	64	/* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128	/* sg element count per scsi
+		cmnd for menlo needs nearly twice as for firmware
+		downloads using bsg */
 #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
 #define LPFC_MAX_PROT_SG_SEG_CNT 4096	/* prot sg element count per scsi cmd*/
@@ -509,7 +512,6 @@ struct lpfc_hba {
 	int (*lpfc_hba_down_link)
 		(struct lpfc_hba *);
 
-
 	/* SLI4 specific HBA data structure */
 	struct lpfc_sli4_hba sli4_hba;
 
@@ -623,6 +625,9 @@ struct lpfc_hba {
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_aer_support;
 	uint32_t cfg_suppress_link_up;
+#define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
+#define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
+#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -804,6 +809,9 @@ struct lpfc_hba {
 	struct list_head ct_ev_waiters;
 	struct unsol_rcv_ct_ctx ct_ctx[64];
 	uint32_t ctx_idx;
+
+	uint8_t menlo_flag;	/* menlo generic flags */
+#define HBA_MENLO_SUPPORT	0x1 /* HBA supports menlo commands */
 };
 
 static inline struct Scsi_Host *
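
The three LPFC_*_LINK values introduced above select how link bring-up proceeds at initialization. As a minimal illustrative sketch (not part of this commit; the helper name and return conventions are invented), an init path could dispatch on the setting like this:

/* Hypothetical sketch: dispatch on cfg_suppress_link_up at HBA init. */
static int demo_handle_suppress_link_up(struct lpfc_hba *phba)
{
	switch (phba->cfg_suppress_link_up) {
	case LPFC_INITIALIZE_LINK:
		return 0;	/* issue the normal init_link mailbox now */
	case LPFC_DELAY_INIT_LINK:
		return -EAGAIN;	/* a layered driver brings the link up later */
	case LPFC_DELAY_INIT_LINK_INDEFINITELY:
		return -EPERM;	/* hold off until manual intervention */
	default:
		return -EINVAL;
	}
}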
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c992e8328f9e..1849e33e68f9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -24,6 +24,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/aer.h>
+#include <linux/gfp.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -1939,7 +1940,9 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
 # 0x2  = never bring up link
 # Default value is 0.
 */
-LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization");
+LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
+		LPFC_DELAY_INIT_LINK_INDEFINITELY,
+		"Suppress Link Up at initialization");
 
 /*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -1966,8 +1969,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
 {
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	int val = 0;
-	val = vport->cfg_devloss_tmo;
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
 }
 
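
The attribute rework above keeps lpfc_suppress_link_up readable through sysfs with its bounds expressed in the named constants. A small userspace sketch for reading such an attribute (the host number and path are examples, not fixed):

#include <stdio.h>

/* Hypothetical sketch: read an lpfc sysfs attribute from userspace. */
int main(void)
{
	/* host0 is illustrative; the SCSI host number varies per system */
	FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_suppress_link_up", "r");
	int val;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &val) == 1)
		printf("suppress_link_up = %d\n", val);	/* 0, 1 or 2 */
	fclose(f);
	return 0;
}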
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f3f1bf1a0a71..d62b3e467926 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/mempool.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/delay.h>
 
 #include <scsi/scsi.h>
@@ -83,15 +84,28 @@ struct lpfc_bsg_mbox {
 	struct fc_bsg_job *set_job;
 };
 
+#define MENLO_DID 0x0000FC0E
+
+struct lpfc_bsg_menlo {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *bmp;
+
+	/* job waiting for this iocb to finish */
+	struct fc_bsg_job *set_job;
+};
+
 #define TYPE_EVT	1
 #define TYPE_IOCB	2
 #define TYPE_MBOX	3
+#define TYPE_MENLO	4
 struct bsg_job_data {
 	uint32_t type;
 	union {
 		struct lpfc_bsg_event *evt;
 		struct lpfc_bsg_iocb iocb;
 		struct lpfc_bsg_mbox mbox;
+		struct lpfc_bsg_menlo menlo;
 	} context_un;
 };
 
@@ -419,7 +433,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 	dd_data = cmdiocbq->context1;
 	/* normal completion and timeout crossed paths, already done */
 	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		return;
 	}
 
@@ -1182,7 +1196,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 	dd_data = cmdiocbq->context1;
 	/* normal completion and timeout crossed paths, already done */
 	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		return;
 	}
 
@@ -2456,6 +2470,18 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 	case MBX_PORT_IOV_CONTROL:
 		break;
 	case MBX_SET_VARIABLE:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"1226 mbox: set_variable 0x%x, 0x%x\n",
+			mb->un.varWords[0],
+			mb->un.varWords[1]);
+		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
+			&& (mb->un.varWords[1] == 1)) {
+			phba->wait_4_mlo_maint_flg = 1;
+		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+			phba->link_flag &= ~LS_LOOPBACK_MODE;
+			phba->fc_topology = TOPOLOGY_PT_PT;
+		}
+		break;
 	case MBX_RUN_BIU_DIAG64:
 	case MBX_READ_EVENT_LOG:
 	case MBX_READ_SPARM64:
@@ -2638,6 +2664,297 @@ job_error:
 }
 
 /**
+ * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_menlo_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_bsg_menlo *menlo;
+	unsigned long flags;
+	struct menlo_response *menlo_resp;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	menlo = &dd_data->context_un.menlo;
+	job = menlo->set_job;
+	job->dd_data = NULL; /* so timeout handler does not reply */
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	bmp = menlo->bmp;
+	rspiocbq = menlo->rspiocbq;
+	rsp = &rspiocbq->iocb;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	/* always return the xri, this would be used in the case
+	 * of a menlo download to allow the data to be sent as a continuation
+	 * of the exchange.
+	 */
+	menlo_resp = (struct menlo_response *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+	menlo_resp->xri = rsp->ulpContext;
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	kfree(bmp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
+/**
+ * lpfc_menlo_cmd - send an ioctl for menlo hardware
+ * @job: fc_bsg_job to handle
+ *
+ * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
+ * all the command completions will return the xri for the command.
+ * For menlo data requests a gen request 64 CX is used to continue the exchange
+ * supplied in the menlo request header xri field.
+ **/
+static int
+lpfc_menlo_cmd(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+	IOCB_t *cmd, *rsp;
+	int rc = 0;
+	struct menlo_command *menlo_cmd;
+	struct menlo_response *menlo_resp;
+	struct lpfc_dmabuf *bmp = NULL;
+	int request_nseg;
+	int reply_nseg;
+	struct scatterlist *sgel = NULL;
+	int numbde;
+	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	struct ulp_bde64 *bpl = NULL;
+
+	/* in case no data is returned return just the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct menlo_command)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2784 Received MENLO_CMD request below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2785 Received MENLO_CMD reply below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2786 Adapter does not support menlo "
+				"commands\n");
+		rc = -EPERM;
+		goto no_dd_data;
+	}
+
+	menlo_cmd = (struct menlo_command *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	menlo_resp = (struct menlo_response *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2787 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_dd;
+	}
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!rspiocbq) {
+		rc = -ENOMEM;
+		goto free_cmdiocbq;
+	}
+
+	rsp = &rspiocbq->iocb;
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto free_rspiocbq;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.genreq64.bdl.bdeSize =
+	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
+	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
+	cmd->ulpBdeCount = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpOwner = OWN_CHIP;
+	cmd->ulpLe = 1; /* Limited Edition */
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+	/* We want the firmware to timeout before we do */
+	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
+	cmdiocbq->context3 = bmp;
+	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = rspiocbq;
+	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+		cmd->ulpPU = MENLO_PU; /* 3 */
+		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
+		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
+	} else {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
+		cmd->ulpPU = 1;
+		cmd->un.ulpWord[4] = 0;
+		cmd->ulpContext = menlo_cmd->xri;
+	}
+
+	dd_data->type = TYPE_MENLO;
+	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
+	dd_data->context_un.menlo.rspiocbq = rspiocbq;
+	dd_data->context_un.menlo.set_job = job;
+	dd_data->context_un.menlo.bmp = bmp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+				 MENLO_TIMEOUT - 5);
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+	/* iocb failed so cleanup */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+free_rspiocbq:
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+free_cmdiocbq:
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_bmp:
+	kfree(bmp);
+free_dd:
+	kfree(dd_data);
+no_dd_data:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+/**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
  **/
@@ -2669,6 +2986,10 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
 	case LPFC_BSG_VENDOR_MBOX:
 		rc = lpfc_bsg_mbox_cmd(job);
 		break;
+	case LPFC_BSG_VENDOR_MENLO_CMD:
+	case LPFC_BSG_VENDOR_MENLO_DATA:
+		rc = lpfc_menlo_cmd(job);
+		break;
 	default:
 		rc = -EINVAL;
 		job->reply->reply_payload_rcv_len = 0;
@@ -2728,6 +3049,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 	struct lpfc_bsg_event *evt;
 	struct lpfc_bsg_iocb *iocb;
 	struct lpfc_bsg_mbox *mbox;
+	struct lpfc_bsg_menlo *menlo;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	struct bsg_job_data *dd_data;
 	unsigned long flags;
@@ -2775,6 +3097,17 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		job->job_done(job);
 		break;
+	case TYPE_MENLO:
+		menlo = &dd_data->context_un.menlo;
+		cmdiocb = menlo->cmdiocbq;
+		/* hint to completion handler that the job timed out */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		/* this will call our completion handler */
+		spin_lock_irq(&phba->hbalock);
+		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irq(&phba->hbalock);
+		break;
 	default:
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		break;
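
The new TYPE_MENLO timeout arm reuses the convention the other BSG paths follow: the completion handler and the timeout handler race on the job's dd_data pointer under ct_ev_lock, and whichever side observes it first owns the reply. A condensed sketch of that handshake (simplified; the function name is hypothetical):

/* Hypothetical sketch of the dd_data handshake used by the BSG paths:
 * completion and timeout both take ct_ev_lock; the loser of the race
 * sees a NULL pointer and backs off without replying twice.
 */
static void demo_menlo_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {		/* timeout handler already replied */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}
	job = dd_data->context_un.menlo.set_job;
	job->dd_data = NULL;	/* so the timeout handler does not reply */
	/* ... unmap payloads, fill job->reply, call job->job_done(job) ... */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}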
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 6c8f87e39b98..5bc630819b9e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -31,6 +31,8 @@
 #define LPFC_BSG_VENDOR_DIAG_TEST		5
 #define LPFC_BSG_VENDOR_GET_MGMT_REV		6
 #define LPFC_BSG_VENDOR_MBOX			7
+#define LPFC_BSG_VENDOR_MENLO_CMD		8
+#define LPFC_BSG_VENDOR_MENLO_DATA		9
 
 struct set_ct_event {
 	uint32_t command;
@@ -96,3 +98,13 @@ struct dfc_mbox_req {
 	uint8_t mbOffset;
 };
 
+/* Used for menlo command or menlo data. The xri is only used for menlo data */
+struct menlo_command {
+	uint32_t cmd;
+	uint32_t xri;
+};
+
+struct menlo_response {
+	uint32_t xri;	/* return the xri of the iocb exchange */
+};
+
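
A management tool drives these two vendor requests by placing a menlo_command after the fc_bsg_request vendor header and submitting it through the FC bsg node. A sketch of building the command words (the submission plumbing is elided, and the helper is hypothetical; it assumes the struct above is visible to userspace):

#include <stdint.h>
#include <string.h>

/* Hypothetical sketch: fill the vendor command words for a menlo request. */
struct menlo_command demo_build_menlo_cmd(uint32_t type, uint32_t xri)
{
	struct menlo_command mc;

	memset(&mc, 0, sizeof(mc));
	mc.cmd = type;	/* LPFC_BSG_VENDOR_MENLO_CMD or ..._MENLO_DATA */
	mc.xri = xri;	/* only meaningful for the MENLO_DATA continuation */
	return mc;
}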
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 6f0fb51eb461..5087c4211b43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -63,6 +63,7 @@ void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -221,6 +222,10 @@ void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
 void lpfc_unregister_unused_fcf(struct lpfc_hba *);
 int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
 void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
+void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
+uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
+void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
@@ -385,7 +390,7 @@ void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
 int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
 void lpfc_start_fdiscs(struct lpfc_hba *phba);
 struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
-
+struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN 5
 #define HBA_EVENT_LINK_UP 2
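
The four round robin prototypes above imply a per-HBA record of which FCF indexes are eligible for failover. One plausible backing for such helpers is a bitmask over the FCF table; a sketch under that assumption (the table size, mask name, and wrap policy are invented for illustration, not the driver's actual layout):

#include <linux/bitmap.h>
#include <linux/types.h>

#define DEMO_FCF_TBL_MAX 64	/* assumed table size, illustration only */

static DECLARE_BITMAP(demo_fcf_rr_bmask, DEMO_FCF_TBL_MAX);

/* Hypothetical analogue of lpfc_sli4_fcf_rr_index_set() */
static int demo_fcf_rr_index_set(u16 index)
{
	if (index >= DEMO_FCF_TBL_MAX)
		return -EINVAL;
	set_bit(index, demo_fcf_rr_bmask);
	return 0;
}

/* Hypothetical analogue of lpfc_sli4_fcf_rr_index_clear() */
static void demo_fcf_rr_index_clear(u16 index)
{
	if (index < DEMO_FCF_TBL_MAX)
		clear_bit(index, demo_fcf_rr_bmask);
}

/* Hypothetical analogue of lpfc_sli4_fcf_rr_next_index_get(): walk the
 * mask round robin style from the last used index, wrapping around once.
 */
static u16 demo_fcf_rr_next_index_get(u16 last_index)
{
	u16 next;

	next = find_next_bit(demo_fcf_rr_bmask, DEMO_FCF_TBL_MAX,
			     last_index + 1);
	if (next >= DEMO_FCF_TBL_MAX)
		next = find_first_bit(demo_fcf_rr_bmask, DEMO_FCF_TBL_MAX);
	return next;	/* >= DEMO_FCF_TBL_MAX means no eligible entry */
}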
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c7e921973f66..463b74902ac4 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -25,6 +25,7 @@
 #include <linux/blkdev.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 #include <linux/utsname.h>
 
 #include <scsi/scsi.h>
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 391584183d81..a80d938fafc9 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -24,6 +24,7 @@
 #include <linux/idr.h>
 #include <linux/interrupt.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/ctype.h>
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2a40a6eabf4d..5fbdb22c1899 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -21,6 +21,7 @@
 /* See Fibre Channel protocol T11 FC-LS for details */
 #include <linux/blkdev.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/interrupt.h>
 
 #include <scsi/scsi.h>
@@ -771,6 +772,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
 	struct serv_parm *sp;
+	uint16_t fcf_index;
 	int rc;
 
 	/* Check to see if link went down during discovery */
@@ -788,6 +790,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		vport->port_state);
 
 	if (irsp->ulpStatus) {
+		/*
+		 * In case of FIP mode, perform round robin FCF failover
+		 * due to new FCF discovery
+		 */
+		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
+		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+					"2611 FLOGI failed on registered "
+					"FCF record fcf_index:%d, trying "
+					"to perform round robin failover\n",
+					phba->fcf.current_rec.fcf_indx);
+			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+			if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+				/*
+				 * Exhausted the eligible FCF record list,
+				 * fail through to retry FLOGI on current
+				 * FCF record.
+				 */
+				lpfc_printf_log(phba, KERN_WARNING,
+						LOG_FIP | LOG_ELS,
+						"2760 FLOGI exhausted FCF "
+						"round robin failover list, "
+						"retry FLOGI on the current "
+						"registered FCF index:%d\n",
+						phba->fcf.current_rec.fcf_indx);
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+				spin_unlock_irq(&phba->hbalock);
+			} else {
+				rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
+								   fcf_index);
+				if (rc) {
+					lpfc_printf_log(phba, KERN_WARNING,
+							LOG_FIP | LOG_ELS,
+							"2761 FLOGI round "
+							"robin FCF failover "
+							"read FCF failed "
+							"rc:x%x, fcf_index:"
+							"%d\n", rc,
+						phba->fcf.current_rec.fcf_indx);
+					spin_lock_irq(&phba->hbalock);
+					phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+					spin_unlock_irq(&phba->hbalock);
+				} else
+					goto out;
+			}
+		}
+
 		/* Check for retry */
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 			goto out;
@@ -806,9 +856,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	}
 
 	/* FLOGI failure */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "0100 FLOGI failure Data: x%x x%x "
-			 "x%x\n",
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
 			 irsp->ulpStatus, irsp->un.ulpWord[4],
 			 irsp->ulpTimeout);
 	goto flogifail;
@@ -842,8 +891,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		else
 			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
 
-		if (!rc)
+		if (!rc) {
+			/* Mark the FCF discovery process done */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
+					 "2769 FLOGI successful on FCF record: "
+					 "current_fcf_index:x%x, terminate FCF "
+					 "round robin failover process\n",
+					 phba->fcf.current_rec.fcf_indx);
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			spin_unlock_irq(&phba->hbalock);
 			goto out;
+		}
 	}
 
 flogifail:
@@ -1409,6 +1468,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		goto out;
 	}
 	/* PLOGI failed */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
 	/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 	if (lpfc_error_lost_link(irsp))
 		rc = NLP_STE_FREED_NODE;
@@ -1577,6 +1640,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		goto out;
 	}
 	/* PRLI failed */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
 	/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 	if (lpfc_error_lost_link(irsp))
 		goto out;
@@ -1860,6 +1927,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		goto out;
 	}
 	/* ADISC failed */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
 	/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 	if (!lpfc_error_lost_link(irsp))
 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -2009,6 +2080,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			/* ELS command is being retried */
 			goto out;
 		/* LOGO failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 		if (lpfc_error_lost_link(irsp))
 			goto out;
@@ -5989,7 +6064,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		if (phba->sli_rev < LPFC_SLI_REV4)
 			lpfc_issue_fabric_reglogin(vport);
 		else {
-			lpfc_start_fdiscs(phba);
+			/*
+			 * If the physical port is instantiated using
+			 * FDISC, do not start vport discovery.
+			 */
+			if (vport->port_state != LPFC_FDISC)
+				lpfc_start_fdiscs(phba);
 			lpfc_do_scr_ns_plogi(phba, vport);
 		}
 	} else
@@ -6055,21 +6135,18 @@ mbox_err_exit:
 }
 
 /**
- * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine abort all pending discovery commands and
- * start a timer to retry FLOGI for the physical port
- * discovery.
+ * This routine cancels the retry delay timers to all the vports.
  **/
 void
-lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
 {
 	struct lpfc_vport **vports;
 	struct lpfc_nodelist *ndlp;
-	struct Scsi_Host *shost;
-	int i;
 	uint32_t link_state;
+	int i;
 
 	/* Treat this failure as linkdown for all vports */
 	link_state = phba->link_state;
@@ -6087,13 +6164,30 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
 		}
 		lpfc_destroy_vport_work_array(phba, vports);
 	}
+}
+
+/**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine abort all pending discovery commands and
+ * start a timer to retry FLOGI for the physical port
+ * discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+
+	/* Cancel the all vports retry delay retry timers */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
 
 	/* If fabric require FLOGI, then re-instantiate physical login */
 	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
 	if (!ndlp)
 		return;
 
-
 	shost = lpfc_shost_from_vport(phba->pport);
 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
 	spin_lock_irq(shost->host_lock);
@@ -6219,7 +6313,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		lpfc_mbx_unreg_vpi(vport);
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
 		spin_unlock_irq(shost->host_lock);
 	}
 
@@ -6797,21 +6892,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	unsigned long iflag = 0;
 
-	spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
 	list_for_each_entry_safe(sglq_entry, sglq_next,
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->sli4_xritag == xri) {
 			list_del(&sglq_entry->list);
-			spin_unlock_irqrestore(
-					&phba->sli4_hba.abts_sgl_list_lock,
-					iflag);
-			spin_lock_irqsave(&phba->hbalock, iflag);
-
 			list_add_tail(&sglq_entry->list,
 				&phba->sli4_hba.lpfc_sgl_list);
+			sglq_entry->state = SGL_FREED;
+			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			return;
 		}
 	}
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+	sglq_entry = __lpfc_get_active_sglq(phba, xri);
+	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	sglq_entry->state = SGL_XRI_ABORTED;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
 }
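
Condensed, the FLOGI failure branch added to lpfc_cmpl_els_flogi() above follows a small decision flow; a simplified sketch of that flow (not a drop-in; error logging omitted and the function name is invented):

/* Hypothetical condensation of the round robin FCF failover on FLOGI
 * failure: try the next eligible FCF; if none is left or the read fails,
 * clear FCF_DISCOVERY and fall back to retrying the current FCF.
 */
static void demo_flogi_rr_failover(struct lpfc_hba *phba)
{
	uint16_t fcf_index;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    !(phba->fcf.fcf_flag & FCF_DISCOVERY))
		return;		/* not in FIP FCF discovery */

	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE ||
	    lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index)) {
		/* list exhausted or read failed: end round robin failover */
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* on success, the read completion re-drives FLOGI on the new FCF */
}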
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2359d0bfb734..e1466eec56b7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
 *******************************************************************/
 
 #include <linux/blkdev.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
@@ -1481,8 +1482,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
 int
 lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 {
-	LPFC_MBOXQ_t *mbox;
-	int rc;
 	/*
 	 * If the Link is up and no FCoE events while in the
 	 * FCF discovery, no need to restart FCF discovery.
@@ -1491,86 +1490,70 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
 		return 0;
 
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2768 Pending link or FCF event during current "
+			"handling of the previous event: link_state:x%x, "
+			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
+			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
+			phba->fcoe_eventtag);
+
 	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
 	spin_unlock_irq(&phba->hbalock);
 
-	if (phba->link_state >= LPFC_LINK_UP)
-		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
-	else {
+	if (phba->link_state >= LPFC_LINK_UP) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2780 Restart FCF table scan due to "
+				"pending FCF event:evt_tag_at_scan:x%x, "
+				"evt_tag_current:x%x\n",
+				phba->fcoe_eventtag_at_fcf_scan,
+				phba->fcoe_eventtag);
+		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+	} else {
 		/*
 		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
 		 * flag
 		 */
 		spin_lock_irq(&phba->hbalock);
 		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-		phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
 		spin_unlock_irq(&phba->hbalock);
 	}
 
+	/* Unregister the currently registered FCF if required */
 	if (unreg_fcf) {
 		spin_lock_irq(&phba->hbalock);
 		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
 		spin_unlock_irq(&phba->hbalock);
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (!mbox) {
-			lpfc_printf_log(phba, KERN_ERR,
-					LOG_DISCOVERY|LOG_MBOX,
-					"2610 UNREG_FCFI mbox allocation failed\n");
-			return 1;
-		}
-		lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
-		mbox->vport = phba->pport;
-		mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
-		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-		if (rc == MBX_NOT_FINISHED) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-					"2611 UNREG_FCFI issue mbox failed\n");
-			mempool_free(mbox, phba->mbox_mem_pool);
-		}
+		lpfc_sli4_unregister_fcf(phba);
 	}
-
 	return 1;
 }
 
 /**
- * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
+ * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
  * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
+ * @next_fcf_index: pointer to holder of next fcf index.
 *
- * This function iterate through all the fcf records available in
- * HBA and choose the optimal FCF record for discovery. After finding
- * the FCF for discovery it register the FCF record and kick start
- * discovery.
- * If FCF_IN_USE flag is set in currently used FCF, the routine try to
- * use a FCF record which match fabric name and mac address of the
- * currently used FCF record.
- * If the driver support only one FCF, it will try to use the FCF record
- * used by BOOT_BIOS.
+ * This routine parses the non-embedded fcf mailbox command by performing the
+ * necessarily error checking, non-embedded read FCF record mailbox command
+ * SGE parsing, and endianness swapping.
+ *
+ * Returns the pointer to the new FCF record in the non-embedded mailbox
+ * command DMA memory if successfully, other NULL.
 */
-void
-lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+static struct fcf_record *
+lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+			     uint16_t *next_fcf_index)
 {
 	void *virt_addr;
 	dma_addr_t phys_addr;
-	uint8_t *bytep;
 	struct lpfc_mbx_sge sge;
 	struct lpfc_mbx_read_fcf_tbl *read_fcf;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 	struct fcf_record *new_fcf_record;
-	uint32_t boot_flag, addr_mode;
-	uint32_t next_fcf_index;
-	struct lpfc_fcf_rec *fcf_rec = NULL;
-	unsigned long iflags;
-	uint16_t vlan_id;
-	int rc;
-
-	/* If there is pending FCoE event restart FCF table scan */
-	if (lpfc_check_pending_fcoe_event(phba, 0)) {
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return;
-	}
 
 	/* Get the first SGE entry from the non-embedded DMA memory. This
 	 * routine only uses a single SGE.
@@ -1581,59 +1564,183 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 				"2524 Failed to get the non-embedded SGE "
 				"virtual address\n");
-		goto out;
+		return NULL;
 	}
 	virt_addr = mboxq->sge_array->addr[0];
 
 	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
-				 &shdr->response);
-	/*
-	 * The FCF Record was read and there is no reason for the driver
-	 * to maintain the FCF record data or memory. Instead, just need
-	 * to book keeping the FCFIs can be used.
-	 */
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status) {
-		if (shdr_status == STATUS_FCF_TABLE_EMPTY) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 					"2726 READ_FCF_RECORD Indicates empty "
 					"FCF table.\n");
-		} else {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		else
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 					"2521 READ_FCF_RECORD mailbox failed "
-					"with status x%x add_status x%x, mbx\n",
-					shdr_status, shdr_add_status);
-		}
-		goto out;
+					"with status x%x add_status x%x, "
+					"mbx\n", shdr_status, shdr_add_status);
+		return NULL;
 	}
-	/* Interpreting the returned information of FCF records */
+
+	/* Interpreting the returned information of the FCF record */
 	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
 	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
 			      sizeof(struct lpfc_mbx_read_fcf_tbl));
-	next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
-
+	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
 	new_fcf_record = (struct fcf_record *)(virt_addr +
 			  sizeof(struct lpfc_mbx_read_fcf_tbl));
 	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
 			      sizeof(struct fcf_record));
-	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
 
+	return new_fcf_record;
+}
+
+/**
+ * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record.
+ * @vlan_id: the lowest vlan identifier associated to this fcf record.
+ * @next_fcf_index: the index to the next fcf record in hba's fcf table.
+ *
+ * This routine logs the detailed FCF record if the LOG_FIP loggin is
+ * enabled.
+ **/
+static void
+lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
+			      struct fcf_record *fcf_record,
+			      uint16_t vlan_id,
+			      uint16_t next_fcf_index)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2764 READ_FCF_RECORD:\n"
+			"\tFCF_Index     : x%x\n"
+			"\tFCF_Avail     : x%x\n"
+			"\tFCF_Valid     : x%x\n"
+			"\tFIP_Priority  : x%x\n"
+			"\tMAC_Provider  : x%x\n"
+			"\tLowest VLANID : x%x\n"
+			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
+			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tNext_FCF_Index: x%x\n",
+			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
+			fcf_record->fip_priority,
+			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
+			vlan_id,
+			bf_get(lpfc_fcf_record_mac_0, fcf_record),
+			bf_get(lpfc_fcf_record_mac_1, fcf_record),
+			bf_get(lpfc_fcf_record_mac_2, fcf_record),
+			bf_get(lpfc_fcf_record_mac_3, fcf_record),
+			bf_get(lpfc_fcf_record_mac_4, fcf_record),
+			bf_get(lpfc_fcf_record_mac_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
+			next_fcf_index);
+}
+
+/**
+ * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the fcf records available in
+ * HBA and chooses the optimal FCF record for discovery. After finding
+ * the FCF for discovery it registers the FCF record and kicks start
+ * discovery.
+ * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
+ * use an FCF record which matches fabric name and mac address of the
+ * currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF record
+ * used by BOOT_BIOS.
+ */
+void
+lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t fcf_index, next_fcf_index;
+	struct lpfc_fcf_rec *fcf_rec = NULL;
+	uint16_t vlan_id;
+	int rc;
+
+	/* If there is pending FCoE event restart FCF table scan */
+	if (lpfc_check_pending_fcoe_event(phba, 0)) {
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2765 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		/* Let next new FCF event trigger fast failover */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Check the FCF record against the connection list */
 	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
 				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
 	/*
 	 * If the fcf record does not match with connect list entries
-	 * read the next entry.
+	 * read the next entry; otherwise, this is an eligible FCF
+	 * record for round robin FCF failover.
 	 */
-	if (!rc)
+	if (!rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2781 FCF record fcf_index:x%x failed FCF "
+				"connection list check, fcf_avail:x%x, "
+				"fcf_valid:x%x\n",
+				bf_get(lpfc_fcf_record_fcf_index,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_avail,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_valid,
+				       new_fcf_record));
 		goto read_next_fcf;
+	} else {
+		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+		rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+		if (rc)
+			goto read_next_fcf;
+	}
+
 	/*
 	 * If this is not the first FCF discovery of the HBA, use last
 	 * FCF record for the discovery. The condition that a rescan
 	 * matches the in-use FCF record: fabric name, switch name, mac
 	 * address, and vlan_id.
 	 */
-	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock_irq(&phba->hbalock);
 	if (phba->fcf.fcf_flag & FCF_IN_USE) {
 		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
 					new_fcf_record) &&
@@ -1649,8 +1756,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 				__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 			else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
 				/* If in fast failover, mark it's completed */
-				phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+				phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
+							FCF_DISCOVERY);
+			spin_unlock_irq(&phba->hbalock);
 			goto out;
 		}
 		/*
@@ -1661,7 +1769,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		 * next candidate.
 		 */
 		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
 			goto read_next_fcf;
 		}
 	}
@@ -1669,14 +1777,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	 * Update on failover FCF record only if it's in FCF fast-failover
 	 * period; otherwise, update on current FCF record.
 	 */
-	if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
-		/* Fast FCF failover only to the same fabric name */
-		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
-					new_fcf_record))
-			fcf_rec = &phba->fcf.failover_rec;
-		else
-			goto read_next_fcf;
-	} else
+	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+		fcf_rec = &phba->fcf.failover_rec;
+	else
 		fcf_rec = &phba->fcf.current_rec;
 
 	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
@@ -1689,7 +1792,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 			/* Choose this FCF record */
 			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
 					addr_mode, vlan_id, BOOT_ENABLE);
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
 			goto read_next_fcf;
 		}
 		/*
@@ -1698,20 +1801,19 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		 * the next FCF record.
 		 */
 		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
 			goto read_next_fcf;
 		}
 		/*
 		 * If the new hba FCF record has lower priority value
 		 * than the driver FCF record, use the new record.
 		 */
-		if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) &&
-		    (new_fcf_record->fip_priority < fcf_rec->priority)) {
+		if (new_fcf_record->fip_priority < fcf_rec->priority) {
 			/* Choose this FCF record */
 			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
 					addr_mode, vlan_id, 0);
 		}
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		spin_unlock_irq(&phba->hbalock);
 		goto read_next_fcf;
 	}
 	/*
@@ -1724,7 +1826,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1724 BOOT_ENABLE : 0)); 1826 BOOT_ENABLE : 0));
1725 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1827 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1726 } 1828 }
1727 spin_unlock_irqrestore(&phba->hbalock, iflags); 1829 spin_unlock_irq(&phba->hbalock);
1728 goto read_next_fcf; 1830 goto read_next_fcf;
1729 1831
1730read_next_fcf: 1832read_next_fcf:
@@ -1740,9 +1842,22 @@ read_next_fcf:
1740 * FCF scan in progress, and do nothing 1842
1741 */ 1843 */
1742 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 1844 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
1743 spin_lock_irqsave(&phba->hbalock, iflags); 1845 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1846 "2782 No suitable FCF record "
1847 "found during this round of "
1848 "post FCF rediscovery scan: "
1849 "fcf_evt_tag:x%x, fcf_index: "
1850 "x%x\n",
1851 phba->fcoe_eventtag_at_fcf_scan,
1852 bf_get(lpfc_fcf_record_fcf_index,
1853 new_fcf_record));
1854 /*
1855 * Let next new FCF event trigger fast
1856 * failover
1857 */
1858 spin_lock_irq(&phba->hbalock);
1744 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1859 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1745 spin_unlock_irqrestore(&phba->hbalock, iflags); 1860 spin_unlock_irq(&phba->hbalock);
1746 return; 1861 return;
1747 } 1862 }
1748 /* 1863 /*
@@ -1754,16 +1869,23 @@ read_next_fcf:
1754 * record. 1869 * record.
1755 */ 1870 */
1756 1871
1757 /* unregister the current in-use FCF record */ 1872 /* Unregister the current in-use FCF record */
1758 lpfc_unregister_fcf(phba); 1873 lpfc_unregister_fcf(phba);
1759 /* replace in-use record with the new record */ 1874
1875 /* Replace in-use record with the new record */
1760 memcpy(&phba->fcf.current_rec, 1876 memcpy(&phba->fcf.current_rec,
1761 &phba->fcf.failover_rec, 1877 &phba->fcf.failover_rec,
1762 sizeof(struct lpfc_fcf_rec)); 1878 sizeof(struct lpfc_fcf_rec));
1763 /* mark the FCF fast failover completed */ 1879 /* mark the FCF fast failover completed */
1764 spin_lock_irqsave(&phba->hbalock, iflags); 1880 spin_lock_irq(&phba->hbalock);
1765 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 1881 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
1766 spin_unlock_irqrestore(&phba->hbalock, iflags); 1882 spin_unlock_irq(&phba->hbalock);
1883 /*
1884 * Set up the initial registered FCF index for FLOGI
1885 * round robin FCF failover.
1886 */
1887 phba->fcf.fcf_rr_init_indx =
1888 phba->fcf.failover_rec.fcf_indx;
1767 /* Register to the new FCF record */ 1889 /* Register to the new FCF record */
1768 lpfc_register_fcf(phba); 1890 lpfc_register_fcf(phba);
1769 } else { 1891 } else {
@@ -1776,13 +1898,25 @@ read_next_fcf:
1776 return; 1898 return;
1777 /* 1899 /*
1778 * Otherwise, initial scan or post linkdown rescan, 1900 * Otherwise, initial scan or post linkdown rescan,
1779 * register with the best fit FCF record found so 1901 * register with the best FCF record found so far
1780 * far through the scanning process. 1902 * through the FCF scanning process.
1903 */
1904
1905 /* mark the initial FCF discovery completed */
1906 spin_lock_irq(&phba->hbalock);
1907 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
1908 spin_unlock_irq(&phba->hbalock);
1909 /*
1910 * Set up the initial registered FCF index for FLOGI
1911 * round robin FCF failover
1781 */ 1912 */
1913 phba->fcf.fcf_rr_init_indx =
1914 phba->fcf.current_rec.fcf_indx;
1915 /* Register to the new FCF record */
1782 lpfc_register_fcf(phba); 1916 lpfc_register_fcf(phba);
1783 } 1917 }
1784 } else 1918 } else
1785 lpfc_sli4_read_fcf_record(phba, next_fcf_index); 1919 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
1786 return; 1920 return;
1787 1921
1788out: 1922out:
@@ -1793,6 +1927,141 @@ out:
1793} 1927}
1794 1928
1795/** 1929/**
1930 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl handler
1931 * @phba: pointer to lpfc hba data structure.
1932 * @mboxq: pointer to mailbox object.
1933 *
1934 * This is the callback function for FLOGI failure round robin FCF failover
1935 * read FCF record mailbox command from the eligible FCF record bmask for
1936 * performing the failover. If the FCF read back is not valid/available, it
1937 * fails through to retrying FLOGI to the currently registered FCF again.
1938 * Otherwise, if the FCF read back is valid and available, it will set the
1939 * newly read FCF record to the failover FCF record, unregister currently
1940 * registered FCF record, copy the failover FCF record to the current
1941 * FCF record, and then register the current FCF record before proceeding
1942 * to trying FLOGI on the new failover FCF.
1943 */
1944void
1945lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1946{
1947 struct fcf_record *new_fcf_record;
1948 uint32_t boot_flag, addr_mode;
1949 uint16_t next_fcf_index;
1950 uint16_t current_fcf_index;
1951 uint16_t vlan_id;
1952
1953 /* If link state is not up, stop the round robin failover process */
1954 if (phba->link_state < LPFC_LINK_UP) {
1955 spin_lock_irq(&phba->hbalock);
1956 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1957 spin_unlock_irq(&phba->hbalock);
1958 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1959 return;
1960 }
1961
1962 /* Parse the FCF record from the non-embedded mailbox command */
1963 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1964 &next_fcf_index);
1965 if (!new_fcf_record) {
1966 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1967 "2766 Mailbox command READ_FCF_RECORD "
1968 "failed to retrieve a FCF record.\n");
1969 goto out;
1970 }
1971
1972 /* Get the needed parameters from FCF record */
1973 lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
1974 &addr_mode, &vlan_id);
1975
1976 /* Log the FCF record information if turned on */
1977 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
1978 next_fcf_index);
1979
1980 /* Upload new FCF record to the failover FCF record */
1981 spin_lock_irq(&phba->hbalock);
1982 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
1983 new_fcf_record, addr_mode, vlan_id,
1984 (boot_flag ? BOOT_ENABLE : 0));
1985 spin_unlock_irq(&phba->hbalock);
1986
1987 current_fcf_index = phba->fcf.current_rec.fcf_indx;
1988
1989 /* Unregister the current in-use FCF record */
1990 lpfc_unregister_fcf(phba);
1991
1992 /* Replace in-use record with the new record */
1993 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
1994 sizeof(struct lpfc_fcf_rec));
1995
1996 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1997 "2783 FLOGI round robin FCF failover from FCF "
1998 "(index:x%x) to FCF (index:x%x).\n",
1999 current_fcf_index,
2000 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2001
2002out:
2003 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2004 lpfc_register_fcf(phba);
2005}
2006
2007/**
2008 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2009 * @phba: pointer to lpfc hba data structure.
2010 * @mboxq: pointer to mailbox object.
2011 *
2012 * This is the callback function of read FCF record mailbox command for
2013 * updating the eligible FCF bmask for FLOGI failure round robin FCF
2014 * failover when a new FCF event happens. If the FCF read back is
2015 * valid/available and it passes the connection list check, it updates
2016 * the bmask for the eligible FCF record for round robin failover.
2017 */
2018void
2019lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2020{
2021 struct fcf_record *new_fcf_record;
2022 uint32_t boot_flag, addr_mode;
2023 uint16_t fcf_index, next_fcf_index;
2024 uint16_t vlan_id;
2025 int rc;
2026
2027 /* If link state is not up, no need to proceed */
2028 if (phba->link_state < LPFC_LINK_UP)
2029 goto out;
2030
2031 /* If FCF discovery period is over, no need to proceed */
2032 if (phba->fcf.fcf_flag & FCF_DISCOVERY)
2033 goto out;
2034
2035 /* Parse the FCF record from the non-embedded mailbox command */
2036 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2037 &next_fcf_index);
2038 if (!new_fcf_record) {
2039 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2040 "2767 Mailbox command READ_FCF_RECORD "
2041 "failed to retrieve a FCF record.\n");
2042 goto out;
2043 }
2044
2045 /* Check the connection list for eligibility */
2046 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2047 &addr_mode, &vlan_id);
2048
2049 /* Log the FCF record information if turned on */
2050 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2051 next_fcf_index);
2052
2053 if (!rc)
2054 goto out;
2055
2056 /* Update the eligible FCF record index bmask */
2057 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2058 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
2059
2060out:
2061 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2062}
2063
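Both completion handlers above feed phba->fcf.fcf_rr_bmask, the eligibility bitmap that later drives FLOGI round robin FCF failover. A minimal sketch of how selection over such a bitmap works, using a hypothetical helper rather than the driver's exact lpfc_sli4_fcf_rr_* routines:

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* Next eligible FCF index after @last, wrapping to the start of
     * the bitmap; returns @max when no bit is set at all. */
    static u16 fcf_rr_next_index(const unsigned long *bmask, u16 max, u16 last)
    {
            u16 next = find_next_bit(bmask, max, last + 1);

            if (next >= max)
                    next = find_first_bit(bmask, max);
            return next;
    }

Under this reading, lpfc_sli4_fcf_rr_index_set() and _clear() reduce to bounds-checked set_bit()/clear_bit() calls on the same bitmap, with LPFC_SLI4_FCF_TBL_INDX_MAX as the bound.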
2064/**
1796 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 2065 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
1797 * @phba: pointer to lpfc hba data structure. 2066 * @phba: pointer to lpfc hba data structure.
1798 * @mboxq: pointer to mailbox data structure. 2067 * @mboxq: pointer to mailbox data structure.
@@ -2024,8 +2293,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2024 int rc; 2293 int rc;
2025 struct fcf_record *fcf_record; 2294 struct fcf_record *fcf_record;
2026 2295
2027 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2028
2029 spin_lock_irq(&phba->hbalock); 2296 spin_lock_irq(&phba->hbalock);
2030 switch (la->UlnkSpeed) { 2297 switch (la->UlnkSpeed) {
2031 case LA_1GHZ_LINK: 2298 case LA_1GHZ_LINK:
@@ -2117,18 +2384,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2117 spin_unlock_irq(&phba->hbalock); 2384 spin_unlock_irq(&phba->hbalock);
2118 2385
2119 lpfc_linkup(phba); 2386 lpfc_linkup(phba);
2120 if (sparam_mbox) { 2387 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2121 lpfc_read_sparam(phba, sparam_mbox, 0); 2388 if (!sparam_mbox)
2122 sparam_mbox->vport = vport; 2389 goto out;
2123 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 2390
2124 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 2391 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
2125 if (rc == MBX_NOT_FINISHED) { 2392 if (rc) {
2126 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 2393 mempool_free(sparam_mbox, phba->mbox_mem_pool);
2127 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2394 goto out;
2128 kfree(mp); 2395 }
2129 mempool_free(sparam_mbox, phba->mbox_mem_pool); 2396 sparam_mbox->vport = vport;
2130 goto out; 2397 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
2131 } 2398 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
2399 if (rc == MBX_NOT_FINISHED) {
2400 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
2401 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2402 kfree(mp);
2403 mempool_free(sparam_mbox, phba->mbox_mem_pool);
2404 goto out;
2132 } 2405 }
2133 2406
2134 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { 2407 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
@@ -2186,10 +2459,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2186 spin_unlock_irq(&phba->hbalock); 2459 spin_unlock_irq(&phba->hbalock);
2187 return; 2460 return;
2188 } 2461 }
2462 /* This is the initial FCF discovery scan */
2463 phba->fcf.fcf_flag |= FCF_INIT_DISC;
2189 spin_unlock_irq(&phba->hbalock); 2464 spin_unlock_irq(&phba->hbalock);
2190 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 2465 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
2191 if (rc) 2466 "2778 Start FCF table scan at linkup\n");
2467
2468 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2469 LPFC_FCOE_FCF_GET_FIRST);
2470 if (rc) {
2471 spin_lock_irq(&phba->hbalock);
2472 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
2473 spin_unlock_irq(&phba->hbalock);
2192 goto out; 2474 goto out;
2475 }
2193 } 2476 }
2194 2477
2195 return; 2478 return;
@@ -3379,8 +3662,12 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
3379 shost = lpfc_shost_from_vport(vports[i]); 3662 shost = lpfc_shost_from_vport(vports[i]);
3380 spin_lock_irq(shost->host_lock); 3663 spin_lock_irq(shost->host_lock);
3381 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 3664 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
3382 if (ndlp->nlp_flag & NLP_RPI_VALID) 3665 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3666 /* The mempool_alloc might sleep */
3667 spin_unlock_irq(shost->host_lock);
3383 lpfc_unreg_rpi(vports[i], ndlp); 3668 lpfc_unreg_rpi(vports[i], ndlp);
3669 spin_lock_irq(shost->host_lock);
3670 }
3384 } 3671 }
3385 spin_unlock_irq(shost->host_lock); 3672 spin_unlock_irq(shost->host_lock);
3386 } 3673 }
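The hunk above exists because lpfc_unreg_rpi() ends up in a GFP_KERNEL mempool allocation, which may sleep while host_lock is held as a spinlock. Condensed, the pattern is drop-call-retake, and it is only safe on the assumption that the current list entry cannot be freed while the lock is dropped:

    spin_lock_irq(shost->host_lock);
    list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
            if (!(ndlp->nlp_flag & NLP_RPI_VALID))
                    continue;
            /* about to call into a sleeping allocation */
            spin_unlock_irq(shost->host_lock);
            lpfc_unreg_rpi(vports[i], ndlp);
            /* retake before advancing the list cursor */
            spin_lock_irq(shost->host_lock);
    }
    spin_unlock_irq(shost->host_lock);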
@@ -4756,6 +5043,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
4756 return; 5043 return;
4757 /* Reset HBA FCF states after successful unregister FCF */ 5044 /* Reset HBA FCF states after successful unregister FCF */
4758 phba->fcf.fcf_flag = 0; 5045 phba->fcf.fcf_flag = 0;
5046 phba->fcf.current_rec.flag = 0;
4759 5047
4760 /* 5048 /*
4761 * If driver is not unloading, check if there is any other 5049 * If driver is not unloading, check if there is any other
@@ -4765,13 +5053,21 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
4765 (phba->link_state < LPFC_LINK_UP)) 5053 (phba->link_state < LPFC_LINK_UP))
4766 return; 5054 return;
4767 5055
4768 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 5056 /* This is considered as the initial FCF discovery scan */
5057 spin_lock_irq(&phba->hbalock);
5058 phba->fcf.fcf_flag |= FCF_INIT_DISC;
5059 spin_unlock_irq(&phba->hbalock);
5060 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4769 5061
4770 if (rc) 5062 if (rc) {
5063 spin_lock_irq(&phba->hbalock);
5064 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
5065 spin_unlock_irq(&phba->hbalock);
4771 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5066 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4772 "2553 lpfc_unregister_unused_fcf failed " 5067 "2553 lpfc_unregister_unused_fcf failed "
4773 "to read FCF record HBA state x%x\n", 5068 "to read FCF record HBA state x%x\n",
4774 phba->pport->port_state); 5069 phba->pport->port_state);
5070 }
4775} 5071}
4776 5072
4777/** 5073/**
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d29ac7c317d9..774663e8e1fe 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -29,6 +29,7 @@
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/ctype.h> 30#include <linux/ctype.h>
31#include <linux/aer.h> 31#include <linux/aer.h>
32#include <linux/slab.h>
32 33
33#include <scsi/scsi.h> 34#include <scsi/scsi.h>
34#include <scsi/scsi_device.h> 35#include <scsi/scsi_device.h>
@@ -350,7 +351,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
350 mb = &pmb->u.mb; 351 mb = &pmb->u.mb;
351 352
352 /* Get login parameters for NID. */ 353 /* Get login parameters for NID. */
353 lpfc_read_sparam(phba, pmb, 0); 354 rc = lpfc_read_sparam(phba, pmb, 0);
355 if (rc) {
356 mempool_free(pmb, phba->mbox_mem_pool);
357 return -ENOMEM;
358 }
359
354 pmb->vport = vport; 360 pmb->vport = vport;
355 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 361 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -359,7 +365,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
359 mb->mbxCommand, mb->mbxStatus); 365 mb->mbxCommand, mb->mbxStatus);
360 phba->link_state = LPFC_HBA_ERROR; 366 phba->link_state = LPFC_HBA_ERROR;
361 mp = (struct lpfc_dmabuf *) pmb->context1; 367 mp = (struct lpfc_dmabuf *) pmb->context1;
362 mempool_free( pmb, phba->mbox_mem_pool); 368 mempool_free(pmb, phba->mbox_mem_pool);
363 lpfc_mbuf_free(phba, mp->virt, mp->phys); 369 lpfc_mbuf_free(phba, mp->virt, mp->phys);
364 kfree(mp); 370 kfree(mp);
365 return -EIO; 371 return -EIO;
@@ -544,7 +550,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
544 mempool_free(pmb, phba->mbox_mem_pool); 550 mempool_free(pmb, phba->mbox_mem_pool);
545 return -EIO; 551 return -EIO;
546 } 552 }
547 } else if (phba->cfg_suppress_link_up == 0) { 553 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
548 lpfc_init_link(phba, pmb, phba->cfg_topology, 554 lpfc_init_link(phba, pmb, phba->cfg_topology,
549 phba->cfg_link_speed); 555 phba->cfg_link_speed);
550 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 556 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -571,6 +577,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
571 } 577 }
572 /* MBOX buffer will be freed in mbox compl */ 578 /* MBOX buffer will be freed in mbox compl */
573 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 579 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
580 if (!pmb) {
581 phba->link_state = LPFC_HBA_ERROR;
582 return -ENOMEM;
583 }
584
574 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 585 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
575 pmb->mbox_cmpl = lpfc_config_async_cmpl; 586 pmb->mbox_cmpl = lpfc_config_async_cmpl;
576 pmb->vport = phba->pport; 587 pmb->vport = phba->pport;
@@ -588,6 +599,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
588 599
589 /* Get Option rom version */ 600 /* Get Option rom version */
590 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 601 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
602 if (!pmb) {
603 phba->link_state = LPFC_HBA_ERROR;
604 return -ENOMEM;
605 }
606
591 lpfc_dump_wakeup_param(phba, pmb); 607 lpfc_dump_wakeup_param(phba, pmb);
592 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 608 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
593 pmb->vport = phba->pport; 609 pmb->vport = phba->pport;
@@ -652,7 +668,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
652 mempool_free(pmb, phba->mbox_mem_pool); 668 mempool_free(pmb, phba->mbox_mem_pool);
653 return -EIO; 669 return -EIO;
654 } 670 }
655 phba->cfg_suppress_link_up = 0; 671 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
656 672
657 return 0; 673 return 0;
658} 674}
@@ -807,6 +823,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
807 LIST_HEAD(aborts); 823 LIST_HEAD(aborts);
808 int ret; 824 int ret;
809 unsigned long iflag = 0; 825 unsigned long iflag = 0;
826 struct lpfc_sglq *sglq_entry = NULL;
827
810 ret = lpfc_hba_down_post_s3(phba); 828 ret = lpfc_hba_down_post_s3(phba);
811 if (ret) 829 if (ret)
812 return ret; 830 return ret;
@@ -822,6 +840,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
822 * list. 840 * list.
823 */ 841 */
824 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 842 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
843 list_for_each_entry(sglq_entry,
844 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
845 sglq_entry->state = SGL_FREED;
846
825 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 847 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
826 &phba->sli4_hba.lpfc_sgl_list); 848 &phba->sli4_hba.lpfc_sgl_list);
827 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 849 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -2178,8 +2200,10 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
2178void 2200void
2179__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2201__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2180{ 2202{
2181 /* Clear pending FCF rediscovery wait timer */ 2203 /* Clear pending FCF rediscovery wait and failover in progress flags */
2182 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2204 phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2205 FCF_DEAD_DISC |
2206 FCF_ACVL_DISC);
2183 /* Now, try to stop the timer */ 2207 /* Now, try to stop the timer */
2184 del_timer(&phba->fcf.redisc_wait); 2208 del_timer(&phba->fcf.redisc_wait);
2185} 2209}
@@ -2576,6 +2600,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2576 init_timer(&vport->els_tmofunc); 2600 init_timer(&vport->els_tmofunc);
2577 vport->els_tmofunc.function = lpfc_els_timeout; 2601 vport->els_tmofunc.function = lpfc_els_timeout;
2578 vport->els_tmofunc.data = (unsigned long)vport; 2602 vport->els_tmofunc.data = (unsigned long)vport;
2603 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
2604 phba->menlo_flag |= HBA_MENLO_SUPPORT;
2605 /* check for menlo minimum sg count */
2606 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
2607 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
2608 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2609 }
2610 }
2579 2611
2580 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2612 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2581 if (error) 2613 if (error)
@@ -2912,6 +2944,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2912 /* FCF rediscovery event to worker thread */ 2944 /* FCF rediscovery event to worker thread */
2913 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2945 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2914 spin_unlock_irq(&phba->hbalock); 2946 spin_unlock_irq(&phba->hbalock);
2947 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2948 "2776 FCF rediscover wait timer expired, post "
2949 "a worker thread event for FCF table scan\n");
2915 /* wake up worker thread */ 2950 /* wake up worker thread */
2916 lpfc_worker_wake_up(phba); 2951 lpfc_worker_wake_up(phba);
2917} 2952}
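Only the flagging happens in the timer callback because timer handlers run in atomic (softirq) context and must not sleep; the FCF table scan itself is deferred to the worker thread. Schematically (a reduction of the handler above, not new driver logic):

    /* Timer callback: atomic context, no blocking work allowed */
    static void redisc_wait_tmo_sketch(unsigned long ptr)
    {
            struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

            spin_lock_irq(&phba->hbalock);
            phba->fcf.fcf_flag |= FCF_REDISC_EVT;   /* just mark the event */
            spin_unlock_irq(&phba->hbalock);
            lpfc_worker_wake_up(phba);              /* scan runs later */
    }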
@@ -3183,6 +3218,68 @@ out_free_pmb:
3183} 3218}
3184 3219
3185/** 3220/**
3221 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3222 * @vport: pointer to vport data structure.
3223 *
3224 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3225 * response to a CVL event.
3226 *
3227 * Return the pointer to the ndlp with the vport if successful, otherwise
3228 * return NULL.
3229 **/
3230static struct lpfc_nodelist *
3231lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3232{
3233 struct lpfc_nodelist *ndlp;
3234 struct Scsi_Host *shost;
3235 struct lpfc_hba *phba;
3236
3237 if (!vport)
3238 return NULL;
3239 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3240 if (!ndlp)
3241 return NULL;
3242 phba = vport->phba;
3243 if (!phba)
3244 return NULL;
3245 if (phba->pport->port_state <= LPFC_FLOGI)
3246 return NULL;
3247 /* If virtual link is not yet instantiated ignore CVL */
3248 if (vport->port_state <= LPFC_FDISC)
3249 return NULL;
3250 shost = lpfc_shost_from_vport(vport);
3251 if (!shost)
3252 return NULL;
3253 lpfc_linkdown_port(vport);
3254 lpfc_cleanup_pending_mbox(vport);
3255 spin_lock_irq(shost->host_lock);
3256 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3257 spin_unlock_irq(shost->host_lock);
3258
3259 return ndlp;
3260}
3261
3262/**
3263 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3264 * @phba: pointer to lpfc hba data structure.
3265 *
3266 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3267 * response to a FCF dead event.
3268 **/
3269static void
3270lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3271{
3272 struct lpfc_vport **vports;
3273 int i;
3274
3275 vports = lpfc_create_vport_work_array(phba);
3276 if (vports)
3277 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3278 lpfc_sli4_perform_vport_cvl(vports[i]);
3279 lpfc_destroy_vport_work_array(phba, vports);
3280}
3281
3282/**
3186 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3283 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3187 * @phba: pointer to lpfc hba data structure. 3284 * @phba: pointer to lpfc hba data structure.
3188 * @acqe_link: pointer to the async fcoe completion queue entry. 3285 * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -3198,7 +3295,6 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3198 struct lpfc_vport *vport; 3295 struct lpfc_vport *vport;
3199 struct lpfc_nodelist *ndlp; 3296 struct lpfc_nodelist *ndlp;
3200 struct Scsi_Host *shost; 3297 struct Scsi_Host *shost;
3201 uint32_t link_state;
3202 int active_vlink_present; 3298 int active_vlink_present;
3203 struct lpfc_vport **vports; 3299 struct lpfc_vport **vports;
3204 int i; 3300 int i;
@@ -3208,10 +3304,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3208 switch (event_type) { 3304 switch (event_type) {
3209 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3305 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3210 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3306 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3211 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3307 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3212 "2546 New FCF found index 0x%x tag 0x%x\n", 3308 "2546 New FCF found/FCF parameter modified event: "
3213 acqe_fcoe->index, 3309 "evt_tag:x%x, fcf_index:x%x\n",
3214 acqe_fcoe->event_tag); 3310 acqe_fcoe->event_tag, acqe_fcoe->index);
3311
3215 spin_lock_irq(&phba->hbalock); 3312 spin_lock_irq(&phba->hbalock);
3216 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || 3313 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3217 (phba->hba_flag & FCF_DISC_INPROGRESS)) { 3314 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3222,6 +3319,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3222 spin_unlock_irq(&phba->hbalock); 3319 spin_unlock_irq(&phba->hbalock);
3223 break; 3320 break;
3224 } 3321 }
3322
3225 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3323 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3226 /* 3324 /*
3227 * If fast FCF failover rescan event is pending, 3325 * If fast FCF failover rescan event is pending,
@@ -3232,12 +3330,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3232 } 3330 }
3233 spin_unlock_irq(&phba->hbalock); 3331 spin_unlock_irq(&phba->hbalock);
3234 3332
3235 /* Read the FCF table and re-discover SAN. */ 3333 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3236 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3334 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3335 /*
3336 * During period of FCF discovery, read the FCF
3337 * table record indexed by the event to update
3338 * FCF round robin failover eligible FCF bmask.
3339 */
3340 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3341 LOG_DISCOVERY,
3342 "2779 Read new FCF record with "
3343 "fcf_index:x%x for updating FCF "
3344 "round robin failover bmask\n",
3345 acqe_fcoe->index);
3346 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3347 }
3348
3349 /* Otherwise, scan the entire FCF table and re-discover SAN */
3350 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3351 "2770 Start FCF table scan due to new FCF "
3352 "event: evt_tag:x%x, fcf_index:x%x\n",
3353 acqe_fcoe->event_tag, acqe_fcoe->index);
3354 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3355 LPFC_FCOE_FCF_GET_FIRST);
3237 if (rc) 3356 if (rc)
3238 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3357 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3239 "2547 Read FCF record failed 0x%x\n", 3358 "2547 Issue FCF scan read FCF mailbox "
3240 rc); 3359 "command failed 0x%x\n", rc);
3241 break; 3360 break;
3242 3361
3243 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3362 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3248,47 +3367,63 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3248 break; 3367 break;
3249 3368
3250 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3369 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3251 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3370 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3252 "2549 FCF disconnected from network index 0x%x" 3371 "2549 FCF disconnected from network index 0x%x"
3253 " tag 0x%x\n", acqe_fcoe->index, 3372 " tag 0x%x\n", acqe_fcoe->index,
3254 acqe_fcoe->event_tag); 3373 acqe_fcoe->event_tag);
3255 /* If the event is not for currently used fcf do nothing */ 3374 /* If the event is not for currently used fcf do nothing */
3256 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3375 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3257 break; 3376 break;
3258 /* 3377 /* We request port to rediscover the entire FCF table for
3259 * Currently, driver support only one FCF - so treat this as 3378 * a fast recovery from the case that the current FCF record
3260 * a link down, but save the link state because we don't want 3379 * is no longer valid if we are not in the middle of FCF
3261 * it to be changed to Link Down unless it is already down. 3380 * failover process already.
3262 */ 3381 */
3263 link_state = phba->link_state; 3382 spin_lock_irq(&phba->hbalock);
3264 lpfc_linkdown(phba); 3383 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3265 phba->link_state = link_state; 3384 spin_unlock_irq(&phba->hbalock);
3266 /* Unregister FCF if no devices connected to it */ 3385 /* Update FLOGI FCF failover eligible FCF bmask */
3267 lpfc_unregister_unused_fcf(phba); 3386 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3387 break;
3388 }
3389 /* Mark the fast failover process in progress */
3390 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3391 spin_unlock_irq(&phba->hbalock);
3392 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3393 "2771 Start FCF fast failover process due to "
3394 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3395 "\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3396 rc = lpfc_sli4_redisc_fcf_table(phba);
3397 if (rc) {
3398 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3399 LOG_DISCOVERY,
3400 "2772 Issue FCF rediscover mabilbox "
3401 "command failed, fail through to FCF "
3402 "dead event\n");
3403 spin_lock_irq(&phba->hbalock);
3404 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3405 spin_unlock_irq(&phba->hbalock);
3406 /*
3407 * Last resort will fail over by treating this
3408 * as a link down to FCF registration.
3409 */
3410 lpfc_sli4_fcf_dead_failthrough(phba);
3411 } else
3412 /* Handling fast FCF failover to a DEAD FCF event
3413 * is considered equivalent to receiving CVL to all
3414 * vports.
3415 */
3416 lpfc_sli4_perform_all_vport_cvl(phba);
3268 break; 3417 break;
3269 case LPFC_FCOE_EVENT_TYPE_CVL: 3418 case LPFC_FCOE_EVENT_TYPE_CVL:
3270 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3419 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3271 "2718 Clear Virtual Link Received for VPI 0x%x" 3420 "2718 Clear Virtual Link Received for VPI 0x%x"
3272 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3421 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3273 vport = lpfc_find_vport_by_vpid(phba, 3422 vport = lpfc_find_vport_by_vpid(phba,
3274 acqe_fcoe->index - phba->vpi_base); 3423 acqe_fcoe->index - phba->vpi_base);
3275 if (!vport) 3424 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3276 break;
3277 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3278 if (!ndlp) 3425 if (!ndlp)
3279 break; 3426 break;
3280 shost = lpfc_shost_from_vport(vport);
3281 if (phba->pport->port_state <= LPFC_FLOGI)
3282 break;
3283 /* If virtual link is not yet instantiated ignore CVL */
3284 if (vport->port_state <= LPFC_FDISC)
3285 break;
3286
3287 lpfc_linkdown_port(vport);
3288 lpfc_cleanup_pending_mbox(vport);
3289 spin_lock_irq(shost->host_lock);
3290 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3291 spin_unlock_irq(shost->host_lock);
3292 active_vlink_present = 0; 3427 active_vlink_present = 0;
3293 3428
3294 vports = lpfc_create_vport_work_array(phba); 3429 vports = lpfc_create_vport_work_array(phba);
@@ -3311,6 +3446,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3311 * re-instantiate the Vlink using FDISC. 3446 * re-instantiate the Vlink using FDISC.
3312 */ 3447 */
3313 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3448 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3449 shost = lpfc_shost_from_vport(vport);
3314 spin_lock_irq(shost->host_lock); 3450 spin_lock_irq(shost->host_lock);
3315 ndlp->nlp_flag |= NLP_DELAY_TMO; 3451 ndlp->nlp_flag |= NLP_DELAY_TMO;
3316 spin_unlock_irq(shost->host_lock); 3452 spin_unlock_irq(shost->host_lock);
@@ -3321,15 +3457,38 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3321 * Otherwise, we request port to rediscover 3457 * Otherwise, we request port to rediscover
3322 * the entire FCF table for a fast recovery 3458 * the entire FCF table for a fast recovery
3323 * from possible case that the current FCF 3459 * from possible case that the current FCF
3324 * is no longer valid. 3460 * is no longer valid if we are not already
3461 * in the FCF failover process.
3325 */ 3462 */
3463 spin_lock_irq(&phba->hbalock);
3464 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3465 spin_unlock_irq(&phba->hbalock);
3466 break;
3467 }
3468 /* Mark the fast failover process in progress */
3469 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3470 spin_unlock_irq(&phba->hbalock);
3471 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3472 LOG_DISCOVERY,
3473 "2773 Start FCF fast failover due "
3474 "to CVL event: evt_tag:x%x\n",
3475 acqe_fcoe->event_tag);
3326 rc = lpfc_sli4_redisc_fcf_table(phba); 3476 rc = lpfc_sli4_redisc_fcf_table(phba);
3327 if (rc) 3477 if (rc) {
3478 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3479 LOG_DISCOVERY,
3480 "2774 Issue FCF rediscover "
3481 "mabilbox command failed, "
3482 "through to CVL event\n");
3483 spin_lock_irq(&phba->hbalock);
3484 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3485 spin_unlock_irq(&phba->hbalock);
3328 /* 3486 /*
3329 * Last resort will be re-try on the 3487 * Last resort will be re-try on the
3330 * the current registered FCF entry. 3488 * the current registered FCF entry.
3331 */ 3489 */
3332 lpfc_retry_pport_discovery(phba); 3490 lpfc_retry_pport_discovery(phba);
3491 }
3333 } 3492 }
3334 break; 3493 break;
3335 default: 3494 default:
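The FCF DEAD and CVL branches above share one shape: bail if a failover is already in flight, mark the discovery flavor, kick rediscovery, and undo the mark on failure. A hypothetical condensation of that shared shape (not a helper that exists in the driver):

    static int fcf_start_fast_failover(struct lpfc_hba *phba, uint32_t disc_flag)
    {
            int rc;

            spin_lock_irq(&phba->hbalock);
            if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
                    /* failover already in progress; nothing to start */
                    spin_unlock_irq(&phba->hbalock);
                    return 0;
            }
            phba->fcf.fcf_flag |= disc_flag;  /* FCF_DEAD_DISC or FCF_ACVL_DISC */
            spin_unlock_irq(&phba->hbalock);

            rc = lpfc_sli4_redisc_fcf_table(phba);
            if (rc) {
                    spin_lock_irq(&phba->hbalock);
                    phba->fcf.fcf_flag &= ~disc_flag;
                    spin_unlock_irq(&phba->hbalock);
                    /* caller falls back: DEAD treats it as link down,
                     * CVL retries discovery on the registered FCF */
            }
            return rc;
    }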
@@ -3426,11 +3585,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3426 spin_unlock_irq(&phba->hbalock); 3585 spin_unlock_irq(&phba->hbalock);
3427 3586
3428 /* Scan FCF table from the first entry to re-discover SAN */ 3587 /* Scan FCF table from the first entry to re-discover SAN */
3429 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3588 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3589 "2777 Start FCF table scan after FCF "
3590 "rediscovery quiescent period over\n");
3591 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3430 if (rc) 3592 if (rc)
3431 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3593 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3432 "2747 Post FCF rediscovery read FCF record " 3594 "2747 Issue FCF scan read FCF mailbox "
3433 "failed 0x%x\n", rc); 3595 "command failed 0x%x\n", rc);
3434} 3596}
3435 3597
3436/** 3598/**
@@ -3722,6 +3884,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3722 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 3884 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3723 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 3885 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3724 struct lpfc_mqe *mqe; 3886 struct lpfc_mqe *mqe;
3887 int longs;
3725 3888
3726 /* Before proceed, wait for POST done and device ready */ 3889 /* Before proceed, wait for POST done and device ready */
3727 rc = lpfc_sli4_post_status_check(phba); 3890 rc = lpfc_sli4_post_status_check(phba);
@@ -3898,13 +4061,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3898 goto out_free_active_sgl; 4061 goto out_free_active_sgl;
3899 } 4062 }
3900 4063
4064 /* Allocate eligible FCF bmask memory for FCF round robin failover */
4065 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4066 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4067 GFP_KERNEL);
4068 if (!phba->fcf.fcf_rr_bmask) {
4069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4070 "2759 Failed allocate memory for FCF round "
4071 "robin failover bmask\n");
4072 goto out_remove_rpi_hdrs;
4073 }
4074
3901 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4075 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3902 phba->cfg_fcp_eq_count), GFP_KERNEL); 4076 phba->cfg_fcp_eq_count), GFP_KERNEL);
3903 if (!phba->sli4_hba.fcp_eq_hdl) { 4077 if (!phba->sli4_hba.fcp_eq_hdl) {
3904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3905 "2572 Failed allocate memory for fast-path " 4079 "2572 Failed allocate memory for fast-path "
3906 "per-EQ handle array\n"); 4080 "per-EQ handle array\n");
3907 goto out_remove_rpi_hdrs; 4081 goto out_free_fcf_rr_bmask;
3908 } 4082 }
3909 4083
3910 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4084 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
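The open-coded rounding above, (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG, is exactly what the kernel's BITS_TO_LONGS() macro computes. An equivalent allocation, shown for comparison only:

    #include <linux/bitops.h>
    #include <linux/slab.h>

    phba->fcf.fcf_rr_bmask = kzalloc(BITS_TO_LONGS(LPFC_SLI4_FCF_TBL_INDX_MAX) *
                                     sizeof(unsigned long), GFP_KERNEL);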
@@ -3957,6 +4131,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3957 4131
3958out_free_fcp_eq_hdl: 4132out_free_fcp_eq_hdl:
3959 kfree(phba->sli4_hba.fcp_eq_hdl); 4133 kfree(phba->sli4_hba.fcp_eq_hdl);
4134out_free_fcf_rr_bmask:
4135 kfree(phba->fcf.fcf_rr_bmask);
3960out_remove_rpi_hdrs: 4136out_remove_rpi_hdrs:
3961 lpfc_sli4_remove_rpi_hdrs(phba); 4137 lpfc_sli4_remove_rpi_hdrs(phba);
3962out_free_active_sgl: 4138out_free_active_sgl:
@@ -4002,6 +4178,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4002 lpfc_sli4_remove_rpi_hdrs(phba); 4178 lpfc_sli4_remove_rpi_hdrs(phba);
4003 lpfc_sli4_remove_rpis(phba); 4179 lpfc_sli4_remove_rpis(phba);
4004 4180
4181 /* Free eligible FCF index bmask */
4182 kfree(phba->fcf.fcf_rr_bmask);
4183
4005 /* Free the ELS sgl list */ 4184 /* Free the ELS sgl list */
4006 lpfc_free_active_sgl(phba); 4185 lpfc_free_active_sgl(phba);
4007 lpfc_free_sgl_list(phba); 4186 lpfc_free_sgl_list(phba);
@@ -4397,6 +4576,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4397 4576
4398 /* The list order is used by later block SGL registration */ 4577
4399 spin_lock_irq(&phba->hbalock); 4578 spin_lock_irq(&phba->hbalock);
4579 sglq_entry->state = SGL_FREED;
4400 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4580 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4401 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4581 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4402 phba->sli4_hba.total_sglq_bufs++; 4582 phba->sli4_hba.total_sglq_bufs++;
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 954ba57970a3..bb59e9273126 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -35,6 +35,7 @@
35#define LOG_VPORT 0x00004000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOF_SECURITY 0x00008000 /* Security events */ 36#define LOF_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 39#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
39 40
40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 41#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
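The new LOG_FIP bit slots into the existing log-verbosity bitmask, so KERN_INFO FIP messages are emitted only when the configured mask includes it. A usage sketch, assuming the mask is set through the driver's usual lpfc_log_verbose parameter:

    /* Visible only when (cfg_log_verbose & LOG_FIP) is set, e.g.
     * loading the module with lpfc_log_verbose=0x20000 */
    lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                    "2778 Start FCF table scan at linkup\n");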
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 6c4dce1a30ca..72e6adb0643e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
26#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
@@ -1748,7 +1749,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1748} 1749}
1749 1750
1750/** 1751/**
1751 * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd 1752 * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
1752 * @phba: pointer to lpfc hba data structure. 1753 * @phba: pointer to lpfc hba data structure.
1753 * @fcf_index: index to fcf table. 1754 * @fcf_index: index to fcf table.
1754 * 1755 *
@@ -1759,9 +1760,9 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1759 * NULL. 1760 * NULL.
1760 **/ 1761 **/
1761int 1762int
1762lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba, 1763lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
1763 struct lpfcMboxq *mboxq, 1764 struct lpfcMboxq *mboxq,
1764 uint16_t fcf_index) 1765 uint16_t fcf_index)
1765{ 1766{
1766 void *virt_addr; 1767 void *virt_addr;
1767 dma_addr_t phys_addr; 1768 dma_addr_t phys_addr;
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index a1b6db6016da..8f879e477e9d 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -20,6 +20,7 @@
20 *******************************************************************/ 20 *******************************************************************/
21 21
22#include <linux/mempool.h> 22#include <linux/mempool.h>
23#include <linux/slab.h>
23#include <linux/pci.h> 24#include <linux/pci.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d20ae6b3b3cf..e331204a4d56 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
26#include <scsi/scsi.h> 27#include <scsi/scsi.h>
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 483fb74bc592..dccdb822328c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -19,6 +19,7 @@
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/slab.h>
22#include <linux/interrupt.h> 23#include <linux/interrupt.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
@@ -620,23 +621,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
620 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 621 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
621 struct lpfc_scsi_buf *psb, *next_psb; 622 struct lpfc_scsi_buf *psb, *next_psb;
622 unsigned long iflag = 0; 623 unsigned long iflag = 0;
624 struct lpfc_iocbq *iocbq;
625 int i;
623 626
624 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); 627 spin_lock_irqsave(&phba->hbalock, iflag);
628 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
625 list_for_each_entry_safe(psb, next_psb, 629 list_for_each_entry_safe(psb, next_psb,
626 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 630 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
627 if (psb->cur_iocbq.sli4_xritag == xri) { 631 if (psb->cur_iocbq.sli4_xritag == xri) {
628 list_del(&psb->list); 632 list_del(&psb->list);
629 psb->exch_busy = 0; 633 psb->exch_busy = 0;
630 psb->status = IOSTAT_SUCCESS; 634 psb->status = IOSTAT_SUCCESS;
631 spin_unlock_irqrestore( 635 spin_unlock(
632 &phba->sli4_hba.abts_scsi_buf_list_lock, 636 &phba->sli4_hba.abts_scsi_buf_list_lock);
633 iflag); 637 spin_unlock_irqrestore(&phba->hbalock, iflag);
634 lpfc_release_scsi_buf_s4(phba, psb); 638 lpfc_release_scsi_buf_s4(phba, psb);
635 return; 639 return;
636 } 640 }
637 } 641 }
638 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 642 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
639 iflag); 643 for (i = 1; i <= phba->sli.last_iotag; i++) {
644 iocbq = phba->sli.iocbq_lookup[i];
645
646 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
647 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
648 continue;
649 if (iocbq->sli4_xritag != xri)
650 continue;
651 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
652 psb->exch_busy = 0;
653 spin_unlock_irqrestore(&phba->hbalock, iflag);
654 return;
655
656 }
657 spin_unlock_irqrestore(&phba->hbalock, iflag);
640} 658}
641 659
642/** 660/**
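The rework above nests the narrower abts_scsi_buf_list_lock inside phba->hbalock. Only the outermost acquisition needs the IRQ-saving form, the inner lock can be taken plain, and release must happen in reverse order. The shape, as a minimal sketch:

    unsigned long iflag;

    spin_lock_irqsave(&phba->hbalock, iflag);            /* outer, IRQs off */
    spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);  /* inner, plain */
    /* ... walk or modify the aborted-buffer list ... */
    spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
    spin_unlock_irqrestore(&phba->hbalock, iflag);       /* reverse order */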
@@ -1006,6 +1024,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1006 struct scatterlist *sgel = NULL; 1024 struct scatterlist *sgel = NULL;
1007 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1025 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1008 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 1026 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1027 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
1009 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1028 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1010 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 1029 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1011 dma_addr_t physaddr; 1030 dma_addr_t physaddr;
@@ -1056,6 +1075,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1056 physaddr = sg_dma_address(sgel); 1075 physaddr = sg_dma_address(sgel);
1057 if (phba->sli_rev == 3 && 1076 if (phba->sli_rev == 3 &&
1058 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1077 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1078 !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1059 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 1079 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1060 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1080 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1061 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 1081 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -1082,7 +1102,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1082 * explicitly reinitialized since all iocb memory resources are reused. 1102 * explicitly reinitialized since all iocb memory resources are reused.
1083 */ 1103 */
1084 if (phba->sli_rev == 3 && 1104 if (phba->sli_rev == 3 &&
1085 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 1105 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1106 !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1086 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 1107 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1087 /* 1108 /*
1088 * The extended IOCB format can only fit 3 BDE or a BPL. 1109 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -1107,6 +1128,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1107 } else { 1128 } else {
1108 iocb_cmd->un.fcpi64.bdl.bdeSize = 1129 iocb_cmd->un.fcpi64.bdl.bdeSize =
1109 ((num_bde + 2) * sizeof(struct ulp_bde64)); 1130 ((num_bde + 2) * sizeof(struct ulp_bde64));
1131 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1110 } 1132 }
1111 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 1133 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1112 1134
@@ -2079,8 +2101,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2079 2101
2080 if (resp_info & RSP_LEN_VALID) { 2102 if (resp_info & RSP_LEN_VALID) {
2081 rsplen = be32_to_cpu(fcprsp->rspRspLen); 2103 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2082 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || 2104 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
2083 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
2084 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2105 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2085 "2719 Invalid response length: " 2106 "2719 Invalid response length: "
2086 "tgt x%x lun x%x cmnd x%x rsplen x%x\n", 2107 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
@@ -2090,6 +2111,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2090 host_status = DID_ERROR; 2111 host_status = DID_ERROR;
2091 goto out; 2112 goto out;
2092 } 2113 }
2114 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
2115 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2116 "2757 Protocol failure detected during "
2117 "processing of FCP I/O op: "
2118 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
2119 cmnd->device->id,
2120 cmnd->device->lun, cmnd->cmnd[0],
2121 fcprsp->rspInfo3);
2122 host_status = DID_ERROR;
2123 goto out;
2124 }
2093 } 2125 }
2094 2126
2095 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 2127 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35e3b96d4e07..049fb9a17b3f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/slab.h>
26 27
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
@@ -494,7 +495,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
494 * 495 *
495 * Returns sglq pointer = success, NULL = Failure. 496
496 **/ 497 **/
497static struct lpfc_sglq * 498struct lpfc_sglq *
498__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 499__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
499{ 500{
500 uint16_t adj_xri; 501 uint16_t adj_xri;
@@ -526,6 +527,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
526 return NULL; 527 return NULL;
527 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; 528 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
528 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 529 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
530 sglq->state = SGL_ALLOCATED;
529 return sglq; 531 return sglq;
530} 532}
531 533
@@ -580,15 +582,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
580 else 582 else
581 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 583 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
582 if (sglq) { 584 if (sglq) {
583 if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) { 585 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
586 (sglq->state != SGL_XRI_ABORTED)) {
584 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 587 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
585 iflag); 588 iflag);
586 list_add(&sglq->list, 589 list_add(&sglq->list,
587 &phba->sli4_hba.lpfc_abts_els_sgl_list); 590 &phba->sli4_hba.lpfc_abts_els_sgl_list);
588 spin_unlock_irqrestore( 591 spin_unlock_irqrestore(
589 &phba->sli4_hba.abts_sgl_list_lock, iflag); 592 &phba->sli4_hba.abts_sgl_list_lock, iflag);
590 } else 593 } else {
594 sglq->state = SGL_FREED;
591 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 595 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
596 }
592 } 597 }
593 598
594 599
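The state field threaded through these hunks gives each ELS sglq a small lifecycle, so an entry whose XRI abort has already completed is not parked on the abort list a second time. A hedged reconstruction of the states and transitions (the real definitions live in the lpfc headers and may differ in detail):

    enum lpfc_sgl_state_sketch {
            SGL_FREED,        /* on lpfc_sgl_list, free for reuse */
            SGL_ALLOCATED,    /* handed out by __lpfc_sli_get_sglq() */
            SGL_XRI_ABORTED,  /* XRI abort seen; safe to free directly */
    };

    /* Transitions implied by the hunks:
     *   FREED     -> ALLOCATED  in __lpfc_sli_get_sglq()
     *   ALLOCATED -> abts list  on release, if LPFC_EXCHANGE_BUSY and
     *                           the abort has not completed yet
     *   ALLOCATED -> FREED      on release otherwise
     *   abts list -> FREED      in bulk in lpfc_hba_down_post_s4()
     */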
@@ -2258,41 +2263,56 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2258 spin_unlock_irqrestore(&phba->hbalock, 2263 spin_unlock_irqrestore(&phba->hbalock,
2259 iflag); 2264 iflag);
2260 } 2265 }
2261 if ((phba->sli_rev == LPFC_SLI_REV4) && 2266 if (phba->sli_rev == LPFC_SLI_REV4) {
2262 (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) { 2267 if (saveq->iocb_flag &
2263 /* Set cmdiocb flag for the exchange 2268 LPFC_EXCHANGE_BUSY) {
2264 * busy so sgl (xri) will not be 2269 /* Set cmdiocb flag for the
2265 * released until the abort xri is 2270 * exchange busy so sgl (xri)
2266 * received from hba, clear the 2271 * will not be released until
2267 * LPFC_DRIVER_ABORTED bit in case 2272 * the abort xri is received
2268 * it was driver initiated abort. 2273 * from hba.
2269 */ 2274 */
2270 spin_lock_irqsave(&phba->hbalock, 2275 spin_lock_irqsave(
2271 iflag); 2276 &phba->hbalock, iflag);
2272 cmdiocbp->iocb_flag &= 2277 cmdiocbp->iocb_flag |=
2273 ~LPFC_DRIVER_ABORTED; 2278 LPFC_EXCHANGE_BUSY;
2274 cmdiocbp->iocb_flag |= 2279 spin_unlock_irqrestore(
2275 LPFC_EXCHANGE_BUSY; 2280 &phba->hbalock, iflag);
2276 spin_unlock_irqrestore(&phba->hbalock, 2281 }
2277 iflag); 2282 if (cmdiocbp->iocb_flag &
2278 cmdiocbp->iocb.ulpStatus = 2283 LPFC_DRIVER_ABORTED) {
2279 IOSTAT_LOCAL_REJECT; 2284 /*
2280 cmdiocbp->iocb.un.ulpWord[4] = 2285 * Clear LPFC_DRIVER_ABORTED
2281 IOERR_ABORT_REQUESTED; 2286 * bit in case it was driver
2282 /* 2287 * initiated abort.
2283 * For SLI4, irsiocb contains NO_XRI 2288 */
2284 * in sli_xritag, it shall not affect 2289 spin_lock_irqsave(
2285 * releasing sgl (xri) process. 2290 &phba->hbalock, iflag);
2286 */ 2291 cmdiocbp->iocb_flag &=
2287 saveq->iocb.ulpStatus = 2292 ~LPFC_DRIVER_ABORTED;
2288 IOSTAT_LOCAL_REJECT; 2293 spin_unlock_irqrestore(
2289 saveq->iocb.un.ulpWord[4] = 2294 &phba->hbalock, iflag);
2290 IOERR_SLI_ABORTED; 2295 cmdiocbp->iocb.ulpStatus =
2291 spin_lock_irqsave(&phba->hbalock, 2296 IOSTAT_LOCAL_REJECT;
2292 iflag); 2297 cmdiocbp->iocb.un.ulpWord[4] =
2293 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2298 IOERR_ABORT_REQUESTED;
2294 spin_unlock_irqrestore(&phba->hbalock, 2299 /*
2295 iflag); 2300 * For SLI4, irsiocb contains
2301 * NO_XRI in sli_xritag, it
2302 * shall not affect releasing
2303 * sgl (xri) process.
2304 */
2305 saveq->iocb.ulpStatus =
2306 IOSTAT_LOCAL_REJECT;
2307 saveq->iocb.un.ulpWord[4] =
2308 IOERR_SLI_ABORTED;
2309 spin_lock_irqsave(
2310 &phba->hbalock, iflag);
2311 saveq->iocb_flag |=
2312 LPFC_DELAY_MEM_FREE;
2313 spin_unlock_irqrestore(
2314 &phba->hbalock, iflag);
2315 }
2296 } 2316 }
2297 } 2317 }
2298 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2318 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -2515,14 +2535,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2515 2535
2516 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2536 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2517 &rspiocbq); 2537 &rspiocbq);
2518 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 2538 if (unlikely(!cmdiocbq))
2519 spin_unlock_irqrestore(&phba->hbalock, 2539 break;
2520 iflag); 2540 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2521 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2541 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2522 &rspiocbq); 2542 if (cmdiocbq->iocb_cmpl) {
2523 spin_lock_irqsave(&phba->hbalock, 2543 spin_unlock_irqrestore(&phba->hbalock, iflag);
2524 iflag); 2544 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2525 } 2545 &rspiocbq);
2546 spin_lock_irqsave(&phba->hbalock, iflag);
2547 }
2526 break; 2548 break;
2527 case LPFC_UNSOL_IOCB: 2549 case LPFC_UNSOL_IOCB:
2528 spin_unlock_irqrestore(&phba->hbalock, iflag); 2550 spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -3091,6 +3113,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3091 3113
3092 /* Check to see if any errors occurred during init */ 3114 /* Check to see if any errors occurred during init */
3093 if ((status & HS_FFERM) || (i >= 20)) { 3115 if ((status & HS_FFERM) || (i >= 20)) {
3116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3117 "2751 Adapter failed to restart, "
3118 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3119 status,
3120 readl(phba->MBslimaddr + 0xa8),
3121 readl(phba->MBslimaddr + 0xac));
3094 phba->link_state = LPFC_HBA_ERROR; 3122 phba->link_state = LPFC_HBA_ERROR;
3095 retval = 1; 3123 retval = 1;
3096 } 3124 }
@@ -3278,6 +3306,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
3278 if (retval != MBX_SUCCESS) { 3306 if (retval != MBX_SUCCESS) {
3279 if (retval != MBX_BUSY) 3307 if (retval != MBX_BUSY)
3280 mempool_free(pmb, phba->mbox_mem_pool); 3308 mempool_free(pmb, phba->mbox_mem_pool);
3309 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3310 "2752 KILL_BOARD command failed retval %d\n",
3311 retval);
3281 spin_lock_irq(&phba->hbalock); 3312 spin_lock_irq(&phba->hbalock);
3282 phba->link_flag &= ~LS_IGNORE_ERATT; 3313 phba->link_flag &= ~LS_IGNORE_ERATT;
3283 spin_unlock_irq(&phba->hbalock); 3314 spin_unlock_irq(&phba->hbalock);
@@ -4035,7 +4066,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4035 4066
4036lpfc_sli_hba_setup_error: 4067lpfc_sli_hba_setup_error:
4037 phba->link_state = LPFC_HBA_ERROR; 4068 phba->link_state = LPFC_HBA_ERROR;
4038 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4039 "0445 Firmware initialization failed\n"); 4070 "0445 Firmware initialization failed\n");
4040 return rc; 4071 return rc;
4041} 4072}
@@ -4388,7 +4419,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4388 spin_unlock_irq(&phba->hbalock); 4419 spin_unlock_irq(&phba->hbalock);
4389 4420
4390 /* Read the port's service parameters. */ 4421 /* Read the port's service parameters. */
4391 lpfc_read_sparam(phba, mboxq, vport->vpi); 4422 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
4423 if (rc) {
4424 phba->link_state = LPFC_HBA_ERROR;
4425 rc = -ENOMEM;
4426 goto out_free_vpd;
4427 }
4428
4392 mboxq->vport = vport; 4429 mboxq->vport = vport;
4393 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4430 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4394 mp = (struct lpfc_dmabuf *) mboxq->context1; 4431 mp = (struct lpfc_dmabuf *) mboxq->context1;
@@ -4483,6 +4520,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4483 /* Post receive buffers to the device */ 4520 /* Post receive buffers to the device */
4484 lpfc_sli4_rb_setup(phba); 4521 lpfc_sli4_rb_setup(phba);
4485 4522
4523 /* Reset HBA FCF states after HBA reset */
4524 phba->fcf.fcf_flag = 0;
4525 phba->fcf.current_rec.flag = 0;
4526
4486 /* Start the ELS watchdog timer */ 4527 /* Start the ELS watchdog timer */
4487 mod_timer(&vport->els_tmofunc, 4528 mod_timer(&vport->els_tmofunc,
4488 jiffies + HZ * (phba->fc_ratov * 2)); 4529 jiffies + HZ * (phba->fc_ratov * 2));
@@ -7436,6 +7477,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7436{ 7477{
7437 wait_queue_head_t *pdone_q; 7478 wait_queue_head_t *pdone_q;
7438 unsigned long iflags; 7479 unsigned long iflags;
7480 struct lpfc_scsi_buf *lpfc_cmd;
7439 7481
7440 spin_lock_irqsave(&phba->hbalock, iflags); 7482 spin_lock_irqsave(&phba->hbalock, iflags);
7441 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 7483 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7443,6 +7485,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7443 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 7485 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
7444 &rspiocbq->iocb, sizeof(IOCB_t)); 7486 &rspiocbq->iocb, sizeof(IOCB_t));
7445 7487
7488 /* Set the exchange busy flag for task management commands */
7489 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
7490 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
7491 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
7492 cur_iocbq);
7493 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
7494 }
7495
7446 pdone_q = cmdiocbq->context_un.wait_queue; 7496 pdone_q = cmdiocbq->context_un.wait_queue;
7447 if (pdone_q) 7497 if (pdone_q)
7448 wake_up(pdone_q); 7498 wake_up(pdone_q);
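The new exchange-busy code recovers the enclosing lpfc_scsi_buf from its embedded cur_iocbq member with container_of. A self-contained sketch of that idiom; the struct names are illustrative stand-ins:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct iocbq { int tag; };

struct scsi_buf {
	int exch_busy;
	struct iocbq cur_iocbq;	/* embedded member, as in lpfc_scsi_buf */
};

int main(void)
{
	struct scsi_buf buf = { 0, { 7 } };
	struct iocbq *iocb = &buf.cur_iocbq;

	/* Recover the parent structure from the embedded member. */
	struct scsi_buf *owner = container_of(iocb, struct scsi_buf, cur_iocbq);

	owner->exch_busy = 1;
	printf("tag=%d exch_busy=%d\n", owner->cur_iocbq.tag, buf.exch_busy);
	return 0;
}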
@@ -9061,6 +9111,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
9061 /* Fake the irspiocb and copy necessary response information */ 9111 /* Fake the irspiocb and copy necessary response information */
9062 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 9112 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
9063 9113
9114 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
9115 spin_lock_irqsave(&phba->hbalock, iflags);
9116 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
9117 spin_unlock_irqrestore(&phba->hbalock, iflags);
9118 }
9119
9064 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9120 /* Pass the cmd_iocb and the rsp state to the upper layer */
9065 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9121 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
9066} 9122}
@@ -11941,15 +11997,19 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11941} 11997}
11942 11998
11943/** 11999/**
11944 * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. 12000 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
11945 * @phba: pointer to lpfc hba data structure. 12001 * @phba: pointer to lpfc hba data structure.
11946 * @fcf_index: FCF table entry offset. 12002 * @fcf_index: FCF table entry offset.
11947 * 12003 *
11948 * This routine is invoked to read up to @fcf_num of FCF record from the 12004 * This routine is invoked to scan the entire FCF table by reading FCF
11949 * device starting with the given @fcf_index. 12005 * records and processing them one at a time starting from the @fcf_index
12006 * for initial FCF discovery or fast FCF failover rediscovery.
12007 *
12008 * Return 0 if the mailbox command is submitted successfully, non-zero
12009 * otherwise.
11950 **/ 12010 **/
11951int 12011int
11952lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) 12012lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
11953{ 12013{
11954 int rc = 0, error; 12014 int rc = 0, error;
11955 LPFC_MBOXQ_t *mboxq; 12015 LPFC_MBOXQ_t *mboxq;
@@ -11961,17 +12021,17 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11961 "2000 Failed to allocate mbox for " 12021 "2000 Failed to allocate mbox for "
11962 "READ_FCF cmd\n"); 12022 "READ_FCF cmd\n");
11963 error = -ENOMEM; 12023 error = -ENOMEM;
11964 goto fail_fcfscan; 12024 goto fail_fcf_scan;
11965 } 12025 }
11966 /* Construct the read FCF record mailbox command */ 12026 /* Construct the read FCF record mailbox command */
11967 rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index); 12027 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
11968 if (rc) { 12028 if (rc) {
11969 error = -EINVAL; 12029 error = -EINVAL;
11970 goto fail_fcfscan; 12030 goto fail_fcf_scan;
11971 } 12031 }
11972 /* Issue the mailbox command asynchronously */ 12032 /* Issue the mailbox command asynchronously */
11973 mboxq->vport = phba->pport; 12033 mboxq->vport = phba->pport;
11974 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; 12034 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
11975 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12035 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11976 if (rc == MBX_NOT_FINISHED) 12036 if (rc == MBX_NOT_FINISHED)
11977 error = -EIO; 12037 error = -EIO;
@@ -11979,9 +12039,13 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11979 spin_lock_irq(&phba->hbalock); 12039 spin_lock_irq(&phba->hbalock);
11980 phba->hba_flag |= FCF_DISC_INPROGRESS; 12040 phba->hba_flag |= FCF_DISC_INPROGRESS;
11981 spin_unlock_irq(&phba->hbalock); 12041 spin_unlock_irq(&phba->hbalock);
12042 /* Reset FCF round robin index bmask for new scan */
12043 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
12044 memset(phba->fcf.fcf_rr_bmask, 0,
12045 sizeof(*phba->fcf.fcf_rr_bmask));
11982 error = 0; 12046 error = 0;
11983 } 12047 }
11984fail_fcfscan: 12048fail_fcf_scan:
11985 if (error) { 12049 if (error) {
11986 if (mboxq) 12050 if (mboxq)
11987 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12051 lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -11994,6 +12058,181 @@ fail_fcfscan:
11994} 12058}
11995 12059
11996/** 12060/**
12061 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
12062 * @phba: pointer to lpfc hba data structure.
12063 * @fcf_index: FCF table entry offset.
12064 *
12065 * This routine is invoked to read an FCF record indicated by @fcf_index
12066 * and to use it for FLOGI round robin FCF failover.
12067 *
12068 * Return 0 if the mailbox command is submitted successfully, non-zero
12069 * otherwise.
12070 **/
12071int
12072lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12073{
12074 int rc = 0, error;
12075 LPFC_MBOXQ_t *mboxq;
12076
12077 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12078 if (!mboxq) {
12079 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12080 "2763 Failed to allocate mbox for "
12081 "READ_FCF cmd\n");
12082 error = -ENOMEM;
12083 goto fail_fcf_read;
12084 }
12085 /* Construct the read FCF record mailbox command */
12086 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12087 if (rc) {
12088 error = -EINVAL;
12089 goto fail_fcf_read;
12090 }
12091 /* Issue the mailbox command asynchronously */
12092 mboxq->vport = phba->pport;
12093 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
12094 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12095 if (rc == MBX_NOT_FINISHED)
12096 error = -EIO;
12097 else
12098 error = 0;
12099
12100fail_fcf_read:
12101 if (error && mboxq)
12102 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12103 return error;
12104}
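Both READ_FCF helpers follow the same ownership rule: allocate a mailbox, submit it asynchronously with a completion handler, and free it only on the error path, since on success the completion handler owns and frees it. A compact sketch of that rule, all names hypothetical:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mbox {
	void (*cmpl)(struct mbox *);	/* completion owns the mbox on success */
};

static void read_rec_cmpl(struct mbox *m)
{
	printf("completion runs, then frees the mbox\n");
	free(m);
}

/* Returns 0 if submitted; the completion handler then frees m. */
static int submit_async(struct mbox *m)
{
	m->cmpl(m);	/* stand-in for a real asynchronous submit */
	return 0;
}

static int read_record(void)
{
	struct mbox *m = malloc(sizeof(*m));
	int error;

	if (!m)
		return -ENOMEM;
	m->cmpl = read_rec_cmpl;
	error = submit_async(m);
	if (error)
		free(m);	/* error path: we still own the mbox */
	return error;
}

int main(void)
{
	return read_record() ? 1 : 0;
}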
12105
12106/**
12107 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
12108 * @phba: pointer to lpfc hba data structure.
12109 * @fcf_index: FCF table entry offset.
12110 *
12111 * This routine is invoked to read an FCF record indicated by @fcf_index to
12112 * determine whether it's eligible for the FLOGI round robin failover list.
12113 *
12114 * Return 0 if the mailbox command is submitted successfully, non-zero
12115 * otherwise.
12116 **/
12117int
12118lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12119{
12120 int rc = 0, error;
12121 LPFC_MBOXQ_t *mboxq;
12122
12123 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12124 if (!mboxq) {
12125 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12126 "2758 Failed to allocate mbox for "
12127 "READ_FCF cmd\n");
12128 error = -ENOMEM;
12129 goto fail_fcf_read;
12130 }
12131 /* Construct the read FCF record mailbox command */
12132 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12133 if (rc) {
12134 error = -EINVAL;
12135 goto fail_fcf_read;
12136 }
12137 /* Issue the mailbox command asynchronously */
12138 mboxq->vport = phba->pport;
12139 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
12140 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12141 if (rc == MBX_NOT_FINISHED)
12142 error = -EIO;
12143 else
12144 error = 0;
12145
12146fail_fcf_read:
12147 if (error && mboxq)
12148 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12149 return error;
12150}
12151
12152/**
12153 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
12154 * @phba: pointer to lpfc hba data structure.
12155 *
12156 * This routine gets the next eligible FCF record index in a round
12157 * robin fashion. If the next eligible FCF record index equals the
12158 * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
12159 * shall be returned; otherwise, the next eligible FCF record's index
12160 * shall be returned.
12161 **/
12162uint16_t
12163lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12164{
12165 uint16_t next_fcf_index;
12166
12167 /* Search from the currently registered FCF index */
12168 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12169 LPFC_SLI4_FCF_TBL_INDX_MAX,
12170 phba->fcf.current_rec.fcf_indx);
12171 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
12172 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
12173 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12174 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
12175 /* Round robin failover stop condition */
12176 if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
12177 return LPFC_FCOE_FCF_NEXT_NONE;
12178
12179 return next_fcf_index;
12180}
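The wrap-around search above walks the bitmap upward from the current index, restarts from bit 0 on overflow, and stops when it comes back to the index the round robin started from. A self-contained rendition with a naive stand-in for the kernel's find_next_bit():

#include <stdint.h>
#include <stdio.h>

#define TBL_MAX   32u
#define NEXT_NONE 0xFFFFu

/* Naive stand-in for the kernel's optimized find_next_bit(). */
static unsigned find_next_bit32(uint32_t bmask, unsigned size, unsigned from)
{
	for (unsigned i = from; i < size; i++)
		if (bmask & (1u << i))
			return i;
	return size;	/* no bit set at or above 'from' */
}

static unsigned rr_next_index(uint32_t bmask, unsigned from, unsigned init)
{
	unsigned next = find_next_bit32(bmask, TBL_MAX, from);

	if (next >= TBL_MAX)		/* wrap around to bit 0 */
		next = find_next_bit32(bmask, TBL_MAX, 0);
	if (next == init)		/* back to the start: list exhausted */
		return NEXT_NONE;
	return next;
}

int main(void)
{
	uint32_t eligible = (1u << 3) | (1u << 9) | (1u << 20);

	printf("after 9:  %u\n", rr_next_index(eligible, 9 + 1, 3));  /* 20 */
	printf("after 20: %u\n", rr_next_index(eligible, 20 + 1, 3)); /* wraps to 3, so NONE */
	return 0;
}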
12181
12182/**
12183 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
12184 * @phba: pointer to lpfc hba data structure.
12185 *
12186 * This routine sets the FCF record index into the eligible bmask for
12187 * round robin failover search. It checks to make sure that the index
12188 * does not go beyond the range of the driver allocated bmask dimension
12189 * before setting the bit.
12190 *
12191 * Returns 0 if the index bit is successfully set; otherwise, it returns
12192 * -EINVAL.
12193 **/
12194int
12195lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12196{
12197 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12198 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12199 "2610 HBA FCF index reached driver's "
12200 "book keeping dimension: fcf_index:%d, "
12201 "driver_bmask_max:%d\n",
12202 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12203 return -EINVAL;
12204 }
12205 /* Set the eligible FCF record index bmask */
12206 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12207
12208 return 0;
12209}
12210
12211/**
12212 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
12213 * @phba: pointer to lpfc hba data structure.
12214 *
12215 * This routine clears the FCF record index from the eligible bmask for
12216 * round robin failover search. It checks to make sure that the index
12217 * does not go beyond the range of the driver allocated bmask dimension
12218 * before clearing the bit.
12219 **/
12220void
12221lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12222{
12223 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12224 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12225 "2762 HBA FCF index goes beyond driver's "
12226 "book keeping dimension: fcf_index:%d, "
12227 "driver_bmask_max:%d\n",
12228 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12229 return;
12230 }
12231 /* Clear the eligible FCF record index bmask */
12232 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12233}
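Both the set and clear helpers guard fcf_index against LPFC_SLI4_FCF_TBL_INDX_MAX before touching the bitmap, since writing past the allocated words would corrupt adjacent memory. A tiny sketch of the guarded pair, with hypothetical names:

#include <errno.h>
#include <limits.h>
#include <stdio.h>

#define TBL_MAX 32u
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long bmask[(TBL_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int index_set(unsigned idx)
{
	if (idx >= TBL_MAX) {
		fprintf(stderr, "index %u beyond bmask dimension %u\n",
			idx, TBL_MAX);
		return -EINVAL;
	}
	bmask[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
	return 0;
}

static void index_clear(unsigned idx)
{
	if (idx >= TBL_MAX)
		return;		/* same guard; callers ignore the result */
	bmask[idx / BITS_PER_LONG] &= ~(1UL << (idx % BITS_PER_LONG));
}

int main(void)
{
	index_set(5);
	index_clear(5);
	return index_set(99) == -EINVAL ? 0 : 1;	/* out-of-range rejected */
}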
12234
12235/**
11997 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 12236 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
11998 * @phba: pointer to lpfc hba data structure. 12237 * @phba: pointer to lpfc hba data structure.
11999 * 12238 *
@@ -12014,21 +12253,40 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12014 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 12253 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12015 &redisc_fcf->header.cfg_shdr.response); 12254 &redisc_fcf->header.cfg_shdr.response);
12016 if (shdr_status || shdr_add_status) { 12255 if (shdr_status || shdr_add_status) {
12017 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12256 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12018 "2746 Requesting for FCF rediscovery failed " 12257 "2746 Requesting for FCF rediscovery failed "
12019 "status x%x add_status x%x\n", 12258 "status x%x add_status x%x\n",
12020 shdr_status, shdr_add_status); 12259 shdr_status, shdr_add_status);
12021 /* 12260 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
12022 * Request failed, last resort to re-try current 12261 spin_lock_irq(&phba->hbalock);
12023 * registered FCF entry 12262 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
12024 */ 12263 spin_unlock_irq(&phba->hbalock);
12025 lpfc_retry_pport_discovery(phba); 12264 /*
12026 } else 12265 * CVL event triggered FCF rediscover request failed,
12266 * last resort is to retry the currently registered FCF entry.
12267 */
12268 lpfc_retry_pport_discovery(phba);
12269 } else {
12270 spin_lock_irq(&phba->hbalock);
12271 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
12272 spin_unlock_irq(&phba->hbalock);
12273 /*
12274 * DEAD FCF event triggered FCF rediscover request
12275 * failed; last resort is to fail over as a link down
12276 * to FCF registration.
12277 */
12278 lpfc_sli4_fcf_dead_failthrough(phba);
12279 }
12280 } else {
12281 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12282 "2775 Start FCF rediscovery quiescent period "
12283 "wait timer before scaning FCF table\n");
12027 /* 12284 /*
12028 * Start FCF rediscovery wait timer for pending FCF 12285 * Start FCF rediscovery wait timer for pending FCF
12029 * before rescan FCF record table. 12286 * before rescan FCF record table.
12030 */ 12287 */
12031 lpfc_fcf_redisc_wait_start_timer(phba); 12288 lpfc_fcf_redisc_wait_start_timer(phba);
12289 }
12032 12290
12033 mempool_free(mbox, phba->mbox_mem_pool); 12291 mempool_free(mbox, phba->mbox_mem_pool);
12034} 12292}
@@ -12047,6 +12305,9 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12047 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 12305 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12048 int rc, length; 12306 int rc, length;
12049 12307
12308 /* Cancel retry delay timers to all vports before FCF rediscover */
12309 lpfc_cancel_all_vport_retry_delay_timer(phba);
12310
12050 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12311 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12051 if (!mbox) { 12312 if (!mbox) {
12052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12313 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12078,6 +12339,31 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12078} 12339}
12079 12340
12080/** 12341/**
12342 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
12343 * @phba: pointer to lpfc hba data structure.
12344 *
12345 * This function is the failover routine as a last resort to the FCF DEAD
12346 * event when the driver failed to perform fast FCF failover.
12347 **/
12348void
12349lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
12350{
12351 uint32_t link_state;
12352
12353 /*
12354 * Last resort as FCF DEAD event failover will treat this as
12355 * a link down, but save the link state because we don't want
12356 * it to be changed to Link Down unless it is already down.
12357 */
12358 link_state = phba->link_state;
12359 lpfc_linkdown(phba);
12360 phba->link_state = link_state;
12361
12362 /* Unregister FCF if no devices connected to it */
12363 lpfc_unregister_unused_fcf(phba);
12364}
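lpfc_sli4_fcf_dead_failthrough() wants the teardown side effects of lpfc_linkdown() without its state change, so it saves link_state and restores it afterwards. The save/act/restore shape, reduced to a sketch (the state machine here is illustrative, not lpfc's):

#include <stdio.h>

enum link_state { LINK_DOWN, LINK_UP, HBA_READY };

static enum link_state link_state = HBA_READY;

/* Heavyweight teardown that, as a side effect, forces LINK_DOWN. */
static void linkdown_teardown(void)
{
	link_state = LINK_DOWN;
	printf("tore down discovery state\n");
}

static void dead_failthrough(void)
{
	/* Save the state: we want the teardown, not the state change. */
	enum link_state saved = link_state;

	linkdown_teardown();
	link_state = saved;
}

int main(void)
{
	dead_failthrough();
	printf("link_state restored: %d\n", link_state == HBA_READY);
	return 0;
}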
12365
12366/**
12081 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 12367 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
12082 * @phba: pointer to lpfc hba data structure. 12368 * @phba: pointer to lpfc hba data structure.
12083 * 12369 *
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index dfcf5437d1f5..b4a639c47616 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -62,6 +62,7 @@ struct lpfc_iocbq {
62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
63#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ 63#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
64#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ 64#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
65#define DSS_SECURITY_OP 0x100 /* security IO */
65 66
66#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 67#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
67#define LPFC_FIP_ELS_ID_SHIFT 14 68#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 86308836600f..4a35e7b9bc5b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -153,15 +153,27 @@ struct lpfc_fcf {
153#define FCF_REGISTERED 0x02 /* FCF registered with FW */ 153#define FCF_REGISTERED 0x02 /* FCF registered with FW */
154#define FCF_SCAN_DONE 0x04 /* FCF table scan done */ 154#define FCF_SCAN_DONE 0x04 /* FCF table scan done */
155#define FCF_IN_USE 0x08 /* At least one discovery completed */ 155#define FCF_IN_USE 0x08 /* At least one discovery completed */
156#define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */ 156#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
157#define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */ 157#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */
158#define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */ 158#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */
159#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
160#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
161#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
162#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
159 uint32_t addr_mode; 163 uint32_t addr_mode;
164 uint16_t fcf_rr_init_indx;
160 struct lpfc_fcf_rec current_rec; 165 struct lpfc_fcf_rec current_rec;
161 struct lpfc_fcf_rec failover_rec; 166 struct lpfc_fcf_rec failover_rec;
162 struct timer_list redisc_wait; 167 struct timer_list redisc_wait;
168 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
163}; 169};
164 170
171/*
172 * Maximum FCF table index, used for driver-internal bookkeeping; it
173 * just needs to be no less than the supported HBA's FCF table size.
174 */
175#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
176
165#define LPFC_REGION23_SIGNATURE "RG23" 177#define LPFC_REGION23_SIGNATURE "RG23"
166#define LPFC_REGION23_VERSION 1 178#define LPFC_REGION23_VERSION 1
167#define LPFC_REGION23_LAST_REC 0xff 179#define LPFC_REGION23_LAST_REC 0xff
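The three new discovery bits are OR-ed into the composite FCF_DISCOVERY mask, so "any discovery mode active" is a single AND. A minimal sketch of the composite-mask idiom, reusing the values from this hunk:

#include <stdio.h>

#define FCF_INIT_DISC 0x10
#define FCF_DEAD_DISC 0x20
#define FCF_ACVL_DISC 0x40
#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)

int main(void)
{
	unsigned fcf_flag = FCF_DEAD_DISC;

	if (fcf_flag & FCF_DISCOVERY)	/* any of the three modes */
		printf("discovery in progress (flags 0x%x)\n", fcf_flag);

	fcf_flag &= ~FCF_DEAD_DISC;	/* clear just one mode */
	printf("after clear: 0x%x\n", fcf_flag);
	return 0;
}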
@@ -431,11 +443,18 @@ enum lpfc_sge_type {
431 SCSI_BUFF_TYPE 443 SCSI_BUFF_TYPE
432}; 444};
433 445
446enum lpfc_sgl_state {
447 SGL_FREED,
448 SGL_ALLOCATED,
449 SGL_XRI_ABORTED
450};
451
434struct lpfc_sglq { 452struct lpfc_sglq {
435 /* lpfc_sglqs are used in double linked lists */ 453 /* lpfc_sglqs are used in double linked lists */
436 struct list_head list; 454 struct list_head list;
437 struct list_head clist; 455 struct list_head clist;
438 enum lpfc_sge_type buff_type; /* is this a scsi sgl */ 456 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
457 enum lpfc_sgl_state state;
439 uint16_t iotag; /* pre-assigned IO tag */ 458 uint16_t iotag; /* pre-assigned IO tag */
440 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 459 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
441 struct sli4_sge *sgl; /* pre-assigned SGL */ 460 struct sli4_sge *sgl; /* pre-assigned SGL */
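The new lpfc_sgl_state enum gives each SGL an explicit lifecycle instead of inferring it from list membership. A toy tracker in the same shape; the transition order shown is my reading of the enum, not a documented lpfc contract:

#include <stdio.h>

enum sgl_state { SGL_FREED, SGL_ALLOCATED, SGL_XRI_ABORTED };

struct sglq {
	enum sgl_state state;
	unsigned int iotag;
};

static const char *name(enum sgl_state s)
{
	static const char *n[] = { "FREED", "ALLOCATED", "XRI_ABORTED" };
	return n[s];
}

int main(void)
{
	struct sglq q = { SGL_FREED, 1 };

	q.state = SGL_ALLOCATED;	/* handed out for an I/O */
	printf("sglq %u: %s\n", q.iotag, name(q.state));

	q.state = SGL_XRI_ABORTED;	/* XRI aborted; hold until cleanup */
	printf("sglq %u: %s\n", q.iotag, name(q.state));

	q.state = SGL_FREED;		/* back on the free list */
	printf("sglq %u: %s\n", q.iotag, name(q.state));
	return 0;
}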
@@ -463,8 +482,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
463void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); 482void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
464void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, 483void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
465 struct lpfc_mbx_sge *); 484 struct lpfc_mbx_sge *);
466int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *, 485int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
467 uint16_t); 486 uint16_t);
468 487
469void lpfc_sli4_hba_reset(struct lpfc_hba *); 488void lpfc_sli4_hba_reset(struct lpfc_hba *);
470struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, 489struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -523,8 +542,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
523uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); 542uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
524uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); 543uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
525void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); 544void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
526int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); 545int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
527void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); 546int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
547int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
548void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
549void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
550void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
551int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
528int lpfc_sli4_post_status_check(struct lpfc_hba *); 552int lpfc_sli4_post_status_check(struct lpfc_hba *);
529uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); 553uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
530 554
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ac276aa46fba..013deec5dae8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.9" 21#define LPFC_DRIVER_VERSION "8.3.10"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index dc86e873102a..ffd575c379f3 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -26,6 +26,7 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/slab.h>
29#include <linux/spinlock.h> 30#include <linux/spinlock.h>
30 31
31#include <scsi/scsi.h> 32#include <scsi/scsi.h>
@@ -123,7 +124,12 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
123 } 124 }
124 mb = &pmb->u.mb; 125 mb = &pmb->u.mb;
125 126
126 lpfc_read_sparam(phba, pmb, vport->vpi); 127 rc = lpfc_read_sparam(phba, pmb, vport->vpi);
128 if (rc) {
129 mempool_free(pmb, phba->mbox_mem_pool);
130 return -ENOMEM;
131 }
132
127 /* 133 /*
128 * Grab buffer pointer and clear context1 so we can use 134 * Grab buffer pointer and clear context1 so we can use
129 * lpfc_sli_issue_box_wait 135 * lpfc_sli_issue_box_wait