aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/lpfc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--drivers/scsi/lpfc/lpfc.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c262
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c53
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c76
-rwxr-xr-x[-rw-r--r--]drivers/scsi/lpfc/lpfc_hbadisc.c129
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h34
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h131
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c515
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c394
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c1240
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h27
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h34
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
20 files changed, 1999 insertions, 995 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index aa10f7951634..1cc23a69db5e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -109,7 +109,8 @@ struct hbq_dmabuf {
109 struct lpfc_dmabuf dbuf; 109 struct lpfc_dmabuf dbuf;
110 uint32_t size; 110 uint32_t size;
111 uint32_t tag; 111 uint32_t tag;
112 struct lpfc_rcqe rcqe; 112 struct lpfc_cq_event cq_event;
113 unsigned long time_stamp;
113}; 114};
114 115
115/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ 116/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -201,6 +202,7 @@ struct lpfc_stats {
201 uint32_t elsRcvLIRR; 202 uint32_t elsRcvLIRR;
202 uint32_t elsRcvRPS; 203 uint32_t elsRcvRPS;
203 uint32_t elsRcvRPL; 204 uint32_t elsRcvRPL;
205 uint32_t elsRcvRRQ;
204 uint32_t elsXmitFLOGI; 206 uint32_t elsXmitFLOGI;
205 uint32_t elsXmitFDISC; 207 uint32_t elsXmitFDISC;
206 uint32_t elsXmitPLOGI; 208 uint32_t elsXmitPLOGI;
@@ -289,8 +291,8 @@ struct lpfc_vport {
289 291
290 uint16_t vpi; 292 uint16_t vpi;
291 uint16_t vfi; 293 uint16_t vfi;
292 uint8_t vfi_state; 294 uint8_t vpi_state;
293#define LPFC_VFI_REGISTERED 0x1 295#define LPFC_VPI_REGISTERED 0x1
294 296
295 uint32_t fc_flag; /* FC flags */ 297 uint32_t fc_flag; /* FC flags */
296/* Several of these flags are HBA centric and should be moved to 298/* Several of these flags are HBA centric and should be moved to
@@ -405,6 +407,7 @@ struct lpfc_vport {
405 uint8_t stat_data_enabled; 407 uint8_t stat_data_enabled;
406 uint8_t stat_data_blocked; 408 uint8_t stat_data_blocked;
407 struct list_head rcv_buffer_list; 409 struct list_head rcv_buffer_list;
410 unsigned long rcv_buffer_time_stamp;
408 uint32_t vport_flag; 411 uint32_t vport_flag;
409#define STATIC_VPORT 1 412#define STATIC_VPORT 1
410}; 413};
@@ -527,13 +530,16 @@ struct lpfc_hba {
527#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 530#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
528#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ 531#define DEFER_ERATT 0x2 /* Deferred error attention in progress */
529#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ 532#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */
530#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */ 533#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
531#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ 534#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
532#define FCP_XRI_ABORT_EVENT 0x20 535#define FCP_XRI_ABORT_EVENT 0x20
533#define ELS_XRI_ABORT_EVENT 0x40 536#define ELS_XRI_ABORT_EVENT 0x40
534#define ASYNC_EVENT 0x80 537#define ASYNC_EVENT 0x80
535#define LINK_DISABLED 0x100 /* Link disabled by user */ 538#define LINK_DISABLED 0x100 /* Link disabled by user */
536#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ 539#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */
540#define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */
541#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */
542 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
537 struct lpfc_dmabuf slim2p; 543 struct lpfc_dmabuf slim2p;
538 544
539 MAILBOX_t *mbox; 545 MAILBOX_t *mbox;
@@ -551,6 +557,7 @@ struct lpfc_hba {
551 uint8_t fc_linkspeed; /* Link speed after last READ_LA */ 557 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
552 558
553 uint32_t fc_eventTag; /* event tag for link attention */ 559 uint32_t fc_eventTag; /* event tag for link attention */
560 uint32_t link_events;
554 561
555 /* These fields used to be binfo */ 562 /* These fields used to be binfo */
556 uint32_t fc_pref_DID; /* preferred D_ID */ 563 uint32_t fc_pref_DID; /* preferred D_ID */
@@ -604,8 +611,8 @@ struct lpfc_hba {
604 uint32_t cfg_enable_hba_reset; 611 uint32_t cfg_enable_hba_reset;
605 uint32_t cfg_enable_hba_heartbeat; 612 uint32_t cfg_enable_hba_heartbeat;
606 uint32_t cfg_enable_bg; 613 uint32_t cfg_enable_bg;
607 uint32_t cfg_enable_fip;
608 uint32_t cfg_log_verbose; 614 uint32_t cfg_log_verbose;
615 uint32_t cfg_aer_support;
609 616
610 lpfc_vpd_t vpd; /* vital product data */ 617 lpfc_vpd_t vpd; /* vital product data */
611 618
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 9bd19aa14249..91542f786edf 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -23,12 +23,14 @@
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/aer.h>
26 27
27#include <scsi/scsi.h> 28#include <scsi/scsi.h>
28#include <scsi/scsi_device.h> 29#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
30#include <scsi/scsi_tcq.h> 31#include <scsi/scsi_tcq.h>
31#include <scsi/scsi_transport_fc.h> 32#include <scsi/scsi_transport_fc.h>
33#include <scsi/fc/fc_fs.h>
32 34
33#include "lpfc_hw4.h" 35#include "lpfc_hw4.h"
34#include "lpfc_hw.h" 36#include "lpfc_hw.h"
@@ -98,6 +100,28 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
98 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n"); 100 return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
99} 101}
100 102
103/**
104 * lpfc_enable_fip_show - Return the fip mode of the HBA
105 * @dev: class unused variable.
106 * @attr: device attribute, not used.
107 * @buf: on return contains the module description text.
108 *
109 * Returns: size of formatted string.
110 **/
111static ssize_t
112lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
113 char *buf)
114{
115 struct Scsi_Host *shost = class_to_shost(dev);
116 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
117 struct lpfc_hba *phba = vport->phba;
118
119 if (phba->hba_flag & HBA_FIP_SUPPORT)
120 return snprintf(buf, PAGE_SIZE, "1\n");
121 else
122 return snprintf(buf, PAGE_SIZE, "0\n");
123}
124
101static ssize_t 125static ssize_t
102lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, 126lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
103 char *buf) 127 char *buf)
@@ -762,9 +786,15 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
762 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) 786 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
763 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 787 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
764 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) 788 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
765 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START); 789 if (phba->sli_rev == LPFC_SLI_REV4)
790 return -EINVAL;
791 else
792 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
766 else if (strncmp(buf, "error", sizeof("error") - 1) == 0) 793 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
767 status = lpfc_do_offline(phba, LPFC_EVT_KILL); 794 if (phba->sli_rev == LPFC_SLI_REV4)
795 return -EINVAL;
796 else
797 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
768 else 798 else
769 return -EINVAL; 799 return -EINVAL;
770 800
@@ -1126,6 +1156,9 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
1126 if ((val & 0x3) != val) 1156 if ((val & 0x3) != val)
1127 return -EINVAL; 1157 return -EINVAL;
1128 1158
1159 if (phba->sli_rev == LPFC_SLI_REV4)
1160 val = 0;
1161
1129 spin_lock_irq(&phba->hbalock); 1162 spin_lock_irq(&phba->hbalock);
1130 1163
1131 old_val = phba->cfg_poll; 1164 old_val = phba->cfg_poll;
@@ -1589,6 +1622,7 @@ static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
1589static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL); 1622static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
1590static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL); 1623static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
1591static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL); 1624static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
1625static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
1592static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 1626static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
1593 lpfc_board_mode_show, lpfc_board_mode_store); 1627 lpfc_board_mode_show, lpfc_board_mode_store);
1594static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 1628static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
@@ -2759,6 +2793,196 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
2759 lpfc_link_speed_show, lpfc_link_speed_store); 2793 lpfc_link_speed_show, lpfc_link_speed_store);
2760 2794
2761/* 2795/*
2796# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
2797# 0 = aer disabled or not supported
2798# 1 = aer supported and enabled (default)
2799# Value range is [0,1]. Default value is 1.
2800*/
2801
2802/**
2803 * lpfc_aer_support_store - Set the adapter for aer support
2804 *
2805 * @dev: class device that is converted into a Scsi_host.
2806 * @attr: device attribute, not used.
2807 * @buf: containing the string "selective".
2808 * @count: unused variable.
2809 *
2810 * Description:
2811 * If the val is 1 and currently the device's AER capability was not
2812 * enabled, invoke the kernel's enable AER helper routine, trying to
2813 * enable the device's AER capability. If the helper routine enabling
2814 * AER returns success, update the device's cfg_aer_support flag to
2815 * indicate AER is supported by the device; otherwise, if the device
2816 * AER capability is already enabled to support AER, then do nothing.
2817 *
2818 * If the val is 0 and currently the device's AER support was enabled,
2819 * invoke the kernel's disable AER helper routine. After that, update
2820 * the device's cfg_aer_support flag to indicate AER is not supported
2821 * by the device; otherwise, if the device AER capability is already
2822 * disabled from supporting AER, then do nothing.
2823 *
2824 * Returns:
2825 * length of the buf on success if val is in range the intended mode
2826 * is supported.
2827 * -EINVAL if val out of range or intended mode is not supported.
2828 **/
2829static ssize_t
2830lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
2831 const char *buf, size_t count)
2832{
2833 struct Scsi_Host *shost = class_to_shost(dev);
2834 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
2835 struct lpfc_hba *phba = vport->phba;
2836 int val = 0, rc = -EINVAL;
2837
2838 /* AER not supported on OC devices yet */
2839 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
2840 return -EPERM;
2841 if (!isdigit(buf[0]))
2842 return -EINVAL;
2843 if (sscanf(buf, "%i", &val) != 1)
2844 return -EINVAL;
2845
2846 switch (val) {
2847 case 0:
2848 if (phba->hba_flag & HBA_AER_ENABLED) {
2849 rc = pci_disable_pcie_error_reporting(phba->pcidev);
2850 if (!rc) {
2851 spin_lock_irq(&phba->hbalock);
2852 phba->hba_flag &= ~HBA_AER_ENABLED;
2853 spin_unlock_irq(&phba->hbalock);
2854 phba->cfg_aer_support = 0;
2855 rc = strlen(buf);
2856 } else
2857 rc = -EPERM;
2858 } else {
2859 phba->cfg_aer_support = 0;
2860 rc = strlen(buf);
2861 }
2862 break;
2863 case 1:
2864 if (!(phba->hba_flag & HBA_AER_ENABLED)) {
2865 rc = pci_enable_pcie_error_reporting(phba->pcidev);
2866 if (!rc) {
2867 spin_lock_irq(&phba->hbalock);
2868 phba->hba_flag |= HBA_AER_ENABLED;
2869 spin_unlock_irq(&phba->hbalock);
2870 phba->cfg_aer_support = 1;
2871 rc = strlen(buf);
2872 } else
2873 rc = -EPERM;
2874 } else {
2875 phba->cfg_aer_support = 1;
2876 rc = strlen(buf);
2877 }
2878 break;
2879 default:
2880 rc = -EINVAL;
2881 break;
2882 }
2883 return rc;
2884}
2885
2886static int lpfc_aer_support = 1;
2887module_param(lpfc_aer_support, int, 1);
2888MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
2889lpfc_param_show(aer_support)
2890
2891/**
2892 * lpfc_aer_support_init - Set the initial adapters aer support flag
2893 * @phba: lpfc_hba pointer.
2894 * @val: link speed value.
2895 *
2896 * Description:
2897 * If val is in a valid range [0,1], then set the adapter's initial
2898 * cfg_aer_support field. It will be up to the driver's probe_one
2899 * routine to determine whether the device's AER support can be set
2900 * or not.
2901 *
2902 * Notes:
2903 * If the value is not in range log a kernel error message, and
2904 * choose the default value of setting AER support and return.
2905 *
2906 * Returns:
2907 * zero if val saved.
2908 * -EINVAL val out of range
2909 **/
2910static int
2911lpfc_aer_support_init(struct lpfc_hba *phba, int val)
2912{
2913 /* AER not supported on OC devices yet */
2914 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
2915 phba->cfg_aer_support = 0;
2916 return -EPERM;
2917 }
2918
2919 if (val == 0 || val == 1) {
2920 phba->cfg_aer_support = val;
2921 return 0;
2922 }
2923 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2924 "2712 lpfc_aer_support attribute value %d out "
2925 "of range, allowed values are 0|1, setting it "
2926 "to default value of 1\n", val);
2927 /* By default, try to enable AER on a device */
2928 phba->cfg_aer_support = 1;
2929 return -EINVAL;
2930}
2931
2932static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
2933 lpfc_aer_support_show, lpfc_aer_support_store);
2934
2935/**
2936 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
2937 * @dev: class device that is converted into a Scsi_host.
2938 * @attr: device attribute, not used.
2939 * @buf: containing the string "selective".
2940 * @count: unused variable.
2941 *
2942 * Description:
2943 * If the @buf contains 1 and the device currently has the AER support
2944 * enabled, then invokes the kernel AER helper routine
2945 * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
2946 * error status register.
2947 *
2948 * Notes:
2949 *
2950 * Returns:
2951 * -EINVAL if the buf does not contain the 1 or the device is not currently
2952 * enabled with the AER support.
2953 **/
2954static ssize_t
2955lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
2956 const char *buf, size_t count)
2957{
2958 struct Scsi_Host *shost = class_to_shost(dev);
2959 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2960 struct lpfc_hba *phba = vport->phba;
2961 int val, rc = -1;
2962
2963 /* AER not supported on OC devices yet */
2964 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
2965 return -EPERM;
2966 if (!isdigit(buf[0]))
2967 return -EINVAL;
2968 if (sscanf(buf, "%i", &val) != 1)
2969 return -EINVAL;
2970 if (val != 1)
2971 return -EINVAL;
2972
2973 if (phba->hba_flag & HBA_AER_ENABLED)
2974 rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
2975
2976 if (rc == 0)
2977 return strlen(buf);
2978 else
2979 return -EPERM;
2980}
2981
2982static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
2983 lpfc_aer_cleanup_state);
2984
2985/*
2762# lpfc_fcp_class: Determines FC class to use for the FCP protocol. 2986# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
2763# Value range is [2,3]. Default value is 3. 2987# Value range is [2,3]. Default value is 3.
2764*/ 2988*/
@@ -2846,7 +3070,7 @@ LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
2846# identifies what rctl value to configure the additional ring for. 3070# identifies what rctl value to configure the additional ring for.
2847# Value range is [1,0xff]. Default value is 4 (Unsolicated Data). 3071# Value range is [1,0xff]. Default value is 4 (Unsolicated Data).
2848*/ 3072*/
2849LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1, 3073LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
2850 255, "Identifies RCTL for additional ring configuration"); 3074 255, "Identifies RCTL for additional ring configuration");
2851 3075
2852/* 3076/*
@@ -2854,7 +3078,7 @@ LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
2854# identifies what type value to configure the additional ring for. 3078# identifies what type value to configure the additional ring for.
2855# Value range is [1,0xff]. Default value is 5 (LLC/SNAP). 3079# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
2856*/ 3080*/
2857LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1, 3081LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
2858 255, "Identifies TYPE for additional ring configuration"); 3082 255, "Identifies TYPE for additional ring configuration");
2859 3083
2860/* 3084/*
@@ -2947,15 +3171,6 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
2947LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 3171LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
2948 3172
2949/* 3173/*
2950# lpfc_enable_fip: When set, FIP is required to start discovery. If not
2951# set, the driver will add an FCF record manually if the port has no
2952# FCF records available and start discovery.
2953# Value range is [0,1]. Default value is 1 (enabled)
2954*/
2955LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
2956
2957
2958/*
2959# lpfc_prot_mask: i 3174# lpfc_prot_mask: i
2960# - Bit mask of host protection capabilities used to register with the 3175# - Bit mask of host protection capabilities used to register with the
2961# SCSI mid-layer 3176# SCSI mid-layer
@@ -3013,6 +3228,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3013 &dev_attr_num_discovered_ports, 3228 &dev_attr_num_discovered_ports,
3014 &dev_attr_menlo_mgmt_mode, 3229 &dev_attr_menlo_mgmt_mode,
3015 &dev_attr_lpfc_drvr_version, 3230 &dev_attr_lpfc_drvr_version,
3231 &dev_attr_lpfc_enable_fip,
3016 &dev_attr_lpfc_temp_sensor, 3232 &dev_attr_lpfc_temp_sensor,
3017 &dev_attr_lpfc_log_verbose, 3233 &dev_attr_lpfc_log_verbose,
3018 &dev_attr_lpfc_lun_queue_depth, 3234 &dev_attr_lpfc_lun_queue_depth,
@@ -3020,7 +3236,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
3020 &dev_attr_lpfc_peer_port_login, 3236 &dev_attr_lpfc_peer_port_login,
3021 &dev_attr_lpfc_nodev_tmo, 3237 &dev_attr_lpfc_nodev_tmo,
3022 &dev_attr_lpfc_devloss_tmo, 3238 &dev_attr_lpfc_devloss_tmo,
3023 &dev_attr_lpfc_enable_fip,
3024 &dev_attr_lpfc_fcp_class, 3239 &dev_attr_lpfc_fcp_class,
3025 &dev_attr_lpfc_use_adisc, 3240 &dev_attr_lpfc_use_adisc,
3026 &dev_attr_lpfc_ack0, 3241 &dev_attr_lpfc_ack0,
@@ -3061,6 +3276,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
3061 &dev_attr_lpfc_max_scsicmpl_time, 3276 &dev_attr_lpfc_max_scsicmpl_time,
3062 &dev_attr_lpfc_stat_data_ctrl, 3277 &dev_attr_lpfc_stat_data_ctrl,
3063 &dev_attr_lpfc_prot_sg_seg_cnt, 3278 &dev_attr_lpfc_prot_sg_seg_cnt,
3279 &dev_attr_lpfc_aer_support,
3280 &dev_attr_lpfc_aer_state_cleanup,
3064 NULL, 3281 NULL,
3065}; 3282};
3066 3283
@@ -3073,7 +3290,6 @@ struct device_attribute *lpfc_vport_attrs[] = {
3073 &dev_attr_lpfc_lun_queue_depth, 3290 &dev_attr_lpfc_lun_queue_depth,
3074 &dev_attr_lpfc_nodev_tmo, 3291 &dev_attr_lpfc_nodev_tmo,
3075 &dev_attr_lpfc_devloss_tmo, 3292 &dev_attr_lpfc_devloss_tmo,
3076 &dev_attr_lpfc_enable_fip,
3077 &dev_attr_lpfc_hba_queue_depth, 3293 &dev_attr_lpfc_hba_queue_depth,
3078 &dev_attr_lpfc_peer_port_login, 3294 &dev_attr_lpfc_peer_port_login,
3079 &dev_attr_lpfc_restrict_login, 3295 &dev_attr_lpfc_restrict_login,
@@ -3815,7 +4031,11 @@ lpfc_get_stats(struct Scsi_Host *shost)
3815 hs->invalid_crc_count -= lso->invalid_crc_count; 4031 hs->invalid_crc_count -= lso->invalid_crc_count;
3816 hs->error_frames -= lso->error_frames; 4032 hs->error_frames -= lso->error_frames;
3817 4033
3818 if (phba->fc_topology == TOPOLOGY_LOOP) { 4034 if (phba->hba_flag & HBA_FCOE_SUPPORT) {
4035 hs->lip_count = -1;
4036 hs->nos_count = (phba->link_events >> 1);
4037 hs->nos_count -= lso->link_events;
4038 } else if (phba->fc_topology == TOPOLOGY_LOOP) {
3819 hs->lip_count = (phba->fc_eventTag >> 1); 4039 hs->lip_count = (phba->fc_eventTag >> 1);
3820 hs->lip_count -= lso->link_events; 4040 hs->lip_count -= lso->link_events;
3821 hs->nos_count = -1; 4041 hs->nos_count = -1;
@@ -3906,7 +4126,10 @@ lpfc_reset_stats(struct Scsi_Host *shost)
3906 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; 4126 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
3907 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 4127 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
3908 lso->error_frames = pmb->un.varRdLnk.crcCnt; 4128 lso->error_frames = pmb->un.varRdLnk.crcCnt;
3909 lso->link_events = (phba->fc_eventTag >> 1); 4129 if (phba->hba_flag & HBA_FCOE_SUPPORT)
4130 lso->link_events = (phba->link_events >> 1);
4131 else
4132 lso->link_events = (phba->fc_eventTag >> 1);
3910 4133
3911 psli->stats_start = get_seconds(); 4134 psli->stats_start = get_seconds();
3912 4135
@@ -4222,14 +4445,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4222 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 4445 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
4223 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 4446 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4224 lpfc_enable_bg_init(phba, lpfc_enable_bg); 4447 lpfc_enable_bg_init(phba, lpfc_enable_bg);
4448 if (phba->sli_rev == LPFC_SLI_REV4)
4449 phba->cfg_poll = 0;
4450 else
4225 phba->cfg_poll = lpfc_poll; 4451 phba->cfg_poll = lpfc_poll;
4226 phba->cfg_soft_wwnn = 0L; 4452 phba->cfg_soft_wwnn = 0L;
4227 phba->cfg_soft_wwpn = 0L; 4453 phba->cfg_soft_wwpn = 0L;
4228 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); 4454 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
4229 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); 4455 lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
4230 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); 4456 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
4231 lpfc_enable_fip_init(phba, lpfc_enable_fip);
4232 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); 4457 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
4458 lpfc_aer_support_init(phba, lpfc_aer_support);
4233 4459
4234 return; 4460 return;
4235} 4461}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index da6bf5aac9dd..a5d9048235d9 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -26,6 +26,7 @@
26#include <scsi/scsi_host.h> 26#include <scsi/scsi_host.h>
27#include <scsi/scsi_transport_fc.h> 27#include <scsi/scsi_transport_fc.h>
28#include <scsi/scsi_bsg_fc.h> 28#include <scsi/scsi_bsg_fc.h>
29#include <scsi/fc/fc_fs.h>
29 30
30#include "lpfc_hw4.h" 31#include "lpfc_hw4.h"
31#include "lpfc_hw.h" 32#include "lpfc_hw.h"
@@ -148,8 +149,8 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
148 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 149 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
149 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 150 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
150 cmd->un.genreq64.w5.hcsw.Dfctl = 0; 151 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
151 cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; 152 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
152 cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; 153 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
153 cmd->ulpBdeCount = 1; 154 cmd->ulpBdeCount = 1;
154 cmd->ulpLe = 1; 155 cmd->ulpLe = 1;
155 cmd->ulpClass = CLASS3; 156 cmd->ulpClass = CLASS3;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 0830f37409a3..650494d622c1 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -49,6 +49,8 @@ void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
49void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); 49void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
50 50
51struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); 51struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
52void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
53void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
52void lpfc_cleanup_rpis(struct lpfc_vport *, int); 54void lpfc_cleanup_rpis(struct lpfc_vport *, int);
53int lpfc_linkdown(struct lpfc_hba *); 55int lpfc_linkdown(struct lpfc_hba *);
54void lpfc_linkdown_port(struct lpfc_vport *); 56void lpfc_linkdown_port(struct lpfc_vport *);
@@ -144,6 +146,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
144 146
145void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 147void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
146 struct lpfc_iocbq *); 148 struct lpfc_iocbq *);
149void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
150 struct lpfc_iocbq *);
147int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); 151int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
148int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); 152int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
149void lpfc_fdmi_tmo(unsigned long); 153void lpfc_fdmi_tmo(unsigned long);
@@ -188,7 +192,7 @@ int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
188void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); 192void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
189void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); 193void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
190void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); 194void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
191void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); 195void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
192void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); 196void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
193void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); 197void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
194void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); 198void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
@@ -212,7 +216,10 @@ void lpfc_stop_vport_timers(struct lpfc_vport *);
212void lpfc_poll_timeout(unsigned long ptr); 216void lpfc_poll_timeout(unsigned long ptr);
213void lpfc_poll_start_timer(struct lpfc_hba *); 217void lpfc_poll_start_timer(struct lpfc_hba *);
214void lpfc_poll_eratt(unsigned long); 218void lpfc_poll_eratt(unsigned long);
215void lpfc_sli_poll_fcp_ring(struct lpfc_hba *); 219int
220lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
221 struct lpfc_sli_ring *, uint32_t);
222
216struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); 223struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
217void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); 224void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
218uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); 225uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
@@ -235,7 +242,7 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
235int lpfc_sli_check_eratt(struct lpfc_hba *); 242int lpfc_sli_check_eratt(struct lpfc_hba *);
236void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, 243void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
237 struct lpfc_sli_ring *, uint32_t); 244 struct lpfc_sli_ring *, uint32_t);
238int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); 245void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
239void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 246void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
240int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, 247int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
241 struct lpfc_iocbq *, uint32_t); 248 struct lpfc_iocbq *, uint32_t);
@@ -361,6 +368,7 @@ void lpfc_stop_port(struct lpfc_hba *);
361void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); 368void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
362int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 369int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
363void lpfc_start_fdiscs(struct lpfc_hba *phba); 370void lpfc_start_fdiscs(struct lpfc_hba *phba);
371struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
364 372
365#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 373#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
366#define HBA_EVENT_RSCN 5 374#define HBA_EVENT_RSCN 5
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 9a1bd9534d74..0ebcd9baca79 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -31,6 +31,7 @@
31#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/fc/fc_fs.h>
34 35
35#include "lpfc_hw4.h" 36#include "lpfc_hw4.h"
36#include "lpfc_hw.h" 37#include "lpfc_hw.h"
@@ -87,7 +88,6 @@ void
87lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 88lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
88 struct lpfc_iocbq *piocbq) 89 struct lpfc_iocbq *piocbq)
89{ 90{
90
91 struct lpfc_dmabuf *mp = NULL; 91 struct lpfc_dmabuf *mp = NULL;
92 IOCB_t *icmd = &piocbq->iocb; 92 IOCB_t *icmd = &piocbq->iocb;
93 int i; 93 int i;
@@ -160,6 +160,39 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
160 } 160 }
161} 161}
162 162
163/**
164 * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
165 * @phba: Pointer to HBA context object.
166 * @pring: Pointer to the driver internal I/O ring.
167 * @piocbq: Pointer to the IOCBQ.
168 *
169 * This function serves as the default handler for the sli4 unsolicited
170 * abort event. It shall be invoked when there is no application interface
171 * registered unsolicited abort handler. This handler does nothing but
172 * just simply releases the dma buffer used by the unsol abort event.
173 **/
174void
175lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
176 struct lpfc_sli_ring *pring,
177 struct lpfc_iocbq *piocbq)
178{
179 IOCB_t *icmd = &piocbq->iocb;
180 struct lpfc_dmabuf *bdeBuf;
181 uint32_t size;
182
183 /* Forward abort event to any process registered to receive ct event */
184 lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
185
186 /* If there is no BDE associated with IOCB, there is nothing to do */
187 if (icmd->ulpBdeCount == 0)
188 return;
189 bdeBuf = piocbq->context2;
190 piocbq->context2 = NULL;
191 size = icmd->un.cont64[0].tus.f.bdeSize;
192 lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
193 lpfc_in_buf_free(phba, bdeBuf);
194}
195
163static void 196static void
164lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) 197lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
165{ 198{
@@ -304,8 +337,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
304 /* Fill in rest of iocb */ 337 /* Fill in rest of iocb */
305 icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 338 icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
306 icmd->un.genreq64.w5.hcsw.Dfctl = 0; 339 icmd->un.genreq64.w5.hcsw.Dfctl = 0;
307 icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; 340 icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
308 icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; 341 icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
309 342
310 if (!tmo) { 343 if (!tmo) {
311 /* FC spec states we need 3 * ratov for CT requests */ 344 /* FC spec states we need 3 * ratov for CT requests */
@@ -363,9 +396,14 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
363 outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt); 396 outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
364 if (!outmp) 397 if (!outmp)
365 return -ENOMEM; 398 return -ENOMEM;
366 399 /*
400 * Form the CT IOCB. The total number of BDEs in this IOCB
401 * is the single command plus response count from
402 * lpfc_alloc_ct_rsp.
403 */
404 cnt += 1;
367 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0, 405 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
368 cnt+1, 0, retry); 406 cnt, 0, retry);
369 if (status) { 407 if (status) {
370 lpfc_free_ct_rsp(phba, outmp); 408 lpfc_free_ct_rsp(phba, outmp);
371 return -ENOMEM; 409 return -ENOMEM;
@@ -501,6 +539,9 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
501 SLI_CTNS_GFF_ID, 539 SLI_CTNS_GFF_ID,
502 0, Did) == 0) 540 0, Did) == 0)
503 vport->num_disc_nodes++; 541 vport->num_disc_nodes++;
542 else
543 lpfc_setup_disc_node
544 (vport, Did);
504 } 545 }
505 else { 546 else {
506 lpfc_debugfs_disc_trc(vport, 547 lpfc_debugfs_disc_trc(vport,
@@ -1209,7 +1250,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1209 be16_to_cpu(SLI_CTNS_RFF_ID); 1250 be16_to_cpu(SLI_CTNS_RFF_ID);
1210 CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); 1251 CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
1211 CtReq->un.rff.fbits = FC4_FEATURE_INIT; 1252 CtReq->un.rff.fbits = FC4_FEATURE_INIT;
1212 CtReq->un.rff.type_code = FC_FCP_DATA; 1253 CtReq->un.rff.type_code = FC_TYPE_FCP;
1213 cmpl = lpfc_cmpl_ct_cmd_rff_id; 1254 cmpl = lpfc_cmpl_ct_cmd_rff_id;
1214 break; 1255 break;
1215 } 1256 }
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 8d0f0de76b63..391584183d81 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -926,7 +926,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
926 goto out; 926 goto out;
927 927
928 /* Round to page boundry */ 928 /* Round to page boundry */
929 printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n", 929 printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
930 __func__, _dump_buf_data); 930 __func__, _dump_buf_data);
931 debug->buffer = _dump_buf_data; 931 debug->buffer = _dump_buf_data;
932 if (!debug->buffer) { 932 if (!debug->buffer) {
@@ -956,8 +956,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
956 goto out; 956 goto out;
957 957
958 /* Round to page boundry */ 958 /* Round to page boundry */
959 printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__, 959 printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n",
960 _dump_buf_dif, file->f_dentry->d_name.name); 960 __func__, _dump_buf_dif, file->f_dentry->d_name.name);
961 debug->buffer = _dump_buf_dif; 961 debug->buffer = _dump_buf_dif;
962 if (!debug->buffer) { 962 if (!debug->buffer) {
963 kfree(debug); 963 kfree(debug);
@@ -1377,7 +1377,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1377 debugfs_create_dir(name, phba->hba_debugfs_root); 1377 debugfs_create_dir(name, phba->hba_debugfs_root);
1378 if (!vport->vport_debugfs_root) { 1378 if (!vport->vport_debugfs_root) {
1379 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1379 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1380 "0417 Cant create debugfs"); 1380 "0417 Cant create debugfs\n");
1381 goto debug_failed; 1381 goto debug_failed;
1382 } 1382 }
1383 atomic_inc(&phba->debugfs_vport_count); 1383 atomic_inc(&phba->debugfs_vport_count);
@@ -1430,7 +1430,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
1430 vport, &lpfc_debugfs_op_nodelist); 1430 vport, &lpfc_debugfs_op_nodelist);
1431 if (!vport->debug_nodelist) { 1431 if (!vport->debug_nodelist) {
1432 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 1432 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1433 "0409 Cant create debugfs nodelist"); 1433 "0409 Cant create debugfs nodelist\n");
1434 goto debug_failed; 1434 goto debug_failed;
1435 } 1435 }
1436debug_failed: 1436debug_failed:
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1142070e9484..2851d75ffc6f 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -19,7 +19,7 @@
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */ 21#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */
22#define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */ 22#define FC_MAX_NS_RSP 64512 /* max size NameServer rsp */
23#define FC_MAXLOOP 126 /* max devices supported on a fc loop */ 23#define FC_MAXLOOP 126 /* max devices supported on a fc loop */
24#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */ 24#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */
25 25
@@ -105,8 +105,6 @@ struct lpfc_nodelist {
105 struct lpfc_vport *vport; 105 struct lpfc_vport *vport;
106 struct lpfc_work_evt els_retry_evt; 106 struct lpfc_work_evt els_retry_evt;
107 struct lpfc_work_evt dev_loss_evt; 107 struct lpfc_work_evt dev_loss_evt;
108 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
109 unsigned long last_q_full_time; /* jiffy of last queue full */
110 struct kref kref; 108 struct kref kref;
111 atomic_t cmd_pending; 109 atomic_t cmd_pending;
112 uint32_t cmd_qdepth; 110 uint32_t cmd_qdepth;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index a14ab4580d4e..ce522702a6c1 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -173,13 +173,26 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
173 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 173 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
174 */ 174 */
175 if ((did == Fabric_DID) && 175 if ((did == Fabric_DID) &&
176 bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) && 176 (phba->hba_flag & HBA_FIP_SUPPORT) &&
177 ((elscmd == ELS_CMD_FLOGI) || 177 ((elscmd == ELS_CMD_FLOGI) ||
178 (elscmd == ELS_CMD_FDISC) || 178 (elscmd == ELS_CMD_FDISC) ||
179 (elscmd == ELS_CMD_LOGO))) 179 (elscmd == ELS_CMD_LOGO)))
180 elsiocb->iocb_flag |= LPFC_FIP_ELS; 180 switch (elscmd) {
181 case ELS_CMD_FLOGI:
182 elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
183 & LPFC_FIP_ELS_ID_MASK);
184 break;
185 case ELS_CMD_FDISC:
186 elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
187 & LPFC_FIP_ELS_ID_MASK);
188 break;
189 case ELS_CMD_LOGO:
190 elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
191 & LPFC_FIP_ELS_ID_MASK);
192 break;
193 }
181 else 194 else
182 elsiocb->iocb_flag &= ~LPFC_FIP_ELS; 195 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
183 196
184 icmd = &elsiocb->iocb; 197 icmd = &elsiocb->iocb;
185 198
@@ -591,7 +604,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
591 } else { 604 } else {
592 ndlp->nlp_type |= NLP_FABRIC; 605 ndlp->nlp_type |= NLP_FABRIC;
593 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 606 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
594 if (vport->vfi_state & LPFC_VFI_REGISTERED) { 607 if (vport->vpi_state & LPFC_VPI_REGISTERED) {
595 lpfc_start_fdiscs(phba); 608 lpfc_start_fdiscs(phba);
596 lpfc_do_scr_ns_plogi(phba, vport); 609 lpfc_do_scr_ns_plogi(phba, vport);
597 } else 610 } else
@@ -2452,6 +2465,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2452 */ 2465 */
2453 del_timer_sync(&ndlp->nlp_delayfunc); 2466 del_timer_sync(&ndlp->nlp_delayfunc);
2454 retry = ndlp->nlp_retry; 2467 retry = ndlp->nlp_retry;
2468 ndlp->nlp_retry = 0;
2455 2469
2456 switch (cmd) { 2470 switch (cmd) {
2457 case ELS_CMD_FLOGI: 2471 case ELS_CMD_FLOGI:
@@ -2711,12 +2725,16 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2711 !lpfc_error_lost_link(irsp)) { 2725 !lpfc_error_lost_link(irsp)) {
2712 /* FLOGI retry policy */ 2726 /* FLOGI retry policy */
2713 retry = 1; 2727 retry = 1;
2714 maxretry = 48; 2728 /* retry forever */
2715 if (cmdiocb->retry >= 32) 2729 maxretry = 0;
2730 if (cmdiocb->retry >= 100)
2731 delay = 5000;
2732 else if (cmdiocb->retry >= 32)
2716 delay = 1000; 2733 delay = 1000;
2717 } 2734 }
2718 2735
2719 if ((++cmdiocb->retry) >= maxretry) { 2736 cmdiocb->retry++;
2737 if (maxretry && (cmdiocb->retry >= maxretry)) {
2720 phba->fc_stat.elsRetryExceeded++; 2738 phba->fc_stat.elsRetryExceeded++;
2721 retry = 0; 2739 retry = 0;
2722 } 2740 }
@@ -4503,6 +4521,29 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4503} 4521}
4504 4522
4505/** 4523/**
4524 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
4525 * @vport: pointer to a host virtual N_Port data structure.
4526 * @cmdiocb: pointer to lpfc command iocb data structure.
4527 * @ndlp: pointer to a node-list data structure.
4528 *
4529 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
4530 * received as an ELS unsolicited event. A request to RRQ shall only
4531 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
4532 * Nx_Port N_Port_ID of the target Exchange is the same as the
4533 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
4534 * not accepted, an LS_RJT with reason code "Unable to perform
4535 * command request" and reason code explanation "Invalid Originator
4536 * S_ID" shall be returned. For now, we just unconditionally accept
4537 * RRQ from the target.
4538 **/
4539static void
4540lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4541 struct lpfc_nodelist *ndlp)
4542{
4543 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4544}
4545
4546/**
4506 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 4547 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
4507 * @phba: pointer to lpfc hba data structure. 4548 * @phba: pointer to lpfc hba data structure.
4508 * @pmb: pointer to the driver internal queue element for mailbox command. 4549 * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -5396,7 +5437,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5396 if (lpfc_els_chk_latt(vport)) 5437 if (lpfc_els_chk_latt(vport))
5397 goto dropit; 5438 goto dropit;
5398 5439
5399 /* Ignore traffic recevied during vport shutdown. */ 5440 /* Ignore traffic received during vport shutdown. */
5400 if (vport->load_flag & FC_UNLOADING) 5441 if (vport->load_flag & FC_UNLOADING)
5401 goto dropit; 5442 goto dropit;
5402 5443
@@ -5618,6 +5659,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5618 if (newnode) 5659 if (newnode)
5619 lpfc_nlp_put(ndlp); 5660 lpfc_nlp_put(ndlp);
5620 break; 5661 break;
5662 case ELS_CMD_RRQ:
5663 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5664 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
5665 did, vport->port_state, ndlp->nlp_flag);
5666
5667 phba->fc_stat.elsRcvRRQ++;
5668 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
5669 if (newnode)
5670 lpfc_nlp_put(ndlp);
5671 break;
5621 default: 5672 default:
5622 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 5673 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5623 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 5674 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -5670,7 +5721,7 @@ dropit:
5670 * NULL - No vport with the matching @vpi found 5721 * NULL - No vport with the matching @vpi found
5671 * Otherwise - Address to the vport with the matching @vpi. 5722 * Otherwise - Address to the vport with the matching @vpi.
5672 **/ 5723 **/
5673static struct lpfc_vport * 5724struct lpfc_vport *
5674lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) 5725lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5675{ 5726{
5676 struct lpfc_vport *vport; 5727 struct lpfc_vport *vport;
@@ -6024,11 +6075,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6024 irsp->ulpStatus, irsp->un.ulpWord[4]); 6075 irsp->ulpStatus, irsp->un.ulpWord[4]);
6025 goto fdisc_failed; 6076 goto fdisc_failed;
6026 } 6077 }
6027 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
6028 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6029 lpfc_nlp_put(ndlp);
6030 /* giving up on FDISC. Cancel discovery timer */
6031 lpfc_can_disctmo(vport);
6032 spin_lock_irq(shost->host_lock); 6078 spin_lock_irq(shost->host_lock);
6033 vport->fc_flag |= FC_FABRIC; 6079 vport->fc_flag |= FC_FABRIC;
6034 if (vport->phba->fc_topology == TOPOLOGY_LOOP) 6080 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
@@ -6107,6 +6153,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6107 int did = ndlp->nlp_DID; 6153 int did = ndlp->nlp_DID;
6108 int rc; 6154 int rc;
6109 6155
6156 vport->port_state = LPFC_FDISC;
6110 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 6157 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
6111 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 6158 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
6112 ELS_CMD_FDISC); 6159 ELS_CMD_FDISC);
@@ -6172,7 +6219,6 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6172 return 1; 6219 return 1;
6173 } 6220 }
6174 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 6221 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
6175 vport->port_state = LPFC_FDISC;
6176 return 0; 6222 return 0;
6177} 6223}
6178 6224
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e6a47e25b218..3b9424427652 100644..100755
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,8 +525,6 @@ lpfc_work_done(struct lpfc_hba *phba)
525 spin_unlock_irq(&phba->hbalock); 525 spin_unlock_irq(&phba->hbalock);
526 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 526 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
527 } 527 }
528 if (phba->hba_flag & HBA_RECEIVE_BUFFER)
529 lpfc_sli4_handle_received_buffer(phba);
530 } 528 }
531 529
532 vports = lpfc_create_vport_work_array(phba); 530 vports = lpfc_create_vport_work_array(phba);
@@ -568,8 +566,9 @@ lpfc_work_done(struct lpfc_hba *phba)
568 pring = &phba->sli.ring[LPFC_ELS_RING]; 566 pring = &phba->sli.ring[LPFC_ELS_RING];
569 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 567 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
570 status >>= (4*LPFC_ELS_RING); 568 status >>= (4*LPFC_ELS_RING);
571 if ((status & HA_RXMASK) 569 if ((status & HA_RXMASK) ||
572 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { 570 (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
571 (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
573 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 572 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
574 pring->flag |= LPFC_DEFERRED_RING_EVENT; 573 pring->flag |= LPFC_DEFERRED_RING_EVENT;
575 /* Set the lpfc data pending flag */ 574 /* Set the lpfc data pending flag */
@@ -688,7 +687,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
688 lpfc_unreg_rpi(vport, ndlp); 687 lpfc_unreg_rpi(vport, ndlp);
689 688
690 /* Leave Fabric nodes alone on link down */ 689 /* Leave Fabric nodes alone on link down */
691 if (!remove && ndlp->nlp_type & NLP_FABRIC) 690 if ((phba->sli_rev < LPFC_SLI_REV4) &&
691 (!remove && ndlp->nlp_type & NLP_FABRIC))
692 continue; 692 continue;
693 rc = lpfc_disc_state_machine(vport, ndlp, NULL, 693 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
694 remove 694 remove
@@ -706,6 +706,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
706void 706void
707lpfc_port_link_failure(struct lpfc_vport *vport) 707lpfc_port_link_failure(struct lpfc_vport *vport)
708{ 708{
709 /* Cleanup any outstanding received buffers */
710 lpfc_cleanup_rcv_buffers(vport);
711
709 /* Cleanup any outstanding RSCN activity */ 712 /* Cleanup any outstanding RSCN activity */
710 lpfc_els_flush_rscn(vport); 713 lpfc_els_flush_rscn(vport);
711 714
@@ -1015,13 +1018,12 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1015 mempool_free(mboxq, phba->mbox_mem_pool); 1018 mempool_free(mboxq, phba->mbox_mem_pool);
1016 return; 1019 return;
1017 } 1020 }
1018 if (vport->port_state != LPFC_FLOGI) { 1021 spin_lock_irqsave(&phba->hbalock, flags);
1019 spin_lock_irqsave(&phba->hbalock, flags); 1022 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1020 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1023 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1021 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1024 spin_unlock_irqrestore(&phba->hbalock, flags);
1022 spin_unlock_irqrestore(&phba->hbalock, flags); 1025 if (vport->port_state != LPFC_FLOGI)
1023 lpfc_initial_flogi(vport); 1026 lpfc_initial_flogi(vport);
1024 }
1025 1027
1026 mempool_free(mboxq, phba->mbox_mem_pool); 1028 mempool_free(mboxq, phba->mbox_mem_pool);
1027 return; 1029 return;
@@ -1199,6 +1201,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1199 1201
1200 /* If the FCF is not availabe do nothing. */ 1202 /* If the FCF is not availabe do nothing. */
1201 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1203 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1204 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1202 spin_unlock_irqrestore(&phba->hbalock, flags); 1205 spin_unlock_irqrestore(&phba->hbalock, flags);
1203 return; 1206 return;
1204 } 1207 }
@@ -1216,15 +1219,23 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1216 1219
1217 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1220 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
1218 GFP_KERNEL); 1221 GFP_KERNEL);
1219 if (!fcf_mbxq) 1222 if (!fcf_mbxq) {
1223 spin_lock_irqsave(&phba->hbalock, flags);
1224 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1225 spin_unlock_irqrestore(&phba->hbalock, flags);
1220 return; 1226 return;
1227 }
1221 1228
1222 lpfc_reg_fcfi(phba, fcf_mbxq); 1229 lpfc_reg_fcfi(phba, fcf_mbxq);
1223 fcf_mbxq->vport = phba->pport; 1230 fcf_mbxq->vport = phba->pport;
1224 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; 1231 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1225 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1232 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1226 if (rc == MBX_NOT_FINISHED) 1233 if (rc == MBX_NOT_FINISHED) {
1234 spin_lock_irqsave(&phba->hbalock, flags);
1235 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1236 spin_unlock_irqrestore(&phba->hbalock, flags);
1227 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1237 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1238 }
1228 1239
1229 return; 1240 return;
1230} 1241}
@@ -1253,13 +1264,27 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1253 uint16_t *vlan_id) 1264 uint16_t *vlan_id)
1254{ 1265{
1255 struct lpfc_fcf_conn_entry *conn_entry; 1266 struct lpfc_fcf_conn_entry *conn_entry;
1267 int i, j, fcf_vlan_id = 0;
1268
1269 /* Find the lowest VLAN id in the FCF record */
1270 for (i = 0; i < 512; i++) {
1271 if (new_fcf_record->vlan_bitmap[i]) {
1272 fcf_vlan_id = i * 8;
1273 j = 0;
1274 while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
1275 j++;
1276 fcf_vlan_id++;
1277 }
1278 break;
1279 }
1280 }
1256 1281
1257 /* If FCF not available return 0 */ 1282 /* If FCF not available return 0 */
1258 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || 1283 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1259 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) 1284 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
1260 return 0; 1285 return 0;
1261 1286
1262 if (!phba->cfg_enable_fip) { 1287 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1263 *boot_flag = 0; 1288 *boot_flag = 0;
1264 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1289 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1265 new_fcf_record); 1290 new_fcf_record);
@@ -1286,7 +1311,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1286 if (*addr_mode & LPFC_FCF_FPMA) 1311 if (*addr_mode & LPFC_FCF_FPMA)
1287 *addr_mode = LPFC_FCF_FPMA; 1312 *addr_mode = LPFC_FCF_FPMA;
1288 1313
1289 *vlan_id = 0xFFFF; 1314 /* If FCF record report a vlan id use that vlan id */
1315 if (fcf_vlan_id)
1316 *vlan_id = fcf_vlan_id;
1317 else
1318 *vlan_id = 0xFFFF;
1290 return 1; 1319 return 1;
1291 } 1320 }
1292 1321
@@ -1384,8 +1413,15 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1384 (*addr_mode & LPFC_FCF_FPMA)) 1413 (*addr_mode & LPFC_FCF_FPMA))
1385 *addr_mode = LPFC_FCF_FPMA; 1414 *addr_mode = LPFC_FCF_FPMA;
1386 1415
1416 /* If matching connect list has a vlan id, use it */
1387 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) 1417 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1388 *vlan_id = conn_entry->conn_rec.vlan_tag; 1418 *vlan_id = conn_entry->conn_rec.vlan_tag;
1419 /*
1420 * If no vlan id is specified in connect list, use the vlan id
1421 * in the FCF record
1422 */
1423 else if (fcf_vlan_id)
1424 *vlan_id = fcf_vlan_id;
1389 else 1425 else
1390 *vlan_id = 0xFFFF; 1426 *vlan_id = 0xFFFF;
1391 1427
@@ -1423,6 +1459,15 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1423 1459
1424 if (phba->link_state >= LPFC_LINK_UP) 1460 if (phba->link_state >= LPFC_LINK_UP)
1425 lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 1461 lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
1462 else {
1463 /*
1464 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
1465 * flag
1466 */
1467 spin_lock_irq(&phba->hbalock);
1468 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1469 spin_unlock_irq(&phba->hbalock);
1470 }
1426 1471
1427 if (unreg_fcf) { 1472 if (unreg_fcf) {
1428 spin_lock_irq(&phba->hbalock); 1473 spin_lock_irq(&phba->hbalock);
@@ -1659,9 +1704,8 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1659 lpfc_initial_fdisc(vport); 1704 lpfc_initial_fdisc(vport);
1660 else { 1705 else {
1661 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 1706 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
1662 lpfc_printf_vlog(vport, KERN_ERR, 1707 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1663 LOG_ELS, 1708 "2606 No NPIV Fabric support\n");
1664 "2606 No NPIV Fabric support\n");
1665 } 1709 }
1666 return; 1710 return;
1667} 1711}
@@ -1756,8 +1800,8 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1756 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1800 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1757 goto fail_free_mem; 1801 goto fail_free_mem;
1758 } 1802 }
1759 /* Mark the vport has registered with its VFI */ 1803 /* The VPI is implicitly registered when the VFI is registered */
1760 vport->vfi_state |= LPFC_VFI_REGISTERED; 1804 vport->vpi_state |= LPFC_VPI_REGISTERED;
1761 1805
1762 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 1806 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1763 lpfc_start_fdiscs(phba); 1807 lpfc_start_fdiscs(phba);
@@ -1861,7 +1905,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1861 if (phba->fc_topology == TOPOLOGY_LOOP) { 1905 if (phba->fc_topology == TOPOLOGY_LOOP) {
1862 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 1906 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
1863 1907
1864 if (phba->cfg_enable_npiv) 1908 /* if npiv is enabled and this adapter supports npiv log
1909 * a message that npiv is not supported in this topology
1910 */
1911 if (phba->cfg_enable_npiv && phba->max_vpi)
1865 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1912 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1866 "1309 Link Up Event npiv not supported in loop " 1913 "1309 Link Up Event npiv not supported in loop "
1867 "topology\n"); 1914 "topology\n");
@@ -1955,7 +2002,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1955 * is phase 1 implementation that support FCF index 0 and driver 2002 * is phase 1 implementation that support FCF index 0 and driver
1956 * defaults. 2003 * defaults.
1957 */ 2004 */
1958 if (phba->cfg_enable_fip == 0) { 2005 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1959 fcf_record = kzalloc(sizeof(struct fcf_record), 2006 fcf_record = kzalloc(sizeof(struct fcf_record),
1960 GFP_KERNEL); 2007 GFP_KERNEL);
1961 if (unlikely(!fcf_record)) { 2008 if (unlikely(!fcf_record)) {
@@ -2085,6 +2132,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2085 else 2132 else
2086 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 2133 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
2087 2134
2135 phba->link_events++;
2088 if (la->attType == AT_LINK_UP && (!la->mm)) { 2136 if (la->attType == AT_LINK_UP && (!la->mm)) {
2089 phba->fc_stat.LinkUp++; 2137 phba->fc_stat.LinkUp++;
2090 if (phba->link_flag & LS_LOOPBACK_MODE) { 2138 if (phba->link_flag & LS_LOOPBACK_MODE) {
@@ -2211,13 +2259,14 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2211 mb->mbxStatus); 2259 mb->mbxStatus);
2212 break; 2260 break;
2213 } 2261 }
2262 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
2214 vport->unreg_vpi_cmpl = VPORT_OK; 2263 vport->unreg_vpi_cmpl = VPORT_OK;
2215 mempool_free(pmb, phba->mbox_mem_pool); 2264 mempool_free(pmb, phba->mbox_mem_pool);
2216 /* 2265 /*
2217 * This shost reference might have been taken at the beginning of 2266 * This shost reference might have been taken at the beginning of
2218 * lpfc_vport_delete() 2267 * lpfc_vport_delete()
2219 */ 2268 */
2220 if (vport->load_flag & FC_UNLOADING) 2269 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
2221 scsi_host_put(shost); 2270 scsi_host_put(shost);
2222} 2271}
2223 2272
@@ -2268,6 +2317,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2268 goto out; 2317 goto out;
2269 } 2318 }
2270 2319
2320 vport->vpi_state |= LPFC_VPI_REGISTERED;
2271 vport->num_disc_nodes = 0; 2321 vport->num_disc_nodes = 0;
2272 /* go thru NPR list and issue ELS PLOGIs */ 2322 /* go thru NPR list and issue ELS PLOGIs */
2273 if (vport->fc_npr_cnt) 2323 if (vport->fc_npr_cnt)
@@ -3077,7 +3127,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3077 struct lpfc_sli *psli; 3127 struct lpfc_sli *psli;
3078 struct lpfc_sli_ring *pring; 3128 struct lpfc_sli_ring *pring;
3079 struct lpfc_iocbq *iocb, *next_iocb; 3129 struct lpfc_iocbq *iocb, *next_iocb;
3080 uint32_t rpi, i; 3130 uint32_t i;
3081 3131
3082 lpfc_fabric_abort_nport(ndlp); 3132 lpfc_fabric_abort_nport(ndlp);
3083 3133
@@ -3086,7 +3136,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3086 * by firmware with a no rpi error. 3136 * by firmware with a no rpi error.
3087 */ 3137 */
3088 psli = &phba->sli; 3138 psli = &phba->sli;
3089 rpi = ndlp->nlp_rpi;
3090 if (ndlp->nlp_flag & NLP_RPI_VALID) { 3139 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3091 /* Now process each ring */ 3140 /* Now process each ring */
3092 for (i = 0; i < psli->num_rings; i++) { 3141 for (i = 0; i < psli->num_rings; i++) {
@@ -4322,6 +4371,14 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
4322 ret = 1; 4371 ret = 1;
4323 spin_unlock_irq(shost->host_lock); 4372 spin_unlock_irq(shost->host_lock);
4324 goto out; 4373 goto out;
4374 } else {
4375 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
4376 "2624 RPI %x DID %x flg %x still "
4377 "logged in\n",
4378 ndlp->nlp_rpi, ndlp->nlp_DID,
4379 ndlp->nlp_flag);
4380 if (ndlp->nlp_flag & NLP_RPI_VALID)
4381 ret = 1;
4325 } 4382 }
4326 } 4383 }
4327 spin_unlock_irq(shost->host_lock); 4384 spin_unlock_irq(shost->host_lock);
@@ -4400,7 +4457,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4400 */ 4457 */
4401 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || 4458 if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
4402 !(phba->fcf.fcf_flag & FCF_REGISTERED) || 4459 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
4403 (phba->cfg_enable_fip == 0)) { 4460 (!(phba->hba_flag & HBA_FIP_SUPPORT))) {
4404 spin_unlock_irq(&phba->hbalock); 4461 spin_unlock_irq(&phba->hbalock);
4405 return; 4462 return;
4406 } 4463 }
@@ -4409,6 +4466,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4409 if (lpfc_fcf_inuse(phba)) 4466 if (lpfc_fcf_inuse(phba))
4410 return; 4467 return;
4411 4468
4469 /* At this point, all discovery is aborted */
4470 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4412 4471
4413 /* Unregister VPIs */ 4472 /* Unregister VPIs */
4414 vports = lpfc_create_vport_work_array(phba); 4473 vports = lpfc_create_vport_work_array(phba);
@@ -4416,8 +4475,8 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4416 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) 4475 (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
4417 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 4476 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4418 lpfc_mbx_unreg_vpi(vports[i]); 4477 lpfc_mbx_unreg_vpi(vports[i]);
4419 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4478 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
4420 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; 4479 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
4421 } 4480 }
4422 lpfc_destroy_vport_work_array(phba, vports); 4481 lpfc_destroy_vport_work_array(phba, vports);
4423 4482
@@ -4431,7 +4490,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
4431 return; 4490 return;
4432 } 4491 }
4433 4492
4434 lpfc_unreg_vfi(mbox, phba->pport->vfi); 4493 lpfc_unreg_vfi(mbox, phba->pport);
4435 mbox->vport = phba->pport; 4494 mbox->vport = phba->pport;
4436 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; 4495 mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
4437 4496
@@ -4512,8 +4571,10 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
4512 4571
4513 /* Free the current connect table */ 4572 /* Free the current connect table */
4514 list_for_each_entry_safe(conn_entry, next_conn_entry, 4573 list_for_each_entry_safe(conn_entry, next_conn_entry,
4515 &phba->fcf_conn_rec_list, list) 4574 &phba->fcf_conn_rec_list, list) {
4575 list_del_init(&conn_entry->list);
4516 kfree(conn_entry); 4576 kfree(conn_entry);
4577 }
4517 4578
4518 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; 4579 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
4519 record_count = conn_hdr->length * sizeof(uint32_t)/ 4580 record_count = conn_hdr->length * sizeof(uint32_t)/
@@ -4569,14 +4630,6 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
4569 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) 4630 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
4570 return; 4631 return;
4571 4632
4572 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4573 FIPP_MODE_ON)
4574 phba->cfg_enable_fip = 1;
4575
4576 if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
4577 FIPP_MODE_OFF)
4578 phba->cfg_enable_fip = 0;
4579
4580 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { 4633 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
4581 phba->valid_vlan = 1; 4634 phba->valid_vlan = 1;
4582 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 4635 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ccb26724dc53..c9faa1d8c3c8 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1124,21 +1124,6 @@ typedef struct {
1124/* Number of 4-byte words in an IOCB. */ 1124/* Number of 4-byte words in an IOCB. */
1125#define IOCB_WORD_SZ 8 1125#define IOCB_WORD_SZ 8
1126 1126
1127/* defines for type field in fc header */
1128#define FC_ELS_DATA 0x1
1129#define FC_LLC_SNAP 0x5
1130#define FC_FCP_DATA 0x8
1131#define FC_COMMON_TRANSPORT_ULP 0x20
1132
1133/* defines for rctl field in fc header */
1134#define FC_DEV_DATA 0x0
1135#define FC_UNSOL_CTL 0x2
1136#define FC_SOL_CTL 0x3
1137#define FC_UNSOL_DATA 0x4
1138#define FC_FCP_CMND 0x6
1139#define FC_ELS_REQ 0x22
1140#define FC_ELS_RSP 0x23
1141
1142/* network headers for Dfctl field */ 1127/* network headers for Dfctl field */
1143#define FC_NET_HDR 0x20 1128#define FC_NET_HDR 0x20
1144 1129
@@ -1183,6 +1168,8 @@ typedef struct {
1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1168#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 1169#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704 1170#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1171#define PCI_DEVICE_ID_TOMCAT 0x0714
1172#define PCI_DEVICE_ID_FALCON 0xf180
1186 1173
1187#define JEDEC_ID_ADDRESS 0x0080001c 1174#define JEDEC_ID_ADDRESS 0x0080001c
1188#define FIREFLY_JEDEC_ID 0x1ACC 1175#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1444,6 +1431,7 @@ typedef struct { /* FireFly BIU registers */
1444#define CMD_ABORT_MXRI64_CN 0x8C 1431#define CMD_ABORT_MXRI64_CN 0x8C
1445#define CMD_RCV_ELS_REQ64_CX 0x8D 1432#define CMD_RCV_ELS_REQ64_CX 0x8D
1446#define CMD_XMIT_ELS_RSP64_CX 0x95 1433#define CMD_XMIT_ELS_RSP64_CX 0x95
1434#define CMD_XMIT_BLS_RSP64_CX 0x97
1447#define CMD_FCP_IWRITE64_CR 0x98 1435#define CMD_FCP_IWRITE64_CR 0x98
1448#define CMD_FCP_IWRITE64_CX 0x99 1436#define CMD_FCP_IWRITE64_CX 0x99
1449#define CMD_FCP_IREAD64_CR 0x9A 1437#define CMD_FCP_IREAD64_CR 0x9A
@@ -2306,8 +2294,7 @@ typedef struct {
2306 uint32_t rsvd1; 2294 uint32_t rsvd1;
2307 uint32_t rsvd2:8; 2295 uint32_t rsvd2:8;
2308 uint32_t sid:24; 2296 uint32_t sid:24;
2309 uint32_t rsvd3; 2297 uint32_t wwn[2];
2310 uint32_t rsvd4;
2311 uint32_t rsvd5; 2298 uint32_t rsvd5;
2312 uint16_t vfi; 2299 uint16_t vfi;
2313 uint16_t vpi; 2300 uint16_t vpi;
@@ -2315,8 +2302,7 @@ typedef struct {
2315 uint32_t rsvd1; 2302 uint32_t rsvd1;
2316 uint32_t sid:24; 2303 uint32_t sid:24;
2317 uint32_t rsvd2:8; 2304 uint32_t rsvd2:8;
2318 uint32_t rsvd3; 2305 uint32_t wwn[2];
2319 uint32_t rsvd4;
2320 uint32_t rsvd5; 2306 uint32_t rsvd5;
2321 uint16_t vpi; 2307 uint16_t vpi;
2322 uint16_t vfi; 2308 uint16_t vfi;
@@ -2326,7 +2312,13 @@ typedef struct {
2326/* Structure for MB Command UNREG_VPI (0x97) */ 2312/* Structure for MB Command UNREG_VPI (0x97) */
2327typedef struct { 2313typedef struct {
2328 uint32_t rsvd1; 2314 uint32_t rsvd1;
2329 uint32_t rsvd2; 2315#ifdef __BIG_ENDIAN_BITFIELD
2316 uint16_t rsvd2;
2317 uint16_t sli4_vpi;
2318#else /* __LITTLE_ENDIAN */
2319 uint16_t sli4_vpi;
2320 uint16_t rsvd2;
2321#endif
2330 uint32_t rsvd3; 2322 uint32_t rsvd3;
2331 uint32_t rsvd4; 2323 uint32_t rsvd4;
2332 uint32_t rsvd5; 2324 uint32_t rsvd5;
@@ -3547,7 +3539,7 @@ typedef struct _IOCB { /* IOCB structure */
3547 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ 3539 ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
3548 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ 3540 QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
3549 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ 3541 struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */
3550 3542 struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */
3551 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ 3543 uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
3552 } un; 3544 } un;
3553 union { 3545 union {
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 3689eee04535..1585148a17e5 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -194,6 +194,26 @@ struct lpfc_sli4_flags {
194#define lpfc_fip_flag_WORD word0 194#define lpfc_fip_flag_WORD word0
195}; 195};
196 196
197struct sli4_bls_acc {
198 uint32_t word0_rsvd; /* Word0 must be reserved */
199 uint32_t word1;
200#define lpfc_abts_orig_SHIFT 0
201#define lpfc_abts_orig_MASK 0x00000001
202#define lpfc_abts_orig_WORD word1
203#define LPFC_ABTS_UNSOL_RSP 1
204#define LPFC_ABTS_UNSOL_INT 0
205 uint32_t word2;
206#define lpfc_abts_rxid_SHIFT 0
207#define lpfc_abts_rxid_MASK 0x0000FFFF
208#define lpfc_abts_rxid_WORD word2
209#define lpfc_abts_oxid_SHIFT 16
210#define lpfc_abts_oxid_MASK 0x0000FFFF
211#define lpfc_abts_oxid_WORD word2
212 uint32_t word3;
213 uint32_t word4;
214 uint32_t word5_rsvd; /* Word5 must be reserved */
215};
216
197/* event queue entry structure */ 217/* event queue entry structure */
198struct lpfc_eqe { 218struct lpfc_eqe {
199 uint32_t word0; 219 uint32_t word0;
@@ -425,7 +445,7 @@ struct lpfc_wqe_generic{
425#define lpfc_wqe_gen_status_MASK 0x0000000F 445#define lpfc_wqe_gen_status_MASK 0x0000000F
426#define lpfc_wqe_gen_status_WORD word7 446#define lpfc_wqe_gen_status_WORD word7
427#define lpfc_wqe_gen_ct_SHIFT 2 447#define lpfc_wqe_gen_ct_SHIFT 2
428#define lpfc_wqe_gen_ct_MASK 0x00000007 448#define lpfc_wqe_gen_ct_MASK 0x00000003
429#define lpfc_wqe_gen_ct_WORD word7 449#define lpfc_wqe_gen_ct_WORD word7
430 uint32_t abort_tag; 450 uint32_t abort_tag;
431 uint32_t word9; 451 uint32_t word9;
@@ -453,6 +473,13 @@ struct lpfc_wqe_generic{
453#define lpfc_wqe_gen_wqec_SHIFT 7 473#define lpfc_wqe_gen_wqec_SHIFT 7
454#define lpfc_wqe_gen_wqec_MASK 0x00000001 474#define lpfc_wqe_gen_wqec_MASK 0x00000001
455#define lpfc_wqe_gen_wqec_WORD word11 475#define lpfc_wqe_gen_wqec_WORD word11
476#define ELS_ID_FLOGI 3
477#define ELS_ID_FDISC 2
478#define ELS_ID_LOGO 1
479#define ELS_ID_DEFAULT 0
480#define lpfc_wqe_gen_els_id_SHIFT 4
481#define lpfc_wqe_gen_els_id_MASK 0x00000003
482#define lpfc_wqe_gen_els_id_WORD word11
456#define lpfc_wqe_gen_cmd_type_SHIFT 0 483#define lpfc_wqe_gen_cmd_type_SHIFT 0
457#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F 484#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
458#define lpfc_wqe_gen_cmd_type_WORD word11 485#define lpfc_wqe_gen_cmd_type_WORD word11
@@ -487,8 +514,8 @@ struct lpfc_register {
487 514
488#define LPFC_UERR_STATUS_HI 0x00A4 515#define LPFC_UERR_STATUS_HI 0x00A4
489#define LPFC_UERR_STATUS_LO 0x00A0 516#define LPFC_UERR_STATUS_LO 0x00A0
490#define LPFC_ONLINE0 0x00B0 517#define LPFC_UE_MASK_HI 0x00AC
491#define LPFC_ONLINE1 0x00B4 518#define LPFC_UE_MASK_LO 0x00A8
492#define LPFC_SCRATCHPAD 0x0058 519#define LPFC_SCRATCHPAD 0x0058
493 520
494/* BAR0 Registers */ 521/* BAR0 Registers */
@@ -760,6 +787,7 @@ struct mbox_header {
760#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 787#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
761#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 788#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
762#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 789#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
790#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
763#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 791#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
764 792
765/* FCoE Opcodes */ 793/* FCoE Opcodes */
@@ -1273,6 +1301,51 @@ struct lpfc_mbx_del_fcf_tbl_entry {
1273#define lpfc_mbx_del_fcf_tbl_index_WORD word10 1301#define lpfc_mbx_del_fcf_tbl_index_WORD word10
1274}; 1302};
1275 1303
1304struct lpfc_mbx_query_fw_cfg {
1305 struct mbox_header header;
1306 uint32_t config_number;
1307 uint32_t asic_rev;
1308 uint32_t phys_port;
1309 uint32_t function_mode;
1310/* firmware Function Mode */
1311#define lpfc_function_mode_toe_SHIFT 0
1312#define lpfc_function_mode_toe_MASK 0x00000001
1313#define lpfc_function_mode_toe_WORD function_mode
1314#define lpfc_function_mode_nic_SHIFT 1
1315#define lpfc_function_mode_nic_MASK 0x00000001
1316#define lpfc_function_mode_nic_WORD function_mode
1317#define lpfc_function_mode_rdma_SHIFT 2
1318#define lpfc_function_mode_rdma_MASK 0x00000001
1319#define lpfc_function_mode_rdma_WORD function_mode
1320#define lpfc_function_mode_vm_SHIFT 3
1321#define lpfc_function_mode_vm_MASK 0x00000001
1322#define lpfc_function_mode_vm_WORD function_mode
1323#define lpfc_function_mode_iscsi_i_SHIFT 4
1324#define lpfc_function_mode_iscsi_i_MASK 0x00000001
1325#define lpfc_function_mode_iscsi_i_WORD function_mode
1326#define lpfc_function_mode_iscsi_t_SHIFT 5
1327#define lpfc_function_mode_iscsi_t_MASK 0x00000001
1328#define lpfc_function_mode_iscsi_t_WORD function_mode
1329#define lpfc_function_mode_fcoe_i_SHIFT 6
1330#define lpfc_function_mode_fcoe_i_MASK 0x00000001
1331#define lpfc_function_mode_fcoe_i_WORD function_mode
1332#define lpfc_function_mode_fcoe_t_SHIFT 7
1333#define lpfc_function_mode_fcoe_t_MASK 0x00000001
1334#define lpfc_function_mode_fcoe_t_WORD function_mode
1335#define lpfc_function_mode_dal_SHIFT 8
1336#define lpfc_function_mode_dal_MASK 0x00000001
1337#define lpfc_function_mode_dal_WORD function_mode
1338#define lpfc_function_mode_lro_SHIFT 9
1339#define lpfc_function_mode_lro_MASK 0x00000001
1340#define lpfc_function_mode_lro_WORD function_mode9
1341#define lpfc_function_mode_flex10_SHIFT 10
1342#define lpfc_function_mode_flex10_MASK 0x00000001
1343#define lpfc_function_mode_flex10_WORD function_mode
1344#define lpfc_function_mode_ncsi_SHIFT 11
1345#define lpfc_function_mode_ncsi_MASK 0x00000001
1346#define lpfc_function_mode_ncsi_WORD function_mode
1347};
1348
1276/* Status field for embedded SLI_CONFIG mailbox command */ 1349/* Status field for embedded SLI_CONFIG mailbox command */
1277#define STATUS_SUCCESS 0x0 1350#define STATUS_SUCCESS 0x0
1278#define STATUS_FAILED 0x1 1351#define STATUS_FAILED 0x1
@@ -1349,8 +1422,7 @@ struct lpfc_mbx_reg_vfi {
1349#define lpfc_reg_vfi_fcfi_SHIFT 0 1422#define lpfc_reg_vfi_fcfi_SHIFT 0
1350#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF 1423#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF
1351#define lpfc_reg_vfi_fcfi_WORD word2 1424#define lpfc_reg_vfi_fcfi_WORD word2
1352 uint32_t word3_rsvd; 1425 uint32_t wwn[2];
1353 uint32_t word4_rsvd;
1354 struct ulp_bde64 bde; 1426 struct ulp_bde64 bde;
1355 uint32_t word8_rsvd; 1427 uint32_t word8_rsvd;
1356 uint32_t word9_rsvd; 1428 uint32_t word9_rsvd;
@@ -1555,6 +1627,11 @@ struct lpfc_mbx_read_rev {
1555#define lpfc_mbx_rd_rev_fcoe_SHIFT 20 1627#define lpfc_mbx_rd_rev_fcoe_SHIFT 20
1556#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 1628#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001
1557#define lpfc_mbx_rd_rev_fcoe_WORD word1 1629#define lpfc_mbx_rd_rev_fcoe_WORD word1
1630#define lpfc_mbx_rd_rev_cee_ver_SHIFT 21
1631#define lpfc_mbx_rd_rev_cee_ver_MASK 0x00000003
1632#define lpfc_mbx_rd_rev_cee_ver_WORD word1
1633#define LPFC_PREDCBX_CEE_MODE 0
1634#define LPFC_DCBX_CEE_MODE 1
1558#define lpfc_mbx_rd_rev_vpd_SHIFT 29 1635#define lpfc_mbx_rd_rev_vpd_SHIFT 29
1559#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 1636#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001
1560#define lpfc_mbx_rd_rev_vpd_WORD word1 1637#define lpfc_mbx_rd_rev_vpd_WORD word1
@@ -1804,6 +1881,7 @@ struct lpfc_mqe {
1804 struct lpfc_mbx_read_config rd_config; 1881 struct lpfc_mbx_read_config rd_config;
1805 struct lpfc_mbx_request_features req_ftrs; 1882 struct lpfc_mbx_request_features req_ftrs;
1806 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; 1883 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
1884 struct lpfc_mbx_query_fw_cfg query_fw_cfg;
1807 struct lpfc_mbx_nop nop; 1885 struct lpfc_mbx_nop nop;
1808 } un; 1886 } un;
1809}; 1887};
@@ -1885,7 +1963,7 @@ struct lpfc_acqe_link {
1885}; 1963};
1886 1964
1887struct lpfc_acqe_fcoe { 1965struct lpfc_acqe_fcoe {
1888 uint32_t fcf_index; 1966 uint32_t index;
1889 uint32_t word1; 1967 uint32_t word1;
1890#define lpfc_acqe_fcoe_fcf_count_SHIFT 0 1968#define lpfc_acqe_fcoe_fcf_count_SHIFT 0
1891#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF 1969#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF
@@ -1896,6 +1974,7 @@ struct lpfc_acqe_fcoe {
1896#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 1974#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1
1897#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 1975#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
1898#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 1976#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
1977#define LPFC_FCOE_EVENT_TYPE_CVL 0x4
1899 uint32_t event_tag; 1978 uint32_t event_tag;
1900 uint32_t trailer; 1979 uint32_t trailer;
1901}; 1980};
@@ -1921,12 +2000,13 @@ struct lpfc_bmbx_create {
1921#define SGL_ALIGN_SZ 64 2000#define SGL_ALIGN_SZ 64
1922#define SGL_PAGE_SIZE 4096 2001#define SGL_PAGE_SIZE 4096
1923/* align SGL addr on a size boundary - adjust address up */ 2002/* align SGL addr on a size boundary - adjust address up */
1924#define NO_XRI ((uint16_t)-1) 2003#define NO_XRI ((uint16_t)-1)
2004
1925struct wqe_common { 2005struct wqe_common {
1926 uint32_t word6; 2006 uint32_t word6;
1927#define wqe_xri_SHIFT 0 2007#define wqe_xri_tag_SHIFT 0
1928#define wqe_xri_MASK 0x0000FFFF 2008#define wqe_xri_tag_MASK 0x0000FFFF
1929#define wqe_xri_WORD word6 2009#define wqe_xri_tag_WORD word6
1930#define wqe_ctxt_tag_SHIFT 16 2010#define wqe_ctxt_tag_SHIFT 16
1931#define wqe_ctxt_tag_MASK 0x0000FFFF 2011#define wqe_ctxt_tag_MASK 0x0000FFFF
1932#define wqe_ctxt_tag_WORD word6 2012#define wqe_ctxt_tag_WORD word6
@@ -1987,7 +2067,7 @@ struct wqe_common {
1987#define wqe_wqec_MASK 0x00000001 2067#define wqe_wqec_MASK 0x00000001
1988#define wqe_wqec_WORD word11 2068#define wqe_wqec_WORD word11
1989#define wqe_cqid_SHIFT 16 2069#define wqe_cqid_SHIFT 16
1990#define wqe_cqid_MASK 0x000003ff 2070#define wqe_cqid_MASK 0x0000ffff
1991#define wqe_cqid_WORD word11 2071#define wqe_cqid_WORD word11
1992}; 2072};
1993 2073
@@ -1996,6 +2076,9 @@ struct wqe_did {
1996#define wqe_els_did_SHIFT 0 2076#define wqe_els_did_SHIFT 0
1997#define wqe_els_did_MASK 0x00FFFFFF 2077#define wqe_els_did_MASK 0x00FFFFFF
1998#define wqe_els_did_WORD word5 2078#define wqe_els_did_WORD word5
2079#define wqe_xmit_bls_pt_SHIFT 28
2080#define wqe_xmit_bls_pt_MASK 0x00000003
2081#define wqe_xmit_bls_pt_WORD word5
1999#define wqe_xmit_bls_ar_SHIFT 30 2082#define wqe_xmit_bls_ar_SHIFT 30
2000#define wqe_xmit_bls_ar_MASK 0x00000001 2083#define wqe_xmit_bls_ar_MASK 0x00000001
2001#define wqe_xmit_bls_ar_WORD word5 2084#define wqe_xmit_bls_ar_WORD word5
@@ -2044,6 +2127,23 @@ struct xmit_els_rsp64_wqe {
2044 2127
2045struct xmit_bls_rsp64_wqe { 2128struct xmit_bls_rsp64_wqe {
2046 uint32_t payload0; 2129 uint32_t payload0;
2130/* Payload0 for BA_ACC */
2131#define xmit_bls_rsp64_acc_seq_id_SHIFT 16
2132#define xmit_bls_rsp64_acc_seq_id_MASK 0x000000ff
2133#define xmit_bls_rsp64_acc_seq_id_WORD payload0
2134#define xmit_bls_rsp64_acc_seq_id_vald_SHIFT 24
2135#define xmit_bls_rsp64_acc_seq_id_vald_MASK 0x000000ff
2136#define xmit_bls_rsp64_acc_seq_id_vald_WORD payload0
2137/* Payload0 for BA_RJT */
2138#define xmit_bls_rsp64_rjt_vspec_SHIFT 0
2139#define xmit_bls_rsp64_rjt_vspec_MASK 0x000000ff
2140#define xmit_bls_rsp64_rjt_vspec_WORD payload0
2141#define xmit_bls_rsp64_rjt_expc_SHIFT 8
2142#define xmit_bls_rsp64_rjt_expc_MASK 0x000000ff
2143#define xmit_bls_rsp64_rjt_expc_WORD payload0
2144#define xmit_bls_rsp64_rjt_rsnc_SHIFT 16
2145#define xmit_bls_rsp64_rjt_rsnc_MASK 0x000000ff
2146#define xmit_bls_rsp64_rjt_rsnc_WORD payload0
2047 uint32_t word1; 2147 uint32_t word1;
2048#define xmit_bls_rsp64_rxid_SHIFT 0 2148#define xmit_bls_rsp64_rxid_SHIFT 0
2049#define xmit_bls_rsp64_rxid_MASK 0x0000ffff 2149#define xmit_bls_rsp64_rxid_MASK 0x0000ffff
@@ -2052,18 +2152,19 @@ struct xmit_bls_rsp64_wqe {
2052#define xmit_bls_rsp64_oxid_MASK 0x0000ffff 2152#define xmit_bls_rsp64_oxid_MASK 0x0000ffff
2053#define xmit_bls_rsp64_oxid_WORD word1 2153#define xmit_bls_rsp64_oxid_WORD word1
2054 uint32_t word2; 2154 uint32_t word2;
2055#define xmit_bls_rsp64_seqcntlo_SHIFT 0 2155#define xmit_bls_rsp64_seqcnthi_SHIFT 0
2056#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2057#define xmit_bls_rsp64_seqcntlo_WORD word2
2058#define xmit_bls_rsp64_seqcnthi_SHIFT 16
2059#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff 2156#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff
2060#define xmit_bls_rsp64_seqcnthi_WORD word2 2157#define xmit_bls_rsp64_seqcnthi_WORD word2
2158#define xmit_bls_rsp64_seqcntlo_SHIFT 16
2159#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff
2160#define xmit_bls_rsp64_seqcntlo_WORD word2
2061 uint32_t rsrvd3; 2161 uint32_t rsrvd3;
2062 uint32_t rsrvd4; 2162 uint32_t rsrvd4;
2063 struct wqe_did wqe_dest; 2163 struct wqe_did wqe_dest;
2064 struct wqe_common wqe_com; /* words 6-11 */ 2164 struct wqe_common wqe_com; /* words 6-11 */
2065 uint32_t rsvd_12_15[4]; 2165 uint32_t rsvd_12_15[4];
2066}; 2166};
2167
2067struct wqe_rctl_dfctl { 2168struct wqe_rctl_dfctl {
2068 uint32_t word5; 2169 uint32_t word5;
2069#define wqe_si_SHIFT 2 2170#define wqe_si_SHIFT 2
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 82f8ab5c72cd..226920d15ea1 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -28,6 +28,7 @@
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/ctype.h> 30#include <linux/ctype.h>
31#include <linux/aer.h>
31 32
32#include <scsi/scsi.h> 33#include <scsi/scsi.h>
33#include <scsi/scsi_device.h> 34#include <scsi/scsi_device.h>
@@ -852,12 +853,19 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
852void 853void
853lpfc_hb_timeout_handler(struct lpfc_hba *phba) 854lpfc_hb_timeout_handler(struct lpfc_hba *phba)
854{ 855{
856 struct lpfc_vport **vports;
855 LPFC_MBOXQ_t *pmboxq; 857 LPFC_MBOXQ_t *pmboxq;
856 struct lpfc_dmabuf *buf_ptr; 858 struct lpfc_dmabuf *buf_ptr;
857 int retval; 859 int retval, i;
858 struct lpfc_sli *psli = &phba->sli; 860 struct lpfc_sli *psli = &phba->sli;
859 LIST_HEAD(completions); 861 LIST_HEAD(completions);
860 862
863 vports = lpfc_create_vport_work_array(phba);
864 if (vports != NULL)
865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
866 lpfc_rcv_seq_check_edtov(vports[i]);
867 lpfc_destroy_vport_work_array(phba, vports);
868
861 if ((phba->link_state == LPFC_HBA_ERROR) || 869 if ((phba->link_state == LPFC_HBA_ERROR) ||
862 (phba->pport->load_flag & FC_UNLOADING) || 870 (phba->pport->load_flag & FC_UNLOADING) ||
863 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 871 (phba->pport->fc_flag & FC_OFFLINE_MODE))
@@ -1521,10 +1529,10 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1521 int GE = 0; 1529 int GE = 0;
1522 int oneConnect = 0; /* default is not a oneConnect */ 1530 int oneConnect = 0; /* default is not a oneConnect */
1523 struct { 1531 struct {
1524 char * name; 1532 char *name;
1525 int max_speed; 1533 char *bus;
1526 char * bus; 1534 char *function;
1527 } m = {"<Unknown>", 0, ""}; 1535 } m = {"<Unknown>", "", ""};
1528 1536
1529 if (mdp && mdp[0] != '\0' 1537 if (mdp && mdp[0] != '\0'
1530 && descp && descp[0] != '\0') 1538 && descp && descp[0] != '\0')
@@ -1545,132 +1553,155 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1545 1553
1546 switch (dev_id) { 1554 switch (dev_id) {
1547 case PCI_DEVICE_ID_FIREFLY: 1555 case PCI_DEVICE_ID_FIREFLY:
1548 m = (typeof(m)){"LP6000", max_speed, "PCI"}; 1556 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1549 break; 1557 break;
1550 case PCI_DEVICE_ID_SUPERFLY: 1558 case PCI_DEVICE_ID_SUPERFLY:
1551 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1559 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1552 m = (typeof(m)){"LP7000", max_speed, "PCI"}; 1560 m = (typeof(m)){"LP7000", "PCI",
1561 "Fibre Channel Adapter"};
1553 else 1562 else
1554 m = (typeof(m)){"LP7000E", max_speed, "PCI"}; 1563 m = (typeof(m)){"LP7000E", "PCI",
1564 "Fibre Channel Adapter"};
1555 break; 1565 break;
1556 case PCI_DEVICE_ID_DRAGONFLY: 1566 case PCI_DEVICE_ID_DRAGONFLY:
1557 m = (typeof(m)){"LP8000", max_speed, "PCI"}; 1567 m = (typeof(m)){"LP8000", "PCI",
1568 "Fibre Channel Adapter"};
1558 break; 1569 break;
1559 case PCI_DEVICE_ID_CENTAUR: 1570 case PCI_DEVICE_ID_CENTAUR:
1560 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1571 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1561 m = (typeof(m)){"LP9002", max_speed, "PCI"}; 1572 m = (typeof(m)){"LP9002", "PCI",
1573 "Fibre Channel Adapter"};
1562 else 1574 else
1563 m = (typeof(m)){"LP9000", max_speed, "PCI"}; 1575 m = (typeof(m)){"LP9000", "PCI",
1576 "Fibre Channel Adapter"};
1564 break; 1577 break;
1565 case PCI_DEVICE_ID_RFLY: 1578 case PCI_DEVICE_ID_RFLY:
1566 m = (typeof(m)){"LP952", max_speed, "PCI"}; 1579 m = (typeof(m)){"LP952", "PCI",
1580 "Fibre Channel Adapter"};
1567 break; 1581 break;
1568 case PCI_DEVICE_ID_PEGASUS: 1582 case PCI_DEVICE_ID_PEGASUS:
1569 m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; 1583 m = (typeof(m)){"LP9802", "PCI-X",
1584 "Fibre Channel Adapter"};
1570 break; 1585 break;
1571 case PCI_DEVICE_ID_THOR: 1586 case PCI_DEVICE_ID_THOR:
1572 m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; 1587 m = (typeof(m)){"LP10000", "PCI-X",
1588 "Fibre Channel Adapter"};
1573 break; 1589 break;
1574 case PCI_DEVICE_ID_VIPER: 1590 case PCI_DEVICE_ID_VIPER:
1575 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"}; 1591 m = (typeof(m)){"LPX1000", "PCI-X",
1592 "Fibre Channel Adapter"};
1576 break; 1593 break;
1577 case PCI_DEVICE_ID_PFLY: 1594 case PCI_DEVICE_ID_PFLY:
1578 m = (typeof(m)){"LP982", max_speed, "PCI-X"}; 1595 m = (typeof(m)){"LP982", "PCI-X",
1596 "Fibre Channel Adapter"};
1579 break; 1597 break;
1580 case PCI_DEVICE_ID_TFLY: 1598 case PCI_DEVICE_ID_TFLY:
1581 m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; 1599 m = (typeof(m)){"LP1050", "PCI-X",
1600 "Fibre Channel Adapter"};
1582 break; 1601 break;
1583 case PCI_DEVICE_ID_HELIOS: 1602 case PCI_DEVICE_ID_HELIOS:
1584 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; 1603 m = (typeof(m)){"LP11000", "PCI-X2",
1604 "Fibre Channel Adapter"};
1585 break; 1605 break;
1586 case PCI_DEVICE_ID_HELIOS_SCSP: 1606 case PCI_DEVICE_ID_HELIOS_SCSP:
1587 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; 1607 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1608 "Fibre Channel Adapter"};
1588 break; 1609 break;
1589 case PCI_DEVICE_ID_HELIOS_DCSP: 1610 case PCI_DEVICE_ID_HELIOS_DCSP:
1590 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; 1611 m = (typeof(m)){"LP11002-SP", "PCI-X2",
1612 "Fibre Channel Adapter"};
1591 break; 1613 break;
1592 case PCI_DEVICE_ID_NEPTUNE: 1614 case PCI_DEVICE_ID_NEPTUNE:
1593 m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; 1615 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1594 break; 1616 break;
1595 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1617 case PCI_DEVICE_ID_NEPTUNE_SCSP:
1596 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; 1618 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1597 break; 1619 break;
1598 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1620 case PCI_DEVICE_ID_NEPTUNE_DCSP:
1599 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; 1621 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1600 break; 1622 break;
1601 case PCI_DEVICE_ID_BMID: 1623 case PCI_DEVICE_ID_BMID:
1602 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; 1624 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1603 break; 1625 break;
1604 case PCI_DEVICE_ID_BSMB: 1626 case PCI_DEVICE_ID_BSMB:
1605 m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; 1627 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1606 break; 1628 break;
1607 case PCI_DEVICE_ID_ZEPHYR: 1629 case PCI_DEVICE_ID_ZEPHYR:
1608 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1630 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1609 break; 1631 break;
1610 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1632 case PCI_DEVICE_ID_ZEPHYR_SCSP:
1611 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1633 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1612 break; 1634 break;
1613 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1635 case PCI_DEVICE_ID_ZEPHYR_DCSP:
1614 m = (typeof(m)){"LP2105", max_speed, "PCIe"}; 1636 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1615 GE = 1; 1637 GE = 1;
1616 break; 1638 break;
1617 case PCI_DEVICE_ID_ZMID: 1639 case PCI_DEVICE_ID_ZMID:
1618 m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; 1640 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1619 break; 1641 break;
1620 case PCI_DEVICE_ID_ZSMB: 1642 case PCI_DEVICE_ID_ZSMB:
1621 m = (typeof(m)){"LPe111", max_speed, "PCIe"}; 1643 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1622 break; 1644 break;
1623 case PCI_DEVICE_ID_LP101: 1645 case PCI_DEVICE_ID_LP101:
1624 m = (typeof(m)){"LP101", max_speed, "PCI-X"}; 1646 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1625 break; 1647 break;
1626 case PCI_DEVICE_ID_LP10000S: 1648 case PCI_DEVICE_ID_LP10000S:
1627 m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; 1649 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1628 break; 1650 break;
1629 case PCI_DEVICE_ID_LP11000S: 1651 case PCI_DEVICE_ID_LP11000S:
1630 m = (typeof(m)){"LP11000-S", max_speed, 1652 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1631 "PCI-X2"};
1632 break; 1653 break;
1633 case PCI_DEVICE_ID_LPE11000S: 1654 case PCI_DEVICE_ID_LPE11000S:
1634 m = (typeof(m)){"LPe11000-S", max_speed, 1655 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1635 "PCIe"};
1636 break; 1656 break;
1637 case PCI_DEVICE_ID_SAT: 1657 case PCI_DEVICE_ID_SAT:
1638 m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; 1658 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1639 break; 1659 break;
1640 case PCI_DEVICE_ID_SAT_MID: 1660 case PCI_DEVICE_ID_SAT_MID:
1641 m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; 1661 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1642 break; 1662 break;
1643 case PCI_DEVICE_ID_SAT_SMB: 1663 case PCI_DEVICE_ID_SAT_SMB:
1644 m = (typeof(m)){"LPe121", max_speed, "PCIe"}; 1664 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1645 break; 1665 break;
1646 case PCI_DEVICE_ID_SAT_DCSP: 1666 case PCI_DEVICE_ID_SAT_DCSP:
1647 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; 1667 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1648 break; 1668 break;
1649 case PCI_DEVICE_ID_SAT_SCSP: 1669 case PCI_DEVICE_ID_SAT_SCSP:
1650 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; 1670 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1651 break; 1671 break;
1652 case PCI_DEVICE_ID_SAT_S: 1672 case PCI_DEVICE_ID_SAT_S:
1653 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; 1673 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1654 break; 1674 break;
1655 case PCI_DEVICE_ID_HORNET: 1675 case PCI_DEVICE_ID_HORNET:
1656 m = (typeof(m)){"LP21000", max_speed, "PCIe"}; 1676 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1657 GE = 1; 1677 GE = 1;
1658 break; 1678 break;
1659 case PCI_DEVICE_ID_PROTEUS_VF: 1679 case PCI_DEVICE_ID_PROTEUS_VF:
1660 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1680 m = (typeof(m)){"LPev12000", "PCIe IOV",
1681 "Fibre Channel Adapter"};
1661 break; 1682 break;
1662 case PCI_DEVICE_ID_PROTEUS_PF: 1683 case PCI_DEVICE_ID_PROTEUS_PF:
1663 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"}; 1684 m = (typeof(m)){"LPev12000", "PCIe IOV",
1685 "Fibre Channel Adapter"};
1664 break; 1686 break;
1665 case PCI_DEVICE_ID_PROTEUS_S: 1687 case PCI_DEVICE_ID_PROTEUS_S:
1666 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; 1688 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1689 "Fibre Channel Adapter"};
1667 break; 1690 break;
1668 case PCI_DEVICE_ID_TIGERSHARK: 1691 case PCI_DEVICE_ID_TIGERSHARK:
1669 oneConnect = 1; 1692 oneConnect = 1;
1670 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; 1693 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1694 break;
1695 case PCI_DEVICE_ID_TOMCAT:
1696 oneConnect = 1;
1697 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1698 break;
1699 case PCI_DEVICE_ID_FALCON:
1700 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1701 "EmulexSecure Fibre"};
1671 break; 1702 break;
1672 default: 1703 default:
1673 m = (typeof(m)){ NULL }; 1704 m = (typeof(m)){"Unknown", "", ""};
1674 break; 1705 break;
1675 } 1706 }
1676 1707
@@ -1682,17 +1713,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1682 if (descp && descp[0] == '\0') { 1713 if (descp && descp[0] == '\0') {
1683 if (oneConnect) 1714 if (oneConnect)
1684 snprintf(descp, 255, 1715 snprintf(descp, 255,
1685 "Emulex OneConnect %s, FCoE Initiator, Port %s", 1716 "Emulex OneConnect %s, %s Initiator, Port %s",
1686 m.name, 1717 m.name, m.function,
1687 phba->Port); 1718 phba->Port);
1688 else 1719 else
1689 snprintf(descp, 255, 1720 snprintf(descp, 255,
1690 "Emulex %s %d%s %s %s", 1721 "Emulex %s %d%s %s %s",
1691 m.name, m.max_speed, 1722 m.name, max_speed, (GE) ? "GE" : "Gb",
1692 (GE) ? "GE" : "Gb", 1723 m.bus, m.function);
1693 m.bus,
1694 (GE) ? "FCoE Adapter" :
1695 "Fibre Channel Adapter");
1696 } 1724 }
1697} 1725}
1698 1726
@@ -2217,7 +2245,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2217 2245
2218 if (vports[i]->load_flag & FC_UNLOADING) 2246 if (vports[i]->load_flag & FC_UNLOADING)
2219 continue; 2247 continue;
2220 vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; 2248 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2221 shost = lpfc_shost_from_vport(vports[i]); 2249 shost = lpfc_shost_from_vport(vports[i]);
2222 list_for_each_entry_safe(ndlp, next_ndlp, 2250 list_for_each_entry_safe(ndlp, next_ndlp,
2223 &vports[i]->fc_nodes, 2251 &vports[i]->fc_nodes,
@@ -2308,6 +2336,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2308 2336
2309 spin_lock_irq(&phba->hbalock); 2337 spin_lock_irq(&phba->hbalock);
2310 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2338 /* Release all the lpfc_scsi_bufs maintained by this host. */
2339 spin_lock(&phba->scsi_buf_list_lock);
2311 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2340 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2312 list_del(&sb->list); 2341 list_del(&sb->list);
2313 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2342 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
@@ -2315,6 +2344,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2315 kfree(sb); 2344 kfree(sb);
2316 phba->total_scsi_bufs--; 2345 phba->total_scsi_bufs--;
2317 } 2346 }
2347 spin_unlock(&phba->scsi_buf_list_lock);
2318 2348
2319 /* Release all the lpfc_iocbq entries maintained by this host. */ 2349 /* Release all the lpfc_iocbq entries maintained by this host. */
2320 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2350 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2322,9 +2352,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
2322 kfree(io); 2352 kfree(io);
2323 phba->total_iocbq_bufs--; 2353 phba->total_iocbq_bufs--;
2324 } 2354 }
2325
2326 spin_unlock_irq(&phba->hbalock); 2355 spin_unlock_irq(&phba->hbalock);
2327
2328 return 0; 2356 return 0;
2329} 2357}
2330 2358
@@ -2408,7 +2436,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2408 vport->els_tmofunc.function = lpfc_els_timeout; 2436 vport->els_tmofunc.function = lpfc_els_timeout;
2409 vport->els_tmofunc.data = (unsigned long)vport; 2437 vport->els_tmofunc.data = (unsigned long)vport;
2410 2438
2411 error = scsi_add_host(shost, dev); 2439 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2412 if (error) 2440 if (error)
2413 goto out_put_shost; 2441 goto out_put_shost;
2414 2442
@@ -2699,6 +2727,63 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
2699} 2727}
2700 2728
2701/** 2729/**
2730 * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
2731 * @phba: pointer to lpfc hba data structure.
2732 *
2733 * This function uses the QUERY_FW_CFG mailbox command to determine if the
2734 * firmware loaded supports FCoE. A return of zero indicates that the mailbox
2735 * was successful and the firmware supports FCoE. Any other return indicates
2736 * a error. It is assumed that this function will be called before interrupts
2737 * are enabled.
2738 **/
2739static int
2740lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
2741{
2742 int rc = 0;
2743 LPFC_MBOXQ_t *mboxq;
2744 struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
2745 uint32_t length;
2746 uint32_t shdr_status, shdr_add_status;
2747
2748 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2749 if (!mboxq) {
2750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2751 "2621 Failed to allocate mbox for "
2752 "query firmware config cmd\n");
2753 return -ENOMEM;
2754 }
2755 query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
2756 length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
2757 sizeof(struct lpfc_sli4_cfg_mhdr));
2758 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
2759 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
2760 length, LPFC_SLI4_MBX_EMBED);
2761 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
2762 /* The IOCTL status is embedded in the mailbox subheader. */
2763 shdr_status = bf_get(lpfc_mbox_hdr_status,
2764 &query_fw_cfg->header.cfg_shdr.response);
2765 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
2766 &query_fw_cfg->header.cfg_shdr.response);
2767 if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
2768 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2769 "2622 Query Firmware Config failed "
2770 "mbx status x%x, status x%x add_status x%x\n",
2771 rc, shdr_status, shdr_add_status);
2772 return -EINVAL;
2773 }
2774 if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
2775 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2776 "2623 FCoE Function not supported by firmware. "
2777 "Function mode = %08x\n",
2778 query_fw_cfg->function_mode);
2779 return -EINVAL;
2780 }
2781 if (rc != MBX_TIMEOUT)
2782 mempool_free(mboxq, phba->mbox_mem_pool);
2783 return 0;
2784}
2785
2786/**
2702 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 2787 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
2703 * @phba: pointer to lpfc hba data structure. 2788 * @phba: pointer to lpfc hba data structure.
2704 * @acqe_link: pointer to the async link completion queue entry. 2789 * @acqe_link: pointer to the async link completion queue entry.
@@ -2918,13 +3003,17 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2918{ 3003{
2919 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 3004 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2920 int rc; 3005 int rc;
3006 struct lpfc_vport *vport;
3007 struct lpfc_nodelist *ndlp;
3008 struct Scsi_Host *shost;
2921 3009
3010 phba->fc_eventTag = acqe_fcoe->event_tag;
2922 phba->fcoe_eventtag = acqe_fcoe->event_tag; 3011 phba->fcoe_eventtag = acqe_fcoe->event_tag;
2923 switch (event_type) { 3012 switch (event_type) {
2924 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3013 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2925 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3014 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2926 "2546 New FCF found index 0x%x tag 0x%x\n", 3015 "2546 New FCF found index 0x%x tag 0x%x\n",
2927 acqe_fcoe->fcf_index, 3016 acqe_fcoe->index,
2928 acqe_fcoe->event_tag); 3017 acqe_fcoe->event_tag);
2929 /* 3018 /*
2930 * If the current FCF is in discovered state, or 3019 * If the current FCF is in discovered state, or
@@ -2939,12 +3028,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2939 spin_unlock_irq(&phba->hbalock); 3028 spin_unlock_irq(&phba->hbalock);
2940 3029
2941 /* Read the FCF table and re-discover SAN. */ 3030 /* Read the FCF table and re-discover SAN. */
2942 rc = lpfc_sli4_read_fcf_record(phba, 3031 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
2943 LPFC_FCOE_FCF_GET_FIRST);
2944 if (rc) 3032 if (rc)
2945 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3033 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2946 "2547 Read FCF record failed 0x%x\n", 3034 "2547 Read FCF record failed 0x%x\n",
2947 rc); 3035 rc);
2948 break; 3036 break;
2949 3037
2950 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3038 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -2956,11 +3044,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2956 3044
2957 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3045 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2958 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3046 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2959 "2549 FCF disconnected fron network index 0x%x" 3047 "2549 FCF disconnected from network index 0x%x"
2960 " tag 0x%x\n", acqe_fcoe->fcf_index, 3048 " tag 0x%x\n", acqe_fcoe->index,
2961 acqe_fcoe->event_tag); 3049 acqe_fcoe->event_tag);
2962 /* If the event is not for currently used fcf do nothing */ 3050 /* If the event is not for currently used fcf do nothing */
2963 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) 3051 if (phba->fcf.fcf_indx != acqe_fcoe->index)
2964 break; 3052 break;
2965 /* 3053 /*
2966 * Currently, driver support only one FCF - so treat this as 3054 * Currently, driver support only one FCF - so treat this as
@@ -2970,7 +3058,28 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2970 /* Unregister FCF if no devices connected to it */ 3058 /* Unregister FCF if no devices connected to it */
2971 lpfc_unregister_unused_fcf(phba); 3059 lpfc_unregister_unused_fcf(phba);
2972 break; 3060 break;
2973 3061 case LPFC_FCOE_EVENT_TYPE_CVL:
3062 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
3063 "2718 Clear Virtual Link Received for VPI 0x%x"
3064 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3065 vport = lpfc_find_vport_by_vpid(phba,
3066 acqe_fcoe->index - phba->vpi_base);
3067 if (!vport)
3068 break;
3069 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3070 if (!ndlp)
3071 break;
3072 shost = lpfc_shost_from_vport(vport);
3073 lpfc_linkdown_port(vport);
3074 if (vport->port_type != LPFC_NPIV_PORT) {
3075 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3076 spin_lock_irq(shost->host_lock);
3077 ndlp->nlp_flag |= NLP_DELAY_TMO;
3078 spin_unlock_irq(shost->host_lock);
3079 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
3080 vport->port_state = LPFC_FLOGI;
3081 }
3082 break;
2974 default: 3083 default:
2975 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3084 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2976 "0288 Unknown FCoE event type 0x%x event tag " 3085 "0288 Unknown FCoE event type 0x%x event tag "
@@ -2990,6 +3099,7 @@ static void
2990lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 3099lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
2991 struct lpfc_acqe_dcbx *acqe_dcbx) 3100 struct lpfc_acqe_dcbx *acqe_dcbx)
2992{ 3101{
3102 phba->fc_eventTag = acqe_dcbx->event_tag;
2993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3103 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2994 "0290 The SLI4 DCBX asynchronous event is not " 3104 "0290 The SLI4 DCBX asynchronous event is not "
2995 "handled yet\n"); 3105 "handled yet\n");
@@ -3432,7 +3542,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3432 /* Driver internel slow-path CQ Event pool */ 3542 /* Driver internel slow-path CQ Event pool */
3433 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 3543 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
3434 /* Response IOCB work queue list */ 3544 /* Response IOCB work queue list */
3435 INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); 3545 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
3436 /* Asynchronous event CQ Event work queue list */ 3546 /* Asynchronous event CQ Event work queue list */
3437 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 3547 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
3438 /* Fast-path XRI aborted CQ Event work queue list */ 3548 /* Fast-path XRI aborted CQ Event work queue list */
@@ -3461,6 +3571,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3461 if (unlikely(rc)) 3571 if (unlikely(rc))
3462 goto out_free_bsmbx; 3572 goto out_free_bsmbx;
3463 3573
3574 rc = lpfc_sli4_fw_cfg_check(phba);
3575 if (unlikely(rc))
3576 goto out_free_bsmbx;
3577
3464 /* Set up the hba's configuration parameters. */ 3578 /* Set up the hba's configuration parameters. */
3465 rc = lpfc_sli4_read_config(phba); 3579 rc = lpfc_sli4_read_config(phba);
3466 if (unlikely(rc)) 3580 if (unlikely(rc))
@@ -3594,8 +3708,10 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3594 3708
3595 /* Free the current connect table */ 3709 /* Free the current connect table */
3596 list_for_each_entry_safe(conn_entry, next_conn_entry, 3710 list_for_each_entry_safe(conn_entry, next_conn_entry,
3597 &phba->fcf_conn_rec_list, list) 3711 &phba->fcf_conn_rec_list, list) {
3712 list_del_init(&conn_entry->list);
3598 kfree(conn_entry); 3713 kfree(conn_entry);
3714 }
3599 3715
3600 return; 3716 return;
3601} 3717}
@@ -3824,7 +3940,7 @@ lpfc_free_sgl_list(struct lpfc_hba *phba)
3824 rc = lpfc_sli4_remove_all_sgl_pages(phba); 3940 rc = lpfc_sli4_remove_all_sgl_pages(phba);
3825 if (rc) { 3941 if (rc) {
3826 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3942 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3827 "2005 Unable to deregister pages from HBA: %x", rc); 3943 "2005 Unable to deregister pages from HBA: %x\n", rc);
3828 } 3944 }
3829 kfree(phba->sli4_hba.lpfc_els_sgl_array); 3945 kfree(phba->sli4_hba.lpfc_els_sgl_array);
3830} 3946}
@@ -4273,7 +4389,8 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4273 _dump_buf_data = 4389 _dump_buf_data =
4274 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4390 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4275 if (_dump_buf_data) { 4391 if (_dump_buf_data) {
4276 printk(KERN_ERR "BLKGRD allocated %d pages for " 4392 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4393 "9043 BLKGRD: allocated %d pages for "
4277 "_dump_buf_data at 0x%p\n", 4394 "_dump_buf_data at 0x%p\n",
4278 (1 << pagecnt), _dump_buf_data); 4395 (1 << pagecnt), _dump_buf_data);
4279 _dump_buf_data_order = pagecnt; 4396 _dump_buf_data_order = pagecnt;
@@ -4284,17 +4401,20 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4284 --pagecnt; 4401 --pagecnt;
4285 } 4402 }
4286 if (!_dump_buf_data_order) 4403 if (!_dump_buf_data_order)
4287 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4404 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4405 "9044 BLKGRD: ERROR unable to allocate "
4288 "memory for hexdump\n"); 4406 "memory for hexdump\n");
4289 } else 4407 } else
4290 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" 4408 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4409 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
4291 "\n", _dump_buf_data); 4410 "\n", _dump_buf_data);
4292 if (!_dump_buf_dif) { 4411 if (!_dump_buf_dif) {
4293 while (pagecnt) { 4412 while (pagecnt) {
4294 _dump_buf_dif = 4413 _dump_buf_dif =
4295 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 4414 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
4296 if (_dump_buf_dif) { 4415 if (_dump_buf_dif) {
4297 printk(KERN_ERR "BLKGRD allocated %d pages for " 4416 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4417 "9046 BLKGRD: allocated %d pages for "
4298 "_dump_buf_dif at 0x%p\n", 4418 "_dump_buf_dif at 0x%p\n",
4299 (1 << pagecnt), _dump_buf_dif); 4419 (1 << pagecnt), _dump_buf_dif);
4300 _dump_buf_dif_order = pagecnt; 4420 _dump_buf_dif_order = pagecnt;
@@ -4305,10 +4425,12 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
4305 --pagecnt; 4425 --pagecnt;
4306 } 4426 }
4307 if (!_dump_buf_dif_order) 4427 if (!_dump_buf_dif_order)
4308 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 4428 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4429 "9047 BLKGRD: ERROR unable to allocate "
4309 "memory for hexdump\n"); 4430 "memory for hexdump\n");
4310 } else 4431 } else
4311 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", 4432 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4433 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
4312 _dump_buf_dif); 4434 _dump_buf_dif);
4313} 4435}
4314 4436
@@ -4512,7 +4634,6 @@ int
4512lpfc_sli4_post_status_check(struct lpfc_hba *phba) 4634lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4513{ 4635{
4514 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; 4636 struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
4515 uint32_t onlnreg0, onlnreg1;
4516 int i, port_error = -ENODEV; 4637 int i, port_error = -ENODEV;
4517 4638
4518 if (!phba->sli4_hba.STAregaddr) 4639 if (!phba->sli4_hba.STAregaddr)
@@ -4556,21 +4677,20 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4556 bf_get(lpfc_scratchpad_slirev, &scratchpad), 4677 bf_get(lpfc_scratchpad_slirev, &scratchpad),
4557 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), 4678 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4558 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); 4679 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4559 4680 phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
4681 phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
4560 /* With uncoverable error, log the error message and return error */ 4682 /* With uncoverable error, log the error message and return error */
4561 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); 4683 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4562 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); 4684 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4563 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { 4685 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
4564 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); 4686 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
4565 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); 4687 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4566 if (uerrlo_reg.word0 || uerrhi_reg.word0) { 4688 "1422 HBA Unrecoverable error: "
4567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4689 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4568 "1422 HBA Unrecoverable error: " 4690 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
4569 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 4691 uerrlo_reg.word0, uerrhi_reg.word0,
4570 "online0_reg=0x%x, online1_reg=0x%x\n", 4692 phba->sli4_hba.ue_mask_lo,
4571 uerrlo_reg.word0, uerrhi_reg.word0, 4693 phba->sli4_hba.ue_mask_hi);
4572 onlnreg0, onlnreg1);
4573 }
4574 return -ENODEV; 4694 return -ENODEV;
4575 } 4695 }
4576 4696
@@ -4591,10 +4711,10 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
4591 LPFC_UERR_STATUS_LO; 4711 LPFC_UERR_STATUS_LO;
4592 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + 4712 phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4593 LPFC_UERR_STATUS_HI; 4713 LPFC_UERR_STATUS_HI;
4594 phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + 4714 phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
4595 LPFC_ONLINE0; 4715 LPFC_UE_MASK_LO;
4596 phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + 4716 phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
4597 LPFC_ONLINE1; 4717 LPFC_UE_MASK_HI;
4598 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + 4718 phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
4599 LPFC_SCRATCHPAD; 4719 LPFC_SCRATCHPAD;
4600} 4720}
@@ -4825,7 +4945,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
4825 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 4945 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
4826 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 4946 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
4827 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4947 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
4828 phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; 4948 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
4949 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
4829 phba->max_vports = phba->max_vpi; 4950 phba->max_vports = phba->max_vpi;
4830 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4951 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4831 "2003 cfg params XRI(B:%d M:%d), " 4952 "2003 cfg params XRI(B:%d M:%d), "
@@ -4979,10 +5100,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
4979 /* It does not make sense to have more EQs than WQs */ 5100 /* It does not make sense to have more EQs than WQs */
4980 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { 5101 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
4981 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5102 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4982 "2593 The number of FCP EQs (%d) is more " 5103 "2593 The FCP EQ count(%d) cannot be greater "
4983 "than the number of FCP WQs (%d), take " 5104 "than the FCP WQ count(%d), limiting the "
4984 "the number of FCP EQs same as than of " 5105 "FCP EQ count to %d\n", cfg_fcp_eq_count,
4985 "WQs (%d)\n", cfg_fcp_eq_count,
4986 phba->cfg_fcp_wq_count, 5106 phba->cfg_fcp_wq_count,
4987 phba->cfg_fcp_wq_count); 5107 phba->cfg_fcp_wq_count);
4988 cfg_fcp_eq_count = phba->cfg_fcp_wq_count; 5108 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
@@ -5058,15 +5178,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
5058 } 5178 }
5059 phba->sli4_hba.els_cq = qdesc; 5179 phba->sli4_hba.els_cq = qdesc;
5060 5180
5061 /* Create slow-path Unsolicited Receive Complete Queue */
5062 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
5063 phba->sli4_hba.cq_ecount);
5064 if (!qdesc) {
5065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5066 "0502 Failed allocate slow-path USOL RX CQ\n");
5067 goto out_free_els_cq;
5068 }
5069 phba->sli4_hba.rxq_cq = qdesc;
5070 5181
5071 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 5182 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
5072 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 5183 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
@@ -5075,7 +5186,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
5075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5186 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5076 "2577 Failed allocate memory for fast-path " 5187 "2577 Failed allocate memory for fast-path "
5077 "CQ record array\n"); 5188 "CQ record array\n");
5078 goto out_free_rxq_cq; 5189 goto out_free_els_cq;
5079 } 5190 }
5080 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5191 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5081 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 5192 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
@@ -5188,9 +5299,6 @@ out_free_fcp_cq:
5188 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; 5299 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
5189 } 5300 }
5190 kfree(phba->sli4_hba.fcp_cq); 5301 kfree(phba->sli4_hba.fcp_cq);
5191out_free_rxq_cq:
5192 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5193 phba->sli4_hba.rxq_cq = NULL;
5194out_free_els_cq: 5302out_free_els_cq:
5195 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5303 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5196 phba->sli4_hba.els_cq = NULL; 5304 phba->sli4_hba.els_cq = NULL;
@@ -5247,10 +5355,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
5247 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 5355 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
5248 phba->sli4_hba.dat_rq = NULL; 5356 phba->sli4_hba.dat_rq = NULL;
5249 5357
5250 /* Release unsolicited receive complete queue */
5251 lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
5252 phba->sli4_hba.rxq_cq = NULL;
5253
5254 /* Release ELS complete queue */ 5358 /* Release ELS complete queue */
5255 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 5359 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
5256 phba->sli4_hba.els_cq = NULL; 5360 phba->sli4_hba.els_cq = NULL;
@@ -5383,25 +5487,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5383 phba->sli4_hba.els_cq->queue_id, 5487 phba->sli4_hba.els_cq->queue_id,
5384 phba->sli4_hba.sp_eq->queue_id); 5488 phba->sli4_hba.sp_eq->queue_id);
5385 5489
5386 /* Set up slow-path Unsolicited Receive Complete Queue */
5387 if (!phba->sli4_hba.rxq_cq) {
5388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5389 "0532 USOL RX CQ not allocated\n");
5390 goto out_destroy_els_cq;
5391 }
5392 rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
5393 LPFC_RCQ, LPFC_USOL);
5394 if (rc) {
5395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5396 "0533 Failed setup of slow-path USOL RX CQ: "
5397 "rc = 0x%x\n", rc);
5398 goto out_destroy_els_cq;
5399 }
5400 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5401 "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
5402 phba->sli4_hba.rxq_cq->queue_id,
5403 phba->sli4_hba.sp_eq->queue_id);
5404
5405 /* Set up fast-path FCP Response Complete Queue */ 5490 /* Set up fast-path FCP Response Complete Queue */
5406 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 5491 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
5407 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 5492 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -5507,7 +5592,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5507 goto out_destroy_fcp_wq; 5592 goto out_destroy_fcp_wq;
5508 } 5593 }
5509 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 5594 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
5510 phba->sli4_hba.rxq_cq, LPFC_USOL); 5595 phba->sli4_hba.els_cq, LPFC_USOL);
5511 if (rc) { 5596 if (rc) {
5512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5513 "0541 Failed setup of Receive Queue: " 5598 "0541 Failed setup of Receive Queue: "
@@ -5519,7 +5604,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
5519 "parent cq-id=%d\n", 5604 "parent cq-id=%d\n",
5520 phba->sli4_hba.hdr_rq->queue_id, 5605 phba->sli4_hba.hdr_rq->queue_id,
5521 phba->sli4_hba.dat_rq->queue_id, 5606 phba->sli4_hba.dat_rq->queue_id,
5522 phba->sli4_hba.rxq_cq->queue_id); 5607 phba->sli4_hba.els_cq->queue_id);
5523 return 0; 5608 return 0;
5524 5609
5525out_destroy_fcp_wq: 5610out_destroy_fcp_wq:
@@ -5531,8 +5616,6 @@ out_destroy_mbx_wq:
5531out_destroy_fcp_cq: 5616out_destroy_fcp_cq:
5532 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 5617 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
5533 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 5618 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
5534 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5535out_destroy_els_cq:
5536 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 5619 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5537out_destroy_mbx_cq: 5620out_destroy_mbx_cq:
5538 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 5621 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
@@ -5574,8 +5657,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
5574 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 5657 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
5575 /* Unset ELS complete queue */ 5658 /* Unset ELS complete queue */
5576 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 5659 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
5577 /* Unset unsolicited receive complete queue */
5578 lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
5579 /* Unset FCP response complete queue */ 5660 /* Unset FCP response complete queue */
5580 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 5661 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
5581 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 5662 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
@@ -6722,6 +6803,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6722{ 6803{
6723 struct lpfc_hba *phba; 6804 struct lpfc_hba *phba;
6724 struct lpfc_vport *vport = NULL; 6805 struct lpfc_vport *vport = NULL;
6806 struct Scsi_Host *shost = NULL;
6725 int error; 6807 int error;
6726 uint32_t cfg_mode, intr_mode; 6808 uint32_t cfg_mode, intr_mode;
6727 6809
@@ -6800,6 +6882,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
6800 goto out_destroy_shost; 6882 goto out_destroy_shost;
6801 } 6883 }
6802 6884
6885 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
6803 /* Now, trying to enable interrupt and bring up the device */ 6886 /* Now, trying to enable interrupt and bring up the device */
6804 cfg_mode = phba->cfg_use_msi; 6887 cfg_mode = phba->cfg_use_msi;
6805 while (true) { 6888 while (true) {
@@ -6866,6 +6949,8 @@ out_unset_pci_mem_s3:
6866 lpfc_sli_pci_mem_unset(phba); 6949 lpfc_sli_pci_mem_unset(phba);
6867out_disable_pci_dev: 6950out_disable_pci_dev:
6868 lpfc_disable_pci_dev(phba); 6951 lpfc_disable_pci_dev(phba);
6952 if (shost)
6953 scsi_host_put(shost);
6869out_free_phba: 6954out_free_phba:
6870 lpfc_hba_free(phba); 6955 lpfc_hba_free(phba);
6871 return error; 6956 return error;
@@ -7036,6 +7121,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7036 /* Restore device state from PCI config space */ 7121 /* Restore device state from PCI config space */
7037 pci_set_power_state(pdev, PCI_D0); 7122 pci_set_power_state(pdev, PCI_D0);
7038 pci_restore_state(pdev); 7123 pci_restore_state(pdev);
7124
7039 if (pdev->is_busmaster) 7125 if (pdev->is_busmaster)
7040 pci_set_master(pdev); 7126 pci_set_master(pdev);
7041 7127
@@ -7070,6 +7156,75 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7070} 7156}
7071 7157
7072/** 7158/**
7159 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
7160 * @phba: pointer to lpfc hba data structure.
7161 *
7162 * This routine is called to prepare the SLI3 device for PCI slot recover. It
7163 * aborts and stops all the on-going I/Os on the pci device.
7164 **/
7165static void
7166lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7167{
7168 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7169 "2723 PCI channel I/O abort preparing for recovery\n");
7170 /* Prepare for bringing HBA offline */
7171 lpfc_offline_prep(phba);
7172 /* Clear sli active flag to prevent sysfs access to HBA */
7173 spin_lock_irq(&phba->hbalock);
7174 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
7175 spin_unlock_irq(&phba->hbalock);
7176 /* Stop and flush all I/Os and bring HBA offline */
7177 lpfc_offline(phba);
7178}
7179
7180/**
7181 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
7182 * @phba: pointer to lpfc hba data structure.
7183 *
7184 * This routine is called to prepare the SLI3 device for PCI slot reset. It
7185 * disables the device interrupt and pci device, and aborts the internal FCP
7186 * pending I/Os.
7187 **/
7188static void
7189lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7190{
7191 struct lpfc_sli *psli = &phba->sli;
7192 struct lpfc_sli_ring *pring;
7193
7194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7195 "2710 PCI channel disable preparing for reset\n");
7196 /* Disable interrupt and pci device */
7197 lpfc_sli_disable_intr(phba);
7198 pci_disable_device(phba->pcidev);
7199 /*
7200 * There may be I/Os dropped by the firmware.
7201 * Error iocb (I/O) on txcmplq and let the SCSI layer
7202 * retry it after re-establishing link.
7203 */
7204 pring = &psli->ring[psli->fcp_ring];
7205 lpfc_sli_abort_iocb_ring(phba, pring);
7206}
7207
7208/**
7209 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
7210 * @phba: pointer to lpfc hba data structure.
7211 *
7212 * This routine is called to prepare the SLI3 device for PCI slot permanently
7213 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
7214 * pending I/Os.
7215 **/
7216static void
7217lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7218{
7219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7220 "2711 PCI channel permanent disable for failure\n");
7221 /* Block all SCSI devices' I/Os on the host */
7222 lpfc_scsi_dev_block(phba);
7223 /* Clean up all driver's outstanding SCSI I/Os */
7224 lpfc_sli_flush_fcp_rings(phba);
7225}
7226
7227/**
7073 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 7228 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
7074 * @pdev: pointer to PCI device. 7229 * @pdev: pointer to PCI device.
7075 * @state: the current PCI connection state. 7230 * @state: the current PCI connection state.
@@ -7083,6 +7238,7 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7083 * as desired. 7238 * as desired.
7084 * 7239 *
7085 * Return codes 7240 * Return codes
7241 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
7086 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 7242 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
7087 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 7243 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
7088 **/ 7244 **/
@@ -7091,33 +7247,27 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7091{ 7247{
7092 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7248 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7093 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7249 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7094 struct lpfc_sli *psli = &phba->sli;
7095 struct lpfc_sli_ring *pring;
7096 7250
7097 if (state == pci_channel_io_perm_failure) { 7251 switch (state) {
7098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7252 case pci_channel_io_normal:
7099 "0472 PCI channel I/O permanent failure\n"); 7253 /* Non-fatal error, prepare for recovery */
7100 /* Block all SCSI devices' I/Os on the host */ 7254 lpfc_sli_prep_dev_for_recover(phba);
7101 lpfc_scsi_dev_block(phba); 7255 return PCI_ERS_RESULT_CAN_RECOVER;
7102 /* Clean up all driver's outstanding SCSI I/Os */ 7256 case pci_channel_io_frozen:
7103 lpfc_sli_flush_fcp_rings(phba); 7257 /* Fatal error, prepare for slot reset */
7258 lpfc_sli_prep_dev_for_reset(phba);
7259 return PCI_ERS_RESULT_NEED_RESET;
7260 case pci_channel_io_perm_failure:
7261 /* Permanent failure, prepare for device down */
7262 lpfc_prep_dev_for_perm_failure(phba);
7104 return PCI_ERS_RESULT_DISCONNECT; 7263 return PCI_ERS_RESULT_DISCONNECT;
7264 default:
7265 /* Unknown state, prepare and request slot reset */
7266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7267 "0472 Unknown PCI error state: x%x\n", state);
7268 lpfc_sli_prep_dev_for_reset(phba);
7269 return PCI_ERS_RESULT_NEED_RESET;
7105 } 7270 }
7106
7107 pci_disable_device(pdev);
7108 /*
7109 * There may be I/Os dropped by the firmware.
7110 * Error iocb (I/O) on txcmplq and let the SCSI layer
7111 * retry it after re-establishing link.
7112 */
7113 pring = &psli->ring[psli->fcp_ring];
7114 lpfc_sli_abort_iocb_ring(phba, pring);
7115
7116 /* Disable interrupt */
7117 lpfc_sli_disable_intr(phba);
7118
7119 /* Request a slot reset. */
7120 return PCI_ERS_RESULT_NEED_RESET;
7121} 7271}
7122 7272
7123/** 7273/**
@@ -7197,7 +7347,12 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
7197 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7347 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7198 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7348 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7199 7349
7350 /* Bring the device online */
7200 lpfc_online(phba); 7351 lpfc_online(phba);
7352
7353 /* Clean up Advanced Error Reporting (AER) if needed */
7354 if (phba->hba_flag & HBA_AER_ENABLED)
7355 pci_cleanup_aer_uncorrect_error_status(pdev);
7201} 7356}
7202 7357
7203/** 7358/**
@@ -7213,15 +7368,15 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7213 7368
7214 if (phba->sli_rev == LPFC_SLI_REV4) { 7369 if (phba->sli_rev == LPFC_SLI_REV4) {
7215 if (max_xri <= 100) 7370 if (max_xri <= 100)
7216 return 4; 7371 return 10;
7217 else if (max_xri <= 256) 7372 else if (max_xri <= 256)
7218 return 8; 7373 return 25;
7219 else if (max_xri <= 512) 7374 else if (max_xri <= 512)
7220 return 16; 7375 return 50;
7221 else if (max_xri <= 1024) 7376 else if (max_xri <= 1024)
7222 return 32; 7377 return 100;
7223 else 7378 else
7224 return 48; 7379 return 150;
7225 } else 7380 } else
7226 return 0; 7381 return 0;
7227} 7382}
@@ -7249,6 +7404,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7249{ 7404{
7250 struct lpfc_hba *phba; 7405 struct lpfc_hba *phba;
7251 struct lpfc_vport *vport = NULL; 7406 struct lpfc_vport *vport = NULL;
7407 struct Scsi_Host *shost = NULL;
7252 int error; 7408 int error;
7253 uint32_t cfg_mode, intr_mode; 7409 uint32_t cfg_mode, intr_mode;
7254 int mcnt; 7410 int mcnt;
@@ -7329,6 +7485,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7329 goto out_destroy_shost; 7485 goto out_destroy_shost;
7330 } 7486 }
7331 7487
7488 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
7332 /* Now, trying to enable interrupt and bring up the device */ 7489 /* Now, trying to enable interrupt and bring up the device */
7333 cfg_mode = phba->cfg_use_msi; 7490 cfg_mode = phba->cfg_use_msi;
7334 while (true) { 7491 while (true) {
@@ -7397,6 +7554,8 @@ out_unset_pci_mem_s4:
7397 lpfc_sli4_pci_mem_unset(phba); 7554 lpfc_sli4_pci_mem_unset(phba);
7398out_disable_pci_dev: 7555out_disable_pci_dev:
7399 lpfc_disable_pci_dev(phba); 7556 lpfc_disable_pci_dev(phba);
7557 if (shost)
7558 scsi_host_put(shost);
7400out_free_phba: 7559out_free_phba:
7401 lpfc_hba_free(phba); 7560 lpfc_hba_free(phba);
7402 return error; 7561 return error;
@@ -7971,6 +8130,10 @@ static struct pci_device_id lpfc_id_table[] = {
7971 PCI_ANY_ID, PCI_ANY_ID, }, 8130 PCI_ANY_ID, PCI_ANY_ID, },
7972 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 8131 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7973 PCI_ANY_ID, PCI_ANY_ID, }, 8132 PCI_ANY_ID, PCI_ANY_ID, },
8133 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
8134 PCI_ANY_ID, PCI_ANY_ID, },
8135 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
8136 PCI_ANY_ID, PCI_ANY_ID, },
7974 { 0 } 8137 { 0 }
7975}; 8138};
7976 8139
@@ -8053,15 +8216,15 @@ lpfc_exit(void)
8053 if (lpfc_enable_npiv) 8216 if (lpfc_enable_npiv)
8054 fc_release_transport(lpfc_vport_transport_template); 8217 fc_release_transport(lpfc_vport_transport_template);
8055 if (_dump_buf_data) { 8218 if (_dump_buf_data) {
8056 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " 8219 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
8057 "at 0x%p\n", 8220 "_dump_buf_data at 0x%p\n",
8058 (1L << _dump_buf_data_order), _dump_buf_data); 8221 (1L << _dump_buf_data_order), _dump_buf_data);
8059 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 8222 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
8060 } 8223 }
8061 8224
8062 if (_dump_buf_dif) { 8225 if (_dump_buf_dif) {
8063 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " 8226 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
8064 "at 0x%p\n", 8227 "_dump_buf_dif at 0x%p\n",
8065 (1L << _dump_buf_dif_order), _dump_buf_dif); 8228 (1L << _dump_buf_dif_order), _dump_buf_dif);
8066 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 8229 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
8067 } 8230 }
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 1ab405902a18..a9afd8b94b6a 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -25,8 +25,8 @@
25 25
26#include <scsi/scsi_device.h> 26#include <scsi/scsi_device.h>
27#include <scsi/scsi_transport_fc.h> 27#include <scsi/scsi_transport_fc.h>
28
29#include <scsi/scsi.h> 28#include <scsi/scsi.h>
29#include <scsi/fc/fc_fs.h>
30 30
31#include "lpfc_hw4.h" 31#include "lpfc_hw4.h"
32#include "lpfc_hw.h" 32#include "lpfc_hw.h"
@@ -820,6 +820,10 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
820 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; 820 mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
821 mb->un.varRegVpi.sid = vport->fc_myDID; 821 mb->un.varRegVpi.sid = vport->fc_myDID;
822 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; 822 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
823 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
824 sizeof(struct lpfc_name));
825 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
826 mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
823 827
824 mb->mbxCommand = MBX_REG_VPI; 828 mb->mbxCommand = MBX_REG_VPI;
825 mb->mbxOwner = OWN_HOST; 829 mb->mbxOwner = OWN_HOST;
@@ -849,7 +853,10 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
849 MAILBOX_t *mb = &pmb->u.mb; 853 MAILBOX_t *mb = &pmb->u.mb;
850 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 854 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
851 855
852 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; 856 if (phba->sli_rev < LPFC_SLI_REV4)
857 mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
858 else
859 mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;
853 860
854 mb->mbxCommand = MBX_UNREG_VPI; 861 mb->mbxCommand = MBX_UNREG_VPI;
855 mb->mbxOwner = OWN_HOST; 862 mb->mbxOwner = OWN_HOST;
@@ -1132,7 +1139,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1132 /* Otherwise we setup specific rctl / type masks for this ring */ 1139 /* Otherwise we setup specific rctl / type masks for this ring */
1133 for (i = 0; i < pring->num_mask; i++) { 1140 for (i = 0; i < pring->num_mask; i++) {
1134 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; 1141 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
1135 if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ) 1142 if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
1136 mb->un.varCfgRing.rrRegs[i].rmask = 0xff; 1143 mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
1137 else 1144 else
1138 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; 1145 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
@@ -1654,9 +1661,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1654 /* Allocate record for keeping SGE virtual addresses */ 1661 /* Allocate record for keeping SGE virtual addresses */
1655 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), 1662 mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1656 GFP_KERNEL); 1663 GFP_KERNEL);
1657 if (!mbox->sge_array) 1664 if (!mbox->sge_array) {
1665 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1666 "2527 Failed to allocate non-embedded SGE "
1667 "array.\n");
1658 return 0; 1668 return 0;
1659 1669 }
1660 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { 1670 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1661 /* The DMA memory is always allocated in the length of a 1671 /* The DMA memory is always allocated in the length of a
1662 * page even though the last SGE might not fill up to a 1672 * page even though the last SGE might not fill up to a
@@ -1753,11 +1763,6 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1753 /* Set up host requested features. */ 1763 /* Set up host requested features. */
1754 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); 1764 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1755 1765
1756 if (phba->cfg_enable_fip)
1757 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1758 else
1759 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
1760
1761 /* Enable DIF (block guard) only if configured to do so. */ 1766 /* Enable DIF (block guard) only if configured to do so. */
1762 if (phba->cfg_enable_bg) 1767 if (phba->cfg_enable_bg)
1763 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); 1768 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
@@ -1817,6 +1822,9 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1817 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); 1822 bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
1818 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); 1823 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
1819 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); 1824 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
1825 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
1826 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
1827 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
1820 reg_vfi->bde.addrHigh = putPaddrHigh(phys); 1828 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1821 reg_vfi->bde.addrLow = putPaddrLow(phys); 1829 reg_vfi->bde.addrLow = putPaddrLow(phys);
1822 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); 1830 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
@@ -1850,7 +1858,7 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1850/** 1858/**
1851 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command 1859 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
1852 * @mbox: pointer to lpfc mbox command to initialize. 1860 * @mbox: pointer to lpfc mbox command to initialize.
1853 * @vfi: VFI to be unregistered. 1861 * @vport: vport associated with the VF.
1854 * 1862 *
1855 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric 1863 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
1856 * (logical NPort) into the inactive state. The SLI Host must have logged out 1864 * (logical NPort) into the inactive state. The SLI Host must have logged out
@@ -1859,11 +1867,12 @@ lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1859 * fabric inactive. 1867 * fabric inactive.
1860 **/ 1868 **/
1861void 1869void
1862lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi) 1870lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
1863{ 1871{
1864 memset(mbox, 0, sizeof(*mbox)); 1872 memset(mbox, 0, sizeof(*mbox));
1865 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); 1873 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
1866 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi); 1874 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
1875 vport->vfi + vport->phba->vfi_base);
1867} 1876}
1868 1877
1869/** 1878/**
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 3e74136f1ede..2ed6af194932 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1223,6 +1223,12 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1223 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 1224 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1225 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1226 if (phba->sli_rev == LPFC_SLI_REV4) {
1227 spin_unlock_irq(&phba->hbalock);
1228 lpfc_sli4_free_rpi(phba,
1229 mb->u.mb.un.varRegLogin.rpi);
1230 spin_lock_irq(&phba->hbalock);
1231 }
1226 mp = (struct lpfc_dmabuf *) (mb->context1); 1232 mp = (struct lpfc_dmabuf *) (mb->context1);
1227 if (mp) { 1233 if (mp) {
1228 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 1234 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1230,6 +1236,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1230 } 1236 }
1231 lpfc_nlp_put(ndlp); 1237 lpfc_nlp_put(ndlp);
1232 list_del(&mb->list); 1238 list_del(&mb->list);
1239 phba->sli.mboxq_cnt--;
1233 mempool_free(mb, phba->mbox_mem_pool); 1240 mempool_free(mb, phba->mbox_mem_pool);
1234 } 1241 }
1235 } 1242 }
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c88f59f0ce30..a246410ce9df 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -59,22 +59,26 @@ static char *dif_op_str[] = {
59}; 59};
60static void 60static void
61lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 61lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
62static void
63lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
62 64
63static void 65static void
64lpfc_debug_save_data(struct scsi_cmnd *cmnd) 66lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
65{ 67{
66 void *src, *dst; 68 void *src, *dst;
67 struct scatterlist *sgde = scsi_sglist(cmnd); 69 struct scatterlist *sgde = scsi_sglist(cmnd);
68 70
69 if (!_dump_buf_data) { 71 if (!_dump_buf_data) {
70 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", 72 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
73 "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
71 __func__); 74 __func__);
72 return; 75 return;
73 } 76 }
74 77
75 78
76 if (!sgde) { 79 if (!sgde) {
77 printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n"); 80 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
81 "9051 BLKGRD: ERROR: data scatterlist is null\n");
78 return; 82 return;
79 } 83 }
80 84
@@ -88,19 +92,21 @@ lpfc_debug_save_data(struct scsi_cmnd *cmnd)
88} 92}
89 93
90static void 94static void
91lpfc_debug_save_dif(struct scsi_cmnd *cmnd) 95lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
92{ 96{
93 void *src, *dst; 97 void *src, *dst;
94 struct scatterlist *sgde = scsi_prot_sglist(cmnd); 98 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
95 99
96 if (!_dump_buf_dif) { 100 if (!_dump_buf_dif) {
97 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n", 101 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
102 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
98 __func__); 103 __func__);
99 return; 104 return;
100 } 105 }
101 106
102 if (!sgde) { 107 if (!sgde) {
103 printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n"); 108 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
109 "9053 BLKGRD: ERROR: prot scatterlist is null\n");
104 return; 110 return;
105 } 111 }
106 112
@@ -242,6 +248,36 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
242} 248}
243 249
244/** 250/**
251 * lpfc_change_queue_depth - Alter scsi device queue depth
252 * @sdev: Pointer the scsi device on which to change the queue depth.
253 * @qdepth: New queue depth to set the sdev to.
254 * @reason: The reason for the queue depth change.
255 *
256 * This function is called by the midlayer and the LLD to alter the queue
257 * depth for a scsi device. This function sets the queue depth to the new
258 * value and sends an event out to log the queue depth change.
259 **/
260int
261lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
262{
263 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
264 struct lpfc_hba *phba = vport->phba;
265 struct lpfc_rport_data *rdata;
266 unsigned long new_queue_depth, old_queue_depth;
267
268 old_queue_depth = sdev->queue_depth;
269 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
270 new_queue_depth = sdev->queue_depth;
271 rdata = sdev->hostdata;
272 if (rdata)
273 lpfc_send_sdev_queuedepth_change_event(phba, vport,
274 rdata->pnode, sdev->lun,
275 old_queue_depth,
276 new_queue_depth);
277 return sdev->queue_depth;
278}
279
280/**
245 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 281 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
246 * @phba: The Hba for which this call is being executed. 282 * @phba: The Hba for which this call is being executed.
247 * 283 *
@@ -305,8 +341,10 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
305 if (vport->cfg_lun_queue_depth <= queue_depth) 341 if (vport->cfg_lun_queue_depth <= queue_depth)
306 return; 342 return;
307 spin_lock_irqsave(&phba->hbalock, flags); 343 spin_lock_irqsave(&phba->hbalock, flags);
308 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) || 344 if (time_before(jiffies,
309 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) { 345 phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
346 time_before(jiffies,
347 phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
310 spin_unlock_irqrestore(&phba->hbalock, flags); 348 spin_unlock_irqrestore(&phba->hbalock, flags);
311 return; 349 return;
312 } 350 }
@@ -338,10 +376,9 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
338 struct lpfc_vport **vports; 376 struct lpfc_vport **vports;
339 struct Scsi_Host *shost; 377 struct Scsi_Host *shost;
340 struct scsi_device *sdev; 378 struct scsi_device *sdev;
341 unsigned long new_queue_depth, old_queue_depth; 379 unsigned long new_queue_depth;
342 unsigned long num_rsrc_err, num_cmd_success; 380 unsigned long num_rsrc_err, num_cmd_success;
343 int i; 381 int i;
344 struct lpfc_rport_data *rdata;
345 382
346 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 383 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
347 num_cmd_success = atomic_read(&phba->num_cmd_success); 384 num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -359,22 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
359 else 396 else
360 new_queue_depth = sdev->queue_depth - 397 new_queue_depth = sdev->queue_depth -
361 new_queue_depth; 398 new_queue_depth;
362 old_queue_depth = sdev->queue_depth; 399 lpfc_change_queue_depth(sdev, new_queue_depth,
363 if (sdev->ordered_tags) 400 SCSI_QDEPTH_DEFAULT);
364 scsi_adjust_queue_depth(sdev,
365 MSG_ORDERED_TAG,
366 new_queue_depth);
367 else
368 scsi_adjust_queue_depth(sdev,
369 MSG_SIMPLE_TAG,
370 new_queue_depth);
371 rdata = sdev->hostdata;
372 if (rdata)
373 lpfc_send_sdev_queuedepth_change_event(
374 phba, vports[i],
375 rdata->pnode,
376 sdev->lun, old_queue_depth,
377 new_queue_depth);
378 } 401 }
379 } 402 }
380 lpfc_destroy_vport_work_array(phba, vports); 403 lpfc_destroy_vport_work_array(phba, vports);
@@ -398,7 +421,6 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
398 struct Scsi_Host *shost; 421 struct Scsi_Host *shost;
399 struct scsi_device *sdev; 422 struct scsi_device *sdev;
400 int i; 423 int i;
401 struct lpfc_rport_data *rdata;
402 424
403 vports = lpfc_create_vport_work_array(phba); 425 vports = lpfc_create_vport_work_array(phba);
404 if (vports != NULL) 426 if (vports != NULL)
@@ -408,22 +430,9 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
408 if (vports[i]->cfg_lun_queue_depth <= 430 if (vports[i]->cfg_lun_queue_depth <=
409 sdev->queue_depth) 431 sdev->queue_depth)
410 continue; 432 continue;
411 if (sdev->ordered_tags) 433 lpfc_change_queue_depth(sdev,
412 scsi_adjust_queue_depth(sdev, 434 sdev->queue_depth+1,
413 MSG_ORDERED_TAG, 435 SCSI_QDEPTH_RAMP_UP);
414 sdev->queue_depth+1);
415 else
416 scsi_adjust_queue_depth(sdev,
417 MSG_SIMPLE_TAG,
418 sdev->queue_depth+1);
419 rdata = sdev->hostdata;
420 if (rdata)
421 lpfc_send_sdev_queuedepth_change_event(
422 phba, vports[i],
423 rdata->pnode,
424 sdev->lun,
425 sdev->queue_depth - 1,
426 sdev->queue_depth);
427 } 436 }
428 } 437 }
429 lpfc_destroy_vport_work_array(phba, vports); 438 lpfc_destroy_vport_work_array(phba, vports);
@@ -589,7 +598,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
589 iocb->ulpClass = CLASS3; 598 iocb->ulpClass = CLASS3;
590 psb->status = IOSTAT_SUCCESS; 599 psb->status = IOSTAT_SUCCESS;
591 /* Put it back into the SCSI buffer list */ 600 /* Put it back into the SCSI buffer list */
592 lpfc_release_scsi_buf_s4(phba, psb); 601 lpfc_release_scsi_buf_s3(phba, psb);
593 602
594 } 603 }
595 604
@@ -1024,7 +1033,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1024 1033
1025 lpfc_cmd->seg_cnt = nseg; 1034 lpfc_cmd->seg_cnt = nseg;
1026 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1035 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1027 printk(KERN_ERR "%s: Too many sg segments from " 1036 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1037 "9064 BLKGRD: %s: Too many sg segments from "
1028 "dma_map_sg. Config %d, seg_cnt %d\n", 1038 "dma_map_sg. Config %d, seg_cnt %d\n",
1029 __func__, phba->cfg_sg_seg_cnt, 1039 __func__, phba->cfg_sg_seg_cnt,
1030 lpfc_cmd->seg_cnt); 1040 lpfc_cmd->seg_cnt);
@@ -1112,7 +1122,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1112 * with the cmd 1122 * with the cmd
1113 */ 1123 */
1114static int 1124static int
1115lpfc_sc_to_sli_prof(struct scsi_cmnd *sc) 1125lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1116{ 1126{
1117 uint8_t guard_type = scsi_host_get_guard(sc->device->host); 1127 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1118 uint8_t ret_prof = LPFC_PROF_INVALID; 1128 uint8_t ret_prof = LPFC_PROF_INVALID;
@@ -1136,7 +1146,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
1136 1146
1137 case SCSI_PROT_NORMAL: 1147 case SCSI_PROT_NORMAL:
1138 default: 1148 default:
1139 printk(KERN_ERR "Bad op/guard:%d/%d combination\n", 1149 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1150 "9063 BLKGRD:Bad op/guard:%d/%d combination\n",
1140 scsi_get_prot_op(sc), guard_type); 1151 scsi_get_prot_op(sc), guard_type);
1141 break; 1152 break;
1142 1153
@@ -1157,7 +1168,8 @@ lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
1157 case SCSI_PROT_WRITE_STRIP: 1168 case SCSI_PROT_WRITE_STRIP:
1158 case SCSI_PROT_NORMAL: 1169 case SCSI_PROT_NORMAL:
1159 default: 1170 default:
1160 printk(KERN_ERR "Bad op/guard:%d/%d combination\n", 1171 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1172 "9075 BLKGRD: Bad op/guard:%d/%d combination\n",
1161 scsi_get_prot_op(sc), guard_type); 1173 scsi_get_prot_op(sc), guard_type);
1162 break; 1174 break;
1163 } 1175 }
@@ -1259,7 +1271,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1259 uint16_t apptagmask, apptagval; 1271 uint16_t apptagmask, apptagval;
1260 1272
1261 pde1 = (struct lpfc_pde *) bpl; 1273 pde1 = (struct lpfc_pde *) bpl;
1262 prof = lpfc_sc_to_sli_prof(sc); 1274 prof = lpfc_sc_to_sli_prof(phba, sc);
1263 1275
1264 if (prof == LPFC_PROF_INVALID) 1276 if (prof == LPFC_PROF_INVALID)
1265 goto out; 1277 goto out;
@@ -1359,7 +1371,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1359 return 0; 1371 return 0;
1360 } 1372 }
1361 1373
1362 prof = lpfc_sc_to_sli_prof(sc); 1374 prof = lpfc_sc_to_sli_prof(phba, sc);
1363 if (prof == LPFC_PROF_INVALID) 1375 if (prof == LPFC_PROF_INVALID)
1364 goto out; 1376 goto out;
1365 1377
@@ -1408,7 +1420,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1408 subtotal = 0; /* total bytes processed for current prot grp */ 1420 subtotal = 0; /* total bytes processed for current prot grp */
1409 while (!pgdone) { 1421 while (!pgdone) {
1410 if (!sgde) { 1422 if (!sgde) {
1411 printk(KERN_ERR "%s Invalid data segment\n", 1423 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1424 "9065 BLKGRD:%s Invalid data segment\n",
1412 __func__); 1425 __func__);
1413 return 0; 1426 return 0;
1414 } 1427 }
@@ -1462,7 +1475,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1462 reftag += protgrp_blks; 1475 reftag += protgrp_blks;
1463 } else { 1476 } else {
1464 /* if we're here, we have a bug */ 1477 /* if we're here, we have a bug */
1465 printk(KERN_ERR "BLKGRD: bug in %s\n", __func__); 1478 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1479 "9054 BLKGRD: bug in %s\n", __func__);
1466 } 1480 }
1467 1481
1468 } while (!alldone); 1482 } while (!alldone);
@@ -1544,8 +1558,10 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1544 1558
1545 lpfc_cmd->seg_cnt = datasegcnt; 1559 lpfc_cmd->seg_cnt = datasegcnt;
1546 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1560 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1547 printk(KERN_ERR "%s: Too many sg segments from " 1561 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1548 "dma_map_sg. Config %d, seg_cnt %d\n", 1562 "9067 BLKGRD: %s: Too many sg segments"
1563 " from dma_map_sg. Config %d, seg_cnt"
1564 " %d\n",
1549 __func__, phba->cfg_sg_seg_cnt, 1565 __func__, phba->cfg_sg_seg_cnt,
1550 lpfc_cmd->seg_cnt); 1566 lpfc_cmd->seg_cnt);
1551 scsi_dma_unmap(scsi_cmnd); 1567 scsi_dma_unmap(scsi_cmnd);
@@ -1579,8 +1595,9 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1579 lpfc_cmd->prot_seg_cnt = protsegcnt; 1595 lpfc_cmd->prot_seg_cnt = protsegcnt;
1580 if (lpfc_cmd->prot_seg_cnt 1596 if (lpfc_cmd->prot_seg_cnt
1581 > phba->cfg_prot_sg_seg_cnt) { 1597 > phba->cfg_prot_sg_seg_cnt) {
1582 printk(KERN_ERR "%s: Too many prot sg segments " 1598 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1583 "from dma_map_sg. Config %d," 1599 "9068 BLKGRD: %s: Too many prot sg "
1600 "segments from dma_map_sg. Config %d,"
1584 "prot_seg_cnt %d\n", __func__, 1601 "prot_seg_cnt %d\n", __func__,
1585 phba->cfg_prot_sg_seg_cnt, 1602 phba->cfg_prot_sg_seg_cnt,
1586 lpfc_cmd->prot_seg_cnt); 1603 lpfc_cmd->prot_seg_cnt);
@@ -1671,23 +1688,26 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1671 uint32_t bgstat = bgf->bgstat; 1688 uint32_t bgstat = bgf->bgstat;
1672 uint64_t failing_sector = 0; 1689 uint64_t failing_sector = 0;
1673 1690
1674 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x " 1691 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
1692 " 0x%x lba 0x%llx blk cnt 0x%x "
1675 "bgstat=0x%x bghm=0x%x\n", 1693 "bgstat=0x%x bghm=0x%x\n",
1676 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1694 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1677 blk_rq_sectors(cmd->request), bgstat, bghm); 1695 blk_rq_sectors(cmd->request), bgstat, bghm);
1678 1696
1679 spin_lock(&_dump_buf_lock); 1697 spin_lock(&_dump_buf_lock);
1680 if (!_dump_buf_done) { 1698 if (!_dump_buf_done) {
1681 printk(KERN_ERR "Saving Data for %u blocks to debugfs\n", 1699 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
1700 " Data for %u blocks to debugfs\n",
1682 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 1701 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1683 lpfc_debug_save_data(cmd); 1702 lpfc_debug_save_data(phba, cmd);
1684 1703
1685 /* If we have a prot sgl, save the DIF buffer */ 1704 /* If we have a prot sgl, save the DIF buffer */
1686 if (lpfc_prot_group_type(phba, cmd) == 1705 if (lpfc_prot_group_type(phba, cmd) ==
1687 LPFC_PG_TYPE_DIF_BUF) { 1706 LPFC_PG_TYPE_DIF_BUF) {
1688 printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n", 1707 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
1689 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 1708 "Saving DIF for %u blocks to debugfs\n",
1690 lpfc_debug_save_dif(cmd); 1709 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1710 lpfc_debug_save_dif(phba, cmd);
1691 } 1711 }
1692 1712
1693 _dump_buf_done = 1; 1713 _dump_buf_done = 1;
@@ -1696,15 +1716,17 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1696 1716
1697 if (lpfc_bgs_get_invalid_prof(bgstat)) { 1717 if (lpfc_bgs_get_invalid_prof(bgstat)) {
1698 cmd->result = ScsiResult(DID_ERROR, 0); 1718 cmd->result = ScsiResult(DID_ERROR, 0);
1699 printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n", 1719 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
1700 bgstat); 1720 " BlockGuard profile. bgstat:0x%x\n",
1721 bgstat);
1701 ret = (-1); 1722 ret = (-1);
1702 goto out; 1723 goto out;
1703 } 1724 }
1704 1725
1705 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 1726 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1706 cmd->result = ScsiResult(DID_ERROR, 0); 1727 cmd->result = ScsiResult(DID_ERROR, 0);
1707 printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n", 1728 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
1729 "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1708 bgstat); 1730 bgstat);
1709 ret = (-1); 1731 ret = (-1);
1710 goto out; 1732 goto out;
@@ -1718,7 +1740,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1718 cmd->result = DRIVER_SENSE << 24 1740 cmd->result = DRIVER_SENSE << 24
1719 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1741 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1720 phba->bg_guard_err_cnt++; 1742 phba->bg_guard_err_cnt++;
1721 printk(KERN_ERR "BLKGRD: guard_tag error\n"); 1743 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1744 "9055 BLKGRD: guard_tag error\n");
1722 } 1745 }
1723 1746
1724 if (lpfc_bgs_get_reftag_err(bgstat)) { 1747 if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -1730,7 +1753,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1730 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1753 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1731 1754
1732 phba->bg_reftag_err_cnt++; 1755 phba->bg_reftag_err_cnt++;
1733 printk(KERN_ERR "BLKGRD: ref_tag error\n"); 1756 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1757 "9056 BLKGRD: ref_tag error\n");
1734 } 1758 }
1735 1759
1736 if (lpfc_bgs_get_apptag_err(bgstat)) { 1760 if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -1742,7 +1766,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1742 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1766 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1743 1767
1744 phba->bg_apptag_err_cnt++; 1768 phba->bg_apptag_err_cnt++;
1745 printk(KERN_ERR "BLKGRD: app_tag error\n"); 1769 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1770 "9061 BLKGRD: app_tag error\n");
1746 } 1771 }
1747 1772
1748 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 1773 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -1763,7 +1788,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1763 if (!ret) { 1788 if (!ret) {
1764 /* No error was reported - problem in FW? */ 1789 /* No error was reported - problem in FW? */
1765 cmd->result = ScsiResult(DID_ERROR, 0); 1790 cmd->result = ScsiResult(DID_ERROR, 0);
1766 printk(KERN_ERR "BLKGRD: no errors reported!\n"); 1791 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1792 "9057 BLKGRD: no errors reported!\n");
1767 } 1793 }
1768 1794
1769out: 1795out:
@@ -1822,9 +1848,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1822 1848
1823 lpfc_cmd->seg_cnt = nseg; 1849 lpfc_cmd->seg_cnt = nseg;
1824 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1850 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1825 printk(KERN_ERR "%s: Too many sg segments from " 1851 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
1826 "dma_map_sg. Config %d, seg_cnt %d\n", 1852 " %s: Too many sg segments from "
1827 __func__, phba->cfg_sg_seg_cnt, 1853 "dma_map_sg. Config %d, seg_cnt %d\n",
1854 __func__, phba->cfg_sg_seg_cnt,
1828 lpfc_cmd->seg_cnt); 1855 lpfc_cmd->seg_cnt);
1829 scsi_dma_unmap(scsi_cmnd); 1856 scsi_dma_unmap(scsi_cmnd);
1830 return 1; 1857 return 1;
@@ -2050,6 +2077,21 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2050 goto out; 2077 goto out;
2051 } 2078 }
2052 2079
2080 if (resp_info & RSP_LEN_VALID) {
2081 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2082 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
2083 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
2084 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2085 "2719 Invalid response length: "
2086 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
2087 cmnd->device->id,
2088 cmnd->device->lun, cmnd->cmnd[0],
2089 rsplen);
2090 host_status = DID_ERROR;
2091 goto out;
2092 }
2093 }
2094
2053 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 2095 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
2054 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 2096 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
2055 if (snslen > SCSI_SENSE_BUFFERSIZE) 2097 if (snslen > SCSI_SENSE_BUFFERSIZE)
@@ -2074,15 +2116,6 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2074 be32_to_cpu(fcprsp->rspRspLen), 2116 be32_to_cpu(fcprsp->rspRspLen),
2075 fcprsp->rspInfo3); 2117 fcprsp->rspInfo3);
2076 2118
2077 if (resp_info & RSP_LEN_VALID) {
2078 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2079 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
2080 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
2081 host_status = DID_ERROR;
2082 goto out;
2083 }
2084 }
2085
2086 scsi_set_resid(cmnd, 0); 2119 scsi_set_resid(cmnd, 0);
2087 if (resp_info & RESID_UNDER) { 2120 if (resp_info & RESID_UNDER) {
2088 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 2121 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
@@ -2180,7 +2213,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2180 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2213 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2181 int result; 2214 int result;
2182 struct scsi_device *tmp_sdev; 2215 struct scsi_device *tmp_sdev;
2183 int depth = 0; 2216 int depth;
2184 unsigned long flags; 2217 unsigned long flags;
2185 struct lpfc_fast_path_event *fast_path_evt; 2218 struct lpfc_fast_path_event *fast_path_evt;
2186 struct Scsi_Host *shost = cmd->device->host; 2219 struct Scsi_Host *shost = cmd->device->host;
@@ -2264,7 +2297,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2264 lpfc_printf_vlog(vport, KERN_WARNING, 2297 lpfc_printf_vlog(vport, KERN_WARNING,
2265 LOG_BG, 2298 LOG_BG,
2266 "9031 non-zero BGSTAT " 2299 "9031 non-zero BGSTAT "
2267 "on unprotected cmd"); 2300 "on unprotected cmd\n");
2268 } 2301 }
2269 } 2302 }
2270 2303
@@ -2347,67 +2380,29 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2347 return; 2380 return;
2348 } 2381 }
2349 2382
2350
2351 if (!result) 2383 if (!result)
2352 lpfc_rampup_queue_depth(vport, queue_depth); 2384 lpfc_rampup_queue_depth(vport, queue_depth);
2353 2385
2354 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
2355 ((jiffies - pnode->last_ramp_up_time) >
2356 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
2357 ((jiffies - pnode->last_q_full_time) >
2358 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
2359 (vport->cfg_lun_queue_depth > queue_depth)) {
2360 shost_for_each_device(tmp_sdev, shost) {
2361 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
2362 if (tmp_sdev->id != scsi_id)
2363 continue;
2364 if (tmp_sdev->ordered_tags)
2365 scsi_adjust_queue_depth(tmp_sdev,
2366 MSG_ORDERED_TAG,
2367 tmp_sdev->queue_depth+1);
2368 else
2369 scsi_adjust_queue_depth(tmp_sdev,
2370 MSG_SIMPLE_TAG,
2371 tmp_sdev->queue_depth+1);
2372
2373 pnode->last_ramp_up_time = jiffies;
2374 }
2375 }
2376 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
2377 0xFFFFFFFF,
2378 queue_depth , queue_depth + 1);
2379 }
2380
2381 /* 2386 /*
2382 * Check for queue full. If the lun is reporting queue full, then 2387 * Check for queue full. If the lun is reporting queue full, then
2383 * back off the lun queue depth to prevent target overloads. 2388 * back off the lun queue depth to prevent target overloads.
2384 */ 2389 */
2385 if (result == SAM_STAT_TASK_SET_FULL && pnode && 2390 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
2386 NLP_CHK_NODE_ACT(pnode)) { 2391 NLP_CHK_NODE_ACT(pnode)) {
2387 pnode->last_q_full_time = jiffies;
2388
2389 shost_for_each_device(tmp_sdev, shost) { 2392 shost_for_each_device(tmp_sdev, shost) {
2390 if (tmp_sdev->id != scsi_id) 2393 if (tmp_sdev->id != scsi_id)
2391 continue; 2394 continue;
2392 depth = scsi_track_queue_full(tmp_sdev, 2395 depth = scsi_track_queue_full(tmp_sdev,
2393 tmp_sdev->queue_depth - 1); 2396 tmp_sdev->queue_depth-1);
2394 } 2397 if (depth <= 0)
2395 /* 2398 continue;
2396 * The queue depth cannot be lowered any more.
2397 * Modify the returned error code to store
2398 * the final depth value set by
2399 * scsi_track_queue_full.
2400 */
2401 if (depth == -1)
2402 depth = shost->cmd_per_lun;
2403
2404 if (depth) {
2405 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2399 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2406 "0711 detected queue full - lun queue " 2400 "0711 detected queue full - lun queue "
2407 "depth adjusted to %d.\n", depth); 2401 "depth adjusted to %d.\n", depth);
2408 lpfc_send_sdev_queuedepth_change_event(phba, vport, 2402 lpfc_send_sdev_queuedepth_change_event(phba, vport,
2409 pnode, 0xFFFFFFFF, 2403 pnode,
2410 depth+1, depth); 2404 tmp_sdev->lun,
2405 depth+1, depth);
2411 } 2406 }
2412 } 2407 }
2413 2408
@@ -2745,7 +2740,9 @@ void lpfc_poll_timeout(unsigned long ptr)
2745 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 2740 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2746 2741
2747 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2742 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2748 lpfc_sli_poll_fcp_ring (phba); 2743 lpfc_sli_handle_fast_ring_event(phba,
2744 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2745
2749 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2746 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2750 lpfc_poll_rearm_timer(phba); 2747 lpfc_poll_rearm_timer(phba);
2751 } 2748 }
@@ -2771,7 +2768,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2771 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2768 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2772 struct lpfc_hba *phba = vport->phba; 2769 struct lpfc_hba *phba = vport->phba;
2773 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2770 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2774 struct lpfc_nodelist *ndlp = rdata->pnode; 2771 struct lpfc_nodelist *ndlp;
2775 struct lpfc_scsi_buf *lpfc_cmd; 2772 struct lpfc_scsi_buf *lpfc_cmd;
2776 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 2773 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2777 int err; 2774 int err;
@@ -2781,13 +2778,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2781 cmnd->result = err; 2778 cmnd->result = err;
2782 goto out_fail_command; 2779 goto out_fail_command;
2783 } 2780 }
2781 ndlp = rdata->pnode;
2784 2782
2785 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 2783 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2786 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 2784 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2787 2785
2788 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x " 2786 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2789 "str=%s without registering for BlockGuard - " 2787 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
2790 "Rejecting command\n", 2788 " op:%02x str=%s without registering for"
2789 " BlockGuard - Rejecting command\n",
2791 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2790 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2792 dif_op_str[scsi_get_prot_op(cmnd)]); 2791 dif_op_str[scsi_get_prot_op(cmnd)]);
2793 goto out_fail_command; 2792 goto out_fail_command;
@@ -2827,61 +2826,66 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2827 cmnd->scsi_done = done; 2826 cmnd->scsi_done = done;
2828 2827
2829 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 2828 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2830 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2829 if (vport->phba->cfg_enable_bg) {
2830 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2831 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " 2831 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2832 "str=%s\n", 2832 "str=%s\n",
2833 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2833 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2834 dif_op_str[scsi_get_prot_op(cmnd)]); 2834 dif_op_str[scsi_get_prot_op(cmnd)]);
2835 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2835 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2836 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " 2836 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2837 "%02x %02x %02x %02x %02x\n", 2837 "%02x %02x %02x %02x %02x\n",
2838 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], 2838 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2839 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], 2839 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2840 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], 2840 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2841 cmnd->cmnd[9]); 2841 cmnd->cmnd[9]);
2842 if (cmnd->cmnd[0] == READ_10) 2842 if (cmnd->cmnd[0] == READ_10)
2843 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2843 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2844 "9035 BLKGRD: READ @ sector %llu, " 2844 "9035 BLKGRD: READ @ sector %llu, "
2845 "count %u\n", 2845 "count %u\n",
2846 (unsigned long long)scsi_get_lba(cmnd), 2846 (unsigned long long)scsi_get_lba(cmnd),
2847 blk_rq_sectors(cmnd->request)); 2847 blk_rq_sectors(cmnd->request));
2848 else if (cmnd->cmnd[0] == WRITE_10) 2848 else if (cmnd->cmnd[0] == WRITE_10)
2849 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2849 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2850 "9036 BLKGRD: WRITE @ sector %llu, " 2850 "9036 BLKGRD: WRITE @ sector %llu, "
2851 "count %u cmd=%p\n", 2851 "count %u cmd=%p\n",
2852 (unsigned long long)scsi_get_lba(cmnd), 2852 (unsigned long long)scsi_get_lba(cmnd),
2853 blk_rq_sectors(cmnd->request), 2853 blk_rq_sectors(cmnd->request),
2854 cmnd); 2854 cmnd);
2855 }
2855 2856
2856 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2857 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2857 } else { 2858 } else {
2858 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2859 if (vport->phba->cfg_enable_bg) {
2859 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2860 " str=%s\n",
2861 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2862 dif_op_str[scsi_get_prot_op(cmnd)]);
2863 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2864 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2865 "%02x %02x %02x %02x %02x\n",
2866 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2867 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2868 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2869 cmnd->cmnd[9]);
2870 if (cmnd->cmnd[0] == READ_10)
2871 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2860 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2872 "9040 dbg: READ @ sector %llu, " 2861 "9038 BLKGRD: rcvd unprotected cmd:"
2873 "count %u\n", 2862 "%02x op:%02x str=%s\n",
2874 (unsigned long long)scsi_get_lba(cmnd), 2863 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2864 dif_op_str[scsi_get_prot_op(cmnd)]);
2865 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2866 "9039 BLKGRD: CDB: %02x %02x %02x "
2867 "%02x %02x %02x %02x %02x %02x %02x\n",
2868 cmnd->cmnd[0], cmnd->cmnd[1],
2869 cmnd->cmnd[2], cmnd->cmnd[3],
2870 cmnd->cmnd[4], cmnd->cmnd[5],
2871 cmnd->cmnd[6], cmnd->cmnd[7],
2872 cmnd->cmnd[8], cmnd->cmnd[9]);
2873 if (cmnd->cmnd[0] == READ_10)
2874 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2875 "9040 dbg: READ @ sector %llu, "
2876 "count %u\n",
2877 (unsigned long long)scsi_get_lba(cmnd),
2875 blk_rq_sectors(cmnd->request)); 2878 blk_rq_sectors(cmnd->request));
2876 else if (cmnd->cmnd[0] == WRITE_10) 2879 else if (cmnd->cmnd[0] == WRITE_10)
2877 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2880 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2878 "9041 dbg: WRITE @ sector %llu, " 2881 "9041 dbg: WRITE @ sector %llu, "
2879 "count %u cmd=%p\n", 2882 "count %u cmd=%p\n",
2880 (unsigned long long)scsi_get_lba(cmnd), 2883 (unsigned long long)scsi_get_lba(cmnd),
2881 blk_rq_sectors(cmnd->request), cmnd); 2884 blk_rq_sectors(cmnd->request), cmnd);
2882 else 2885 else
2883 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2886 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2884 "9042 dbg: parser not implemented\n"); 2887 "9042 dbg: parser not implemented\n");
2888 }
2885 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 2889 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2886 } 2890 }
2887 2891
@@ -2898,7 +2902,11 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2898 goto out_host_busy_free_buf; 2902 goto out_host_busy_free_buf;
2899 } 2903 }
2900 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2904 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2901 lpfc_sli_poll_fcp_ring(phba); 2905 spin_unlock(shost->host_lock);
2906 lpfc_sli_handle_fast_ring_event(phba,
2907 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2908
2909 spin_lock(shost->host_lock);
2902 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2910 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2903 lpfc_poll_rearm_timer(phba); 2911 lpfc_poll_rearm_timer(phba);
2904 } 2912 }
@@ -2917,28 +2925,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2917} 2925}
2918 2926
2919/** 2927/**
2920 * lpfc_block_error_handler - Routine to block error handler
2921 * @cmnd: Pointer to scsi_cmnd data structure.
2922 *
2923 * This routine blocks execution till fc_rport state is not FC_PORSTAT_BLCOEKD.
2924 **/
2925static void
2926lpfc_block_error_handler(struct scsi_cmnd *cmnd)
2927{
2928 struct Scsi_Host *shost = cmnd->device->host;
2929 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2930
2931 spin_lock_irq(shost->host_lock);
2932 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
2933 spin_unlock_irq(shost->host_lock);
2934 msleep(1000);
2935 spin_lock_irq(shost->host_lock);
2936 }
2937 spin_unlock_irq(shost->host_lock);
2938 return;
2939}
2940
2941/**
2942 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 2928 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
2943 * @cmnd: Pointer to scsi_cmnd data structure. 2929 * @cmnd: Pointer to scsi_cmnd data structure.
2944 * 2930 *
@@ -2961,7 +2947,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
2961 int ret = SUCCESS; 2947 int ret = SUCCESS;
2962 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 2948 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
2963 2949
2964 lpfc_block_error_handler(cmnd); 2950 fc_block_scsi_eh(cmnd);
2965 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 2951 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
2966 BUG_ON(!lpfc_cmd); 2952 BUG_ON(!lpfc_cmd);
2967 2953
@@ -3001,6 +2987,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3001 2987
3002 icmd->ulpLe = 1; 2988 icmd->ulpLe = 1;
3003 icmd->ulpClass = cmd->ulpClass; 2989 icmd->ulpClass = cmd->ulpClass;
2990
2991 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
2992 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
2993
3004 if (lpfc_is_link_up(phba)) 2994 if (lpfc_is_link_up(phba))
3005 icmd->ulpCommand = CMD_ABORT_XRI_CN; 2995 icmd->ulpCommand = CMD_ABORT_XRI_CN;
3006 else 2996 else
@@ -3016,7 +3006,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3016 } 3006 }
3017 3007
3018 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3008 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3019 lpfc_sli_poll_fcp_ring (phba); 3009 lpfc_sli_handle_fast_ring_event(phba,
3010 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3020 3011
3021 lpfc_cmd->waitq = &waitq; 3012 lpfc_cmd->waitq = &waitq;
3022 /* Wait for abort to complete */ 3013 /* Wait for abort to complete */
@@ -3166,9 +3157,15 @@ static int
3166lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 3157lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3167{ 3158{
3168 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3159 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3169 struct lpfc_nodelist *pnode = rdata->pnode; 3160 struct lpfc_nodelist *pnode;
3170 unsigned long later; 3161 unsigned long later;
3171 3162
3163 if (!rdata) {
3164 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3165 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
3166 return FAILED;
3167 }
3168 pnode = rdata->pnode;
3172 /* 3169 /*
3173 * If target is not in a MAPPED state, delay until 3170 * If target is not in a MAPPED state, delay until
3174 * target is rediscovered or devloss timeout expires. 3171 * target is rediscovered or devloss timeout expires.
@@ -3253,13 +3250,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3253 struct Scsi_Host *shost = cmnd->device->host; 3250 struct Scsi_Host *shost = cmnd->device->host;
3254 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3251 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3255 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3252 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3256 struct lpfc_nodelist *pnode = rdata->pnode; 3253 struct lpfc_nodelist *pnode;
3257 unsigned tgt_id = cmnd->device->id; 3254 unsigned tgt_id = cmnd->device->id;
3258 unsigned int lun_id = cmnd->device->lun; 3255 unsigned int lun_id = cmnd->device->lun;
3259 struct lpfc_scsi_event_header scsi_event; 3256 struct lpfc_scsi_event_header scsi_event;
3260 int status; 3257 int status;
3261 3258
3262 lpfc_block_error_handler(cmnd); 3259 if (!rdata) {
3260 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3261 "0798 Device Reset rport failure: rdata x%p\n", rdata);
3262 return FAILED;
3263 }
3264 pnode = rdata->pnode;
3265 fc_block_scsi_eh(cmnd);
3263 3266
3264 status = lpfc_chk_tgt_mapped(vport, cmnd); 3267 status = lpfc_chk_tgt_mapped(vport, cmnd);
3265 if (status == FAILED) { 3268 if (status == FAILED) {
@@ -3312,13 +3315,19 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3312 struct Scsi_Host *shost = cmnd->device->host; 3315 struct Scsi_Host *shost = cmnd->device->host;
3313 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3316 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3314 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3317 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3315 struct lpfc_nodelist *pnode = rdata->pnode; 3318 struct lpfc_nodelist *pnode;
3316 unsigned tgt_id = cmnd->device->id; 3319 unsigned tgt_id = cmnd->device->id;
3317 unsigned int lun_id = cmnd->device->lun; 3320 unsigned int lun_id = cmnd->device->lun;
3318 struct lpfc_scsi_event_header scsi_event; 3321 struct lpfc_scsi_event_header scsi_event;
3319 int status; 3322 int status;
3320 3323
3321 lpfc_block_error_handler(cmnd); 3324 if (!rdata) {
3325 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3326 "0799 Target Reset rport failure: rdata x%p\n", rdata);
3327 return FAILED;
3328 }
3329 pnode = rdata->pnode;
3330 fc_block_scsi_eh(cmnd);
3322 3331
3323 status = lpfc_chk_tgt_mapped(vport, cmnd); 3332 status = lpfc_chk_tgt_mapped(vport, cmnd);
3324 if (status == FAILED) { 3333 if (status == FAILED) {
@@ -3384,7 +3393,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3384 fc_host_post_vendor_event(shost, fc_get_event_number(), 3393 fc_host_post_vendor_event(shost, fc_get_event_number(),
3385 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3394 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3386 3395
3387 lpfc_block_error_handler(cmnd); 3396 fc_block_scsi_eh(cmnd);
3388 3397
3389 /* 3398 /*
3390 * Since the driver manages a single bus device, reset all 3399 * Since the driver manages a single bus device, reset all
@@ -3498,6 +3507,8 @@ lpfc_slave_alloc(struct scsi_device *sdev)
3498 "Allocated %d buffers.\n", 3507 "Allocated %d buffers.\n",
3499 num_to_alloc, num_allocated); 3508 num_to_alloc, num_allocated);
3500 } 3509 }
3510 if (num_allocated > 0)
3511 phba->total_scsi_bufs += num_allocated;
3501 return 0; 3512 return 0;
3502} 3513}
3503 3514
@@ -3534,7 +3545,8 @@ lpfc_slave_configure(struct scsi_device *sdev)
3534 rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3545 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3535 3546
3536 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3547 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3537 lpfc_sli_poll_fcp_ring(phba); 3548 lpfc_sli_handle_fast_ring_event(phba,
3549 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3538 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3550 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3539 lpfc_poll_rearm_timer(phba); 3551 lpfc_poll_rearm_timer(phba);
3540 } 3552 }
@@ -3576,6 +3588,7 @@ struct scsi_host_template lpfc_template = {
3576 .shost_attrs = lpfc_hba_attrs, 3588 .shost_attrs = lpfc_hba_attrs,
3577 .max_sectors = 0xFFFF, 3589 .max_sectors = 0xFFFF,
3578 .vendor_id = LPFC_NL_VENDOR_ID, 3590 .vendor_id = LPFC_NL_VENDOR_ID,
3591 .change_queue_depth = lpfc_change_queue_depth,
3579}; 3592};
3580 3593
3581struct scsi_host_template lpfc_vport_template = { 3594struct scsi_host_template lpfc_vport_template = {
@@ -3597,4 +3610,5 @@ struct scsi_host_template lpfc_vport_template = {
3597 .use_clustering = ENABLE_CLUSTERING, 3610 .use_clustering = ENABLE_CLUSTERING,
3598 .shost_attrs = lpfc_vport_attrs, 3611 .shost_attrs = lpfc_vport_attrs,
3599 .max_sectors = 0xFFFF, 3612 .max_sectors = 0xFFFF,
3613 .change_queue_depth = lpfc_change_queue_depth,
3600}; 3614};
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 42d4f3dae1d6..7935667b81a5 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -30,6 +30,7 @@
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h> 31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h> 32#include <scsi/fc/fc_fs.h>
33#include <linux/aer.h>
33 34
34#include "lpfc_hw4.h" 35#include "lpfc_hw4.h"
35#include "lpfc_hw.h" 36#include "lpfc_hw.h"
@@ -58,8 +59,11 @@ typedef enum _lpfc_iocb_type {
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 59static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t); 60 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 61static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *); 62 uint8_t *, uint32_t *);
62 63static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
64 struct lpfc_iocbq *);
65static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
66 struct hbq_dmabuf *);
63static IOCB_t * 67static IOCB_t *
64lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 68lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
65{ 69{
@@ -259,6 +263,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
259 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 263 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
260 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); 264 bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
261 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 265 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
266 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
267 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
268 readl(q->phba->sli4_hba.EQCQDBregaddr);
262 return released; 269 return released;
263} 270}
264 271
@@ -515,6 +522,8 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
515 struct lpfc_sglq *sglq = NULL; 522 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri; 523 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); 524 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
525 if (!sglq)
526 return NULL;
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; 527 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 528 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
520 return sglq; 529 return sglq;
@@ -572,9 +581,9 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
572 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 581 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
573 if (sglq) { 582 if (sglq) {
574 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED 583 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
575 || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 584 && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
576 && (iocbq->iocb.un.ulpWord[4] 585 && (iocbq->iocb.un.ulpWord[4]
577 == IOERR_SLI_ABORTED))) { 586 == IOERR_ABORT_REQUESTED))) {
578 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 587 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
579 iflag); 588 iflag);
580 list_add(&sglq->list, 589 list_add(&sglq->list,
@@ -767,6 +776,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
767 case CMD_CLOSE_XRI_CX: 776 case CMD_CLOSE_XRI_CX:
768 case CMD_XRI_ABORTED_CX: 777 case CMD_XRI_ABORTED_CX:
769 case CMD_ABORT_MXRI64_CN: 778 case CMD_ABORT_MXRI64_CN:
779 case CMD_XMIT_BLS_RSP64_CX:
770 type = LPFC_ABORT_IOCB; 780 type = LPFC_ABORT_IOCB;
771 break; 781 break;
772 case CMD_RCV_SEQUENCE_CX: 782 case CMD_RCV_SEQUENCE_CX:
@@ -2068,8 +2078,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2068 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2078 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2069 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2079 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2070 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2080 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2071 Rctl = FC_ELS_REQ; 2081 Rctl = FC_RCTL_ELS_REQ;
2072 Type = FC_ELS_DATA; 2082 Type = FC_TYPE_ELS;
2073 } else { 2083 } else {
2074 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2084 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2075 Rctl = w5p->hcsw.Rctl; 2085 Rctl = w5p->hcsw.Rctl;
@@ -2079,8 +2089,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2079 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2089 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2080 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2090 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2081 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2091 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2082 Rctl = FC_ELS_REQ; 2092 Rctl = FC_RCTL_ELS_REQ;
2083 Type = FC_ELS_DATA; 2093 Type = FC_TYPE_ELS;
2084 w5p->hcsw.Rctl = Rctl; 2094 w5p->hcsw.Rctl = Rctl;
2085 w5p->hcsw.Type = Type; 2095 w5p->hcsw.Type = Type;
2086 } 2096 }
@@ -2324,168 +2334,6 @@ void lpfc_poll_eratt(unsigned long ptr)
2324 return; 2334 return;
2325} 2335}
2326 2336
2327/**
2328 * lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode
2329 * @phba: Pointer to HBA context object.
2330 *
2331 * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
2332 * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
2333 * is enabled.
2334 *
2335 * The caller does not hold any lock.
2336 * The function processes each response iocb in the response ring until it
2337 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
2338 * LE bit set. The function will call the completion handler of the command iocb
2339 * if the response iocb indicates a completion for a command iocb or it is
2340 * an abort completion.
2341 **/
2342void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
2343{
2344 struct lpfc_sli *psli = &phba->sli;
2345 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
2346 IOCB_t *irsp = NULL;
2347 IOCB_t *entry = NULL;
2348 struct lpfc_iocbq *cmdiocbq = NULL;
2349 struct lpfc_iocbq rspiocbq;
2350 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2351 uint32_t status;
2352 uint32_t portRspPut, portRspMax;
2353 int type;
2354 uint32_t rsp_cmpl = 0;
2355 uint32_t ha_copy;
2356 unsigned long iflags;
2357
2358 pring->stats.iocb_event++;
2359
2360 /*
2361 * The next available response entry should never exceed the maximum
2362 * entries. If it does, treat it as an adapter hardware error.
2363 */
2364 portRspMax = pring->numRiocb;
2365 portRspPut = le32_to_cpu(pgp->rspPutInx);
2366 if (unlikely(portRspPut >= portRspMax)) {
2367 lpfc_sli_rsp_pointers_error(phba, pring);
2368 return;
2369 }
2370
2371 rmb();
2372 while (pring->rspidx != portRspPut) {
2373 entry = lpfc_resp_iocb(phba, pring);
2374 if (++pring->rspidx >= portRspMax)
2375 pring->rspidx = 0;
2376
2377 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2378 (uint32_t *) &rspiocbq.iocb,
2379 phba->iocb_rsp_size);
2380 irsp = &rspiocbq.iocb;
2381 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2382 pring->stats.iocb_rsp++;
2383 rsp_cmpl++;
2384
2385 if (unlikely(irsp->ulpStatus)) {
2386 /* Rsp ring <ringno> error: IOCB */
2387 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2388 "0326 Rsp Ring %d error: IOCB Data: "
2389 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
2390 pring->ringno,
2391 irsp->un.ulpWord[0],
2392 irsp->un.ulpWord[1],
2393 irsp->un.ulpWord[2],
2394 irsp->un.ulpWord[3],
2395 irsp->un.ulpWord[4],
2396 irsp->un.ulpWord[5],
2397 *(uint32_t *)&irsp->un1,
2398 *((uint32_t *)&irsp->un1 + 1));
2399 }
2400
2401 switch (type) {
2402 case LPFC_ABORT_IOCB:
2403 case LPFC_SOL_IOCB:
2404 /*
2405 * Idle exchange closed via ABTS from port. No iocb
2406 * resources need to be recovered.
2407 */
2408 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
2409 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2410 "0314 IOCB cmd 0x%x "
2411 "processed. Skipping "
2412 "completion",
2413 irsp->ulpCommand);
2414 break;
2415 }
2416
2417 spin_lock_irqsave(&phba->hbalock, iflags);
2418 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2419 &rspiocbq);
2420 spin_unlock_irqrestore(&phba->hbalock, iflags);
2421 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
2422 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2423 &rspiocbq);
2424 }
2425 break;
2426 default:
2427 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2428 char adaptermsg[LPFC_MAX_ADPTMSG];
2429 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2430 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2431 MAX_MSG_DATA);
2432 dev_warn(&((phba->pcidev)->dev),
2433 "lpfc%d: %s\n",
2434 phba->brd_no, adaptermsg);
2435 } else {
2436 /* Unknown IOCB command */
2437 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2438 "0321 Unknown IOCB command "
2439 "Data: x%x, x%x x%x x%x x%x\n",
2440 type, irsp->ulpCommand,
2441 irsp->ulpStatus,
2442 irsp->ulpIoTag,
2443 irsp->ulpContext);
2444 }
2445 break;
2446 }
2447
2448 /*
2449 * The response IOCB has been processed. Update the ring
2450 * pointer in SLIM. If the port response put pointer has not
2451 * been updated, sync the pgp->rspPutInx and fetch the new port
2452 * response put pointer.
2453 */
2454 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2455
2456 if (pring->rspidx == portRspPut)
2457 portRspPut = le32_to_cpu(pgp->rspPutInx);
2458 }
2459
2460 ha_copy = readl(phba->HAregaddr);
2461 ha_copy >>= (LPFC_FCP_RING * 4);
2462
2463 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
2464 spin_lock_irqsave(&phba->hbalock, iflags);
2465 pring->stats.iocb_rsp_full++;
2466 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
2467 writel(status, phba->CAregaddr);
2468 readl(phba->CAregaddr);
2469 spin_unlock_irqrestore(&phba->hbalock, iflags);
2470 }
2471 if ((ha_copy & HA_R0CE_RSP) &&
2472 (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2473 spin_lock_irqsave(&phba->hbalock, iflags);
2474 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2475 pring->stats.iocb_cmd_empty++;
2476
2477 /* Force update of the local copy of cmdGetInx */
2478 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2479 lpfc_sli_resume_iocb(phba, pring);
2480
2481 if ((pring->lpfc_sli_cmd_available))
2482 (pring->lpfc_sli_cmd_available) (phba, pring);
2483
2484 spin_unlock_irqrestore(&phba->hbalock, iflags);
2485 }
2486
2487 return;
2488}
2489 2337
2490/** 2338/**
2491 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2339 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
@@ -2502,9 +2350,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
2502 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2350 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2503 * function if this is an unsolicited iocb. 2351 * function if this is an unsolicited iocb.
2504 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2352 * This routine presumes LPFC_FCP_RING handling and doesn't bother
2505 * to check it explicitly. This function always returns 1. 2353 * to check it explicitly.
2506 **/ 2354 */
2507static int 2355int
2508lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2356lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2509 struct lpfc_sli_ring *pring, uint32_t mask) 2357 struct lpfc_sli_ring *pring, uint32_t mask)
2510{ 2358{
@@ -2534,6 +2382,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2534 spin_unlock_irqrestore(&phba->hbalock, iflag); 2382 spin_unlock_irqrestore(&phba->hbalock, iflag);
2535 return 1; 2383 return 1;
2536 } 2384 }
2385 if (phba->fcp_ring_in_use) {
2386 spin_unlock_irqrestore(&phba->hbalock, iflag);
2387 return 1;
2388 } else
2389 phba->fcp_ring_in_use = 1;
2537 2390
2538 rmb(); 2391 rmb();
2539 while (pring->rspidx != portRspPut) { 2392 while (pring->rspidx != portRspPut) {
@@ -2604,10 +2457,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2604 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2457 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2605 &rspiocbq); 2458 &rspiocbq);
2606 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 2459 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
2607 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2608 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2609 &rspiocbq);
2610 } else {
2611 spin_unlock_irqrestore(&phba->hbalock, 2460 spin_unlock_irqrestore(&phba->hbalock,
2612 iflag); 2461 iflag);
2613 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2462 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
@@ -2615,7 +2464,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2615 spin_lock_irqsave(&phba->hbalock, 2464 spin_lock_irqsave(&phba->hbalock,
2616 iflag); 2465 iflag);
2617 } 2466 }
2618 }
2619 break; 2467 break;
2620 case LPFC_UNSOL_IOCB: 2468 case LPFC_UNSOL_IOCB:
2621 spin_unlock_irqrestore(&phba->hbalock, iflag); 2469 spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -2675,6 +2523,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2675 2523
2676 } 2524 }
2677 2525
2526 phba->fcp_ring_in_use = 0;
2678 spin_unlock_irqrestore(&phba->hbalock, iflag); 2527 spin_unlock_irqrestore(&phba->hbalock, iflag);
2679 return rc; 2528 return rc;
2680} 2529}
@@ -3018,16 +2867,39 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask) 2867 struct lpfc_sli_ring *pring, uint32_t mask)
3019{ 2868{
3020 struct lpfc_iocbq *irspiocbq; 2869 struct lpfc_iocbq *irspiocbq;
2870 struct hbq_dmabuf *dmabuf;
2871 struct lpfc_cq_event *cq_event;
3021 unsigned long iflag; 2872 unsigned long iflag;
3022 2873
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { 2874 spin_lock_irqsave(&phba->hbalock, iflag);
2875 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
2876 spin_unlock_irqrestore(&phba->hbalock, iflag);
2877 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3024 /* Get the response iocb from the head of work queue */ 2878 /* Get the response iocb from the head of work queue */
3025 spin_lock_irqsave(&phba->hbalock, iflag); 2879 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, 2880 list_remove_head(&phba->sli4_hba.sp_queue_event,
3027 irspiocbq, struct lpfc_iocbq, list); 2881 cq_event, struct lpfc_cq_event, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag); 2882 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029 /* Process the response iocb */ 2883
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); 2884 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
2885 case CQE_CODE_COMPL_WQE:
2886 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
2887 cq_event);
2888 /* Translate ELS WCQE to response IOCBQ */
2889 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
2890 irspiocbq);
2891 if (irspiocbq)
2892 lpfc_sli_sp_handle_rspiocb(phba, pring,
2893 irspiocbq);
2894 break;
2895 case CQE_CODE_RECEIVE:
2896 dmabuf = container_of(cq_event, struct hbq_dmabuf,
2897 cq_event);
2898 lpfc_sli4_handle_received_buffer(phba, dmabuf);
2899 break;
2900 default:
2901 break;
2902 }
3031 } 2903 }
3032} 2904}
3033 2905
@@ -3416,6 +3288,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
3416 3288
3417 /* perform board reset */ 3289 /* perform board reset */
3418 phba->fc_eventTag = 0; 3290 phba->fc_eventTag = 0;
3291 phba->link_events = 0;
3419 phba->pport->fc_myDID = 0; 3292 phba->pport->fc_myDID = 0;
3420 phba->pport->fc_prevDID = 0; 3293 phba->pport->fc_prevDID = 0;
3421 3294
@@ -3476,6 +3349,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3476 3349
3477 /* perform board reset */ 3350 /* perform board reset */
3478 phba->fc_eventTag = 0; 3351 phba->fc_eventTag = 0;
3352 phba->link_events = 0;
3479 phba->pport->fc_myDID = 0; 3353 phba->pport->fc_myDID = 0;
3480 phba->pport->fc_prevDID = 0; 3354 phba->pport->fc_prevDID = 0;
3481 3355
@@ -3495,7 +3369,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
3495 list_del_init(&phba->sli4_hba.dat_rq->list); 3369 list_del_init(&phba->sli4_hba.dat_rq->list);
3496 list_del_init(&phba->sli4_hba.mbx_cq->list); 3370 list_del_init(&phba->sli4_hba.mbx_cq->list);
3497 list_del_init(&phba->sli4_hba.els_cq->list); 3371 list_del_init(&phba->sli4_hba.els_cq->list);
3498 list_del_init(&phba->sli4_hba.rxq_cq->list);
3499 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3372 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3500 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3373 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
3501 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) 3374 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
@@ -3531,9 +3404,13 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3531 struct lpfc_sli *psli; 3404 struct lpfc_sli *psli;
3532 volatile uint32_t word0; 3405 volatile uint32_t word0;
3533 void __iomem *to_slim; 3406 void __iomem *to_slim;
3407 uint32_t hba_aer_enabled;
3534 3408
3535 spin_lock_irq(&phba->hbalock); 3409 spin_lock_irq(&phba->hbalock);
3536 3410
3411 /* Take PCIe device Advanced Error Reporting (AER) state */
3412 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3413
3537 psli = &phba->sli; 3414 psli = &phba->sli;
3538 3415
3539 /* Restart HBA */ 3416 /* Restart HBA */
@@ -3573,6 +3450,10 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
3573 /* Give the INITFF and Post time to settle. */ 3450 /* Give the INITFF and Post time to settle. */
3574 mdelay(100); 3451 mdelay(100);
3575 3452
3453 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3454 if (hba_aer_enabled)
3455 pci_disable_pcie_error_reporting(phba->pcidev);
3456
3576 lpfc_hba_down_post(phba); 3457 lpfc_hba_down_post(phba);
3577 3458
3578 return 0; 3459 return 0;
@@ -4042,6 +3923,24 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4042 if (rc) 3923 if (rc)
4043 goto lpfc_sli_hba_setup_error; 3924 goto lpfc_sli_hba_setup_error;
4044 3925
3926 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
3927 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
3928 rc = pci_enable_pcie_error_reporting(phba->pcidev);
3929 if (!rc) {
3930 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3931 "2709 This device supports "
3932 "Advanced Error Reporting (AER)\n");
3933 spin_lock_irq(&phba->hbalock);
3934 phba->hba_flag |= HBA_AER_ENABLED;
3935 spin_unlock_irq(&phba->hbalock);
3936 } else {
3937 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3938 "2708 This device does not support "
3939 "Advanced Error Reporting (AER)\n");
3940 phba->cfg_aer_support = 0;
3941 }
3942 }
3943
4045 if (phba->sli_rev == 3) { 3944 if (phba->sli_rev == 3) {
4046 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 3945 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4047 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 3946 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
@@ -4243,7 +4142,6 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4243 4142
4244 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4143 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4245 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4144 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4246 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4247 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4145 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4248 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4146 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4249 LPFC_QUEUE_REARM); 4147 LPFC_QUEUE_REARM);
@@ -4322,6 +4220,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4322 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 4220 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4323 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 4221 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4324 phba->hba_flag |= HBA_FCOE_SUPPORT; 4222 phba->hba_flag |= HBA_FCOE_SUPPORT;
4223
4224 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
4225 LPFC_DCBX_CEE_MODE)
4226 phba->hba_flag |= HBA_FIP_SUPPORT;
4227 else
4228 phba->hba_flag &= ~HBA_FIP_SUPPORT;
4229
4325 if (phba->sli_rev != LPFC_SLI_REV4 || 4230 if (phba->sli_rev != LPFC_SLI_REV4 ||
4326 !(phba->hba_flag & HBA_FCOE_SUPPORT)) { 4231 !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4327 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4232 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -4468,7 +4373,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4468 rc = lpfc_sli4_post_sgl_list(phba); 4373 rc = lpfc_sli4_post_sgl_list(phba);
4469 if (unlikely(rc)) { 4374 if (unlikely(rc)) {
4470 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4375 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4471 "0582 Error %d during sgl post operation", rc); 4376 "0582 Error %d during sgl post operation\n",
4377 rc);
4472 rc = -ENODEV; 4378 rc = -ENODEV;
4473 goto out_free_vpd; 4379 goto out_free_vpd;
4474 } 4380 }
@@ -4477,8 +4383,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4477 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 4383 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4478 if (unlikely(rc)) { 4384 if (unlikely(rc)) {
4479 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4385 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4480 "0383 Error %d during scsi sgl post opeation", 4386 "0383 Error %d during scsi sgl post "
4481 rc); 4387 "operation\n", rc);
4482 /* Some Scsi buffers were moved to the abort scsi list */ 4388 /* Some Scsi buffers were moved to the abort scsi list */
4483 /* A pci function reset will repost them */ 4389 /* A pci function reset will repost them */
4484 rc = -ENODEV; 4390 rc = -ENODEV;
@@ -4494,10 +4400,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4494 rc = -ENODEV; 4400 rc = -ENODEV;
4495 goto out_free_vpd; 4401 goto out_free_vpd;
4496 } 4402 }
4497 if (phba->cfg_enable_fip)
4498 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
4499 else
4500 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4501 4403
4502 /* Set up all the queues to the device */ 4404 /* Set up all the queues to the device */
4503 rc = lpfc_sli4_queue_setup(phba); 4405 rc = lpfc_sli4_queue_setup(phba);
@@ -5669,7 +5571,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
5669 case CMD_GEN_REQUEST64_CX: 5571 case CMD_GEN_REQUEST64_CX:
5670 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 5572 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
5671 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 5573 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
5672 FC_FCP_CMND) || 5574 FC_RCTL_DD_UNSOL_CMD) ||
5673 (piocb->iocb.un.genreq64.w5.hcsw.Type != 5575 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
5674 MENLO_TRANSPORT_TYPE)) 5576 MENLO_TRANSPORT_TYPE))
5675 5577
@@ -5849,7 +5751,7 @@ static int
5849lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 5751lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5850 union lpfc_wqe *wqe) 5752 union lpfc_wqe *wqe)
5851{ 5753{
5852 uint32_t payload_len = 0; 5754 uint32_t xmit_len = 0, total_len = 0;
5853 uint8_t ct = 0; 5755 uint8_t ct = 0;
5854 uint32_t fip; 5756 uint32_t fip;
5855 uint32_t abort_tag; 5757 uint32_t abort_tag;
@@ -5857,12 +5759,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5857 uint8_t cmnd; 5759 uint8_t cmnd;
5858 uint16_t xritag; 5760 uint16_t xritag;
5859 struct ulp_bde64 *bpl = NULL; 5761 struct ulp_bde64 *bpl = NULL;
5762 uint32_t els_id = ELS_ID_DEFAULT;
5763 int numBdes, i;
5764 struct ulp_bde64 bde;
5860 5765
5861 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); 5766 fip = phba->hba_flag & HBA_FIP_SUPPORT;
5862 /* The fcp commands will set command type */ 5767 /* The fcp commands will set command type */
5863 if (iocbq->iocb_flag & LPFC_IO_FCP) 5768 if (iocbq->iocb_flag & LPFC_IO_FCP)
5864 command_type = FCP_COMMAND; 5769 command_type = FCP_COMMAND;
5865 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS)) 5770 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
5866 command_type = ELS_COMMAND_FIP; 5771 command_type = ELS_COMMAND_FIP;
5867 else 5772 else
5868 command_type = ELS_COMMAND_NON_FIP; 5773 command_type = ELS_COMMAND_NON_FIP;
@@ -5874,6 +5779,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5874 wqe->words[7] = 0; /* The ct field has moved so reset */ 5779 wqe->words[7] = 0; /* The ct field has moved so reset */
5875 /* words0-2 bpl convert bde */ 5780 /* words0-2 bpl convert bde */
5876 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 5781 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5782 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
5783 sizeof(struct ulp_bde64);
5877 bpl = (struct ulp_bde64 *) 5784 bpl = (struct ulp_bde64 *)
5878 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 5785 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5879 if (!bpl) 5786 if (!bpl)
@@ -5886,9 +5793,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5886 * can assign it to the sgl. 5793 * can assign it to the sgl.
5887 */ 5794 */
5888 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 5795 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5889 payload_len = wqe->generic.bde.tus.f.bdeSize; 5796 xmit_len = wqe->generic.bde.tus.f.bdeSize;
5797 total_len = 0;
5798 for (i = 0; i < numBdes; i++) {
5799 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
5800 total_len += bde.tus.f.bdeSize;
5801 }
5890 } else 5802 } else
5891 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 5803 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5892 5804
5893 iocbq->iocb.ulpIoTag = iocbq->iotag; 5805 iocbq->iocb.ulpIoTag = iocbq->iotag;
5894 cmnd = iocbq->iocb.ulpCommand; 5806 cmnd = iocbq->iocb.ulpCommand;
@@ -5902,7 +5814,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5902 iocbq->iocb.ulpCommand); 5814 iocbq->iocb.ulpCommand);
5903 return IOCB_ERROR; 5815 return IOCB_ERROR;
5904 } 5816 }
5905 wqe->els_req.payload_len = payload_len; 5817 wqe->els_req.payload_len = xmit_len;
5906 /* Els_reguest64 has a TMO */ 5818 /* Els_reguest64 has a TMO */
5907 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 5819 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5908 iocbq->iocb.ulpTimeout); 5820 iocbq->iocb.ulpTimeout);
@@ -5923,7 +5835,23 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5923 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); 5835 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5924 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 5836 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5925 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 5837 /* CCP CCPE PV PRI in word10 were set in the memcpy */
5838
5839 if (command_type == ELS_COMMAND_FIP) {
5840 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
5841 >> LPFC_FIP_ELS_ID_SHIFT);
5842 }
5843 bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
5844
5926 break; 5845 break;
5846 case CMD_XMIT_SEQUENCE64_CX:
5847 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5848 iocbq->iocb.un.ulpWord[3]);
5849 wqe->generic.word3 = 0;
5850 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
5851 bf_set(wqe_xc, &wqe->generic, 1);
5852 /* The entire sequence is transmitted for this IOCB */
5853 xmit_len = total_len;
5854 cmnd = CMD_XMIT_SEQUENCE64_CR;
5927 case CMD_XMIT_SEQUENCE64_CR: 5855 case CMD_XMIT_SEQUENCE64_CR:
5928 /* word3 iocb=io_tag32 wqe=payload_offset */ 5856 /* word3 iocb=io_tag32 wqe=payload_offset */
5929 /* payload offset used for multilpe outstanding 5857 /* payload offset used for multilpe outstanding
@@ -5933,7 +5861,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5933 /* word4 relative_offset memcpy */ 5861 /* word4 relative_offset memcpy */
5934 /* word5 r_ctl/df_ctl memcpy */ 5862 /* word5 r_ctl/df_ctl memcpy */
5935 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 5863 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5936 wqe->xmit_sequence.xmit_len = payload_len; 5864 wqe->xmit_sequence.xmit_len = xmit_len;
5865 command_type = OTHER_COMMAND;
5937 break; 5866 break;
5938 case CMD_XMIT_BCAST64_CN: 5867 case CMD_XMIT_BCAST64_CN:
5939 /* word3 iocb=iotag32 wqe=payload_len */ 5868 /* word3 iocb=iotag32 wqe=payload_len */
@@ -5962,7 +5891,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5962 case CMD_FCP_IREAD64_CR: 5891 case CMD_FCP_IREAD64_CR:
5963 /* FCP_CMD is always the 1st sgl entry */ 5892 /* FCP_CMD is always the 1st sgl entry */
5964 wqe->fcp_iread.payload_len = 5893 wqe->fcp_iread.payload_len =
5965 payload_len + sizeof(struct fcp_rsp); 5894 xmit_len + sizeof(struct fcp_rsp);
5966 5895
5967 /* word 4 (xfer length) should have been set on the memcpy */ 5896 /* word 4 (xfer length) should have been set on the memcpy */
5968 5897
@@ -5999,7 +5928,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5999 * sgl[1] = rsp. 5928 * sgl[1] = rsp.
6000 * 5929 *
6001 */ 5930 */
6002 wqe->gen_req.command_len = payload_len; 5931 wqe->gen_req.command_len = xmit_len;
6003 /* Word4 parameter copied in the memcpy */ 5932 /* Word4 parameter copied in the memcpy */
6004 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ 5933 /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
6005 /* word6 context tag copied in memcpy */ 5934 /* word6 context tag copied in memcpy */
@@ -6066,6 +5995,38 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
6066 command_type = OTHER_COMMAND; 5995 command_type = OTHER_COMMAND;
6067 xritag = 0; 5996 xritag = 0;
6068 break; 5997 break;
5998 case CMD_XMIT_BLS_RSP64_CX:
5999 /* As BLS ABTS-ACC WQE is very different from other WQEs,
6000 * we re-construct this WQE here based on information in
6001 * iocbq from scratch.
6002 */
6003 memset(wqe, 0, sizeof(union lpfc_wqe));
6004 /* OX_ID is invariable to who sent ABTS to CT exchange */
6005 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
6006 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc));
6007 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) ==
6008 LPFC_ABTS_UNSOL_INT) {
6009 /* ABTS sent by initiator to CT exchange, the
6010 * RX_ID field will be filled with the newly
6011 * allocated responder XRI.
6012 */
6013 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6014 iocbq->sli4_xritag);
6015 } else {
6016 /* ABTS sent by responder to CT exchange, the
6017 * RX_ID field will be filled with the responder
6018 * RX_ID from ABTS.
6019 */
6020 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
6021 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc));
6022 }
6023 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
6024 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
6025 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
6026 iocbq->iocb.ulpContext);
6027 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
6028 command_type = OTHER_COMMAND;
6029 break;
6069 case CMD_XRI_ABORTED_CX: 6030 case CMD_XRI_ABORTED_CX:
6070 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 6031 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
6071 /* words0-2 are all 0's no bde */ 6032 /* words0-2 are all 0's no bde */
@@ -6120,11 +6081,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6120 uint16_t xritag; 6081 uint16_t xritag;
6121 union lpfc_wqe wqe; 6082 union lpfc_wqe wqe;
6122 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 6083 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
6123 uint32_t fcp_wqidx;
6124 6084
6125 if (piocb->sli4_xritag == NO_XRI) { 6085 if (piocb->sli4_xritag == NO_XRI) {
6126 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 6086 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
6127 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6087 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6128 sglq = NULL; 6088 sglq = NULL;
6129 else { 6089 else {
6130 sglq = __lpfc_sli_get_sglq(phba); 6090 sglq = __lpfc_sli_get_sglq(phba);
@@ -6155,8 +6115,17 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6155 return IOCB_ERROR; 6115 return IOCB_ERROR;
6156 6116
6157 if (piocb->iocb_flag & LPFC_IO_FCP) { 6117 if (piocb->iocb_flag & LPFC_IO_FCP) {
6158 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 6118 /*
6159 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) 6119 * For FCP command IOCB, get a new WQ index to distribute
6120 * WQE across the WQsr. On the other hand, for abort IOCB,
6121 * it carries the same WQ index to the original command
6122 * IOCB.
6123 */
6124 if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
6125 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN))
6126 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6127 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
6128 &wqe))
6160 return IOCB_ERROR; 6129 return IOCB_ERROR;
6161 } else { 6130 } else {
6162 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 6131 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
@@ -6449,31 +6418,37 @@ lpfc_sli_setup(struct lpfc_hba *phba)
6449 pring->iotag_max = 4096; 6418 pring->iotag_max = 4096;
6450 pring->lpfc_sli_rcv_async_status = 6419 pring->lpfc_sli_rcv_async_status =
6451 lpfc_sli_async_event_handler; 6420 lpfc_sli_async_event_handler;
6452 pring->num_mask = 4; 6421 pring->num_mask = LPFC_MAX_RING_MASK;
6453 pring->prt[0].profile = 0; /* Mask 0 */ 6422 pring->prt[0].profile = 0; /* Mask 0 */
6454 pring->prt[0].rctl = FC_ELS_REQ; 6423 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
6455 pring->prt[0].type = FC_ELS_DATA; 6424 pring->prt[0].type = FC_TYPE_ELS;
6456 pring->prt[0].lpfc_sli_rcv_unsol_event = 6425 pring->prt[0].lpfc_sli_rcv_unsol_event =
6457 lpfc_els_unsol_event; 6426 lpfc_els_unsol_event;
6458 pring->prt[1].profile = 0; /* Mask 1 */ 6427 pring->prt[1].profile = 0; /* Mask 1 */
6459 pring->prt[1].rctl = FC_ELS_RSP; 6428 pring->prt[1].rctl = FC_RCTL_ELS_REP;
6460 pring->prt[1].type = FC_ELS_DATA; 6429 pring->prt[1].type = FC_TYPE_ELS;
6461 pring->prt[1].lpfc_sli_rcv_unsol_event = 6430 pring->prt[1].lpfc_sli_rcv_unsol_event =
6462 lpfc_els_unsol_event; 6431 lpfc_els_unsol_event;
6463 pring->prt[2].profile = 0; /* Mask 2 */ 6432 pring->prt[2].profile = 0; /* Mask 2 */
6464 /* NameServer Inquiry */ 6433 /* NameServer Inquiry */
6465 pring->prt[2].rctl = FC_UNSOL_CTL; 6434 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
6466 /* NameServer */ 6435 /* NameServer */
6467 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 6436 pring->prt[2].type = FC_TYPE_CT;
6468 pring->prt[2].lpfc_sli_rcv_unsol_event = 6437 pring->prt[2].lpfc_sli_rcv_unsol_event =
6469 lpfc_ct_unsol_event; 6438 lpfc_ct_unsol_event;
6470 pring->prt[3].profile = 0; /* Mask 3 */ 6439 pring->prt[3].profile = 0; /* Mask 3 */
6471 /* NameServer response */ 6440 /* NameServer response */
6472 pring->prt[3].rctl = FC_SOL_CTL; 6441 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
6473 /* NameServer */ 6442 /* NameServer */
6474 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 6443 pring->prt[3].type = FC_TYPE_CT;
6475 pring->prt[3].lpfc_sli_rcv_unsol_event = 6444 pring->prt[3].lpfc_sli_rcv_unsol_event =
6476 lpfc_ct_unsol_event; 6445 lpfc_ct_unsol_event;
6446 /* abort unsolicited sequence */
6447 pring->prt[4].profile = 0; /* Mask 4 */
6448 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
6449 pring->prt[4].type = FC_TYPE_BLS;
6450 pring->prt[4].lpfc_sli_rcv_unsol_event =
6451 lpfc_sli4_ct_abort_unsol_event;
6477 break; 6452 break;
6478 } 6453 }
6479 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 6454 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
@@ -6976,8 +6951,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6976 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 6951 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
6977 6952
6978 spin_lock_irq(&phba->hbalock); 6953 spin_lock_irq(&phba->hbalock);
6979 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) 6954 if (phba->sli_rev < LPFC_SLI_REV4) {
6980 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 6955 if (abort_iotag != 0 &&
6956 abort_iotag <= phba->sli.last_iotag)
6957 abort_iocb =
6958 phba->sli.iocbq_lookup[abort_iotag];
6959 } else
6960 /* For sli4 the abort_tag is the XRI,
6961 * so the abort routine puts the iotag of the iocb
6962 * being aborted in the context field of the abort
6963 * IOCB.
6964 */
6965 abort_iocb = phba->sli.iocbq_lookup[abort_context];
6981 6966
6982 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, 6967 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
6983 "0327 Cannot abort els iocb %p " 6968 "0327 Cannot abort els iocb %p "
@@ -6991,9 +6976,18 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6991 * might have completed already. Do not free it again. 6976 * might have completed already. Do not free it again.
6992 */ 6977 */
6993 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 6978 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
6994 spin_unlock_irq(&phba->hbalock); 6979 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
6995 lpfc_sli_release_iocbq(phba, cmdiocb); 6980 spin_unlock_irq(&phba->hbalock);
6996 return; 6981 lpfc_sli_release_iocbq(phba, cmdiocb);
6982 return;
6983 }
6984 /* For SLI4 the ulpContext field for abort IOCB
6985 * holds the iotag of the IOCB being aborted so
6986 * the local abort_context needs to be reset to
6987 * match the aborted IOCBs ulpContext.
6988 */
6989 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
6990 abort_context = abort_iocb->iocb.ulpContext;
6997 } 6991 }
6998 /* 6992 /*
6999 * make sure we have the right iocbq before taking it 6993 * make sure we have the right iocbq before taking it
@@ -7112,13 +7106,18 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7112 iabt = &abtsiocbp->iocb; 7106 iabt = &abtsiocbp->iocb;
7113 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 7107 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
7114 iabt->un.acxri.abortContextTag = icmd->ulpContext; 7108 iabt->un.acxri.abortContextTag = icmd->ulpContext;
7115 if (phba->sli_rev == LPFC_SLI_REV4) 7109 if (phba->sli_rev == LPFC_SLI_REV4) {
7116 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 7110 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
7111 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
7112 }
7117 else 7113 else
7118 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 7114 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
7119 iabt->ulpLe = 1; 7115 iabt->ulpLe = 1;
7120 iabt->ulpClass = icmd->ulpClass; 7116 iabt->ulpClass = icmd->ulpClass;
7121 7117
7118 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7119 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
7120
7122 if (phba->link_state >= LPFC_LINK_UP) 7121 if (phba->link_state >= LPFC_LINK_UP)
7123 iabt->ulpCommand = CMD_ABORT_XRI_CN; 7122 iabt->ulpCommand = CMD_ABORT_XRI_CN;
7124 else 7123 else
@@ -7322,6 +7321,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
7322 abtsiocb->iocb.ulpClass = cmd->ulpClass; 7321 abtsiocb->iocb.ulpClass = cmd->ulpClass;
7323 abtsiocb->vport = phba->pport; 7322 abtsiocb->vport = phba->pport;
7324 7323
7324 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
7325 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
7326
7325 if (lpfc_is_link_up(phba)) 7327 if (lpfc_is_link_up(phba))
7326 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 7328 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
7327 else 7329 else
@@ -7687,31 +7689,28 @@ static int
7687lpfc_sli4_eratt_read(struct lpfc_hba *phba) 7689lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7688{ 7690{
7689 uint32_t uerr_sta_hi, uerr_sta_lo; 7691 uint32_t uerr_sta_hi, uerr_sta_lo;
7690 uint32_t onlnreg0, onlnreg1;
7691 7692
7692 /* For now, use the SLI4 device internal unrecoverable error 7693 /* For now, use the SLI4 device internal unrecoverable error
7693 * registers for error attention. This can be changed later. 7694 * registers for error attention. This can be changed later.
7694 */ 7695 */
7695 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); 7696 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
7696 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); 7697 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
7697 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { 7698 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
7698 uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); 7699 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
7699 uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); 7700 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7700 if (uerr_sta_lo || uerr_sta_hi) { 7701 "1423 HBA Unrecoverable error: "
7701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7702 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
7702 "1423 HBA Unrecoverable error: " 7703 "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
7703 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 7704 uerr_sta_lo, uerr_sta_hi,
7704 "online0_reg=0x%x, online1_reg=0x%x\n", 7705 phba->sli4_hba.ue_mask_lo,
7705 uerr_sta_lo, uerr_sta_hi, 7706 phba->sli4_hba.ue_mask_hi);
7706 onlnreg0, onlnreg1); 7707 phba->work_status[0] = uerr_sta_lo;
7707 phba->work_status[0] = uerr_sta_lo; 7708 phba->work_status[1] = uerr_sta_hi;
7708 phba->work_status[1] = uerr_sta_hi; 7709 /* Set the driver HA work bitmap */
7709 /* Set the driver HA work bitmap */ 7710 phba->work_ha |= HA_ERATT;
7710 phba->work_ha |= HA_ERATT; 7711 /* Indicate polling handles this ERATT */
7711 /* Indicate polling handles this ERATT */ 7712 phba->hba_flag |= HBA_ERATT_HANDLED;
7712 phba->hba_flag |= HBA_ERATT_HANDLED; 7713 return 1;
7713 return 1;
7714 }
7715 } 7714 }
7716 return 0; 7715 return 0;
7717} 7716}
@@ -7834,7 +7833,7 @@ irqreturn_t
7834lpfc_sli_sp_intr_handler(int irq, void *dev_id) 7833lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7835{ 7834{
7836 struct lpfc_hba *phba; 7835 struct lpfc_hba *phba;
7837 uint32_t ha_copy; 7836 uint32_t ha_copy, hc_copy;
7838 uint32_t work_ha_copy; 7837 uint32_t work_ha_copy;
7839 unsigned long status; 7838 unsigned long status;
7840 unsigned long iflag; 7839 unsigned long iflag;
@@ -7892,8 +7891,13 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7892 } 7891 }
7893 7892
7894 /* Clear up only attention source related to slow-path */ 7893 /* Clear up only attention source related to slow-path */
7894 hc_copy = readl(phba->HCregaddr);
7895 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
7896 HC_LAINT_ENA | HC_ERINT_ENA),
7897 phba->HCregaddr);
7895 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 7898 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
7896 phba->HAregaddr); 7899 phba->HAregaddr);
7900 writel(hc_copy, phba->HCregaddr);
7897 readl(phba->HAregaddr); /* flush */ 7901 readl(phba->HAregaddr); /* flush */
7898 spin_unlock_irqrestore(&phba->hbalock, iflag); 7902 spin_unlock_irqrestore(&phba->hbalock, iflag);
7899 } else 7903 } else
@@ -8049,7 +8053,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8049 KERN_ERR, 8053 KERN_ERR,
8050 LOG_MBOX | LOG_SLI, 8054 LOG_MBOX | LOG_SLI,
8051 "0350 rc should have" 8055 "0350 rc should have"
8052 "been MBX_BUSY"); 8056 "been MBX_BUSY\n");
8053 if (rc != MBX_NOT_FINISHED) 8057 if (rc != MBX_NOT_FINISHED)
8054 goto send_current_mbox; 8058 goto send_current_mbox;
8055 } 8059 }
@@ -8078,7 +8082,7 @@ send_current_mbox:
8078 if (rc != MBX_SUCCESS) 8082 if (rc != MBX_SUCCESS)
8079 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8083 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8080 LOG_SLI, "0349 rc should be " 8084 LOG_SLI, "0349 rc should be "
8081 "MBX_SUCCESS"); 8085 "MBX_SUCCESS\n");
8082 } 8086 }
8083 8087
8084 spin_lock_irqsave(&phba->hbalock, iflag); 8088 spin_lock_irqsave(&phba->hbalock, iflag);
@@ -8203,6 +8207,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8203 struct lpfc_hba *phba; 8207 struct lpfc_hba *phba;
8204 irqreturn_t sp_irq_rc, fp_irq_rc; 8208 irqreturn_t sp_irq_rc, fp_irq_rc;
8205 unsigned long status1, status2; 8209 unsigned long status1, status2;
8210 uint32_t hc_copy;
8206 8211
8207 /* 8212 /*
8208 * Get the driver's phba structure from the dev_id and 8213 * Get the driver's phba structure from the dev_id and
@@ -8240,7 +8245,12 @@ lpfc_sli_intr_handler(int irq, void *dev_id)
8240 } 8245 }
8241 8246
8242 /* Clear attention sources except link and error attentions */ 8247 /* Clear attention sources except link and error attentions */
8248 hc_copy = readl(phba->HCregaddr);
8249 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
8250 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
8251 phba->HCregaddr);
8243 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 8252 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
8253 writel(hc_copy, phba->HCregaddr);
8244 readl(phba->HAregaddr); /* flush */ 8254 readl(phba->HAregaddr); /* flush */
8245 spin_unlock(&phba->hbalock); 8255 spin_unlock(&phba->hbalock);
8246 8256
@@ -8351,8 +8361,6 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8351 8361
8352 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 8362 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
8353 sizeof(struct lpfc_iocbq) - offset); 8363 sizeof(struct lpfc_iocbq) - offset);
8354 memset(&pIocbIn->sli4_info, 0,
8355 sizeof(struct lpfc_sli4_rspiocb_info));
8356 /* Map WCQE parameters into irspiocb parameters */ 8364 /* Map WCQE parameters into irspiocb parameters */
8357 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); 8365 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
8358 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 8366 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
@@ -8364,16 +8372,49 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
8364 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8372 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8365 else 8373 else
8366 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 8374 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
8367 /* Load in additional WCQE parameters */ 8375}
8368 pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe); 8376
8369 pIocbIn->sli4_info.bfield = 0; 8377/**
8370 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 8378 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
8371 pIocbIn->sli4_info.bfield |= LPFC_XB; 8379 * @phba: Pointer to HBA context object.
8372 if (bf_get(lpfc_wcqe_c_pv, wcqe)) { 8380 * @wcqe: Pointer to work-queue completion queue entry.
8373 pIocbIn->sli4_info.bfield |= LPFC_PV; 8381 *
8374 pIocbIn->sli4_info.priority = 8382 * This routine handles an ELS work-queue completion event and construct
8375 bf_get(lpfc_wcqe_c_priority, wcqe); 8383 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
8384 * discovery engine to handle.
8385 *
8386 * Return: Pointer to the receive IOCBQ, NULL otherwise.
8387 **/
8388static struct lpfc_iocbq *
8389lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
8390 struct lpfc_iocbq *irspiocbq)
8391{
8392 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8393 struct lpfc_iocbq *cmdiocbq;
8394 struct lpfc_wcqe_complete *wcqe;
8395 unsigned long iflags;
8396
8397 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
8398 spin_lock_irqsave(&phba->hbalock, iflags);
8399 pring->stats.iocb_event++;
8400 /* Look up the ELS command IOCB and create pseudo response IOCB */
8401 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8402 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8403 spin_unlock_irqrestore(&phba->hbalock, iflags);
8404
8405 if (unlikely(!cmdiocbq)) {
8406 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8407 "0386 ELS complete with no corresponding "
8408 "cmdiocb: iotag (%d)\n",
8409 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8410 lpfc_sli_release_iocbq(phba, irspiocbq);
8411 return NULL;
8376 } 8412 }
8413
8414 /* Fake the irspiocbq and copy necessary response information */
8415 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8416
8417 return irspiocbq;
8377} 8418}
8378 8419
8379/** 8420/**
@@ -8566,45 +8607,26 @@ static bool
8566lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 8607lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
8567 struct lpfc_wcqe_complete *wcqe) 8608 struct lpfc_wcqe_complete *wcqe)
8568{ 8609{
8569 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
8570 struct lpfc_iocbq *cmdiocbq;
8571 struct lpfc_iocbq *irspiocbq; 8610 struct lpfc_iocbq *irspiocbq;
8572 unsigned long iflags; 8611 unsigned long iflags;
8573 bool workposted = false;
8574
8575 spin_lock_irqsave(&phba->hbalock, iflags);
8576 pring->stats.iocb_event++;
8577 /* Look up the ELS command IOCB and create pseudo response IOCB */
8578 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
8579 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8580 spin_unlock_irqrestore(&phba->hbalock, iflags);
8581 8612
8582 if (unlikely(!cmdiocbq)) { 8613 /* Get an irspiocbq for later ELS response processing use */
8583 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8584 "0386 ELS complete with no corresponding "
8585 "cmdiocb: iotag (%d)\n",
8586 bf_get(lpfc_wcqe_c_request_tag, wcqe));
8587 return workposted;
8588 }
8589
8590 /* Fake the irspiocbq and copy necessary response information */
8591 irspiocbq = lpfc_sli_get_iocbq(phba); 8614 irspiocbq = lpfc_sli_get_iocbq(phba);
8592 if (!irspiocbq) { 8615 if (!irspiocbq) {
8593 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8616 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8594 "0387 Failed to allocate an iocbq\n"); 8617 "0387 Failed to allocate an iocbq\n");
8595 return workposted; 8618 return false;
8596 } 8619 }
8597 lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
8598 8620
8599 /* Add the irspiocb to the response IOCB work list */ 8621 /* Save off the slow-path queue event for work thread to process */
8622 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
8600 spin_lock_irqsave(&phba->hbalock, iflags); 8623 spin_lock_irqsave(&phba->hbalock, iflags);
8601 list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue); 8624 list_add_tail(&irspiocbq->cq_event.list,
8602 /* Indicate ELS ring attention */ 8625 &phba->sli4_hba.sp_queue_event);
8603 phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); 8626 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8604 spin_unlock_irqrestore(&phba->hbalock, iflags); 8627 spin_unlock_irqrestore(&phba->hbalock, iflags);
8605 workposted = true;
8606 8628
8607 return workposted; 8629 return true;
8608} 8630}
8609 8631
8610/** 8632/**
@@ -8690,52 +8712,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
8690} 8712}
8691 8713
8692/** 8714/**
8693 * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
8694 * @phba: Pointer to HBA context object.
8695 * @cq: Pointer to the completion queue.
8696 * @wcqe: Pointer to a completion queue entry.
8697 *
8698 * This routine process a slow-path work-queue completion queue entry.
8699 *
8700 * Return: true if work posted to worker thread, otherwise false.
8701 **/
8702static bool
8703lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8704 struct lpfc_cqe *cqe)
8705{
8706 struct lpfc_wcqe_complete wcqe;
8707 bool workposted = false;
8708
8709 /* Copy the work queue CQE and convert endian order if needed */
8710 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8711
8712 /* Check and process for different type of WCQE and dispatch */
8713 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8714 case CQE_CODE_COMPL_WQE:
8715 /* Process the WQ complete event */
8716 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8717 (struct lpfc_wcqe_complete *)&wcqe);
8718 break;
8719 case CQE_CODE_RELEASE_WQE:
8720 /* Process the WQ release event */
8721 lpfc_sli4_sp_handle_rel_wcqe(phba,
8722 (struct lpfc_wcqe_release *)&wcqe);
8723 break;
8724 case CQE_CODE_XRI_ABORTED:
8725 /* Process the WQ XRI abort event */
8726 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8727 (struct sli4_wcqe_xri_aborted *)&wcqe);
8728 break;
8729 default:
8730 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8731 "0388 Not a valid WCQE code: x%x\n",
8732 bf_get(lpfc_wcqe_c_code, &wcqe));
8733 break;
8734 }
8735 return workposted;
8736}
8737
8738/**
8739 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 8715 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
8740 * @phba: Pointer to HBA context object. 8716 * @phba: Pointer to HBA context object.
8741 * @rcqe: Pointer to receive-queue completion queue entry. 8717 * @rcqe: Pointer to receive-queue completion queue entry.
@@ -8745,9 +8721,8 @@ lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8745 * Return: true if work posted to worker thread, otherwise false. 8721 * Return: true if work posted to worker thread, otherwise false.
8746 **/ 8722 **/
8747static bool 8723static bool
8748lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 8724lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
8749{ 8725{
8750 struct lpfc_rcqe rcqe;
8751 bool workposted = false; 8726 bool workposted = false;
8752 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 8727 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
8753 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 8728 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
@@ -8755,31 +8730,28 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8755 uint32_t status; 8730 uint32_t status;
8756 unsigned long iflags; 8731 unsigned long iflags;
8757 8732
8758 /* Copy the receive queue CQE and convert endian order if needed */ 8733 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
8759 lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
8760 lpfc_sli4_rq_release(hrq, drq);
8761 if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
8762 goto out;
8763 if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
8764 goto out; 8734 goto out;
8765 8735
8766 status = bf_get(lpfc_rcqe_status, &rcqe); 8736 status = bf_get(lpfc_rcqe_status, rcqe);
8767 switch (status) { 8737 switch (status) {
8768 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 8738 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
8769 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8739 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8770 "2537 Receive Frame Truncated!!\n"); 8740 "2537 Receive Frame Truncated!!\n");
8771 case FC_STATUS_RQ_SUCCESS: 8741 case FC_STATUS_RQ_SUCCESS:
8742 lpfc_sli4_rq_release(hrq, drq);
8772 spin_lock_irqsave(&phba->hbalock, iflags); 8743 spin_lock_irqsave(&phba->hbalock, iflags);
8773 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 8744 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
8774 if (!dma_buf) { 8745 if (!dma_buf) {
8775 spin_unlock_irqrestore(&phba->hbalock, iflags); 8746 spin_unlock_irqrestore(&phba->hbalock, iflags);
8776 goto out; 8747 goto out;
8777 } 8748 }
8778 memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe)); 8749 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
8779 /* save off the frame for the word thread to process */ 8750 /* save off the frame for the word thread to process */
8780 list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); 8751 list_add_tail(&dma_buf->cq_event.list,
8752 &phba->sli4_hba.sp_queue_event);
8781 /* Frame received */ 8753 /* Frame received */
8782 phba->hba_flag |= HBA_RECEIVE_BUFFER; 8754 phba->hba_flag |= HBA_SP_QUEUE_EVT;
8783 spin_unlock_irqrestore(&phba->hbalock, iflags); 8755 spin_unlock_irqrestore(&phba->hbalock, iflags);
8784 workposted = true; 8756 workposted = true;
8785 break; 8757 break;
@@ -8794,7 +8766,58 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8794 } 8766 }
8795out: 8767out:
8796 return workposted; 8768 return workposted;
8769}
8770
8771/**
8772 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
8773 * @phba: Pointer to HBA context object.
8774 * @cq: Pointer to the completion queue.
8775 * @wcqe: Pointer to a completion queue entry.
8776 *
8777 * This routine process a slow-path work-queue or recieve queue completion queue
8778 * entry.
8779 *
8780 * Return: true if work posted to worker thread, otherwise false.
8781 **/
8782static bool
8783lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8784 struct lpfc_cqe *cqe)
8785{
8786 struct lpfc_cqe cqevt;
8787 bool workposted = false;
8788
8789 /* Copy the work queue CQE and convert endian order if needed */
8790 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
8797 8791
8792 /* Check and process for different type of WCQE and dispatch */
8793 switch (bf_get(lpfc_cqe_code, &cqevt)) {
8794 case CQE_CODE_COMPL_WQE:
8795 /* Process the WQ/RQ complete event */
8796 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8797 (struct lpfc_wcqe_complete *)&cqevt);
8798 break;
8799 case CQE_CODE_RELEASE_WQE:
8800 /* Process the WQ release event */
8801 lpfc_sli4_sp_handle_rel_wcqe(phba,
8802 (struct lpfc_wcqe_release *)&cqevt);
8803 break;
8804 case CQE_CODE_XRI_ABORTED:
8805 /* Process the WQ XRI abort event */
8806 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8807 (struct sli4_wcqe_xri_aborted *)&cqevt);
8808 break;
8809 case CQE_CODE_RECEIVE:
8810 /* Process the RQ event */
8811 workposted = lpfc_sli4_sp_handle_rcqe(phba,
8812 (struct lpfc_rcqe *)&cqevt);
8813 break;
8814 default:
8815 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8816 "0388 Not a valid WCQE code: x%x\n",
8817 bf_get(lpfc_cqe_code, &cqevt));
8818 break;
8819 }
8820 return workposted;
8798} 8821}
8799 8822
8800/** 8823/**
@@ -8858,14 +8881,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8858 break; 8881 break;
8859 case LPFC_WCQ: 8882 case LPFC_WCQ:
8860 while ((cqe = lpfc_sli4_cq_get(cq))) { 8883 while ((cqe = lpfc_sli4_cq_get(cq))) {
8861 workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe); 8884 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
8862 if (!(++ecount % LPFC_GET_QE_REL_INT))
8863 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8864 }
8865 break;
8866 case LPFC_RCQ:
8867 while ((cqe = lpfc_sli4_cq_get(cq))) {
8868 workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
8869 if (!(++ecount % LPFC_GET_QE_REL_INT)) 8885 if (!(++ecount % LPFC_GET_QE_REL_INT))
8870 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 8886 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
8871 } 8887 }
@@ -10427,8 +10443,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10427 return xritag; 10443 return xritag;
10428 } 10444 }
10429 spin_unlock_irq(&phba->hbalock); 10445 spin_unlock_irq(&phba->hbalock);
10430 10446 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10431 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10432 "2004 Failed to allocate XRI.last XRITAG is %d" 10447 "2004 Failed to allocate XRI.last XRITAG is %d"
10433 " Max XRI is %d, Used XRI is %d\n", 10448 " Max XRI is %d, Used XRI is %d\n",
10434 phba->sli4_hba.next_xri, 10449 phba->sli4_hba.next_xri,
@@ -10492,15 +10507,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10492 lpfc_sli4_mbox_cmd_free(phba, mbox); 10507 lpfc_sli4_mbox_cmd_free(phba, mbox);
10493 return -ENOMEM; 10508 return -ENOMEM;
10494 } 10509 }
10495
10496 /* Get the first SGE entry from the non-embedded DMA memory */ 10510 /* Get the first SGE entry from the non-embedded DMA memory */
10497 if (unlikely(!mbox->sge_array)) {
10498 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10499 "2525 Failed to get the non-embedded SGE "
10500 "virtual address\n");
10501 lpfc_sli4_mbox_cmd_free(phba, mbox);
10502 return -ENOMEM;
10503 }
10504 viraddr = mbox->sge_array->addr[0]; 10511 viraddr = mbox->sge_array->addr[0];
10505 10512
10506 /* Set up the SGL pages in the non-embedded DMA pages */ 10513 /* Set up the SGL pages in the non-embedded DMA pages */
@@ -10524,8 +10531,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10524 sgl_pg_pairs++; 10531 sgl_pg_pairs++;
10525 } 10532 }
10526 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 10533 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
10527 pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs; 10534 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
10528 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
10529 /* Perform endian conversion if necessary */ 10535 /* Perform endian conversion if necessary */
10530 sgl->word0 = cpu_to_le32(sgl->word0); 10536 sgl->word0 = cpu_to_le32(sgl->word0);
10531 10537
@@ -10607,15 +10613,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10607 lpfc_sli4_mbox_cmd_free(phba, mbox); 10613 lpfc_sli4_mbox_cmd_free(phba, mbox);
10608 return -ENOMEM; 10614 return -ENOMEM;
10609 } 10615 }
10610
10611 /* Get the first SGE entry from the non-embedded DMA memory */ 10616 /* Get the first SGE entry from the non-embedded DMA memory */
10612 if (unlikely(!mbox->sge_array)) {
10613 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10614 "2565 Failed to get the non-embedded SGE "
10615 "virtual address\n");
10616 lpfc_sli4_mbox_cmd_free(phba, mbox);
10617 return -ENOMEM;
10618 }
10619 viraddr = mbox->sge_array->addr[0]; 10617 viraddr = mbox->sge_array->addr[0];
10620 10618
10621 /* Set up the SGL pages in the non-embedded DMA pages */ 10619 /* Set up the SGL pages in the non-embedded DMA pages */
@@ -10802,6 +10800,105 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10802} 10800}
10803 10801
10804/** 10802/**
10803 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
10804 * @vport: The vport to work on.
10805 *
10806 * This function updates the receive sequence time stamp for this vport. The
10807 * receive sequence time stamp indicates the time that the last frame of the
10808 * the sequence that has been idle for the longest amount of time was received.
10809 * the driver uses this time stamp to indicate if any received sequences have
10810 * timed out.
10811 **/
10812void
10813lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
10814{
10815 struct lpfc_dmabuf *h_buf;
10816 struct hbq_dmabuf *dmabuf = NULL;
10817
10818 /* get the oldest sequence on the rcv list */
10819 h_buf = list_get_first(&vport->rcv_buffer_list,
10820 struct lpfc_dmabuf, list);
10821 if (!h_buf)
10822 return;
10823 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10824 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
10825}
10826
10827/**
10828 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
10829 * @vport: The vport that the received sequences were sent to.
10830 *
10831 * This function cleans up all outstanding received sequences. This is called
10832 * by the driver when a link event or user action invalidates all the received
10833 * sequences.
10834 **/
10835void
10836lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
10837{
10838 struct lpfc_dmabuf *h_buf, *hnext;
10839 struct lpfc_dmabuf *d_buf, *dnext;
10840 struct hbq_dmabuf *dmabuf = NULL;
10841
10842 /* start with the oldest sequence on the rcv list */
10843 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
10844 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10845 list_del_init(&dmabuf->hbuf.list);
10846 list_for_each_entry_safe(d_buf, dnext,
10847 &dmabuf->dbuf.list, list) {
10848 list_del_init(&d_buf->list);
10849 lpfc_in_buf_free(vport->phba, d_buf);
10850 }
10851 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
10852 }
10853}
10854
10855/**
10856 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
10857 * @vport: The vport that the received sequences were sent to.
10858 *
10859 * This function determines whether any received sequences have timed out by
10860 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
10861 * indicates that there is at least one timed out sequence this routine will
10862 * go through the received sequences one at a time from most inactive to most
10863 * active to determine which ones need to be cleaned up. Once it has determined
10864 * that a sequence needs to be cleaned up it will simply free up the resources
10865 * without sending an abort.
10866 **/
10867void
10868lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
10869{
10870 struct lpfc_dmabuf *h_buf, *hnext;
10871 struct lpfc_dmabuf *d_buf, *dnext;
10872 struct hbq_dmabuf *dmabuf = NULL;
10873 unsigned long timeout;
10874 int abort_count = 0;
10875
10876 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
10877 vport->rcv_buffer_time_stamp);
10878 if (list_empty(&vport->rcv_buffer_list) ||
10879 time_before(jiffies, timeout))
10880 return;
10881 /* start with the oldest sequence on the rcv list */
10882 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
10883 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
10884 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
10885 dmabuf->time_stamp);
10886 if (time_before(jiffies, timeout))
10887 break;
10888 abort_count++;
10889 list_del_init(&dmabuf->hbuf.list);
10890 list_for_each_entry_safe(d_buf, dnext,
10891 &dmabuf->dbuf.list, list) {
10892 list_del_init(&d_buf->list);
10893 lpfc_in_buf_free(vport->phba, d_buf);
10894 }
10895 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
10896 }
10897 if (abort_count)
10898 lpfc_update_rcv_time_stamp(vport);
10899}
10900
10901/**
10805 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 10902 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
10806 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 10903 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
10807 * 10904 *
@@ -10823,6 +10920,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10823 struct hbq_dmabuf *seq_dmabuf = NULL; 10920 struct hbq_dmabuf *seq_dmabuf = NULL;
10824 struct hbq_dmabuf *temp_dmabuf = NULL; 10921 struct hbq_dmabuf *temp_dmabuf = NULL;
10825 10922
10923 INIT_LIST_HEAD(&dmabuf->dbuf.list);
10924 dmabuf->time_stamp = jiffies;
10826 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 10925 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
10827 /* Use the hdr_buf to find the sequence that this frame belongs to */ 10926 /* Use the hdr_buf to find the sequence that this frame belongs to */
10828 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 10927 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
@@ -10841,13 +10940,21 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10841 * Queue the buffer on the vport's rcv_buffer_list. 10940 * Queue the buffer on the vport's rcv_buffer_list.
10842 */ 10941 */
10843 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 10942 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10943 lpfc_update_rcv_time_stamp(vport);
10844 return dmabuf; 10944 return dmabuf;
10845 } 10945 }
10846 temp_hdr = seq_dmabuf->hbuf.virt; 10946 temp_hdr = seq_dmabuf->hbuf.virt;
10847 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { 10947 if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
10848 list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list); 10948 list_del_init(&seq_dmabuf->hbuf.list);
10949 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
10950 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
10951 lpfc_update_rcv_time_stamp(vport);
10849 return dmabuf; 10952 return dmabuf;
10850 } 10953 }
10954 /* move this sequence to the tail to indicate a young sequence */
10955 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
10956 seq_dmabuf->time_stamp = jiffies;
10957 lpfc_update_rcv_time_stamp(vport);
10851 /* find the correct place in the sequence to insert this frame */ 10958 /* find the correct place in the sequence to insert this frame */
10852 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 10959 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
10853 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 10960 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10865,6 +10972,210 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
10865} 10972}
10866 10973
10867/** 10974/**
10975 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
10976 * @vport: pointer to a vitural port
10977 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10978 *
10979 * This function tries to abort from the partially assembed sequence, described
10980 * by the information from basic abbort @dmabuf. It checks to see whether such
10981 * partially assembled sequence held by the driver. If so, it shall free up all
10982 * the frames from the partially assembled sequence.
10983 *
10984 * Return
10985 * true -- if there is matching partially assembled sequence present and all
10986 * the frames freed with the sequence;
10987 * false -- if there is no matching partially assembled sequence present so
10988 * nothing got aborted in the lower layer driver
10989 **/
10990static bool
10991lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
10992 struct hbq_dmabuf *dmabuf)
10993{
10994 struct fc_frame_header *new_hdr;
10995 struct fc_frame_header *temp_hdr;
10996 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
10997 struct hbq_dmabuf *seq_dmabuf = NULL;
10998
10999 /* Use the hdr_buf to find the sequence that matches this frame */
11000 INIT_LIST_HEAD(&dmabuf->dbuf.list);
11001 INIT_LIST_HEAD(&dmabuf->hbuf.list);
11002 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11003 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
11004 temp_hdr = (struct fc_frame_header *)h_buf->virt;
11005 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
11006 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
11007 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
11008 continue;
11009 /* found a pending sequence that matches this frame */
11010 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
11011 break;
11012 }
11013
11014 /* Free up all the frames from the partially assembled sequence */
11015 if (seq_dmabuf) {
11016 list_for_each_entry_safe(d_buf, n_buf,
11017 &seq_dmabuf->dbuf.list, list) {
11018 list_del_init(&d_buf->list);
11019 lpfc_in_buf_free(vport->phba, d_buf);
11020 }
11021 return true;
11022 }
11023 return false;
11024}
11025
11026/**
11027 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler
11028 * @phba: Pointer to HBA context object.
11029 * @cmd_iocbq: pointer to the command iocbq structure.
11030 * @rsp_iocbq: pointer to the response iocbq structure.
11031 *
11032 * This function handles the sequence abort accept iocb command complete
11033 * event. It properly releases the memory allocated to the sequence abort
11034 * accept iocb.
11035 **/
11036static void
11037lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba,
11038 struct lpfc_iocbq *cmd_iocbq,
11039 struct lpfc_iocbq *rsp_iocbq)
11040{
11041 if (cmd_iocbq)
11042 lpfc_sli_release_iocbq(phba, cmd_iocbq);
11043}
11044
11045/**
11046 * lpfc_sli4_seq_abort_acc - Accept sequence abort
11047 * @phba: Pointer to HBA context object.
11048 * @fc_hdr: pointer to a FC frame header.
11049 *
11050 * This function sends a basic accept to a previous unsol sequence abort
11051 * event after aborting the sequence handling.
11052 **/
11053static void
11054lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba,
11055 struct fc_frame_header *fc_hdr)
11056{
11057 struct lpfc_iocbq *ctiocb = NULL;
11058 struct lpfc_nodelist *ndlp;
11059 uint16_t oxid, rxid;
11060 uint32_t sid, fctl;
11061 IOCB_t *icmd;
11062
11063 if (!lpfc_is_link_up(phba))
11064 return;
11065
11066 sid = sli4_sid_from_fc_hdr(fc_hdr);
11067 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
11068 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
11069
11070 ndlp = lpfc_findnode_did(phba->pport, sid);
11071 if (!ndlp) {
11072 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
11073 "1268 Find ndlp returned NULL for oxid:x%x "
11074 "SID:x%x\n", oxid, sid);
11075 return;
11076 }
11077
11078 /* Allocate buffer for acc iocb */
11079 ctiocb = lpfc_sli_get_iocbq(phba);
11080 if (!ctiocb)
11081 return;
11082
11083 /* Extract the F_CTL field from FC_HDR */
11084 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
11085
11086 icmd = &ctiocb->iocb;
11087 icmd->un.xseq64.bdl.bdeSize = 0;
11088 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
11089 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11090 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
11091 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
11092
11093 /* Fill in the rest of iocb fields */
11094 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
11095 icmd->ulpBdeCount = 0;
11096 icmd->ulpLe = 1;
11097 icmd->ulpClass = CLASS3;
11098 icmd->ulpContext = ndlp->nlp_rpi;
11099
11100 ctiocb->iocb_cmpl = NULL;
11101 ctiocb->vport = phba->pport;
11102 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl;
11103
11104 if (fctl & FC_FC_EX_CTX) {
11105 /* ABTS sent by responder to CT exchange, construction
11106 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
11107 * field and RX_ID from ABTS for RX_ID field.
11108 */
11109 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP);
11110 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid);
11111 ctiocb->sli4_xritag = oxid;
11112 } else {
11113 /* ABTS sent by initiator to CT exchange, construction
11114 * of BA_ACC will need to allocate a new XRI as for the
11115 * XRI_TAG and RX_ID fields.
11116 */
11117 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT);
11118 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI);
11119 ctiocb->sli4_xritag = NO_XRI;
11120 }
11121 bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid);
11122
11123 /* Xmit CT abts accept on exchange <xid> */
11124 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11125 "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n",
11126 CMD_XMIT_BLS_RSP64_CX, phba->link_state);
11127 lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
11128}
11129
11130/**
11131 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
11132 * @vport: Pointer to the vport on which this sequence was received
11133 * @dmabuf: pointer to a dmabuf that describes the FC sequence
11134 *
11135 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
11136 * receive sequence is only partially assembed by the driver, it shall abort
11137 * the partially assembled frames for the sequence. Otherwise, if the
11138 * unsolicited receive sequence has been completely assembled and passed to
11139 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the
11140 * unsolicited sequence has been aborted. After that, it will issue a basic
11141 * accept to accept the abort.
11142 **/
11143void
11144lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
11145 struct hbq_dmabuf *dmabuf)
11146{
11147 struct lpfc_hba *phba = vport->phba;
11148 struct fc_frame_header fc_hdr;
11149 uint32_t fctl;
11150 bool abts_par;
11151
11152 /* Make a copy of fc_hdr before the dmabuf being released */
11153 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
11154 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
11155
11156 if (fctl & FC_FC_EX_CTX) {
11157 /*
11158 * ABTS sent by responder to exchange, just free the buffer
11159 */
11160 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11161 } else {
11162 /*
11163 * ABTS sent by initiator to exchange, need to do cleanup
11164 */
11165 /* Try to abort partially assembled seq */
11166 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
11167
11168 /* Send abort to ULP if partially seq abort failed */
11169 if (abts_par == false)
11170 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
11171 else
11172 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11173 }
11174 /* Send basic accept (BA_ACC) to the abort requester */
11175 lpfc_sli4_seq_abort_acc(phba, &fc_hdr);
11176}
11177
11178/**
10868 * lpfc_seq_complete - Indicates if a sequence is complete 11179 * lpfc_seq_complete - Indicates if a sequence is complete
10869 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11180 * @dmabuf: pointer to a dmabuf that describes the FC sequence
10870 * 11181 *
@@ -10935,10 +11246,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10935 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11246 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
10936 /* remove from receive buffer list */ 11247 /* remove from receive buffer list */
10937 list_del_init(&seq_dmabuf->hbuf.list); 11248 list_del_init(&seq_dmabuf->hbuf.list);
11249 lpfc_update_rcv_time_stamp(vport);
10938 /* get the Remote Port's SID */ 11250 /* get the Remote Port's SID */
10939 sid = (fc_hdr->fh_s_id[0] << 16 | 11251 sid = sli4_sid_from_fc_hdr(fc_hdr);
10940 fc_hdr->fh_s_id[1] << 8 |
10941 fc_hdr->fh_s_id[2]);
10942 /* Get an iocbq struct to fill in. */ 11252 /* Get an iocbq struct to fill in. */
10943 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 11253 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10944 if (first_iocbq) { 11254 if (first_iocbq) {
@@ -10957,7 +11267,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10957 LPFC_DATA_BUF_SIZE; 11267 LPFC_DATA_BUF_SIZE;
10958 first_iocbq->iocb.un.rcvels.remoteID = sid; 11268 first_iocbq->iocb.un.rcvels.remoteID = sid;
10959 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11269 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10960 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11270 bf_get(lpfc_rcqe_length,
11271 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10961 } 11272 }
10962 iocbq = first_iocbq; 11273 iocbq = first_iocbq;
10963 /* 11274 /*
@@ -10975,7 +11286,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10975 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = 11286 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
10976 LPFC_DATA_BUF_SIZE; 11287 LPFC_DATA_BUF_SIZE;
10977 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11288 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10978 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11289 bf_get(lpfc_rcqe_length,
11290 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10979 } else { 11291 } else {
10980 iocbq = lpfc_sli_get_iocbq(vport->phba); 11292 iocbq = lpfc_sli_get_iocbq(vport->phba);
10981 if (!iocbq) { 11293 if (!iocbq) {
@@ -10994,7 +11306,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10994 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 11306 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10995 LPFC_DATA_BUF_SIZE; 11307 LPFC_DATA_BUF_SIZE;
10996 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 11308 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10997 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe); 11309 bf_get(lpfc_rcqe_length,
11310 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
10998 iocbq->iocb.un.rcvels.remoteID = sid; 11311 iocbq->iocb.un.rcvels.remoteID = sid;
10999 list_add_tail(&iocbq->list, &first_iocbq->list); 11312 list_add_tail(&iocbq->list, &first_iocbq->list);
11000 } 11313 }
@@ -11002,6 +11315,43 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11002 return first_iocbq; 11315 return first_iocbq;
11003} 11316}
11004 11317
11318static void
11319lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
11320 struct hbq_dmabuf *seq_dmabuf)
11321{
11322 struct fc_frame_header *fc_hdr;
11323 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
11324 struct lpfc_hba *phba = vport->phba;
11325
11326 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
11327 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
11328 if (!iocbq) {
11329 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11330 "2707 Ring %d handler: Failed to allocate "
11331 "iocb Rctl x%x Type x%x received\n",
11332 LPFC_ELS_RING,
11333 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11334 return;
11335 }
11336 if (!lpfc_complete_unsol_iocb(phba,
11337 &phba->sli.ring[LPFC_ELS_RING],
11338 iocbq, fc_hdr->fh_r_ctl,
11339 fc_hdr->fh_type))
11340 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11341 "2540 Ring %d handler: unexpected Rctl "
11342 "x%x Type x%x received\n",
11343 LPFC_ELS_RING,
11344 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11345
11346 /* Free iocb created in lpfc_prep_seq */
11347 list_for_each_entry_safe(curr_iocb, next_iocb,
11348 &iocbq->list, list) {
11349 list_del_init(&curr_iocb->list);
11350 lpfc_sli_release_iocbq(phba, curr_iocb);
11351 }
11352 lpfc_sli_release_iocbq(phba, iocbq);
11353}
11354
11005/** 11355/**
11006 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 11356 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
11007 * @phba: Pointer to HBA context object. 11357 * @phba: Pointer to HBA context object.
@@ -11014,67 +11364,54 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
11014 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 11364 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
11015 * appropriate receive function when the final frame in a sequence is received. 11365 * appropriate receive function when the final frame in a sequence is received.
11016 **/ 11366 **/
11017int 11367void
11018lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) 11368lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11369 struct hbq_dmabuf *dmabuf)
11019{ 11370{
11020 LIST_HEAD(cmplq); 11371 struct hbq_dmabuf *seq_dmabuf;
11021 struct hbq_dmabuf *dmabuf, *seq_dmabuf;
11022 struct fc_frame_header *fc_hdr; 11372 struct fc_frame_header *fc_hdr;
11023 struct lpfc_vport *vport; 11373 struct lpfc_vport *vport;
11024 uint32_t fcfi; 11374 uint32_t fcfi;
11025 struct lpfc_iocbq *iocbq;
11026
11027 /* Clear hba flag and get all received buffers into the cmplq */
11028 spin_lock_irq(&phba->hbalock);
11029 phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
11030 list_splice_init(&phba->rb_pend_list, &cmplq);
11031 spin_unlock_irq(&phba->hbalock);
11032 11375
11033 /* Process each received buffer */ 11376 /* Process each received buffer */
11034 while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { 11377 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
11035 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11378 /* check to see if this a valid type of frame */
11036 /* check to see if this a valid type of frame */ 11379 if (lpfc_fc_frame_check(phba, fc_hdr)) {
11037 if (lpfc_fc_frame_check(phba, fc_hdr)) { 11380 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11038 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11381 return;
11039 continue; 11382 }
11040 } 11383 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
11041 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); 11384 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
11042 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 11385 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
11043 if (!vport) { 11386 /* throw out the frame */
11044 /* throw out the frame */ 11387 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11045 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11388 return;
11046 continue; 11389 }
11047 } 11390 /* Handle the basic abort sequence (BA_ABTS) event */
11048 /* Link this frame */ 11391 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
11049 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 11392 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
11050 if (!seq_dmabuf) { 11393 return;
11051 /* unable to add frame to vport - throw it out */ 11394 }
11052 lpfc_in_buf_free(phba, &dmabuf->dbuf); 11395
11053 continue; 11396 /* Link this frame */
11054 } 11397 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
11055 /* If not last frame in sequence continue processing frames. */ 11398 if (!seq_dmabuf) {
11056 if (!lpfc_seq_complete(seq_dmabuf)) { 11399 /* unable to add frame to vport - throw it out */
11057 /* 11400 lpfc_in_buf_free(phba, &dmabuf->dbuf);
11058 * When saving off frames post a new one and mark this 11401 return;
11059 * frame to be freed when it is finished. 11402 }
11060 **/ 11403 /* If not last frame in sequence continue processing frames. */
11061 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); 11404 if (!lpfc_seq_complete(seq_dmabuf)) {
11062 dmabuf->tag = -1; 11405 /*
11063 continue; 11406 * When saving off frames post a new one and mark this
11064 } 11407 * frame to be freed when it is finished.
11065 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 11408 **/
11066 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 11409 lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
11067 if (!lpfc_complete_unsol_iocb(phba, 11410 dmabuf->tag = -1;
11068 &phba->sli.ring[LPFC_ELS_RING], 11411 return;
11069 iocbq, fc_hdr->fh_r_ctl, 11412 }
11070 fc_hdr->fh_type)) 11413 /* Send the complete sequence to the upper layer protocol */
11071 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11414 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
11072 "2540 Ring %d handler: unexpected Rctl "
11073 "x%x Type x%x received\n",
11074 LPFC_ELS_RING,
11075 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
11076 };
11077 return 0;
11078} 11415}
11079 11416
11080/** 11417/**
@@ -11334,6 +11671,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11334{ 11671{
11335 LPFC_MBOXQ_t *mboxq; 11672 LPFC_MBOXQ_t *mboxq;
11336 int rc = 0; 11673 int rc = 0;
11674 int retval = MBX_SUCCESS;
11337 uint32_t mbox_tmo; 11675 uint32_t mbox_tmo;
11338 11676
11339 if (vpi == 0) 11677 if (vpi == 0)
@@ -11344,16 +11682,17 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11344 lpfc_init_vpi(phba, mboxq, vpi); 11682 lpfc_init_vpi(phba, mboxq, vpi);
11345 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 11683 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11346 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11684 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11347 if (rc != MBX_TIMEOUT)
11348 mempool_free(mboxq, phba->mbox_mem_pool);
11349 if (rc != MBX_SUCCESS) { 11685 if (rc != MBX_SUCCESS) {
11350 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11686 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11351 "2022 INIT VPI Mailbox failed " 11687 "2022 INIT VPI Mailbox failed "
11352 "status %d, mbxStatus x%x\n", rc, 11688 "status %d, mbxStatus x%x\n", rc,
11353 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 11689 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11354 rc = -EIO; 11690 retval = -EIO;
11355 } 11691 }
11356 return rc; 11692 if (rc != MBX_TIMEOUT)
11693 mempool_free(mboxq, phba->mbox_mem_pool);
11694
11695 return retval;
11357} 11696}
11358 11697
11359/** 11698/**
@@ -11438,13 +11777,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11438 */ 11777 */
11439 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 11778 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11440 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 11779 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11441 if (unlikely(!mboxq->sge_array)) {
11442 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11443 "2526 Failed to get the non-embedded SGE "
11444 "virtual address\n");
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 return -ENOMEM;
11447 }
11448 virt_addr = mboxq->sge_array->addr[0]; 11780 virt_addr = mboxq->sge_array->addr[0];
11449 /* 11781 /*
11450 * Configure the FCF record for FCFI 0. This is the driver's 11782 * Configure the FCF record for FCFI 0. This is the driver's
@@ -11542,7 +11874,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11874 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11543 "2000 Failed to allocate mbox for " 11875 "2000 Failed to allocate mbox for "
11544 "READ_FCF cmd\n"); 11876 "READ_FCF cmd\n");
11545 return -ENOMEM; 11877 error = -ENOMEM;
11878 goto fail_fcfscan;
11546 } 11879 }
11547 11880
11548 req_len = sizeof(struct fcf_record) + 11881 req_len = sizeof(struct fcf_record) +
@@ -11558,8 +11891,8 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11558 "0291 Allocated DMA memory size (x%x) is " 11891 "0291 Allocated DMA memory size (x%x) is "
11559 "less than the requested DMA memory " 11892 "less than the requested DMA memory "
11560 "size (x%x)\n", alloc_len, req_len); 11893 "size (x%x)\n", alloc_len, req_len);
11561 lpfc_sli4_mbox_cmd_free(phba, mboxq); 11894 error = -ENOMEM;
11562 return -ENOMEM; 11895 goto fail_fcfscan;
11563 } 11896 }
11564 11897
11565 /* Get the first SGE entry from the non-embedded DMA memory. This 11898 /* Get the first SGE entry from the non-embedded DMA memory. This
@@ -11567,13 +11900,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11567 */ 11900 */
11568 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 11901 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11569 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 11902 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11570 if (unlikely(!mboxq->sge_array)) {
11571 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11572 "2527 Failed to get the non-embedded SGE "
11573 "virtual address\n");
11574 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11575 return -ENOMEM;
11576 }
11577 virt_addr = mboxq->sge_array->addr[0]; 11903 virt_addr = mboxq->sge_array->addr[0];
11578 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 11904 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11579 11905
@@ -11586,7 +11912,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11586 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; 11912 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11587 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 11913 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11588 if (rc == MBX_NOT_FINISHED) { 11914 if (rc == MBX_NOT_FINISHED) {
11589 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11590 error = -EIO; 11915 error = -EIO;
11591 } else { 11916 } else {
11592 spin_lock_irq(&phba->hbalock); 11917 spin_lock_irq(&phba->hbalock);
@@ -11594,6 +11919,15 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11594 spin_unlock_irq(&phba->hbalock); 11919 spin_unlock_irq(&phba->hbalock);
11595 error = 0; 11920 error = 0;
11596 } 11921 }
11922fail_fcfscan:
11923 if (error) {
11924 if (mboxq)
11925 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11926 /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
11927 spin_lock_irq(&phba->hbalock);
11928 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
11929 spin_unlock_irq(&phba->hbalock);
11930 }
11597 return error; 11931 return error;
11598} 11932}
11599 11933
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 3c53316cf6d0..ba38de3c28f1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -29,14 +29,17 @@ typedef enum _lpfc_ctx_cmd {
29 LPFC_CTX_HOST 29 LPFC_CTX_HOST
30} lpfc_ctx_cmd; 30} lpfc_ctx_cmd;
31 31
32/* This structure is used to carry the needed response IOCB states */ 32struct lpfc_cq_event {
33struct lpfc_sli4_rspiocb_info { 33 struct list_head list;
34 uint8_t hw_status; 34 union {
35 uint8_t bfield; 35 struct lpfc_mcqe mcqe_cmpl;
36#define LPFC_XB 0x1 36 struct lpfc_acqe_link acqe_link;
37#define LPFC_PV 0x2 37 struct lpfc_acqe_fcoe acqe_fcoe;
38 uint8_t priority; 38 struct lpfc_acqe_dcbx acqe_dcbx;
39 uint8_t reserved; 39 struct lpfc_rcqe rcqe_cmpl;
40 struct sli4_wcqe_xri_aborted wcqe_axri;
41 struct lpfc_wcqe_complete wcqe_cmpl;
42 } cqe;
40}; 43};
41 44
42/* This structure is used to handle IOCB requests / responses */ 45/* This structure is used to handle IOCB requests / responses */
@@ -46,6 +49,7 @@ struct lpfc_iocbq {
46 struct list_head clist; 49 struct list_head clist;
47 uint16_t iotag; /* pre-assigned IO tag */ 50 uint16_t iotag; /* pre-assigned IO tag */
48 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 51 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
52 struct lpfc_cq_event cq_event;
49 53
50 IOCB_t iocb; /* IOCB cmd */ 54 IOCB_t iocb; /* IOCB cmd */
51 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 55 uint8_t retry; /* retry counter for IOCB cmd - if needed */
@@ -56,11 +60,13 @@ struct lpfc_iocbq {
56#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 60#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
57#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 61#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
58#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
59#define LPFC_FIP_ELS 0x40 63#define LPFC_FIP_ELS_ID_MASK 0xc0 /* ELS_ID range 0-3 */
64#define LPFC_FIP_ELS_ID_SHIFT 6
60 65
61 uint8_t abort_count; 66 uint8_t abort_count;
62 uint8_t rsvd2; 67 uint8_t rsvd2;
63 uint32_t drvrTimeout; /* driver timeout in seconds */ 68 uint32_t drvrTimeout; /* driver timeout in seconds */
69 uint32_t fcp_wqidx; /* index to FCP work queue */
64 struct lpfc_vport *vport;/* virtual port pointer */ 70 struct lpfc_vport *vport;/* virtual port pointer */
65 void *context1; /* caller context information */ 71 void *context1; /* caller context information */
66 void *context2; /* caller context information */ 72 void *context2; /* caller context information */
@@ -76,7 +82,6 @@ struct lpfc_iocbq {
76 struct lpfc_iocbq *); 82 struct lpfc_iocbq *);
77 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 83 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
78 struct lpfc_iocbq *); 84 struct lpfc_iocbq *);
79 struct lpfc_sli4_rspiocb_info sli4_info;
80}; 85};
81 86
82#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ 87#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -110,7 +115,7 @@ typedef struct lpfcMboxq {
110 return */ 115 return */
111#define MBX_NOWAIT 2 /* issue command then return immediately */ 116#define MBX_NOWAIT 2 /* issue command then return immediately */
112 117
113#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per 118#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per
114 ring */ 119 ring */
115#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */ 120#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */
116 121
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index b5f4ba1a5c27..25d66d070cf8 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -58,6 +58,16 @@
58#define LPFC_FCOE_FKA_ADV_PER 0 58#define LPFC_FCOE_FKA_ADV_PER 0
59#define LPFC_FCOE_FIP_PRIORITY 0x80 59#define LPFC_FCOE_FIP_PRIORITY 0x80
60 60
61#define sli4_sid_from_fc_hdr(fc_hdr) \
62 ((fc_hdr)->fh_s_id[0] << 16 | \
63 (fc_hdr)->fh_s_id[1] << 8 | \
64 (fc_hdr)->fh_s_id[2])
65
66#define sli4_fctl_from_fc_hdr(fc_hdr) \
67 ((fc_hdr)->fh_f_ctl[0] << 16 | \
68 (fc_hdr)->fh_f_ctl[1] << 8 | \
69 (fc_hdr)->fh_f_ctl[2])
70
61enum lpfc_sli4_queue_type { 71enum lpfc_sli4_queue_type {
62 LPFC_EQ, 72 LPFC_EQ,
63 LPFC_GCQ, 73 LPFC_GCQ,
@@ -110,18 +120,6 @@ struct lpfc_queue {
110 union sli4_qe qe[1]; /* array to index entries (must be last) */ 120 union sli4_qe qe[1]; /* array to index entries (must be last) */
111}; 121};
112 122
113struct lpfc_cq_event {
114 struct list_head list;
115 union {
116 struct lpfc_mcqe mcqe_cmpl;
117 struct lpfc_acqe_link acqe_link;
118 struct lpfc_acqe_fcoe acqe_fcoe;
119 struct lpfc_acqe_dcbx acqe_dcbx;
120 struct lpfc_rcqe rcqe_cmpl;
121 struct sli4_wcqe_xri_aborted wcqe_axri;
122 } cqe;
123};
124
125struct lpfc_sli4_link { 123struct lpfc_sli4_link {
126 uint8_t speed; 124 uint8_t speed;
127 uint8_t duplex; 125 uint8_t duplex;
@@ -166,7 +164,7 @@ struct lpfc_fip_param_hdr {
166#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 164#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
167#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 165#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
168#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags 166#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
169#define FIPP_MODE_ON 0x2 167#define FIPP_MODE_ON 0x1
170#define FIPP_MODE_OFF 0x0 168#define FIPP_MODE_OFF 0x0
171#define FIPP_VLAN_VALID 0x1 169#define FIPP_VLAN_VALID 0x1
172}; 170};
@@ -295,9 +293,8 @@ struct lpfc_sli4_hba {
295 /* BAR0 PCI config space register memory map */ 293 /* BAR0 PCI config space register memory map */
296 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ 294 void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
297 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ 295 void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
298 void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ 296 void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
299 void __iomem *ONLINE1regaddr; /* Address to components of internal UE */ 297 void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
300#define LPFC_ONLINE_NERR 0xFFFFFFFF
301 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ 298 void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
302 /* BAR1 FCoE function CSR register memory map */ 299 /* BAR1 FCoE function CSR register memory map */
303 void __iomem *STAregaddr; /* Address to HST_STATE register */ 300 void __iomem *STAregaddr; /* Address to HST_STATE register */
@@ -311,6 +308,8 @@ struct lpfc_sli4_hba {
311 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ 308 void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */
312 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ 309 void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */
313 310
311 uint32_t ue_mask_lo;
312 uint32_t ue_mask_hi;
314 struct msix_entry *msix_entries; 313 struct msix_entry *msix_entries;
315 uint32_t cfg_eqn; 314 uint32_t cfg_eqn;
316 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 315 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
@@ -325,7 +324,6 @@ struct lpfc_sli4_hba {
325 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ 324 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
326 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ 325 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
327 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ 326 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
328 struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
329 327
330 /* Setup information for various queue parameters */ 328 /* Setup information for various queue parameters */
331 int eq_esize; 329 int eq_esize;
@@ -360,7 +358,7 @@ struct lpfc_sli4_hba {
360 unsigned long *rpi_bmask; 358 unsigned long *rpi_bmask;
361 uint16_t rpi_count; 359 uint16_t rpi_count;
362 struct lpfc_sli4_flags sli4_flags; 360 struct lpfc_sli4_flags sli4_flags;
363 struct list_head sp_rspiocb_work_queue; 361 struct list_head sp_queue_event;
364 struct list_head sp_cqe_event_pool; 362 struct list_head sp_cqe_event_pool;
365 struct list_head sp_asynce_work_queue; 363 struct list_head sp_asynce_work_queue;
366 struct list_head sp_fcp_xri_aborted_work_queue; 364 struct list_head sp_fcp_xri_aborted_work_queue;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9ae20af4bdb7..c7f3aed2aab8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,8 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.4" 21#define LPFC_DRIVER_VERSION "8.3.6"
22
23#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
25#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 606efa767548..7d6dd83d3592 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -389,7 +389,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
389 * by the port. 389 * by the port.
390 */ 390 */
391 if ((phba->sli_rev == LPFC_SLI_REV4) && 391 if ((phba->sli_rev == LPFC_SLI_REV4) &&
392 (pport->vfi_state & LPFC_VFI_REGISTERED)) { 392 (pport->vpi_state & LPFC_VPI_REGISTERED)) {
393 rc = lpfc_sli4_init_vpi(phba, vpi); 393 rc = lpfc_sli4_init_vpi(phba, vpi);
394 if (rc) { 394 if (rc) {
395 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 395 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -700,6 +700,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
700 } 700 }
701 spin_unlock_irq(&phba->ndlp_lock); 701 spin_unlock_irq(&phba->ndlp_lock);
702 } 702 }
703 if (vport->vpi_state != LPFC_VPI_REGISTERED)
704 goto skip_logo;
703 vport->unreg_vpi_cmpl = VPORT_INVAL; 705 vport->unreg_vpi_cmpl = VPORT_INVAL;
704 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 706 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
705 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 707 if (!lpfc_issue_els_npiv_logo(vport, ndlp))