Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h            |   4
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c         |   4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c        |  27
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c    |  35
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h         |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h        |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       |  34
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c       |   7
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       | 661
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c        | 264
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h        |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h       |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h    |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c      |   2
16 files changed, 610 insertions(+), 448 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 540569849099..1877d9811831 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -457,10 +457,6 @@ struct lpfc_hba {
457 void (*lpfc_scsi_prep_cmnd) 457 void (*lpfc_scsi_prep_cmnd)
458 (struct lpfc_vport *, struct lpfc_scsi_buf *, 458 (struct lpfc_vport *, struct lpfc_scsi_buf *,
459 struct lpfc_nodelist *); 459 struct lpfc_nodelist *);
460 int (*lpfc_scsi_prep_task_mgmt_cmd)
461 (struct lpfc_vport *, struct lpfc_scsi_buf *,
462 unsigned int, uint8_t);
463
464 /* IOCB interface function jump table entries */ 460 /* IOCB interface function jump table entries */
465 int (*__lpfc_sli_issue_iocb) 461 int (*__lpfc_sli_issue_iocb)
466 (struct lpfc_hba *, uint32_t, 462 (struct lpfc_hba *, uint32_t,
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d73e677201f8..fc07be5fbce9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3113,6 +3113,9 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
3113 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3113 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3114 struct lpfc_hba *phba = vport->phba; 3114 struct lpfc_hba *phba = vport->phba;
3115 3115
3116 if (phba->sli_rev >= LPFC_SLI_REV4)
3117 return -EPERM;
3118
3116 if ((off + count) > FF_REG_AREA_SIZE) 3119 if ((off + count) > FF_REG_AREA_SIZE)
3117 return -ERANGE; 3120 return -ERANGE;
3118 3121
@@ -3163,6 +3166,9 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3163 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3166 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3164 struct lpfc_hba *phba = vport->phba; 3167 struct lpfc_hba *phba = vport->phba;
3165 3168
3169 if (phba->sli_rev >= LPFC_SLI_REV4)
3170 return -EPERM;
3171
3166 if (off > FF_REG_AREA_SIZE) 3172 if (off > FF_REG_AREA_SIZE)
3167 return -ERANGE; 3173 return -ERANGE;
3168 3174
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1dbccfd3d022..0e532f072eb3 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1732,7 +1732,9 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1732 uint32_t *ptr, str[4]; 1732 uint32_t *ptr, str[4];
1733 uint8_t *fwname; 1733 uint8_t *fwname;
1734 1734
1735 if (vp->rev.rBit) { 1735 if (phba->sli_rev == LPFC_SLI_REV4)
1736 sprintf(fwrevision, "%s", vp->rev.opFwName);
1737 else if (vp->rev.rBit) {
1736 if (psli->sli_flag & LPFC_SLI_ACTIVE) 1738 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1737 rev = vp->rev.sli2FwRev; 1739 rev = vp->rev.sli2FwRev;
1738 else 1740 else
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6bdeb14878a2..f72fdf23bf1b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -168,6 +168,19 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
168 if (elsiocb == NULL) 168 if (elsiocb == NULL)
169 return NULL; 169 return NULL;
170 170
171 /*
172 * If this command is for fabric controller and HBA running
173 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
174 */
175 if ((did == Fabric_DID) &&
176 bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) &&
177 ((elscmd == ELS_CMD_FLOGI) ||
178 (elscmd == ELS_CMD_FDISC) ||
179 (elscmd == ELS_CMD_LOGO)))
180 elsiocb->iocb_flag |= LPFC_FIP_ELS;
181 else
182 elsiocb->iocb_flag &= ~LPFC_FIP_ELS;
183
171 icmd = &elsiocb->iocb; 184 icmd = &elsiocb->iocb;
172 185
173 /* fill in BDEs for command */ 186 /* fill in BDEs for command */
@@ -6108,9 +6121,17 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6108 icmd->un.elsreq64.myID = 0; 6121 icmd->un.elsreq64.myID = 0;
6109 icmd->un.elsreq64.fl = 1; 6122 icmd->un.elsreq64.fl = 1;
6110 6123
6111 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ 6124 if (phba->sli_rev == LPFC_SLI_REV4) {
6112 icmd->ulpCt_h = 1; 6125 /* FDISC needs to be 1 for WQE VPI */
6113 icmd->ulpCt_l = 0; 6126 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
6127 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
6128 /* Set the ulpContext to the vpi */
6129 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
6130 } else {
6131 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
6132 icmd->ulpCt_h = 1;
6133 icmd->ulpCt_l = 0;
6134 }
6114 6135
6115 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6136 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6116 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 6137 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 35c41ae75be2..ed46b24a3380 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1197,6 +1197,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1197{ 1197{
1198 struct lpfc_fcf_conn_entry *conn_entry; 1198 struct lpfc_fcf_conn_entry *conn_entry;
1199 1199
1200 /* If FCF not available return 0 */
1201 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1202 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
1203 return 0;
1204
1200 if (!phba->cfg_enable_fip) { 1205 if (!phba->cfg_enable_fip) {
1201 *boot_flag = 0; 1206 *boot_flag = 0;
1202 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1207 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
@@ -1216,6 +1221,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1216 *boot_flag = 0; 1221 *boot_flag = 0;
1217 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1222 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1218 new_fcf_record); 1223 new_fcf_record);
1224
1225 /*
1226 * When there are no FCF connect entries, use driver's default
1227 * addressing mode - FPMA.
1228 */
1229 if (*addr_mode & LPFC_FCF_FPMA)
1230 *addr_mode = LPFC_FCF_FPMA;
1231
1219 *vlan_id = 0xFFFF; 1232 *vlan_id = 0xFFFF;
1220 return 1; 1233 return 1;
1221 } 1234 }
@@ -1241,6 +1254,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1241 } 1254 }
1242 1255
1243 /* 1256 /*
1257 * If connection record does not support any addressing mode,
1258 * skip the FCF record.
1259 */
1260 if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1261 & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1262 continue;
1263
1264 /*
1244 * Check if the connection record specifies a required 1265 * Check if the connection record specifies a required
1245 * addressing mode. 1266 * addressing mode.
1246 */ 1267 */
@@ -1272,6 +1293,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1272 else 1293 else
1273 *boot_flag = 0; 1294 *boot_flag = 0;
1274 1295
1296 /*
1297 * If user did not specify any addressing mode, or if the
1298 * prefered addressing mode specified by user is not supported
1299 * by FCF, allow fabric to pick the addressing mode.
1300 */
1275 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1301 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1276 new_fcf_record); 1302 new_fcf_record);
1277 /* 1303 /*
@@ -1297,12 +1323,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1297 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 1323 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1298 (*addr_mode & LPFC_FCF_FPMA)) 1324 (*addr_mode & LPFC_FCF_FPMA))
1299 *addr_mode = LPFC_FCF_FPMA; 1325 *addr_mode = LPFC_FCF_FPMA;
1300 /*
1301 * If user did not specify any addressing mode, use FPMA if
1302 * possible else use SPMA.
1303 */
1304 else if (*addr_mode & LPFC_FCF_FPMA)
1305 *addr_mode = LPFC_FCF_FPMA;
1306 1326
1307 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) 1327 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1308 *vlan_id = conn_entry->conn_rec.vlan_tag; 1328 *vlan_id = conn_entry->conn_rec.vlan_tag;
@@ -1864,7 +1884,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1864 vport->fc_flag &= ~FC_BYPASSED_MODE; 1884 vport->fc_flag &= ~FC_BYPASSED_MODE;
1865 spin_unlock_irq(shost->host_lock); 1885 spin_unlock_irq(shost->host_lock);
1866 1886
1867 if (((phba->fc_eventTag + 1) < la->eventTag) || 1887 if ((phba->fc_eventTag < la->eventTag) ||
1868 (phba->fc_eventTag == la->eventTag)) { 1888 (phba->fc_eventTag == la->eventTag)) {
1869 phba->fc_stat.LinkMultiEvent++; 1889 phba->fc_stat.LinkMultiEvent++;
1870 if (la->attType == AT_LINK_UP) 1890 if (la->attType == AT_LINK_UP)
@@ -2925,6 +2945,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2925 lpfc_no_rpi(phba, ndlp); 2945 lpfc_no_rpi(phba, ndlp);
2926 ndlp->nlp_rpi = 0; 2946 ndlp->nlp_rpi = 0;
2927 ndlp->nlp_flag &= ~NLP_RPI_VALID; 2947 ndlp->nlp_flag &= ~NLP_RPI_VALID;
2948 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2928 return 1; 2949 return 1;
2929 } 2950 }
2930 return 0; 2951 return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 02aa016b93e9..8a3a026667e4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1183,7 +1183,6 @@ typedef struct {
1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704 1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1186#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
1187 1186
1188#define JEDEC_ID_ADDRESS 0x0080001c 1187#define JEDEC_ID_ADDRESS 0x0080001c
1189#define FIREFLY_JEDEC_ID 0x1ACC 1188#define FIREFLY_JEDEC_ID 0x1ACC
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 39c34b3ad29d..2995d128f07f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -422,9 +422,9 @@ struct lpfc_wqe_generic{
422#define lpfc_wqe_gen_pri_WORD word10 422#define lpfc_wqe_gen_pri_WORD word10
423 uint32_t word11; 423 uint32_t word11;
424#define lpfc_wqe_gen_cq_id_SHIFT 16 424#define lpfc_wqe_gen_cq_id_SHIFT 16
425#define lpfc_wqe_gen_cq_id_MASK 0x000003FF 425#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
426#define lpfc_wqe_gen_cq_id_WORD word11 426#define lpfc_wqe_gen_cq_id_WORD word11
427#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff 427#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
428#define lpfc_wqe_gen_wqec_SHIFT 7 428#define lpfc_wqe_gen_wqec_SHIFT 7
429#define lpfc_wqe_gen_wqec_MASK 0x00000001 429#define lpfc_wqe_gen_wqec_MASK 0x00000001
430#define lpfc_wqe_gen_wqec_WORD word11 430#define lpfc_wqe_gen_wqec_WORD word11
@@ -1128,7 +1128,7 @@ struct fcf_record {
1128#define lpfc_fcf_record_mac_5_WORD word4 1128#define lpfc_fcf_record_mac_5_WORD word4
1129#define lpfc_fcf_record_fcf_avail_SHIFT 16 1129#define lpfc_fcf_record_fcf_avail_SHIFT 16
1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF 1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
1131#define lpfc_fcf_record_fc_avail_WORD word4 1131#define lpfc_fcf_record_fcf_avail_WORD word4
1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24 1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF 1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
1134#define lpfc_fcf_record_mac_addr_prov_WORD word4 1134#define lpfc_fcf_record_mac_addr_prov_WORD word4
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2f5907f92eea..fc67cc65c63b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -428,7 +428,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
430 phba->cfg_hba_queue_depth = 430 phba->cfg_hba_queue_depth =
431 mb->un.varRdConfig.max_xri + 1; 431 (mb->un.varRdConfig.max_xri + 1) -
432 lpfc_sli4_get_els_iocb_cnt(phba);
432 433
433 phba->lmt = mb->un.varRdConfig.lmt; 434 phba->lmt = mb->un.varRdConfig.lmt;
434 435
@@ -1646,10 +1647,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1646 oneConnect = 1; 1647 oneConnect = 1;
1647 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; 1648 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1648 break; 1649 break;
1649 case PCI_DEVICE_ID_TIGERSHARK_S:
1650 oneConnect = 1;
1651 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
1652 break;
1653 default: 1650 default:
1654 m = (typeof(m)){ NULL }; 1651 m = (typeof(m)){ NULL };
1655 break; 1652 break;
@@ -3543,6 +3540,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3543 3540
3544 /* Free the allocated rpi headers. */ 3541 /* Free the allocated rpi headers. */
3545 lpfc_sli4_remove_rpi_hdrs(phba); 3542 lpfc_sli4_remove_rpi_hdrs(phba);
3543 lpfc_sli4_remove_rpis(phba);
3546 3544
3547 /* Free the ELS sgl list */ 3545 /* Free the ELS sgl list */
3548 lpfc_free_active_sgl(phba); 3546 lpfc_free_active_sgl(phba);
@@ -7184,16 +7182,19 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7184{ 7182{
7185 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 7183 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7186 7184
7187 if (max_xri <= 100) 7185 if (phba->sli_rev == LPFC_SLI_REV4) {
7188 return 4; 7186 if (max_xri <= 100)
7189 else if (max_xri <= 256) 7187 return 4;
7190 return 8; 7188 else if (max_xri <= 256)
7191 else if (max_xri <= 512) 7189 return 8;
7192 return 16; 7190 else if (max_xri <= 512)
7193 else if (max_xri <= 1024) 7191 return 16;
7194 return 32; 7192 else if (max_xri <= 1024)
7195 else 7193 return 32;
7196 return 48; 7194 else
7195 return 48;
7196 } else
7197 return 0;
7197} 7198}
7198 7199
7199/** 7200/**
@@ -7642,7 +7643,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7642 7643
7643 switch (dev_id) { 7644 switch (dev_id) {
7644 case PCI_DEVICE_ID_TIGERSHARK: 7645 case PCI_DEVICE_ID_TIGERSHARK:
7645 case PCI_DEVICE_ID_TIGERSHARK_S:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid); 7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break; 7647 break;
7648 default: 7648 default:
@@ -7941,8 +7941,6 @@ static struct pci_device_id lpfc_id_table[] = {
7941 PCI_ANY_ID, PCI_ANY_ID, }, 7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, }, 7943 PCI_ANY_ID, PCI_ANY_ID, },
7944 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7945 PCI_ANY_ID, PCI_ANY_ID, },
7946 { 0 } 7944 { 0 }
7947}; 7945};
7948 7946
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index b9b451c09010..3423571dd1b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1631,6 +1631,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1631 /* In case of malloc fails, proceed with whatever we have */ 1631 /* In case of malloc fails, proceed with whatever we have */
1632 if (!viraddr) 1632 if (!viraddr)
1633 break; 1633 break;
1634 memset(viraddr, 0, PAGE_SIZE);
1634 mbox->sge_array->addr[pagen] = viraddr; 1635 mbox->sge_array->addr[pagen] = viraddr;
1635 /* Keep the first page for later sub-header construction */ 1636 /* Keep the first page for later sub-header construction */
1636 if (pagen == 0) 1637 if (pagen == 0)
@@ -1715,8 +1716,10 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1715 /* Set up host requested features. */ 1716 /* Set up host requested features. */
1716 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); 1717 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1717 1718
1718 /* Virtual fabrics and FIPs are not supported yet. */ 1719 if (phba->cfg_enable_fip)
1719 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0); 1720 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1721 else
1722 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
1720 1723
1721 /* Enable DIF (block guard) only if configured to do so. */ 1724 /* Enable DIF (block guard) only if configured to do so. */
1722 if (phba->cfg_enable_bg) 1725 if (phba->cfg_enable_bg)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 09f659f77bb3..3e74136f1ede 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -497,7 +497,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
498 else 498 else
499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
500 if ((ndlp->nlp_type & NLP_FABRIC) && 500 if ((ndlp->nlp_DID == Fabric_DID) &&
501 vport->port_type == LPFC_NPIV_PORT) { 501 vport->port_type == LPFC_NPIV_PORT) {
502 lpfc_linkdown_port(vport); 502 lpfc_linkdown_port(vport);
503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7991ba1980ae..da59c4f0168f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -116,6 +116,27 @@ lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
116} 116}
117 117
118/** 118/**
119 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
120 * @phba: Pointer to HBA object.
121 * @lpfc_cmd: lpfc scsi command object pointer.
122 *
123 * This function is called from the lpfc_prep_task_mgmt_cmd function to
124 * set the last bit in the response sge entry.
125 **/
126static void
127lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
128 struct lpfc_scsi_buf *lpfc_cmd)
129{
130 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
131 if (sgl) {
132 sgl += 1;
133 sgl->word2 = le32_to_cpu(sgl->word2);
134 bf_set(lpfc_sli4_sge_last, sgl, 1);
135 sgl->word2 = cpu_to_le32(sgl->word2);
136 }
137}
138
139/**
119 * lpfc_update_stats - Update statistical data for the command completion 140 * lpfc_update_stats - Update statistical data for the command completion
120 * @phba: Pointer to HBA object. 141 * @phba: Pointer to HBA object.
121 * @lpfc_cmd: lpfc scsi command object pointer. 142 * @lpfc_cmd: lpfc scsi command object pointer.
@@ -1978,7 +1999,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1978} 1999}
1979 2000
1980/** 2001/**
1981 * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev 2002 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
1982 * @phba: The HBA for which this call is being executed. 2003 * @phba: The HBA for which this call is being executed.
1983 * @psb: The scsi buffer which is going to be un-mapped. 2004 * @psb: The scsi buffer which is going to be un-mapped.
1984 * 2005 *
@@ -1986,7 +2007,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1986 * field of @lpfc_cmd for device with SLI-3 interface spec. 2007 * field of @lpfc_cmd for device with SLI-3 interface spec.
1987 **/ 2008 **/
1988static void 2009static void
1989lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 2010lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1990{ 2011{
1991 /* 2012 /*
1992 * There are only two special cases to consider. (1) the scsi command 2013 * There are only two special cases to consider. (1) the scsi command
@@ -2003,36 +2024,6 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2003} 2024}
2004 2025
2005/** 2026/**
2006 * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
2007 * @phba: The Hba for which this call is being executed.
2008 * @psb: The scsi buffer which is going to be un-mapped.
2009 *
2010 * This routine does DMA un-mapping of scatter gather list of scsi command
2011 * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
2012 * remove the sgl for this scsi buffer then we will do it here. For now
2013 * we should be able to just call the sli3 unprep routine.
2014 **/
2015static void
2016lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2017{
2018 lpfc_scsi_unprep_dma_buf_s3(phba, psb);
2019}
2020
2021/**
2022 * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
2023 * @phba: The Hba for which this call is being executed.
2024 * @psb: The scsi buffer which is going to be un-mapped.
2025 *
2026 * This routine does DMA un-mapping of scatter gather list of scsi command
2027 * field of @lpfc_cmd for device with SLI-4 interface spec.
2028 **/
2029static void
2030lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2031{
2032 phba->lpfc_scsi_unprep_dma_buf(phba, psb);
2033}
2034
2035/**
2036 * lpfc_handler_fcp_err - FCP response handler 2027 * lpfc_handler_fcp_err - FCP response handler
2037 * @vport: The virtual port for which this call is being executed. 2028 * @vport: The virtual port for which this call is being executed.
2038 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2029 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -2461,7 +2452,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
2461} 2452}
2462 2453
2463/** 2454/**
2464 * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev 2455 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
2465 * @vport: The virtual port for which this call is being executed. 2456 * @vport: The virtual port for which this call is being executed.
2466 * @lpfc_cmd: The scsi command which needs to send. 2457 * @lpfc_cmd: The scsi command which needs to send.
2467 * @pnode: Pointer to lpfc_nodelist. 2458 * @pnode: Pointer to lpfc_nodelist.
@@ -2470,7 +2461,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
2470 * to transfer for device with SLI3 interface spec. 2461 * to transfer for device with SLI3 interface spec.
2471 **/ 2462 **/
2472static void 2463static void
2473lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2464lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2474 struct lpfc_nodelist *pnode) 2465 struct lpfc_nodelist *pnode)
2475{ 2466{
2476 struct lpfc_hba *phba = vport->phba; 2467 struct lpfc_hba *phba = vport->phba;
@@ -2558,46 +2549,7 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2558} 2549}
2559 2550
2560/** 2551/**
2561 * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev 2552 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
2562 * @vport: The virtual port for which this call is being executed.
2563 * @lpfc_cmd: The scsi command which needs to send.
2564 * @pnode: Pointer to lpfc_nodelist.
2565 *
2566 * This routine initializes fcp_cmnd and iocb data structure from scsi command
2567 * to transfer for device with SLI4 interface spec.
2568 **/
2569static void
2570lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2571 struct lpfc_nodelist *pnode)
2572{
2573 /*
2574 * The prep cmnd routines do not touch the sgl or its
2575 * entries. We may not have to do anything different.
2576 * I will leave this function in place until we can
2577 * run some IO through the driver and determine if changes
2578 * are needed.
2579 */
2580 return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
2581}
2582
2583/**
2584 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
2585 * @vport: The virtual port for which this call is being executed.
2586 * @lpfc_cmd: The scsi command which needs to send.
2587 * @pnode: Pointer to lpfc_nodelist.
2588 *
2589 * This routine wraps the actual convert SCSI cmnd function pointer from
2590 * the lpfc_hba struct.
2591 **/
2592static inline void
2593lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2594 struct lpfc_nodelist *pnode)
2595{
2596 vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
2597}
2598
2599/**
2600 * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
2601 * @vport: The virtual port for which this call is being executed. 2553 * @vport: The virtual port for which this call is being executed.
2602 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2554 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2603 * @lun: Logical unit number. 2555 * @lun: Logical unit number.
@@ -2611,7 +2563,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2611 * 1 - Success 2563 * 1 - Success
2612 **/ 2564 **/
2613static int 2565static int
2614lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, 2566lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2615 struct lpfc_scsi_buf *lpfc_cmd, 2567 struct lpfc_scsi_buf *lpfc_cmd,
2616 unsigned int lun, 2568 unsigned int lun,
2617 uint8_t task_mgmt_cmd) 2569 uint8_t task_mgmt_cmd)
@@ -2653,68 +2605,13 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
2653 * The driver will provide the timeout mechanism. 2605 * The driver will provide the timeout mechanism.
2654 */ 2606 */
2655 piocb->ulpTimeout = 0; 2607 piocb->ulpTimeout = 0;
2656 } else { 2608 } else
2657 piocb->ulpTimeout = lpfc_cmd->timeout; 2609 piocb->ulpTimeout = lpfc_cmd->timeout;
2658 }
2659
2660 return 1;
2661}
2662
2663/**
2664 * lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
2665 * @vport: The virtual port for which this call is being executed.
2666 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2667 * @lun: Logical unit number.
2668 * @task_mgmt_cmd: SCSI task management command.
2669 *
2670 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2671 * for device with SLI-4 interface spec.
2672 *
2673 * Return codes:
2674 * 0 - Error
2675 * 1 - Success
2676 **/
2677static int
2678lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
2679 struct lpfc_scsi_buf *lpfc_cmd,
2680 unsigned int lun,
2681 uint8_t task_mgmt_cmd)
2682{
2683 /*
2684 * The prep cmnd routines do not touch the sgl or its
2685 * entries. We may not have to do anything different.
2686 * I will leave this function in place until we can
2687 * run some IO through the driver and determine if changes
2688 * are needed.
2689 */
2690 return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
2691 task_mgmt_cmd);
2692}
2693 2610
2694/** 2611 if (vport->phba->sli_rev == LPFC_SLI_REV4)
2695 * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info 2612 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
2696 * @vport: The virtual port for which this call is being executed.
2697 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2698 * @lun: Logical unit number.
2699 * @task_mgmt_cmd: SCSI task management command.
2700 *
2701 * This routine wraps the actual convert SCSI TM to FCP information unit
2702 * function pointer from the lpfc_hba struct.
2703 *
2704 * Return codes:
2705 * 0 - Error
2706 * 1 - Success
2707 **/
2708static inline int
2709lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2710 struct lpfc_scsi_buf *lpfc_cmd,
2711 unsigned int lun,
2712 uint8_t task_mgmt_cmd)
2713{
2714 struct lpfc_hba *phba = vport->phba;
2715 2613
2716 return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, 2614 return 1;
2717 task_mgmt_cmd);
2718} 2615}
2719 2616
2720/** 2617/**
@@ -2730,23 +2627,19 @@ int
2730lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 2627lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2731{ 2628{
2732 2629
2630 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2631 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2632 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2633
2733 switch (dev_grp) { 2634 switch (dev_grp) {
2734 case LPFC_PCI_DEV_LP: 2635 case LPFC_PCI_DEV_LP:
2735 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; 2636 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2736 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; 2637 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2737 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
2738 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
2739 phba->lpfc_scsi_prep_task_mgmt_cmd =
2740 lpfc_scsi_prep_task_mgmt_cmd_s3;
2741 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; 2638 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2742 break; 2639 break;
2743 case LPFC_PCI_DEV_OC: 2640 case LPFC_PCI_DEV_OC:
2744 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; 2641 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2745 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; 2642 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2746 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
2747 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
2748 phba->lpfc_scsi_prep_task_mgmt_cmd =
2749 lpfc_scsi_prep_task_mgmt_cmd_s4;
2750 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; 2643 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2751 break; 2644 break;
2752 default: 2645 default:
@@ -2783,72 +2676,6 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2783} 2676}
2784 2677
2785/** 2678/**
2786 * lpfc_scsi_tgt_reset - Target reset handler
2787 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
2788 * @vport: The virtual port for which this call is being executed.
2789 * @tgt_id: Target ID.
2790 * @lun: Lun number.
2791 * @rdata: Pointer to lpfc_rport_data.
2792 *
2793 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
2794 *
2795 * Return Code:
2796 * 0x2003 - Error
2797 * 0x2002 - Success.
2798 **/
2799static int
2800lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2801 unsigned tgt_id, unsigned int lun,
2802 struct lpfc_rport_data *rdata)
2803{
2804 struct lpfc_hba *phba = vport->phba;
2805 struct lpfc_iocbq *iocbq;
2806 struct lpfc_iocbq *iocbqrsp;
2807 int ret;
2808 int status;
2809
2810 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
2811 return FAILED;
2812
2813 lpfc_cmd->rdata = rdata;
2814 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2815 FCP_TARGET_RESET);
2816 if (!status)
2817 return FAILED;
2818
2819 iocbq = &lpfc_cmd->cur_iocbq;
2820 iocbqrsp = lpfc_sli_get_iocbq(phba);
2821
2822 if (!iocbqrsp)
2823 return FAILED;
2824
2825 /* Issue Target Reset to TGT <num> */
2826 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2827 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2828 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2829 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2830 iocbq, iocbqrsp, lpfc_cmd->timeout);
2831 if (status != IOCB_SUCCESS) {
2832 if (status == IOCB_TIMEDOUT) {
2833 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2834 ret = TIMEOUT_ERROR;
2835 } else
2836 ret = FAILED;
2837 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2838 } else {
2839 ret = SUCCESS;
2840 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
2841 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
2842 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2843 (lpfc_cmd->result & IOERR_DRVR_MASK))
2844 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2845 }
2846
2847 lpfc_sli_release_iocbq(phba, iocbqrsp);
2848 return ret;
2849}
2850
2851/**
2852 * lpfc_info - Info entry point of scsi_host_template data structure 2679 * lpfc_info - Info entry point of scsi_host_template data structure
2853 * @host: The scsi host for which this call is being executed. 2680 * @host: The scsi host for which this call is being executed.
2854 * 2681 *
@@ -3228,156 +3055,334 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3228 return ret; 3055 return ret;
3229} 3056}
3230 3057
3058static char *
3059lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
3060{
3061 switch (task_mgmt_cmd) {
3062 case FCP_ABORT_TASK_SET:
3063 return "ABORT_TASK_SET";
3064 case FCP_CLEAR_TASK_SET:
3065 return "FCP_CLEAR_TASK_SET";
3066 case FCP_BUS_RESET:
3067 return "FCP_BUS_RESET";
3068 case FCP_LUN_RESET:
3069 return "FCP_LUN_RESET";
3070 case FCP_TARGET_RESET:
3071 return "FCP_TARGET_RESET";
3072 case FCP_CLEAR_ACA:
3073 return "FCP_CLEAR_ACA";
3074 case FCP_TERMINATE_TASK:
3075 return "FCP_TERMINATE_TASK";
3076 default:
3077 return "unknown";
3078 }
3079}
3080
3231/** 3081/**
3232 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 3082 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
3233 * @cmnd: Pointer to scsi_cmnd data structure. 3083 * @vport: The virtual port for which this call is being executed.
3084 * @rdata: Pointer to remote port local data
3085 * @tgt_id: Target ID of remote device.
3086 * @lun_id: Lun number for the TMF
3087 * @task_mgmt_cmd: type of TMF to send
3234 * 3088 *
3235 * This routine does a device reset by sending a TARGET_RESET task management 3089 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
3236 * command. 3090 * a remote port.
3237 * 3091 *
3238 * Return code : 3092 * Return Code:
3239 * 0x2003 - Error 3093 * 0x2003 - Error
3240 * 0x2002 - Success 3094 * 0x2002 - Success.
3241 **/ 3095 **/
3242static int 3096static int
3243lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 3097lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3098 unsigned tgt_id, unsigned int lun_id,
3099 uint8_t task_mgmt_cmd)
3244{ 3100{
3245 struct Scsi_Host *shost = cmnd->device->host;
3246 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3247 struct lpfc_hba *phba = vport->phba; 3101 struct lpfc_hba *phba = vport->phba;
3248 struct lpfc_scsi_buf *lpfc_cmd; 3102 struct lpfc_scsi_buf *lpfc_cmd;
3249 struct lpfc_iocbq *iocbq, *iocbqrsp; 3103 struct lpfc_iocbq *iocbq;
3250 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3104 struct lpfc_iocbq *iocbqrsp;
3251 struct lpfc_nodelist *pnode = rdata->pnode; 3105 int ret;
3252 unsigned long later;
3253 int ret = SUCCESS;
3254 int status; 3106 int status;
3255 int cnt;
3256 struct lpfc_scsi_event_header scsi_event;
3257 3107
3258 lpfc_block_error_handler(cmnd); 3108 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
3259 /*
3260 * If target is not in a MAPPED state, delay the reset until
3261 * target is rediscovered or devloss timeout expires.
3262 */
3263 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3264 while (time_after(later, jiffies)) {
3265 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3266 return FAILED;
3267 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3268 break;
3269 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3270 rdata = cmnd->device->hostdata;
3271 if (!rdata)
3272 break;
3273 pnode = rdata->pnode;
3274 }
3275
3276 scsi_event.event_type = FC_REG_SCSI_EVENT;
3277 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3278 scsi_event.lun = 0;
3279 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3280 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3281
3282 fc_host_post_vendor_event(shost,
3283 fc_get_event_number(),
3284 sizeof(scsi_event),
3285 (char *)&scsi_event,
3286 LPFC_NL_VENDOR_ID);
3287
3288 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
3289 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3290 "0721 LUN Reset rport "
3291 "failure: msec x%x rdata x%p\n",
3292 jiffies_to_msecs(jiffies - later), rdata);
3293 return FAILED; 3109 return FAILED;
3294 } 3110
3295 lpfc_cmd = lpfc_get_scsi_buf(phba); 3111 lpfc_cmd = lpfc_get_scsi_buf(phba);
3296 if (lpfc_cmd == NULL) 3112 if (lpfc_cmd == NULL)
3297 return FAILED; 3113 return FAILED;
3298 lpfc_cmd->timeout = 60; 3114 lpfc_cmd->timeout = 60;
3299 lpfc_cmd->rdata = rdata; 3115 lpfc_cmd->rdata = rdata;
3300 3116
3301 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, 3117 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
3302 cmnd->device->lun, 3118 task_mgmt_cmd);
3303 FCP_TARGET_RESET);
3304 if (!status) { 3119 if (!status) {
3305 lpfc_release_scsi_buf(phba, lpfc_cmd); 3120 lpfc_release_scsi_buf(phba, lpfc_cmd);
3306 return FAILED; 3121 return FAILED;
3307 } 3122 }
3308 iocbq = &lpfc_cmd->cur_iocbq;
3309 3123
3310 /* get a buffer for this IOCB command response */ 3124 iocbq = &lpfc_cmd->cur_iocbq;
3311 iocbqrsp = lpfc_sli_get_iocbq(phba); 3125 iocbqrsp = lpfc_sli_get_iocbq(phba);
3312 if (iocbqrsp == NULL) { 3126 if (iocbqrsp == NULL) {
3313 lpfc_release_scsi_buf(phba, lpfc_cmd); 3127 lpfc_release_scsi_buf(phba, lpfc_cmd);
3314 return FAILED; 3128 return FAILED;
3315 } 3129 }
3130
3316 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3131 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3317 "0703 Issue target reset to TGT %d LUN %d " 3132 "0702 Issue %s to TGT %d LUN %d "
3318 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 3133 "rpi x%x nlp_flag x%x\n",
3319 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 3134 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3135 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
3136
3320 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 3137 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3321 iocbq, iocbqrsp, lpfc_cmd->timeout); 3138 iocbq, iocbqrsp, lpfc_cmd->timeout);
3322 if (status == IOCB_TIMEDOUT) { 3139 if (status != IOCB_SUCCESS) {
3323 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3140 if (status == IOCB_TIMEDOUT) {
3324 ret = TIMEOUT_ERROR; 3141 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
3325 } else { 3142 ret = TIMEOUT_ERROR;
3326 if (status != IOCB_SUCCESS) 3143 } else
3327 ret = FAILED; 3144 ret = FAILED;
3328 lpfc_release_scsi_buf(phba, lpfc_cmd); 3145 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3329 } 3146 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3330 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3147 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
3331 "0713 SCSI layer issued device reset (%d, %d) " 3148 lpfc_taskmgmt_name(task_mgmt_cmd),
3332 "return x%x status x%x result x%x\n", 3149 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3333 cmnd->device->id, cmnd->device->lun, ret,
3334 iocbqrsp->iocb.ulpStatus,
3335 iocbqrsp->iocb.un.ulpWord[4]); 3150 iocbqrsp->iocb.un.ulpWord[4]);
3151 } else
3152 ret = SUCCESS;
3153
3336 lpfc_sli_release_iocbq(phba, iocbqrsp); 3154 lpfc_sli_release_iocbq(phba, iocbqrsp);
3337 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun, 3155
3338 LPFC_CTX_TGT); 3156 if (ret != TIMEOUT_ERROR)
3157 lpfc_release_scsi_buf(phba, lpfc_cmd);
3158
3159 return ret;
3160}
3161
3162/**
3163 * lpfc_chk_tgt_mapped -
3164 * @vport: The virtual port to check on
3165 * @cmnd: Pointer to scsi_cmnd data structure.
3166 *
3167 * This routine delays until the scsi target (aka rport) for the
3168 * command exists (is present and logged in) or we declare it non-existent.
3169 *
3170 * Return code :
3171 * 0x2003 - Error
3172 * 0x2002 - Success
3173 **/
3174static int
3175lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3176{
3177 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3178 struct lpfc_nodelist *pnode = rdata->pnode;
3179 unsigned long later;
3180
3181 /*
3182 * If target is not in a MAPPED state, delay until
3183 * target is rediscovered or devloss timeout expires.
3184 */
3185 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3186 while (time_after(later, jiffies)) {
3187 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3188 return FAILED;
3189 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3190 return SUCCESS;
3191 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3192 rdata = cmnd->device->hostdata;
3193 if (!rdata)
3194 return FAILED;
3195 pnode = rdata->pnode;
3196 }
3197 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
3198 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3199 return FAILED;
3200 return SUCCESS;
3201}
3202
3203/**
3204 * lpfc_reset_flush_io_context -
3205 * @vport: The virtual port (scsi_host) for the flush context
3206 * @tgt_id: If aborting by Target contect - specifies the target id
3207 * @lun_id: If aborting by Lun context - specifies the lun id
3208 * @context: specifies the context level to flush at.
3209 *
3210 * After a reset condition via TMF, we need to flush orphaned i/o
3211 * contexts from the adapter. This routine aborts any contexts
3212 * outstanding, then waits for their completions. The wait is
3213 * bounded by devloss_tmo though.
3214 *
3215 * Return code :
3216 * 0x2003 - Error
3217 * 0x2002 - Success
3218 **/
3219static int
3220lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
3221 uint64_t lun_id, lpfc_ctx_cmd context)
3222{
3223 struct lpfc_hba *phba = vport->phba;
3224 unsigned long later;
3225 int cnt;
3226
3227 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3339 if (cnt) 3228 if (cnt)
3340 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 3229 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
3341 cmnd->device->id, cmnd->device->lun, 3230 tgt_id, lun_id, context);
3342 LPFC_CTX_TGT);
3343 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 3231 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3344 while (time_after(later, jiffies) && cnt) { 3232 while (time_after(later, jiffies) && cnt) {
3345 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 3233 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
3346 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, 3234 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3347 cmnd->device->lun, LPFC_CTX_TGT);
3348 } 3235 }
3349 if (cnt) { 3236 if (cnt) {
3350 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3237 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3351 "0719 device reset I/O flush failure: " 3238 "0724 I/O flush failure for context %s : cnt x%x\n",
3352 "cnt x%x\n", cnt); 3239 ((context == LPFC_CTX_LUN) ? "LUN" :
3353 ret = FAILED; 3240 ((context == LPFC_CTX_TGT) ? "TGT" :
3241 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
3242 cnt);
3243 return FAILED;
3354 } 3244 }
3355 return ret; 3245 return SUCCESS;
3246}
3247
3248/**
3249 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
3250 * @cmnd: Pointer to scsi_cmnd data structure.
3251 *
3252 * This routine does a device reset by sending a LUN_RESET task management
3253 * command.
3254 *
3255 * Return code :
3256 * 0x2003 - Error
3257 * 0x2002 - Success
3258 **/
3259static int
3260lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3261{
3262 struct Scsi_Host *shost = cmnd->device->host;
3263 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3264 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3265 struct lpfc_nodelist *pnode = rdata->pnode;
3266 unsigned tgt_id = cmnd->device->id;
3267 unsigned int lun_id = cmnd->device->lun;
3268 struct lpfc_scsi_event_header scsi_event;
3269 int status;
3270
3271 lpfc_block_error_handler(cmnd);
3272
3273 status = lpfc_chk_tgt_mapped(vport, cmnd);
3274 if (status == FAILED) {
3275 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3276 "0721 Device Reset rport failure: rdata x%p\n", rdata);
3277 return FAILED;
3278 }
3279
3280 scsi_event.event_type = FC_REG_SCSI_EVENT;
3281 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
3282 scsi_event.lun = lun_id;
3283 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3284 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3285
3286 fc_host_post_vendor_event(shost, fc_get_event_number(),
3287 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3288
3289 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3290 FCP_LUN_RESET);
3291
3292 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3293 "0713 SCSI layer issued Device Reset (%d, %d) "
3294 "return x%x\n", tgt_id, lun_id, status);
3295
3296 /*
3297 * We have to clean up i/o as : they may be orphaned by the TMF;
3298 * or if the TMF failed, they may be in an indeterminate state.
3299 * So, continue on.
3300 * We will report success if all the i/o aborts successfully.
3301 */
3302 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3303 LPFC_CTX_LUN);
3304 return status;
3305}
3306
3307/**
3308 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
3309 * @cmnd: Pointer to scsi_cmnd data structure.
3310 *
3311 * This routine does a target reset by sending a TARGET_RESET task management
3312 * command.
3313 *
3314 * Return code :
3315 * 0x2003 - Error
3316 * 0x2002 - Success
3317 **/
3318static int
3319lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3320{
3321 struct Scsi_Host *shost = cmnd->device->host;
3322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3323 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3324 struct lpfc_nodelist *pnode = rdata->pnode;
3325 unsigned tgt_id = cmnd->device->id;
3326 unsigned int lun_id = cmnd->device->lun;
3327 struct lpfc_scsi_event_header scsi_event;
3328 int status;
3329
3330 lpfc_block_error_handler(cmnd);
3331
3332 status = lpfc_chk_tgt_mapped(vport, cmnd);
3333 if (status == FAILED) {
3334 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3335 "0722 Target Reset rport failure: rdata x%p\n", rdata);
3336 return FAILED;
3337 }
3338
3339 scsi_event.event_type = FC_REG_SCSI_EVENT;
3340 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3341 scsi_event.lun = 0;
3342 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3343 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3344
3345 fc_host_post_vendor_event(shost, fc_get_event_number(),
3346 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3347
3348 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3349 FCP_TARGET_RESET);
3350
3351 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3352 "0723 SCSI layer issued Target Reset (%d, %d) "
3353 "return x%x\n", tgt_id, lun_id, status);
3354
3355 /*
3356 * We have to clean up i/o as : they may be orphaned by the TMF;
3357 * or if the TMF failed, they may be in an indeterminate state.
3358 * So, continue on.
3359 * We will report success if all the i/o aborts successfully.
3360 */
3361 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3362 LPFC_CTX_TGT);
3363 return status;
3356} 3364}
3357 3365
3358/** 3366/**
3359 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 3367 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
3360 * @cmnd: Pointer to scsi_cmnd data structure. 3368 * @cmnd: Pointer to scsi_cmnd data structure.
3361 * 3369 *
3362 * This routine does target reset to all target on @cmnd->device->host. 3370 * This routine does target reset to all targets on @cmnd->device->host.
3371 * This emulates Parallel SCSI Bus Reset Semantics.
3363 * 3372 *
3364 * Return Code: 3373 * Return code :
3365 * 0x2003 - Error 3374 * 0x2003 - Error
3366 * 0x2002 - Success 3375 * 0x2002 - Success
3367 **/ 3376 **/
3368static int 3377static int
3369lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 3378lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3370{ 3379{
3371 struct Scsi_Host *shost = cmnd->device->host; 3380 struct Scsi_Host *shost = cmnd->device->host;
3372 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3381 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3373 struct lpfc_hba *phba = vport->phba;
3374 struct lpfc_nodelist *ndlp = NULL; 3382 struct lpfc_nodelist *ndlp = NULL;
3375 int match;
3376 int ret = SUCCESS, status = SUCCESS, i;
3377 int cnt;
3378 struct lpfc_scsi_buf * lpfc_cmd;
3379 unsigned long later;
3380 struct lpfc_scsi_event_header scsi_event; 3383 struct lpfc_scsi_event_header scsi_event;
3384 int match;
3385 int ret = SUCCESS, status, i;
3381 3386
3382 scsi_event.event_type = FC_REG_SCSI_EVENT; 3387 scsi_event.event_type = FC_REG_SCSI_EVENT;
3383 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 3388 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
@@ -3385,13 +3390,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3385 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 3390 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
3386 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 3391 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
3387 3392
3388 fc_host_post_vendor_event(shost, 3393 fc_host_post_vendor_event(shost, fc_get_event_number(),
3389 fc_get_event_number(), 3394 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3390 sizeof(scsi_event),
3391 (char *)&scsi_event,
3392 LPFC_NL_VENDOR_ID);
3393 3395
3394 lpfc_block_error_handler(cmnd); 3396 lpfc_block_error_handler(cmnd);
3397
3395 /* 3398 /*
3396 * Since the driver manages a single bus device, reset all 3399 * Since the driver manages a single bus device, reset all
3397 * targets known to the driver. Should any target reset 3400 * targets known to the driver. Should any target reset
@@ -3414,16 +3417,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3414 spin_unlock_irq(shost->host_lock); 3417 spin_unlock_irq(shost->host_lock);
3415 if (!match) 3418 if (!match)
3416 continue; 3419 continue;
3417 lpfc_cmd = lpfc_get_scsi_buf(phba); 3420
3418 if (lpfc_cmd) { 3421 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
3419 lpfc_cmd->timeout = 60; 3422 i, 0, FCP_TARGET_RESET);
3420 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, 3423
3421 cmnd->device->lun, 3424 if (status != SUCCESS) {
3422 ndlp->rport->dd_data);
3423 if (status != TIMEOUT_ERROR)
3424 lpfc_release_scsi_buf(phba, lpfc_cmd);
3425 }
3426 if (!lpfc_cmd || status != SUCCESS) {
3427 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3425 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3428 "0700 Bus Reset on target %d failed\n", 3426 "0700 Bus Reset on target %d failed\n",
3429 i); 3427 i);
@@ -3431,25 +3429,16 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3431 } 3429 }
3432 } 3430 }
3433 /* 3431 /*
3434 * All outstanding txcmplq I/Os should have been aborted by 3432 * We have to clean up i/o as : they may be orphaned by the TMFs
3435 * the targets. Unfortunately, some targets do not abide by 3433 * above; or if any of the TMFs failed, they may be in an
3436 * this forcing the driver to double check. 3434 * indeterminate state.
3435 * We will report success if all the i/o aborts successfully.
3437 */ 3436 */
3438 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); 3437
3439 if (cnt) 3438 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
3440 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 3439 if (status != SUCCESS)
3441 0, 0, LPFC_CTX_HOST);
3442 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3443 while (time_after(later, jiffies) && cnt) {
3444 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
3445 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
3446 }
3447 if (cnt) {
3448 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3449 "0715 Bus Reset I/O flush failure: "
3450 "cnt x%x left x%x\n", cnt, i);
3451 ret = FAILED; 3440 ret = FAILED;
3452 } 3441
3453 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3442 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3454 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 3443 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
3455 return ret; 3444 return ret;
@@ -3582,7 +3571,8 @@ struct scsi_host_template lpfc_template = {
3582 .info = lpfc_info, 3571 .info = lpfc_info,
3583 .queuecommand = lpfc_queuecommand, 3572 .queuecommand = lpfc_queuecommand,
3584 .eh_abort_handler = lpfc_abort_handler, 3573 .eh_abort_handler = lpfc_abort_handler,
3585 .eh_device_reset_handler= lpfc_device_reset_handler, 3574 .eh_device_reset_handler = lpfc_device_reset_handler,
3575 .eh_target_reset_handler = lpfc_target_reset_handler,
3586 .eh_bus_reset_handler = lpfc_bus_reset_handler, 3576 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3587 .slave_alloc = lpfc_slave_alloc, 3577 .slave_alloc = lpfc_slave_alloc,
3588 .slave_configure = lpfc_slave_configure, 3578 .slave_configure = lpfc_slave_configure,
@@ -3602,7 +3592,8 @@ struct scsi_host_template lpfc_vport_template = {
3602 .info = lpfc_info, 3592 .info = lpfc_info,
3603 .queuecommand = lpfc_queuecommand, 3593 .queuecommand = lpfc_queuecommand,
3604 .eh_abort_handler = lpfc_abort_handler, 3594 .eh_abort_handler = lpfc_abort_handler,
3605 .eh_device_reset_handler= lpfc_device_reset_handler, 3595 .eh_device_reset_handler = lpfc_device_reset_handler,
3596 .eh_target_reset_handler = lpfc_target_reset_handler,
3606 .eh_bus_reset_handler = lpfc_bus_reset_handler, 3597 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3607 .slave_alloc = lpfc_slave_alloc, 3598 .slave_alloc = lpfc_slave_alloc,
3608 .slave_configure = lpfc_slave_configure, 3599 .slave_configure = lpfc_slave_configure,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index ff04daf18f48..acc43b061ba1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4139,8 +4139,11 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4139 return -EIO; 4139 return -EIO;
4140 } 4140 }
4141 data_length = mqe->un.mb_words[5]; 4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE) 4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
4143 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4144 kfree(mp);
4143 return -EIO; 4145 return -EIO;
4146 }
4144 4147
4145 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4148 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4146 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4149 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -4211,27 +4214,6 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4211 return -EIO; 4214 return -EIO;
4212 } 4215 }
4213 4216
4214 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4215 "(%d):0380 Mailbox cmd x%x Status x%x "
4216 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4217 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4218 "CQ: x%x x%x x%x x%x\n",
4219 mboxq->vport ? mboxq->vport->vpi : 0,
4220 bf_get(lpfc_mqe_command, mqe),
4221 bf_get(lpfc_mqe_status, mqe),
4222 mqe->un.mb_words[0], mqe->un.mb_words[1],
4223 mqe->un.mb_words[2], mqe->un.mb_words[3],
4224 mqe->un.mb_words[4], mqe->un.mb_words[5],
4225 mqe->un.mb_words[6], mqe->un.mb_words[7],
4226 mqe->un.mb_words[8], mqe->un.mb_words[9],
4227 mqe->un.mb_words[10], mqe->un.mb_words[11],
4228 mqe->un.mb_words[12], mqe->un.mb_words[13],
4229 mqe->un.mb_words[14], mqe->un.mb_words[15],
4230 mqe->un.mb_words[16], mqe->un.mb_words[50],
4231 mboxq->mcqe.word0,
4232 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4233 mboxq->mcqe.trailer);
4234
4235 /* 4217 /*
4236 * The available vpd length cannot be bigger than the 4218 * The available vpd length cannot be bigger than the
4237 * DMA buffer passed to the port. Catch the less than 4219 * DMA buffer passed to the port. Catch the less than
@@ -4337,21 +4319,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4337 goto out_free_vpd; 4319 goto out_free_vpd;
4338 4320
4339 mqe = &mboxq->u.mqe; 4321 mqe = &mboxq->u.mqe;
4340 if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, 4322 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4341 &mqe->un.read_rev) != LPFC_SLI_REV4) || 4323 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4342 (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { 4324 phba->hba_flag |= HBA_FCOE_SUPPORT;
4325 if (phba->sli_rev != LPFC_SLI_REV4 ||
4326 !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4327 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4344 "0376 READ_REV Error. SLI Level %d " 4328 "0376 READ_REV Error. SLI Level %d "
4345 "FCoE enabled %d\n", 4329 "FCoE enabled %d\n",
4346 bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), 4330 phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
4347 bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
4348 rc = -EIO; 4331 rc = -EIO;
4349 goto out_free_vpd; 4332 goto out_free_vpd;
4350 } 4333 }
4351 /* Single threaded at this point, no need for lock */
4352 spin_lock_irq(&phba->hbalock);
4353 phba->hba_flag |= HBA_FCOE_SUPPORT;
4354 spin_unlock_irq(&phba->hbalock);
4355 /* 4334 /*
4356 * Evaluate the read rev and vpd data. Populate the driver 4335 * Evaluate the read rev and vpd data. Populate the driver
4357 * state with the results. If this routine fails, the failure 4336 * state with the results. If this routine fails, the failure
@@ -4365,8 +4344,32 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4365 rc = 0; 4344 rc = 0;
4366 } 4345 }
4367 4346
4368 /* By now, we should determine the SLI revision, hard code for now */ 4347 /* Save information as VPD data */
4369 phba->sli_rev = LPFC_SLI_REV4; 4348 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
4349 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
4350 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
4351 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
4352 &mqe->un.read_rev);
4353 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
4354 &mqe->un.read_rev);
4355 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
4356 &mqe->un.read_rev);
4357 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
4358 &mqe->un.read_rev);
4359 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
4360 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
4361 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
4362 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
4363 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
4364 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
4365 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4366 "(%d):0380 READ_REV Status x%x "
4367 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
4368 mboxq->vport ? mboxq->vport->vpi : 0,
4369 bf_get(lpfc_mqe_status, mqe),
4370 phba->vpd.rev.opFwName,
4371 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
4372 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
4370 4373
4371 /* 4374 /*
4372 * Discover the port's supported feature set and match it against the 4375 * Discover the port's supported feature set and match it against the
@@ -4491,8 +4494,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4491 rc = -ENODEV; 4494 rc = -ENODEV;
4492 goto out_free_vpd; 4495 goto out_free_vpd;
4493 } 4496 }
4494 /* Temporary initialization of lpfc_fip_flag to non-fip */ 4497 if (phba->cfg_enable_fip)
4495 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); 4498 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
4499 else
4500 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4496 4501
4497 /* Set up all the queues to the device */ 4502 /* Set up all the queues to the device */
4498 rc = lpfc_sli4_queue_setup(phba); 4503 rc = lpfc_sli4_queue_setup(phba);
@@ -5030,6 +5035,92 @@ out_not_finished:
5030} 5035}
5031 5036
5032/** 5037/**
5038 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
5039 * @phba: Pointer to HBA context object.
5040 *
5041 * The function blocks the posting of SLI4 asynchronous mailbox commands from
5042 * the driver internal pending mailbox queue. It will then try to wait out the
5043 * possible outstanding mailbox command before returning.
5044 *
5045 * Returns:
5046 * 0 - the outstanding mailbox command completed; 1 - the wait for
5047 * the outstanding mailbox command timed out.
5048 **/
5049static int
5050lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
5051{
5052 struct lpfc_sli *psli = &phba->sli;
5053 uint8_t actcmd = MBX_HEARTBEAT;
5054 int rc = 0;
5055 unsigned long timeout;
5056
5057 /* Mark the asynchronous mailbox command posting as blocked */
5058 spin_lock_irq(&phba->hbalock);
5059 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5060 if (phba->sli.mbox_active)
5061 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5062 spin_unlock_irq(&phba->hbalock);
5063 /* Determine how long we might wait for the active mailbox
5064 * command to be gracefully completed by firmware.
5065 */
5066 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
5067 jiffies;
5068 /* Wait for the outstanding mailbox command to complete */
5069 while (phba->sli.mbox_active) {
5070 /* Check active mailbox complete status every 2ms */
5071 msleep(2);
5072 if (time_after(jiffies, timeout)) {
5073 /* Timed out, mark the outstanding cmd as not complete */
5074 rc = 1;
5075 break;
5076 }
5077 }
5078
5079 /* Could not cleanly block async mailbox command posting, fail it */
5080 if (rc) {
5081 spin_lock_irq(&phba->hbalock);
5082 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5083 spin_unlock_irq(&phba->hbalock);
5084 }
5085 return rc;
5086}
5087
5088/**
5089 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
5090 * @phba: Pointer to HBA context object.
5091 *
5092 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
5093 * commands from the driver internal pending mailbox queue. It makes sure
5094 * that there is no outstanding mailbox command before resuming posting
5095 * asynchronous mailbox commands. If, for any reason, there is an outstanding
5096 * mailbox command, it will try to wait it out before resuming asynchronous
5097 * mailbox command posting.
5098 **/
5099static void
5100lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
5101{
5102 struct lpfc_sli *psli = &phba->sli;
5103
5104 spin_lock_irq(&phba->hbalock);
5105 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5106 /* Asynchronous mailbox posting is not blocked, do nothing */
5107 spin_unlock_irq(&phba->hbalock);
5108 return;
5109 }
5110
5111 /* The outstanding synchronous mailbox command is guaranteed to be done,
5112 * whether successful or timed out; after a timeout the outstanding mailbox
5113 * command is always removed, so just unblock posting of async
5114 * mailbox commands and resume.
5115 */
5116 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5117 spin_unlock_irq(&phba->hbalock);
5118
5119 /* wake up worker thread to post asynchronous mailbox command */
5120 lpfc_worker_wake_up(phba);
5121}
5122
5123/**
5033 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 5124 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5034 * @phba: Pointer to HBA context object. 5125 * @phba: Pointer to HBA context object.
5035 * @mboxq: Pointer to mailbox object. 5126 * @mboxq: Pointer to mailbox object.
@@ -5204,14 +5295,35 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5204 psli->sli_flag, flag); 5295 psli->sli_flag, flag);
5205 return rc; 5296 return rc;
5206 } else if (flag == MBX_POLL) { 5297 } else if (flag == MBX_POLL) {
5207 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5298 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5208 "(%d):2542 Mailbox command x%x (x%x) " 5299 "(%d):2542 Try to issue mailbox command "
5209 "cannot issue Data: x%x x%x\n", 5300 "x%x (x%x) synchronously ahead of async "
5301 "mailbox command queue: x%x x%x\n",
5210 mboxq->vport ? mboxq->vport->vpi : 0, 5302 mboxq->vport ? mboxq->vport->vpi : 0,
5211 mboxq->u.mb.mbxCommand, 5303 mboxq->u.mb.mbxCommand,
5212 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5304 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5213 psli->sli_flag, flag); 5305 psli->sli_flag, flag);
5214 return -EIO; 5306 /* Try to block the asynchronous mailbox posting */
5307 rc = lpfc_sli4_async_mbox_block(phba);
5308 if (!rc) {
5309 /* Successfully blocked, now issue sync mbox cmd */
5310 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5311 if (rc != MBX_SUCCESS)
5312 lpfc_printf_log(phba, KERN_ERR,
5313 LOG_MBOX | LOG_SLI,
5314 "(%d):2597 Mailbox command "
5315 "x%x (x%x) cannot issue "
5316 "Data: x%x x%x\n",
5317 mboxq->vport ?
5318 mboxq->vport->vpi : 0,
5319 mboxq->u.mb.mbxCommand,
5320 lpfc_sli4_mbox_opcode_get(phba,
5321 mboxq),
5322 psli->sli_flag, flag);
5323 /* Unblock the async mailbox posting afterward */
5324 lpfc_sli4_async_mbox_unblock(phba);
5325 }
5326 return rc;
5215 } 5327 }
5216 5328
5217 /* Now, interrupt mode asynchronous mailbox command */ 5329
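
Taken together, lpfc_sli4_async_mbox_block(), lpfc_sli4_post_sync_mbox() and lpfc_sli4_async_mbox_unblock() let a polled mailbox command be serviced even while asynchronous posting is in use, instead of being rejected outright as before. A condensed, non-compilable sketch of the new MBX_POLL path (simplified from the hunk above, with the error logging omitted):

	/* Sketch: how the new MBX_POLL branch handles a synchronous request */
	if (flag == MBX_POLL) {
		/* Stop the worker from posting further async mailbox commands
		 * and wait out any mailbox command that is already active.
		 */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Port mailbox is quiet; use the bootstrap mailbox */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			/* Resume async posting and wake the worker thread */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}
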
@@ -5749,18 +5861,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5749 5861
5750 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); 5862 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5751 /* The fcp commands will set command type */ 5863 /* The fcp commands will set command type */
5752 if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip)) 5864 if (iocbq->iocb_flag & LPFC_IO_FCP)
5753 command_type = ELS_COMMAND_NON_FIP;
5754 else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
5755 command_type = ELS_COMMAND_FIP;
5756 else if (iocbq->iocb_flag & LPFC_IO_FCP)
5757 command_type = FCP_COMMAND; 5865 command_type = FCP_COMMAND;
5758 else { 5866 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
5759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5867 command_type = ELS_COMMAND_FIP;
5760 "2019 Invalid cmd 0x%x\n", 5868 else
5761 iocbq->iocb.ulpCommand); 5869 command_type = ELS_COMMAND_NON_FIP;
5762 return IOCB_ERROR; 5870
5763 }
5764 /* Some of the fields are in the right position already */ 5871 /* Some of the fields are in the right position already */
5765 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 5872 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5766 abort_tag = (uint32_t) iocbq->iotag; 5873 abort_tag = (uint32_t) iocbq->iotag;
@@ -5814,11 +5921,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5814 bf_set(lpfc_wqe_gen_context, &wqe->generic, 5921 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5815 iocbq->iocb.ulpContext); 5922 iocbq->iocb.ulpContext);
5816 5923
5817 if (iocbq->vport->fc_myDID != 0) {
5818 bf_set(els_req64_sid, &wqe->els_req,
5819 iocbq->vport->fc_myDID);
5820 bf_set(els_req64_sp, &wqe->els_req, 1);
5821 }
5822 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); 5924 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5823 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 5925 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5824 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 5926 /* CCP CCPE PV PRI in word10 were set in the memcpy */
@@ -5877,14 +5979,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5877 * is set and we are sending our 2nd or greater command on 5979 * is set and we are sending our 2nd or greater command on
5878 * this exchange. 5980 * this exchange.
5879 */ 5981 */
5982 /* Always open the exchange */
5983 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5880 5984
5881 /* ALLOW read & write to fall through to ICMD64 */ 5985 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5986 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5987 break;
5882 case CMD_FCP_ICMND64_CR: 5988 case CMD_FCP_ICMND64_CR:
5883 /* Always open the exchange */ 5989 /* Always open the exchange */
5884 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 5990 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5885 5991
5992 wqe->words[4] = 0;
5886 wqe->words[10] &= 0xffff0000; /* zero out ebde count */ 5993 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5887 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 5994 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5888 break; 5995 break;
5889 case CMD_GEN_REQUEST64_CR: 5996 case CMD_GEN_REQUEST64_CR:
5890 /* word3 command length is described as byte offset to the 5997 /* word3 command length is described as byte offset to the
@@ -7247,6 +7354,32 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7247} 7354}
7248 7355
7249/** 7356/**
7357 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
7358 * @phba: Pointer to HBA context object.
7359 * @piocbq: Pointer to command iocb.
7360 * @flag: Flag to test.
7361 *
7362 * This routine grabs the hbalock and then tests the iocb_flag to
7363 * see if the passed-in flag is set.
7364 * Returns:
7365 * 1 if flag is set.
7366 * 0 if flag is not set.
7367 **/
7368static int
7369lpfc_chk_iocb_flg(struct lpfc_hba *phba,
7370 struct lpfc_iocbq *piocbq, uint32_t flag)
7371{
7372 unsigned long iflags;
7373 int ret;
7374
7375 spin_lock_irqsave(&phba->hbalock, iflags);
7376 ret = piocbq->iocb_flag & flag;
7377 spin_unlock_irqrestore(&phba->hbalock, iflags);
7378 return ret;
7379
7380}
7381
7382/**
7250 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 7383 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
7251 * @phba: Pointer to HBA context object.. 7384 * @phba: Pointer to HBA context object..
7252 * @pring: Pointer to sli ring. 7385 * @pring: Pointer to sli ring.
@@ -7313,7 +7446,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7313 if (retval == IOCB_SUCCESS) { 7446 if (retval == IOCB_SUCCESS) {
7314 timeout_req = timeout * HZ; 7447 timeout_req = timeout * HZ;
7315 timeleft = wait_event_timeout(done_q, 7448 timeleft = wait_event_timeout(done_q,
7316 piocb->iocb_flag & LPFC_IO_WAKE, 7449 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
7317 timeout_req); 7450 timeout_req);
7318 7451
7319 if (piocb->iocb_flag & LPFC_IO_WAKE) { 7452 if (piocb->iocb_flag & LPFC_IO_WAKE) {
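
The wait condition above now goes through lpfc_chk_iocb_flg() rather than reading piocb->iocb_flag directly, so the LPFC_IO_WAKE test is made under hbalock, the same lock lpfc_sli_wake_iocb_wait() holds when it sets the flag; presumably this keeps the waiter from racing with the completion path. A minimal sketch of the locked-test pattern, with the rest of lpfc_sli_issue_iocb_wait() left out:

	/* Sketch: the wake flag is now sampled under hbalock inside the
	 * wait_event_timeout() condition via lpfc_chk_iocb_flg().
	 */
	timeleft = wait_event_timeout(done_q,
			lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
			timeout * HZ);
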
@@ -7498,20 +7631,16 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
7498 if ((HS_FFER1 & phba->work_hs) && 7631 if ((HS_FFER1 & phba->work_hs) &&
7499 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 7632 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7500 HS_FFER6 | HS_FFER7) & phba->work_hs)) { 7633 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7501 spin_lock_irq(&phba->hbalock);
7502 phba->hba_flag |= DEFER_ERATT; 7634 phba->hba_flag |= DEFER_ERATT;
7503 spin_unlock_irq(&phba->hbalock);
7504 /* Clear all interrupt enable conditions */ 7635 /* Clear all interrupt enable conditions */
7505 writel(0, phba->HCregaddr); 7636 writel(0, phba->HCregaddr);
7506 readl(phba->HCregaddr); 7637 readl(phba->HCregaddr);
7507 } 7638 }
7508 7639
7509 /* Set the driver HA work bitmap */ 7640 /* Set the driver HA work bitmap */
7510 spin_lock_irq(&phba->hbalock);
7511 phba->work_ha |= HA_ERATT; 7641 phba->work_ha |= HA_ERATT;
7512 /* Indicate polling handles this ERATT */ 7642 /* Indicate polling handles this ERATT */
7513 phba->hba_flag |= HBA_ERATT_HANDLED; 7643 phba->hba_flag |= HBA_ERATT_HANDLED;
7514 spin_unlock_irq(&phba->hbalock);
7515 return 1; 7644 return 1;
7516 } 7645 }
7517 return 0; 7646 return 0;
@@ -7557,12 +7686,10 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7557 return 0; 7686 return 0;
7558 phba->work_status[0] = uerr_sta_lo; 7687 phba->work_status[0] = uerr_sta_lo;
7559 phba->work_status[1] = uerr_sta_hi; 7688 phba->work_status[1] = uerr_sta_hi;
7560 spin_lock_irq(&phba->hbalock);
7561 /* Set the driver HA work bitmap */ 7689 /* Set the driver HA work bitmap */
7562 phba->work_ha |= HA_ERATT; 7690 phba->work_ha |= HA_ERATT;
7563 /* Indicate polling handles this ERATT */ 7691 /* Indicate polling handles this ERATT */
7564 phba->hba_flag |= HBA_ERATT_HANDLED; 7692 phba->hba_flag |= HBA_ERATT_HANDLED;
7565 spin_unlock_irq(&phba->hbalock);
7566 return 1; 7693 return 1;
7567 } 7694 }
7568 } 7695 }
@@ -9245,6 +9372,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9245 kfree(dmabuf); 9372 kfree(dmabuf);
9246 goto out_fail; 9373 goto out_fail;
9247 } 9374 }
9375 memset(dmabuf->virt, 0, PAGE_SIZE);
9248 dmabuf->buffer_tag = x; 9376 dmabuf->buffer_tag = x;
9249 list_add_tail(&dmabuf->list, &queue->page_list); 9377 list_add_tail(&dmabuf->list, &queue->page_list);
9250 /* initialize queue's entry array */ 9378 /* initialize queue's entry array */
@@ -9667,7 +9795,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9667 /* link the wq onto the parent cq child list */ 9795 /* link the wq onto the parent cq child list */
9668 list_add_tail(&wq->list, &cq->child_list); 9796 list_add_tail(&wq->list, &cq->child_list);
9669out: 9797out:
9670 if (rc == MBX_TIMEOUT) 9798 if (rc != MBX_TIMEOUT)
9671 mempool_free(mbox, phba->mbox_mem_pool); 9799 mempool_free(mbox, phba->mbox_mem_pool);
9672 return status; 9800 return status;
9673} 9801}
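
The mempool_free() guard in lpfc_wq_create() is inverted by this hunk: the mailbox is now freed on every outcome except MBX_TIMEOUT, presumably because a timed-out mailbox may still be referenced and is released by the mailbox completion handling instead. A sketch of the corrected exit path, under that assumption:

out:
	/* Free the mailbox unless the command timed out; a timed-out
	 * mailbox is assumed to be cleaned up by the completion path.
	 */
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	return status;
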
@@ -11020,10 +11148,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
11020 rpi_page->start_rpi); 11148 rpi_page->start_rpi);
11021 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 11149 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11022 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 11150 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11023 if (!phba->sli4_hba.intr_enable) 11151 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11024 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11025 else
11026 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11027 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 11152 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11028 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11153 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11029 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11154 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
@@ -11363,6 +11488,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11363 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 11488 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
11364 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 11489 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
11365 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 11490 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
11491 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
11366 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 11492 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
11367 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 11493 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
11368 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 11494 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7d37eb7459bf..3c53316cf6d0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -56,6 +56,7 @@ struct lpfc_iocbq {
56#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 56#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
57#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 57#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
58#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 58#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
59#define LPFC_FIP_ELS 0x40
59 60
60 uint8_t abort_count; 61 uint8_t abort_count;
61 uint8_t rsvd2; 62 uint8_t rsvd2;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 5196b46608d7..3b276b47d18f 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -229,7 +229,7 @@ struct lpfc_bmbx {
229 229
230#define LPFC_EQE_DEF_COUNT 1024 230#define LPFC_EQE_DEF_COUNT 1024
231#define LPFC_CQE_DEF_COUNT 256 231#define LPFC_CQE_DEF_COUNT 256
232#define LPFC_WQE_DEF_COUNT 64 232#define LPFC_WQE_DEF_COUNT 256
233#define LPFC_MQE_DEF_COUNT 16 233#define LPFC_MQE_DEF_COUNT 16
234#define LPFC_RQE_DEF_COUNT 512 234#define LPFC_RQE_DEF_COUNT 512
235 235
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6b8a148f0a55..41094e02304b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.2" 21#define LPFC_DRIVER_VERSION "8.3.3"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a6313ee84ac5..e0b49922193e 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -695,8 +695,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
695 } 695 }
696 vport->unreg_vpi_cmpl = VPORT_INVAL; 696 vport->unreg_vpi_cmpl = VPORT_INVAL;
697 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 697 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
698 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
699 goto skip_logo;
700 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 698 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
701 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) 699 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
702 timeout = schedule_timeout(timeout); 700 timeout = schedule_timeout(timeout);