author    Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 14:23:28 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 14:23:28 -0500
commit    28bc6fb9596fe1e577d09fc17ee6e1bb051c6ba3
tree      6293b282a960720fc5008e3e5fa4d096d974b2f1
parent    0be600a5add76e8e8b9e1119f2a7426ff849aca8
parent    a2390348c19d0819d525d375414a7cfdacb51a68
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual driver suspects: arcmsr,
  scsi_debug, mpt3sas, lpfc, cxlflash, qla2xxx, aacraid, megaraid_sas,
  hisi_sas. We also have a rework of the libsas hotplug handling to
  make it more robust, a slew of 32 bit time conversions and fixes, and
  a host of the usual minor updates and style changes. The biggest
  potential for regressions is the libsas hotplug changes, but so far
  they seem stable under testing"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (313 commits)
  scsi: qla2xxx: Fix logo flag for qlt_free_session_done()
  scsi: arcmsr: avoid do_gettimeofday
  scsi: core: Add VENDOR_SPECIFIC sense code definitions
  scsi: qedi: Drop cqe response during connection recovery
  scsi: fas216: fix sense buffer initialization
  scsi: ibmvfc: Remove unneeded semicolons
  scsi: hisi_sas: fix a bug in hisi_sas_dev_gone()
  scsi: hisi_sas: directly attached disk LED feature for v2 hw
  scsi: hisi_sas: devicetree: bindings: add LED feature for v2 hw
  scsi: megaraid_sas: NVMe passthrough command support
  scsi: megaraid: use ktime_get_real for firmware time
  scsi: fnic: use 64-bit timestamps
  scsi: qedf: Fix error return code in __qedf_probe()
  scsi: devinfo: fix format of the device list
  scsi: qla2xxx: Update driver version to 10.00.00.05-k
  scsi: qla2xxx: Add XCB counters to debugfs
  scsi: qla2xxx: Fix queue ID for async abort with Multiqueue
  scsi: qla2xxx: Fix warning for code intentation in __qla24xx_handle_gpdb_event()
  scsi: qla2xxx: Fix warning during port_name debug print
  scsi: qla2xxx: Fix warning in qla2x00_async_iocb_timeout()
  ...
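Many of the "32 bit time conversions" in this pull (3w-9xxx, 3w-sas, megaraid,
fnic, arcmsr) follow one pattern: do_gettimeofday() is replaced by
ktime_get_real_seconds(), keeping the seconds count in a 64-bit time64_t until
a 32-bit firmware field forces truncation. A minimal sketch of that pattern,
illustrative only — example_fw_timestamp() is a hypothetical helper, not a
function from this series:

    /*
     * Hypothetical sketch of the y2038 conversion pattern used below.
     * do_gettimeofday() filled a struct timeval whose tv_sec overflows in
     * 2038 on 32-bit kernels; ktime_get_real_seconds() returns a 64-bit
     * time64_t, so truncation only happens at the 32-bit firmware field
     * itself, pushing the wrap out to y2106.
     */
    #include <linux/timekeeping.h>	/* ktime_get_real_seconds() */
    #include <linux/time.h>		/* sys_tz */
    #include <linux/kernel.h>		/* lower_32_bits() */

    static u32 example_fw_timestamp(void)
    {
    	time64_t local_time;

    	/* shift UTC seconds to local time, as these firmwares expect */
    	local_time = ktime_get_real_seconds() - sys_tz.tz_minuteswest * 60;

    	return lower_32_bits(local_time);
    }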
Diffstat (limited to 'drivers')
 drivers/message/fusion/mptbase.c           |   59
 drivers/message/fusion/mptctl.c            |   25
 drivers/message/fusion/mptsas.c            |    1
 drivers/scsi/3w-9xxx.c                     |   26
 drivers/scsi/3w-9xxx.h                     |    2
 drivers/scsi/3w-sas.c                      |   15
 drivers/scsi/aacraid/aachba.c              |  473
 drivers/scsi/aacraid/aacraid.h             |   54
 drivers/scsi/aacraid/commctrl.c            |    6
 drivers/scsi/aacraid/comminit.c            |   49
 drivers/scsi/aacraid/commsup.c             |  220
 drivers/scsi/aacraid/linit.c               |   26
 drivers/scsi/aacraid/sa.c                  |   32
 drivers/scsi/arcmsr/arcmsr.h               |  567
 drivers/scsi/arcmsr/arcmsr_hba.c           | 1318
 drivers/scsi/arm/fas216.c                  |    2
 drivers/scsi/bfa/bfa_core.c                |    2
 drivers/scsi/bfa/bfa_cs.h                  |    6
 drivers/scsi/bfa/bfa_defs_svc.h            |    3
 drivers/scsi/bfa/bfa_fcbuild.c             |    8
 drivers/scsi/bfa/bfa_fcpim.c               |    3
 drivers/scsi/bfa/bfa_fcpim.h               |    4
 drivers/scsi/bfa/bfa_fcs.c                 |   78
 drivers/scsi/bfa/bfa_fcs_lport.c           |   62
 drivers/scsi/bfa/bfa_ioc.c                 |   10
 drivers/scsi/bfa/bfa_port.c                |   15
 drivers/scsi/bfa/bfa_port.h                |    2
 drivers/scsi/bfa/bfa_svc.c                 |   51
 drivers/scsi/bfa/bfa_svc.h                 |    2
 drivers/scsi/bfa/bfad.c                    |   23
 drivers/scsi/bfa/bfad_attr.c               |    5
 drivers/scsi/bfa/bfad_bsg.c                |   10
 drivers/scsi/bfa/bfad_debugfs.c            |    8
 drivers/scsi/bfa/bfad_im.h                 |   32
 drivers/scsi/bnx2fc/bnx2fc_fcoe.c          |    4
 drivers/scsi/bnx2fc/bnx2fc_hwi.c           |   60
 drivers/scsi/bnx2fc/bnx2fc_tgt.c           |   51
 drivers/scsi/bnx2i/bnx2i_hwi.c             |   19
 drivers/scsi/csiostor/csio_init.c          |    2
 drivers/scsi/csiostor/csio_init.h          |    1
 drivers/scsi/csiostor/csio_mb.c            |    6
 drivers/scsi/cxgbi/libcxgbi.c              |    2
 drivers/scsi/cxlflash/Makefile             |    2
 drivers/scsi/cxlflash/backend.h            |   41
 drivers/scsi/cxlflash/common.h             |    8
 drivers/scsi/cxlflash/cxl_hw.c             |  168
 drivers/scsi/cxlflash/main.c               |  100
 drivers/scsi/cxlflash/superpipe.c          |   64
 drivers/scsi/cxlflash/superpipe.h          |    4
 drivers/scsi/device_handler/scsi_dh_alua.c |   37
 drivers/scsi/fnic/fnic_debugfs.c           |   22
 drivers/scsi/fnic/fnic_fcs.c               |    4
 drivers/scsi/fnic/fnic_scsi.c              |    2
 drivers/scsi/fnic/fnic_stats.h             |    4
 drivers/scsi/fnic/fnic_trace.c             |   58
 drivers/scsi/hisi_sas/hisi_sas.h           |   46
 drivers/scsi/hisi_sas/hisi_sas_main.c      |  267
 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c     |    6
 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c     |  194
 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c     |  332
 drivers/scsi/hosts.c                       |    6
 drivers/scsi/hpsa.c                        |   18
 drivers/scsi/ibmvscsi/ibmvfc.c             |   34
 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c   |  320
 drivers/scsi/ipr.c                         |    4
 drivers/scsi/iscsi_tcp.c                   |    2
 drivers/scsi/libiscsi.c                    |   28
 drivers/scsi/libiscsi_tcp.c                |    9
 drivers/scsi/libsas/sas_ata.c              |    1
 drivers/scsi/libsas/sas_discover.c         |   34
 drivers/scsi/libsas/sas_event.c            |   86
 drivers/scsi/libsas/sas_expander.c         |   12
 drivers/scsi/libsas/sas_init.c             |  107
 drivers/scsi/libsas/sas_internal.h         |    7
 drivers/scsi/libsas/sas_phy.c              |   69
 drivers/scsi/libsas/sas_port.c             |   25
 drivers/scsi/libsas/sas_scsi_host.c        |   20
 drivers/scsi/lpfc/lpfc.h                   |    6
 drivers/scsi/lpfc/lpfc_attr.c              |   59
 drivers/scsi/lpfc/lpfc_crtn.h              |    2
 drivers/scsi/lpfc/lpfc_ct.c                |   20
 drivers/scsi/lpfc/lpfc_debugfs.c           |   65
 drivers/scsi/lpfc/lpfc_disc.h              |    2
 drivers/scsi/lpfc/lpfc_els.c               |  116
 drivers/scsi/lpfc/lpfc_hbadisc.c           |   29
 drivers/scsi/lpfc/lpfc_hw4.h               |    6
 drivers/scsi/lpfc/lpfc_init.c              |  294
 drivers/scsi/lpfc/lpfc_nportdisc.c         |   88
 drivers/scsi/lpfc/lpfc_nvme.c              |  356
 drivers/scsi/lpfc/lpfc_nvme.h              |   17
 drivers/scsi/lpfc/lpfc_nvmet.c             |   71
 drivers/scsi/lpfc/lpfc_nvmet.h             |   10
 drivers/scsi/lpfc/lpfc_sli.c               |  193
 drivers/scsi/lpfc/lpfc_sli4.h              |   14
 drivers/scsi/lpfc/lpfc_version.h           |    2
 drivers/scsi/megaraid/megaraid_sas.h       |   37
 drivers/scsi/megaraid/megaraid_sas_base.c  |  171
 drivers/scsi/megaraid/megaraid_sas_fp.c    |   28
 drivers/scsi/megaraid/megaraid_sas_fusion.c |  99
 drivers/scsi/megaraid/megaraid_sas_fusion.h |   6
 drivers/scsi/mpt3sas/mpt3sas_base.c        |  207
 drivers/scsi/mpt3sas/mpt3sas_base.h        |   29
 drivers/scsi/mpt3sas/mpt3sas_ctl.c         |   31
 drivers/scsi/mpt3sas/mpt3sas_scsih.c       |  332
 drivers/scsi/mpt3sas/mpt3sas_warpdrive.c   |   33
 drivers/scsi/pmcraid.c                     |    2
 drivers/scsi/ppa.c                         |    4
 drivers/scsi/qedf/qedf_main.c              |    3
 drivers/scsi/qedi/qedi_fw.c                |    2
 drivers/scsi/qedi/qedi_main.c              |   46
 drivers/scsi/qla2xxx/qla_attr.c            |    7
 drivers/scsi/qla2xxx/qla_bsg.c             |    9
 drivers/scsi/qla2xxx/qla_def.h             |  173
 drivers/scsi/qla2xxx/qla_dfs.c             |   39
 drivers/scsi/qla2xxx/qla_fw.h              |    2
 drivers/scsi/qla2xxx/qla_gbl.h             |   32
 drivers/scsi/qla2xxx/qla_gs.c              | 1497
 drivers/scsi/qla2xxx/qla_init.c            | 1044
 drivers/scsi/qla2xxx/qla_inline.h          |    1
 drivers/scsi/qla2xxx/qla_iocb.c            |   65
 drivers/scsi/qla2xxx/qla_isr.c             |   56
 drivers/scsi/qla2xxx/qla_mbx.c             |  151
 drivers/scsi/qla2xxx/qla_mid.c             |  140
 drivers/scsi/qla2xxx/qla_nx2.c             |    2
 drivers/scsi/qla2xxx/qla_os.c              |  528
 drivers/scsi/qla2xxx/qla_sup.c             |    1
 drivers/scsi/qla2xxx/qla_target.c          |  761
 drivers/scsi/qla2xxx/qla_target.h          |    4
 drivers/scsi/qla2xxx/qla_tmpl.c            |   40
 drivers/scsi/qla2xxx/qla_version.h         |    2
 drivers/scsi/qla2xxx/tcm_qla2xxx.c         |    5
 drivers/scsi/qla4xxx/ql4_init.c            |    5
 drivers/scsi/qla4xxx/ql4_mbx.c             |   21
 drivers/scsi/qla4xxx/ql4_nx.c              |    5
 drivers/scsi/qla4xxx/ql4_os.c              |   12
 drivers/scsi/scsi_common.c                 |   14
 drivers/scsi/scsi_debug.c                  |  724
 drivers/scsi/scsi_devinfo.c                |   39
 drivers/scsi/scsi_dh.c                     |    5
 drivers/scsi/scsi_error.c                  |   21
 drivers/scsi/scsi_lib.c                    |   50
 drivers/scsi/scsi_priv.h                   |   15
 drivers/scsi/scsi_sysfs.c                  |    3
 drivers/scsi/scsi_transport_fc.c           |    4
 drivers/scsi/sd.c                          |    6
 drivers/scsi/ses.c                         |   11
 drivers/scsi/smartpqi/Makefile             |    2
 drivers/scsi/st.c                          |    2
 drivers/scsi/storvsc_drv.c                 |    4
 drivers/scsi/ufs/ufshci.h                  |   16
 drivers/scsi/wd719x.c                      |    4
 151 files changed, 8881 insertions(+), 4506 deletions(-)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 7a93400eea2a..51eb1b027963 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -958,7 +958,7 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
 {
 	u32 mf_dma_addr;
 	int req_offset;
-	u16	 req_idx;	/* Request index */
+	u16 req_idx;	/* Request index */
 
 	/* ensure values are reset properly! */
 	mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;		/* byte */
@@ -994,7 +994,7 @@ mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
 {
 	u32 mf_dma_addr;
 	int req_offset;
-	u16	 req_idx;	/* Request index */
+	u16 req_idx;	/* Request index */
 
 	/* ensure values are reset properly! */
 	mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
@@ -1128,11 +1128,12 @@ mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
 static void
 mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
 {
 	SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+
 	pChain->Length = cpu_to_le16(length);
 	pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
 	pChain->NextChainOffset = next;
 	pChain->Address = cpu_to_le32(dma_addr);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1147,18 +1148,18 @@ mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
 static void
 mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
 {
-	SGEChain64_t *pChain  = (SGEChain64_t *) pAddr;
-	u32 tmp               = dma_addr & 0xFFFFFFFF;
+	SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
+	u32 tmp = dma_addr & 0xFFFFFFFF;
 
-	pChain->Length        = cpu_to_le16(length);
-	pChain->Flags         = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
-				 MPI_SGE_FLAGS_64_BIT_ADDRESSING);
+	pChain->Length = cpu_to_le16(length);
+	pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+			 MPI_SGE_FLAGS_64_BIT_ADDRESSING);
 
 	pChain->NextChainOffset = next;
 
-	pChain->Address.Low   = cpu_to_le32(tmp);
-	tmp                   = (u32)(upper_32_bits(dma_addr));
-	pChain->Address.High  = cpu_to_le32(tmp);
+	pChain->Address.Low = cpu_to_le32(tmp);
+	tmp = (u32)(upper_32_bits(dma_addr));
+	pChain->Address.High = cpu_to_le32(tmp);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1360,7 +1361,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
 	ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
 	ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
 
-return 0;
+	return 0;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2152,7 +2153,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
 	    device_state);
 
 	/* put ioc into READY_STATE */
-	if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
+	if (SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
 		printk(MYIOC_s_ERR_FMT
 		"pci-suspend:  IOC msg unit reset failed!\n", ioc->name);
 	}
@@ -6348,7 +6349,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
 	u8 page_type = 0, extend_page;
 	unsigned long timeleft;
 	unsigned long flags;
-    int in_isr;
+	int in_isr;
 	u8 issue_hard_reset = 0;
 	u8 retry_count = 0;
 
@@ -7697,7 +7698,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
 		break;
 	}
 	if (ds)
-		strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
+		strlcpy(evStr, ds, EVENT_DESCR_STR_SZ);
 
 
 	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
@@ -8092,15 +8093,15 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
 static void
 mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
 {
-union loginfo_type {
-	u32 loginfo;
-	struct {
-		u32 subcode:16;
-		u32 code:8;
-		u32 originator:4;
-		u32 bus_type:4;
-	}dw;
-};
+	union loginfo_type {
+		u32 loginfo;
+		struct {
+			u32 subcode:16;
+			u32 code:8;
+			u32 originator:4;
+			u32 bus_type:4;
+		} dw;
+	};
 	union loginfo_type sas_loginfo;
 	char *originator_desc = NULL;
 	char *code_desc = NULL;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7b3b41368931..8d12017b9893 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2481,24 +2481,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
 	else
 		karg.host_no =  -1;
 
-	/* Reformat the fw_version into a string
-	 */
-	karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
-		((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
-	karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
-	karg.fw_version[2] = '.';
-	karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
-		((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
-	karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
-	karg.fw_version[5] = '.';
-	karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
-		((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
-	karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
-	karg.fw_version[8] = '.';
-	karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
-		((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
-	karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
-	karg.fw_version[11] = '\0';
+	/* Reformat the fw_version into a string */
+	snprintf(karg.fw_version, sizeof(karg.fw_version),
+		 "%.2hhu.%.2hhu.%.2hhu.%.2hhu",
+		 ioc->facts.FWVersion.Struct.Major,
+		 ioc->facts.FWVersion.Struct.Minor,
+		 ioc->facts.FWVersion.Struct.Unit,
+		 ioc->facts.FWVersion.Struct.Dev);
 
 	/* Issue a config request to get the device serial number
 	 */
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 345f6035599e..439ee9c5f535 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1165,7 +1165,6 @@ mptsas_schedule_target_reset(void *iocp)
 	 * issue target reset to next device in the queue
 	 */
 
-	head = &hd->target_reset_list;
 	if (list_empty(head))
 		return;
 
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 00e7968a1d70..b42c9c479d4b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -369,7 +369,6 @@ out:
 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 {
 	u32 local_time;
-	struct timeval time;
 	TW_Event *event;
 	unsigned short aen;
 	char host[16];
@@ -392,8 +391,8 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 	memset(event, 0, sizeof(TW_Event));
 
 	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
-	do_gettimeofday(&time);
-	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+	/* event->time_stamp_sec overflows in y2106 */
+	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
 	event->time_stamp_sec = local_time;
 	event->aen_code = aen;
 	event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -473,11 +472,10 @@ out:
 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 {
 	u32 schedulertime;
-	struct timeval utc;
 	TW_Command_Full *full_command_packet;
 	TW_Command *command_packet;
 	TW_Param_Apache *param;
-	u32 local_time;
+	time64_t local_time;
 
 	/* Fill out the command packet */
 	full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -499,9 +497,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 
 	/* Convert system time in UTC to local time seconds since last
 	   Sunday 12:00AM */
-	do_gettimeofday(&utc);
-	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
-	schedulertime = local_time - (3 * 86400);
+	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
 	schedulertime = cpu_to_le32(schedulertime % 604800);
 
 	memcpy(param->data, &schedulertime, sizeof(u32));
@@ -648,8 +645,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
 	TW_Command_Full *full_command_packet;
 	TW_Compatibility_Info *tw_compat_info;
 	TW_Event *event;
-	struct timeval current_time;
-	u32 current_time_ms;
+	ktime_t current_time;
 	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
 	int retval = TW_IOCTL_ERROR_OS_EFAULT;
 	void __user *argp = (void __user *)arg;
@@ -840,17 +836,17 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
 			break;
 		case TW_IOCTL_GET_LOCK:
 			tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
-			do_gettimeofday(&current_time);
-			current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
+			current_time = ktime_get();
 
-			if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
+			if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
+			    ktime_after(current_time, tw_dev->ioctl_time)) {
 				tw_dev->ioctl_sem_lock = 1;
-				tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
+				tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
 				tw_ioctl->driver_command.status = 0;
 				tw_lock->time_remaining_msec = tw_lock->timeout_msec;
 			} else {
 				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
-				tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
+				tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
 			}
 			break;
 		case TW_IOCTL_RELEASE_LOCK:
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index b6c208cc474f..d88cd3499bd5 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -666,7 +666,7 @@ typedef struct TAG_TW_Device_Extension {
 	unsigned char		event_queue_wrapped;
 	unsigned int		error_sequence_id;
 	int			ioctl_sem_lock;
-	u32			ioctl_msec;
+	ktime_t			ioctl_time;
 	int			chrdev_request_id;
 	wait_queue_head_t	ioctl_wqueue;
 	struct mutex		ioctl_lock;
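The GET_LOCK conversion above trades a hand-rolled millisecond counter for
opaque ktime_t arithmetic. An illustrative sketch of the idiom — struct
example_lock is a hypothetical stand-in for the TAG_TW_Device_Extension
fields, not driver code:

    /*
     * Hypothetical sketch of the ktime_t lock-timeout pattern adopted in
     * 3w-9xxx above: ktime_get() is monotonic, so the expiry test cannot
     * be confused by settimeofday() or NTP steps the way a wall-clock
     * millisecond counter could be.
     */
    #include <linux/ktime.h>

    struct example_lock {
    	int	held;
    	ktime_t	expires;
    };

    static bool example_try_lock(struct example_lock *lock, int timeout_msec)
    {
    	ktime_t now = ktime_get();

    	if (lock->held && !ktime_after(now, lock->expires))
    		return false;	/* still locked and not yet expired */

    	lock->held = 1;
    	lock->expires = ktime_add_ms(now, timeout_msec);
    	return true;
    }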
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b150e131b2e7..cf9f2a09b47d 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -221,7 +221,6 @@ out:
 static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 {
 	u32 local_time;
-	struct timeval time;
 	TW_Event *event;
 	unsigned short aen;
 	char host[16];
@@ -240,8 +239,8 @@ static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 	memset(event, 0, sizeof(TW_Event));
 
 	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
-	do_gettimeofday(&time);
-	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+	/* event->time_stamp_sec overflows in y2106 */
+	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
 	event->time_stamp_sec = local_time;
 	event->aen_code = aen;
 	event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -408,11 +407,10 @@ out:
 static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 {
 	u32 schedulertime;
-	struct timeval utc;
 	TW_Command_Full *full_command_packet;
 	TW_Command *command_packet;
 	TW_Param_Apache *param;
-	u32 local_time;
+	time64_t local_time;
 
 	/* Fill out the command packet */
 	full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -434,10 +432,9 @@ static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 
 	/* Convert system time in UTC to local time seconds since last
 	   Sunday 12:00AM */
-	do_gettimeofday(&utc);
-	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
-	schedulertime = local_time - (3 * 86400);
-	schedulertime = cpu_to_le32(schedulertime % 604800);
+	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
+	schedulertime = cpu_to_le32(schedulertime);
 
 	memcpy(param->data, &schedulertime, sizeof(u32));
 
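Both 3ware drivers compute "seconds since last Sunday 12:00AM" for the
firmware clock, and the arithmetic is easy to miss in the diffs: the Unix
epoch fell on a Thursday, so subtracting three days re-bases the count to a
Sunday boundary before reducing modulo one week (604800 s), and div_u64_rem()
keeps the 64-bit division safe on 32-bit kernels. A hypothetical standalone
version of that calculation, for illustration only:

    #include <linux/math64.h>
    #include <linux/types.h>

    /* hypothetical helper mirroring the twa/twl_aen_sync_time() math */
    static u32 seconds_since_sunday_midnight(time64_t local_time)
    {
    	u32 remainder;

    	/* epoch (Thursday) minus 3 days lands on a Sunday boundary */
    	div_u64_rem(local_time - (3 * 86400), 604800, &remainder);
    	return remainder;
    }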
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index af3e4d3f9735..e7961cbd2c55 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -42,6 +42,8 @@
 #include <linux/highmem.h>	/* For flush_kernel_dcache_page */
 #include <linux/module.h>
 
+#include <asm/unaligned.h>
+
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -913,8 +915,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 	memset(str, ' ', sizeof(*str));
 
 	if (sup_adap_info->adapter_type_text[0]) {
-		char *cp = sup_adap_info->adapter_type_text;
 		int c;
+		char *cp;
+		char *cname = kmemdup(sup_adap_info->adapter_type_text,
+				sizeof(sup_adap_info->adapter_type_text),
+				GFP_ATOMIC);
+		if (!cname)
+			return;
+
+		cp = cname;
 		if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
 			inqstrcpy("SMC", str->vid);
 		else {
@@ -923,7 +932,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 				++cp;
 			c = *cp;
 			*cp = '\0';
-			inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
+			inqstrcpy(cname, str->vid);
 			*cp = c;
 			while (*cp && *cp != ' ')
 				++cp;
@@ -931,14 +940,11 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
 			while (*cp == ' ')
 				++cp;
 			/* last six chars reserved for vol type */
-			c = 0;
-			if (strlen(cp) > sizeof(str->pid)) {
-				c = cp[sizeof(str->pid)];
+			if (strlen(cp) > sizeof(str->pid))
 				cp[sizeof(str->pid)] = '\0';
-			}
 			inqstrcpy (cp, str->pid);
-			if (c)
-				cp[sizeof(str->pid)] = c;
+
+			kfree(cname);
 		} else {
 			struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
 
@@ -1660,87 +1666,309 @@ static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
 			(void *) cmd);
 }
 
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
+	struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
 {
 	struct fib *fibptr;
-	struct aac_srb *srbcmd;
-	struct sgmap64 *sg64;
-	struct aac_ciss_identify_pd *identify_resp;
 	dma_addr_t addr;
-	u32 vbus, vid;
-	u16 fibsize, datasize;
-	int rcode = -ENOMEM;
+	int rcode;
+	int fibsize;
+	struct aac_srb *srb;
+	struct aac_srb_reply *srb_reply;
+	struct sgmap64 *sg64;
+	u32 vbus;
+	u32 vid;
+
+	if (!dev->sa_firmware)
+		return 0;
 
+	/* allocate FIB */
 	fibptr = aac_fib_alloc(dev);
 	if (!fibptr)
-		goto out;
+		return -ENOMEM;
 
-	fibsize = sizeof(struct aac_srb) -
-		sizeof(struct sgentry) + sizeof(struct sgentry64);
-	datasize = sizeof(struct aac_ciss_identify_pd);
+	aac_fib_init(fibptr);
+	fibptr->hw_fib_va->header.XferState &=
+		~cpu_to_le32(FastResponseCapable);
 
-	identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
-					   GFP_KERNEL);
-	if (!identify_resp)
-		goto fib_free_ptr;
+	fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+		sizeof(struct sgentry64);
 
-	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
-	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
+	/* allocate DMA buffer for response */
+	addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
+			DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&dev->pdev->dev, addr)) {
+		rcode = -ENOMEM;
+		goto fib_error;
+	}
 
-	aac_fib_init(fibptr);
+	srb = fib_data(fibptr);
+	memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
 
-	srbcmd = (struct aac_srb *) fib_data(fibptr);
-	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
-	srbcmd->channel = cpu_to_le32(vbus);
-	srbcmd->id = cpu_to_le32(vid);
-	srbcmd->lun = 0;
-	srbcmd->flags = cpu_to_le32(SRB_DataIn);
-	srbcmd->timeout = cpu_to_le32(10);
-	srbcmd->retry_limit = 0;
-	srbcmd->cdb_size = cpu_to_le32(12);
-	srbcmd->count = cpu_to_le32(datasize);
+	vbus = (u32)le16_to_cpu(
+			dev->supplement_adapter_info.virt_device_bus);
+	vid = (u32)le16_to_cpu(
+			dev->supplement_adapter_info.virt_device_target);
 
-	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
-	srbcmd->cdb[0] = 0x26;
-	srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
-	srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+	/* set the common request fields */
+	srb->channel = cpu_to_le32(vbus);
+	srb->id = cpu_to_le32(vid);
+	srb->lun = 0;
+	srb->function = cpu_to_le32(SRBF_ExecuteScsi);
+	srb->timeout = 0;
+	srb->retry_limit = 0;
+	srb->cdb_size = cpu_to_le32(16);
+	srb->count = cpu_to_le32(xfer_len);
+
+	sg64 = (struct sgmap64 *)&srb->sg;
+	sg64->count = cpu_to_le32(1);
+	sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
+	sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
+	sg64->sg[0].count = cpu_to_le32(xfer_len);
 
-	sg64 = (struct sgmap64 *)&srbcmd->sg;
-	sg64->count = cpu_to_le32(1);
-	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
-	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
-	sg64->sg[0].count = cpu_to_le32(datasize);
+	/*
+	 * Copy the updated data for other dumping or other usage if needed
+	 */
+	memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
+
+	/* issue request to the controller */
+	rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
+			1, 1, NULL, NULL);
+
+	if (rcode == -ERESTARTSYS)
+		rcode = -ERESTART;
+
+	if (unlikely(rcode < 0))
+		goto bmic_error;
+
+	srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
+	memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
+
+bmic_error:
+	dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
+fib_error:
+	aac_fib_complete(fibptr);
+	aac_fib_free(fibptr);
+	return rcode;
+}
+
+static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
+{
+
+	struct aac_ciss_identify_pd *identify_resp;
 
-	rcode = aac_fib_send(ScsiPortCommand64,
-		fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+	if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
+		return;
+
+	identify_resp = dev->hba_map[bus][target].safw_identify_resp;
+	if (identify_resp == NULL) {
+		dev->hba_map[bus][target].qd_limit = 32;
+		return;
+	}
 
 	if (identify_resp->current_queue_depth_limit <= 0 ||
-		identify_resp->current_queue_depth_limit > 32)
+		identify_resp->current_queue_depth_limit > 255)
 		dev->hba_map[bus][target].qd_limit = 32;
 	else
 		dev->hba_map[bus][target].qd_limit =
 			identify_resp->current_queue_depth_limit;
+}
 
-	dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);
+static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
+	struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
+{
+	int rcode = -ENOMEM;
+	int datasize;
+	struct aac_srb_unit srbu;
+	struct aac_srb *srbcmd;
+	struct aac_ciss_identify_pd *identify_reply;
 
-	aac_fib_complete(fibptr);
+	datasize = sizeof(struct aac_ciss_identify_pd);
+	identify_reply = kmalloc(datasize, GFP_KERNEL);
+	if (!identify_reply)
+		goto out;
+
+	memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+	srbcmd = &srbu.srb;
+	srbcmd->flags = cpu_to_le32(SRB_DataIn);
+	srbcmd->cdb[0] = 0x26;
+	srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
+	srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+	rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
+	if (unlikely(rcode < 0))
+		goto mem_free_all;
+
+	*identify_resp = identify_reply;
+
+out:
+	return rcode;
+mem_free_all:
+	kfree(identify_reply);
+	goto out;
+}
+
+static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
+{
+	kfree(dev->safw_phys_luns);
+	dev->safw_phys_luns = NULL;
+}
+
+/**
+ *	aac_get_safw_ciss_luns()	Process topology change
+ *	@dev:		aac_dev structure
+ *
+ *	Execute a CISS REPORT PHYS LUNS and process the results into
+ *	the current hba_map.
+ */
+static int aac_get_safw_ciss_luns(struct aac_dev *dev)
+{
+	int rcode = -ENOMEM;
+	int datasize;
+	struct aac_srb *srbcmd;
+	struct aac_srb_unit srbu;
+	struct aac_ciss_phys_luns_resp *phys_luns;
+
+	datasize = sizeof(struct aac_ciss_phys_luns_resp) +
+		(AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+	phys_luns = kmalloc(datasize, GFP_KERNEL);
+	if (phys_luns == NULL)
+		goto out;
+
+	memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+	srbcmd = &srbu.srb;
+	srbcmd->flags = cpu_to_le32(SRB_DataIn);
+	srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
+	srbcmd->cdb[1] = 2;	/* extended reporting */
+	srbcmd->cdb[8] = (u8)(datasize >> 8);
+	srbcmd->cdb[9] = (u8)(datasize);
+
+	rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
+	if (unlikely(rcode < 0))
+		goto mem_free_all;
+
+	if (phys_luns->resp_flag != 2) {
+		rcode = -ENOMSG;
+		goto mem_free_all;
+	}
+
+	dev->safw_phys_luns = phys_luns;
+
+out:
+	return rcode;
+mem_free_all:
+	kfree(phys_luns);
+	goto out;
+}
+
+static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
+{
+	return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
+}
+
+static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
+}
+
+static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].level2[0];
+}
+
+static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].bus >> 6;
+}
+
+static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].node_ident[9];
+}
+
+static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
+{
+	return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
+}
+
+static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].node_ident[8];
+}
+
+static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
+	int bus, int target)
+{
+	kfree(dev->hba_map[bus][target].safw_identify_resp);
+	dev->hba_map[bus][target].safw_identify_resp = NULL;
+}
+
+static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
+	int lun_count)
+{
+	int luns;
+	int i;
+	u32 bus;
+	u32 target;
+
+	luns = aac_get_safw_phys_lun_count(dev);
+
+	if (luns < lun_count)
+		lun_count = luns;
+	else if (lun_count < 0)
+		lun_count = luns;
+
+	for (i = 0; i < lun_count; i++) {
+		bus = aac_get_safw_phys_bus(dev, i);
+		target = aac_get_safw_phys_target(dev, i);
+
+		aac_free_safw_identify_resp(dev, bus, target);
+	}
+}
+
+static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
+{
+	int i;
+	int rcode = 0;
+	u32 lun_count;
+	u32 bus;
+	u32 target;
+	struct aac_ciss_identify_pd *identify_resp = NULL;
+
+	lun_count = aac_get_safw_phys_lun_count(dev);
+
+	for (i = 0; i < lun_count; ++i) {
+
+		bus = aac_get_safw_phys_bus(dev, i);
+		target = aac_get_safw_phys_target(dev, i);
+
+		rcode = aac_issue_safw_bmic_identify(dev,
+			&identify_resp, bus, target);
+
+		if (unlikely(rcode < 0))
+			goto free_identify_resp;
+
+		dev->hba_map[bus][target].safw_identify_resp = identify_resp;
+	}
 
-fib_free_ptr:
-	aac_fib_free(fibptr);
 out:
 	return rcode;
+free_identify_resp:
+	aac_free_safw_all_identify_resp(dev, i);
+	goto out;
 }
 
 /**
- *	aac_update hba_map()- update current hba map with data from FW
+ *	aac_set_safw_attr_all_targets-	update current hba map with data from FW
  *	@dev:	aac_dev structure
  *	@phys_luns:	FW information from report phys luns
+ *	@rescan:	Indicates scan type
  *
  *	Update our hba map with the information gathered from the FW
  */
-void aac_update_hba_map(struct aac_dev *dev,
-		struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
 {
 	/* ok and extended reporting */
 	u32 lun_count, nexus;
@@ -1748,24 +1976,21 @@ void aac_update_hba_map(struct aac_dev *dev,
 	u8 expose_flag, attribs;
 	u8 devtype;
 
-	lun_count = ((phys_luns->list_length[0] << 24)
-		+ (phys_luns->list_length[1] << 16)
-		+ (phys_luns->list_length[2] << 8)
-		+ (phys_luns->list_length[3])) / 24;
+	lun_count = aac_get_safw_phys_lun_count(dev);
+
+	dev->scan_counter++;
 
 	for (i = 0; i < lun_count; ++i) {
 
-		bus = phys_luns->lun[i].level2[1] & 0x3f;
-		target = phys_luns->lun[i].level2[0];
-		expose_flag = phys_luns->lun[i].bus >> 6;
-		attribs = phys_luns->lun[i].node_ident[9];
-		nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+		bus = aac_get_safw_phys_bus(dev, i);
+		target = aac_get_safw_phys_target(dev, i);
+		expose_flag = aac_get_safw_phys_expose_flag(dev, i);
+		attribs = aac_get_safw_phys_attribs(dev, i);
+		nexus = aac_get_safw_phys_nexus(dev, i);
 
 		if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
 			continue;
 
-		dev->hba_map[bus][target].expose = expose_flag;
-
 		if (expose_flag != 0) {
 			devtype = AAC_DEVTYPE_RAID_MEMBER;
 			goto update_devtype;
@@ -1778,95 +2003,45 @@ void aac_update_hba_map(struct aac_dev *dev,
 		} else
 			devtype = AAC_DEVTYPE_ARC_RAW;
 
-		if (devtype != AAC_DEVTYPE_NATIVE_RAW)
-			goto update_devtype;
+		dev->hba_map[bus][target].scan_counter = dev->scan_counter;
 
-		if (aac_issue_bmic_identify(dev, bus, target) < 0)
-			dev->hba_map[bus][target].qd_limit = 32;
+		aac_set_safw_target_qd(dev, bus, target);
 
 update_devtype:
-		if (rescan == AAC_INIT)
-			dev->hba_map[bus][target].devtype = devtype;
-		else
-			dev->hba_map[bus][target].new_devtype = devtype;
+		dev->hba_map[bus][target].devtype = devtype;
 	}
 }
 
-/**
- *	aac_report_phys_luns()	Process topology change
- *	@dev:		aac_dev structure
- *	@fibptr:	fib pointer
- *
- *	Execute a CISS REPORT PHYS LUNS and process the results into
- *	the current hba_map.
- */
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+static int aac_setup_safw_targets(struct aac_dev *dev)
 {
-	int fibsize, datasize;
-	struct aac_ciss_phys_luns_resp *phys_luns;
-	struct aac_srb *srbcmd;
-	struct sgmap64 *sg64;
-	dma_addr_t addr;
-	u32 vbus, vid;
 	int rcode = 0;
 
-	/* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
-	fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
-		+ sizeof(struct sgentry64);
-	datasize = sizeof(struct aac_ciss_phys_luns_resp)
-		+ (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
-
-	phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
-			GFP_KERNEL);
-	if (phys_luns == NULL) {
-		rcode = -ENOMEM;
-		goto err_out;
-	}
-
-	vbus = (u32) le16_to_cpu(
-			dev->supplement_adapter_info.virt_device_bus);
-	vid = (u32) le16_to_cpu(
-			dev->supplement_adapter_info.virt_device_target);
+	rcode = aac_get_containers(dev);
+	if (unlikely(rcode < 0))
+		goto out;
 
-	aac_fib_init(fibptr);
+	rcode = aac_get_safw_ciss_luns(dev);
+	if (unlikely(rcode < 0))
+		goto out;
 
-	srbcmd = (struct aac_srb *) fib_data(fibptr);
-	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
-	srbcmd->channel = cpu_to_le32(vbus);
-	srbcmd->id = cpu_to_le32(vid);
-	srbcmd->lun = 0;
-	srbcmd->flags = cpu_to_le32(SRB_DataIn);
-	srbcmd->timeout = cpu_to_le32(10);
-	srbcmd->retry_limit = 0;
-	srbcmd->cdb_size = cpu_to_le32(12);
-	srbcmd->count = cpu_to_le32(datasize);
+	rcode = aac_get_safw_attr_all_targets(dev);
+	if (unlikely(rcode < 0))
+		goto free_ciss_luns;
 
-	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
-	srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
-	srbcmd->cdb[1] = 2;	/* extended reporting */
-	srbcmd->cdb[8] = (u8)(datasize >> 8);
-	srbcmd->cdb[9] = (u8)(datasize);
-
-	sg64 = (struct sgmap64 *) &srbcmd->sg;
-	sg64->count = cpu_to_le32(1);
-	sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
-	sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
-	sg64->sg[0].count = cpu_to_le32(datasize);
-
-	rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
-			FsaNormal, 1, 1, NULL, NULL);
-
-	/* analyse data */
-	if (rcode >= 0 && phys_luns->resp_flag == 2) {
-		/* ok and extended reporting */
-		aac_update_hba_map(dev, phys_luns, rescan);
-	}
+	aac_set_safw_attr_all_targets(dev);
 
-	dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr);
-err_out:
+	aac_free_safw_all_identify_resp(dev, -1);
+free_ciss_luns:
+	aac_free_safw_ciss_luns(dev);
+out:
 	return rcode;
 }
 
+int aac_setup_safw_adapter(struct aac_dev *dev)
+{
+	return aac_setup_safw_targets(dev);
+}
+
 int aac_get_adapter_info(struct aac_dev* dev)
 {
 	struct fib* fibptr;
@@ -1969,12 +2144,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
 	}
 
-	if (!dev->sync_mode && dev->sa_firmware &&
-		dev->supplement_adapter_info.virt_device_bus != 0xffff) {
-		/* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
-		rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
-	}
-
 	if (!dev->in_reset) {
 		char buffer[16];
 		tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -2739,14 +2908,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 		}
 	} else {  /* check for physical non-dasd devices */
 		bus = aac_logical_to_phys(scmd_channel(scsicmd));
-		if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
-			(dev->hba_map[bus][cid].expose
-				== AAC_HIDE_DISK)){
-			if (scsicmd->cmnd[0] == INQUIRY) {
-				scsicmd->result = DID_NO_CONNECT << 16;
-				goto scsi_done_ret;
-			}
-		}
 
 		if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
 			dev->hba_map[bus][cid].devtype
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d52265416da2..0095fcbd1c88 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -41,6 +41,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <scsi/scsi_host.h>
 
 /*------------------------------------------------------------------------------
  *              D E F I N E S
@@ -97,7 +98,7 @@ enum {
 #define PMC_GLOBAL_INT_BIT0		0x00000001
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50834
+# define AAC_DRIVER_BUILD 50877
 # define AAC_DRIVER_BRANCH "-custom"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -117,9 +118,13 @@ enum {
 /* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
 #define AAC_MAX_BUSES		5
 #define AAC_MAX_TARGETS		256
+#define AAC_BUS_TARGET_LOOP	(AAC_MAX_BUSES * AAC_MAX_TARGETS)
 #define AAC_MAX_NATIVE_SIZE	2048
 #define FW_ERROR_BUFFER_SIZE	512
 
+#define get_bus_number(x)	(x/AAC_MAX_TARGETS)
+#define get_target_number(x)	(x%AAC_MAX_TARGETS)
+
 /* Thor AIF events */
 #define SA_AIF_HOTPLUG		(1<<1)
 #define SA_AIF_HARDWARE		(1<<2)
@@ -1334,17 +1339,17 @@ struct fib {
 #define AAC_DEVTYPE_RAID_MEMBER	1
 #define AAC_DEVTYPE_ARC_RAW	2
 #define AAC_DEVTYPE_NATIVE_RAW	3
-#define AAC_EXPOSE_DISK		0
-#define AAC_HIDE_DISK		3
+
+#define AAC_SAFW_RESCAN_DELAY	(10 * HZ)
 
 struct aac_hba_map_info {
 	__le32	rmw_nexus;	/* nexus for native HBA devices */
 	u8	devtype;	/* device type */
-	u8	new_devtype;
 	u8	reset_state;	/* 0 - no reset, 1..x - */
 				/* after xth TM LUN reset */
 	u16	qd_limit;
-	u8	expose;		/*checks if to expose or not*/
+	u32	scan_counter;
+	struct aac_ciss_identify_pd *safw_identify_resp;
 };
 
 /*
@@ -1560,6 +1565,7 @@ struct aac_dev
 	spinlock_t		fib_lock;
 
 	struct mutex		ioctl_mutex;
+	struct mutex		scan_mutex;
 	struct aac_queue_block *queues;
 	/*
 	 *	The user API will use an IOCTL to register itself to receive
@@ -1605,6 +1611,7 @@ struct aac_dev
 	int			maximum_num_channels;
 	struct fsa_dev_info	*fsa_dev;
 	struct task_struct	*thread;
+	struct delayed_work	safw_rescan_work;
 	int			cardtype;
 	/*
 	 *	This lock will protect the two 32-bit
@@ -1668,9 +1675,11 @@ struct aac_dev
 	u32			vector_cap;	/* MSI-X vector capab.*/
 	int			msi_enabled;	/* MSI/MSI-X enabled */
 	atomic_t		msix_counter;
+	u32			scan_counter;
 	struct msix_entry	msixentry[AAC_MAX_MSIX];
 	struct aac_msix_ctx	aac_msix[AAC_MAX_MSIX]; /* context */
 	struct aac_hba_map_info	hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
+	struct aac_ciss_phys_luns_resp *safw_phys_luns;
 	u8			adapter_shutdown;
 	u32			handle_pci_error;
 	bool			init_reset;
@@ -2023,6 +2032,12 @@ struct aac_srb_reply
 	__le32		sense_data_size;
 	u8		sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
 };
+
+struct aac_srb_unit {
+	struct aac_srb		srb;
+	struct aac_srb_reply	srb_reply;
+};
+
 /*
  * SRB Flags
  */
@@ -2627,16 +2642,41 @@ static inline int aac_adapter_check_health(struct aac_dev *dev)
 	return (dev)->a_ops.adapter_check_health(dev);
 }
 
+
+int aac_scan_host(struct aac_dev *dev);
+
+static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
+{
+	schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+}
+
+static inline void aac_safw_rescan_worker(struct work_struct *work)
+{
+	struct aac_dev *dev = container_of(to_delayed_work(work),
+		struct aac_dev, safw_rescan_work);
+
+	wait_event(dev->scsi_host_ptr->host_wait,
+		!scsi_host_in_recovery(dev->scsi_host_ptr));
+
+	aac_scan_host(dev);
+}
+
+static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+{
+	if (dev->sa_firmware)
+		cancel_delayed_work_sync(&dev->safw_rescan_work);
+}
+
 /* SCp.phase values */
 #define AAC_OWNER_MIDLEVEL	0x101
 #define AAC_OWNER_LOWLEVEL	0x102
 #define AAC_OWNER_ERROR_HANDLER	0x103
 #define AAC_OWNER_FIRMWARE	0x106
 
+void aac_safw_rescan_worker(struct work_struct *work);
 int aac_acquire_irq(struct aac_dev *dev);
 void aac_free_irq(struct aac_dev *dev);
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
+int aac_setup_safw_adapter(struct aac_dev *dev);
 const char *aac_driverinfo(struct Scsi_Host *);
 void aac_fib_vector_assign(struct aac_dev *dev);
 struct fib *aac_fib_alloc(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9ab0fa959d83..a2b3430072c7 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1052,9 +1052,13 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
 	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
 		return -EFAULT;
 
+	dev->adapter_shutdown = 1;
+
+	mutex_unlock(&dev->ioctl_mutex);
 	retval = aac_reset_adapter(dev, 0, reset.reset_type);
-	return retval;
+	mutex_lock(&dev->ioctl_mutex);
 
+	return retval;
 }
 
 int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1bc623ad3faf..0dc7b5a4fea2 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -42,6 +42,8 @@
42#include <linux/completion.h> 42#include <linux/completion.h>
43#include <linux/mm.h> 43#include <linux/mm.h>
44#include <scsi/scsi_host.h> 44#include <scsi/scsi_host.h>
45#include <scsi/scsi_device.h>
46#include <scsi/scsi_cmnd.h>
45 47
46#include "aacraid.h" 48#include "aacraid.h"
47 49
@@ -284,6 +286,38 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
284 q->entries = qsize; 286 q->entries = qsize;
285} 287}
286 288
289static void aac_wait_for_io_completion(struct aac_dev *aac)
290{
291 unsigned long flagv = 0;
292 int i = 0;
293
294 for (i = 60; i; --i) {
295 struct scsi_device *dev;
296 struct scsi_cmnd *command;
297 int active = 0;
298
299 __shost_for_each_device(dev, aac->scsi_host_ptr) {
300 spin_lock_irqsave(&dev->list_lock, flagv);
301 list_for_each_entry(command, &dev->cmd_list, list) {
302 if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
303 active++;
304 break;
305 }
306 }
307 spin_unlock_irqrestore(&dev->list_lock, flagv);
308 if (active)
309 break;
310
311 }
312 /*
313 * We can exit If all the commands are complete
314 */
315 if (active == 0)
316 break;
317 ssleep(1);
318 }
319}
320
287/** 321/**
288 * aac_send_shutdown - shutdown an adapter 322 * aac_send_shutdown - shutdown an adapter
289 * @dev: Adapter to shutdown 323 * @dev: Adapter to shutdown
@@ -295,12 +329,10 @@ int aac_send_shutdown(struct aac_dev * dev)
295{ 329{
296 struct fib * fibctx; 330 struct fib * fibctx;
297 struct aac_close *cmd; 331 struct aac_close *cmd;
298 int status; 332 int status = 0;
299 333
300 fibctx = aac_fib_alloc(dev); 334 if (aac_adapter_check_health(dev))
301 if (!fibctx) 335 return status;
302 return -ENOMEM;
303 aac_fib_init(fibctx);
304 336
305 if (!dev->adapter_shutdown) { 337 if (!dev->adapter_shutdown) {
306 mutex_lock(&dev->ioctl_mutex); 338 mutex_lock(&dev->ioctl_mutex);
@@ -308,6 +340,13 @@ int aac_send_shutdown(struct aac_dev * dev)
308 mutex_unlock(&dev->ioctl_mutex); 340 mutex_unlock(&dev->ioctl_mutex);
309 } 341 }
310 342
343 aac_wait_for_io_completion(dev);
344
345 fibctx = aac_fib_alloc(dev);
346 if (!fibctx)
347 return -ENOMEM;
348 aac_fib_init(fibctx);
349
311 cmd = (struct aac_close *) fib_data(fibctx); 350 cmd = (struct aac_close *) fib_data(fibctx);
312 cmd->command = cpu_to_le32(VM_CloseAll); 351 cmd->command = cpu_to_le32(VM_CloseAll);
313 cmd->cid = cpu_to_le32(0xfffffffe); 352 cmd->cid = cpu_to_le32(0xfffffffe);
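
aac_send_shutdown() now gives up immediately on an unhealthy controller, waits for firmware-owned commands to drain via the new aac_wait_for_io_completion() (the loop moved here from aac_reset_adapter(), see commsup.c below), and only then builds the VM_CloseAll fib. The waiting idiom in isolation, with a hypothetical predicate standing in for the cmd_list walk:

#include <linux/delay.h>

/* Bounded poll: check once a second for at most @secs seconds instead
 * of sleeping forever. fw_owns_commands() is a made-up stand-in for
 * the AAC_OWNER_FIRMWARE scan in aac_wait_for_io_completion(). */
static bool wait_until_io_drains(struct aac_dev *aac, int secs)
{
	while (secs--) {
		if (!fw_owns_commands(aac))
			return true;
		ssleep(1);
	}
	return false;	/* timed out; the shutdown proceeds anyway */
}
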
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 80a8cb26cdea..84858d5c8257 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/crash_dump.h>
36#include <linux/types.h> 37#include <linux/types.h>
37#include <linux/sched.h> 38#include <linux/sched.h>
38#include <linux/pci.h> 39#include <linux/pci.h>
@@ -1629,28 +1630,28 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1629 command->scsi_done(command); 1630 command->scsi_done(command);
1630 } 1631 }
1631 /* 1632 /*
1632 * Any Device that was already marked offline needs to be cleaned up 1633 * Any Device that was already marked offline needs to be marked
1634 * running
1633 */ 1635 */
1634 __shost_for_each_device(dev, host) { 1636 __shost_for_each_device(dev, host) {
1635 if (!scsi_device_online(dev)) { 1637 if (!scsi_device_online(dev))
1636 sdev_printk(KERN_INFO, dev, "Removing offline device\n"); 1638 scsi_device_set_state(dev, SDEV_RUNNING);
1637 scsi_remove_device(dev);
1638 scsi_device_put(dev);
1639 }
1640 } 1639 }
1641 retval = 0; 1640 retval = 0;
1642 1641
1643out: 1642out:
1644 aac->in_reset = 0; 1643 aac->in_reset = 0;
1645 scsi_unblock_requests(host); 1644 scsi_unblock_requests(host);
1645
1646 /* 1646 /*
1647 * Issue bus rescan to catch any configuration that might have 1647 * Issue bus rescan to catch any configuration that might have
1648 * occurred 1648 * occurred
1649 */ 1649 */
1650 if (!retval) { 1650 if (!retval && !is_kdump_kernel()) {
1651 dev_info(&aac->pdev->dev, "Issuing bus rescan\n"); 1651 dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
1652 scsi_scan_host(host); 1652 aac_schedule_safw_scan_worker(aac);
1653 } 1653 }
1654
1654 if (jafo) { 1655 if (jafo) {
1655 spin_lock_irq(host->host_lock); 1656 spin_lock_irq(host->host_lock);
1656 } 1657 }
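
Two behaviour changes in this hunk: devices found offline after a reset are set back to SDEV_RUNNING instead of being removed, and the post-reset rescan is deferred to the safw worker, skipped entirely under kdump so the crash-capture kernel does not start churning device discovery. The kdump guard on its own:

#include <linux/crash_dump.h>

/* Defer the rescan to the delayed worker, but never in a kdump
 * kernel, where the environment should stay minimal. */
static void rescan_unless_kdump(struct aac_dev *aac)
{
	if (is_kdump_kernel())
		return;
	aac_schedule_safw_scan_worker(aac);
}
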
@@ -1681,31 +1682,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1681 */ 1682 */
1682 host = aac->scsi_host_ptr; 1683 host = aac->scsi_host_ptr;
1683 scsi_block_requests(host); 1684 scsi_block_requests(host);
1684 if (forced < 2) for (retval = 60; retval; --retval) {
1685 struct scsi_device * dev;
1686 struct scsi_cmnd * command;
1687 int active = 0;
1688
1689 __shost_for_each_device(dev, host) {
1690 spin_lock_irqsave(&dev->list_lock, flagv);
1691 list_for_each_entry(command, &dev->cmd_list, list) {
1692 if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1693 active++;
1694 break;
1695 }
1696 }
1697 spin_unlock_irqrestore(&dev->list_lock, flagv);
1698 if (active)
1699 break;
1700
1701 }
1702 /*
1703 * We can exit If all the commands are complete
1704 */
1705 if (active == 0)
1706 break;
1707 ssleep(1);
1708 }
1709 1685
1710 /* Quiesce build, flush cache, write through mode */ 1686 /* Quiesce build, flush cache, write through mode */
1711 if (forced < 2) 1687 if (forced < 2)
@@ -1874,42 +1850,124 @@ out:
1874 return BlinkLED; 1850 return BlinkLED;
1875} 1851}
1876 1852
1853static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
1854{
1855 return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
1856}
1857
1858static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
1859 int bus,
1860 int target)
1861{
1862 if (bus != CONTAINER_CHANNEL)
1863 bus = aac_phys_to_logical(bus);
1864
1865 return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
1866}
1867
1868static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
1869{
1870 if (bus != CONTAINER_CHANNEL)
1871 bus = aac_phys_to_logical(bus);
1872
1873 return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
1874}
1875
1876static void aac_put_safw_scsi_device(struct scsi_device *sdev)
1877{
1878 if (sdev)
1879 scsi_device_put(sdev);
1880}
1877 1881
1878static void aac_resolve_luns(struct aac_dev *dev) 1882static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
1879{ 1883{
1880 int bus, target, channel;
1881 struct scsi_device *sdev; 1884 struct scsi_device *sdev;
1882 u8 devtype;
1883 u8 new_devtype;
1884 1885
1885 for (bus = 0; bus < AAC_MAX_BUSES; bus++) { 1886 sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1886 for (target = 0; target < AAC_MAX_TARGETS; target++) { 1887 scsi_remove_device(sdev);
1888 aac_put_safw_scsi_device(sdev);
1889}
1887 1890
1888 if (bus == CONTAINER_CHANNEL) 1891static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
1889 channel = CONTAINER_CHANNEL; 1892 int bus, int target)
1890 else 1893{
1891 channel = aac_phys_to_logical(bus); 1894 return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
1895}
1892 1896
1893 devtype = dev->hba_map[bus][target].devtype; 1897static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
1894 new_devtype = dev->hba_map[bus][target].new_devtype; 1898{
1899 if (is_safw_raid_volume(dev, bus, target))
1900 return dev->fsa_dev[target].valid;
1901 else
1902 return aac_is_safw_scan_count_equal(dev, bus, target);
1903}
1895 1904
1896 sdev = scsi_device_lookup(dev->scsi_host_ptr, channel, 1905static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
1897 target, 0); 1906{
1907 int is_exposed = 0;
1908 struct scsi_device *sdev;
1898 1909
1899 if (!sdev && new_devtype) 1910 sdev = aac_lookup_safw_scsi_device(dev, bus, target);
1900 scsi_add_device(dev->scsi_host_ptr, channel, 1911 if (sdev)
1901 target, 0); 1912 is_exposed = 1;
1902 else if (sdev && new_devtype != devtype) 1913 aac_put_safw_scsi_device(sdev);
1903 scsi_remove_device(sdev);
1904 else if (sdev && new_devtype == devtype)
1905 scsi_rescan_device(&sdev->sdev_gendev);
1906 1914
1907 if (sdev) 1915 return is_exposed;
1908 scsi_device_put(sdev); 1916}
1909 1917
1910 dev->hba_map[bus][target].devtype = new_devtype; 1918static int aac_update_safw_host_devices(struct aac_dev *dev)
1911 } 1919{
1920 int i;
1921 int bus;
1922 int target;
1923 int is_exposed = 0;
1924 int rcode = 0;
1925
1926 rcode = aac_setup_safw_adapter(dev);
1927 if (unlikely(rcode < 0)) {
1928 goto out;
1912 } 1929 }
1930
1931 for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
1932
1933 bus = get_bus_number(i);
1934 target = get_target_number(i);
1935
1936 is_exposed = aac_is_safw_device_exposed(dev, bus, target);
1937
1938 if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
1939 aac_add_safw_device(dev, bus, target);
1940 else if (!aac_is_safw_target_valid(dev, bus, target) &&
1941 is_exposed)
1942 aac_remove_safw_device(dev, bus, target);
1943 }
1944out:
1945 return rcode;
1946}
1947
1948static int aac_scan_safw_host(struct aac_dev *dev)
1949{
1950 int rcode = 0;
1951
1952 rcode = aac_update_safw_host_devices(dev);
1953 if (rcode)
1954 aac_schedule_safw_scan_worker(dev);
1955
1956 return rcode;
1957}
1958
1959int aac_scan_host(struct aac_dev *dev)
1960{
1961 int rcode = 0;
1962
1963 mutex_lock(&dev->scan_mutex);
1964 if (dev->sa_firmware)
1965 rcode = aac_scan_safw_host(dev);
1966 else
1967 scsi_scan_host(dev->scsi_host_ptr);
1968 mutex_unlock(&dev->scan_mutex);
1969
1970 return rcode;
1913} 1971}
1914 1972
1915/** 1973/**
@@ -1922,10 +1980,8 @@ static void aac_resolve_luns(struct aac_dev *dev)
1922 */ 1980 */
1923static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr) 1981static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
1924{ 1982{
1925 int i, bus, target, container, rcode = 0; 1983 int i;
1926 u32 events = 0; 1984 u32 events = 0;
1927 struct fib *fib;
1928 struct scsi_device *sdev;
1929 1985
1930 if (fibptr->hbacmd_size & SA_AIF_HOTPLUG) 1986 if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
1931 events = SA_AIF_HOTPLUG; 1987 events = SA_AIF_HOTPLUG;
@@ -1947,44 +2003,8 @@ static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
1947 case SA_AIF_LDEV_CHANGE: 2003 case SA_AIF_LDEV_CHANGE:
1948 case SA_AIF_BPCFG_CHANGE: 2004 case SA_AIF_BPCFG_CHANGE:
1949 2005
1950 fib = aac_fib_alloc(dev); 2006 aac_scan_host(dev);
1951 if (!fib) { 2007
1952 pr_err("aac_handle_sa_aif: out of memory\n");
1953 return;
1954 }
1955 for (bus = 0; bus < AAC_MAX_BUSES; bus++)
1956 for (target = 0; target < AAC_MAX_TARGETS; target++)
1957 dev->hba_map[bus][target].new_devtype = 0;
1958
1959 rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
1960
1961 if (rcode != -ERESTARTSYS)
1962 aac_fib_free(fib);
1963
1964 aac_resolve_luns(dev);
1965
1966 if (events == SA_AIF_LDEV_CHANGE ||
1967 events == SA_AIF_BPCFG_CHANGE) {
1968 aac_get_containers(dev);
1969 for (container = 0; container <
1970 dev->maximum_num_containers; ++container) {
1971 sdev = scsi_device_lookup(dev->scsi_host_ptr,
1972 CONTAINER_CHANNEL,
1973 container, 0);
1974 if (dev->fsa_dev[container].valid && !sdev) {
1975 scsi_add_device(dev->scsi_host_ptr,
1976 CONTAINER_CHANNEL,
1977 container, 0);
1978 } else if (!dev->fsa_dev[container].valid &&
1979 sdev) {
1980 scsi_remove_device(sdev);
1981 scsi_device_put(sdev);
1982 } else if (sdev) {
1983 scsi_rescan_device(&sdev->sdev_gendev);
1984 scsi_device_put(sdev);
1985 }
1986 }
1987 }
1988 break; 2008 break;
1989 2009
1990 case SA_AIF_BPSTAT_CHANGE: 2010 case SA_AIF_BPSTAT_CHANGE:
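
aac_update_safw_host_devices() walks a single linear index rather than nested bus/target loops, and aac_scan_host() serialises all of this under the new scan_mutex. The get_bus_number()/get_target_number() helpers are defined elsewhere in the series; a definition consistent with their use here, with the bounds assumed from aacraid.h:

#define AAC_MAX_BUSES		5
#define AAC_MAX_TARGETS		256
#define AAC_BUS_TARGET_LOOP	(AAC_MAX_BUSES * AAC_MAX_TARGETS)

static inline int get_bus_number(int i)
{
	return i / AAC_MAX_TARGETS;	/* row of the (bus, target) grid */
}

static inline int get_target_number(int i)
{
	return i % AAC_MAX_TARGETS;	/* column within the bus */
}
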
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d55332de08f9..b3b931ab77eb 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -683,6 +683,9 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
683 u32 bus, cid; 683 u32 bus, cid;
684 int ret = FAILED; 684 int ret = FAILED;
685 685
686 if (aac_adapter_check_health(aac))
687 return ret;
688
686 bus = aac_logical_to_phys(scmd_channel(cmd)); 689 bus = aac_logical_to_phys(scmd_channel(cmd));
687 cid = scmd_id(cmd); 690 cid = scmd_id(cmd);
688 if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { 691 if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
@@ -690,7 +693,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
690 struct aac_hba_tm_req *tmf; 693 struct aac_hba_tm_req *tmf;
691 int status; 694 int status;
692 u64 address; 695 u64 address;
693 __le32 managed_request_id;
694 696
695 pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n", 697 pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
696 AAC_DRIVERNAME, 698 AAC_DRIVERNAME,
@@ -703,8 +705,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
703 (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) && 705 (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
704 (fib->callback_data == cmd)) { 706 (fib->callback_data == cmd)) {
705 found = 1; 707 found = 1;
706 managed_request_id = ((struct aac_hba_cmd_req *)
707 fib->hw_fib_va)->request_id;
708 break; 708 break;
709 } 709 }
710 } 710 }
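
The abort handler now refuses to run at all when the adapter is unhealthy, and the write-only managed_request_id local is dropped. The health gate as a standalone sketch; returning FAILED lets SCSI error handling escalate to reset rather than wait on a dead IOP:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Illustrative early-out only; the real handler goes on to the
 * device-type-specific abort when the adapter is healthy. */
static int sketch_eh_abort(struct scsi_cmnd *cmd)
{
	struct aac_dev *aac = shost_priv(cmd->device->host);

	if (aac_adapter_check_health(aac))
		return FAILED;

	/* ... normal abort path ... */
	return SUCCESS;
}
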
@@ -1375,18 +1375,15 @@ static ssize_t aac_store_reset_adapter(struct device *device,
1375 const char *buf, size_t count) 1375 const char *buf, size_t count)
1376{ 1376{
1377 int retval = -EACCES; 1377 int retval = -EACCES;
1378 int bled = 0;
1379 struct aac_dev *aac;
1380
1381 1378
1382 if (!capable(CAP_SYS_ADMIN)) 1379 if (!capable(CAP_SYS_ADMIN))
1383 return retval; 1380 return retval;
1384 1381
1385 aac = (struct aac_dev *)class_to_shost(device)->hostdata; 1382 retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
1386 bled = buf[0] == '!' ? 1:0; 1383 buf[0] == '!', IOP_HWSOFT_RESET);
1387 retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
1388 if (retval >= 0) 1384 if (retval >= 0)
1389 retval = count; 1385 retval = count;
1386
1390 return retval; 1387 return retval;
1391} 1388}
1392 1389
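
aac_store_reset_adapter() collapses to one call: shost_priv() replaces the open-coded hostdata cast, and buf[0] == '!' already evaluates to the 0/1 the forced argument wants, so the bled temporary goes away. The accessor in isolation:

#include <scsi/scsi_host.h>

/* shost_priv() returns the per-host private area reserved by
 * scsi_host_alloc(); equivalent to (struct aac_dev *)shost->hostdata. */
static struct aac_dev *aac_from_class_device(struct device *device)
{
	return shost_priv(class_to_shost(device));
}
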
@@ -1689,6 +1686,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1689 spin_lock_init(&aac->fib_lock); 1686 spin_lock_init(&aac->fib_lock);
1690 1687
1691 mutex_init(&aac->ioctl_mutex); 1688 mutex_init(&aac->ioctl_mutex);
1689 mutex_init(&aac->scan_mutex);
1690
1691 INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
1692 /* 1692 /*
1693 * Map in the registers from the adapter. 1693 * Map in the registers from the adapter.
1694 */ 1694 */
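
Probe now initialises scan_mutex (which serialises aac_scan_host()) and the delayed rescan worker used throughout this series. A lifecycle sketch; the ten-second requeue delay is illustrative, not the driver's actual constant:

#include <linux/workqueue.h>

static void sketch_init_rescan(struct aac_dev *aac)
{
	INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
}

static void sketch_kick_rescan(struct aac_dev *aac)
{
	/* runs aac_safw_rescan_worker(&aac->safw_rescan_work.work) later */
	schedule_delayed_work(&aac->safw_rescan_work, 10 * HZ);
}
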
@@ -1792,7 +1792,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1792 error = scsi_add_host(shost, &pdev->dev); 1792 error = scsi_add_host(shost, &pdev->dev);
1793 if (error) 1793 if (error)
1794 goto out_deinit; 1794 goto out_deinit;
1795 scsi_scan_host(shost); 1795
1796 aac_scan_host(aac);
1796 1797
1797 pci_enable_pcie_error_reporting(pdev); 1798 pci_enable_pcie_error_reporting(pdev);
1798 pci_save_state(pdev); 1799 pci_save_state(pdev);
@@ -1877,6 +1878,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
1877 struct aac_dev *aac = (struct aac_dev *)shost->hostdata; 1878 struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
1878 1879
1879 scsi_block_requests(shost); 1880 scsi_block_requests(shost);
1881 aac_cancel_safw_rescan_worker(aac);
1880 aac_send_shutdown(aac); 1882 aac_send_shutdown(aac);
1881 1883
1882 aac_release_resources(aac); 1884 aac_release_resources(aac);
@@ -1935,6 +1937,7 @@ static void aac_remove_one(struct pci_dev *pdev)
1935 struct Scsi_Host *shost = pci_get_drvdata(pdev); 1937 struct Scsi_Host *shost = pci_get_drvdata(pdev);
1936 struct aac_dev *aac = (struct aac_dev *)shost->hostdata; 1938 struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
1937 1939
1940 aac_cancel_safw_rescan_worker(aac);
1938 scsi_remove_host(shost); 1941 scsi_remove_host(shost);
1939 1942
1940 __aac_shutdown(aac); 1943 __aac_shutdown(aac);
@@ -1992,6 +1995,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
1992 aac->handle_pci_error = 1; 1995 aac->handle_pci_error = 1;
1993 1996
1994 scsi_block_requests(aac->scsi_host_ptr); 1997 scsi_block_requests(aac->scsi_host_ptr);
1998 aac_cancel_safw_rescan_worker(aac);
1995 aac_flush_ios(aac); 1999 aac_flush_ios(aac);
1996 aac_release_resources(aac); 2000 aac_release_resources(aac);
1997 2001
@@ -2076,7 +2080,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
2076 if (sdev->sdev_state == SDEV_OFFLINE) 2080 if (sdev->sdev_state == SDEV_OFFLINE)
2077 sdev->sdev_state = SDEV_RUNNING; 2081 sdev->sdev_state = SDEV_RUNNING;
2078 scsi_unblock_requests(aac->scsi_host_ptr); 2082 scsi_unblock_requests(aac->scsi_host_ptr);
2079 scsi_scan_host(aac->scsi_host_ptr); 2083 aac_scan_host(aac);
2080 pci_save_state(pdev); 2084 pci_save_state(pdev);
2081 2085
2082 dev_err(&pdev->dev, "aacraid: PCI error - resume\n"); 2086 dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
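
suspend, remove and the PCI error path all cancel the rescan worker before tearing anything down, so a queued rescan can never race a dying adapter, while resume paths funnel through aac_scan_host(). The teardown ordering as a sketch (aac_cancel_safw_rescan_worker() presumably wraps cancel_delayed_work_sync()):

#include <scsi/scsi_host.h>

static void sketch_teardown(struct aac_dev *aac, struct Scsi_Host *shost)
{
	aac_cancel_safw_rescan_worker(aac);	/* stop requeues first */
	scsi_remove_host(shost);		/* then the SCSI host */
	/* ... then adapter shutdown and resource release ... */
}
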
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 553922fed524..882f40353b96 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -329,6 +329,22 @@ int aac_sa_init(struct aac_dev *dev)
329 instance = dev->id; 329 instance = dev->id;
330 name = dev->name; 330 name = dev->name;
331 331
332 /*
333 * Fill in the function dispatch table.
334 */
335
336 dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
337 dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
338 dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
339 dev->a_ops.adapter_notify = aac_sa_notify_adapter;
340 dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
341 dev->a_ops.adapter_check_health = aac_sa_check_health;
342 dev->a_ops.adapter_restart = aac_sa_restart_adapter;
343 dev->a_ops.adapter_start = aac_sa_start_adapter;
344 dev->a_ops.adapter_intr = aac_sa_intr;
345 dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
346 dev->a_ops.adapter_ioremap = aac_sa_ioremap;
347
332 if (aac_sa_ioremap(dev, dev->base_size)) { 348 if (aac_sa_ioremap(dev, dev->base_size)) {
333 printk(KERN_WARNING "%s: unable to map adapter.\n", name); 349 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
334 goto error_iounmap; 350 goto error_iounmap;
@@ -363,22 +379,6 @@ int aac_sa_init(struct aac_dev *dev)
363 } 379 }
364 380
365 /* 381 /*
366 * Fill in the function dispatch table.
367 */
368
369 dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
370 dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
371 dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
372 dev->a_ops.adapter_notify = aac_sa_notify_adapter;
373 dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
374 dev->a_ops.adapter_check_health = aac_sa_check_health;
375 dev->a_ops.adapter_restart = aac_sa_restart_adapter;
376 dev->a_ops.adapter_start = aac_sa_start_adapter;
377 dev->a_ops.adapter_intr = aac_sa_intr;
378 dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
379 dev->a_ops.adapter_ioremap = aac_sa_ioremap;
380
381 /*
382 * First clear out all interrupts. Then enable the one's that 382 * First clear out all interrupts. Then enable the one's that
383 * we can handle. 383 * we can handle.
384 */ 384 */
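
The sa.c hunks are a pure move: the a_ops dispatch table is now filled in before aac_sa_ioremap() is first called, so any path that fails early and calls back through dev->a_ops finds valid pointers instead of NULL. The pattern in miniature:

/* Populate the dispatch table before the first step that can fail;
 * error handling may call through it. */
static int sketch_sa_init(struct aac_dev *dev)
{
	dev->a_ops.adapter_ioremap = aac_sa_ioremap;
	/* ... remaining a_ops assignments ... */

	if (aac_sa_ioremap(dev, dev->base_size))
		return -EIO;	/* safe: a_ops is already consistent */
	return 0;
}
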
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index a254b32eba39..f375f3557c18 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,52 +45,57 @@
45#include <linux/interrupt.h> 45#include <linux/interrupt.h>
46struct device_attribute; 46struct device_attribute;
47/*The limit of outstanding scsi command that firmware can handle*/ 47/*The limit of outstanding scsi command that firmware can handle*/
48#ifdef CONFIG_XEN 48#define ARCMSR_MAX_FREECCB_NUM 1024
49 #define ARCMSR_MAX_FREECCB_NUM 160 49#define ARCMSR_MAX_OUTSTANDING_CMD 1024
50#define ARCMSR_MAX_OUTSTANDING_CMD 155 50#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
51#else 51#define ARCMSR_MIN_OUTSTANDING_CMD 32
52 #define ARCMSR_MAX_FREECCB_NUM 320 52#define ARCMSR_DRIVER_VERSION "v1.40.00.04-20171130"
53#define ARCMSR_MAX_OUTSTANDING_CMD 255 53#define ARCMSR_SCSI_INITIATOR_ID 255
54#endif 54#define ARCMSR_MAX_XFER_SECTORS 512
55#define ARCMSR_DRIVER_VERSION "v1.30.00.22-20151126" 55#define ARCMSR_MAX_XFER_SECTORS_B 4096
56#define ARCMSR_SCSI_INITIATOR_ID 255 56#define ARCMSR_MAX_XFER_SECTORS_C 304
57#define ARCMSR_MAX_XFER_SECTORS 512 57#define ARCMSR_MAX_TARGETID 17
58#define ARCMSR_MAX_XFER_SECTORS_B 4096 58#define ARCMSR_MAX_TARGETLUN 8
59#define ARCMSR_MAX_XFER_SECTORS_C 304 59#define ARCMSR_MAX_CMD_PERLUN 128
60#define ARCMSR_MAX_TARGETID 17 60#define ARCMSR_DEFAULT_CMD_PERLUN 32
61#define ARCMSR_MAX_TARGETLUN 8 61#define ARCMSR_MIN_CMD_PERLUN 1
62#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD 62#define ARCMSR_MAX_QBUFFER 4096
63#define ARCMSR_MAX_QBUFFER 4096 63#define ARCMSR_DEFAULT_SG_ENTRIES 38
64#define ARCMSR_DEFAULT_SG_ENTRIES 38 64#define ARCMSR_MAX_HBB_POSTQUEUE 264
65#define ARCMSR_MAX_HBB_POSTQUEUE 264
66#define ARCMSR_MAX_ARC1214_POSTQUEUE 256 65#define ARCMSR_MAX_ARC1214_POSTQUEUE 256
67#define ARCMSR_MAX_ARC1214_DONEQUEUE 257 66#define ARCMSR_MAX_ARC1214_DONEQUEUE 257
68#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */ 67#define ARCMSR_MAX_HBE_DONEQUEUE 512
69#define ARCMSR_CDB_SG_PAGE_LENGTH 256 68#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
69#define ARCMSR_CDB_SG_PAGE_LENGTH 256
70#define ARCMST_NUM_MSIX_VECTORS 4 70#define ARCMST_NUM_MSIX_VECTORS 4
71#ifndef PCI_DEVICE_ID_ARECA_1880 71#ifndef PCI_DEVICE_ID_ARECA_1880
72#define PCI_DEVICE_ID_ARECA_1880 0x1880 72#define PCI_DEVICE_ID_ARECA_1880 0x1880
73 #endif 73#endif
74#ifndef PCI_DEVICE_ID_ARECA_1214 74#ifndef PCI_DEVICE_ID_ARECA_1214
75 #define PCI_DEVICE_ID_ARECA_1214 0x1214 75#define PCI_DEVICE_ID_ARECA_1214 0x1214
76#endif 76#endif
77#ifndef PCI_DEVICE_ID_ARECA_1203 77#ifndef PCI_DEVICE_ID_ARECA_1203
78 #define PCI_DEVICE_ID_ARECA_1203 0x1203 78#define PCI_DEVICE_ID_ARECA_1203 0x1203
79#endif 79#endif
80#ifndef PCI_DEVICE_ID_ARECA_1884
81#define PCI_DEVICE_ID_ARECA_1884 0x1884
82#endif
83#define ARCMSR_HOURS (1000 * 60 * 60 * 4)
84#define ARCMSR_MINUTES (1000 * 60 * 60)
80/* 85/*
81********************************************************************************** 86**********************************************************************************
82** 87**
83********************************************************************************** 88**********************************************************************************
84*/ 89*/
85#define ARC_SUCCESS 0 90#define ARC_SUCCESS 0
86#define ARC_FAILURE 1 91#define ARC_FAILURE 1
87/* 92/*
88******************************************************************************* 93*******************************************************************************
89** split 64bits dma addressing 94** split 64bits dma addressing
90******************************************************************************* 95*******************************************************************************
91*/ 96*/
92#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16) 97#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
93#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff) 98#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
94/* 99/*
95******************************************************************************* 100*******************************************************************************
96** MESSAGE CONTROL CODE 101** MESSAGE CONTROL CODE
@@ -130,7 +135,7 @@ struct CMD_MESSAGE_FIELD
130#define FUNCTION_SAY_HELLO 0x0807 135#define FUNCTION_SAY_HELLO 0x0807
131#define FUNCTION_SAY_GOODBYE 0x0808 136#define FUNCTION_SAY_GOODBYE 0x0808
132#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 137#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
133#define FUNCTION_GET_FIRMWARE_STATUS 0x080A 138#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
134#define FUNCTION_HARDWARE_RESET 0x080B 139#define FUNCTION_HARDWARE_RESET 0x080B
135/* ARECA IO CONTROL CODE*/ 140/* ARECA IO CONTROL CODE*/
136#define ARCMSR_MESSAGE_READ_RQBUFFER \ 141#define ARCMSR_MESSAGE_READ_RQBUFFER \
@@ -161,18 +166,18 @@ struct CMD_MESSAGE_FIELD
161** structure for holding DMA address data 166** structure for holding DMA address data
162************************************************************* 167*************************************************************
163*/ 168*/
164#define IS_DMA64 (sizeof(dma_addr_t) == 8) 169#define IS_DMA64 (sizeof(dma_addr_t) == 8)
165#define IS_SG64_ADDR 0x01000000 /* bit24 */ 170#define IS_SG64_ADDR 0x01000000 /* bit24 */
166struct SG32ENTRY 171struct SG32ENTRY
167{ 172{
168 __le32 length; 173 __le32 length;
169 __le32 address; 174 __le32 address;
170}__attribute__ ((packed)); 175}__attribute__ ((packed));
171struct SG64ENTRY 176struct SG64ENTRY
172{ 177{
173 __le32 length; 178 __le32 length;
174 __le32 address; 179 __le32 address;
175 __le32 addresshigh; 180 __le32 addresshigh;
176}__attribute__ ((packed)); 181}__attribute__ ((packed));
177/* 182/*
178******************************************************************** 183********************************************************************
@@ -191,50 +196,50 @@ struct QBUFFER
191*/ 196*/
192struct FIRMWARE_INFO 197struct FIRMWARE_INFO
193{ 198{
194 uint32_t signature; /*0, 00-03*/ 199 uint32_t signature; /*0, 00-03*/
195 uint32_t request_len; /*1, 04-07*/ 200 uint32_t request_len; /*1, 04-07*/
196 uint32_t numbers_queue; /*2, 08-11*/ 201 uint32_t numbers_queue; /*2, 08-11*/
197 uint32_t sdram_size; /*3, 12-15*/ 202 uint32_t sdram_size; /*3, 12-15*/
198 uint32_t ide_channels; /*4, 16-19*/ 203 uint32_t ide_channels; /*4, 16-19*/
199 char vendor[40]; /*5, 20-59*/ 204 char vendor[40]; /*5, 20-59*/
200 char model[8]; /*15, 60-67*/ 205 char model[8]; /*15, 60-67*/
201 char firmware_ver[16]; /*17, 68-83*/ 206 char firmware_ver[16]; /*17, 68-83*/
202 char device_map[16]; /*21, 84-99*/ 207 char device_map[16]; /*21, 84-99*/
203 uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/ 208 uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
204 uint8_t cfgSerial[16]; /*26,104-119*/ 209 uint8_t cfgSerial[16]; /*26,104-119*/
205 uint32_t cfgPicStatus; /*30,120-123*/ 210 uint32_t cfgPicStatus; /*30,120-123*/
206}; 211};
207/* signature of set and get firmware config */ 212/* signature of set and get firmware config */
208#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060 213#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
209#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063 214#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
210/* message code of inbound message register */ 215/* message code of inbound message register */
211#define ARCMSR_INBOUND_MESG0_NOP 0x00000000 216#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
212#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001 217#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
213#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002 218#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
214#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003 219#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
215#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004 220#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
216#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005 221#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
217#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006 222#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
218#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007 223#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
219#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008 224#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
220/* doorbell interrupt generator */ 225/* doorbell interrupt generator */
221#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001 226#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
222#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002 227#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
223#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001 228#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
224#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002 229#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
225/* ccb areca cdb flag */ 230/* ccb areca cdb flag */
226#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000 231#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
227#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000 232#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
228#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000 233#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
229#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000 234#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
230#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001 235#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
231/* outbound firmware ok */ 236/* outbound firmware ok */
232#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000 237#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
233/* ARC-1680 Bus Reset*/ 238/* ARC-1680 Bus Reset*/
234#define ARCMSR_ARC1680_BUS_RESET 0x00000003 239#define ARCMSR_ARC1680_BUS_RESET 0x00000003
235/* ARC-1880 Bus Reset*/ 240/* ARC-1880 Bus Reset*/
236#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024 241#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
237#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080 242#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
238 243
239/* 244/*
240************************************************************************ 245************************************************************************
@@ -277,9 +282,10 @@ struct FIRMWARE_INFO
277#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008 282#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008
278/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */ 283/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
279#define ARCMSR_MESSAGE_START_BGRB 0x00060008 284#define ARCMSR_MESSAGE_START_BGRB 0x00060008
285#define ARCMSR_MESSAGE_SYNC_TIMER 0x00080008
280#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008 286#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
281#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008 287#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
282#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008 288#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
283/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */ 289/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
284#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000 290#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
285/* ioctl transfer */ 291/* ioctl transfer */
@@ -288,7 +294,7 @@ struct FIRMWARE_INFO
288#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002 294#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
289#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004 295#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
290#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008 296#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
291#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010 297#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
292 298
293/* data tunnel buffer between user space program and its firmware */ 299/* data tunnel buffer between user space program and its firmware */
294/* user space data to iop 128bytes */ 300/* user space data to iop 128bytes */
@@ -313,12 +319,12 @@ struct FIRMWARE_INFO
313#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/ 319#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
314#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */ 320#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
315/* Host Interrupt Status */ 321/* Host Interrupt Status */
316#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001 322#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
317 /* 323 /*
318 ** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register. 324 ** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
319 ** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled). 325 ** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
320 */ 326 */
321#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004 327#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
322 /* 328 /*
323 ** Set if Outbound Doorbell register bits 30:1 have a non-zero 329 ** Set if Outbound Doorbell register bits 30:1 have a non-zero
324 ** value. This bit clears only when Outbound Doorbell bits 330 ** value. This bit clears only when Outbound Doorbell bits
@@ -331,7 +337,7 @@ struct FIRMWARE_INFO
331 ** Register (FIFO) is not empty. It clears when the Outbound 337 ** Register (FIFO) is not empty. It clears when the Outbound
332 ** Post List FIFO is empty. 338 ** Post List FIFO is empty.
333 */ 339 */
334#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010 340#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
335 /* 341 /*
336 ** This bit indicates a SAS interrupt from a source external to 342 ** This bit indicates a SAS interrupt from a source external to
337 ** the PCIe core. This bit is not maskable. 343 ** the PCIe core. This bit is not maskable.
@@ -340,17 +346,17 @@ struct FIRMWARE_INFO
340#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002 346#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
341#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004 347#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
342 /*inbound message 0 ready*/ 348 /*inbound message 0 ready*/
343#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008 349#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
344 /*more than 12 request completed in a time*/ 350 /*more than 12 request completed in a time*/
345#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010 351#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
346#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002 352#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
347 /*outbound DATA WRITE isr door bell clear*/ 353 /*outbound DATA WRITE isr door bell clear*/
348#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002 354#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
349#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004 355#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
350 /*outbound DATA READ isr door bell clear*/ 356 /*outbound DATA READ isr door bell clear*/
351#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004 357#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
352 /*outbound message 0 ready*/ 358 /*outbound message 0 ready*/
353#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008 359#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
354 /*outbound message cmd isr door bell clear*/ 360 /*outbound message cmd isr door bell clear*/
355#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008 361#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
356 /*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/ 362 /*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
@@ -407,18 +413,43 @@ struct FIRMWARE_INFO
407#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001 413#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
408/* 414/*
409******************************************************************************* 415*******************************************************************************
416** SPEC. for Areca Type E adapter
417*******************************************************************************
418*/
419#define ARCMSR_SIGNATURE_1884 0x188417D3
420
421#define ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK 0x00000002
422#define ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK 0x00000004
423#define ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
424
425#define ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 0x00000002
426#define ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 0x00000004
427#define ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
428
429#define ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK 0x80000000
430
431#define ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR 0x00000001
432#define ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
433#define ARCMSR_HBEMU_ALL_INTMASKENABLE 0x00000009
434
435/* ARC-1884 doorbell sync */
436#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
437#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
438#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080
439/*
440*******************************************************************************
410** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504) 441** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
411******************************************************************************* 442*******************************************************************************
412*/ 443*/
413struct ARCMSR_CDB 444struct ARCMSR_CDB
414{ 445{
415 uint8_t Bus; 446 uint8_t Bus;
416 uint8_t TargetID; 447 uint8_t TargetID;
417 uint8_t LUN; 448 uint8_t LUN;
418 uint8_t Function; 449 uint8_t Function;
419 uint8_t CdbLength; 450 uint8_t CdbLength;
420 uint8_t sgcount; 451 uint8_t sgcount;
421 uint8_t Flags; 452 uint8_t Flags;
422#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01 453#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
423#define ARCMSR_CDB_FLAG_BIOS 0x02 454#define ARCMSR_CDB_FLAG_BIOS 0x02
424#define ARCMSR_CDB_FLAG_WRITE 0x04 455#define ARCMSR_CDB_FLAG_WRITE 0x04
@@ -426,21 +457,21 @@ struct ARCMSR_CDB
426#define ARCMSR_CDB_FLAG_HEADQ 0x08 457#define ARCMSR_CDB_FLAG_HEADQ 0x08
427#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10 458#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
428 459
429 uint8_t msgPages; 460 uint8_t msgPages;
430 uint32_t msgContext; 461 uint32_t msgContext;
431 uint32_t DataLength; 462 uint32_t DataLength;
432 uint8_t Cdb[16]; 463 uint8_t Cdb[16];
433 uint8_t DeviceStatus; 464 uint8_t DeviceStatus;
434#define ARCMSR_DEV_CHECK_CONDITION 0x02 465#define ARCMSR_DEV_CHECK_CONDITION 0x02
435#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0 466#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
436#define ARCMSR_DEV_ABORTED 0xF1 467#define ARCMSR_DEV_ABORTED 0xF1
437#define ARCMSR_DEV_INIT_FAIL 0xF2 468#define ARCMSR_DEV_INIT_FAIL 0xF2
438 469
439 uint8_t SenseData[15]; 470 uint8_t SenseData[15];
440 union 471 union
441 { 472 {
442 struct SG32ENTRY sg32entry[1]; 473 struct SG32ENTRY sg32entry[1];
443 struct SG64ENTRY sg64entry[1]; 474 struct SG64ENTRY sg64entry[1];
444 } u; 475 } u;
445}; 476};
446/* 477/*
@@ -480,13 +511,13 @@ struct MessageUnit_B
480 uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; 511 uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
481 uint32_t postq_index; 512 uint32_t postq_index;
482 uint32_t doneq_index; 513 uint32_t doneq_index;
483 uint32_t __iomem *drv2iop_doorbell; 514 uint32_t __iomem *drv2iop_doorbell;
484 uint32_t __iomem *drv2iop_doorbell_mask; 515 uint32_t __iomem *drv2iop_doorbell_mask;
485 uint32_t __iomem *iop2drv_doorbell; 516 uint32_t __iomem *iop2drv_doorbell;
486 uint32_t __iomem *iop2drv_doorbell_mask; 517 uint32_t __iomem *iop2drv_doorbell_mask;
487 uint32_t __iomem *message_rwbuffer; 518 uint32_t __iomem *message_rwbuffer;
488 uint32_t __iomem *message_wbuffer; 519 uint32_t __iomem *message_wbuffer;
489 uint32_t __iomem *message_rbuffer; 520 uint32_t __iomem *message_rbuffer;
490}; 521};
491/* 522/*
492********************************************************************* 523*********************************************************************
@@ -506,7 +537,7 @@ struct MessageUnit_C{
506 uint32_t diagnostic_rw_data; /*0024 0027*/ 537 uint32_t diagnostic_rw_data; /*0024 0027*/
507 uint32_t diagnostic_rw_address_low; /*0028 002B*/ 538 uint32_t diagnostic_rw_address_low; /*0028 002B*/
508 uint32_t diagnostic_rw_address_high; /*002C 002F*/ 539 uint32_t diagnostic_rw_address_high; /*002C 002F*/
509 uint32_t host_int_status; /*0030 0033*/ 540 uint32_t host_int_status; /*0030 0033*/
510 uint32_t host_int_mask; /*0034 0037*/ 541 uint32_t host_int_mask; /*0034 0037*/
511 uint32_t dcr_data; /*0038 003B*/ 542 uint32_t dcr_data; /*0038 003B*/
512 uint32_t dcr_address; /*003C 003F*/ 543 uint32_t dcr_address; /*003C 003F*/
@@ -518,12 +549,12 @@ struct MessageUnit_C{
518 uint32_t iop_int_mask; /*0054 0057*/ 549 uint32_t iop_int_mask; /*0054 0057*/
519 uint32_t iop_inbound_queue_port; /*0058 005B*/ 550 uint32_t iop_inbound_queue_port; /*0058 005B*/
520 uint32_t iop_outbound_queue_port; /*005C 005F*/ 551 uint32_t iop_outbound_queue_port; /*005C 005F*/
521 uint32_t inbound_free_list_index; /*0060 0063*/ 552 uint32_t inbound_free_list_index; /*0060 0063*/
522 uint32_t inbound_post_list_index; /*0064 0067*/ 553 uint32_t inbound_post_list_index; /*0064 0067*/
523 uint32_t outbound_free_list_index; /*0068 006B*/ 554 uint32_t outbound_free_list_index; /*0068 006B*/
524 uint32_t outbound_post_list_index; /*006C 006F*/ 555 uint32_t outbound_post_list_index; /*006C 006F*/
525 uint32_t inbound_doorbell_clear; /*0070 0073*/ 556 uint32_t inbound_doorbell_clear; /*0070 0073*/
526 uint32_t i2o_message_unit_control; /*0074 0077*/ 557 uint32_t i2o_message_unit_control; /*0074 0077*/
527 uint32_t last_used_message_source_address_low; /*0078 007B*/ 558 uint32_t last_used_message_source_address_low; /*0078 007B*/
528 uint32_t last_used_message_source_address_high; /*007C 007F*/ 559 uint32_t last_used_message_source_address_high; /*007C 007F*/
529 uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/ 560 uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
@@ -531,7 +562,7 @@ struct MessageUnit_C{
531 uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/ 562 uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
532 uint32_t utility_A_int_counter_timer; /*0098 009B*/ 563 uint32_t utility_A_int_counter_timer; /*0098 009B*/
533 uint32_t outbound_doorbell; /*009C 009F*/ 564 uint32_t outbound_doorbell; /*009C 009F*/
534 uint32_t outbound_doorbell_clear; /*00A0 00A3*/ 565 uint32_t outbound_doorbell_clear; /*00A0 00A3*/
535 uint32_t message_source_address_index; /*00A4 00A7*/ 566 uint32_t message_source_address_index; /*00A4 00A7*/
536 uint32_t message_done_queue_index; /*00A8 00AB*/ 567 uint32_t message_done_queue_index; /*00A8 00AB*/
537 uint32_t reserved0; /*00AC 00AF*/ 568 uint32_t reserved0; /*00AC 00AF*/
@@ -553,10 +584,10 @@ struct MessageUnit_C{
553 uint32_t last_used_message_dest_address_high; /*00EC 00EF*/ 584 uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
554 uint32_t message_done_queue_base_address_low; /*00F0 00F3*/ 585 uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
555 uint32_t message_done_queue_base_address_high; /*00F4 00F7*/ 586 uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
556 uint32_t host_diagnostic; /*00F8 00FB*/ 587 uint32_t host_diagnostic; /*00F8 00FB*/
557 uint32_t write_sequence; /*00FC 00FF*/ 588 uint32_t write_sequence; /*00FC 00FF*/
558 uint32_t reserved1[34]; /*0100 0187*/ 589 uint32_t reserved1[34]; /*0100 0187*/
559 uint32_t reserved2[1950]; /*0188 1FFF*/ 590 uint32_t reserved2[1950]; /*0188 1FFF*/
560 uint32_t message_wbuffer[32]; /*2000 207F*/ 591 uint32_t message_wbuffer[32]; /*2000 207F*/
561 uint32_t reserved3[32]; /*2080 20FF*/ 592 uint32_t reserved3[32]; /*2080 20FF*/
562 uint32_t message_rbuffer[32]; /*2100 217F*/ 593 uint32_t message_rbuffer[32]; /*2100 217F*/
@@ -614,115 +645,208 @@ struct MessageUnit_D {
614 u32 __iomem *msgcode_rwbuffer; /* 0x2200 */ 645 u32 __iomem *msgcode_rwbuffer; /* 0x2200 */
615}; 646};
616/* 647/*
648*********************************************************************
649** Messaging Unit (MU) of Type E processor(LSI)
650*********************************************************************
651*/
652struct MessageUnit_E{
653 uint32_t iobound_doorbell; /*0000 0003*/
654 uint32_t write_sequence_3xxx; /*0004 0007*/
655 uint32_t host_diagnostic_3xxx; /*0008 000B*/
656 uint32_t posted_outbound_doorbell; /*000C 000F*/
657 uint32_t master_error_attribute; /*0010 0013*/
658 uint32_t master_error_address_low; /*0014 0017*/
659 uint32_t master_error_address_high; /*0018 001B*/
660 uint32_t hcb_size; /*001C 001F*/
661 uint32_t inbound_doorbell; /*0020 0023*/
662 uint32_t diagnostic_rw_data; /*0024 0027*/
663 uint32_t diagnostic_rw_address_low; /*0028 002B*/
664 uint32_t diagnostic_rw_address_high; /*002C 002F*/
665 uint32_t host_int_status; /*0030 0033*/
666 uint32_t host_int_mask; /*0034 0037*/
667 uint32_t dcr_data; /*0038 003B*/
668 uint32_t dcr_address; /*003C 003F*/
669 uint32_t inbound_queueport; /*0040 0043*/
670 uint32_t outbound_queueport; /*0044 0047*/
671 uint32_t hcb_pci_address_low; /*0048 004B*/
672 uint32_t hcb_pci_address_high; /*004C 004F*/
673 uint32_t iop_int_status; /*0050 0053*/
674 uint32_t iop_int_mask; /*0054 0057*/
675 uint32_t iop_inbound_queue_port; /*0058 005B*/
676 uint32_t iop_outbound_queue_port; /*005C 005F*/
677 uint32_t inbound_free_list_index; /*0060 0063*/
678 uint32_t inbound_post_list_index; /*0064 0067*/
679 uint32_t reply_post_producer_index; /*0068 006B*/
680 uint32_t reply_post_consumer_index; /*006C 006F*/
681 uint32_t inbound_doorbell_clear; /*0070 0073*/
682 uint32_t i2o_message_unit_control; /*0074 0077*/
683 uint32_t last_used_message_source_address_low; /*0078 007B*/
684 uint32_t last_used_message_source_address_high; /*007C 007F*/
685 uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
686 uint32_t message_dest_address_index; /*0090 0093*/
687 uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
688 uint32_t utility_A_int_counter_timer; /*0098 009B*/
689 uint32_t outbound_doorbell; /*009C 009F*/
690 uint32_t outbound_doorbell_clear; /*00A0 00A3*/
691 uint32_t message_source_address_index; /*00A4 00A7*/
692 uint32_t message_done_queue_index; /*00A8 00AB*/
693 uint32_t reserved0; /*00AC 00AF*/
694 uint32_t inbound_msgaddr0; /*00B0 00B3*/
695 uint32_t inbound_msgaddr1; /*00B4 00B7*/
696 uint32_t outbound_msgaddr0; /*00B8 00BB*/
697 uint32_t outbound_msgaddr1; /*00BC 00BF*/
698 uint32_t inbound_queueport_low; /*00C0 00C3*/
699 uint32_t inbound_queueport_high; /*00C4 00C7*/
700 uint32_t outbound_queueport_low; /*00C8 00CB*/
701 uint32_t outbound_queueport_high; /*00CC 00CF*/
702 uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
703 uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
704 uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
705 uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
706 uint32_t message_dest_queue_port_low; /*00E0 00E3*/
707 uint32_t message_dest_queue_port_high; /*00E4 00E7*/
708 uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
709 uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
710 uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
711 uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
712 uint32_t host_diagnostic; /*00F8 00FB*/
713 uint32_t write_sequence; /*00FC 00FF*/
714 uint32_t reserved1[34]; /*0100 0187*/
715 uint32_t reserved2[1950]; /*0188 1FFF*/
716 uint32_t message_wbuffer[32]; /*2000 207F*/
717 uint32_t reserved3[32]; /*2080 20FF*/
718 uint32_t message_rbuffer[32]; /*2100 217F*/
719 uint32_t reserved4[32]; /*2180 21FF*/
720 uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
721};
722
723typedef struct deliver_completeQ {
724 uint16_t cmdFlag;
725 uint16_t cmdSMID;
726 uint16_t cmdLMID; // reserved (0)
727 uint16_t cmdFlag2; // reserved (0)
728} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q;
729/*
617******************************************************************************* 730*******************************************************************************
618** Adapter Control Block 731** Adapter Control Block
619******************************************************************************* 732*******************************************************************************
620*/ 733*/
621struct AdapterControlBlock 734struct AdapterControlBlock
622{ 735{
623 uint32_t adapter_type; /* adapter A,B..... */ 736 uint32_t adapter_type; /* adapter A,B..... */
624 #define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */ 737#define ACB_ADAPTER_TYPE_A 0x00000000 /* hba I IOP */
625 #define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */ 738#define ACB_ADAPTER_TYPE_B 0x00000001 /* hbb M IOP */
626 #define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */ 739#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
627 #define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */ 740#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
628 u32 roundup_ccbsize; 741#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
629 struct pci_dev * pdev; 742 u32 roundup_ccbsize;
630 struct Scsi_Host * host; 743 struct pci_dev * pdev;
631 unsigned long vir2phy_offset; 744 struct Scsi_Host * host;
745 unsigned long vir2phy_offset;
632 /* Offset is used in making arc cdb physical to virtual calculations */ 746 /* Offset is used in making arc cdb physical to virtual calculations */
633 uint32_t outbound_int_enable; 747 uint32_t outbound_int_enable;
634 uint32_t cdb_phyaddr_hi32; 748 uint32_t cdb_phyaddr_hi32;
635 uint32_t reg_mu_acc_handle0; 749 uint32_t reg_mu_acc_handle0;
636 spinlock_t eh_lock; 750 spinlock_t eh_lock;
637 spinlock_t ccblist_lock; 751 spinlock_t ccblist_lock;
638 spinlock_t postq_lock; 752 spinlock_t postq_lock;
639 spinlock_t doneq_lock; 753 spinlock_t doneq_lock;
640 spinlock_t rqbuffer_lock; 754 spinlock_t rqbuffer_lock;
641 spinlock_t wqbuffer_lock; 755 spinlock_t wqbuffer_lock;
642 union { 756 union {
643 struct MessageUnit_A __iomem *pmuA; 757 struct MessageUnit_A __iomem *pmuA;
644 struct MessageUnit_B *pmuB; 758 struct MessageUnit_B *pmuB;
645 struct MessageUnit_C __iomem *pmuC; 759 struct MessageUnit_C __iomem *pmuC;
646 struct MessageUnit_D *pmuD; 760 struct MessageUnit_D *pmuD;
761 struct MessageUnit_E __iomem *pmuE;
647 }; 762 };
648 /* message unit ATU inbound base address0 */ 763 /* message unit ATU inbound base address0 */
649 void __iomem *mem_base0; 764 void __iomem *mem_base0;
650 void __iomem *mem_base1; 765 void __iomem *mem_base1;
651 uint32_t acb_flags; 766 uint32_t acb_flags;
652 u16 dev_id; 767 u16 dev_id;
653 uint8_t adapter_index; 768 uint8_t adapter_index;
654 #define ACB_F_SCSISTOPADAPTER 0x0001 769#define ACB_F_SCSISTOPADAPTER 0x0001
655 #define ACB_F_MSG_STOP_BGRB 0x0002 770#define ACB_F_MSG_STOP_BGRB 0x0002
656 /* stop RAID background rebuild */ 771/* stop RAID background rebuild */
657 #define ACB_F_MSG_START_BGRB 0x0004 772#define ACB_F_MSG_START_BGRB 0x0004
658 /* stop RAID background rebuild */ 773/* stop RAID background rebuild */
659 #define ACB_F_IOPDATA_OVERFLOW 0x0008 774#define ACB_F_IOPDATA_OVERFLOW 0x0008
660 /* iop message data rqbuffer overflow */ 775/* iop message data rqbuffer overflow */
661 #define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010 776#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
662 /* message clear wqbuffer */ 777/* message clear wqbuffer */
663 #define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020 778#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
664 /* message clear rqbuffer */ 779/* message clear rqbuffer */
665 #define ACB_F_MESSAGE_WQBUFFER_READED 0x0040 780#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
666 #define ACB_F_BUS_RESET 0x0080 781#define ACB_F_BUS_RESET 0x0080
667 #define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */ 782#define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
668 783
669 #define ACB_F_IOP_INITED 0x0100 784#define ACB_F_IOP_INITED 0x0100
670 /* iop init */ 785/* iop init */
671 #define ACB_F_ABORT 0x0200 786#define ACB_F_ABORT 0x0200
672 #define ACB_F_FIRMWARE_TRAP 0x0400 787#define ACB_F_FIRMWARE_TRAP 0x0400
673 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; 788#define ACB_F_MSG_GET_CONFIG 0x1000
789 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
674 /* used for memory free */ 790 /* used for memory free */
675 struct list_head ccb_free_list; 791 struct list_head ccb_free_list;
676 /* head of free ccb list */ 792 /* head of free ccb list */
677 793
678 atomic_t ccboutstandingcount; 794 atomic_t ccboutstandingcount;
679 /*The present outstanding command number that in the IOP that 795 /*The present outstanding command number that in the IOP that
680 waiting for being handled by FW*/ 796 waiting for being handled by FW*/
681 797
682 void * dma_coherent; 798 void * dma_coherent;
683 /* dma_coherent used for memory free */ 799 /* dma_coherent used for memory free */
684 dma_addr_t dma_coherent_handle; 800 dma_addr_t dma_coherent_handle;
685 /* dma_coherent_handle used for memory free */ 801 /* dma_coherent_handle used for memory free */
686 dma_addr_t dma_coherent_handle2; 802 dma_addr_t dma_coherent_handle2;
687 void *dma_coherent2; 803 void *dma_coherent2;
688 unsigned int uncache_size; 804 unsigned int uncache_size;
689 uint8_t rqbuffer[ARCMSR_MAX_QBUFFER]; 805 uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
690 /* data collection buffer for read from 80331 */ 806 /* data collection buffer for read from 80331 */
691 int32_t rqbuf_getIndex; 807 int32_t rqbuf_getIndex;
692 /* first of read buffer */ 808 /* first of read buffer */
693 int32_t rqbuf_putIndex; 809 int32_t rqbuf_putIndex;
694 /* last of read buffer */ 810 /* last of read buffer */
695 uint8_t wqbuffer[ARCMSR_MAX_QBUFFER]; 811 uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
696 /* data collection buffer for write to 80331 */ 812 /* data collection buffer for write to 80331 */
697 int32_t wqbuf_getIndex; 813 int32_t wqbuf_getIndex;
698 /* first of write buffer */ 814 /* first of write buffer */
699 int32_t wqbuf_putIndex; 815 int32_t wqbuf_putIndex;
700 /* last of write buffer */ 816 /* last of write buffer */
701 uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN]; 817 uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
702 /* id0 ..... id15, lun0...lun7 */ 818 /* id0 ..... id15, lun0...lun7 */
703#define ARECA_RAID_GONE 0x55 819#define ARECA_RAID_GONE 0x55
704#define ARECA_RAID_GOOD 0xaa 820#define ARECA_RAID_GOOD 0xaa
705 uint32_t num_resets; 821 uint32_t num_resets;
706 uint32_t num_aborts; 822 uint32_t num_aborts;
707 uint32_t signature; 823 uint32_t signature;
708 uint32_t firm_request_len; 824 uint32_t firm_request_len;
709 uint32_t firm_numbers_queue; 825 uint32_t firm_numbers_queue;
710 uint32_t firm_sdram_size; 826 uint32_t firm_sdram_size;
711 uint32_t firm_hd_channels; 827 uint32_t firm_hd_channels;
712 uint32_t firm_cfg_version; 828 uint32_t firm_cfg_version;
713 char firm_model[12]; 829 char firm_model[12];
714 char firm_version[20]; 830 char firm_version[20];
715 char device_map[20]; /*21,84-99*/ 831 char device_map[20]; /*21,84-99*/
716 struct work_struct arcmsr_do_message_isr_bh; 832 struct work_struct arcmsr_do_message_isr_bh;
717 struct timer_list eternal_timer; 833 struct timer_list eternal_timer;
718 unsigned short fw_flag; 834 unsigned short fw_flag;
719 #define FW_NORMAL 0x0000 835#define FW_NORMAL 0x0000
720 #define FW_BOG 0x0001 836#define FW_BOG 0x0001
721 #define FW_DEADLOCK 0x0010 837#define FW_DEADLOCK 0x0010
722 atomic_t rq_map_token; 838 atomic_t rq_map_token;
723 atomic_t ante_token_value; 839 atomic_t ante_token_value;
724 uint32_t maxOutstanding; 840 uint32_t maxOutstanding;
725 int vector_count; 841 int vector_count;
842 uint32_t maxFreeCCB;
843 struct timer_list refresh_timer;
844 uint32_t doneq_index;
845 uint32_t ccbsize;
846 uint32_t in_doorbell;
847 uint32_t out_doorbell;
848 uint32_t completionQ_entry;
849 pCompletion_Q pCompletionQ;
726};/* HW_DEVICE_EXTENSION */ 850};/* HW_DEVICE_EXTENSION */
727/* 851/*
728******************************************************************************* 852*******************************************************************************
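
The new control-block fields (pCompletionQ, doneq_index, completionQ_entry, the doorbell shadows) together with the 16-bit cmdSMID in each deliver_completeQ entry describe a producer/consumer reply ring for the Type E hardware. A hedged sketch of draining it; the ISR framing and the exact completion step are inferred, not quoted from the driver:

static void sketch_drain_completionQ(struct AdapterControlBlock *acb,
				     u32 producer)
{
	while (acb->doneq_index != producer) {
		struct deliver_completeQ *entry =
			&acb->pCompletionQ[acb->doneq_index];
		struct CommandControlBlock *ccb =
			acb->pccb_pool[entry->cmdSMID];

		ccb->startdone = ARCMSR_CCB_DONE;
		/* ... complete ccb->pcmd back to the midlayer ... */

		if (++acb->doneq_index >= acb->completionQ_entry)
			acb->doneq_index = 0;	/* wrap the ring */
	}
}
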
@@ -732,29 +856,30 @@ struct AdapterControlBlock
732*/ 856*/
733struct CommandControlBlock{ 857struct CommandControlBlock{
734 /*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/ 858 /*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
735 struct list_head list; /*x32: 8byte, x64: 16byte*/ 859 struct list_head list; /*x32: 8byte, x64: 16byte*/
736 struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */ 860 struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
737 struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/ 861 struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
738 uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/ 862 uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
739 uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/ 863 uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
740 uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/ 864 uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
741 #define CCB_FLAG_READ 0x0000 865#define CCB_FLAG_READ 0x0000
742 #define CCB_FLAG_WRITE 0x0001 866#define CCB_FLAG_WRITE 0x0001
743 #define CCB_FLAG_ERROR 0x0002 867#define CCB_FLAG_ERROR 0x0002
744 #define CCB_FLAG_FLUSHCACHE 0x0004 868#define CCB_FLAG_FLUSHCACHE 0x0004
745 #define CCB_FLAG_MASTER_ABORTED 0x0008 869#define CCB_FLAG_MASTER_ABORTED 0x0008
746 uint16_t startdone; /*x32:2byte,x32:2byte*/ 870 uint16_t startdone; /*x32:2byte,x32:2byte*/
747 #define ARCMSR_CCB_DONE 0x0000 871#define ARCMSR_CCB_DONE 0x0000
748 #define ARCMSR_CCB_START 0x55AA 872#define ARCMSR_CCB_START 0x55AA
749 #define ARCMSR_CCB_ABORTED 0xAA55 873#define ARCMSR_CCB_ABORTED 0xAA55
750 #define ARCMSR_CCB_ILLEGAL 0xFFFF 874#define ARCMSR_CCB_ILLEGAL 0xFFFF
751 #if BITS_PER_LONG == 64 875 uint32_t smid;
876#if BITS_PER_LONG == 64
752 /* ======================512+64 bytes======================== */ 877 /* ======================512+64 bytes======================== */
753 uint32_t reserved[5]; /*24 byte*/ 878 uint32_t reserved[4]; /*16 byte*/
754 #else 879#else
755 /* ======================512+32 bytes======================== */ 880 /* ======================512+32 bytes======================== */
756 uint32_t reserved; /*8 byte*/ 881 // uint32_t reserved; /*4 byte*/
757 #endif 882#endif
758 /* ======================================================= */ 883 /* ======================================================= */
759 struct ARCMSR_CDB arcmsr_cdb; 884 struct ARCMSR_CDB arcmsr_cdb;
760}; 885};
@@ -788,13 +913,13 @@ struct SENSE_DATA
788** Outbound Interrupt Status Register - OISR 913** Outbound Interrupt Status Register - OISR
789******************************************************************************* 914*******************************************************************************
790*/ 915*/
791#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30 916#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
792#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10 917#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
793#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08 918#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
794#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04 919#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
795#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02 920#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
796#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01 921#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
797#define ARCMSR_MU_OUTBOUND_HANDLE_INT \ 922#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
798 (ARCMSR_MU_OUTBOUND_MESSAGE0_INT \ 923 (ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
799 |ARCMSR_MU_OUTBOUND_MESSAGE1_INT \ 924 |ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
800 |ARCMSR_MU_OUTBOUND_DOORBELL_INT \ 925 |ARCMSR_MU_OUTBOUND_DOORBELL_INT \
@@ -805,13 +930,13 @@ struct SENSE_DATA
805** Outbound Interrupt Mask Register - OIMR 930** Outbound Interrupt Mask Register - OIMR
806******************************************************************************* 931*******************************************************************************
807*/ 932*/
808#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34 933#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
809#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10 934#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
810#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08 935#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
811#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04 936#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
812#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02 937#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
813#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01 938#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
814#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F 939#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
815 940
816extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *); 941extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
817extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *, 942extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 21f6421536a0..75e828bd30e3 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -75,6 +75,26 @@ MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver
75MODULE_LICENSE("Dual BSD/GPL"); 75MODULE_LICENSE("Dual BSD/GPL");
76MODULE_VERSION(ARCMSR_DRIVER_VERSION); 76MODULE_VERSION(ARCMSR_DRIVER_VERSION);
77 77
78static int msix_enable = 1;
79module_param(msix_enable, int, S_IRUGO);
80MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");
81
82static int msi_enable = 1;
83module_param(msi_enable, int, S_IRUGO);
84MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");
85
86static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
87module_param(host_can_queue, int, S_IRUGO);
88MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");
89
90static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
91module_param(cmd_per_lun, int, S_IRUGO);
92MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
93
94static int set_date_time = 0;
95module_param(set_date_time, int, S_IRUGO);
96MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
97
78#define ARCMSR_SLEEPTIME 10 98#define ARCMSR_SLEEPTIME 10
79#define ARCMSR_RETRYCOUNT 12 99#define ARCMSR_RETRYCOUNT 12
80 100
@@ -102,19 +122,19 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
102static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb); 122static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
103static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb); 123static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
104static void arcmsr_request_device_map(struct timer_list *t); 124static void arcmsr_request_device_map(struct timer_list *t);
105static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
106static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
107static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
108static void arcmsr_message_isr_bh_fn(struct work_struct *work); 125static void arcmsr_message_isr_bh_fn(struct work_struct *work);
109static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb); 126static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
110static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); 127static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
111static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB); 128static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
112static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb); 129static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
130static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
131static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
113static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); 132static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
114static const char *arcmsr_info(struct Scsi_Host *); 133static const char *arcmsr_info(struct Scsi_Host *);
115static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); 134static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
116static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); 135static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
117static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb); 136static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
137static void arcmsr_set_iop_datetime(struct timer_list *);
118static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) 138static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
119{ 139{
120 if (queue_depth > ARCMSR_MAX_CMD_PERLUN) 140 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -127,15 +147,15 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
127 .name = "Areca SAS/SATA RAID driver", 147 .name = "Areca SAS/SATA RAID driver",
128 .info = arcmsr_info, 148 .info = arcmsr_info,
129 .queuecommand = arcmsr_queue_command, 149 .queuecommand = arcmsr_queue_command,
130 .eh_abort_handler = arcmsr_abort, 150 .eh_abort_handler = arcmsr_abort,
131 .eh_bus_reset_handler = arcmsr_bus_reset, 151 .eh_bus_reset_handler = arcmsr_bus_reset,
132 .bios_param = arcmsr_bios_param, 152 .bios_param = arcmsr_bios_param,
133 .change_queue_depth = arcmsr_adjust_disk_queue_depth, 153 .change_queue_depth = arcmsr_adjust_disk_queue_depth,
134 .can_queue = ARCMSR_MAX_OUTSTANDING_CMD, 154 .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
135 .this_id = ARCMSR_SCSI_INITIATOR_ID, 155 .this_id = ARCMSR_SCSI_INITIATOR_ID,
136 .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES, 156 .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
137 .max_sectors = ARCMSR_MAX_XFER_SECTORS_C, 157 .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
138 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN, 158 .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
139 .use_clustering = ENABLE_CLUSTERING, 159 .use_clustering = ENABLE_CLUSTERING,
140 .shost_attrs = arcmsr_host_attrs, 160 .shost_attrs = arcmsr_host_attrs,
141 .no_write_same = 1, 161 .no_write_same = 1,
@@ -184,13 +204,15 @@ static struct pci_device_id arcmsr_device_id_table[] = {
184 .driver_data = ACB_ADAPTER_TYPE_A}, 204 .driver_data = ACB_ADAPTER_TYPE_A},
185 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880), 205 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
186 .driver_data = ACB_ADAPTER_TYPE_C}, 206 .driver_data = ACB_ADAPTER_TYPE_C},
207 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
208 .driver_data = ACB_ADAPTER_TYPE_E},
187 {0, 0}, /* Terminating entry */ 209 {0, 0}, /* Terminating entry */
188}; 210};
189MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table); 211MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
190 212
191static struct pci_driver arcmsr_pci_driver = { 213static struct pci_driver arcmsr_pci_driver = {
192 .name = "arcmsr", 214 .name = "arcmsr",
193 .id_table = arcmsr_device_id_table, 215 .id_table = arcmsr_device_id_table,
194 .probe = arcmsr_probe, 216 .probe = arcmsr_probe,
195 .remove = arcmsr_remove, 217 .remove = arcmsr_remove,
196 .suspend = arcmsr_suspend, 218 .suspend = arcmsr_suspend,
@@ -206,7 +228,8 @@ static void arcmsr_free_mu(struct AdapterControlBlock *acb)
206{ 228{
207 switch (acb->adapter_type) { 229 switch (acb->adapter_type) {
208 case ACB_ADAPTER_TYPE_B: 230 case ACB_ADAPTER_TYPE_B:
209 case ACB_ADAPTER_TYPE_D: { 231 case ACB_ADAPTER_TYPE_D:
232 case ACB_ADAPTER_TYPE_E: {
210 dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize, 233 dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
211 acb->dma_coherent2, acb->dma_coherent_handle2); 234 acb->dma_coherent2, acb->dma_coherent_handle2);
212 break; 235 break;
@@ -271,6 +294,20 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
271 acb->mem_base0 = mem_base0; 294 acb->mem_base0 = mem_base0;
272 break; 295 break;
273 } 296 }
297 case ACB_ADAPTER_TYPE_E: {
298 acb->pmuE = ioremap(pci_resource_start(pdev, 1),
299 pci_resource_len(pdev, 1));
300 if (!acb->pmuE) {
301		pr_notice("arcmsr%d: memory mapping region failed\n",
302 acb->host->host_no);
303 return false;
304 }
305 writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
306 writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
307 acb->in_doorbell = 0;
308 acb->out_doorbell = 0;
309 break;
310 }
274 } 311 }
275 return true; 312 return true;
276} 313}
@@ -295,6 +332,9 @@ static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
295 case ACB_ADAPTER_TYPE_D: 332 case ACB_ADAPTER_TYPE_D:
296 iounmap(acb->mem_base0); 333 iounmap(acb->mem_base0);
297 break; 334 break;
335 case ACB_ADAPTER_TYPE_E:
336 iounmap(acb->pmuE);
337 break;
298 } 338 }
299} 339}
300 340
@@ -408,6 +448,24 @@ static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
408 return false; 448 return false;
409} 449}
410 450
451static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
452{
453 int i;
454 uint32_t read_doorbell;
455 struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
456
457 for (i = 0; i < 2000; i++) {
458 read_doorbell = readl(&phbcmu->iobound_doorbell);
459 if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
460 writel(0, &phbcmu->host_int_status); /*clear interrupt*/
461 pACB->in_doorbell = read_doorbell;
462 return true;
463 }
464 msleep(10);
465 } /* max 20 seconds */
466 return false;
467}
468
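
Worth noting: unlike types A-D there is no write-one-to-clear status bit here; each side owns one doorbell word, flips a bit to signal, and the peer detects the flip by XOR against a cached copy (pACB->in_doorbell for IOP-to-driver, pACB->out_doorbell for driver-to-IOP). A condensed sketch of that convention, using only names this patch introduces:

    /* sketch: type E XOR-toggle doorbell handshake */
    static bool hbaE_msg_done(struct AdapterControlBlock *acb)
    {
            struct MessageUnit_E __iomem *reg = acb->pmuE;
            uint32_t db = readl(&reg->iobound_doorbell);

            if ((db ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
                    acb->in_doorbell = db;  /* consume the edge */
                    return true;
            }
            return false;
    }
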
411static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb) 469static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
412{ 470{
413 struct MessageUnit_A __iomem *reg = acb->pmuA; 471 struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -475,6 +533,24 @@ static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
475 } while (retry_count != 0); 533 } while (retry_count != 0);
476} 534}
477 535
536static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
537{
538 int retry_count = 30;
539 struct MessageUnit_E __iomem *reg = pACB->pmuE;
540
541 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
542 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
543 writel(pACB->out_doorbell, &reg->iobound_doorbell);
544 do {
545 if (arcmsr_hbaE_wait_msgint_ready(pACB))
546 break;
547 retry_count--;
548 pr_notice("arcmsr%d: wait 'flush adapter "
549 "cache' timeout, retry count down = %d\n",
550 pACB->host->host_no, retry_count);
551 } while (retry_count != 0);
552}
553
478static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 554static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
479{ 555{
480 switch (acb->adapter_type) { 556 switch (acb->adapter_type) {
@@ -495,6 +571,9 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
495 case ACB_ADAPTER_TYPE_D: 571 case ACB_ADAPTER_TYPE_D:
496 arcmsr_hbaD_flush_cache(acb); 572 arcmsr_hbaD_flush_cache(acb);
497 break; 573 break;
574 case ACB_ADAPTER_TYPE_E:
575 arcmsr_hbaE_flush_cache(acb);
576 break;
498 } 577 }
499} 578}
500 579
@@ -577,6 +656,23 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
577 reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER); 656 reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
578 } 657 }
579 break; 658 break;
659 case ACB_ADAPTER_TYPE_E: {
660 uint32_t completeQ_size;
661 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
662 acb->roundup_ccbsize = roundup(completeQ_size, 32);
663 dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
664 &dma_coherent_handle, GFP_KERNEL);
665 if (!dma_coherent){
666 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
667 return false;
668 }
669 acb->dma_coherent_handle2 = dma_coherent_handle;
670 acb->dma_coherent2 = dma_coherent;
671 acb->pCompletionQ = dma_coherent;
672 acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
673 acb->doneq_index = 0;
674 }
675 break;
580 default: 676 default:
581 break; 677 break;
582 } 678 }
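
For type E the second coherent region holds the reply (completion) queue rather than message-unit state; completionQ_entry is simply how many deliver_completeQ slots fit in the rounded-up allocation, so it can exceed ARCMSR_MAX_HBE_DONEQUEUE thanks to the +128 slack. A worked pass of the arithmetic, assuming an 8-byte deliver_completeQ purely for illustration (the real size comes from arcmsr.h):

    /* sketch, with sizeof(struct deliver_completeQ) assumed to be 8 */
    uint32_t bytes = 8 * ARCMSR_MAX_HBE_DONEQUEUE + 128;  /* queue + slack */
    bytes = roundup(bytes, 32);      /* what acb->roundup_ccbsize records */
    uint32_t entries = bytes / 8;    /* acb->completionQ_entry */
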
@@ -610,7 +706,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
610 acb->host->max_sectors = max_xfer_len/512; 706 acb->host->max_sectors = max_xfer_len/512;
611 acb->host->sg_tablesize = max_sg_entrys; 707 acb->host->sg_tablesize = max_sg_entrys;
612 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32); 708 roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
613 acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM; 709 acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
614 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); 710 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
615 if(!dma_coherent){ 711 if(!dma_coherent){
616 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no); 712 printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -619,9 +715,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
619 acb->dma_coherent = dma_coherent; 715 acb->dma_coherent = dma_coherent;
620 acb->dma_coherent_handle = dma_coherent_handle; 716 acb->dma_coherent_handle = dma_coherent_handle;
621 memset(dma_coherent, 0, acb->uncache_size); 717 memset(dma_coherent, 0, acb->uncache_size);
718 acb->ccbsize = roundup_ccbsize;
622 ccb_tmp = dma_coherent; 719 ccb_tmp = dma_coherent;
623 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; 720 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
624 for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){ 721 for(i = 0; i < acb->maxFreeCCB; i++){
625 cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); 722 cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
626 switch (acb->adapter_type) { 723 switch (acb->adapter_type) {
627 case ACB_ADAPTER_TYPE_A: 724 case ACB_ADAPTER_TYPE_A:
@@ -630,11 +727,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
630 break; 727 break;
631 case ACB_ADAPTER_TYPE_C: 728 case ACB_ADAPTER_TYPE_C:
632 case ACB_ADAPTER_TYPE_D: 729 case ACB_ADAPTER_TYPE_D:
730 case ACB_ADAPTER_TYPE_E:
633 ccb_tmp->cdb_phyaddr = cdb_phyaddr; 731 ccb_tmp->cdb_phyaddr = cdb_phyaddr;
634 break; 732 break;
635 } 733 }
636 acb->pccb_pool[i] = ccb_tmp; 734 acb->pccb_pool[i] = ccb_tmp;
637 ccb_tmp->acb = acb; 735 ccb_tmp->acb = acb;
736 ccb_tmp->smid = (u32)i << 16;
638 INIT_LIST_HEAD(&ccb_tmp->list); 737 INIT_LIST_HEAD(&ccb_tmp->list);
639 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); 738 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
640 ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize); 739 ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
@@ -654,6 +753,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
654 struct scsi_device *psdev; 753 struct scsi_device *psdev;
655 char diff, temp; 754 char diff, temp;
656 755
756 acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
657 switch (acb->adapter_type) { 757 switch (acb->adapter_type) {
658 case ACB_ADAPTER_TYPE_A: { 758 case ACB_ADAPTER_TYPE_A: {
659 struct MessageUnit_A __iomem *reg = acb->pmuA; 759 struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -683,6 +783,13 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
683 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]); 783 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
684 break; 784 break;
685 } 785 }
786 case ACB_ADAPTER_TYPE_E: {
787 struct MessageUnit_E __iomem *reg = acb->pmuE;
788
789 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
790 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
791 break;
792 }
686 } 793 }
687 atomic_inc(&acb->rq_map_token); 794 atomic_inc(&acb->rq_map_token);
688 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG) 795 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
@@ -723,17 +830,26 @@ arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
723 unsigned long flags; 830 unsigned long flags;
724 int nvec, i; 831 int nvec, i;
725 832
833 if (msix_enable == 0)
834 goto msi_int0;
726 nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS, 835 nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
727 PCI_IRQ_MSIX); 836 PCI_IRQ_MSIX);
728 if (nvec > 0) { 837 if (nvec > 0) {
729 pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no); 838 pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
730 flags = 0; 839 flags = 0;
731 } else { 840 } else {
732 nvec = pci_alloc_irq_vectors(pdev, 1, 1, 841msi_int0:
733 PCI_IRQ_MSI | PCI_IRQ_LEGACY); 842 if (msi_enable == 1) {
843 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
844 if (nvec == 1) {
845 dev_info(&pdev->dev, "msi enabled\n");
846 goto msi_int1;
847 }
848 }
849 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
734 if (nvec < 1) 850 if (nvec < 1)
735 return FAILED; 851 return FAILED;
736 852msi_int1:
737 flags = IRQF_SHARED; 853 flags = IRQF_SHARED;
738 } 854 }
739 855
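
The reworked interrupt setup now degrades in three explicit steps, MSI-X (unless msix_enable=0), then single-vector MSI (unless msi_enable=0), then legacy INTx, and requests IRQF_SHARED only on the single-vector fallbacks. The two gotos implement what could equally be written as a straight cascade; a behavior-equivalent sketch using the same module parameters:

    /* sketch: same fallback order as the patched arcmsr_request_irq() */
    int nvec = 0;
    unsigned long flags = IRQF_SHARED;

    if (msix_enable) {
            nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
                                         PCI_IRQ_MSIX);
            if (nvec > 0)
                    flags = 0;      /* exclusive MSI-X vectors */
    }
    if (nvec < 1 && msi_enable)
            nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
    if (nvec < 1)
            nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
    if (nvec < 1)
            return FAILED;
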
@@ -755,6 +871,24 @@ out_free_irq:
755 return FAILED; 871 return FAILED;
756} 872}
757 873
874static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
875{
876 INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
877 atomic_set(&pacb->rq_map_token, 16);
878 atomic_set(&pacb->ante_token_value, 16);
879 pacb->fw_flag = FW_NORMAL;
880 timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
881 pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
882 add_timer(&pacb->eternal_timer);
883}
884
885static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
886{
887 timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
888 pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
889 add_timer(&pacb->refresh_timer);
890}
891
758static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) 892static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
759{ 893{
760 struct Scsi_Host *host; 894 struct Scsi_Host *host;
@@ -789,8 +923,12 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
789 host->max_lun = ARCMSR_MAX_TARGETLUN; 923 host->max_lun = ARCMSR_MAX_TARGETLUN;
790 host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/ 924 host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
791 host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/ 925 host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
792 host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD; 926 if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
793 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN; 927 host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
928 host->can_queue = host_can_queue; /* max simultaneous cmds */
929 if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
930 cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
931 host->cmd_per_lun = cmd_per_lun;
794 host->this_id = ARCMSR_SCSI_INITIATOR_ID; 932 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
795 host->unique_id = (bus << 8) | dev_fun; 933 host->unique_id = (bus << 8) | dev_fun;
796 pci_set_drvdata(pdev, host); 934 pci_set_drvdata(pdev, host);
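
Note the failure mode of the new range checks: an out-of-range host_can_queue or cmd_per_lun silently falls back to the default rather than being clamped to the nearest bound, so a bad modprobe option can never fail the probe. For example, with the limits from the parameter descriptions above:

    /* sketch: fallback-to-default, not clamping */
    if (host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD ||
        host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD)
            host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
    /* host_can_queue=2048 on the modprobe line yields 128, not 1024 */
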
@@ -833,18 +971,16 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
833 if (arcmsr_request_irq(pdev, acb) == FAILED) 971 if (arcmsr_request_irq(pdev, acb) == FAILED)
834 goto scsi_host_remove; 972 goto scsi_host_remove;
835 arcmsr_iop_init(acb); 973 arcmsr_iop_init(acb);
836 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); 974 arcmsr_init_get_devmap_timer(acb);
837 atomic_set(&acb->rq_map_token, 16); 975 if (set_date_time)
838 atomic_set(&acb->ante_token_value, 16); 976 arcmsr_init_set_datetime_timer(acb);
839 acb->fw_flag = FW_NORMAL;
840 timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
841 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
842 add_timer(&acb->eternal_timer);
843 if(arcmsr_alloc_sysfs_attr(acb)) 977 if(arcmsr_alloc_sysfs_attr(acb))
844 goto out_free_sysfs; 978 goto out_free_sysfs;
845 scsi_scan_host(host); 979 scsi_scan_host(host);
846 return 0; 980 return 0;
847out_free_sysfs: 981out_free_sysfs:
982 if (set_date_time)
983 del_timer_sync(&acb->refresh_timer);
848 del_timer_sync(&acb->eternal_timer); 984 del_timer_sync(&acb->eternal_timer);
849 flush_work(&acb->arcmsr_do_message_isr_bh); 985 flush_work(&acb->arcmsr_do_message_isr_bh);
850 arcmsr_stop_adapter_bgrb(acb); 986 arcmsr_stop_adapter_bgrb(acb);
@@ -887,6 +1023,8 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
887 intmask_org = arcmsr_disable_outbound_ints(acb); 1023 intmask_org = arcmsr_disable_outbound_ints(acb);
888 arcmsr_free_irq(pdev, acb); 1024 arcmsr_free_irq(pdev, acb);
889 del_timer_sync(&acb->eternal_timer); 1025 del_timer_sync(&acb->eternal_timer);
1026 if (set_date_time)
1027 del_timer_sync(&acb->refresh_timer);
890 flush_work(&acb->arcmsr_do_message_isr_bh); 1028 flush_work(&acb->arcmsr_do_message_isr_bh);
891 arcmsr_stop_adapter_bgrb(acb); 1029 arcmsr_stop_adapter_bgrb(acb);
892 arcmsr_flush_adapter_cache(acb); 1030 arcmsr_flush_adapter_cache(acb);
@@ -924,13 +1062,9 @@ static int arcmsr_resume(struct pci_dev *pdev)
924 if (arcmsr_request_irq(pdev, acb) == FAILED) 1062 if (arcmsr_request_irq(pdev, acb) == FAILED)
925 goto controller_stop; 1063 goto controller_stop;
926 arcmsr_iop_init(acb); 1064 arcmsr_iop_init(acb);
927 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); 1065 arcmsr_init_get_devmap_timer(acb);
928 atomic_set(&acb->rq_map_token, 16); 1066 if (set_date_time)
929 atomic_set(&acb->ante_token_value, 16); 1067 arcmsr_init_set_datetime_timer(acb);
930 acb->fw_flag = FW_NORMAL;
931 timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
932 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
933 add_timer(&acb->eternal_timer);
934 return 0; 1068 return 0;
935controller_stop: 1069controller_stop:
936 arcmsr_stop_adapter_bgrb(acb); 1070 arcmsr_stop_adapter_bgrb(acb);
@@ -998,6 +1132,21 @@ static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
998 return true; 1132 return true;
999} 1133}
1000 1134
1135static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
1136{
1137 struct MessageUnit_E __iomem *reg = pACB->pmuE;
1138
1139 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
1140 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1141 writel(pACB->out_doorbell, &reg->iobound_doorbell);
1142 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1143 pr_notice("arcmsr%d: wait 'abort all outstanding "
1144 "command' timeout\n", pACB->host->host_no);
1145 return false;
1146 }
1147 return true;
1148}
1149
1001static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 1150static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
1002{ 1151{
1003 uint8_t rtnval = 0; 1152 uint8_t rtnval = 0;
@@ -1020,6 +1169,9 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
1020 case ACB_ADAPTER_TYPE_D: 1169 case ACB_ADAPTER_TYPE_D:
1021 rtnval = arcmsr_hbaD_abort_allcmd(acb); 1170 rtnval = arcmsr_hbaD_abort_allcmd(acb);
1022 break; 1171 break;
1172 case ACB_ADAPTER_TYPE_E:
1173 rtnval = arcmsr_hbaE_abort_allcmd(acb);
1174 break;
1023 } 1175 }
1024 return rtnval; 1176 return rtnval;
1025} 1177}
@@ -1050,7 +1202,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
1050 1202
1051 struct scsi_cmnd *pcmd = ccb->pcmd; 1203 struct scsi_cmnd *pcmd = ccb->pcmd;
1052 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer; 1204 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
1053 pcmd->result = DID_OK << 16; 1205 pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1054 if (sensebuffer) { 1206 if (sensebuffer) {
1055 int sense_data_length = 1207 int sense_data_length =
1056 sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE 1208 sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
@@ -1059,6 +1211,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
1059 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length); 1211 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
1060 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; 1212 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
1061 sensebuffer->Valid = 1; 1213 sensebuffer->Valid = 1;
1214 pcmd->result |= (DRIVER_SENSE << 24);
1062 } 1215 }
1063} 1216}
1064 1217
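
The result word assembled here packs three fields the old midlayer decodes positionally: the SCSI status byte in the low bits (CHECK_CONDITION << 1 equals SAM_STAT_CHECK_CONDITION), the host byte at bit 16 (DID_OK), and, only once sense data has really been copied, the driver byte at bit 24 (DRIVER_SENSE) telling the midlayer the sense buffer is valid. A sketch of the layout; the helper macro is hypothetical, not from the source:

    /* sketch: pre-blk-mq-era scsi_cmnd result layout */
    #define MK_SCSI_RESULT(drv, host, msg, status) \
            (((drv) << 24) | ((host) << 16) | ((msg) << 8) | (status))

    u32 result = MK_SCSI_RESULT(DRIVER_SENSE, DID_OK, 0,
                                CHECK_CONDITION << 1);
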
@@ -1092,6 +1245,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
1092 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable); 1245 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
1093 } 1246 }
1094 break; 1247 break;
1248 case ACB_ADAPTER_TYPE_E: {
1249 struct MessageUnit_E __iomem *reg = acb->pmuE;
1250 orig_mask = readl(&reg->host_int_mask);
1251 writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
1252 readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
1253 }
1254 break;
1095 } 1255 }
1096 return orig_mask; 1256 return orig_mask;
1097} 1257}
@@ -1196,7 +1356,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1196 /*clear and abort all outbound posted Q*/ 1356 /*clear and abort all outbound posted Q*/
1197 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ 1357 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1198 while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) 1358 while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
1199 && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { 1359 && (i++ < acb->maxOutstanding)) {
1200 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ 1360 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
1201 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 1361 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1202 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; 1362 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -1226,7 +1386,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1226 break; 1386 break;
1227 case ACB_ADAPTER_TYPE_C: { 1387 case ACB_ADAPTER_TYPE_C: {
1228 struct MessageUnit_C __iomem *reg = acb->pmuC; 1388 struct MessageUnit_C __iomem *reg = acb->pmuC;
1229 while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { 1389 while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
1230 /*need to do*/ 1390 /*need to do*/
1231 flag_ccb = readl(&reg->outbound_queueport_low); 1391 flag_ccb = readl(&reg->outbound_queueport_low);
1232 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); 1392 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
@@ -1280,6 +1440,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1280 pmu->doneq_index = 0x40FF; 1440 pmu->doneq_index = 0x40FF;
1281 } 1441 }
1282 break; 1442 break;
1443 case ACB_ADAPTER_TYPE_E:
1444 arcmsr_hbaE_postqueue_isr(acb);
1445 break;
1283 } 1446 }
1284} 1447}
1285 1448
@@ -1293,13 +1456,15 @@ static void arcmsr_remove(struct pci_dev *pdev)
1293 scsi_remove_host(host); 1456 scsi_remove_host(host);
1294 flush_work(&acb->arcmsr_do_message_isr_bh); 1457 flush_work(&acb->arcmsr_do_message_isr_bh);
1295 del_timer_sync(&acb->eternal_timer); 1458 del_timer_sync(&acb->eternal_timer);
1459 if (set_date_time)
1460 del_timer_sync(&acb->refresh_timer);
1296 arcmsr_disable_outbound_ints(acb); 1461 arcmsr_disable_outbound_ints(acb);
1297 arcmsr_stop_adapter_bgrb(acb); 1462 arcmsr_stop_adapter_bgrb(acb);
1298 arcmsr_flush_adapter_cache(acb); 1463 arcmsr_flush_adapter_cache(acb);
1299 acb->acb_flags |= ACB_F_SCSISTOPADAPTER; 1464 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
1300 acb->acb_flags &= ~ACB_F_IOP_INITED; 1465 acb->acb_flags &= ~ACB_F_IOP_INITED;
1301 1466
1302 for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){ 1467 for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
1303 if (!atomic_read(&acb->ccboutstandingcount)) 1468 if (!atomic_read(&acb->ccboutstandingcount))
1304 break; 1469 break;
1305 arcmsr_interrupt(acb);/* FIXME: need spinlock */ 1470 arcmsr_interrupt(acb);/* FIXME: need spinlock */
@@ -1311,7 +1476,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
1311 1476
1312 arcmsr_abort_allcmd(acb); 1477 arcmsr_abort_allcmd(acb);
1313 arcmsr_done4abort_postqueue(acb); 1478 arcmsr_done4abort_postqueue(acb);
1314 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 1479 for (i = 0; i < acb->maxFreeCCB; i++) {
1315 struct CommandControlBlock *ccb = acb->pccb_pool[i]; 1480 struct CommandControlBlock *ccb = acb->pccb_pool[i];
1316 if (ccb->startdone == ARCMSR_CCB_START) { 1481 if (ccb->startdone == ARCMSR_CCB_START) {
1317 ccb->startdone = ARCMSR_CCB_ABORTED; 1482 ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -1335,6 +1500,8 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
1335 struct AdapterControlBlock *acb = 1500 struct AdapterControlBlock *acb =
1336 (struct AdapterControlBlock *)host->hostdata; 1501 (struct AdapterControlBlock *)host->hostdata;
1337 del_timer_sync(&acb->eternal_timer); 1502 del_timer_sync(&acb->eternal_timer);
1503 if (set_date_time)
1504 del_timer_sync(&acb->refresh_timer);
1338 arcmsr_disable_outbound_ints(acb); 1505 arcmsr_disable_outbound_ints(acb);
1339 arcmsr_free_irq(pdev, acb); 1506 arcmsr_free_irq(pdev, acb);
1340 flush_work(&acb->arcmsr_do_message_isr_bh); 1507 flush_work(&acb->arcmsr_do_message_isr_bh);
@@ -1396,6 +1563,13 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
1396 writel(intmask_org | mask, reg->pcief0_int_enable); 1563 writel(intmask_org | mask, reg->pcief0_int_enable);
1397 break; 1564 break;
1398 } 1565 }
1566 case ACB_ADAPTER_TYPE_E: {
1567 struct MessageUnit_E __iomem *reg = acb->pmuE;
1568
1569 mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
1570 writel(intmask_org & mask, &reg->host_int_mask);
1571 break;
1572 }
1399 } 1573 }
1400} 1574}
1401 1575
@@ -1527,6 +1701,16 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
1527 spin_unlock_irqrestore(&acb->postq_lock, flags); 1701 spin_unlock_irqrestore(&acb->postq_lock, flags);
1528 break; 1702 break;
1529 } 1703 }
1704 case ACB_ADAPTER_TYPE_E: {
1705 struct MessageUnit_E __iomem *pmu = acb->pmuE;
1706 u32 ccb_post_stamp, arc_cdb_size;
1707
1708 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1709 ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
1710 writel(0, &pmu->inbound_queueport_high);
1711 writel(ccb_post_stamp, &pmu->inbound_queueport_low);
1712 break;
1713 }
1530 } 1714 }
1531} 1715}
1532 1716
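
Type E posting is token-based rather than address-based: the pool index baked into ccb->smid at allocation time (i << 16) is OR-ed with the frame size in 64-byte units minus one, and completion returns only the 16-bit index (cmdSMID), which indexes straight back into pccb_pool. A sketch of the round trip, using only names from this patch:

    /* sketch: type E post/complete token */
    u32 stamp = ccb->smid | ((ccb->arc_cdb_size - 1) >> 6);
    u16 size_field = stamp & 0xFFFF;   /* (size - 1) / 64, for the firmware */
    u16 idx = stamp >> 16;             /* comes back as cmdSMID */
    /* completion side: ccb = acb->pccb_pool[idx]; */
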
@@ -1580,6 +1764,20 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
1580 "timeout\n", pACB->host->host_no); 1764 "timeout\n", pACB->host->host_no);
1581} 1765}
1582 1766
1767static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
1768{
1769 struct MessageUnit_E __iomem *reg = pACB->pmuE;
1770
1771 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1772 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1773 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1774 writel(pACB->out_doorbell, &reg->iobound_doorbell);
1775 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1776		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
1777 "timeout\n", pACB->host->host_no);
1778 }
1779}
1780
1583static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 1781static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1584{ 1782{
1585 switch (acb->adapter_type) { 1783 switch (acb->adapter_type) {
@@ -1599,6 +1797,9 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1599 case ACB_ADAPTER_TYPE_D: 1797 case ACB_ADAPTER_TYPE_D:
1600 arcmsr_hbaD_stop_bgrb(acb); 1798 arcmsr_hbaD_stop_bgrb(acb);
1601 break; 1799 break;
1800 case ACB_ADAPTER_TYPE_E:
1801 arcmsr_hbaE_stop_bgrb(acb);
1802 break;
1602 } 1803 }
1603} 1804}
1604 1805
@@ -1633,6 +1834,12 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1633 reg->inbound_doorbell); 1834 reg->inbound_doorbell);
1634 } 1835 }
1635 break; 1836 break;
1837 case ACB_ADAPTER_TYPE_E: {
1838 struct MessageUnit_E __iomem *reg = acb->pmuE;
1839 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
1840 writel(acb->out_doorbell, &reg->iobound_doorbell);
1841 }
1842 break;
1636 } 1843 }
1637} 1844}
1638 1845
@@ -1673,6 +1880,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1673 reg->inbound_doorbell); 1880 reg->inbound_doorbell);
1674 } 1881 }
1675 break; 1882 break;
1883 case ACB_ADAPTER_TYPE_E: {
1884 struct MessageUnit_E __iomem *reg = acb->pmuE;
1885 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
1886 writel(acb->out_doorbell, &reg->iobound_doorbell);
1887 }
1888 break;
1676 } 1889 }
1677} 1890}
1678 1891
@@ -1702,6 +1915,11 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1702 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; 1915 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
1703 } 1916 }
1704 break; 1917 break;
1918 case ACB_ADAPTER_TYPE_E: {
1919 struct MessageUnit_E __iomem *reg = acb->pmuE;
1920 qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
1921 }
1922 break;
1705 } 1923 }
1706 return qbuffer; 1924 return qbuffer;
1707} 1925}
@@ -1732,6 +1950,11 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
1732 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; 1950 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
1733 } 1951 }
1734 break; 1952 break;
1953 case ACB_ADAPTER_TYPE_E: {
1954 struct MessageUnit_E __iomem *reg = acb->pmuE;
1955 pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
1956 }
1957 break;
1735 } 1958 }
1736 return pqbuffer; 1959 return pqbuffer;
1737} 1960}
@@ -1785,7 +2008,7 @@ arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
1785 uint8_t __iomem *iop_data; 2008 uint8_t __iomem *iop_data;
1786 uint32_t iop_len; 2009 uint32_t iop_len;
1787 2010
1788 if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) 2011 if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
1789 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer); 2012 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
1790 iop_data = (uint8_t __iomem *)prbuffer->data; 2013 iop_data = (uint8_t __iomem *)prbuffer->data;
1791 iop_len = readl(&prbuffer->data_len); 2014 iop_len = readl(&prbuffer->data_len);
@@ -1871,7 +2094,7 @@ arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
1871 uint8_t __iomem *iop_data; 2094 uint8_t __iomem *iop_data;
1872 int32_t allxfer_len = 0; 2095 int32_t allxfer_len = 0;
1873 2096
1874 if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) { 2097 if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
1875 arcmsr_write_ioctldata2iop_in_DWORD(acb); 2098 arcmsr_write_ioctldata2iop_in_DWORD(acb);
1876 return; 2099 return;
1877 } 2100 }
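
Both byte-wise ioctl paths now test adapter_type with an ordering comparison instead of a bit mask. That only works if the ACB_ADAPTER_TYPE_* constants form an ordered sequence with A and B lowest; the header side of this diff is assumed to redefine them that way (the old code's mask test implies they used to be bit flags). The relied-upon shape, with illustrative values:

    /* sketch: hypothetical ordered values; see arcmsr.h in this diff */
    enum {
            ACB_ADAPTER_TYPE_A,     /* byte-wise Qbuffer */
            ACB_ADAPTER_TYPE_B,     /* byte-wise Qbuffer */
            ACB_ADAPTER_TYPE_C,     /* > TYPE_B: DWORD Qbuffer */
            ACB_ADAPTER_TYPE_D,
            ACB_ADAPTER_TYPE_E,
    };
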
@@ -1968,6 +2191,33 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
1968 | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)); 2191 | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
1969} 2192}
1970 2193
2194static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
2195{
2196 uint32_t outbound_doorbell, in_doorbell, tmp;
2197 struct MessageUnit_E __iomem *reg = pACB->pmuE;
2198
2199 in_doorbell = readl(&reg->iobound_doorbell);
2200 outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
2201 do {
2202 writel(0, &reg->host_int_status); /* clear interrupt */
2203 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
2204 arcmsr_iop2drv_data_wrote_handle(pACB);
2205 }
2206 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
2207 arcmsr_iop2drv_data_read_handle(pACB);
2208 }
2209 if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
2210 arcmsr_hbaE_message_isr(pACB);
2211 }
2212 tmp = in_doorbell;
2213 in_doorbell = readl(&reg->iobound_doorbell);
2214 outbound_doorbell = tmp ^ in_doorbell;
2215 } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
2216 | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
2217 | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
2218 pACB->in_doorbell = in_doorbell;
2219}
2220
1971static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb) 2221static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
1972{ 2222{
1973 uint32_t flag_ccb; 2223 uint32_t flag_ccb;
@@ -2077,6 +2327,33 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
2077 spin_unlock_irqrestore(&acb->doneq_lock, flags); 2327 spin_unlock_irqrestore(&acb->doneq_lock, flags);
2078} 2328}
2079 2329
2330static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
2331{
2332 uint32_t doneq_index;
2333 uint16_t cmdSMID;
2334 int error;
2335 struct MessageUnit_E __iomem *pmu;
2336 struct CommandControlBlock *ccb;
2337 unsigned long flags;
2338
2339 spin_lock_irqsave(&acb->doneq_lock, flags);
2340 doneq_index = acb->doneq_index;
2341 pmu = acb->pmuE;
2342 while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
2343 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
2344 ccb = acb->pccb_pool[cmdSMID];
2345 error = (acb->pCompletionQ[doneq_index].cmdFlag
2346 & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
2347 arcmsr_drain_donequeue(acb, ccb, error);
2348 doneq_index++;
2349 if (doneq_index >= acb->completionQ_entry)
2350 doneq_index = 0;
2351 }
2352 acb->doneq_index = doneq_index;
2353 writel(doneq_index, &pmu->reply_post_consumer_index);
2354 spin_unlock_irqrestore(&acb->doneq_lock, flags);
2355}
2356
2080/* 2357/*
2081********************************************************************************** 2358**********************************************************************************
2082** Handle a message interrupt 2359** Handle a message interrupt
@@ -2090,7 +2367,8 @@ static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2090 struct MessageUnit_A __iomem *reg = acb->pmuA; 2367 struct MessageUnit_A __iomem *reg = acb->pmuA;
2091 /*clear interrupt and message state*/ 2368 /*clear interrupt and message state*/
2092 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus); 2369 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
2093 schedule_work(&acb->arcmsr_do_message_isr_bh); 2370 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2371 schedule_work(&acb->arcmsr_do_message_isr_bh);
2094} 2372}
2095static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb) 2373static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2096{ 2374{
@@ -2098,7 +2376,8 @@ static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
2098 2376
2099 /*clear interrupt and message state*/ 2377 /*clear interrupt and message state*/
2100 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); 2378 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2101 schedule_work(&acb->arcmsr_do_message_isr_bh); 2379 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2380 schedule_work(&acb->arcmsr_do_message_isr_bh);
2102} 2381}
2103/* 2382/*
2104********************************************************************************** 2383**********************************************************************************
@@ -2114,7 +2393,8 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
2114 struct MessageUnit_C __iomem *reg = acb->pmuC; 2393 struct MessageUnit_C __iomem *reg = acb->pmuC;
2115 /*clear interrupt and message state*/ 2394 /*clear interrupt and message state*/
2116 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear); 2395 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
2117 schedule_work(&acb->arcmsr_do_message_isr_bh); 2396 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2397 schedule_work(&acb->arcmsr_do_message_isr_bh);
2118} 2398}
2119 2399
2120static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb) 2400static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
@@ -2123,7 +2403,17 @@ static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
2123 2403
2124 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell); 2404 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
2125 readl(reg->outbound_doorbell); 2405 readl(reg->outbound_doorbell);
2126 schedule_work(&acb->arcmsr_do_message_isr_bh); 2406 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2407 schedule_work(&acb->arcmsr_do_message_isr_bh);
2408}
2409
2410static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
2411{
2412 struct MessageUnit_E __iomem *reg = acb->pmuE;
2413
2414 writel(0, &reg->host_int_status);
2415 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2416 schedule_work(&acb->arcmsr_do_message_isr_bh);
2127} 2417}
2128 2418
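
All five message ISRs now schedule the device-map bottom half only while ACB_F_MSG_GET_CONFIG is set, and arcmsr_message_isr_bh_fn() clears the flag on entry (see the hunk further up), so a stray message-done doorbell no longer queues spurious rescan work. A sketch of the intended flag ownership, assuming the flag is set at the site that posts the GET_CONFIG message (that hunk falls outside this excerpt):

    /* sketch: ACB_F_MSG_GET_CONFIG gating */
    acb->acb_flags |= ACB_F_MSG_GET_CONFIG;  /* before posting GET_CONFIG */
    writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
    /* message ISR: only react to an in-flight get-config */
    if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
            schedule_work(&acb->arcmsr_do_message_isr_bh);
    /* bottom half entry: acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG; */
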
2129static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb) 2419static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
@@ -2229,6 +2519,31 @@ static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
2229 return IRQ_HANDLED; 2519 return IRQ_HANDLED;
2230} 2520}
2231 2521
2522static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
2523{
2524 uint32_t host_interrupt_status;
2525 struct MessageUnit_E __iomem *pmu = pACB->pmuE;
2526
2527 host_interrupt_status = readl(&pmu->host_int_status) &
2528 (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2529 ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
2530 if (!host_interrupt_status)
2531 return IRQ_NONE;
2532 do {
2533 /* MU ioctl transfer doorbell interrupts*/
2534 if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
2535 arcmsr_hbaE_doorbell_isr(pACB);
2536 }
2537 /* MU post queue interrupts*/
2538 if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
2539 arcmsr_hbaE_postqueue_isr(pACB);
2540 }
2541 host_interrupt_status = readl(&pmu->host_int_status);
2542 } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
2543 ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
2544 return IRQ_HANDLED;
2545}
2546
2232static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) 2547static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2233{ 2548{
2234 switch (acb->adapter_type) { 2549 switch (acb->adapter_type) {
@@ -2242,6 +2557,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2242 return arcmsr_hbaC_handle_isr(acb); 2557 return arcmsr_hbaC_handle_isr(acb);
2243 case ACB_ADAPTER_TYPE_D: 2558 case ACB_ADAPTER_TYPE_D:
2244 return arcmsr_hbaD_handle_isr(acb); 2559 return arcmsr_hbaD_handle_isr(acb);
2560 case ACB_ADAPTER_TYPE_E:
2561 return arcmsr_hbaE_handle_isr(acb);
2245 default: 2562 default:
2246 return IRQ_NONE; 2563 return IRQ_NONE;
2247 } 2564 }
@@ -2636,74 +2953,66 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
2636 2953
2637static DEF_SCSI_QCMD(arcmsr_queue_command) 2954static DEF_SCSI_QCMD(arcmsr_queue_command)
2638 2955
2639static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb) 2956static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
2640{ 2957{
2641 struct MessageUnit_A __iomem *reg = acb->pmuA;
2642 char *acb_firm_model = acb->firm_model;
2643 char *acb_firm_version = acb->firm_version;
2644 char *acb_device_map = acb->device_map;
2645 char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
2646 char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
2647 char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
2648 int count; 2958 int count;
2649 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); 2959 uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
2650 if (!arcmsr_hbaA_wait_msgint_ready(acb)) { 2960 uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
2651 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ 2961 uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
2652 miscellaneous data' timeout \n", acb->host->host_no); 2962 uint32_t *firm_model = &rwbuffer[15];
2653 return false; 2963 uint32_t *firm_version = &rwbuffer[17];
2654 } 2964 uint32_t *device_map = &rwbuffer[21];
2655 count = 8; 2965
2656 while (count){ 2966 count = 2;
2657 *acb_firm_model = readb(iop_firm_model); 2967 while (count) {
2968 *acb_firm_model = readl(firm_model);
2658 acb_firm_model++; 2969 acb_firm_model++;
2659 iop_firm_model++; 2970 firm_model++;
2660 count--; 2971 count--;
2661 } 2972 }
2662 2973 count = 4;
2663 count = 16; 2974 while (count) {
2664 while (count){ 2975 *acb_firm_version = readl(firm_version);
2665 *acb_firm_version = readb(iop_firm_version);
2666 acb_firm_version++; 2976 acb_firm_version++;
2667 iop_firm_version++; 2977 firm_version++;
2668 count--; 2978 count--;
2669 } 2979 }
2670 2980 count = 4;
2671 count=16; 2981 while (count) {
2672 while(count){ 2982 *acb_device_map = readl(device_map);
2673 *acb_device_map = readb(iop_device_map);
2674 acb_device_map++; 2983 acb_device_map++;
2675 iop_device_map++; 2984 device_map++;
2676 count--; 2985 count--;
2677 } 2986 }
2987 pACB->signature = readl(&rwbuffer[0]);
2988 pACB->firm_request_len = readl(&rwbuffer[1]);
2989 pACB->firm_numbers_queue = readl(&rwbuffer[2]);
2990 pACB->firm_sdram_size = readl(&rwbuffer[3]);
2991 pACB->firm_hd_channels = readl(&rwbuffer[4]);
2992 pACB->firm_cfg_version = readl(&rwbuffer[25]);
2678 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n", 2993 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2679 acb->host->host_no, 2994 pACB->host->host_no,
2680 acb->firm_model, 2995 pACB->firm_model,
2681 acb->firm_version); 2996 pACB->firm_version);
2682 acb->signature = readl(&reg->message_rwbuffer[0]); 2997}
2683 acb->firm_request_len = readl(&reg->message_rwbuffer[1]); 2998
2684 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]); 2999static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
2685 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]); 3000{
2686 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]); 3001 struct MessageUnit_A __iomem *reg = acb->pmuA;
2687 acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/ 3002
3003 arcmsr_wait_firmware_ready(acb);
3004 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3005 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3006 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3007 miscellaneous data' timeout \n", acb->host->host_no);
3008 return false;
3009 }
3010 arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
2688 return true; 3011 return true;
2689} 3012}
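
The five near-identical firmware-info readouts collapse into arcmsr_get_adapter_config() above, which also switches from readb() loops to readl() in 4-byte units: 2 DWORDs for the 8-byte model at rwbuffer[15], 4 for the 16-byte version at rwbuffer[17] and the device map at rwbuffer[21]. The casts rely on firm_model and friends being at least that large in the ACB. The copy pattern, condensed (helper name is illustrative):

    /* sketch: DWORD-wise copy out of the message unit's rwbuffer */
    static void copy_rwbuffer(uint32_t *dst, uint32_t __iomem *src, int n)
    {
            while (n--)
                    *dst++ = readl(src++);  /* keep MMIO accesses 32-bit */
    }
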
2690static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb) 3013static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
2691{ 3014{
2692 struct MessageUnit_B *reg = acb->pmuB; 3015 struct MessageUnit_B *reg = acb->pmuB;
2693 char *acb_firm_model = acb->firm_model;
2694 char *acb_firm_version = acb->firm_version;
2695 char *acb_device_map = acb->device_map;
2696 char __iomem *iop_firm_model;
2697 /*firm_model,15,60-67*/
2698 char __iomem *iop_firm_version;
2699 /*firm_version,17,68-83*/
2700 char __iomem *iop_device_map;
2701 /*firm_version,21,84-99*/
2702 int count;
2703
2704 iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
2705 iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
2706 iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
2707 3016
2708 arcmsr_wait_firmware_ready(acb); 3017 arcmsr_wait_firmware_ready(acb);
2709 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); 3018 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
@@ -2717,127 +3026,43 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
2717 miscellaneous data' timeout \n", acb->host->host_no); 3026 miscellaneous data' timeout \n", acb->host->host_no);
2718 return false; 3027 return false;
2719 } 3028 }
2720 count = 8; 3029 arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
2721 while (count){
2722 *acb_firm_model = readb(iop_firm_model);
2723 acb_firm_model++;
2724 iop_firm_model++;
2725 count--;
2726 }
2727 count = 16;
2728 while (count){
2729 *acb_firm_version = readb(iop_firm_version);
2730 acb_firm_version++;
2731 iop_firm_version++;
2732 count--;
2733 }
2734
2735 count = 16;
2736 while(count){
2737 *acb_device_map = readb(iop_device_map);
2738 acb_device_map++;
2739 iop_device_map++;
2740 count--;
2741 }
2742
2743 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2744 acb->host->host_no,
2745 acb->firm_model,
2746 acb->firm_version);
2747
2748 acb->signature = readl(&reg->message_rwbuffer[0]);
2749 /*firm_signature,1,00-03*/
2750 acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
2751 /*firm_request_len,1,04-07*/
2752 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
2753 /*firm_numbers_queue,2,08-11*/
2754 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
2755 /*firm_sdram_size,3,12-15*/
2756 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
2757 /*firm_ide_channels,4,16-19*/
2758 acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
2759 /*firm_ide_channels,4,16-19*/
2760 return true; 3030 return true;
2761} 3031}
2762 3032
2763static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB) 3033static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
2764{ 3034{
2765 uint32_t intmask_org, Index, firmware_state = 0; 3035 uint32_t intmask_org;
2766 struct MessageUnit_C __iomem *reg = pACB->pmuC; 3036 struct MessageUnit_C __iomem *reg = pACB->pmuC;
2767 char *acb_firm_model = pACB->firm_model; 3037
2768 char *acb_firm_version = pACB->firm_version;
2769 char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
2770 char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
2771 int count;
2772 /* disable all outbound interrupt */ 3038 /* disable all outbound interrupt */
2773 intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */ 3039 intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
2774 writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask); 3040 writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
2775 /* wait firmware ready */ 3041 /* wait firmware ready */
2776 do { 3042 arcmsr_wait_firmware_ready(pACB);
2777 firmware_state = readl(&reg->outbound_msgaddr1);
2778 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
2779 /* post "get config" instruction */ 3043 /* post "get config" instruction */
2780 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); 3044 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2781 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); 3045 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2782 /* wait message ready */ 3046 /* wait message ready */
2783 for (Index = 0; Index < 2000; Index++) { 3047 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
2784 if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
2785 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
2786 break;
2787 }
2788 udelay(10);
2789 } /*max 1 seconds*/
2790 if (Index >= 2000) {
2791 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ 3048 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
2792 miscellaneous data' timeout \n", pACB->host->host_no); 3049 miscellaneous data' timeout \n", pACB->host->host_no);
2793 return false; 3050 return false;
2794 } 3051 }
2795 count = 8; 3052 arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
2796 while (count) {
2797 *acb_firm_model = readb(iop_firm_model);
2798 acb_firm_model++;
2799 iop_firm_model++;
2800 count--;
2801 }
2802 count = 16;
2803 while (count) {
2804 *acb_firm_version = readb(iop_firm_version);
2805 acb_firm_version++;
2806 iop_firm_version++;
2807 count--;
2808 }
2809 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2810 pACB->host->host_no,
2811 pACB->firm_model,
2812 pACB->firm_version);
2813 pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
2814 pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
2815 pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
2816 pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
2817 pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
2818 /*all interrupt service will be enable at arcmsr_iop_init*/
2819 return true; 3053 return true;
2820} 3054}
2821 3055
2822static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb) 3056static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
2823{ 3057{
2824 char *acb_firm_model = acb->firm_model;
2825 char *acb_firm_version = acb->firm_version;
2826 char *acb_device_map = acb->device_map;
2827 char __iomem *iop_firm_model;
2828 char __iomem *iop_firm_version;
2829 char __iomem *iop_device_map;
2830 u32 count;
2831 struct MessageUnit_D *reg = acb->pmuD; 3058 struct MessageUnit_D *reg = acb->pmuD;
2832 3059
2833 iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
2834 iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
2835 iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
2836 if (readl(acb->pmuD->outbound_doorbell) & 3060 if (readl(acb->pmuD->outbound_doorbell) &
2837 ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) { 3061 ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
2838 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, 3062 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
2839 acb->pmuD->outbound_doorbell);/*clear interrupt*/ 3063 acb->pmuD->outbound_doorbell);/*clear interrupt*/
2840 } 3064 }
3065 arcmsr_wait_firmware_ready(acb);
2841 /* post "get config" instruction */ 3066 /* post "get config" instruction */
2842 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0); 3067 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
2843 /* wait message ready */ 3068 /* wait message ready */
@@ -2846,42 +3071,33 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
2846 "miscellaneous data timeout\n", acb->host->host_no); 3071 "miscellaneous data timeout\n", acb->host->host_no);
2847 return false; 3072 return false;
2848 } 3073 }
2849 count = 8; 3074 arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
2850 while (count) { 3075 return true;
2851 *acb_firm_model = readb(iop_firm_model); 3076}
2852 acb_firm_model++; 3077
2853 iop_firm_model++; 3078static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
2854 count--; 3079{
2855 } 3080 struct MessageUnit_E __iomem *reg = pACB->pmuE;
2856 count = 16; 3081 uint32_t intmask_org;
2857 while (count) { 3082
2858 *acb_firm_version = readb(iop_firm_version); 3083 /* disable all outbound interrupt */
2859 acb_firm_version++; 3084 intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
2860 iop_firm_version++; 3085 writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
2861 count--; 3086 /* wait firmware ready */
2862 } 3087 arcmsr_wait_firmware_ready(pACB);
2863 count = 16; 3088 mdelay(20);
2864 while (count) { 3089 /* post "get config" instruction */
2865 *acb_device_map = readb(iop_device_map); 3090 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2866 acb_device_map++; 3091
2867 iop_device_map++; 3092 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
2868 count--; 3093 writel(pACB->out_doorbell, &reg->iobound_doorbell);
3094 /* wait message ready */
3095 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3096 pr_notice("arcmsr%d: wait get adapter firmware "
3097 "miscellaneous data timeout\n", pACB->host->host_no);
3098 return false;
2869 } 3099 }
2870 acb->signature = readl(&reg->msgcode_rwbuffer[0]); 3100 arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
2871 /*firm_signature,1,00-03*/
2872 acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
2873 /*firm_request_len,1,04-07*/
2874 acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
2875 /*firm_numbers_queue,2,08-11*/
2876 acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
2877 /*firm_sdram_size,3,12-15*/
2878 acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
2879 /*firm_hd_channels,4,16-19*/
2880 acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
2881 pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
2882 acb->host->host_no,
2883 acb->firm_model,
2884 acb->firm_version);
2885 return true; 3101 return true;
2886} 3102}
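
Note the handshake idiom all the new type-E code uses: the driver keeps a shadow copy of its outbound doorbell in pACB->out_doorbell, flips the relevant bit, and writes the whole shadow back, so the IOP detects the message on the bit transition rather than on a level. A sketch of the pattern as a helper (the function name is hypothetical; the registers and flags are those used above):

static void arcmsr_hbaE_post_message(struct AdapterControlBlock *acb, u32 msg)
{
	struct MessageUnit_E __iomem *reg = acb->pmuE;

	writel(msg, &reg->inbound_msgaddr0);
	/* toggle the shadow bit; the IOP reacts to the edge */
	acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(acb->out_doorbell, &reg->iobound_doorbell);
}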
2887 3103
@@ -2902,14 +3118,20 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
2902 case ACB_ADAPTER_TYPE_D: 3118 case ACB_ADAPTER_TYPE_D:
2903 rtn = arcmsr_hbaD_get_config(acb); 3119 rtn = arcmsr_hbaD_get_config(acb);
2904 break; 3120 break;
3121 case ACB_ADAPTER_TYPE_E:
3122 rtn = arcmsr_hbaE_get_config(acb);
3123 break;
2905 default: 3124 default:
2906 break; 3125 break;
2907 } 3126 }
2908 if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD) 3127 acb->maxOutstanding = acb->firm_numbers_queue - 1;
2909 acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD; 3128 if (acb->host->can_queue >= acb->firm_numbers_queue)
3129 acb->host->can_queue = acb->maxOutstanding;
2910 else 3130 else
2911 acb->maxOutstanding = acb->firm_numbers_queue - 1; 3131 acb->maxOutstanding = acb->host->can_queue;
2912 acb->host->can_queue = acb->maxOutstanding; 3132 acb->maxFreeCCB = acb->host->can_queue;
3133 if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
3134 acb->maxFreeCCB += 64;
2913 return rtn; 3135 return rtn;
2914} 3136}
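
The reworked sizing above clamps the effective queue depth to the smaller of the firmware's queue and the host template's can_queue, then pads the CCB pool with 64 spares. A minimal standalone model with hypothetical numbers (ARCMSR_MAX_FREECCB_NUM is assumed to be 320 here):

#include <stdio.h>

int main(void)
{
	unsigned firm_numbers_queue = 256;	/* reported by firmware */
	unsigned can_queue = 128;		/* from the scsi_host_template */
	unsigned max_freeccb_num = 320;		/* stand-in for the constant */
	unsigned maxOutstanding, maxFreeCCB;

	maxOutstanding = firm_numbers_queue - 1;
	if (can_queue >= firm_numbers_queue)
		can_queue = maxOutstanding;	/* host asked for more than fw has */
	else
		maxOutstanding = can_queue;	/* fw offers more than host will use */
	maxFreeCCB = can_queue;
	if (maxFreeCCB < max_freeccb_num)
		maxFreeCCB += 64;		/* spare CCBs beyond can_queue */

	/* prints: maxOutstanding=128 can_queue=128 maxFreeCCB=192 */
	printf("maxOutstanding=%u can_queue=%u maxFreeCCB=%u\n",
	       maxOutstanding, can_queue, maxFreeCCB);
	return 0;
}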
2915 3137
@@ -3166,6 +3388,75 @@ polling_hbaD_ccb_retry:
3166 return rtn; 3388 return rtn;
3167} 3389}
3168 3390
3391static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
3392 struct CommandControlBlock *poll_ccb)
3393{
3394 bool error;
3395 uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
3396 uint16_t cmdSMID;
3397 unsigned long flags;
3398 int rtn;
3399 struct CommandControlBlock *pCCB;
3400 struct MessageUnit_E __iomem *reg = acb->pmuE;
3401
3402 polling_hbaE_ccb_retry:
3403 poll_count++;
3404 while (1) {
3405 spin_lock_irqsave(&acb->doneq_lock, flags);
3406 doneq_index = acb->doneq_index;
3407 if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
3408 doneq_index) {
3409 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3410 if (poll_ccb_done) {
3411 rtn = SUCCESS;
3412 break;
3413 } else {
3414 msleep(25);
3415 if (poll_count > 40) {
3416 rtn = FAILED;
3417 break;
3418 }
3419 goto polling_hbaE_ccb_retry;
3420 }
3421 }
3422 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
3423 doneq_index++;
3424 if (doneq_index >= acb->completionQ_entry)
3425 doneq_index = 0;
3426 acb->doneq_index = doneq_index;
3427 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3428 pCCB = acb->pccb_pool[cmdSMID];
3429 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3430 /* check if command done with no error */
3431 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3432 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3433 pr_notice("arcmsr%d: scsi id = %d "
3434 "lun = %d ccb = '0x%p' poll command "
3435 "abort successfully\n"
3436 , acb->host->host_no
3437 , pCCB->pcmd->device->id
3438 , (u32)pCCB->pcmd->device->lun
3439 , pCCB);
3440 pCCB->pcmd->result = DID_ABORT << 16;
3441 arcmsr_ccb_complete(pCCB);
3442 continue;
3443 }
3444 pr_notice("arcmsr%d: polling an illegal "
3445 "ccb command done ccb = '0x%p' "
3446 "ccboutstandingcount = %d\n"
3447 , acb->host->host_no
3448 , pCCB
3449 , atomic_read(&acb->ccboutstandingcount));
3450 continue;
3451 }
3452 error = (acb->pCompletionQ[doneq_index].cmdFlag &
3453 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3454 arcmsr_report_ccb_state(acb, pCCB, error);
3455 }
3456 writel(doneq_index, &reg->reply_post_consumer_index);
3457 return rtn;
3458}
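
The ring this polling routine walks is a plain producer/consumer pair: the IOP advances reply_post_producer_index as it posts completions, the driver consumes entries keyed by cmdSMID, and the consumer index is written back so slots can be reused. A sketch of the emptiness check it spins on; the completion-entry layout is an assumption inferred from the fields used above:

struct hbaE_completion {
	uint16_t cmdSMID;	/* index into acb->pccb_pool[] */
	uint16_t cmdFlag;	/* e.g. ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 */
};

static bool arcmsr_hbaE_reply_pending(struct AdapterControlBlock *acb)
{
	struct MessageUnit_E __iomem *reg = acb->pmuE;

	/* compare the low 16 bits of the producer index with our consumer */
	return (readl(&reg->reply_post_producer_index) & 0xFFFF) !=
		acb->doneq_index;
}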
3459
3169static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, 3460static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3170 struct CommandControlBlock *poll_ccb) 3461 struct CommandControlBlock *poll_ccb)
3171{ 3462{
@@ -3188,10 +3479,95 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3188 case ACB_ADAPTER_TYPE_D: 3479 case ACB_ADAPTER_TYPE_D:
3189 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb); 3480 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
3190 break; 3481 break;
3482 case ACB_ADAPTER_TYPE_E:
3483 rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
3484 break;
3191 } 3485 }
3192 return rtn; 3486 return rtn;
3193} 3487}
3194 3488
3489static void arcmsr_set_iop_datetime(struct timer_list *t)
3490{
3491 struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
3492 unsigned int next_time;
3493 struct tm tm;
3494
3495 union {
3496 struct {
3497 uint16_t signature;
3498 uint8_t year;
3499 uint8_t month;
3500 uint8_t date;
3501 uint8_t hour;
3502 uint8_t minute;
3503 uint8_t second;
3504 } a;
3505 struct {
3506 uint32_t msg_time[2];
3507 } b;
3508 } datetime;
3509
3510 time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);
3511
3512 datetime.a.signature = 0x55AA;
3513 datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
3514 datetime.a.month = tm.tm_mon;
3515 datetime.a.date = tm.tm_mday;
3516 datetime.a.hour = tm.tm_hour;
3517 datetime.a.minute = tm.tm_min;
3518 datetime.a.second = tm.tm_sec;
3519
3520 switch (pacb->adapter_type) {
3521 case ACB_ADAPTER_TYPE_A: {
3522 struct MessageUnit_A __iomem *reg = pacb->pmuA;
3523 writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
3524 writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
3525 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
3526 break;
3527 }
3528 case ACB_ADAPTER_TYPE_B: {
3529 uint32_t __iomem *rwbuffer;
3530 struct MessageUnit_B *reg = pacb->pmuB;
3531 rwbuffer = reg->message_rwbuffer;
3532 writel(datetime.b.msg_time[0], rwbuffer++);
3533 writel(datetime.b.msg_time[1], rwbuffer++);
3534 writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
3535 break;
3536 }
3537 case ACB_ADAPTER_TYPE_C: {
3538 struct MessageUnit_C __iomem *reg = pacb->pmuC;
3539 writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
3540 writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
3541 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
3542 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
3543 break;
3544 }
3545 case ACB_ADAPTER_TYPE_D: {
3546 uint32_t __iomem *rwbuffer;
3547 struct MessageUnit_D *reg = pacb->pmuD;
3548 rwbuffer = reg->msgcode_rwbuffer;
3549 writel(datetime.b.msg_time[0], rwbuffer++);
3550 writel(datetime.b.msg_time[1], rwbuffer++);
3551 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
3552 break;
3553 }
3554 case ACB_ADAPTER_TYPE_E: {
3555 struct MessageUnit_E __iomem *reg = pacb->pmuE;
3556 writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
3557 writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
3558 writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
3559 pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3560 writel(pacb->out_doorbell, &reg->iobound_doorbell);
3561 break;
3562 }
3563 }
3564 if (sys_tz.tz_minuteswest)
3565 next_time = ARCMSR_HOURS;
3566 else
3567 next_time = ARCMSR_MINUTES;
3568 mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
3569}
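
The union above is what lets the date travel as message words: the packed 8-byte struct and the two u32s alias the same storage, so filling the struct and writing b.msg_time[0..1] ships the whole record (this assumes the little-endian layout the firmware evidently expects). A worked example: for 2018-01-31 14:23:28 local time, time64_to_tm() yields tm_year = 118, tm_mon = 0 and tm_mday = 31, hence:

	datetime.a.year  = tm.tm_year - 100;	/* 118 -> 18, years since 2000 */
	datetime.a.month = tm.tm_mon;		/* 0 = January, kept 0-based */
	/* the same 8 bytes, viewed through the union, go out as two words */
	writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
	writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);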
3570
3195static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) 3571static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3196{ 3572{
3197 uint32_t cdb_phyaddr, cdb_phyaddr_hi32; 3573 uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
@@ -3208,6 +3584,10 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3208 case ACB_ADAPTER_TYPE_D: 3584 case ACB_ADAPTER_TYPE_D:
3209 dma_coherent_handle = acb->dma_coherent_handle2; 3585 dma_coherent_handle = acb->dma_coherent_handle2;
3210 break; 3586 break;
3587 case ACB_ADAPTER_TYPE_E:
3588 dma_coherent_handle = acb->dma_coherent_handle +
3589 offsetof(struct CommandControlBlock, arcmsr_cdb);
3590 break;
3211 default: 3591 default:
3212 dma_coherent_handle = acb->dma_coherent_handle; 3592 dma_coherent_handle = acb->dma_coherent_handle;
3213 break; 3593 break;
@@ -3316,6 +3696,29 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3316 } 3696 }
3317 } 3697 }
3318 break; 3698 break;
3699 case ACB_ADAPTER_TYPE_E: {
3700 struct MessageUnit_E __iomem *reg = acb->pmuE;
3701 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
3702 writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
3703 writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
3704 writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
3705 writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
3706 dma_coherent_handle = acb->dma_coherent_handle2;
3707 cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
3708 cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
3709 writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
3710 writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
3711 writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
3712 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
3713 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3714 writel(acb->out_doorbell, &reg->iobound_doorbell);
3715 if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
3716 pr_notice("arcmsr%d: 'set command Q window' timeout\n",
3717 acb->host->host_no);
3718 return 1;
3719 }
3720 }
3721 break;
3319 } 3722 }
3320 return 0; 3723 return 0;
3321} 3724}
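
One idiom worth calling out in the type-E hunk above is the double shift used for the high half of the DMA handle. Sketched as a helper (name hypothetical): ">> 16 >> 16" stays well defined even when dma_addr_t is only 32 bits wide, where a single ">> 32" would be undefined behaviour.

static inline void arcmsr_split_dma_addr(dma_addr_t handle,
	uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(handle & 0xffffffff);
	*hi = (uint32_t)((handle >> 16) >> 16);	/* safe for 32-bit dma_addr_t */
}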
@@ -3356,83 +3759,22 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
3356 ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0); 3759 ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
3357 } 3760 }
3358 break; 3761 break;
3359 } 3762 case ACB_ADAPTER_TYPE_E: {
3360} 3763 struct MessageUnit_E __iomem *reg = acb->pmuE;
3361 3764 do {
3362static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb) 3765 firmware_state = readl(&reg->outbound_msgaddr1);
3363{ 3766 } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
3364 struct MessageUnit_A __iomem *reg = acb->pmuA;
3365 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
3366 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3367 return;
3368 } else {
3369 acb->fw_flag = FW_NORMAL;
3370 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
3371 atomic_set(&acb->rq_map_token, 16);
3372 }
3373 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
3374 if (atomic_dec_and_test(&acb->rq_map_token)) {
3375 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3376 return;
3377 }
3378 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3379 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3380 }
3381 return;
3382}
3383
3384static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
3385{
3386 struct MessageUnit_B *reg = acb->pmuB;
3387 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
3388 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3389 return;
3390 } else {
3391 acb->fw_flag = FW_NORMAL;
3392 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
3393 atomic_set(&acb->rq_map_token, 16);
3394 }
3395 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
3396 if (atomic_dec_and_test(&acb->rq_map_token)) {
3397 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3398 return;
3399 }
3400 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
3401 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3402 }
3403 return;
3404}
3405
3406static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
3407{
3408 struct MessageUnit_C __iomem *reg = acb->pmuC;
3409 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
3410 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3411 return;
3412 } else {
3413 acb->fw_flag = FW_NORMAL;
3414 if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
3415 atomic_set(&acb->rq_map_token, 16);
3416 }
3417 atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
3418 if (atomic_dec_and_test(&acb->rq_map_token)) {
3419 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3420 return;
3421 } 3767 }
3422 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); 3768 break;
3423 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
3424 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3425 } 3769 }
3426 return;
3427} 3770}
3428 3771
3429static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb) 3772static void arcmsr_request_device_map(struct timer_list *t)
3430{ 3773{
3431 struct MessageUnit_D *reg = acb->pmuD; 3774 struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
3432
3433 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || 3775 if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
3434 ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || 3776 (acb->acb_flags & ACB_F_BUS_RESET) ||
3435 ((acb->acb_flags & ACB_F_ABORT) != 0)) { 3777 (acb->acb_flags & ACB_F_ABORT)) {
3436 mod_timer(&acb->eternal_timer, 3778 mod_timer(&acb->eternal_timer,
3437 jiffies + msecs_to_jiffies(6 * HZ)); 3779 jiffies + msecs_to_jiffies(6 * HZ));
3438 } else { 3780 } else {
@@ -3448,32 +3790,40 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
3448 msecs_to_jiffies(6 * HZ)); 3790 msecs_to_jiffies(6 * HZ));
3449 return; 3791 return;
3450 } 3792 }
3451 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, 3793 switch (acb->adapter_type) {
3452 reg->inbound_msgaddr0);
3453 mod_timer(&acb->eternal_timer, jiffies +
3454 msecs_to_jiffies(6 * HZ));
3455 }
3456}
3457
3458static void arcmsr_request_device_map(struct timer_list *t)
3459{
3460 struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
3461 switch (acb->adapter_type) {
3462 case ACB_ADAPTER_TYPE_A: { 3794 case ACB_ADAPTER_TYPE_A: {
3463 arcmsr_hbaA_request_device_map(acb); 3795 struct MessageUnit_A __iomem *reg = acb->pmuA;
3464 } 3796 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3465 break; 3797 break;
3798 }
3466 case ACB_ADAPTER_TYPE_B: { 3799 case ACB_ADAPTER_TYPE_B: {
3467 arcmsr_hbaB_request_device_map(acb); 3800 struct MessageUnit_B *reg = acb->pmuB;
3468 } 3801 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
3469 break; 3802 break;
3803 }
3470 case ACB_ADAPTER_TYPE_C: { 3804 case ACB_ADAPTER_TYPE_C: {
3471 arcmsr_hbaC_request_device_map(acb); 3805 struct MessageUnit_C __iomem *reg = acb->pmuC;
3806 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3807 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
3808 break;
3809 }
3810 case ACB_ADAPTER_TYPE_D: {
3811 struct MessageUnit_D *reg = acb->pmuD;
3812 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
3813 break;
3814 }
3815 case ACB_ADAPTER_TYPE_E: {
3816 struct MessageUnit_E __iomem *reg = acb->pmuE;
3817 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
3818 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3819 writel(acb->out_doorbell, &reg->iobound_doorbell);
3820 break;
3821 }
3822 default:
3823 return;
3472 } 3824 }
3473 break; 3825 acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
3474 case ACB_ADAPTER_TYPE_D: 3826 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3475 arcmsr_hbaD_request_device_map(acb);
3476 break;
3477 } 3827 }
3478} 3828}
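
The consolidated callback relies on the new timer API: it receives the struct timer_list pointer and recovers its container with from_timer(), which is container_of() underneath. For that to work, the setup side, presumably in the probe path and not shown in this hunk, has to look roughly like:

	timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
	mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));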
3479 3829
@@ -3524,6 +3874,20 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
3524 } 3874 }
3525} 3875}
3526 3876
3877static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
3878{
3879 struct MessageUnit_E __iomem *pmu = pACB->pmuE;
3880
3881 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
3882 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
3883 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
3884 writel(pACB->out_doorbell, &pmu->iobound_doorbell);
3885 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3886 pr_notice("arcmsr%d: wait 'start adapter "
3887 "background rebulid' timeout \n", pACB->host->host_no);
3888 }
3889}
3890
3527static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) 3891static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
3528{ 3892{
3529 switch (acb->adapter_type) { 3893 switch (acb->adapter_type) {
@@ -3539,6 +3903,9 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
3539 case ACB_ADAPTER_TYPE_D: 3903 case ACB_ADAPTER_TYPE_D:
3540 arcmsr_hbaD_start_bgrb(acb); 3904 arcmsr_hbaD_start_bgrb(acb);
3541 break; 3905 break;
3906 case ACB_ADAPTER_TYPE_E:
3907 arcmsr_hbaE_start_bgrb(acb);
3908 break;
3542 } 3909 }
3543} 3910}
3544 3911
@@ -3558,10 +3925,19 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
3558 3925
3559 case ACB_ADAPTER_TYPE_B: { 3926 case ACB_ADAPTER_TYPE_B: {
3560 struct MessageUnit_B *reg = acb->pmuB; 3927 struct MessageUnit_B *reg = acb->pmuB;
3561 /*clear interrupt and message state*/ 3928 uint32_t outbound_doorbell, i;
3562 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); 3929 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3563 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); 3930 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
3564 /* let IOP know data has been read */ 3931 /* let IOP know data has been read */
3932 for (i = 0; i < 200; i++) {
3933 msleep(20);
3934 outbound_doorbell = readl(reg->iop2drv_doorbell);
3935 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
3936 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
3937 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
3938 } else
3939 break;
3940 }
3565 } 3941 }
3566 break; 3942 break;
3567 case ACB_ADAPTER_TYPE_C: { 3943 case ACB_ADAPTER_TYPE_C: {
@@ -3607,6 +3983,27 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
3607 } 3983 }
3608 } 3984 }
3609 break; 3985 break;
3986 case ACB_ADAPTER_TYPE_E: {
3987 struct MessageUnit_E __iomem *reg = acb->pmuE;
3988 uint32_t i, tmp;
3989
3990 acb->in_doorbell = readl(&reg->iobound_doorbell);
3991 writel(0, &reg->host_int_status); /*clear interrupt*/
3992 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
3993 writel(acb->out_doorbell, &reg->iobound_doorbell);
3994 for (i = 0; i < 200; i++) {
3995 msleep(20);
3996 tmp = acb->in_doorbell;
3997 acb->in_doorbell = readl(&reg->iobound_doorbell);
3998 if ((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
3999 writel(0, &reg->host_int_status); /*clear interrupt*/
4000 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4001 writel(acb->out_doorbell, &reg->iobound_doorbell);
4002 } else
4003 break;
4004 }
4005 }
4006 break;
3610 } 4007 }
3611} 4008}
3612 4009
@@ -3658,6 +4055,19 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
3658 writel(0xD, &pmuC->write_sequence); 4055 writel(0xD, &pmuC->write_sequence);
3659 } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5)); 4056 } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
3660 writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic); 4057 writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
4058 } else if (acb->dev_id == 0x1884) {
4059 struct MessageUnit_E __iomem *pmuE = acb->pmuE;
4060 do {
4061 count++;
4062 writel(0x4, &pmuE->write_sequence_3xxx);
4063 writel(0xB, &pmuE->write_sequence_3xxx);
4064 writel(0x2, &pmuE->write_sequence_3xxx);
4065 writel(0x7, &pmuE->write_sequence_3xxx);
4066 writel(0xD, &pmuE->write_sequence_3xxx);
4067 mdelay(10);
4068 } while (((readl(&pmuE->host_diagnostic_3xxx) &
4069 ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
4070 writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
3661 } else if ((acb->dev_id == 0x1214)) { 4071 } else if ((acb->dev_id == 0x1214)) {
3662 writel(0x20, pmuD->reset_request); 4072 writel(0x20, pmuD->reset_request);
3663 } else { 4073 } else {
@@ -3671,6 +4081,45 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
3671 msleep(1000); 4081 msleep(1000);
3672 return; 4082 return;
3673} 4083}
4084
4085static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
4086{
4087 bool rtn = true;
4088
4089 switch(acb->adapter_type) {
4090 case ACB_ADAPTER_TYPE_A:{
4091 struct MessageUnit_A __iomem *reg = acb->pmuA;
4092 rtn = ((readl(&reg->outbound_msgaddr1) &
4093 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
4094 }
4095 break;
4096 case ACB_ADAPTER_TYPE_B:{
4097 struct MessageUnit_B *reg = acb->pmuB;
4098 rtn = ((readl(reg->iop2drv_doorbell) &
4099 ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
4100 }
4101 break;
4102 case ACB_ADAPTER_TYPE_C:{
4103 struct MessageUnit_C __iomem *reg = acb->pmuC;
4104 rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
4105 }
4106 break;
4107 case ACB_ADAPTER_TYPE_D:{
4108 struct MessageUnit_D *reg = acb->pmuD;
4109 rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
4110 true : false;
4111 }
4112 break;
4113 case ACB_ADAPTER_TYPE_E:{
4114 struct MessageUnit_E __iomem *reg = acb->pmuE;
4115 rtn = (readl(&reg->host_diagnostic_3xxx) &
4116 ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
4117 }
4118 break;
4119 }
4120 return rtn;
4121}
4122
3674static void arcmsr_iop_init(struct AdapterControlBlock *acb) 4123static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3675{ 4124{
3676 uint32_t intmask_org; 4125 uint32_t intmask_org;
@@ -3703,7 +4152,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
3703 rtnval = arcmsr_abort_allcmd(acb); 4152 rtnval = arcmsr_abort_allcmd(acb);
3704 /* clear all outbound posted Q */ 4153 /* clear all outbound posted Q */
3705 arcmsr_done4abort_postqueue(acb); 4154 arcmsr_done4abort_postqueue(acb);
3706 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 4155 for (i = 0; i < acb->maxFreeCCB; i++) {
3707 ccb = acb->pccb_pool[i]; 4156 ccb = acb->pccb_pool[i];
3708 if (ccb->startdone == ARCMSR_CCB_START) { 4157 if (ccb->startdone == ARCMSR_CCB_START) {
3709 scsi_dma_unmap(ccb->pcmd); 4158 scsi_dma_unmap(ccb->pcmd);
@@ -3725,197 +4174,55 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
3725static int arcmsr_bus_reset(struct scsi_cmnd *cmd) 4174static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
3726{ 4175{
3727 struct AdapterControlBlock *acb; 4176 struct AdapterControlBlock *acb;
3728 uint32_t intmask_org, outbound_doorbell;
3729 int retry_count = 0; 4177 int retry_count = 0;
3730 int rtn = FAILED; 4178 int rtn = FAILED;
3731 acb = (struct AdapterControlBlock *) cmd->device->host->hostdata; 4179 acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
3732 printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts); 4180 pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
4181 " num_aborts = %d \n", acb->num_resets, acb->num_aborts);
3733 acb->num_resets++; 4182 acb->num_resets++;
3734 4183
3735 switch(acb->adapter_type){ 4184 if (acb->acb_flags & ACB_F_BUS_RESET) {
3736 case ACB_ADAPTER_TYPE_A:{ 4185 long timeout;
3737 if (acb->acb_flags & ACB_F_BUS_RESET){ 4186 pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
3738 long timeout; 4187 timeout = wait_event_timeout(wait_q, (acb->acb_flags
3739 printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n"); 4188 & ACB_F_BUS_RESET) == 0, 220 * HZ);
3740 timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ); 4189 if (timeout)
3741 if (timeout) { 4190 return SUCCESS;
3742 return SUCCESS; 4191 }
3743 } 4192 acb->acb_flags |= ACB_F_BUS_RESET;
3744 } 4193 if (!arcmsr_iop_reset(acb)) {
3745 acb->acb_flags |= ACB_F_BUS_RESET; 4194 arcmsr_hardware_reset(acb);
3746 if (!arcmsr_iop_reset(acb)) { 4195 acb->acb_flags &= ~ACB_F_IOP_INITED;
3747 struct MessageUnit_A __iomem *reg; 4196wait_reset_done:
3748 reg = acb->pmuA; 4197 ssleep(ARCMSR_SLEEPTIME);
3749 arcmsr_hardware_reset(acb); 4198 if (arcmsr_reset_in_progress(acb)) {
3750 acb->acb_flags &= ~ACB_F_IOP_INITED; 4199 if (retry_count > ARCMSR_RETRYCOUNT) {
3751sleep_again: 4200 acb->fw_flag = FW_DEADLOCK;
3752 ssleep(ARCMSR_SLEEPTIME); 4201 pr_notice("arcmsr%d: waiting for hw bus reset"
3753 if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { 4202 " return, RETRY TERMINATED!!\n",
3754 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count); 4203 acb->host->host_no);
3755 if (retry_count > ARCMSR_RETRYCOUNT) { 4204 return FAILED;
3756 acb->fw_flag = FW_DEADLOCK;
3757 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
3758 return FAILED;
3759 }
3760 retry_count++;
3761 goto sleep_again;
3762 }
3763 acb->acb_flags |= ACB_F_IOP_INITED;
3764 /* disable all outbound interrupt */
3765 intmask_org = arcmsr_disable_outbound_ints(acb);
3766 arcmsr_get_firmware_spec(acb);
3767 arcmsr_start_adapter_bgrb(acb);
3768 /* clear Qbuffer if door bell ringed */
3769 outbound_doorbell = readl(&reg->outbound_doorbell);
3770 writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
3771 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
3772 /* enable outbound Post Queue,outbound doorbell Interrupt */
3773 arcmsr_enable_outbound_ints(acb, intmask_org);
3774 atomic_set(&acb->rq_map_token, 16);
3775 atomic_set(&acb->ante_token_value, 16);
3776 acb->fw_flag = FW_NORMAL;
3777 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3778 acb->acb_flags &= ~ACB_F_BUS_RESET;
3779 rtn = SUCCESS;
3780 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
3781 } else {
3782 acb->acb_flags &= ~ACB_F_BUS_RESET;
3783 atomic_set(&acb->rq_map_token, 16);
3784 atomic_set(&acb->ante_token_value, 16);
3785 acb->fw_flag = FW_NORMAL;
3786 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3787 rtn = SUCCESS;
3788 }
3789 break;
3790 }
3791 case ACB_ADAPTER_TYPE_B:{
3792 acb->acb_flags |= ACB_F_BUS_RESET;
3793 if (!arcmsr_iop_reset(acb)) {
3794 acb->acb_flags &= ~ACB_F_BUS_RESET;
3795 rtn = FAILED;
3796 } else {
3797 acb->acb_flags &= ~ACB_F_BUS_RESET;
3798 atomic_set(&acb->rq_map_token, 16);
3799 atomic_set(&acb->ante_token_value, 16);
3800 acb->fw_flag = FW_NORMAL;
3801 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3802 rtn = SUCCESS;
3803 }
3804 break;
3805 }
3806 case ACB_ADAPTER_TYPE_C:{
3807 if (acb->acb_flags & ACB_F_BUS_RESET) {
3808 long timeout;
3809 printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
3810 timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
3811 if (timeout) {
3812 return SUCCESS;
3813 }
3814 }
3815 acb->acb_flags |= ACB_F_BUS_RESET;
3816 if (!arcmsr_iop_reset(acb)) {
3817 struct MessageUnit_C __iomem *reg;
3818 reg = acb->pmuC;
3819 arcmsr_hardware_reset(acb);
3820 acb->acb_flags &= ~ACB_F_IOP_INITED;
3821sleep:
3822 ssleep(ARCMSR_SLEEPTIME);
3823 if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
3824 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
3825 if (retry_count > ARCMSR_RETRYCOUNT) {
3826 acb->fw_flag = FW_DEADLOCK;
3827 printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
3828 return FAILED;
3829 }
3830 retry_count++;
3831 goto sleep;
3832 }
3833 acb->acb_flags |= ACB_F_IOP_INITED;
3834 /* disable all outbound interrupt */
3835 intmask_org = arcmsr_disable_outbound_ints(acb);
3836 arcmsr_get_firmware_spec(acb);
3837 arcmsr_start_adapter_bgrb(acb);
3838 /* clear Qbuffer if door bell ringed */
3839 arcmsr_clear_doorbell_queue_buffer(acb);
3840 /* enable outbound Post Queue,outbound doorbell Interrupt */
3841 arcmsr_enable_outbound_ints(acb, intmask_org);
3842 atomic_set(&acb->rq_map_token, 16);
3843 atomic_set(&acb->ante_token_value, 16);
3844 acb->fw_flag = FW_NORMAL;
3845 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
3846 acb->acb_flags &= ~ACB_F_BUS_RESET;
3847 rtn = SUCCESS;
3848 printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
3849 } else {
3850 acb->acb_flags &= ~ACB_F_BUS_RESET;
3851 atomic_set(&acb->rq_map_token, 16);
3852 atomic_set(&acb->ante_token_value, 16);
3853 acb->fw_flag = FW_NORMAL;
3854 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3855 rtn = SUCCESS;
3856 }
3857 break;
3858 }
3859 case ACB_ADAPTER_TYPE_D: {
3860 if (acb->acb_flags & ACB_F_BUS_RESET) {
3861 long timeout;
3862 pr_notice("arcmsr: there is an bus reset"
3863 " eh proceeding.......\n");
3864 timeout = wait_event_timeout(wait_q, (acb->acb_flags
3865 & ACB_F_BUS_RESET) == 0, 220 * HZ);
3866 if (timeout)
3867 return SUCCESS;
3868 }
3869 acb->acb_flags |= ACB_F_BUS_RESET;
3870 if (!arcmsr_iop_reset(acb)) {
3871 struct MessageUnit_D *reg;
3872 reg = acb->pmuD;
3873 arcmsr_hardware_reset(acb);
3874 acb->acb_flags &= ~ACB_F_IOP_INITED;
3875 nap:
3876 ssleep(ARCMSR_SLEEPTIME);
3877 if ((readl(reg->sample_at_reset) & 0x80) != 0) {
3878 pr_err("arcmsr%d: waiting for "
3879 "hw bus reset return, retry=%d\n",
3880 acb->host->host_no, retry_count);
3881 if (retry_count > ARCMSR_RETRYCOUNT) {
3882 acb->fw_flag = FW_DEADLOCK;
3883 pr_err("arcmsr%d: waiting for hw bus"
3884 " reset return, "
3885 "RETRY TERMINATED!!\n",
3886 acb->host->host_no);
3887 return FAILED;
3888 }
3889 retry_count++;
3890 goto nap;
3891 }
3892 acb->acb_flags |= ACB_F_IOP_INITED;
3893 /* disable all outbound interrupt */
3894 intmask_org = arcmsr_disable_outbound_ints(acb);
3895 arcmsr_get_firmware_spec(acb);
3896 arcmsr_start_adapter_bgrb(acb);
3897 arcmsr_clear_doorbell_queue_buffer(acb);
3898 arcmsr_enable_outbound_ints(acb, intmask_org);
3899 atomic_set(&acb->rq_map_token, 16);
3900 atomic_set(&acb->ante_token_value, 16);
3901 acb->fw_flag = FW_NORMAL;
3902 mod_timer(&acb->eternal_timer,
3903 jiffies + msecs_to_jiffies(6 * HZ));
3904 acb->acb_flags &= ~ACB_F_BUS_RESET;
3905 rtn = SUCCESS;
3906 pr_err("arcmsr: scsi bus reset "
3907 "eh returns with success\n");
3908 } else {
3909 acb->acb_flags &= ~ACB_F_BUS_RESET;
3910 atomic_set(&acb->rq_map_token, 16);
3911 atomic_set(&acb->ante_token_value, 16);
3912 acb->fw_flag = FW_NORMAL;
3913 mod_timer(&acb->eternal_timer,
3914 jiffies + msecs_to_jiffies(6 * HZ));
3915 rtn = SUCCESS;
3916 } 4205 }
3917 break; 4206 retry_count++;
4207 goto wait_reset_done;
3918 } 4208 }
4209 arcmsr_iop_init(acb);
4210 atomic_set(&acb->rq_map_token, 16);
4211 atomic_set(&acb->ante_token_value, 16);
4212 acb->fw_flag = FW_NORMAL;
4213 mod_timer(&acb->eternal_timer, jiffies +
4214 msecs_to_jiffies(6 * HZ));
4215 acb->acb_flags &= ~ACB_F_BUS_RESET;
4216 rtn = SUCCESS;
4217 pr_notice("arcmsr: scsi bus reset eh returns with success\n");
4218 } else {
4219 acb->acb_flags &= ~ACB_F_BUS_RESET;
4220 atomic_set(&acb->rq_map_token, 16);
4221 atomic_set(&acb->ante_token_value, 16);
4222 acb->fw_flag = FW_NORMAL;
4223 mod_timer(&acb->eternal_timer, jiffies +
4224 msecs_to_jiffies(6 * HZ));
4225 rtn = SUCCESS;
3919 } 4226 }
3920 return rtn; 4227 return rtn;
3921} 4228}
@@ -3953,7 +4260,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
3953 } 4260 }
3954 4261
3955 intmask_org = arcmsr_disable_outbound_ints(acb); 4262 intmask_org = arcmsr_disable_outbound_ints(acb);
3956 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 4263 for (i = 0; i < acb->maxFreeCCB; i++) {
3957 struct CommandControlBlock *ccb = acb->pccb_pool[i]; 4264 struct CommandControlBlock *ccb = acb->pccb_pool[i];
3958 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { 4265 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
3959 ccb->startdone = ARCMSR_CCB_ABORTED; 4266 ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -3999,6 +4306,7 @@ static const char *arcmsr_info(struct Scsi_Host *host)
3999 case PCI_DEVICE_ID_ARECA_1680: 4306 case PCI_DEVICE_ID_ARECA_1680:
4000 case PCI_DEVICE_ID_ARECA_1681: 4307 case PCI_DEVICE_ID_ARECA_1681:
4001 case PCI_DEVICE_ID_ARECA_1880: 4308 case PCI_DEVICE_ID_ARECA_1880:
4309 case PCI_DEVICE_ID_ARECA_1884:
4002 type = "SAS/SATA"; 4310 type = "SAS/SATA";
4003 break; 4311 break;
4004 default: 4312 default:
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index f4775ca70bab..27bda2b05de6 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2011,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
2011 * have valid data in the sense buffer that could 2011 * have valid data in the sense buffer that could
2012 * confuse the higher levels. 2012 * confuse the higher levels.
2013 */ 2013 */
2014 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); 2014 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2015//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id); 2015//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
2016//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); } 2016//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
2017 /* 2017 /*
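
The fas216 one-liner fixes a classic sizeof-on-pointer bug: scsi_cmnd's sense_buffer was once an embedded array but is now a pointer, so sizeof() measures the pointer (4 or 8 bytes), not the 96-byte buffer. A minimal illustration with hypothetical structs:

#include <string.h>

struct cmd_old { unsigned char sense_buffer[96]; };
struct cmd_new { unsigned char *sense_buffer; };

static void clear_old(struct cmd_old *c)
{
	memset(c->sense_buffer, 0, sizeof(c->sense_buffer));	/* 96, correct */
}

static void clear_new(struct cmd_new *c)
{
	/* sizeof(c->sense_buffer) would be 4 or 8 here, so the explicit
	 * constant (SCSI_SENSE_BUFFERSIZE, i.e. 96) must be used instead */
	memset(c->sense_buffer, 0, 96);
}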
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 3e1caec82554..10a63be92544 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1957,7 +1957,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1957 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, 1957 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
1958 }; 1958 };
1959 1959
1960 *npciids = sizeof(__pciids) / sizeof(__pciids[0]); 1960 *npciids = ARRAY_SIZE(__pciids);
1961 *pciids = __pciids; 1961 *pciids = __pciids;
1962} 1962}
1963 1963
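
ARRAY_SIZE() is the kernel's canonical spelling of that computation; modulo a compile-time check that the argument really is an array, it expands to:

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))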
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index df6760ca0911..9685efc59b16 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -35,10 +35,10 @@
35 35
36#define BFA_TRC_TS(_trcm) \ 36#define BFA_TRC_TS(_trcm) \
37 ({ \ 37 ({ \
38 struct timeval tv; \ 38 struct timespec64 ts; \
39 \ 39 \
40 do_gettimeofday(&tv); \ 40 ktime_get_ts64(&ts); \
41 (tv.tv_sec*1000000+tv.tv_usec); \ 41 (ts.tv_sec*1000000+ts.tv_nsec / 1000); \
42 }) 42 })
43 43
44#ifndef BFA_TRC_TS 44#ifndef BFA_TRC_TS
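
The converted macro keeps the same microsecond unit but now draws from the monotonic clock with 64-bit seconds, so it is y2038-safe. An equivalent helper form, using the standard kernel constants USEC_PER_SEC and NSEC_PER_USEC:

static inline u64 bfa_trc_ts_us(void)
{
	struct timespec64 ts;

	ktime_get_ts64(&ts);	/* monotonic, 64-bit tv_sec */
	return (u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC;
}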
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index e81707f938cb..3d0c96a5c873 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1455,7 +1455,8 @@ struct bfa_aen_entry_s {
1455 enum bfa_aen_category aen_category; 1455 enum bfa_aen_category aen_category;
1456 u32 aen_type; 1456 u32 aen_type;
1457 union bfa_aen_data_u aen_data; 1457 union bfa_aen_data_u aen_data;
1458 struct timeval aen_tv; 1458 u64 aen_tv_sec;
1459 u64 aen_tv_usec;
1459 u32 seq_num; 1460 u32 seq_num;
1460 u32 bfad_num; 1461 u32 bfad_num;
1461}; 1462};
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b8dadc9cc993..d3b00a475aeb 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1250,8 +1250,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1250 memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); 1250 memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
1251 1251
1252 rspnid->dap = s_id; 1252 rspnid->dap = s_id;
1253 rspnid->spn_len = (u8) strlen((char *)name); 1253 strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
1254 strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len); 1254 rspnid->spn_len = (u8) strlen(rspnid->spn);
1255 1255
1256 return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s); 1256 return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
1257} 1257}
@@ -1271,8 +1271,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
1271 memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s)); 1271 memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
1272 1272
1273 rsnn_nn->node_name = node_name; 1273 rsnn_nn->node_name = node_name;
1274 rsnn_nn->snn_len = (u8) strlen((char *)name); 1274 strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
1275 strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len); 1275 rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
1276 1276
1277 return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s); 1277 return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
1278} 1278}
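
The reordering in both hunks is the point of the fix: the old code measured the source with strlen() and then strncpy'd exactly that many bytes, which bounds nothing on the destination side and does not guarantee NUL termination. Copy first with a destination bound, then measure what actually landed. Roughly:

static u8 copy_name(char *dst, size_t dstsz, const char *src)
{
	strlcpy(dst, src, dstsz);	/* bounded by dst, always terminated */
	return (u8)strlen(dst);		/* length of what actually fit */
}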
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 5f53b3276234..2c85f5b1f9c1 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -468,7 +468,7 @@ bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
468} 468}
469 469
470bfa_status_t 470bfa_status_t
471bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time) 471bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
472{ 472{
473 struct bfa_itnim_s *itnim; 473 struct bfa_itnim_s *itnim;
474 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); 474 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
@@ -1478,6 +1478,7 @@ bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1478 return BFA_STATUS_IOPROFILE_OFF; 1478 return BFA_STATUS_IOPROFILE_OFF;
1479 1479
1480 itnim->ioprofile.index = BFA_IOBUCKET_MAX; 1480 itnim->ioprofile.index = BFA_IOBUCKET_MAX;
1481 /* unsigned 32-bit time_t overflow here in y2106 */
1481 itnim->ioprofile.io_profile_start_time = 1482 itnim->ioprofile.io_profile_start_time =
1482 bfa_io_profile_start_time(itnim->bfa); 1483 bfa_io_profile_start_time(itnim->bfa);
1483 itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul; 1484 itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index e93921dec347..ec8f863540ae 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -136,7 +136,7 @@ struct bfa_fcpim_s {
136 struct bfa_fcpim_del_itn_stats_s del_itn_stats; 136 struct bfa_fcpim_del_itn_stats_s del_itn_stats;
137 bfa_boolean_t ioredirect; 137 bfa_boolean_t ioredirect;
138 bfa_boolean_t io_profile; 138 bfa_boolean_t io_profile;
139 u32 io_profile_start_time; 139 time64_t io_profile_start_time;
140 bfa_fcpim_profile_t profile_comp; 140 bfa_fcpim_profile_t profile_comp;
141 bfa_fcpim_profile_t profile_start; 141 bfa_fcpim_profile_t profile_start;
142}; 142};
@@ -310,7 +310,7 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
310 struct bfa_itnim_iostats_s *stats, u8 lp_tag); 310 struct bfa_itnim_iostats_s *stats, u8 lp_tag);
311void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats, 311void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
312 struct bfa_itnim_iostats_s *itnim_stats); 312 struct bfa_itnim_iostats_s *itnim_stats);
313bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time); 313bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time);
314bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa); 314bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
315 315
316#define bfa_fcpim_ioredirect_enabled(__bfa) \ 316#define bfa_fcpim_ioredirect_enabled(__bfa) \
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 4aa61e20e82d..932feb0ed4da 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -769,23 +769,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
769 bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); 769 bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
770 770
771 /* Model name/number */ 771 /* Model name/number */
772 strncpy((char *)&port_cfg->sym_name, model, 772 strlcpy(port_cfg->sym_name.symname, model,
773 BFA_FCS_PORT_SYMBNAME_MODEL_SZ); 773 BFA_SYMNAME_MAXLEN);
774 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, 774 strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
775 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 775 BFA_SYMNAME_MAXLEN);
776 776
777 /* Driver Version */ 777 /* Driver Version */
778 strncat((char *)&port_cfg->sym_name, (char *)driver_info->version, 778 strlcat(port_cfg->sym_name.symname, driver_info->version,
779 BFA_FCS_PORT_SYMBNAME_VERSION_SZ); 779 BFA_SYMNAME_MAXLEN);
780 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, 780 strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
781 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 781 BFA_SYMNAME_MAXLEN);
782 782
783 /* Host machine name */ 783 /* Host machine name */
784 strncat((char *)&port_cfg->sym_name, 784 strlcat(port_cfg->sym_name.symname,
785 (char *)driver_info->host_machine_name, 785 driver_info->host_machine_name,
786 BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ); 786 BFA_SYMNAME_MAXLEN);
787 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, 787 strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
788 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 788 BFA_SYMNAME_MAXLEN);
789 789
790 /* 790 /*
791 * Host OS Info : 791 * Host OS Info :
@@ -793,24 +793,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
793 * OS name string and instead copy the entire OS info string (64 bytes). 793 * OS name string and instead copy the entire OS info string (64 bytes).
794 */ 794 */
795 if (driver_info->host_os_patch[0] == '\0') { 795 if (driver_info->host_os_patch[0] == '\0') {
796 strncat((char *)&port_cfg->sym_name, 796 strlcat(port_cfg->sym_name.symname,
797 (char *)driver_info->host_os_name, 797 driver_info->host_os_name,
798 BFA_FCS_OS_STR_LEN); 798 BFA_SYMNAME_MAXLEN);
799 strncat((char *)&port_cfg->sym_name, 799 strlcat(port_cfg->sym_name.symname,
800 BFA_FCS_PORT_SYMBNAME_SEPARATOR, 800 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
801 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 801 BFA_SYMNAME_MAXLEN);
802 } else { 802 } else {
803 strncat((char *)&port_cfg->sym_name, 803 strlcat(port_cfg->sym_name.symname,
804 (char *)driver_info->host_os_name, 804 driver_info->host_os_name,
805 BFA_FCS_PORT_SYMBNAME_OSINFO_SZ); 805 BFA_SYMNAME_MAXLEN);
806 strncat((char *)&port_cfg->sym_name, 806 strlcat(port_cfg->sym_name.symname,
807 BFA_FCS_PORT_SYMBNAME_SEPARATOR, 807 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
808 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 808 BFA_SYMNAME_MAXLEN);
809 809
810 /* Append host OS Patch Info */ 810 /* Append host OS Patch Info */
811 strncat((char *)&port_cfg->sym_name, 811 strlcat(port_cfg->sym_name.symname,
812 (char *)driver_info->host_os_patch, 812 driver_info->host_os_patch,
813 BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ); 813 BFA_SYMNAME_MAXLEN);
814 } 814 }
815 815
816 /* null terminate */ 816 /* null terminate */
@@ -830,26 +830,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
830 bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); 830 bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
831 831
832 /* Model name/number */ 832 /* Model name/number */
833 strncpy((char *)&port_cfg->node_sym_name, model, 833 strlcpy(port_cfg->node_sym_name.symname, model,
834 BFA_FCS_PORT_SYMBNAME_MODEL_SZ); 834 BFA_SYMNAME_MAXLEN);
835 strncat((char *)&port_cfg->node_sym_name, 835 strlcat(port_cfg->node_sym_name.symname,
836 BFA_FCS_PORT_SYMBNAME_SEPARATOR, 836 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
837 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 837 BFA_SYMNAME_MAXLEN);
838 838
839 /* Driver Version */ 839 /* Driver Version */
840 strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version, 840 strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
841 BFA_FCS_PORT_SYMBNAME_VERSION_SZ); 841 BFA_SYMNAME_MAXLEN);
842 strncat((char *)&port_cfg->node_sym_name, 842 strlcat(port_cfg->node_sym_name.symname,
843 BFA_FCS_PORT_SYMBNAME_SEPARATOR, 843 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
844 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 844 BFA_SYMNAME_MAXLEN);
845 845
846 /* Host machine name */ 846 /* Host machine name */
847 strncat((char *)&port_cfg->node_sym_name, 847 strlcat(port_cfg->node_sym_name.symname,
848 (char *)driver_info->host_machine_name, 848 driver_info->host_machine_name,
849 BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ); 849 BFA_SYMNAME_MAXLEN);
850 strncat((char *)&port_cfg->node_sym_name, 850 strlcat(port_cfg->node_sym_name.symname,
851 BFA_FCS_PORT_SYMBNAME_SEPARATOR, 851 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
852 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 852 BFA_SYMNAME_MAXLEN);
853 853
854 /* null terminate */ 854 /* null terminate */
855 port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; 855 port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
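
The whole sweep turns on one semantic difference: strncat(dst, src, n) bounds the bytes taken from src and can still overflow dst (and passing sizeof(SEPARATOR) as n, as the old code did, bounded nothing useful), whereas strlcat(dst, src, size) bounds the total length of dst and always terminates. The resulting pattern, sketched with hypothetical inputs:

	char sym[BFA_SYMNAME_MAXLEN];
	const char *model = "QLE2742";		/* hypothetical */
	const char *version = "3.2.25.1";	/* hypothetical */

	strlcpy(sym, model, sizeof(sym));
	strlcat(sym, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(sym));
	strlcat(sym, version, sizeof(sym));	/* can never overrun sym[] */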
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 638c0a2857f7..b4f2c1d8742e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2642 bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, 2642 bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
2643 hba_attr->fw_version); 2643 hba_attr->fw_version);
2644 2644
2645 strncpy(hba_attr->driver_version, (char *)driver_info->version, 2645 strlcpy(hba_attr->driver_version, (char *)driver_info->version,
2646 sizeof(hba_attr->driver_version)); 2646 sizeof(hba_attr->driver_version));
2647 2647
2648 strncpy(hba_attr->os_name, driver_info->host_os_name, 2648 strlcpy(hba_attr->os_name, driver_info->host_os_name,
2649 sizeof(hba_attr->os_name)); 2649 sizeof(hba_attr->os_name));
2650 2650
2651 /* 2651 /*
@@ -2653,23 +2653,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2653 * to the os name along with a separator 2653 * to the os name along with a separator
2654 */ 2654 */
2655 if (driver_info->host_os_patch[0] != '\0') { 2655 if (driver_info->host_os_patch[0] != '\0') {
2656 strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, 2656 strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
2657 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 2657 sizeof(hba_attr->os_name));
2658 strncat(hba_attr->os_name, driver_info->host_os_patch, 2658 strlcat(hba_attr->os_name, driver_info->host_os_patch,
2659 sizeof(driver_info->host_os_patch)); 2659 sizeof(hba_attr->os_name));
2660 } 2660 }
2661 2661
2662 /* Retrieve the max frame size from the port attr */ 2662 /* Retrieve the max frame size from the port attr */
2663 bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); 2663 bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
2664 hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size; 2664 hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
2665 2665
2666 strncpy(hba_attr->node_sym_name.symname, 2666 strlcpy(hba_attr->node_sym_name.symname,
2667 port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN); 2667 port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
2668 strcpy(hba_attr->vendor_info, "QLogic"); 2668 strcpy(hba_attr->vendor_info, "QLogic");
2669 hba_attr->num_ports = 2669 hba_attr->num_ports =
2670 cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc)); 2670 cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
2671 hba_attr->fabric_name = port->fabric->lps->pr_nwwn; 2671 hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
2672 strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN); 2672 strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
2673 2673
2674} 2674}
2675 2675
@@ -2736,20 +2736,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
2736 /* 2736 /*
2737 * OS device Name 2737 * OS device Name
2738 */ 2738 */
2739 strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name, 2739 strlcpy(port_attr->os_device_name, driver_info->os_device_name,
2740 sizeof(port_attr->os_device_name)); 2740 sizeof(port_attr->os_device_name));
2741 2741
2742 /* 2742 /*
2743 * Host name 2743 * Host name
2744 */ 2744 */
2745 strncpy(port_attr->host_name, (char *)driver_info->host_machine_name, 2745 strlcpy(port_attr->host_name, driver_info->host_machine_name,
2746 sizeof(port_attr->host_name)); 2746 sizeof(port_attr->host_name));
2747 2747
2748 port_attr->node_name = bfa_fcs_lport_get_nwwn(port); 2748 port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
2749 port_attr->port_name = bfa_fcs_lport_get_pwwn(port); 2749 port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
2750 2750
2751 strncpy(port_attr->port_sym_name.symname, 2751 strlcpy(port_attr->port_sym_name.symname,
2752 (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN); 2752 bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
2753 bfa_fcs_lport_get_attr(port, &lport_attr); 2753 bfa_fcs_lport_get_attr(port, &lport_attr);
2754 port_attr->port_type = cpu_to_be32(lport_attr.port_type); 2754 port_attr->port_type = cpu_to_be32(lport_attr.port_type);
2755 port_attr->scos = pport_attr.cos_supported; 2755 port_attr->scos = pport_attr.cos_supported;
@@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3229 rsp_str[gmal_entry->len-1] = 0; 3229 rsp_str[gmal_entry->len-1] = 0;
3230 3230
3231 /* copy IP Address to fabric */ 3231 /* copy IP Address to fabric */
3232 strncpy(bfa_fcs_lport_get_fabric_ipaddr(port), 3232 strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
3233 gmal_entry->ip_addr, 3233 gmal_entry->ip_addr,
3234 BFA_FCS_FABRIC_IPADDR_SZ); 3234 BFA_FCS_FABRIC_IPADDR_SZ);
3235 break; 3235 break;
@@ -4667,21 +4667,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
4667 * to that of the base port. 4667 * to that of the base port.
4668 */ 4668 */
4669 4669
4670 strncpy((char *)psymbl, 4670 strlcpy(symbl,
4671 (char *) & 4671 (char *)&(bfa_fcs_lport_get_psym_name
4672 (bfa_fcs_lport_get_psym_name
4673 (bfa_fcs_get_base_port(port->fcs))), 4672 (bfa_fcs_get_base_port(port->fcs))),
4674 strlen((char *) & 4673 sizeof(symbl));
4675 bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port 4674
4676 (port->fcs)))); 4675 strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
4677 4676 sizeof(symbl));
4678 /* Ensure we have a null terminating string. */
4679 ((char *)psymbl)[strlen((char *) &
4680 bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
4681 (port->fcs)))] = 0;
4682 strncat((char *)psymbl,
4683 (char *) &(bfa_fcs_lport_get_psym_name(port)),
4684 strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
4685 } else { 4677 } else {
4686 psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port)); 4678 psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
4687 } 4679 }
@@ -5173,7 +5165,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
5173 struct fchs_s fchs; 5165 struct fchs_s fchs;
5174 struct bfa_fcxp_s *fcxp; 5166 struct bfa_fcxp_s *fcxp;
5175 u8 symbl[256]; 5167 u8 symbl[256];
5176 u8 *psymbl = &symbl[0];
5177 int len; 5168 int len;
5178 5169
5179 /* Avoid sending RSPN in the following states. */ 5170 /* Avoid sending RSPN in the following states. */
@@ -5203,22 +5194,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
5203 * For Vports, we append the vport's port symbolic name 5194 * For Vports, we append the vport's port symbolic name
5204 * to that of the base port. 5195 * to that of the base port.
5205 */ 5196 */
5206 strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name 5197 strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
5207 (bfa_fcs_get_base_port(port->fcs))), 5198 (bfa_fcs_get_base_port(port->fcs))),
5208 strlen((char *)&bfa_fcs_lport_get_psym_name( 5199 sizeof(symbl));
5209 bfa_fcs_get_base_port(port->fcs))));
5210
5211 /* Ensure we have a null terminating string. */
5212 ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
5213 bfa_fcs_get_base_port(port->fcs)))] = 0;
5214 5200
5215 strncat((char *)psymbl, 5201 strlcat(symbl,
5216 (char *)&(bfa_fcs_lport_get_psym_name(port)), 5202 (char *)&(bfa_fcs_lport_get_psym_name(port)),
5217 strlen((char *)&bfa_fcs_lport_get_psym_name(port))); 5203 sizeof(symbl));
5218 } 5204 }
5219 5205
5220 len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 5206 len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
5221 bfa_fcs_lport_get_fcid(port), 0, psymbl); 5207 bfa_fcs_lport_get_fcid(port), 0, symbl);
5222 5208
5223 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 5209 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
5224 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 5210 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 256f4afaccf9..16d3aeb0e572 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1809,13 +1809,12 @@ static void
1809bfa_ioc_send_enable(struct bfa_ioc_s *ioc) 1809bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1810{ 1810{
1811 struct bfi_ioc_ctrl_req_s enable_req; 1811 struct bfi_ioc_ctrl_req_s enable_req;
1812 struct timeval tv;
1813 1812
1814 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1813 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1815 bfa_ioc_portid(ioc)); 1814 bfa_ioc_portid(ioc));
1816 enable_req.clscode = cpu_to_be16(ioc->clscode); 1815 enable_req.clscode = cpu_to_be16(ioc->clscode);
1817 do_gettimeofday(&tv); 1816 /* unsigned 32-bit time_t overflow in y2106 */
1818 enable_req.tv_sec = be32_to_cpu(tv.tv_sec); 1817 enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
1819 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1818 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1820} 1819}
1821 1820
@@ -1826,6 +1825,9 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1826 1825
1827 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, 1826 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1828 bfa_ioc_portid(ioc)); 1827 bfa_ioc_portid(ioc));
1828 disable_req.clscode = cpu_to_be16(ioc->clscode);
1829 /* unsigned 32-bit time_t overflow in y2106 */
1830 disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
1829 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s)); 1831 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1830} 1832}
1831 1833
@@ -2803,7 +2805,7 @@ void
2803bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) 2805bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2804{ 2806{
2805 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); 2807 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2806 strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 2808 strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2807} 2809}
2808 2810
2809void 2811void
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index da1721e0d167..079bc77f4102 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -96,14 +96,11 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
96 port->stats_busy = BFA_FALSE; 96 port->stats_busy = BFA_FALSE;
97 97
98 if (status == BFA_STATUS_OK) { 98 if (status == BFA_STATUS_OK) {
99 struct timeval tv;
100
101 memcpy(port->stats, port->stats_dma.kva, 99 memcpy(port->stats, port->stats_dma.kva,
102 sizeof(union bfa_port_stats_u)); 100 sizeof(union bfa_port_stats_u));
103 bfa_port_stats_swap(port, port->stats); 101 bfa_port_stats_swap(port, port->stats);
104 102
105 do_gettimeofday(&tv); 103 port->stats->fc.secs_reset = ktime_get_seconds() - port->stats_reset_time;
106 port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
107 } 104 }
108 105
109 if (port->stats_cbfn) { 106 if (port->stats_cbfn) {
@@ -124,16 +121,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
124static void 121static void
125bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) 122bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
126{ 123{
127 struct timeval tv;
128
129 port->stats_status = status; 124 port->stats_status = status;
130 port->stats_busy = BFA_FALSE; 125 port->stats_busy = BFA_FALSE;
131 126
132 /* 127 /*
133 * re-initialize time stamp for stats reset 128 * re-initialize time stamp for stats reset
134 */ 129 */
135 do_gettimeofday(&tv); 130 port->stats_reset_time = ktime_get_seconds();
136 port->stats_reset_time = tv.tv_sec;
137 131
138 if (port->stats_cbfn) { 132 if (port->stats_cbfn) {
139 port->stats_cbfn(port->stats_cbarg, status); 133 port->stats_cbfn(port->stats_cbarg, status);
@@ -471,8 +465,6 @@ void
471bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, 465bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
472 void *dev, struct bfa_trc_mod_s *trcmod) 466 void *dev, struct bfa_trc_mod_s *trcmod)
473{ 467{
474 struct timeval tv;
475
476 WARN_ON(!port); 468 WARN_ON(!port);
477 469
478 port->dev = dev; 470 port->dev = dev;
@@ -494,8 +486,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
494 /* 486 /*
495 * initialize time stamp for stats reset 487 * initialize time stamp for stats reset
496 */ 488 */
497 do_gettimeofday(&tv); 489 port->stats_reset_time = ktime_get_seconds();
498 port->stats_reset_time = tv.tv_sec;
499 490
500 bfa_trc(port, 0); 491 bfa_trc(port, 0);
501} 492}
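Note that the port statistics switch to ktime_get_seconds() rather than the wall clock: secs_reset is a duration, and the monotonic clock cannot jump when the system time is stepped by settimeofday() or NTP. A small sketch of the reset/age pattern, with hypothetical names:

#include <linux/ktime.h>

struct port_stats_state {
        time64_t reset_time;    /* monotonic seconds at last clear */
};

static void port_stats_clear(struct port_stats_state *st)
{
        /* Monotonic clock: unaffected by wall-clock adjustments. */
        st->reset_time = ktime_get_seconds();
}

static time64_t port_stats_age(const struct port_stats_state *st)
{
        /* Seconds elapsed since the last reset; never negative. */
        return ktime_get_seconds() - st->reset_time;
}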
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 26dc1bf14c85..0c3b200243ca 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -36,7 +36,7 @@ struct bfa_port_s {
36 bfa_port_stats_cbfn_t stats_cbfn; 36 bfa_port_stats_cbfn_t stats_cbfn;
37 void *stats_cbarg; 37 void *stats_cbarg;
38 bfa_status_t stats_status; 38 bfa_status_t stats_status;
39 u32 stats_reset_time; 39 time64_t stats_reset_time;
40 union bfa_port_stats_u *stats; 40 union bfa_port_stats_u *stats;
41 struct bfa_dma_s stats_dma; 41 struct bfa_dma_s stats_dma;
42 bfa_boolean_t endis_pending; 42 bfa_boolean_t endis_pending;
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index e640223bab3c..6fc34fb20f00 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -288,18 +288,6 @@ plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
288 return 0; 288 return 0;
289} 289}
290 290
291static u64
292bfa_get_log_time(void)
293{
294 u64 system_time = 0;
295 struct timeval tv;
296 do_gettimeofday(&tv);
297
298 /* We are interested in seconds only. */
299 system_time = tv.tv_sec;
300 return system_time;
301}
302
303static void 291static void
304bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) 292bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
305{ 293{
@@ -320,7 +308,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
320 308
321 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); 309 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
322 310
323 pl_recp->tv = bfa_get_log_time(); 311 pl_recp->tv = ktime_get_real_seconds();
324 BFA_PL_LOG_REC_INCR(plog->tail); 312 BFA_PL_LOG_REC_INCR(plog->tail);
325 313
326 if (plog->head == plog->tail) 314 if (plog->head == plog->tail)
@@ -350,8 +338,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
350 lp.eid = event; 338 lp.eid = event;
351 lp.log_type = BFA_PL_LOG_TYPE_STRING; 339 lp.log_type = BFA_PL_LOG_TYPE_STRING;
352 lp.misc = misc; 340 lp.misc = misc;
353 strncpy(lp.log_entry.string_log, log_str, 341 strlcpy(lp.log_entry.string_log, log_str,
354 BFA_PL_STRING_LOG_SZ - 1); 342 BFA_PL_STRING_LOG_SZ);
355 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0'; 343 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
356 bfa_plog_add(plog, &lp); 344 bfa_plog_add(plog, &lp);
357 } 345 }
@@ -3047,7 +3035,6 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3047 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); 3035 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3048 struct bfa_port_cfg_s *port_cfg = &fcport->cfg; 3036 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
3049 struct bfa_fcport_ln_s *ln = &fcport->ln; 3037 struct bfa_fcport_ln_s *ln = &fcport->ln;
3050 struct timeval tv;
3051 3038
3052 fcport->bfa = bfa; 3039 fcport->bfa = bfa;
3053 ln->fcport = fcport; 3040 ln->fcport = fcport;
@@ -3060,8 +3047,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3060 /* 3047 /*
3061 * initialize time stamp for stats reset 3048 * initialize time stamp for stats reset
3062 */ 3049 */
3063 do_gettimeofday(&tv); 3050 fcport->stats_reset_time = ktime_get_seconds();
3064 fcport->stats_reset_time = tv.tv_sec;
3065 fcport->stats_dma_ready = BFA_FALSE; 3051 fcport->stats_dma_ready = BFA_FALSE;
3066 3052
3067 /* 3053 /*
@@ -3295,9 +3281,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3295 union bfa_fcport_stats_u *ret; 3281 union bfa_fcport_stats_u *ret;
3296 3282
3297 if (complete) { 3283 if (complete) {
3298 struct timeval tv; 3284 time64_t time = ktime_get_seconds();
3299 if (fcport->stats_status == BFA_STATUS_OK)
3300 do_gettimeofday(&tv);
3301 3285
3302 list_for_each_safe(qe, qen, &fcport->stats_pending_q) { 3286 list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
3303 bfa_q_deq(&fcport->stats_pending_q, &qe); 3287 bfa_q_deq(&fcport->stats_pending_q, &qe);
@@ -3312,7 +3296,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3312 bfa_fcport_fcoe_stats_swap(&ret->fcoe, 3296 bfa_fcport_fcoe_stats_swap(&ret->fcoe,
3313 &fcport->stats->fcoe); 3297 &fcport->stats->fcoe);
3314 ret->fcoe.secs_reset = 3298 ret->fcoe.secs_reset =
3315 tv.tv_sec - fcport->stats_reset_time; 3299 time - fcport->stats_reset_time;
3316 } 3300 }
3317 } 3301 }
3318 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe, 3302 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
@@ -3373,13 +3357,10 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3373 struct list_head *qe, *qen; 3357 struct list_head *qe, *qen;
3374 3358
3375 if (complete) { 3359 if (complete) {
3376 struct timeval tv;
3377
3378 /* 3360 /*
3379 * re-initialize time stamp for stats reset 3361 * re-initialize time stamp for stats reset
3380 */ 3362 */
3381 do_gettimeofday(&tv); 3363 fcport->stats_reset_time = ktime_get_seconds();
3382 fcport->stats_reset_time = tv.tv_sec;
3383 list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) { 3364 list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
3384 bfa_q_deq(&fcport->statsclr_pending_q, &qe); 3365 bfa_q_deq(&fcport->statsclr_pending_q, &qe);
3385 cb = (struct bfa_cb_pending_q_s *)qe; 3366 cb = (struct bfa_cb_pending_q_s *)qe;
@@ -6148,13 +6129,13 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6148/* 6129/*
6149 * D-port 6130 * D-port
6150 */ 6131 */
6151#define bfa_dport_result_start(__dport, __mode) do { \ 6132#define bfa_dport_result_start(__dport, __mode) do { \
6152 (__dport)->result.start_time = bfa_get_log_time(); \ 6133 (__dport)->result.start_time = ktime_get_real_seconds(); \
6153 (__dport)->result.status = DPORT_TEST_ST_INPRG; \ 6134 (__dport)->result.status = DPORT_TEST_ST_INPRG; \
6154 (__dport)->result.mode = (__mode); \ 6135 (__dport)->result.mode = (__mode); \
6155 (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \ 6136 (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
6156 (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \ 6137 (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
6157 (__dport)->result.lpcnt = (__dport)->lpcnt; \ 6138 (__dport)->result.lpcnt = (__dport)->lpcnt; \
6158} while (0) 6139} while (0)
6159 6140
6160static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport, 6141static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
@@ -6588,7 +6569,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
6588 6569
6589 switch (dport->i2hmsg.scn.state) { 6570 switch (dport->i2hmsg.scn.state) {
6590 case BFI_DPORT_SCN_TESTCOMP: 6571 case BFI_DPORT_SCN_TESTCOMP:
6591 dport->result.end_time = bfa_get_log_time(); 6572 dport->result.end_time = ktime_get_real_seconds();
6592 bfa_trc(dport->bfa, dport->result.end_time); 6573 bfa_trc(dport->bfa, dport->result.end_time);
6593 6574
6594 dport->result.status = msg->info.testcomp.status; 6575 dport->result.status = msg->info.testcomp.status;
@@ -6635,7 +6616,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
6635 case BFI_DPORT_SCN_SUBTESTSTART: 6616 case BFI_DPORT_SCN_SUBTESTSTART:
6636 subtesttype = msg->info.teststart.type; 6617 subtesttype = msg->info.teststart.type;
6637 dport->result.subtest[subtesttype].start_time = 6618 dport->result.subtest[subtesttype].start_time =
6638 bfa_get_log_time(); 6619 ktime_get_real_seconds();
6639 dport->result.subtest[subtesttype].status = 6620 dport->result.subtest[subtesttype].status =
6640 DPORT_TEST_ST_INPRG; 6621 DPORT_TEST_ST_INPRG;
6641 6622
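With do_gettimeofday() gone, the bfa_get_log_time() wrapper reduces to a single call and can be deleted outright. The series is also deliberate about which clock each site gets: event-log and D-port diagnostic records keep real (wall-clock) time because a human reads them, while the statistics paths above use monotonic time for computing intervals. Roughly, assuming nothing beyond the two core timekeeping helpers:

#include <linux/ktime.h>

/* Wall-clock seconds: meaningful on their own, e.g. in an event log. */
static inline u64 log_stamp(void)
{
        return ktime_get_real_seconds();
}

/* Monotonic seconds: meaningful only as a difference, e.g. durations. */
static inline time64_t interval_start(void)
{
        return ktime_get_seconds();
}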
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index ea2278bc78a8..7e8fb6231d49 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -505,7 +505,7 @@ struct bfa_fcport_s {
505 struct list_head stats_pending_q; 505 struct list_head stats_pending_q;
506 struct list_head statsclr_pending_q; 506 struct list_head statsclr_pending_q;
507 bfa_boolean_t stats_qfull; 507 bfa_boolean_t stats_qfull;
508 u32 stats_reset_time; /* stats reset time stamp */ 508 time64_t stats_reset_time; /* stats reset time stamp */
509 bfa_boolean_t diag_busy; /* diag busy status */ 509 bfa_boolean_t diag_busy; /* diag busy status */
510 bfa_boolean_t beacon; /* port beacon status */ 510 bfa_boolean_t beacon; /* port beacon status */
511 bfa_boolean_t link_e2e_beacon; /* link beacon status */ 511 bfa_boolean_t link_e2e_beacon; /* link beacon status */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index cf0466686804..bd7e6a6fc1f1 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -610,13 +610,12 @@ bfad_hal_mem_alloc(struct bfad_s *bfad)
610 /* Iterate through the KVA meminfo queue */ 610 /* Iterate through the KVA meminfo queue */
611 list_for_each(km_qe, &kva_info->qe) { 611 list_for_each(km_qe, &kva_info->qe) {
612 kva_elem = (struct bfa_mem_kva_s *) km_qe; 612 kva_elem = (struct bfa_mem_kva_s *) km_qe;
613 kva_elem->kva = vmalloc(kva_elem->mem_len); 613 kva_elem->kva = vzalloc(kva_elem->mem_len);
614 if (kva_elem->kva == NULL) { 614 if (kva_elem->kva == NULL) {
615 bfad_hal_mem_release(bfad); 615 bfad_hal_mem_release(bfad);
616 rc = BFA_STATUS_ENOMEM; 616 rc = BFA_STATUS_ENOMEM;
617 goto ext; 617 goto ext;
618 } 618 }
619 memset(kva_elem->kva, 0, kva_elem->mem_len);
620 } 619 }
621 620
622 /* Iterate through the DMA meminfo queue */ 621 /* Iterate through the DMA meminfo queue */
@@ -981,20 +980,20 @@ bfad_start_ops(struct bfad_s *bfad) {
981 980
982 /* Fill the driver_info info to fcs*/ 981 /* Fill the driver_info info to fcs*/
983 memset(&driver_info, 0, sizeof(driver_info)); 982 memset(&driver_info, 0, sizeof(driver_info));
984 strncpy(driver_info.version, BFAD_DRIVER_VERSION, 983 strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
985 sizeof(driver_info.version) - 1); 984 sizeof(driver_info.version));
986 if (host_name) 985 if (host_name)
987 strncpy(driver_info.host_machine_name, host_name, 986 strlcpy(driver_info.host_machine_name, host_name,
988 sizeof(driver_info.host_machine_name) - 1); 987 sizeof(driver_info.host_machine_name));
989 if (os_name) 988 if (os_name)
990 strncpy(driver_info.host_os_name, os_name, 989 strlcpy(driver_info.host_os_name, os_name,
991 sizeof(driver_info.host_os_name) - 1); 990 sizeof(driver_info.host_os_name));
992 if (os_patch) 991 if (os_patch)
993 strncpy(driver_info.host_os_patch, os_patch, 992 strlcpy(driver_info.host_os_patch, os_patch,
994 sizeof(driver_info.host_os_patch) - 1); 993 sizeof(driver_info.host_os_patch));
995 994
996 strncpy(driver_info.os_device_name, bfad->pci_name, 995 strlcpy(driver_info.os_device_name, bfad->pci_name,
997 sizeof(driver_info.os_device_name) - 1); 996 sizeof(driver_info.os_device_name));
998 997
999 /* FCS driver info init */ 998 /* FCS driver info init */
1000 spin_lock_irqsave(&bfad->bfad_lock, flags); 999 spin_lock_irqsave(&bfad->bfad_lock, flags);
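The vmalloc()+memset() pair collapses into vzalloc(), which allocates and zeroes in one call (kzalloc()/kcalloc() are the kmalloc-side equivalents). A before/after sketch:

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Before: two steps, and the memset() is easy to lose in refactoring. */
static void *kva_alloc_old(size_t len)
{
        void *p = vmalloc(len);

        if (p)
                memset(p, 0, len);
        return p;
}

/* After: one call, same zeroed result. */
static void *kva_alloc_new(size_t len)
{
        return vzalloc(len);
}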
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 13db3b7bc873..d4d276c757ea 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -487,7 +487,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
487 struct bfad_im_port_s *im_port = 487 struct bfad_im_port_s *im_port =
488 (struct bfad_im_port_s *) vport->drv_port.im_port; 488 (struct bfad_im_port_s *) vport->drv_port.im_port;
489 struct bfad_s *bfad = im_port->bfad; 489 struct bfad_s *bfad = im_port->bfad;
490 struct bfad_port_s *port;
491 struct bfa_fcs_vport_s *fcs_vport; 490 struct bfa_fcs_vport_s *fcs_vport;
492 struct Scsi_Host *vshost; 491 struct Scsi_Host *vshost;
493 wwn_t pwwn; 492 wwn_t pwwn;
@@ -502,8 +501,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
502 return 0; 501 return 0;
503 } 502 }
504 503
505 port = im_port->port;
506
507 vshost = vport->drv_port.im_port->shost; 504 vshost = vport->drv_port.im_port->shost;
508 u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn); 505 u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
509 506
@@ -843,7 +840,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
843 char symname[BFA_SYMNAME_MAXLEN]; 840 char symname[BFA_SYMNAME_MAXLEN];
844 841
845 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); 842 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
846 strncpy(symname, port_attr.port_cfg.sym_name.symname, 843 strlcpy(symname, port_attr.port_cfg.sym_name.symname,
847 BFA_SYMNAME_MAXLEN); 844 BFA_SYMNAME_MAXLEN);
848 return snprintf(buf, PAGE_SIZE, "%s\n", symname); 845 return snprintf(buf, PAGE_SIZE, "%s\n", symname);
849} 846}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index b2fa195adc7a..3976e787ba64 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -127,7 +127,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
127 127
128 /* fill in driver attr info */ 128 /* fill in driver attr info */
129 strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME); 129 strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
130 strncpy(iocmd->ioc_attr.driver_attr.driver_ver, 130 strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
131 BFAD_DRIVER_VERSION, BFA_VERSION_LEN); 131 BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
132 strcpy(iocmd->ioc_attr.driver_attr.fw_ver, 132 strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
133 iocmd->ioc_attr.adapter_attr.fw_ver); 133 iocmd->ioc_attr.adapter_attr.fw_ver);
@@ -315,9 +315,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
315 iocmd->attr.port_type = port_attr.port_type; 315 iocmd->attr.port_type = port_attr.port_type;
316 iocmd->attr.loopback = port_attr.loopback; 316 iocmd->attr.loopback = port_attr.loopback;
317 iocmd->attr.authfail = port_attr.authfail; 317 iocmd->attr.authfail = port_attr.authfail;
318 strncpy(iocmd->attr.port_symname.symname, 318 strlcpy(iocmd->attr.port_symname.symname,
319 port_attr.port_cfg.sym_name.symname, 319 port_attr.port_cfg.sym_name.symname,
320 sizeof(port_attr.port_cfg.sym_name.symname)); 320 sizeof(iocmd->attr.port_symname.symname));
321 321
322 iocmd->status = BFA_STATUS_OK; 322 iocmd->status = BFA_STATUS_OK;
323 return 0; 323 return 0;
@@ -2094,13 +2094,11 @@ bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2094{ 2094{
2095 struct bfa_bsg_fcpim_profile_s *iocmd = 2095 struct bfa_bsg_fcpim_profile_s *iocmd =
2096 (struct bfa_bsg_fcpim_profile_s *)cmd; 2096 (struct bfa_bsg_fcpim_profile_s *)cmd;
2097 struct timeval tv;
2098 unsigned long flags; 2097 unsigned long flags;
2099 2098
2100 do_gettimeofday(&tv);
2101 spin_lock_irqsave(&bfad->bfad_lock, flags); 2099 spin_lock_irqsave(&bfad->bfad_lock, flags);
2102 if (v_cmd == IOCMD_FCPIM_PROFILE_ON) 2100 if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
2103 iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec); 2101 iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
2104 else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF) 2102 else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
2105 iocmd->status = bfa_fcpim_profile_off(&bfad->bfa); 2103 iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
2106 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2104 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
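Beyond the strncpy()-to-strlcpy() swap, the port_get_attr hunk also fixes the size argument to describe the destination buffer rather than the source. strlcpy() always NUL-terminates within the given bound, but that guarantee only helps when the bound is the destination's. A sketch of the corrected idiom, with illustrative names:

#include <linux/string.h>

struct port_symname {
        char symname[64];
};

static void copy_symname(struct port_symname *dst, const char *src)
{
        /*
         * strlcpy() truncates and NUL-terminates, but the bound must be
         * sizeof() the *destination*; sizing it by the source buffer can
         * write past dst when the source array is the larger of the two.
         */
        strlcpy(dst->symname, src, sizeof(dst->symname));
}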
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 05f523971348..349cfe7d055e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -81,7 +81,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
81 81
82 fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s); 82 fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
83 83
84 fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len); 84 fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
85 if (!fw_debug->debug_buffer) { 85 if (!fw_debug->debug_buffer) {
86 kfree(fw_debug); 86 kfree(fw_debug);
87 printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n", 87 printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
@@ -89,8 +89,6 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
89 return -ENOMEM; 89 return -ENOMEM;
90 } 90 }
91 91
92 memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
93
94 spin_lock_irqsave(&bfad->bfad_lock, flags); 92 spin_lock_irqsave(&bfad->bfad_lock, flags);
95 rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc, 93 rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
96 fw_debug->debug_buffer, 94 fw_debug->debug_buffer,
@@ -125,7 +123,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
125 123
126 fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s); 124 fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
127 125
128 fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len); 126 fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
129 if (!fw_debug->debug_buffer) { 127 if (!fw_debug->debug_buffer) {
130 kfree(fw_debug); 128 kfree(fw_debug);
131 printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n", 129 printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
@@ -133,8 +131,6 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
133 return -ENOMEM; 131 return -ENOMEM;
134 } 132 }
135 133
136 memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
137
138 spin_lock_irqsave(&bfad->bfad_lock, flags); 134 spin_lock_irqsave(&bfad->bfad_lock, flags);
139 rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc, 135 rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
140 fw_debug->debug_buffer, 136 fw_debug->debug_buffer,
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 06ce4ba2b7bc..af66275570c3 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -141,16 +141,28 @@ struct bfad_im_s {
141} while (0) 141} while (0)
142 142
143/* post fc_host vendor event */ 143/* post fc_host vendor event */
144#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \ 144static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
145 do_gettimeofday(&(_entry)->aen_tv); \ 145 struct bfad_s *drv, int cnt,
146 (_entry)->bfad_num = (_drv)->inst_no; \ 146 enum bfa_aen_category cat,
147 (_entry)->seq_num = (_cnt); \ 147 enum bfa_ioc_aen_event evt)
148 (_entry)->aen_category = (_cat); \ 148{
149 (_entry)->aen_type = (_evt); \ 149 struct timespec64 ts;
150 if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \ 150
151 queue_work((_drv)->im->drv_workq, \ 151 ktime_get_real_ts64(&ts);
152 &(_drv)->im->aen_im_notify_work); \ 152 /*
153} while (0) 153 * 'unsigned long aen_tv_sec' overflows in y2106 on 32-bit
154 * architectures, or in 2038 if user space interprets it
155 * as 'signed'.
156 */
157 entry->aen_tv_sec = ts.tv_sec;
158 entry->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC;
159 entry->bfad_num = drv->inst_no;
160 entry->seq_num = cnt;
161 entry->aen_category = cat;
162 entry->aen_type = evt;
163 if (drv->bfad_flags & BFAD_FC4_PROBE_DONE)
164 queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work);
165}
154 166
155struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, 167struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
156 struct bfad_s *); 168 struct bfad_s *);
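Rewriting the vendor-event macro as a static inline buys argument type checking and a natural home for the struct timespec64 local that ktime_get_real_ts64() needs. In isolation, the seconds/microseconds split it performs looks like this (the entry field types here are illustrative):

#include <linux/ktime.h>
#include <linux/time64.h>

struct aen_entry_stamp {
        u64 tv_sec;
        u32 tv_usec;
};

static inline void aen_entry_stamp_now(struct aen_entry_stamp *e)
{
        struct timespec64 ts;

        ktime_get_real_ts64(&ts);                 /* 64-bit wall clock */
        e->tv_sec = ts.tv_sec;                    /* seconds widen safely */
        e->tv_usec = ts.tv_nsec / NSEC_PER_USEC;  /* nanoseconds -> microseconds */
}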
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e6b9de7d41ac..65de1d0578a1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1552,7 +1552,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1552 1552
1553 rc = bnx2fc_shost_config(lport, parent); 1553 rc = bnx2fc_shost_config(lport, parent);
1554 if (rc) { 1554 if (rc) {
1555 printk(KERN_ERR PFX "Couldnt configure shost for %s\n", 1555 printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
1556 interface->netdev->name); 1556 interface->netdev->name);
1557 goto lp_config_err; 1557 goto lp_config_err;
1558 } 1558 }
@@ -1560,7 +1560,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1560 /* Initialize the libfc library */ 1560 /* Initialize the libfc library */
1561 rc = bnx2fc_libfc_config(lport); 1561 rc = bnx2fc_libfc_config(lport);
1562 if (rc) { 1562 if (rc) {
1563 printk(KERN_ERR PFX "Couldnt configure libfc\n"); 1563 printk(KERN_ERR PFX "Couldn't configure libfc\n");
1564 goto shost_err; 1564 goto shost_err;
1565 } 1565 }
1566 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; 1566 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 26de61d65a4d..e8ae4d671d23 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,16 +1857,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1857 * entries. Hence the limit with one page is 8192 task context 1857 * entries. Hence the limit with one page is 8192 task context
1858 * entries. 1858 * entries.
1859 */ 1859 */
1860 hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, 1860 hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
1861 PAGE_SIZE, 1861 PAGE_SIZE,
1862 &hba->task_ctx_bd_dma, 1862 &hba->task_ctx_bd_dma,
1863 GFP_KERNEL); 1863 GFP_KERNEL);
1864 if (!hba->task_ctx_bd_tbl) { 1864 if (!hba->task_ctx_bd_tbl) {
1865 printk(KERN_ERR PFX "unable to allocate task context BDT\n"); 1865 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1866 rc = -1; 1866 rc = -1;
1867 goto out; 1867 goto out;
1868 } 1868 }
1869 memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1870 1869
1871 /* 1870 /*
1872 * Allocate task_ctx which is an array of pointers pointing to 1871 * Allocate task_ctx which is an array of pointers pointing to
@@ -1895,16 +1894,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1895 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; 1894 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1896 for (i = 0; i < task_ctx_arr_sz; i++) { 1895 for (i = 0; i < task_ctx_arr_sz; i++) {
1897 1896
1898 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, 1897 hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
1899 PAGE_SIZE, 1898 PAGE_SIZE,
1900 &hba->task_ctx_dma[i], 1899 &hba->task_ctx_dma[i],
1901 GFP_KERNEL); 1900 GFP_KERNEL);
1902 if (!hba->task_ctx[i]) { 1901 if (!hba->task_ctx[i]) {
1903 printk(KERN_ERR PFX "unable to alloc task context\n"); 1902 printk(KERN_ERR PFX "unable to alloc task context\n");
1904 rc = -1; 1903 rc = -1;
1905 goto out3; 1904 goto out3;
1906 } 1905 }
1907 memset(hba->task_ctx[i], 0, PAGE_SIZE);
1908 addr = (u64)hba->task_ctx_dma[i]; 1906 addr = (u64)hba->task_ctx_dma[i];
1909 task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32); 1907 task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1910 task_ctx_bdt->lo = cpu_to_le32((u32)addr); 1908 task_ctx_bdt->lo = cpu_to_le32((u32)addr);
@@ -2033,28 +2031,23 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2033 } 2031 }
2034 2032
2035 for (i = 0; i < segment_count; ++i) { 2033 for (i = 0; i < segment_count; ++i) {
2036 hba->hash_tbl_segments[i] = 2034 hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
2037 dma_alloc_coherent(&hba->pcidev->dev, 2035 BNX2FC_HASH_TBL_CHUNK_SIZE,
2038 BNX2FC_HASH_TBL_CHUNK_SIZE, 2036 &dma_segment_array[i],
2039 &dma_segment_array[i], 2037 GFP_KERNEL);
2040 GFP_KERNEL);
2041 if (!hba->hash_tbl_segments[i]) { 2038 if (!hba->hash_tbl_segments[i]) {
2042 printk(KERN_ERR PFX "hash segment alloc failed\n"); 2039 printk(KERN_ERR PFX "hash segment alloc failed\n");
2043 goto cleanup_dma; 2040 goto cleanup_dma;
2044 } 2041 }
2045 memset(hba->hash_tbl_segments[i], 0,
2046 BNX2FC_HASH_TBL_CHUNK_SIZE);
2047 } 2042 }
2048 2043
2049 hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, 2044 hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2050 PAGE_SIZE, 2045 &hba->hash_tbl_pbl_dma,
2051 &hba->hash_tbl_pbl_dma, 2046 GFP_KERNEL);
2052 GFP_KERNEL);
2053 if (!hba->hash_tbl_pbl) { 2047 if (!hba->hash_tbl_pbl) {
2054 printk(KERN_ERR PFX "hash table pbl alloc failed\n"); 2048 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2055 goto cleanup_dma; 2049 goto cleanup_dma;
2056 } 2050 }
2057 memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
2058 2051
2059 pbl = hba->hash_tbl_pbl; 2052 pbl = hba->hash_tbl_pbl;
2060 for (i = 0; i < segment_count; ++i) { 2053 for (i = 0; i < segment_count; ++i) {
@@ -2111,27 +2104,26 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2111 return -ENOMEM; 2104 return -ENOMEM;
2112 2105
2113 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); 2106 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2114 hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, 2107 hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
2115 &hba->t2_hash_tbl_ptr_dma, 2108 mem_size,
2116 GFP_KERNEL); 2109 &hba->t2_hash_tbl_ptr_dma,
2110 GFP_KERNEL);
2117 if (!hba->t2_hash_tbl_ptr) { 2111 if (!hba->t2_hash_tbl_ptr) {
2118 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); 2112 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2119 bnx2fc_free_fw_resc(hba); 2113 bnx2fc_free_fw_resc(hba);
2120 return -ENOMEM; 2114 return -ENOMEM;
2121 } 2115 }
2122 memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
2123 2116
2124 mem_size = BNX2FC_NUM_MAX_SESS * 2117 mem_size = BNX2FC_NUM_MAX_SESS *
2125 sizeof(struct fcoe_t2_hash_table_entry); 2118 sizeof(struct fcoe_t2_hash_table_entry);
2126 hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, 2119 hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
2127 &hba->t2_hash_tbl_dma, 2120 &hba->t2_hash_tbl_dma,
2128 GFP_KERNEL); 2121 GFP_KERNEL);
2129 if (!hba->t2_hash_tbl) { 2122 if (!hba->t2_hash_tbl) {
2130 printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); 2123 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2131 bnx2fc_free_fw_resc(hba); 2124 bnx2fc_free_fw_resc(hba);
2132 return -ENOMEM; 2125 return -ENOMEM;
2133 } 2126 }
2134 memset(hba->t2_hash_tbl, 0x00, mem_size);
2135 for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { 2127 for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
2136 addr = (unsigned long) hba->t2_hash_tbl_dma + 2128 addr = (unsigned long) hba->t2_hash_tbl_dma +
2137 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry)); 2129 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
@@ -2148,16 +2140,14 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2148 return -ENOMEM; 2140 return -ENOMEM;
2149 } 2141 }
2150 2142
2151 hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, 2143 hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2152 PAGE_SIZE, 2144 &hba->stats_buf_dma,
2153 &hba->stats_buf_dma, 2145 GFP_KERNEL);
2154 GFP_KERNEL);
2155 if (!hba->stats_buffer) { 2146 if (!hba->stats_buffer) {
2156 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); 2147 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2157 bnx2fc_free_fw_resc(hba); 2148 bnx2fc_free_fw_resc(hba);
2158 return -ENOMEM; 2149 return -ENOMEM;
2159 } 2150 }
2160 memset(hba->stats_buffer, 0x00, PAGE_SIZE);
2161 2151
2162 return 0; 2152 return 0;
2163} 2153}
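All of the bnx2fc DMA buffers follow one conversion: dma_alloc_coherent() plus a CPU-side memset() becomes dma_zalloc_coherent(), which hands back zeroed memory directly (later kernels folded the zeroing guarantee into dma_alloc_coherent() itself and retired the _zalloc variant). A minimal sketch:

#include <linux/dma-mapping.h>

static void *alloc_zeroed_table(struct device *dev, size_t size,
                                dma_addr_t *dma_handle)
{
        /*
         * One call allocates the coherent buffer and zeroes it, so no
         * follow-up memset() on the kernel virtual address is needed.
         */
        return dma_zalloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}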
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index a8ae1a019eea..e3d1c7c440c8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,56 +672,52 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
672 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & 672 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
673 CNIC_PAGE_MASK; 673 CNIC_PAGE_MASK;
674 674
675 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 675 tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
676 &tgt->sq_dma, GFP_KERNEL); 676 &tgt->sq_dma, GFP_KERNEL);
677 if (!tgt->sq) { 677 if (!tgt->sq) {
678 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", 678 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
679 tgt->sq_mem_size); 679 tgt->sq_mem_size);
680 goto mem_alloc_failure; 680 goto mem_alloc_failure;
681 } 681 }
682 memset(tgt->sq, 0, tgt->sq_mem_size);
683 682
684 /* Allocate and map CQ */ 683 /* Allocate and map CQ */
685 tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; 684 tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
686 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & 685 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
687 CNIC_PAGE_MASK; 686 CNIC_PAGE_MASK;
688 687
689 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 688 tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
690 &tgt->cq_dma, GFP_KERNEL); 689 &tgt->cq_dma, GFP_KERNEL);
691 if (!tgt->cq) { 690 if (!tgt->cq) {
692 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", 691 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
693 tgt->cq_mem_size); 692 tgt->cq_mem_size);
694 goto mem_alloc_failure; 693 goto mem_alloc_failure;
695 } 694 }
696 memset(tgt->cq, 0, tgt->cq_mem_size);
697 695
698 /* Allocate and map RQ and RQ PBL */ 696 /* Allocate and map RQ and RQ PBL */
699 tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; 697 tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
700 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & 698 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
701 CNIC_PAGE_MASK; 699 CNIC_PAGE_MASK;
702 700
703 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 701 tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
704 &tgt->rq_dma, GFP_KERNEL); 702 &tgt->rq_dma, GFP_KERNEL);
705 if (!tgt->rq) { 703 if (!tgt->rq) {
706 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", 704 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
707 tgt->rq_mem_size); 705 tgt->rq_mem_size);
708 goto mem_alloc_failure; 706 goto mem_alloc_failure;
709 } 707 }
710 memset(tgt->rq, 0, tgt->rq_mem_size);
711 708
712 tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); 709 tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
713 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & 710 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
714 CNIC_PAGE_MASK; 711 CNIC_PAGE_MASK;
715 712
716 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 713 tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
717 &tgt->rq_pbl_dma, GFP_KERNEL); 714 &tgt->rq_pbl_dma, GFP_KERNEL);
718 if (!tgt->rq_pbl) { 715 if (!tgt->rq_pbl) {
719 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", 716 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
720 tgt->rq_pbl_size); 717 tgt->rq_pbl_size);
721 goto mem_alloc_failure; 718 goto mem_alloc_failure;
722 } 719 }
723 720
724 memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
725 num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; 721 num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
726 page = tgt->rq_dma; 722 page = tgt->rq_dma;
727 pbl = (u32 *)tgt->rq_pbl; 723 pbl = (u32 *)tgt->rq_pbl;
@@ -739,44 +735,43 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
739 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & 735 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
740 CNIC_PAGE_MASK; 736 CNIC_PAGE_MASK;
741 737
742 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, 738 tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
743 &tgt->xferq_dma, GFP_KERNEL); 739 tgt->xferq_mem_size, &tgt->xferq_dma,
740 GFP_KERNEL);
744 if (!tgt->xferq) { 741 if (!tgt->xferq) {
745 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", 742 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
746 tgt->xferq_mem_size); 743 tgt->xferq_mem_size);
747 goto mem_alloc_failure; 744 goto mem_alloc_failure;
748 } 745 }
749 memset(tgt->xferq, 0, tgt->xferq_mem_size);
750 746
751 /* Allocate and map CONFQ & CONFQ PBL */ 747 /* Allocate and map CONFQ & CONFQ PBL */
752 tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; 748 tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
753 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & 749 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
754 CNIC_PAGE_MASK; 750 CNIC_PAGE_MASK;
755 751
756 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, 752 tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
757 &tgt->confq_dma, GFP_KERNEL); 753 tgt->confq_mem_size, &tgt->confq_dma,
754 GFP_KERNEL);
758 if (!tgt->confq) { 755 if (!tgt->confq) {
759 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", 756 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
760 tgt->confq_mem_size); 757 tgt->confq_mem_size);
761 goto mem_alloc_failure; 758 goto mem_alloc_failure;
762 } 759 }
763 memset(tgt->confq, 0, tgt->confq_mem_size);
764 760
765 tgt->confq_pbl_size = 761 tgt->confq_pbl_size =
766 (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); 762 (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
767 tgt->confq_pbl_size = 763 tgt->confq_pbl_size =
768 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; 764 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
769 765
770 tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, 766 tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
771 tgt->confq_pbl_size, 767 tgt->confq_pbl_size,
772 &tgt->confq_pbl_dma, GFP_KERNEL); 768 &tgt->confq_pbl_dma, GFP_KERNEL);
773 if (!tgt->confq_pbl) { 769 if (!tgt->confq_pbl) {
774 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", 770 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
775 tgt->confq_pbl_size); 771 tgt->confq_pbl_size);
776 goto mem_alloc_failure; 772 goto mem_alloc_failure;
777 } 773 }
778 774
779 memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
780 num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; 775 num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
781 page = tgt->confq_dma; 776 page = tgt->confq_dma;
782 pbl = (u32 *)tgt->confq_pbl; 777 pbl = (u32 *)tgt->confq_pbl;
@@ -792,15 +787,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
792 /* Allocate and map ConnDB */ 787 /* Allocate and map ConnDB */
793 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); 788 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
794 789
795 tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, 790 tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
796 tgt->conn_db_mem_size, 791 tgt->conn_db_mem_size,
797 &tgt->conn_db_dma, GFP_KERNEL); 792 &tgt->conn_db_dma, GFP_KERNEL);
798 if (!tgt->conn_db) { 793 if (!tgt->conn_db) {
799 printk(KERN_ERR PFX "unable to allocate conn_db %d\n", 794 printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
800 tgt->conn_db_mem_size); 795 tgt->conn_db_mem_size);
801 goto mem_alloc_failure; 796 goto mem_alloc_failure;
802 } 797 }
803 memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
804 798
805 799
806 /* Allocate and map LCQ */ 800 /* Allocate and map LCQ */
@@ -808,15 +802,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
808 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & 802 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
809 CNIC_PAGE_MASK; 803 CNIC_PAGE_MASK;
810 804
811 tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 805 tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
812 &tgt->lcq_dma, GFP_KERNEL); 806 &tgt->lcq_dma, GFP_KERNEL);
813 807
814 if (!tgt->lcq) { 808 if (!tgt->lcq) {
815 printk(KERN_ERR PFX "unable to allocate lcq %d\n", 809 printk(KERN_ERR PFX "unable to allocate lcq %d\n",
816 tgt->lcq_mem_size); 810 tgt->lcq_mem_size);
817 goto mem_alloc_failure; 811 goto mem_alloc_failure;
818 } 812 }
819 memset(tgt->lcq, 0, tgt->lcq_mem_size);
820 813
821 tgt->conn_db->rq_prod = 0x8000; 814 tgt->conn_db->rq_prod = 0x8000;
822 815
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e0640e0f259f..8f03a869ac98 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -547,12 +547,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
547 nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; 547 nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
548 memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8); 548 memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
549 549
550 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 550 /* 57710 requires LUN field to be swapped */
551 u32 tmp = nopout_wqe->lun[0]; 551 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
552 /* 57710 requires LUN field to be swapped */ 552 swap(nopout_wqe->lun[0], nopout_wqe->lun[1]);
553 nopout_wqe->lun[0] = nopout_wqe->lun[1];
554 nopout_wqe->lun[1] = tmp;
555 }
556 553
557 nopout_wqe->itt = ((u16)task->itt | 554 nopout_wqe->itt = ((u16)task->itt |
558 (ISCSI_TASK_TYPE_MPATH << 555 (ISCSI_TASK_TYPE_MPATH <<
@@ -1073,15 +1070,14 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1073 1070
1074 /* Allocate memory area for actual SQ element */ 1071 /* Allocate memory area for actual SQ element */
1075 ep->qp.sq_virt = 1072 ep->qp.sq_virt =
1076 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, 1073 dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1077 &ep->qp.sq_phys, GFP_KERNEL); 1074 &ep->qp.sq_phys, GFP_KERNEL);
1078 if (!ep->qp.sq_virt) { 1075 if (!ep->qp.sq_virt) {
1079 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", 1076 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
1080 ep->qp.sq_mem_size); 1077 ep->qp.sq_mem_size);
1081 goto mem_alloc_err; 1078 goto mem_alloc_err;
1082 } 1079 }
1083 1080
1084 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
1085 ep->qp.sq_first_qe = ep->qp.sq_virt; 1081 ep->qp.sq_first_qe = ep->qp.sq_virt;
1086 ep->qp.sq_prod_qe = ep->qp.sq_first_qe; 1082 ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
1087 ep->qp.sq_cons_qe = ep->qp.sq_first_qe; 1083 ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
@@ -1110,14 +1106,13 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1110 1106
1111 /* Allocate memory area for actual CQ element */ 1107 /* Allocate memory area for actual CQ element */
1112 ep->qp.cq_virt = 1108 ep->qp.cq_virt =
1113 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, 1109 dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1114 &ep->qp.cq_phys, GFP_KERNEL); 1110 &ep->qp.cq_phys, GFP_KERNEL);
1115 if (!ep->qp.cq_virt) { 1111 if (!ep->qp.cq_virt) {
1116 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", 1112 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
1117 ep->qp.cq_mem_size); 1113 ep->qp.cq_mem_size);
1118 goto mem_alloc_err; 1114 goto mem_alloc_err;
1119 } 1115 }
1120 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
1121 1116
1122 ep->qp.cq_first_qe = ep->qp.cq_virt; 1117 ep->qp.cq_first_qe = ep->qp.cq_virt;
1123 ep->qp.cq_prod_qe = ep->qp.cq_first_qe; 1118 ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
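The open-coded three-line exchange of the LUN words above becomes the kernel's swap() macro, which expands to the same temporary-variable dance but is shorter and type-checked. For instance:

#include <linux/kernel.h>       /* swap() */

static void swap_lun_words(u32 lun[2])
{
        /* Equivalent to: u32 tmp = lun[0]; lun[0] = lun[1]; lun[1] = tmp; */
        swap(lun[0], lun[1]);
}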
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index cb1711a5d7a3..ed2dae657964 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1258,7 +1258,7 @@ module_init(csio_init);
1258module_exit(csio_exit); 1258module_exit(csio_exit);
1259MODULE_AUTHOR(CSIO_DRV_AUTHOR); 1259MODULE_AUTHOR(CSIO_DRV_AUTHOR);
1260MODULE_DESCRIPTION(CSIO_DRV_DESC); 1260MODULE_DESCRIPTION(CSIO_DRV_DESC);
1261MODULE_LICENSE(CSIO_DRV_LICENSE); 1261MODULE_LICENSE("Dual BSD/GPL");
1262MODULE_DEVICE_TABLE(pci, csio_pci_tbl); 1262MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
1263MODULE_VERSION(CSIO_DRV_VERSION); 1263MODULE_VERSION(CSIO_DRV_VERSION);
1264MODULE_FIRMWARE(FW_FNAME_T5); 1264MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 96b31e5af91e..20244254325a 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -48,7 +48,6 @@
48#include "csio_hw.h" 48#include "csio_hw.h"
49 49
50#define CSIO_DRV_AUTHOR "Chelsio Communications" 50#define CSIO_DRV_AUTHOR "Chelsio Communications"
51#define CSIO_DRV_LICENSE "Dual BSD/GPL"
52#define CSIO_DRV_DESC "Chelsio FCoE driver" 51#define CSIO_DRV_DESC "Chelsio FCoE driver"
53#define CSIO_DRV_VERSION "1.0.0-ko" 52#define CSIO_DRV_VERSION "1.0.0-ko"
54 53
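MODULE_LICENSE() moves from the CSIO_DRV_LICENSE define to a string literal at the macro site, presumably so that source scanners and license-audit tooling, which match the macro's argument against the known set of license strings, can see it without macro expansion. The resulting idiom:

#include <linux/module.h>

/* Keep the license string literal at the macro site so checkers can match it. */
MODULE_LICENSE("Dual BSD/GPL");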
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 931b1d8f9f3e..5f4e0a787bd1 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1216,7 +1216,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
1216 /* Queue mbox cmd, if another mbox cmd is active */ 1216 /* Queue mbox cmd, if another mbox cmd is active */
1217 if (mbp->mb_cbfn == NULL) { 1217 if (mbp->mb_cbfn == NULL) {
1218 rv = -EBUSY; 1218 rv = -EBUSY;
1219 csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n", 1219 csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
1220 hw->pfn, *((uint8_t *)mbp->mb)); 1220 hw->pfn, *((uint8_t *)mbp->mb));
1221 1221
1222 goto error_out; 1222 goto error_out;
@@ -1244,14 +1244,14 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
1244 rv = owner ? -EBUSY : -ETIMEDOUT; 1244 rv = owner ? -EBUSY : -ETIMEDOUT;
1245 1245
1246 csio_dbg(hw, 1246 csio_dbg(hw,
1247 "Couldnt own Mailbox %x op:0x%x " 1247 "Couldn't own Mailbox %x op:0x%x "
1248 "owner:%x\n", 1248 "owner:%x\n",
1249 hw->pfn, *((uint8_t *)mbp->mb), owner); 1249 hw->pfn, *((uint8_t *)mbp->mb), owner);
1250 goto error_out; 1250 goto error_out;
1251 } else { 1251 } else {
1252 if (mbm->mcurrent == NULL) { 1252 if (mbm->mcurrent == NULL) {
1253 csio_err(hw, 1253 csio_err(hw,
1254 "Couldnt own Mailbox %x " 1254 "Couldn't own Mailbox %x "
1255 "op:0x%x owner:%x\n", 1255 "op:0x%x owner:%x\n",
1256 hw->pfn, *((uint8_t *)mbp->mb), 1256 hw->pfn, *((uint8_t *)mbp->mb),
1257 owner); 1257 owner);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index ce1336414e0a..3f3af5e74a07 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1914,7 +1914,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
1914 if (task->sc) { 1914 if (task->sc) {
1915 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 1915 task->hdr = (struct iscsi_hdr *)tdata->skb->data;
1916 } else { 1916 } else {
1917 task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL); 1917 task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
1918 if (!task->hdr) { 1918 if (!task->hdr) {
1919 __kfree_skb(tdata->skb); 1919 __kfree_skb(tdata->skb);
1920 tdata->skb = NULL; 1920 tdata->skb = NULL;
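The GFP_KERNEL-to-GFP_ATOMIC change suggests cxgbi_conn_alloc_pdu() can run in a context that must not sleep, for example under a lock or in softirq context; GFP_KERNEL allocations may block to reclaim memory, while GFP_ATOMIC never sleeps at the cost of failing more readily. A sketch of the safe form:

#include <linux/slab.h>

static void *alloc_pdu_hdr(size_t len)
{
        /*
         * GFP_ATOMIC: no sleeping, usable under spinlocks and in
         * softirq context; callers must tolerate allocation failure.
         */
        return kzalloc(len, GFP_ATOMIC);
}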
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 9e39866d473b..7ec3f6b55dde 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,2 +1,2 @@
1obj-$(CONFIG_CXLFLASH) += cxlflash.o 1obj-$(CONFIG_CXLFLASH) += cxlflash.o
2cxlflash-y += main.o superpipe.o lunmgt.o vlun.o 2cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
new file mode 100644
index 000000000000..339e42b03c49
--- /dev/null
+++ b/drivers/scsi/cxlflash/backend.h
@@ -0,0 +1,41 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
5 * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2018 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
16
17struct cxlflash_backend_ops {
18 struct module *module;
19 void __iomem * (*psa_map)(void *);
20 void (*psa_unmap)(void __iomem *);
21 int (*process_element)(void *);
22 int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
23 void (*unmap_afu_irq)(void *, int, void *);
24 int (*start_context)(void *);
25 int (*stop_context)(void *);
26 int (*afu_reset)(void *);
27 void (*set_master)(void *);
28 void * (*get_context)(struct pci_dev *, void *);
29 void * (*dev_context_init)(struct pci_dev *, void *);
30 int (*release_context)(void *);
31 void (*perst_reloads_same_image)(void *, bool);
32 ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
33 int (*allocate_afu_irqs)(void *, int);
34 void (*free_afu_irqs)(void *);
35 void * (*create_afu)(struct pci_dev *);
36 struct file * (*get_fd)(void *, struct file_operations *, int *);
37 void * (*fops_get_context)(struct file *);
38 int (*start_work)(void *, u64);
39 int (*fd_mmap)(struct file *, struct vm_area_struct *);
40 int (*fd_release)(struct inode *, struct file *);
41};
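The new ops table decouples cxlflash from the bare-metal CXL kernel API: the core driver holds only opaque context/AFU cookies and dispatches through function pointers, so a second transport backend can be slotted in later without touching callers. A reduced sketch of the pattern (the names here are hypothetical, not the driver's):

#include <linux/types.h>

struct my_backend_ops {
        int (*start_context)(void *ctx_cookie);
        int (*stop_context)(void *ctx_cookie);
};

struct my_cfg {
        const struct my_backend_ops *ops;
        void *ctx_cookie;       /* opaque: only the backend knows its type */
};

static int my_start(struct my_cfg *cfg)
{
        /* Core code never calls cxl_*() directly, only through ops. */
        return cfg->ops->start_context(cfg->ctx_cookie);
}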
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6d95e8e147e0..102fd26ca886 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -25,6 +25,8 @@
25#include <scsi/scsi_cmnd.h> 25#include <scsi/scsi_cmnd.h>
26#include <scsi/scsi_device.h> 26#include <scsi/scsi_device.h>
27 27
28#include "backend.h"
29
28extern const struct file_operations cxlflash_cxl_fops; 30extern const struct file_operations cxlflash_cxl_fops;
29 31
30#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */ 32#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
@@ -114,6 +116,7 @@ enum cxlflash_hwq_mode {
114struct cxlflash_cfg { 116struct cxlflash_cfg {
115 struct afu *afu; 117 struct afu *afu;
116 118
119 const struct cxlflash_backend_ops *ops;
117 struct pci_dev *dev; 120 struct pci_dev *dev;
118 struct pci_device_id *dev_id; 121 struct pci_device_id *dev_id;
119 struct Scsi_Host *host; 122 struct Scsi_Host *host;
@@ -129,7 +132,7 @@ struct cxlflash_cfg {
129 int lr_port; 132 int lr_port;
130 atomic_t scan_host_needed; 133 atomic_t scan_host_needed;
131 134
132 struct cxl_afu *cxl_afu; 135 void *afu_cookie;
133 136
134 atomic_t recovery_threads; 137 atomic_t recovery_threads;
135 struct mutex ctx_recovery_mutex; 138 struct mutex ctx_recovery_mutex;
@@ -203,8 +206,7 @@ struct hwq {
203 * fields after this point 206 * fields after this point
204 */ 207 */
205 struct afu *afu; 208 struct afu *afu;
206 struct cxl_context *ctx; 209 void *ctx_cookie;
207 struct cxl_ioctl_start_work work;
208 struct sisl_host_map __iomem *host_map; /* MC host map */ 210 struct sisl_host_map __iomem *host_map; /* MC host map */
209 struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */ 211 struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
210 ctx_hndl_t ctx_hndl; /* master's context handle */ 212 ctx_hndl_t ctx_hndl; /* master's context handle */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
new file mode 100644
index 000000000000..db1cadad5c5d
--- /dev/null
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -0,0 +1,168 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
5 * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2018 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <misc/cxl.h>
16
17#include "backend.h"
18
19/*
20 * The following routines map the cxlflash backend operations to existing CXL
21 * kernel API function and are largely simple shims that provide an abstraction
22 * for converting generic context and AFU cookies into cxl_context or cxl_afu
23 * pointers.
24 */
25
26static void __iomem *cxlflash_psa_map(void *ctx_cookie)
27{
28 return cxl_psa_map(ctx_cookie);
29}
30
31static void cxlflash_psa_unmap(void __iomem *addr)
32{
33 cxl_psa_unmap(addr);
34}
35
36static int cxlflash_process_element(void *ctx_cookie)
37{
38 return cxl_process_element(ctx_cookie);
39}
40
41static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
42 irq_handler_t handler, void *cookie, char *name)
43{
44 return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
45}
46
47static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
48{
49 cxl_unmap_afu_irq(ctx_cookie, num, cookie);
50}
51
52static int cxlflash_start_context(void *ctx_cookie)
53{
54 return cxl_start_context(ctx_cookie, 0, NULL);
55}
56
57static int cxlflash_stop_context(void *ctx_cookie)
58{
59 return cxl_stop_context(ctx_cookie);
60}
61
62static int cxlflash_afu_reset(void *ctx_cookie)
63{
64 return cxl_afu_reset(ctx_cookie);
65}
66
67static void cxlflash_set_master(void *ctx_cookie)
68{
69 cxl_set_master(ctx_cookie);
70}
71
72static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
73{
74 return cxl_get_context(dev);
75}
76
77static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
78{
79 return cxl_dev_context_init(dev);
80}
81
82static int cxlflash_release_context(void *ctx_cookie)
83{
84 return cxl_release_context(ctx_cookie);
85}
86
87static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
88{
89 cxl_perst_reloads_same_image(afu_cookie, image);
90}
91
92static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
93 void *buf, size_t count)
94{
95 return cxl_read_adapter_vpd(dev, buf, count);
96}
97
98static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
99{
100 return cxl_allocate_afu_irqs(ctx_cookie, num);
101}
102
103static void cxlflash_free_afu_irqs(void *ctx_cookie)
104{
105 cxl_free_afu_irqs(ctx_cookie);
106}
107
108static void *cxlflash_create_afu(struct pci_dev *dev)
109{
110 return cxl_pci_to_afu(dev);
111}
112
113static struct file *cxlflash_get_fd(void *ctx_cookie,
114 struct file_operations *fops, int *fd)
115{
116 return cxl_get_fd(ctx_cookie, fops, fd);
117}
118
119static void *cxlflash_fops_get_context(struct file *file)
120{
121 return cxl_fops_get_context(file);
122}
123
124static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
125{
126 struct cxl_ioctl_start_work work = { 0 };
127
128 work.num_interrupts = irqs;
129 work.flags = CXL_START_WORK_NUM_IRQS;
130
131 return cxl_start_work(ctx_cookie, &work);
132}
133
134static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
135{
136 return cxl_fd_mmap(file, vm);
137}
138
139static int cxlflash_fd_release(struct inode *inode, struct file *file)
140{
141 return cxl_fd_release(inode, file);
142}
143
144const struct cxlflash_backend_ops cxlflash_cxl_ops = {
145 .module = THIS_MODULE,
146 .psa_map = cxlflash_psa_map,
147 .psa_unmap = cxlflash_psa_unmap,
148 .process_element = cxlflash_process_element,
149 .map_afu_irq = cxlflash_map_afu_irq,
150 .unmap_afu_irq = cxlflash_unmap_afu_irq,
151 .start_context = cxlflash_start_context,
152 .stop_context = cxlflash_stop_context,
153 .afu_reset = cxlflash_afu_reset,
154 .set_master = cxlflash_set_master,
155 .get_context = cxlflash_get_context,
156 .dev_context_init = cxlflash_dev_context_init,
157 .release_context = cxlflash_release_context,
158 .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
159 .read_adapter_vpd = cxlflash_read_adapter_vpd,
160 .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
161 .free_afu_irqs = cxlflash_free_afu_irqs,
162 .create_afu = cxlflash_create_afu,
163 .get_fd = cxlflash_get_fd,
164 .fops_get_context = cxlflash_fops_get_context,
165 .start_work = cxlflash_start_work,
166 .fd_mmap = cxlflash_fd_mmap,
167 .fd_release = cxlflash_fd_release,
168};
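Most of these shims are one-line forwards; cxlflash_start_work() is the exception, building the CXL-specific struct cxl_ioctl_start_work internally so callers pass a plain interrupt count, which is why struct hwq loses its work member in the common.h hunk above. Wiring a backend in at probe time would then look roughly like this sketch (the helper shown is hypothetical, though main.c below follows the same shape):

#include "common.h"

static int my_probe_backend(struct cxlflash_cfg *cfg, struct pci_dev *pdev)
{
        cfg->ops = &cxlflash_cxl_ops;           /* select the CXL backend */
        cfg->afu_cookie = cfg->ops->create_afu(pdev);
        if (!cfg->afu_cookie)
                return -ENODEV;                 /* no AFU behind this PCI device */
        return 0;
}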
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 38b3a9c84fd1..d8fe7ab870b8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
620 cmd->parent = afu; 620 cmd->parent = afu;
621 cmd->hwq_index = hwq_index; 621 cmd->hwq_index = hwq_index;
622 622
623 cmd->sa.ioasc = 0;
623 cmd->rcb.ctx_id = hwq->ctx_hndl; 624 cmd->rcb.ctx_id = hwq->ctx_hndl;
624 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; 625 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
625 cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); 626 cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
@@ -710,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
710 } 711 }
711 712
712 if (likely(afu->afu_map)) { 713 if (likely(afu->afu_map)) {
713 cxl_psa_unmap((void __iomem *)afu->afu_map); 714 cfg->ops->psa_unmap(afu->afu_map);
714 afu->afu_map = NULL; 715 afu->afu_map = NULL;
715 } 716 }
716 } 717 }
@@ -738,7 +739,7 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
738 739
739 hwq = get_hwq(afu, index); 740 hwq = get_hwq(afu, index);
740 741
741 if (!hwq->ctx) { 742 if (!hwq->ctx_cookie) {
742 dev_err(dev, "%s: returning with NULL MC\n", __func__); 743 dev_err(dev, "%s: returning with NULL MC\n", __func__);
743 return; 744 return;
744 } 745 }
@@ -747,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
747 case UNMAP_THREE: 748 case UNMAP_THREE:
748 /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ 749 /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
749 if (index == PRIMARY_HWQ) 750 if (index == PRIMARY_HWQ)
750 cxl_unmap_afu_irq(hwq->ctx, 3, hwq); 751 cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
751 case UNMAP_TWO: 752 case UNMAP_TWO:
752 cxl_unmap_afu_irq(hwq->ctx, 2, hwq); 753 cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
753 case UNMAP_ONE: 754 case UNMAP_ONE:
754 cxl_unmap_afu_irq(hwq->ctx, 1, hwq); 755 cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
755 case FREE_IRQ: 756 case FREE_IRQ:
756 cxl_free_afu_irqs(hwq->ctx); 757 cfg->ops->free_afu_irqs(hwq->ctx_cookie);
757 /* fall through */ 758 /* fall through */
758 case UNDO_NOOP: 759 case UNDO_NOOP:
759 /* No action required */ 760 /* No action required */
@@ -782,15 +783,15 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
782 783
783 hwq = get_hwq(afu, index); 784 hwq = get_hwq(afu, index);
784 785
785 if (!hwq->ctx) { 786 if (!hwq->ctx_cookie) {
786 dev_err(dev, "%s: returning with NULL MC\n", __func__); 787 dev_err(dev, "%s: returning with NULL MC\n", __func__);
787 return; 788 return;
788 } 789 }
789 790
790 WARN_ON(cxl_stop_context(hwq->ctx)); 791 WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
791 if (index != PRIMARY_HWQ) 792 if (index != PRIMARY_HWQ)
792 WARN_ON(cxl_release_context(hwq->ctx)); 793 WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
793 hwq->ctx = NULL; 794 hwq->ctx_cookie = NULL;
794 795
795 spin_lock_irqsave(&hwq->hsq_slock, lock_flags); 796 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
796 flush_pending_cmds(hwq); 797 flush_pending_cmds(hwq);
@@ -1598,27 +1599,6 @@ out:
1598} 1599}
1599 1600
1600/** 1601/**
1601 * start_context() - starts the master context
1602 * @cfg: Internal structure associated with the host.
1603 * @index: Index of the hardware queue.
1604 *
1605 * Return: A success or failure value from CXL services.
1606 */
1607static int start_context(struct cxlflash_cfg *cfg, u32 index)
1608{
1609 struct device *dev = &cfg->dev->dev;
1610 struct hwq *hwq = get_hwq(cfg->afu, index);
1611 int rc = 0;
1612
1613 rc = cxl_start_context(hwq->ctx,
1614 hwq->work.work_element_descriptor,
1615 NULL);
1616
1617 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1618 return rc;
1619}
1620
1621/**
1622 * read_vpd() - obtains the WWPNs from VPD 1602 * read_vpd() - obtains the WWPNs from VPD
1623 * @cfg: Internal structure associated with the host. 1603 * @cfg: Internal structure associated with the host.
1624 * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs 1604 * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
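
The pattern running through this whole cxlflash series: every direct cxl_*() service call becomes an indirect call through cfg->ops, and struct cxl_context * degrades to an opaque void * cookie, so another transport can later be slotted in behind the same core logic; the removed start_context() wrapper simply becomes one more ops entry. A hedged sketch of the shape such a vtable takes -- the field names here are illustrative, not the actual cxlflash backend-ops layout:

#include <stdio.h>

/* Illustrative backend vtable: one function pointer per service the
 * core driver needs, each taking an opaque per-context cookie. */
struct backend_ops {
	void *(*dev_context_init)(void *afu_cookie);
	int   (*start_context)(void *ctx_cookie);
	int   (*stop_context)(void *ctx_cookie);
	int   (*process_element)(void *ctx_cookie);
};

/* A trivial "cxl-like" implementation used only for this demo. */
static void *demo_init(void *afu) { (void)afu; static int pe = 7; return &pe; }
static int demo_start(void *ctx) { (void)ctx; return 0; }
static int demo_stop(void *ctx)  { (void)ctx; return 0; }
static int demo_pe(void *ctx)    { return *(int *)ctx; }

static const struct backend_ops demo_ops = {
	.dev_context_init = demo_init,
	.start_context    = demo_start,
	.stop_context     = demo_stop,
	.process_element  = demo_pe,
};

int main(void)
{
	const struct backend_ops *ops = &demo_ops;	/* like cfg->ops */
	void *ctx = ops->dev_context_init(NULL);

	if (ops->start_context(ctx))
		return 1;
	printf("pe=%d\n", ops->process_element(ctx));	/* ctx_hndl source */
	return ops->stop_context(ctx);
}
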
@@ -1640,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1640 const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; 1620 const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1641 1621
1642 /* Get the VPD data from the device */ 1622 /* Get the VPD data from the device */
1643 vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); 1623 vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1644 if (unlikely(vpd_size <= 0)) { 1624 if (unlikely(vpd_size <= 0)) {
1645 dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", 1625 dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1646 __func__, vpd_size); 1626 __func__, vpd_size);
@@ -1732,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
1732 struct afu *afu = cfg->afu; 1712 struct afu *afu = cfg->afu;
1733 struct sisl_ctrl_map __iomem *ctrl_map; 1713 struct sisl_ctrl_map __iomem *ctrl_map;
1734 struct hwq *hwq; 1714 struct hwq *hwq;
1715 void *cookie;
1735 int i; 1716 int i;
1736 1717
1737 for (i = 0; i < MAX_CONTEXT; i++) { 1718 for (i = 0; i < MAX_CONTEXT; i++) {
@@ -1746,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg)
1746 /* Copy frequently used fields into hwq */ 1727 /* Copy frequently used fields into hwq */
1747 for (i = 0; i < afu->num_hwqs; i++) { 1728 for (i = 0; i < afu->num_hwqs; i++) {
1748 hwq = get_hwq(afu, i); 1729 hwq = get_hwq(afu, i);
1730 cookie = hwq->ctx_cookie;
1749 1731
1750 hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx); 1732 hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
1751 hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; 1733 hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1752 hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; 1734 hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1753 1735
@@ -1925,13 +1907,13 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1925 struct hwq *hwq) 1907 struct hwq *hwq)
1926{ 1908{
1927 struct device *dev = &cfg->dev->dev; 1909 struct device *dev = &cfg->dev->dev;
1928 struct cxl_context *ctx = hwq->ctx; 1910 void *ctx = hwq->ctx_cookie;
1929 int rc = 0; 1911 int rc = 0;
1930 enum undo_level level = UNDO_NOOP; 1912 enum undo_level level = UNDO_NOOP;
1931 bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); 1913 bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1932 int num_irqs = is_primary_hwq ? 3 : 2; 1914 int num_irqs = is_primary_hwq ? 3 : 2;
1933 1915
1934 rc = cxl_allocate_afu_irqs(ctx, num_irqs); 1916 rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1935 if (unlikely(rc)) { 1917 if (unlikely(rc)) {
1936 dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", 1918 dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1937 __func__, rc); 1919 __func__, rc);
@@ -1939,16 +1921,16 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1939 goto out; 1921 goto out;
1940 } 1922 }
1941 1923
1942 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq, 1924 rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1943 "SISL_MSI_SYNC_ERROR"); 1925 "SISL_MSI_SYNC_ERROR");
1944 if (unlikely(rc <= 0)) { 1926 if (unlikely(rc <= 0)) {
1945 dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); 1927 dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1946 level = FREE_IRQ; 1928 level = FREE_IRQ;
1947 goto out; 1929 goto out;
1948 } 1930 }
1949 1931
1950 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq, 1932 rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1951 "SISL_MSI_RRQ_UPDATED"); 1933 "SISL_MSI_RRQ_UPDATED");
1952 if (unlikely(rc <= 0)) { 1934 if (unlikely(rc <= 0)) {
1953 dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); 1935 dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1954 level = UNMAP_ONE; 1936 level = UNMAP_ONE;
@@ -1959,8 +1941,8 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1959 if (!is_primary_hwq) 1941 if (!is_primary_hwq)
1960 goto out; 1942 goto out;
1961 1943
1962 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq, 1944 rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1963 "SISL_MSI_ASYNC_ERROR"); 1945 "SISL_MSI_ASYNC_ERROR");
1964 if (unlikely(rc <= 0)) { 1946 if (unlikely(rc <= 0)) {
1965 dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); 1947 dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1966 level = UNMAP_TWO; 1948 level = UNMAP_TWO;
@@ -1979,7 +1961,7 @@ out:
1979 */ 1961 */
1980static int init_mc(struct cxlflash_cfg *cfg, u32 index) 1962static int init_mc(struct cxlflash_cfg *cfg, u32 index)
1981{ 1963{
1982 struct cxl_context *ctx; 1964 void *ctx;
1983 struct device *dev = &cfg->dev->dev; 1965 struct device *dev = &cfg->dev->dev;
1984 struct hwq *hwq = get_hwq(cfg->afu, index); 1966 struct hwq *hwq = get_hwq(cfg->afu, index);
1985 int rc = 0; 1967 int rc = 0;
@@ -1990,23 +1972,23 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
1990 INIT_LIST_HEAD(&hwq->pending_cmds); 1972 INIT_LIST_HEAD(&hwq->pending_cmds);
1991 1973
1992 if (index == PRIMARY_HWQ) 1974 if (index == PRIMARY_HWQ)
1993 ctx = cxl_get_context(cfg->dev); 1975 ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
1994 else 1976 else
1995 ctx = cxl_dev_context_init(cfg->dev); 1977 ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
1996 if (unlikely(!ctx)) { 1978 if (IS_ERR_OR_NULL(ctx)) {
1997 rc = -ENOMEM; 1979 rc = -ENOMEM;
1998 goto err1; 1980 goto err1;
1999 } 1981 }
2000 1982
2001 WARN_ON(hwq->ctx); 1983 WARN_ON(hwq->ctx_cookie);
2002 hwq->ctx = ctx; 1984 hwq->ctx_cookie = ctx;
2003 1985
2004 /* Set it up as a master with the CXL */ 1986 /* Set it up as a master with the CXL */
2005 cxl_set_master(ctx); 1987 cfg->ops->set_master(ctx);
2006 1988
2007 /* Reset AFU when initializing primary context */ 1989 /* Reset AFU when initializing primary context */
2008 if (index == PRIMARY_HWQ) { 1990 if (index == PRIMARY_HWQ) {
2009 rc = cxl_afu_reset(ctx); 1991 rc = cfg->ops->afu_reset(ctx);
2010 if (unlikely(rc)) { 1992 if (unlikely(rc)) {
2011 dev_err(dev, "%s: AFU reset failed rc=%d\n", 1993 dev_err(dev, "%s: AFU reset failed rc=%d\n",
2012 __func__, rc); 1994 __func__, rc);
@@ -2020,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
2020 goto err2; 2002 goto err2;
2021 } 2003 }
2022 2004
2023 /* This performs the equivalent of the CXL_IOCTL_START_WORK. 2005 /* Finally, activate the context by starting it */
2024 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process 2006 rc = cfg->ops->start_context(hwq->ctx_cookie);
2025 * element (pe) that is embedded in the context (ctx)
2026 */
2027 rc = start_context(cfg, index);
2028 if (unlikely(rc)) { 2007 if (unlikely(rc)) {
2029 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); 2008 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2030 level = UNMAP_THREE; 2009 level = UNMAP_THREE;
@@ -2037,9 +2016,9 @@ out:
2037err2: 2016err2:
2038 term_intr(cfg, level, index); 2017 term_intr(cfg, level, index);
2039 if (index != PRIMARY_HWQ) 2018 if (index != PRIMARY_HWQ)
2040 cxl_release_context(ctx); 2019 cfg->ops->release_context(ctx);
2041err1: 2020err1:
2042 hwq->ctx = NULL; 2021 hwq->ctx_cookie = NULL;
2043 goto out; 2022 goto out;
2044} 2023}
2045 2024
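
Note the failure test in init_mc() widening from !ctx to IS_ERR_OR_NULL(ctx): an ops backend may report failure either as NULL or as an errno encoded in the pointer itself, per the kernel's ERR_PTR convention, which reserves the topmost 4095 pointer values for error codes. A standalone approximation of that convention:

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095	/* same bound the kernel uses */

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *p)
{
	return !p || IS_ERR(p);
}

int main(void)
{
	void *ok = &ok, *bad = ERR_PTR(-12 /* -ENOMEM */), *none = NULL;

	printf("%d %d %d\n", IS_ERR_OR_NULL(ok),	/* 0 */
	       IS_ERR_OR_NULL(bad),			/* 1 */
	       IS_ERR_OR_NULL(none));			/* 1 */
	if (IS_ERR(bad))
		printf("errno=%ld\n", PTR_ERR(bad));	/* -12 */
	return 0;
}
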
@@ -2094,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
2094 struct hwq *hwq; 2073 struct hwq *hwq;
2095 int i; 2074 int i;
2096 2075
2097 cxl_perst_reloads_same_image(cfg->cxl_afu, true); 2076 cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2098 2077
2099 afu->num_hwqs = afu->desired_hwqs; 2078 afu->num_hwqs = afu->desired_hwqs;
2100 for (i = 0; i < afu->num_hwqs; i++) { 2079 for (i = 0; i < afu->num_hwqs; i++) {
@@ -2108,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
2108 2087
2109 /* Map the entire MMIO space of the AFU using the first context */ 2088 /* Map the entire MMIO space of the AFU using the first context */
2110 hwq = get_hwq(afu, PRIMARY_HWQ); 2089 hwq = get_hwq(afu, PRIMARY_HWQ);
2111 afu->afu_map = cxl_psa_map(hwq->ctx); 2090 afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2112 if (!afu->afu_map) { 2091 if (!afu->afu_map) {
2113 dev_err(dev, "%s: cxl_psa_map failed\n", __func__); 2092 dev_err(dev, "%s: psa_map failed\n", __func__);
2114 rc = -ENOMEM; 2093 rc = -ENOMEM;
2115 goto err1; 2094 goto err1;
2116 } 2095 }
@@ -3670,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
3670 3649
3671 cfg->init_state = INIT_STATE_NONE; 3650 cfg->init_state = INIT_STATE_NONE;
3672 cfg->dev = pdev; 3651 cfg->dev = pdev;
3652 cfg->ops = &cxlflash_cxl_ops;
3673 cfg->cxl_fops = cxlflash_cxl_fops; 3653 cfg->cxl_fops = cxlflash_cxl_fops;
3674 3654
3675 /* 3655 /*
@@ -3701,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
3701 3681
3702 pci_set_drvdata(pdev, cfg); 3682 pci_set_drvdata(pdev, cfg);
3703 3683
3704 cfg->cxl_afu = cxl_pci_to_afu(pdev); 3684 cfg->afu_cookie = cfg->ops->create_afu(pdev);
3705 3685
3706 rc = init_pci(cfg); 3686 rc = init_pci(cfg);
3707 if (rc) { 3687 if (rc) {
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 170fff5aeff6..2fe79df5c73c 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -810,20 +810,22 @@ err:
810 * init_context() - initializes a previously allocated context 810 * init_context() - initializes a previously allocated context
811 * @ctxi: Previously allocated context 811 * @ctxi: Previously allocated context
812 * @cfg: Internal structure associated with the host. 812 * @cfg: Internal structure associated with the host.
813 * @ctx: Previously obtained CXL context reference. 813 * @ctx: Previously obtained context cookie.
814 * @ctxid: Previously obtained process element associated with CXL context. 814 * @ctxid: Previously obtained process element associated with CXL context.
815 * @file: Previously obtained file associated with CXL context. 815 * @file: Previously obtained file associated with CXL context.
816 * @perms: User-specified permissions. 816 * @perms: User-specified permissions.
817 * @irqs: User-specified number of interrupts.
817 */ 818 */
818static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg, 819static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
819 struct cxl_context *ctx, int ctxid, struct file *file, 820 void *ctx, int ctxid, struct file *file, u32 perms,
820 u32 perms) 821 u64 irqs)
821{ 822{
822 struct afu *afu = cfg->afu; 823 struct afu *afu = cfg->afu;
823 824
824 ctxi->rht_perms = perms; 825 ctxi->rht_perms = perms;
825 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; 826 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
826 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); 827 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
828 ctxi->irqs = irqs;
827 ctxi->pid = task_tgid_nr(current); /* tgid = pid */ 829 ctxi->pid = task_tgid_nr(current); /* tgid = pid */
828 ctxi->ctx = ctx; 830 ctxi->ctx = ctx;
829 ctxi->cfg = cfg; 831 ctxi->cfg = cfg;
@@ -976,9 +978,9 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
976 */ 978 */
977static int cxlflash_cxl_release(struct inode *inode, struct file *file) 979static int cxlflash_cxl_release(struct inode *inode, struct file *file)
978{ 980{
979 struct cxl_context *ctx = cxl_fops_get_context(file);
980 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, 981 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
981 cxl_fops); 982 cxl_fops);
983 void *ctx = cfg->ops->fops_get_context(file);
982 struct device *dev = &cfg->dev->dev; 984 struct device *dev = &cfg->dev->dev;
983 struct ctx_info *ctxi = NULL; 985 struct ctx_info *ctxi = NULL;
984 struct dk_cxlflash_detach detach = { { 0 }, 0 }; 986 struct dk_cxlflash_detach detach = { { 0 }, 0 };
@@ -986,7 +988,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
986 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; 988 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
987 int ctxid; 989 int ctxid;
988 990
989 ctxid = cxl_process_element(ctx); 991 ctxid = cfg->ops->process_element(ctx);
990 if (unlikely(ctxid < 0)) { 992 if (unlikely(ctxid < 0)) {
991 dev_err(dev, "%s: Context %p was closed ctxid=%d\n", 993 dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
992 __func__, ctx, ctxid); 994 __func__, ctx, ctxid);
@@ -1014,7 +1016,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
1014 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) 1016 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
1015 _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach); 1017 _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
1016out_release: 1018out_release:
1017 cxl_fd_release(inode, file); 1019 cfg->ops->fd_release(inode, file);
1018out: 1020out:
1019 dev_dbg(dev, "%s: returning\n", __func__); 1021 dev_dbg(dev, "%s: returning\n", __func__);
1020 return 0; 1022 return 0;
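
The reshuffle in these fops handlers is not cosmetic: cfg must be recovered first, by walking back from file->f_op to the cxlflash_cfg that embeds it, before cfg->ops->fops_get_context(file) can be dereferenced; the old code could call the global cxl_fops_get_context() in either order. A self-contained illustration of the container_of() idiom doing that walk (the structures are stand-ins):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct file_operations { int dummy; };

/* Stand-in for struct cxlflash_cfg, which embeds its fops. */
struct cfg {
	int id;
	struct file_operations cxl_fops;
};

int main(void)
{
	struct cfg c = { .id = 42 };
	struct file_operations *fop = &c.cxl_fops;	/* like file->f_op */

	/* Walk back from the embedded member to the containing object. */
	struct cfg *found = container_of(fop, struct cfg, cxl_fops);
	printf("id=%d\n", found->id);	/* prints 42 */
	return 0;
}
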
@@ -1089,9 +1091,9 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
1089{ 1091{
1090 struct vm_area_struct *vma = vmf->vma; 1092 struct vm_area_struct *vma = vmf->vma;
1091 struct file *file = vma->vm_file; 1093 struct file *file = vma->vm_file;
1092 struct cxl_context *ctx = cxl_fops_get_context(file);
1093 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, 1094 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
1094 cxl_fops); 1095 cxl_fops);
1096 void *ctx = cfg->ops->fops_get_context(file);
1095 struct device *dev = &cfg->dev->dev; 1097 struct device *dev = &cfg->dev->dev;
1096 struct ctx_info *ctxi = NULL; 1098 struct ctx_info *ctxi = NULL;
1097 struct page *err_page = NULL; 1099 struct page *err_page = NULL;
@@ -1099,7 +1101,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
1099 int rc = 0; 1101 int rc = 0;
1100 int ctxid; 1102 int ctxid;
1101 1103
1102 ctxid = cxl_process_element(ctx); 1104 ctxid = cfg->ops->process_element(ctx);
1103 if (unlikely(ctxid < 0)) { 1105 if (unlikely(ctxid < 0)) {
1104 dev_err(dev, "%s: Context %p was closed ctxid=%d\n", 1106 dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
1105 __func__, ctx, ctxid); 1107 __func__, ctx, ctxid);
@@ -1162,16 +1164,16 @@ static const struct vm_operations_struct cxlflash_mmap_vmops = {
1162 */ 1164 */
1163static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma) 1165static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
1164{ 1166{
1165 struct cxl_context *ctx = cxl_fops_get_context(file);
1166 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, 1167 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
1167 cxl_fops); 1168 cxl_fops);
1169 void *ctx = cfg->ops->fops_get_context(file);
1168 struct device *dev = &cfg->dev->dev; 1170 struct device *dev = &cfg->dev->dev;
1169 struct ctx_info *ctxi = NULL; 1171 struct ctx_info *ctxi = NULL;
1170 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; 1172 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
1171 int ctxid; 1173 int ctxid;
1172 int rc = 0; 1174 int rc = 0;
1173 1175
1174 ctxid = cxl_process_element(ctx); 1176 ctxid = cfg->ops->process_element(ctx);
1175 if (unlikely(ctxid < 0)) { 1177 if (unlikely(ctxid < 0)) {
1176 dev_err(dev, "%s: Context %p was closed ctxid=%d\n", 1178 dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
1177 __func__, ctx, ctxid); 1179 __func__, ctx, ctxid);
@@ -1188,7 +1190,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
1188 1190
1189 dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid); 1191 dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
1190 1192
1191 rc = cxl_fd_mmap(file, vma); 1193 rc = cfg->ops->fd_mmap(file, vma);
1192 if (likely(!rc)) { 1194 if (likely(!rc)) {
1193 /* Insert ourselves in the mmap fault handler path */ 1195 /* Insert ourselves in the mmap fault handler path */
1194 ctxi->cxl_mmap_vmops = vma->vm_ops; 1196 ctxi->cxl_mmap_vmops = vma->vm_ops;
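
What these two lines set up: after the backend's fd_mmap() installs its own vm_ops on the VMA, the driver saves that pointer and substitutes cxlflash_mmap_vmops, whose fault handler can either veto the access (serving the error page when the context has gone bad) or delegate to the saved ops. A compact standalone model of that interposition (the types are stand-ins for the real vm_operations_struct machinery):

#include <stdio.h>

struct vm_ops { int (*fault)(void *arg); };

static int backend_fault(void *arg) { (void)arg; return 0; /* mapped OK */ }
static const struct vm_ops backend_vmops = { .fault = backend_fault };

static const struct vm_ops *saved_vmops;	/* ctxi->cxl_mmap_vmops */
static int ctx_bad;				/* stand-in for error state */

static int interposed_fault(void *arg)
{
	if (ctx_bad)
		return -1;		/* serve the error page instead */
	return saved_vmops->fault(arg);	/* delegate to the original ops */
}

static const struct vm_ops my_vmops = { .fault = interposed_fault };

int main(void)
{
	const struct vm_ops *vm_ops = &backend_vmops;	/* after fd_mmap() */

	saved_vmops = vm_ops;		/* save the backend's ops ... */
	vm_ops = &my_vmops;		/* ... and interpose our own */

	printf("fault -> %d\n", vm_ops->fault(NULL));	/* delegated: 0 */
	ctx_bad = 1;
	printf("fault -> %d\n", vm_ops->fault(NULL));	/* vetoed: -1 */
	return 0;
}
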
@@ -1307,23 +1309,23 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1307 struct afu *afu = cfg->afu; 1309 struct afu *afu = cfg->afu;
1308 struct llun_info *lli = sdev->hostdata; 1310 struct llun_info *lli = sdev->hostdata;
1309 struct glun_info *gli = lli->parent; 1311 struct glun_info *gli = lli->parent;
1310 struct cxl_ioctl_start_work *work;
1311 struct ctx_info *ctxi = NULL; 1312 struct ctx_info *ctxi = NULL;
1312 struct lun_access *lun_access = NULL; 1313 struct lun_access *lun_access = NULL;
1313 int rc = 0; 1314 int rc = 0;
1314 u32 perms; 1315 u32 perms;
1315 int ctxid = -1; 1316 int ctxid = -1;
1317 u64 irqs = attach->num_interrupts;
1316 u64 flags = 0UL; 1318 u64 flags = 0UL;
1317 u64 rctxid = 0UL; 1319 u64 rctxid = 0UL;
1318 struct file *file = NULL; 1320 struct file *file = NULL;
1319 1321
1320 struct cxl_context *ctx = NULL; 1322 void *ctx = NULL;
1321 1323
1322 int fd = -1; 1324 int fd = -1;
1323 1325
1324 if (attach->num_interrupts > 4) { 1326 if (irqs > 4) {
1325 dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n", 1327 dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
1326 __func__, attach->num_interrupts); 1328 __func__, irqs);
1327 rc = -EINVAL; 1329 rc = -EINVAL;
1328 goto out; 1330 goto out;
1329 } 1331 }
@@ -1394,7 +1396,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1394 goto err; 1396 goto err;
1395 } 1397 }
1396 1398
1397 ctx = cxl_dev_context_init(cfg->dev); 1399 ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
1398 if (IS_ERR_OR_NULL(ctx)) { 1400 if (IS_ERR_OR_NULL(ctx)) {
1399 dev_err(dev, "%s: Could not initialize context %p\n", 1401 dev_err(dev, "%s: Could not initialize context %p\n",
1400 __func__, ctx); 1402 __func__, ctx);
@@ -1402,25 +1404,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1402 goto err; 1404 goto err;
1403 } 1405 }
1404 1406
1405 work = &ctxi->work; 1407 rc = cfg->ops->start_work(ctx, irqs);
1406 work->num_interrupts = attach->num_interrupts;
1407 work->flags = CXL_START_WORK_NUM_IRQS;
1408
1409 rc = cxl_start_work(ctx, work);
1410 if (unlikely(rc)) { 1408 if (unlikely(rc)) {
1411 dev_dbg(dev, "%s: Could not start context rc=%d\n", 1409 dev_dbg(dev, "%s: Could not start context rc=%d\n",
1412 __func__, rc); 1410 __func__, rc);
1413 goto err; 1411 goto err;
1414 } 1412 }
1415 1413
1416 ctxid = cxl_process_element(ctx); 1414 ctxid = cfg->ops->process_element(ctx);
1417 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { 1415 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1418 dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); 1416 dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
1419 rc = -EPERM; 1417 rc = -EPERM;
1420 goto err; 1418 goto err;
1421 } 1419 }
1422 1420
1423 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); 1421 file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
1424 if (unlikely(fd < 0)) { 1422 if (unlikely(fd < 0)) {
1425 rc = -ENODEV; 1423 rc = -ENODEV;
1426 dev_err(dev, "%s: Could not get file descriptor\n", __func__); 1424 dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1431,7 +1429,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1431 perms = SISL_RHT_PERM(attach->hdr.flags + 1); 1429 perms = SISL_RHT_PERM(attach->hdr.flags + 1);
1432 1430
1433 /* Context mutex is locked upon return */ 1431 /* Context mutex is locked upon return */
1434 init_context(ctxi, cfg, ctx, ctxid, file, perms); 1432 init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
1435 1433
1436 rc = afu_attach(cfg, ctxi); 1434 rc = afu_attach(cfg, ctxi);
1437 if (unlikely(rc)) { 1435 if (unlikely(rc)) {
@@ -1479,8 +1477,8 @@ out:
1479err: 1477err:
1480 /* Cleanup CXL context; okay to 'stop' even if it was not started */ 1478 /* Cleanup CXL context; okay to 'stop' even if it was not started */
1481 if (!IS_ERR_OR_NULL(ctx)) { 1479 if (!IS_ERR_OR_NULL(ctx)) {
1482 cxl_stop_context(ctx); 1480 cfg->ops->stop_context(ctx);
1483 cxl_release_context(ctx); 1481 cfg->ops->release_context(ctx);
1484 ctx = NULL; 1482 ctx = NULL;
1485 } 1483 }
1486 1484
@@ -1529,10 +1527,10 @@ static int recover_context(struct cxlflash_cfg *cfg,
1529 int fd = -1; 1527 int fd = -1;
1530 int ctxid = -1; 1528 int ctxid = -1;
1531 struct file *file; 1529 struct file *file;
1532 struct cxl_context *ctx; 1530 void *ctx;
1533 struct afu *afu = cfg->afu; 1531 struct afu *afu = cfg->afu;
1534 1532
1535 ctx = cxl_dev_context_init(cfg->dev); 1533 ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
1536 if (IS_ERR_OR_NULL(ctx)) { 1534 if (IS_ERR_OR_NULL(ctx)) {
1537 dev_err(dev, "%s: Could not initialize context %p\n", 1535 dev_err(dev, "%s: Could not initialize context %p\n",
1538 __func__, ctx); 1536 __func__, ctx);
@@ -1540,21 +1538,21 @@ static int recover_context(struct cxlflash_cfg *cfg,
1540 goto out; 1538 goto out;
1541 } 1539 }
1542 1540
1543 rc = cxl_start_work(ctx, &ctxi->work); 1541 rc = cfg->ops->start_work(ctx, ctxi->irqs);
1544 if (unlikely(rc)) { 1542 if (unlikely(rc)) {
1545 dev_dbg(dev, "%s: Could not start context rc=%d\n", 1543 dev_dbg(dev, "%s: Could not start context rc=%d\n",
1546 __func__, rc); 1544 __func__, rc);
1547 goto err1; 1545 goto err1;
1548 } 1546 }
1549 1547
1550 ctxid = cxl_process_element(ctx); 1548 ctxid = cfg->ops->process_element(ctx);
1551 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { 1549 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1552 dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid); 1550 dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
1553 rc = -EPERM; 1551 rc = -EPERM;
1554 goto err2; 1552 goto err2;
1555 } 1553 }
1556 1554
1557 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); 1555 file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
1558 if (unlikely(fd < 0)) { 1556 if (unlikely(fd < 0)) {
1559 rc = -ENODEV; 1557 rc = -ENODEV;
1560 dev_err(dev, "%s: Could not get file descriptor\n", __func__); 1558 dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1601,9 +1599,9 @@ err3:
1601 fput(file); 1599 fput(file);
1602 put_unused_fd(fd); 1600 put_unused_fd(fd);
1603err2: 1601err2:
1604 cxl_stop_context(ctx); 1602 cfg->ops->stop_context(ctx);
1605err1: 1603err1:
1606 cxl_release_context(ctx); 1604 cfg->ops->release_context(ctx);
1607 goto out; 1605 goto out;
1608} 1606}
1609 1607
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 0b5976829913..35c3cbf83fb5 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -96,15 +96,15 @@ struct ctx_info {
96 struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */ 96 struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
97 u8 *rht_needs_ws; /* User-desired write-same function per RHTE */ 97 u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
98 98
99 struct cxl_ioctl_start_work work;
100 u64 ctxid; 99 u64 ctxid;
100 u64 irqs; /* Number of interrupts requested for context */
101 pid_t pid; 101 pid_t pid;
102 bool initialized; 102 bool initialized;
103 bool unavail; 103 bool unavail;
104 bool err_recovery_active; 104 bool err_recovery_active;
105 struct mutex mutex; /* Context protection */ 105 struct mutex mutex; /* Context protection */
106 struct kref kref; 106 struct kref kref;
107 struct cxl_context *ctx; 107 void *ctx;
108 struct cxlflash_cfg *cfg; 108 struct cxlflash_cfg *cfg;
109 struct list_head luns; /* LUNs attached to this context */ 109 struct list_head luns; /* LUNs attached to this context */
110 const struct vm_operations_struct *cxl_mmap_vmops; 110 const struct vm_operations_struct *cxl_mmap_vmops;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fd22dc6ab5d9..022e421c2185 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -40,6 +40,7 @@
40#define TPGS_SUPPORT_LBA_DEPENDENT 0x10 40#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
41#define TPGS_SUPPORT_OFFLINE 0x40 41#define TPGS_SUPPORT_OFFLINE 0x40
42#define TPGS_SUPPORT_TRANSITION 0x80 42#define TPGS_SUPPORT_TRANSITION 0x80
43#define TPGS_SUPPORT_ALL 0xdf
43 44
44#define RTPG_FMT_MASK 0x70 45#define RTPG_FMT_MASK 0x70
45#define RTPG_FMT_EXT_HDR 0x10 46#define RTPG_FMT_EXT_HDR 0x10
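
TPGS_SUPPORT_ALL is simply the OR of the TPGS_SUPPORT_* state bits defined in this file: 0x01|0x02|0x04|0x08|0x10|0x40|0x80 = 0xdf (0x20 is a reserved bit in the RTPG supported-states byte, which is why the mask is not 0xff). A freshly allocated port group starts with all states presumed valid until an RTPG response narrows the set. Two lines verify the arithmetic:

#include <stdio.h>

int main(void)
{
	/* OPTIMIZED|NONOPTIMIZED|STANDBY|UNAVAILABLE|LBA_DEPENDENT|
	 * OFFLINE|TRANSITION; 0x20 is reserved, hence not 0xff. */
	unsigned all = 0x01 | 0x02 | 0x04 | 0x08 | 0x10 | 0x40 | 0x80;
	printf("0x%02x\n", all);	/* prints 0xdf */
	return 0;
}
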
@@ -81,6 +82,7 @@ struct alua_port_group {
81 int tpgs; 82 int tpgs;
82 int state; 83 int state;
83 int pref; 84 int pref;
85 int valid_states;
84 unsigned flags; /* used for optimizing STPG */ 86 unsigned flags; /* used for optimizing STPG */
85 unsigned char transition_tmo; 87 unsigned char transition_tmo;
86 unsigned long expiry; 88 unsigned long expiry;
@@ -243,6 +245,7 @@ static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
243 pg->group_id = group_id; 245 pg->group_id = group_id;
244 pg->tpgs = tpgs; 246 pg->tpgs = tpgs;
245 pg->state = SCSI_ACCESS_STATE_OPTIMAL; 247 pg->state = SCSI_ACCESS_STATE_OPTIMAL;
248 pg->valid_states = TPGS_SUPPORT_ALL;
246 if (optimize_stpg) 249 if (optimize_stpg)
247 pg->flags |= ALUA_OPTIMIZE_STPG; 250 pg->flags |= ALUA_OPTIMIZE_STPG;
248 kref_init(&pg->kref); 251 kref_init(&pg->kref);
@@ -516,7 +519,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
516{ 519{
517 struct scsi_sense_hdr sense_hdr; 520 struct scsi_sense_hdr sense_hdr;
518 struct alua_port_group *tmp_pg; 521 struct alua_port_group *tmp_pg;
519 int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE; 522 int len, k, off, bufflen = ALUA_RTPG_SIZE;
520 unsigned char *desc, *buff; 523 unsigned char *desc, *buff;
521 unsigned err, retval; 524 unsigned err, retval;
522 unsigned int tpg_desc_tbl_off; 525 unsigned int tpg_desc_tbl_off;
@@ -541,6 +544,22 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
541 retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags); 544 retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
542 545
543 if (retval) { 546 if (retval) {
547 /*
548 * Some (broken) implementations have a habit of returning
549 * an error during things like firmware update etc.
550 * But if the target only supports the active/optimized
551 * state, there is nothing useful we can do here, as no
552 * alternative state exists to switch paths to.
553 * So ignore any errors to avoid spurious failures during
554 * path failover.
555 */
556 if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
557 sdev_printk(KERN_INFO, sdev,
558 "%s: ignoring rtpg result %d\n",
559 ALUA_DH_NAME, retval);
560 kfree(buff);
561 return SCSI_DH_OK;
562 }
544 if (!scsi_sense_valid(&sense_hdr)) { 563 if (!scsi_sense_valid(&sense_hdr)) {
545 sdev_printk(KERN_INFO, sdev, 564 sdev_printk(KERN_INFO, sdev,
546 "%s: rtpg failed, result %d\n", 565 "%s: rtpg failed, result %d\n",
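
The guard reads: mask off the active/optimized bit, and if nothing remains, the target claims to support only that one state -- so a failed RTPG cannot be improved on by switching paths and is deliberately swallowed. The predicate in isolation:

#include <stdio.h>

#define TPGS_SUPPORT_OPTIMIZED 0x01

static int only_optimized(int valid_states)
{
	/* True when no state other than active/optimized is supported. */
	return (valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0;
}

int main(void)
{
	printf("%d\n", only_optimized(0x01));	/* 1: ignore RTPG errors */
	printf("%d\n", only_optimized(0xdf));	/* 0: report them */
	return 0;
}
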
@@ -652,7 +671,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
652 rcu_read_unlock(); 671 rcu_read_unlock();
653 } 672 }
654 if (tmp_pg == pg) 673 if (tmp_pg == pg)
655 valid_states = desc[1]; 674 tmp_pg->valid_states = desc[1];
656 spin_unlock_irqrestore(&tmp_pg->lock, flags); 675 spin_unlock_irqrestore(&tmp_pg->lock, flags);
657 } 676 }
658 kref_put(&tmp_pg->kref, release_port_group); 677 kref_put(&tmp_pg->kref, release_port_group);
@@ -665,13 +684,13 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
665 "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", 684 "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
666 ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), 685 ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
667 pg->pref ? "preferred" : "non-preferred", 686 pg->pref ? "preferred" : "non-preferred",
668 valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', 687 pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
669 valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', 688 pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
670 valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', 689 pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
671 valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', 690 pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
672 valid_states&TPGS_SUPPORT_STANDBY?'S':'s', 691 pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
673 valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', 692 pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
674 valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); 693 pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
675 694
676 switch (pg->state) { 695 switch (pg->state) {
677 case SCSI_ACCESS_STATE_TRANSITIONING: 696 case SCSI_ACCESS_STATE_TRANSITIONING:
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 5e3d909cfc53..6d3e1cb4fea6 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -108,24 +108,6 @@ void fnic_debugfs_terminate(void)
108} 108}
109 109
110/* 110/*
111 * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
112 * Or Open fc_trace_enable file for fc_trace
113 * @inode: The inode pointer.
114 * @file: The file pointer to attach the trace enable/disable flag.
115 *
116 * Description:
117 * This routine opens a debugfs file trace_enable or fc_trace_enable.
118 *
119 * Returns:
120 * This function returns zero if successful.
121 */
122static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
123{
124 filp->private_data = inode->i_private;
125 return 0;
126}
127
128/*
129 * fnic_trace_ctrl_read - 111 * fnic_trace_ctrl_read -
130 * Read trace_enable, fc_trace_enable 112 * Read trace_enable, fc_trace_enable
131 * or fc_trace_clear debugfs file 113 * or fc_trace_clear debugfs file
@@ -220,7 +202,7 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
220 202
221static const struct file_operations fnic_trace_ctrl_fops = { 203static const struct file_operations fnic_trace_ctrl_fops = {
222 .owner = THIS_MODULE, 204 .owner = THIS_MODULE,
223 .open = fnic_trace_ctrl_open, 205 .open = simple_open,
224 .read = fnic_trace_ctrl_read, 206 .read = fnic_trace_ctrl_read,
225 .write = fnic_trace_ctrl_write, 207 .write = fnic_trace_ctrl_write,
226}; 208};
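
simple_open() is the stock helper from fs/libfs.c and does precisely what the deleted fnic_trace_ctrl_open() did: publish inode->i_private through file->private_data and return 0, so the open-coded copy can go. A self-contained equivalent with stubbed structures:

#include <stdio.h>

struct inode { void *i_private; };
struct file  { void *private_data; };

/* Equivalent of fs/libfs.c simple_open(): publish i_private. */
static int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}

int main(void)
{
	int payload = 123;
	struct inode i = { .i_private = &payload };
	struct file f = { 0 };

	simple_open(&i, &f);
	printf("%d\n", *(int *)f.private_data);	/* prints 123 */
	return 0;
}
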
@@ -632,7 +614,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
632 sizeof(struct io_path_stats) - sizeof(u64)); 614 sizeof(struct io_path_stats) - sizeof(u64));
633 memset(fw_stats_p+1, 0, 615 memset(fw_stats_p+1, 0,
634 sizeof(struct fw_stats) - sizeof(u64)); 616 sizeof(struct fw_stats) - sizeof(u64));
635 getnstimeofday(&stats->stats_timestamps.last_reset_time); 617 ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time);
636 } 618 }
637 619
638 (*ppos)++; 620 (*ppos)++;
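
The getnstimeofday() to ktime_get_real_ts64() swap is part of the y2038 work called out in the merge log: struct timespec carries a long tv_sec, which is 32 bits on 32-bit machines and overflows on 19 January 2038, while timespec64 uses a 64-bit seconds field everywhere. The overflow instant, computed directly:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Largest value a signed 32-bit tv_sec can hold. */
	int32_t max = INT32_MAX;		/* 2147483647 seconds */
	int64_t days = (int64_t)max / 86400;	/* whole days after epoch */

	/* 24855 days after 1970-01-01 lands on 2038-01-19. */
	printf("%ld s = %lld days -> 2038-01-19\n",
	       (long)max, (long long)days);
	return 0;
}
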
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 999fc7547560..c7bf316d8e83 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -442,15 +442,13 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
442 vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); 442 vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
443 shost_printk(KERN_INFO, fnic->lport->host, 443 shost_printk(KERN_INFO, fnic->lport->host,
444 "process_vlan_resp: FIP VLAN %d\n", vid); 444 "process_vlan_resp: FIP VLAN %d\n", vid);
445 vlan = kmalloc(sizeof(*vlan), 445 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
446 GFP_ATOMIC);
447 if (!vlan) { 446 if (!vlan) {
448 /* retry from timer */ 447 /* retry from timer */
449 spin_unlock_irqrestore(&fnic->vlans_lock, 448 spin_unlock_irqrestore(&fnic->vlans_lock,
450 flags); 449 flags);
451 goto out; 450 goto out;
452 } 451 }
453 memset(vlan, 0, sizeof(struct fcoe_vlan));
454 vlan->vid = vid & 0x0fff; 452 vlan->vid = vid & 0x0fff;
455 vlan->state = FIP_VLAN_AVAIL; 453 vlan->state = FIP_VLAN_AVAIL;
456 list_add_tail(&vlan->list, &fnic->vlans); 454 list_add_tail(&vlan->list, &fnic->vlans);
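
kzalloc(n, flags) is exactly kmalloc(n, flags) plus a zeroing memset, so the separate memset() line (which also spelled out the type name instead of reusing sizeof(*vlan)) folds away. The userspace analogue of the helper:

#include <stdlib.h>
#include <string.h>

/* Userspace analogue: kzalloc == kmalloc + memset(0), i.e. calloc-like. */
static void *zalloc(size_t n)
{
	void *p = malloc(n);

	if (p)
		memset(p, 0, n);
	return p;
}

int main(void)
{
	struct { int vid; int state; } *vlan = zalloc(sizeof(*vlan));
	int rc = (vlan && vlan->vid == 0 && vlan->state == 0) ? 0 : 1;

	free(vlan);
	return rc;	/* 0: fields arrive already zeroed */
}
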
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 242e2ee494a1..8cbd3c9f0b4c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -906,7 +906,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
906 906
907 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 907 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
908 "icmnd_cmpl abts pending " 908 "icmnd_cmpl abts pending "
909 "hdr status = %s tag = 0x%x sc = 0x%p" 909 "hdr status = %s tag = 0x%x sc = 0x%p "
910 "scsi_status = %x residual = %d\n", 910 "scsi_status = %x residual = %d\n",
911 fnic_fcpio_status_to_str(hdr_status), 911 fnic_fcpio_status_to_str(hdr_status),
912 id, sc, 912 id, sc,
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index e007feedbf72..9daa6ada6fa0 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -18,8 +18,8 @@
18#define _FNIC_STATS_H_ 18#define _FNIC_STATS_H_
19 19
20struct stats_timestamps { 20struct stats_timestamps {
21 struct timespec last_reset_time; 21 struct timespec64 last_reset_time;
22 struct timespec last_read_time; 22 struct timespec64 last_read_time;
23}; 23};
24 24
25struct io_path_stats { 25struct io_path_stats {
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4826f596cb31..abddde11982b 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -111,7 +111,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
111 int len = 0; 111 int len = 0;
112 unsigned long flags; 112 unsigned long flags;
113 char str[KSYM_SYMBOL_LEN]; 113 char str[KSYM_SYMBOL_LEN];
114 struct timespec val; 114 struct timespec64 val;
115 fnic_trace_data_t *tbp; 115 fnic_trace_data_t *tbp;
116 116
117 spin_lock_irqsave(&fnic_trace_lock, flags); 117 spin_lock_irqsave(&fnic_trace_lock, flags);
@@ -129,10 +129,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
129 /* Convert function pointer to function name */ 129 /* Convert function pointer to function name */
130 if (sizeof(unsigned long) < 8) { 130 if (sizeof(unsigned long) < 8) {
131 sprint_symbol(str, tbp->fnaddr.low); 131 sprint_symbol(str, tbp->fnaddr.low);
132 jiffies_to_timespec(tbp->timestamp.low, &val); 132 jiffies_to_timespec64(tbp->timestamp.low, &val);
133 } else { 133 } else {
134 sprint_symbol(str, tbp->fnaddr.val); 134 sprint_symbol(str, tbp->fnaddr.val);
135 jiffies_to_timespec(tbp->timestamp.val, &val); 135 jiffies_to_timespec64(tbp->timestamp.val, &val);
136 } 136 }
137 /* 137 /*
138 * Dump trace buffer entry to memory file 138 * Dump trace buffer entry to memory file
@@ -140,8 +140,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
140 */ 140 */
141 len += snprintf(fnic_dbgfs_prt->buffer + len, 141 len += snprintf(fnic_dbgfs_prt->buffer + len,
142 (trace_max_pages * PAGE_SIZE * 3) - len, 142 (trace_max_pages * PAGE_SIZE * 3) - len,
143 "%16lu.%16lu %-50s %8x %8x %16llx %16llx " 143 "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
144 "%16llx %16llx %16llx\n", val.tv_sec, 144 "%16llx %16llx %16llx\n", (u64)val.tv_sec,
145 val.tv_nsec, str, tbp->host_no, tbp->tag, 145 val.tv_nsec, str, tbp->host_no, tbp->tag,
146 tbp->data[0], tbp->data[1], tbp->data[2], 146 tbp->data[0], tbp->data[1], tbp->data[2],
147 tbp->data[3], tbp->data[4]); 147 tbp->data[3], tbp->data[4]);
@@ -171,10 +171,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
171 /* Convert function pointer to function name */ 171 /* Convert function pointer to function name */
172 if (sizeof(unsigned long) < 8) { 172 if (sizeof(unsigned long) < 8) {
173 sprint_symbol(str, tbp->fnaddr.low); 173 sprint_symbol(str, tbp->fnaddr.low);
174 jiffies_to_timespec(tbp->timestamp.low, &val); 174 jiffies_to_timespec64(tbp->timestamp.low, &val);
175 } else { 175 } else {
176 sprint_symbol(str, tbp->fnaddr.val); 176 sprint_symbol(str, tbp->fnaddr.val);
177 jiffies_to_timespec(tbp->timestamp.val, &val); 177 jiffies_to_timespec64(tbp->timestamp.val, &val);
178 } 178 }
179 /* 179 /*
180 * Dump trace buffer entry to memory file 180 * Dump trace buffer entry to memory file
@@ -182,8 +182,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
182 */ 182 */
183 len += snprintf(fnic_dbgfs_prt->buffer + len, 183 len += snprintf(fnic_dbgfs_prt->buffer + len,
184 (trace_max_pages * PAGE_SIZE * 3) - len, 184 (trace_max_pages * PAGE_SIZE * 3) - len,
185 "%16lu.%16lu %-50s %8x %8x %16llx %16llx " 185 "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
186 "%16llx %16llx %16llx\n", val.tv_sec, 186 "%16llx %16llx %16llx\n", (u64)val.tv_sec,
187 val.tv_nsec, str, tbp->host_no, tbp->tag, 187 val.tv_nsec, str, tbp->host_no, tbp->tag,
188 tbp->data[0], tbp->data[1], tbp->data[2], 188 tbp->data[0], tbp->data[1], tbp->data[2],
189 tbp->data[3], tbp->data[4]); 189 tbp->data[3], tbp->data[4]);
@@ -217,29 +217,29 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
217{ 217{
218 int len = 0; 218 int len = 0;
219 int buf_size = debug->buf_size; 219 int buf_size = debug->buf_size;
220 struct timespec val1, val2; 220 struct timespec64 val1, val2;
221 221
222 getnstimeofday(&val1); 222 ktime_get_real_ts64(&val1);
223 len = snprintf(debug->debug_buffer + len, buf_size - len, 223 len = snprintf(debug->debug_buffer + len, buf_size - len,
224 "------------------------------------------\n" 224 "------------------------------------------\n"
225 "\t\tTime\n" 225 "\t\tTime\n"
226 "------------------------------------------\n"); 226 "------------------------------------------\n");
227 227
228 len += snprintf(debug->debug_buffer + len, buf_size - len, 228 len += snprintf(debug->debug_buffer + len, buf_size - len,
229 "Current time : [%ld:%ld]\n" 229 "Current time : [%lld:%ld]\n"
230 "Last stats reset time: [%ld:%ld]\n" 230 "Last stats reset time: [%lld:%09ld]\n"
231 "Last stats read time: [%ld:%ld]\n" 231 "Last stats read time: [%lld:%ld]\n"
232 "delta since last reset: [%ld:%ld]\n" 232 "delta since last reset: [%lld:%ld]\n"
233 "delta since last read: [%ld:%ld]\n", 233 "delta since last read: [%lld:%ld]\n",
234 val1.tv_sec, val1.tv_nsec, 234 (s64)val1.tv_sec, val1.tv_nsec,
235 stats->stats_timestamps.last_reset_time.tv_sec, 235 (s64)stats->stats_timestamps.last_reset_time.tv_sec,
236 stats->stats_timestamps.last_reset_time.tv_nsec, 236 stats->stats_timestamps.last_reset_time.tv_nsec,
237 stats->stats_timestamps.last_read_time.tv_sec, 237 (s64)stats->stats_timestamps.last_read_time.tv_sec,
238 stats->stats_timestamps.last_read_time.tv_nsec, 238 stats->stats_timestamps.last_read_time.tv_nsec,
239 timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec, 239 (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
240 timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec, 240 timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
241 timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec, 241 (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
242 timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec); 242 timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
243 243
244 stats->stats_timestamps.last_read_time = val1; 244 stats->stats_timestamps.last_read_time = val1;
245 245
@@ -403,12 +403,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
403 "\t\tOther Important Statistics\n" 403 "\t\tOther Important Statistics\n"
404 "------------------------------------------\n"); 404 "------------------------------------------\n");
405 405
406 jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1); 406 jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
407 jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2); 407 jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
408 408
409 len += snprintf(debug->debug_buffer + len, buf_size - len, 409 len += snprintf(debug->debug_buffer + len, buf_size - len,
410 "Last ISR time: %llu (%8lu.%8lu)\n" 410 "Last ISR time: %llu (%8llu.%09lu)\n"
411 "Last ACK time: %llu (%8lu.%8lu)\n" 411 "Last ACK time: %llu (%8llu.%09lu)\n"
412 "Number of ISRs: %lld\n" 412 "Number of ISRs: %lld\n"
413 "Maximum CQ Entries: %lld\n" 413 "Maximum CQ Entries: %lld\n"
414 "Number of ACK index out of range: %lld\n" 414 "Number of ACK index out of range: %lld\n"
@@ -425,9 +425,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
425 "Number of rport not ready: %lld\n" 425 "Number of rport not ready: %lld\n"
426 "Number of receive frame errors: %lld\n", 426 "Number of receive frame errors: %lld\n",
427 (u64)stats->misc_stats.last_isr_time, 427 (u64)stats->misc_stats.last_isr_time,
428 val1.tv_sec, val1.tv_nsec, 428 (s64)val1.tv_sec, val1.tv_nsec,
429 (u64)stats->misc_stats.last_ack_time, 429 (u64)stats->misc_stats.last_ack_time,
430 val2.tv_sec, val2.tv_nsec, 430 (s64)val2.tv_sec, val2.tv_nsec,
431 (u64)atomic64_read(&stats->misc_stats.isr_count), 431 (u64)atomic64_read(&stats->misc_stats.isr_count),
432 (u64)atomic64_read(&stats->misc_stats.max_cq_entries), 432 (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
433 (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range), 433 (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
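
The format-string churn in this file is correctness, not style: tv_nsec is a fractional-second count, so it must be zero-padded to nine digits -- the old "%16lu" printed 5000000 ns space-padded, reading as 0.5 s rather than 0.005 s -- and the (u64)/(s64) casts keep a single format string valid on both 32- and 64-bit builds, where the width of tv_sec differs. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	long long sec = 12;
	unsigned long nsec = 5000000;	/* 0.005 s */

	printf("%lld.%lu\n", sec, nsec);	/* "12.5000000": wrong */
	printf("%lld.%09lu\n", sec, nsec);	/* "12.005000000": right */
	return 0;
}
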
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 83357b0367d8..e7fd2877c19c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -99,12 +99,43 @@ struct hisi_sas_hw_error {
99 const struct hisi_sas_hw_error *sub; 99 const struct hisi_sas_hw_error *sub;
100}; 100};
101 101
102struct hisi_sas_rst {
103 struct hisi_hba *hisi_hba;
104 struct completion *completion;
105 struct work_struct work;
106 bool done;
107};
108
109#define HISI_SAS_RST_WORK_INIT(r, c) \
110 { .hisi_hba = hisi_hba, \
111 .completion = &c, \
112 .work = __WORK_INITIALIZER(r.work, \
113 hisi_sas_sync_rst_work_handler), \
114 .done = false, \
115 }
116
117#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
118 DECLARE_COMPLETION_ONSTACK(c); \
119 DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
120 struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
121
122enum hisi_sas_bit_err_type {
123 HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0,
124 HISI_SAS_ERR_MULTI_BIT_ECC = 0x1,
125};
126
127enum hisi_sas_phy_event {
128 HISI_PHYE_PHY_UP = 0U,
129 HISI_PHYE_LINK_RESET,
130 HISI_PHYES_NUM,
131};
132
102struct hisi_sas_phy { 133struct hisi_sas_phy {
134 struct work_struct works[HISI_PHYES_NUM];
103 struct hisi_hba *hisi_hba; 135 struct hisi_hba *hisi_hba;
104 struct hisi_sas_port *port; 136 struct hisi_sas_port *port;
105 struct asd_sas_phy sas_phy; 137 struct asd_sas_phy sas_phy;
106 struct sas_identify identify; 138 struct sas_identify identify;
107 struct work_struct phyup_ws;
108 u64 port_id; /* from hw */ 139 u64 port_id; /* from hw */
109 u64 dev_sas_addr; 140 u64 dev_sas_addr;
110 u64 frame_rcvd_size; 141 u64 frame_rcvd_size;
@@ -205,13 +236,16 @@ struct hisi_sas_hw {
205 void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no, 236 void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
206 struct sas_phy_linkrates *linkrates); 237 struct sas_phy_linkrates *linkrates);
207 enum sas_linkrate (*phy_get_max_linkrate)(void); 238 enum sas_linkrate (*phy_get_max_linkrate)(void);
208 void (*free_device)(struct hisi_hba *hisi_hba, 239 void (*clear_itct)(struct hisi_hba *hisi_hba,
209 struct hisi_sas_device *dev); 240 struct hisi_sas_device *dev);
241 void (*free_device)(struct hisi_sas_device *sas_dev);
210 int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id); 242 int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
211 void (*dereg_device)(struct hisi_hba *hisi_hba, 243 void (*dereg_device)(struct hisi_hba *hisi_hba,
212 struct domain_device *device); 244 struct domain_device *device);
213 int (*soft_reset)(struct hisi_hba *hisi_hba); 245 int (*soft_reset)(struct hisi_hba *hisi_hba);
214 u32 (*get_phys_state)(struct hisi_hba *hisi_hba); 246 u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
247 int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
248 u8 reg_index, u8 reg_count, u8 *write_data);
215 int max_command_entries; 249 int max_command_entries;
216 int complete_hdr_size; 250 int complete_hdr_size;
217}; 251};
@@ -225,6 +259,7 @@ struct hisi_hba {
225 struct device *dev; 259 struct device *dev;
226 260
227 void __iomem *regs; 261 void __iomem *regs;
262 void __iomem *sgpio_regs;
228 struct regmap *ctrl; 263 struct regmap *ctrl;
229 u32 ctrl_reset_reg; 264 u32 ctrl_reset_reg;
230 u32 ctrl_reset_sts_reg; 265 u32 ctrl_reset_sts_reg;
@@ -409,7 +444,8 @@ extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
409extern void hisi_sas_init_add(struct hisi_hba *hisi_hba); 444extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
410extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost); 445extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
411extern void hisi_sas_free(struct hisi_hba *hisi_hba); 446extern void hisi_sas_free(struct hisi_hba *hisi_hba);
412extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction); 447extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
448 int direction);
413extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); 449extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
414extern void hisi_sas_sata_done(struct sas_task *task, 450extern void hisi_sas_sata_done(struct sas_task *task,
415 struct hisi_sas_slot *slot); 451 struct hisi_sas_slot *slot);
@@ -425,5 +461,9 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
425 struct hisi_sas_slot *slot); 461 struct hisi_sas_slot *slot);
426extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); 462extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
427extern void hisi_sas_rst_work_handler(struct work_struct *work); 463extern void hisi_sas_rst_work_handler(struct work_struct *work);
464extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
428extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba); 465extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
466extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
467 enum hisi_sas_phy_event event);
468extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
429#endif 469#endif
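
The new HISI_SAS_RST_WORK_ON_STACK machinery packages a common synchronous-reset idiom: declare a completion and a work item on the caller's stack, queue the work on the driver workqueue, sleep until the handler signals the completion, and read the result back through 'done'. A usage sketch, assuming sleepable kernel context (the handler internals, and any upstream call-site differences, are elided):

/* Sketch of the intended call site (kernel context, may sleep). */
static int hisi_sas_sync_reset(struct hisi_hba *hisi_hba)
{
	/* Declares 'r' plus a hidden on-stack completion 'c', so the
	 * macro can only appear once per scope. */
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

	queue_work(hisi_hba->wq, &r.work);	/* handler runs async */
	wait_for_completion(r.completion);	/* sleep until it signals */

	return r.done ? 0 : -1;	/* handler sets 'done' on success */
}
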
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5f503cb09508..2d4dbed03ee3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -22,10 +22,12 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22 struct domain_device *device, 22 struct domain_device *device,
23 int abort_flag, int tag); 23 int abort_flag, int tag);
24static int hisi_sas_softreset_ata_disk(struct domain_device *device); 24static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26 void *funcdata);
25 27
26u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) 28u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
27{ 29{
28 switch (cmd) { 30 switch (fis->command) {
29 case ATA_CMD_FPDMA_WRITE: 31 case ATA_CMD_FPDMA_WRITE:
30 case ATA_CMD_FPDMA_READ: 32 case ATA_CMD_FPDMA_READ:
31 case ATA_CMD_FPDMA_RECV: 33 case ATA_CMD_FPDMA_RECV:
@@ -77,10 +79,26 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
77 case ATA_CMD_ZAC_MGMT_OUT: 79 case ATA_CMD_ZAC_MGMT_OUT:
78 return HISI_SAS_SATA_PROTOCOL_NONDATA; 80 return HISI_SAS_SATA_PROTOCOL_NONDATA;
79 default: 81 default:
82 {
83 if (fis->command == ATA_CMD_SET_MAX) {
84 switch (fis->features) {
85 case ATA_SET_MAX_PASSWD:
86 case ATA_SET_MAX_LOCK:
87 return HISI_SAS_SATA_PROTOCOL_PIO;
88
89 case ATA_SET_MAX_PASSWD_DMA:
90 case ATA_SET_MAX_UNLOCK_DMA:
91 return HISI_SAS_SATA_PROTOCOL_DMA;
92
93 default:
94 return HISI_SAS_SATA_PROTOCOL_NONDATA;
95 }
96 }
80 if (direction == DMA_NONE) 97 if (direction == DMA_NONE)
81 return HISI_SAS_SATA_PROTOCOL_NONDATA; 98 return HISI_SAS_SATA_PROTOCOL_NONDATA;
82 return HISI_SAS_SATA_PROTOCOL_PIO; 99 return HISI_SAS_SATA_PROTOCOL_PIO;
83 } 100 }
101 }
84} 102}
85EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 103EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
86 104
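
This is the reason the function now takes the whole host_to_dev_fis instead of a bare command byte: ATA SET MAX is a family of subcommands selected by the FIS features field, and the transfer protocol differs per subcommand -- the password/lock variants move data by PIO, their *_DMA twins by DMA, everything else is non-data -- so the command byte alone cannot pick the protocol. A standalone replica of the dispatch (the feature values are stand-ins patterned on <linux/ata.h>):

#include <stdio.h>

enum proto { NONDATA, PIO, DMA };

/* Stand-ins for the ATA_SET_MAX_* feature values; the exact numbering
 * is not the point, the per-feature dispatch is. */
enum { SM_PASSWD = 1, SM_LOCK = 2, SM_PASSWD_DMA = 5, SM_UNLOCK_DMA = 6 };

static enum proto set_max_proto(unsigned char features)
{
	switch (features) {
	case SM_PASSWD:
	case SM_LOCK:
		return PIO;	/* data moved by PIO */
	case SM_PASSWD_DMA:
	case SM_UNLOCK_DMA:
		return DMA;	/* data moved by DMA */
	default:
		return NONDATA;	/* e.g. freeze lock */
	}
}

int main(void)
{
	printf("%d %d %d\n", set_max_proto(1), set_max_proto(5),
	       set_max_proto(4));	/* prints: 1 2 0 */
	return 0;
}
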
@@ -192,7 +210,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
192 210
193 if (!sas_protocol_ata(task->task_proto)) 211 if (!sas_protocol_ata(task->task_proto))
194 if (slot->n_elem) 212 if (slot->n_elem)
195 dma_unmap_sg(dev, task->scatter, slot->n_elem, 213 dma_unmap_sg(dev, task->scatter,
214 task->num_scatter,
196 task->data_dir); 215 task->data_dir);
197 216
198 if (sas_dev) 217 if (sas_dev)
@@ -431,7 +450,8 @@ err_out:
431 dev_err(dev, "task prep: failed[%d]!\n", rc); 450 dev_err(dev, "task prep: failed[%d]!\n", rc);
432 if (!sas_protocol_ata(task->task_proto)) 451 if (!sas_protocol_ata(task->task_proto))
433 if (n_elem) 452 if (n_elem)
434 dma_unmap_sg(dev, task->scatter, n_elem, 453 dma_unmap_sg(dev, task->scatter,
454 task->num_scatter,
435 task->data_dir); 455 task->data_dir);
436prep_out: 456prep_out:
437 return rc; 457 return rc;
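
Both dma_unmap_sg() fixes in this file apply the same DMA-API rule: the nents argument to dma_unmap_sg() must be the value originally passed to dma_map_sg() (task->num_scatter), never the possibly smaller count dma_map_sg() returned (n_elem), since the IOMMU layer may have coalesced entries. The shape of a correct pairing, sketched in kernel context:

/* Sketch (kernel context): unmap with the *original* nents. */
int n_elem = dma_map_sg(dev, task->scatter, task->num_scatter,
			task->data_dir);
if (!n_elem)
	return -ENOMEM;			/* nothing was mapped */

/* ... issue the command using the n_elem coalesced entries ... */

dma_unmap_sg(dev, task->scatter, task->num_scatter,	/* not n_elem */
	     task->data_dir);
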
@@ -578,6 +598,9 @@ static int hisi_sas_dev_found(struct domain_device *device)
578 } 598 }
579 } 599 }
580 600
601 dev_info(dev, "dev[%d:%x] found\n",
602 sas_dev->device_id, sas_dev->dev_type);
603
581 return 0; 604 return 0;
582} 605}
583 606
@@ -617,7 +640,7 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
617static void hisi_sas_phyup_work(struct work_struct *work) 640static void hisi_sas_phyup_work(struct work_struct *work)
618{ 641{
619 struct hisi_sas_phy *phy = 642 struct hisi_sas_phy *phy =
620 container_of(work, struct hisi_sas_phy, phyup_ws); 643 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
621 struct hisi_hba *hisi_hba = phy->hisi_hba; 644 struct hisi_hba *hisi_hba = phy->hisi_hba;
622 struct asd_sas_phy *sas_phy = &phy->sas_phy; 645 struct asd_sas_phy *sas_phy = &phy->sas_phy;
623 int phy_no = sas_phy->id; 646 int phy_no = sas_phy->id;
@@ -626,10 +649,37 @@ static void hisi_sas_phyup_work(struct work_struct *work)
626 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 649 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
627} 650}
628 651
652static void hisi_sas_linkreset_work(struct work_struct *work)
653{
654 struct hisi_sas_phy *phy =
655 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
656 struct asd_sas_phy *sas_phy = &phy->sas_phy;
657
658 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
659}
660
661static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
662 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
663 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
664};
665
666bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
667 enum hisi_sas_phy_event event)
668{
669 struct hisi_hba *hisi_hba = phy->hisi_hba;
670
671 if (WARN_ON(event >= HISI_PHYES_NUM))
672 return false;
673
674 return queue_work(hisi_hba->wq, &phy->works[event]);
675}
676EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
677
629static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 678static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
630{ 679{
631 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 680 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
632 struct asd_sas_phy *sas_phy = &phy->sas_phy; 681 struct asd_sas_phy *sas_phy = &phy->sas_phy;
682 int i;
633 683
634 phy->hisi_hba = hisi_hba; 684 phy->hisi_hba = hisi_hba;
635 phy->port = NULL; 685 phy->port = NULL;
@@ -647,7 +697,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
647 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 697 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
648 sas_phy->lldd_phy = phy; 698 sas_phy->lldd_phy = phy;
649 699
650 INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work); 700 for (i = 0; i < HISI_PHYES_NUM; i++)
701 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
651} 702}
652 703
653static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 704static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
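
Replacing the lone phyup_ws with a works[] array indexed by enum hisi_sas_phy_event turns event delivery into table dispatch: hisi_sas_notify_phy_event() bounds-checks the event and queues the matching pre-initialized work item, and each handler recovers its phy via container_of() on its own slot. A standalone model of the dispatch, with queue_work() reduced to a direct call:

#include <stdio.h>

enum phy_event { EV_PHY_UP, EV_LINK_RESET, EV_NUM };

typedef void (*work_fn)(void);

static void phyup_work(void)     { printf("phy up\n"); }
static void linkreset_work(void) { printf("link reset\n"); }

/* One handler per event, indexed exactly like phy->works[]. */
static const work_fn phy_event_fns[EV_NUM] = {
	[EV_PHY_UP]     = phyup_work,
	[EV_LINK_RESET] = linkreset_work,
};

static int notify_phy_event(enum phy_event ev)
{
	if (ev >= EV_NUM)	/* WARN_ON() upstream */
		return 0;
	phy_event_fns[ev]();	/* stands in for queue_work() */
	return 1;
}

int main(void)
{
	notify_phy_event(EV_LINK_RESET);
	return !notify_phy_event(EV_PHY_UP);	/* 0 on success */
}
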
@@ -702,7 +753,7 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
702 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 753 hisi_sas_do_release_task(hisi_hba, slot->task, slot);
703} 754}
704 755
705static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 756void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
706{ 757{
707 struct hisi_sas_device *sas_dev; 758 struct hisi_sas_device *sas_dev;
708 struct domain_device *device; 759 struct domain_device *device;
@@ -719,6 +770,7 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
719 hisi_sas_release_task(hisi_hba, device); 770 hisi_sas_release_task(hisi_hba, device);
720 } 771 }
721} 772}
773EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
722 774
723static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 775static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
724 struct domain_device *device) 776 struct domain_device *device)
@@ -733,17 +785,21 @@ static void hisi_sas_dev_gone(struct domain_device *device)
733 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 785 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
734 struct device *dev = hisi_hba->dev; 786 struct device *dev = hisi_hba->dev;
735 787
736 dev_info(dev, "found dev[%d:%x] is gone\n", 788 dev_info(dev, "dev[%d:%x] is gone\n",
737 sas_dev->device_id, sas_dev->dev_type); 789 sas_dev->device_id, sas_dev->dev_type);
738 790
739 hisi_sas_internal_task_abort(hisi_hba, device, 791 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
792 hisi_sas_internal_task_abort(hisi_hba, device,
740 HISI_SAS_INT_ABT_DEV, 0); 793 HISI_SAS_INT_ABT_DEV, 0);
741 794
742 hisi_sas_dereg_device(hisi_hba, device); 795 hisi_sas_dereg_device(hisi_hba, device);
743 796
744 hisi_hba->hw->free_device(hisi_hba, sas_dev); 797 hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
745 device->lldd_dev = NULL; 798 device->lldd_dev = NULL;
746 memset(sas_dev, 0, sizeof(*sas_dev)); 799 }
800
801 if (hisi_hba->hw->free_device)
802 hisi_hba->hw->free_device(sas_dev);
747 sas_dev->dev_type = SAS_PHY_UNUSED; 803 sas_dev->dev_type = SAS_PHY_UNUSED;
748} 804}
749 805
@@ -859,12 +915,13 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
859 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 915 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
860 struct hisi_sas_slot *slot = task->lldd_task; 916 struct hisi_sas_slot *slot = task->lldd_task;
861 917
862 dev_err(dev, "abort tmf: TMF task timeout\n"); 918 dev_err(dev, "abort tmf: TMF task timeout and not done\n");
863 if (slot) 919 if (slot)
864 slot->task = NULL; 920 slot->task = NULL;
865 921
866 goto ex_err; 922 goto ex_err;
867 } 923 } else
924 dev_err(dev, "abort tmf: TMF task timeout\n");
868 } 925 }
869 926
870 if (task->task_status.resp == SAS_TASK_COMPLETE && 927 if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -985,27 +1042,42 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
985 sizeof(ssp_task), tmf); 1042 sizeof(ssp_task), tmf);
986} 1043}
987 1044
988static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba, 1045static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
989 struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
990{ 1046{
991 struct hisi_sas_device *sas_dev; 1047 u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
992 struct domain_device *device;
993 int i; 1048 int i;
994 1049
995 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1050 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
996 sas_dev = &hisi_hba->devices[i]; 1051 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
997 device = sas_dev->sas_device; 1052 struct domain_device *device = sas_dev->sas_device;
1053 struct asd_sas_port *sas_port;
1054 struct hisi_sas_port *port;
1055 struct hisi_sas_phy *phy = NULL;
1056 struct asd_sas_phy *sas_phy;
1057
998 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1058 if ((sas_dev->dev_type == SAS_PHY_UNUSED)
999 || !device || (device->port != sas_port)) 1059 || !device || !device->port)
1000 continue; 1060 continue;
1001 1061
1002 hisi_hba->hw->free_device(hisi_hba, sas_dev); 1062 sas_port = device->port;
1063 port = to_hisi_sas_port(sas_port);
1064
1065 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1066 if (state & BIT(sas_phy->id)) {
1067 phy = sas_phy->lldd_phy;
1068 break;
1069 }
1070
1071 if (phy) {
1072 port->id = phy->port_id;
1003 1073
1004 /* Update linkrate of directly attached device. */ 1074 /* Update linkrate of directly attached device. */
1005 if (!device->parent) 1075 if (!device->parent)
1006 device->linkrate = linkrate; 1076 device->linkrate = phy->sas_phy.linkrate;
1007 1077
1008 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1078 hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1079 } else
1080 port->id = 0xff;
1009 } 1081 }
1010} 1082}
1011 1083
@@ -1020,21 +1092,17 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1020 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1092 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1021 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1093 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1022 struct asd_sas_port *sas_port = sas_phy->port; 1094 struct asd_sas_port *sas_port = sas_phy->port;
1023 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
1024 bool do_port_check = !!(_sas_port != sas_port); 1095 bool do_port_check = !!(_sas_port != sas_port);
1025 1096
1026 if (!sas_phy->phy->enabled) 1097 if (!sas_phy->phy->enabled)
1027 continue; 1098 continue;
1028 1099
1029 /* Report PHY state change to libsas */ 1100 /* Report PHY state change to libsas */
1030 if (state & (1 << phy_no)) { 1101 if (state & BIT(phy_no)) {
1031 if (do_port_check && sas_port) { 1102 if (do_port_check && sas_port && sas_port->port_dev) {
1032 struct domain_device *dev = sas_port->port_dev; 1103 struct domain_device *dev = sas_port->port_dev;
1033 1104
1034 _sas_port = sas_port; 1105 _sas_port = sas_port;
1035 port->id = phy->port_id;
1036 hisi_sas_refresh_port_id(hisi_hba,
1037 sas_port, sas_phy->linkrate);
1038 1106
1039 if (DEV_IS_EXPANDER(dev->dev_type)) 1107 if (DEV_IS_EXPANDER(dev->dev_type))
1040 sas_ha->notify_port_event(sas_phy, 1108 sas_ha->notify_port_event(sas_phy,
@@ -1045,8 +1113,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1045 hisi_sas_phy_down(hisi_hba, phy_no, 0); 1113 hisi_sas_phy_down(hisi_hba, phy_no, 0);
1046 1114
1047 } 1115 }
1048
1049 drain_workqueue(hisi_hba->shost->work_q);
1050} 1116}
1051 1117
1052static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1118static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
@@ -1063,7 +1129,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1063 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 1129 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1064 return -1; 1130 return -1;
1065 1131
1066 dev_dbg(dev, "controller resetting...\n"); 1132 dev_info(dev, "controller resetting...\n");
1067 old_state = hisi_hba->hw->get_phys_state(hisi_hba); 1133 old_state = hisi_hba->hw->get_phys_state(hisi_hba);
1068 1134
1069 scsi_block_requests(shost); 1135 scsi_block_requests(shost);
@@ -1072,6 +1138,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1072 if (rc) { 1138 if (rc) {
1073 dev_warn(dev, "controller reset failed (%d)\n", rc); 1139 dev_warn(dev, "controller reset failed (%d)\n", rc);
1074 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1140 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1141 scsi_unblock_requests(shost);
1075 goto out; 1142 goto out;
1076 } 1143 }
1077 spin_lock_irqsave(&hisi_hba->lock, flags); 1144 spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1083,15 +1150,14 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1083 /* Init and wait for PHYs to come up and all libsas event finished. */ 1150 /* Init and wait for PHYs to come up and all libsas event finished. */
1084 hisi_hba->hw->phys_init(hisi_hba); 1151 hisi_hba->hw->phys_init(hisi_hba);
1085 msleep(1000); 1152 msleep(1000);
1086 drain_workqueue(hisi_hba->wq); 1153 hisi_sas_refresh_port_id(hisi_hba);
1087 drain_workqueue(shost->work_q); 1154 scsi_unblock_requests(shost);
1088 1155
1089 state = hisi_hba->hw->get_phys_state(hisi_hba); 1156 state = hisi_hba->hw->get_phys_state(hisi_hba);
1090 hisi_sas_rescan_topology(hisi_hba, old_state, state); 1157 hisi_sas_rescan_topology(hisi_hba, old_state, state);
1091 dev_dbg(dev, "controller reset complete\n"); 1158 dev_info(dev, "controller reset complete\n");
1092 1159
1093out: 1160out:
1094 scsi_unblock_requests(shost);
1095 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1161 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1096 1162
1097 return rc; 1163 return rc;
@@ -1134,6 +1200,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
1134 1200
1135 rc2 = hisi_sas_internal_task_abort(hisi_hba, device, 1201 rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
1136 HISI_SAS_INT_ABT_CMD, tag); 1202 HISI_SAS_INT_ABT_CMD, tag);
1203 if (rc2 < 0) {
1204 dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1205 return TMF_RESP_FUNC_FAILED;
1206 }
1207
1137 /* 1208 /*
1138 * If the TMF finds that the IO is not in the device and also 1209 * If the TMF finds that the IO is not in the device and also
1139 * the internal abort does not succeed, then it is safe to 1210 * the internal abort does not succeed, then it is safe to
@@ -1151,8 +1222,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
1151 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1222 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1152 task->task_proto & SAS_PROTOCOL_STP) { 1223 task->task_proto & SAS_PROTOCOL_STP) {
1153 if (task->dev->dev_type == SAS_SATA_DEV) { 1224 if (task->dev->dev_type == SAS_SATA_DEV) {
1154 hisi_sas_internal_task_abort(hisi_hba, device, 1225 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1155 HISI_SAS_INT_ABT_DEV, 0); 1226 HISI_SAS_INT_ABT_DEV, 0);
1227 if (rc < 0) {
1228 dev_err(dev, "abort task: internal abort failed\n");
1229 goto out;
1230 }
1156 hisi_sas_dereg_device(hisi_hba, device); 1231 hisi_sas_dereg_device(hisi_hba, device);
1157 rc = hisi_sas_softreset_ata_disk(device); 1232 rc = hisi_sas_softreset_ata_disk(device);
1158 } 1233 }
@@ -1163,7 +1238,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
1163 1238
1164 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1239 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1165 HISI_SAS_INT_ABT_CMD, tag); 1240 HISI_SAS_INT_ABT_CMD, tag);
1166 if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) { 1241 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1242 task->lldd_task) {
1167 spin_lock_irqsave(&hisi_hba->lock, flags); 1243 spin_lock_irqsave(&hisi_hba->lock, flags);
1168 hisi_sas_do_release_task(hisi_hba, task, slot); 1244 hisi_sas_do_release_task(hisi_hba, task, slot);
1169 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1245 spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1178,12 +1254,29 @@ out:
1178 1254
1179static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1255static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1180{ 1256{
1257 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1258 struct device *dev = hisi_hba->dev;
1181 struct hisi_sas_tmf_task tmf_task; 1259 struct hisi_sas_tmf_task tmf_task;
1182 int rc = TMF_RESP_FUNC_FAILED; 1260 int rc = TMF_RESP_FUNC_FAILED;
1261 unsigned long flags;
1262
1263 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1264 HISI_SAS_INT_ABT_DEV, 0);
1265 if (rc < 0) {
1266 dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1267 return TMF_RESP_FUNC_FAILED;
1268 }
1269 hisi_sas_dereg_device(hisi_hba, device);
1183 1270
1184 tmf_task.tmf = TMF_ABORT_TASK_SET; 1271 tmf_task.tmf = TMF_ABORT_TASK_SET;
1185 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1272 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1186 1273
1274 if (rc == TMF_RESP_FUNC_COMPLETE) {
1275 spin_lock_irqsave(&hisi_hba->lock, flags);
1276 hisi_sas_release_task(hisi_hba, device);
1277 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1278 }
1279
1187 return rc; 1280 return rc;
1188} 1281}
1189 1282
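The new hisi_sas_abort_task_set() follows the same ordering as the other TMF handlers in this patch: internal abort to quiesce the device, deregister it, issue the SSP TMF, and release lingering slots only once the target reports completion. Reduced to a sketch (not a literal helper in the driver; locking trimmed to a comment):

    static int tmf_with_internal_abort(struct hisi_hba *hisi_hba,
                                       struct domain_device *device,
                                       u8 *lun,
                                       struct hisi_sas_tmf_task *tmf)
    {
            int rc = hisi_sas_internal_task_abort(hisi_hba, device,
                                                  HISI_SAS_INT_ABT_DEV, 0);
            if (rc < 0)     /* abort never reached the hardware: give up */
                    return TMF_RESP_FUNC_FAILED;

            hisi_sas_dereg_device(hisi_hba, device);

            rc = hisi_sas_debug_issue_ssp_tmf(device, lun, tmf);
            if (rc == TMF_RESP_FUNC_COMPLETE)
                    hisi_sas_release_task(hisi_hba, device); /* under hisi_hba->lock */
            return rc;
    }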
@@ -1213,20 +1306,25 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1213{ 1306{
1214 struct hisi_sas_device *sas_dev = device->lldd_dev; 1307 struct hisi_sas_device *sas_dev = device->lldd_dev;
1215 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1308 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1216 unsigned long flags; 1309 struct device *dev = hisi_hba->dev;
1217 int rc = TMF_RESP_FUNC_FAILED; 1310 int rc = TMF_RESP_FUNC_FAILED;
1311 unsigned long flags;
1218 1312
1219 if (sas_dev->dev_status != HISI_SAS_DEV_EH) 1313 if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1220 return TMF_RESP_FUNC_FAILED; 1314 return TMF_RESP_FUNC_FAILED;
1221 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 1315 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1222 1316
1223 hisi_sas_internal_task_abort(hisi_hba, device, 1317 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1224 HISI_SAS_INT_ABT_DEV, 0); 1318 HISI_SAS_INT_ABT_DEV, 0);
1319 if (rc < 0) {
1320 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1321 return TMF_RESP_FUNC_FAILED;
1322 }
1225 hisi_sas_dereg_device(hisi_hba, device); 1323 hisi_sas_dereg_device(hisi_hba, device);
1226 1324
1227 rc = hisi_sas_debug_I_T_nexus_reset(device); 1325 rc = hisi_sas_debug_I_T_nexus_reset(device);
1228 1326
1229 if (rc == TMF_RESP_FUNC_COMPLETE) { 1327 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
1230 spin_lock_irqsave(&hisi_hba->lock, flags); 1328 spin_lock_irqsave(&hisi_hba->lock, flags);
1231 hisi_sas_release_task(hisi_hba, device); 1329 hisi_sas_release_task(hisi_hba, device);
1232 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1330 spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1249,8 +1347,10 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1249 /* Clear internal IO and then hardreset */ 1347 /* Clear internal IO and then hardreset */
1250 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1348 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1251 HISI_SAS_INT_ABT_DEV, 0); 1349 HISI_SAS_INT_ABT_DEV, 0);
1252 if (rc == TMF_RESP_FUNC_FAILED) 1350 if (rc < 0) {
1351 dev_err(dev, "lu_reset: internal abort failed\n");
1253 goto out; 1352 goto out;
1353 }
1254 hisi_sas_dereg_device(hisi_hba, device); 1354 hisi_sas_dereg_device(hisi_hba, device);
1255 1355
1256 phy = sas_get_local_phy(device); 1356 phy = sas_get_local_phy(device);
@@ -1266,6 +1366,14 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1266 } else { 1366 } else {
1267 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; 1367 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1268 1368
1369 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1370 HISI_SAS_INT_ABT_DEV, 0);
1371 if (rc < 0) {
1372 dev_err(dev, "lu_reset: internal abort failed\n");
1373 goto out;
1374 }
1375 hisi_sas_dereg_device(hisi_hba, device);
1376
1269 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1377 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1270 if (rc == TMF_RESP_FUNC_COMPLETE) { 1378 if (rc == TMF_RESP_FUNC_COMPLETE) {
1271 spin_lock_irqsave(&hisi_hba->lock, flags); 1379 spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1283,8 +1391,14 @@ out:
1283static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1391static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1284{ 1392{
1285 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1393 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1394 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1286 1395
1287 return hisi_sas_controller_reset(hisi_hba); 1396 queue_work(hisi_hba->wq, &r.work);
1397 wait_for_completion(r.completion);
1398 if (r.done)
1399 return TMF_RESP_FUNC_COMPLETE;
1400
1401 return TMF_RESP_FUNC_FAILED;
1288} 1402}
1289 1403
1290static int hisi_sas_query_task(struct sas_task *task) 1404static int hisi_sas_query_task(struct sas_task *task)
@@ -1441,8 +1555,14 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1441 struct device *dev = hisi_hba->dev; 1555 struct device *dev = hisi_hba->dev;
1442 int res; 1556 int res;
1443 1557
 1558	/*
 1559	 * If this interface is not implemented, the hardware either does not
 1560	 * support internal abort or does not need one. Return
 1561	 * TMF_RESP_FUNC_FAILED and let the remaining recovery steps proceed
 1562	 * as though the internal abort had executed and returned a CQ entry.
 1563	 */
1444 if (!hisi_hba->hw->prep_abort) 1564 if (!hisi_hba->hw->prep_abort)
1445 return -EOPNOTSUPP; 1565 return TMF_RESP_FUNC_FAILED;
1446 1566
1447 task = sas_alloc_slow_task(GFP_KERNEL); 1567 task = sas_alloc_slow_task(GFP_KERNEL);
1448 if (!task) 1568 if (!task)
@@ -1473,9 +1593,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1473 1593
1474 if (slot) 1594 if (slot)
1475 slot->task = NULL; 1595 slot->task = NULL;
1476 dev_err(dev, "internal task abort: timeout.\n"); 1596 dev_err(dev, "internal task abort: timeout and not done.\n");
1597 res = -EIO;
1477 goto exit; 1598 goto exit;
1478 } 1599 } else
1600 dev_err(dev, "internal task abort: timeout.\n");
1479 } 1601 }
1480 1602
1481 if (task->task_status.resp == SAS_TASK_COMPLETE && 1603 if (task->task_status.resp == SAS_TASK_COMPLETE &&
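Note the return-value convention this hunk settles on: a negative errno from hisi_sas_internal_task_abort() now means the abort could not even be issued (allocation failure, or a timeout with the task not done), while the TMF_RESP_* codes describe the outcome of an abort that did run; with prep_abort unimplemented the function reports TMF_RESP_FUNC_FAILED rather than -EOPNOTSUPP, so callers treat it as "ran but aborted nothing". Caller-side shape, as used throughout the error handlers above:

    rc = hisi_sas_internal_task_abort(hisi_hba, device,
                                      HISI_SAS_INT_ABT_CMD, tag);
    if (rc < 0)                       /* e.g. -ENOMEM, -EIO: nothing issued */
            return TMF_RESP_FUNC_FAILED;
    if (rc == TMF_RESP_FUNC_FAILED) { /* issued, but the command survived */
            /* fall back to releasing the slot by hand, as in abort_task */
    }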
@@ -1507,6 +1629,22 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1507 hisi_sas_port_notify_formed(sas_phy); 1629 hisi_sas_port_notify_formed(sas_phy);
1508} 1630}
1509 1631
1632static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
1633{
1634}
1635
1636static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1637 u8 reg_index, u8 reg_count, u8 *write_data)
1638{
1639 struct hisi_hba *hisi_hba = sha->lldd_ha;
1640
1641 if (!hisi_hba->hw->write_gpio)
1642 return -EOPNOTSUPP;
1643
1644 return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
1645 reg_index, reg_count, write_data);
1646}
1647
1510static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 1648static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
1511{ 1649{
1512 phy->phy_attached = 0; 1650 phy->phy_attached = 0;
@@ -1561,6 +1699,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1561struct scsi_transport_template *hisi_sas_stt; 1699struct scsi_transport_template *hisi_sas_stt;
1562EXPORT_SYMBOL_GPL(hisi_sas_stt); 1700EXPORT_SYMBOL_GPL(hisi_sas_stt);
1563 1701
1702static struct device_attribute *host_attrs[] = {
1703 &dev_attr_phy_event_threshold,
1704 NULL,
1705};
1706
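shost_attrs expects a NULL-terminated array of struct device_attribute pointers; dev_attr_phy_event_threshold is generated elsewhere in this file by the DEVICE_ATTR machinery (its show/store bodies are not part of this hunk). A hedged sketch of the usual shape of such an attribute, with event_thres standing in for whatever field the real implementation exposes:

    static ssize_t phy_event_threshold_show(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
    {
            struct Scsi_Host *shost = class_to_shost(dev);
            struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
            struct hisi_hba *hisi_hba = sha->lldd_ha;

            /* event_thres is an assumed field name, for illustration */
            return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_hba->event_thres);
    }
    static DEVICE_ATTR_RO(phy_event_threshold); /* emits dev_attr_phy_event_threshold */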
1564static struct scsi_host_template _hisi_sas_sht = { 1707static struct scsi_host_template _hisi_sas_sht = {
1565 .module = THIS_MODULE, 1708 .module = THIS_MODULE,
1566 .name = DRV_NAME, 1709 .name = DRV_NAME,
@@ -1580,6 +1723,7 @@ static struct scsi_host_template _hisi_sas_sht = {
1580 .eh_target_reset_handler = sas_eh_target_reset_handler, 1723 .eh_target_reset_handler = sas_eh_target_reset_handler,
1581 .target_destroy = sas_target_destroy, 1724 .target_destroy = sas_target_destroy,
1582 .ioctl = sas_ioctl, 1725 .ioctl = sas_ioctl,
1726 .shost_attrs = host_attrs,
1583}; 1727};
1584struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht; 1728struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
1585EXPORT_SYMBOL_GPL(hisi_sas_sht); 1729EXPORT_SYMBOL_GPL(hisi_sas_sht);
@@ -1597,6 +1741,8 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
1597 .lldd_query_task = hisi_sas_query_task, 1741 .lldd_query_task = hisi_sas_query_task,
1598 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 1742 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1599 .lldd_port_formed = hisi_sas_port_formed, 1743 .lldd_port_formed = hisi_sas_port_formed,
1744 .lldd_port_deformed = hisi_sas_port_deformed,
1745 .lldd_write_gpio = hisi_sas_write_gpio,
1600}; 1746};
1601 1747
1602void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 1748void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -1657,6 +1803,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1657 cq->hisi_hba = hisi_hba; 1803 cq->hisi_hba = hisi_hba;
1658 1804
1659 /* Delivery queue structure */ 1805 /* Delivery queue structure */
1806 spin_lock_init(&dq->lock);
1660 dq->id = i; 1807 dq->id = i;
1661 dq->hisi_hba = hisi_hba; 1808 dq->hisi_hba = hisi_hba;
1662 1809
@@ -1803,6 +1950,17 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
1803} 1950}
1804EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 1951EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
1805 1952
1953void hisi_sas_sync_rst_work_handler(struct work_struct *work)
1954{
1955 struct hisi_sas_rst *rst =
1956 container_of(work, struct hisi_sas_rst, work);
1957
1958 if (!hisi_sas_controller_reset(rst->hisi_hba))
1959 rst->done = true;
1960 complete(rst->completion);
1961}
1962EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
1963
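hisi_sas_sync_rst_work_handler() is the synchronous counterpart of hisi_sas_rst_work_handler(): the caller parks a struct hisi_sas_rst on its stack, queues the work, and sleeps on the completion until the reset has actually run, as hisi_sas_clear_nexus_ha() above demonstrates. The HISI_SAS_DECLARE_RST_WORK_ON_STACK() macro lives in hisi_sas.h and is not shown in this hunk; its assumed expansion is roughly:

    /* inside the calling function */
    DECLARE_COMPLETION_ONSTACK(c);
    struct hisi_sas_rst r = {
            .hisi_hba   = hisi_hba,
            .completion = &c,
            .done       = false,
    };
    INIT_WORK_ONSTACK(&r.work, hisi_sas_sync_rst_work_handler);

    queue_work(hisi_hba->wq, &r.work);
    wait_for_completion(r.completion);  /* handler sets r.done, then completes */
    destroy_work_on_stack(&r.work);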
1806int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 1964int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
1807{ 1965{
1808 struct device *dev = hisi_hba->dev; 1966 struct device *dev = hisi_hba->dev;
@@ -1909,6 +2067,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1909 if (IS_ERR(hisi_hba->regs)) 2067 if (IS_ERR(hisi_hba->regs))
1910 goto err_out; 2068 goto err_out;
1911 2069
2070 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2071 if (res) {
2072 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2073 if (IS_ERR(hisi_hba->sgpio_regs))
2074 goto err_out;
2075 }
2076
1912 if (hisi_sas_alloc(hisi_hba, shost)) { 2077 if (hisi_sas_alloc(hisi_hba, shost)) {
1913 hisi_sas_free(hisi_hba); 2078 hisi_sas_free(hisi_hba);
1914 goto err_out; 2079 goto err_out;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index dc6eca8d6afd..679e76f58a0a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -544,7 +544,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
544 (0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF)); 544 (0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
545} 545}
546 546
547static void free_device_v1_hw(struct hisi_hba *hisi_hba, 547static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
548 struct hisi_sas_device *sas_dev) 548 struct hisi_sas_device *sas_dev)
549{ 549{
550 u64 dev_id = sas_dev->device_id; 550 u64 dev_id = sas_dev->device_id;
@@ -1482,7 +1482,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
1482 else if (phy->identify.device_type != SAS_PHY_UNUSED) 1482 else if (phy->identify.device_type != SAS_PHY_UNUSED)
1483 phy->identify.target_port_protocols = 1483 phy->identify.target_port_protocols =
1484 SAS_PROTOCOL_SMP; 1484 SAS_PROTOCOL_SMP;
1485 queue_work(hisi_hba->wq, &phy->phyup_ws); 1485 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
1486 1486
1487end: 1487end:
1488 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, 1488 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@@ -1850,7 +1850,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
1850 .hw_init = hisi_sas_v1_init, 1850 .hw_init = hisi_sas_v1_init,
1851 .setup_itct = setup_itct_v1_hw, 1851 .setup_itct = setup_itct_v1_hw,
1852 .sl_notify = sl_notify_v1_hw, 1852 .sl_notify = sl_notify_v1_hw,
1853 .free_device = free_device_v1_hw, 1853 .clear_itct = clear_itct_v1_hw,
1854 .prep_smp = prep_smp_v1_hw, 1854 .prep_smp = prep_smp_v1_hw,
1855 .prep_ssp = prep_ssp_v1_hw, 1855 .prep_ssp = prep_ssp_v1_hw,
1856 .get_free_slot = get_free_slot_v1_hw, 1856 .get_free_slot = get_free_slot_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 5d3467fd728d..4ccb61e2ae5c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -240,7 +240,12 @@
240#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) 240#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
241#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 241#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
242#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) 242#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
243#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
244#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
245#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
246#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
243#define CHL_INT2 (PORT_BASE + 0x1bc) 247#define CHL_INT2 (PORT_BASE + 0x1bc)
248#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
244#define CHL_INT0_MSK (PORT_BASE + 0x1c0) 249#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
245#define CHL_INT1_MSK (PORT_BASE + 0x1c4) 250#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
246#define CHL_INT2_MSK (PORT_BASE + 0x1c8) 251#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -952,7 +957,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
952 (0x1ULL << ITCT_HDR_RTOLT_OFF)); 957 (0x1ULL << ITCT_HDR_RTOLT_OFF));
953} 958}
954 959
955static void free_device_v2_hw(struct hisi_hba *hisi_hba, 960static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
956 struct hisi_sas_device *sas_dev) 961 struct hisi_sas_device *sas_dev)
957{ 962{
958 DECLARE_COMPLETION_ONSTACK(completion); 963 DECLARE_COMPLETION_ONSTACK(completion);
@@ -963,10 +968,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
963 968
964 sas_dev->completion = &completion; 969 sas_dev->completion = &completion;
965 970
966 /* SoC bug workaround */
967 if (dev_is_sata(sas_dev->sas_device))
968 clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
969
970 /* clear the itct interrupt state */ 971 /* clear the itct interrupt state */
971 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) 972 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
972 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 973 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
@@ -981,6 +982,15 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
981 } 982 }
982} 983}
983 984
985static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
986{
987 struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
988
989 /* SoC bug workaround */
990 if (dev_is_sata(sas_dev->sas_device))
991 clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
992}
993
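After this split the two hooks have distinct jobs: clear_itct_v2_hw() invalidates the device's ITCT entry in hardware and blocks on an on-stack completion until the controller acknowledges it via the ITCT interrupt, while the slimmed-down free_device_v2_hw() only drops the SATA-index bit kept as a SoC bug workaround. The core drives them as the hisi_sas_dev_gone() hunk at the top of this patch shows, roughly:

    /* core side, simplified from hisi_sas_dev_gone() above */
    if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
            hisi_sas_internal_task_abort(hisi_hba, device,
                                         HISI_SAS_INT_ABT_DEV, 0);
            hisi_sas_dereg_device(hisi_hba, device);
            hisi_hba->hw->clear_itct(hisi_hba, sas_dev); /* waits for HW ack */
            device->lldd_dev = NULL;
    }
    if (hisi_hba->hw->free_device)          /* optional per-HW teardown */
            hisi_hba->hw->free_device(sas_dev);
    sas_dev->dev_type = SAS_PHY_UNUSED;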
984static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) 994static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
985{ 995{
986 int i, reset_val; 996 int i, reset_val;
@@ -1177,8 +1187,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
1177 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); 1187 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
1178 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff); 1188 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
1179 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); 1189 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
1180 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); 1190 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff);
1181 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); 1191 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe);
1182 hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc); 1192 hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
1183 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); 1193 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
1184 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); 1194 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
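The new mask values line up with the error sources decoded below rather than being magic: 0xffffffff ^ 0xff857fff = 0x007a8000, i.e. bits 15 and 17 (the DMAC TX/RX ECC errors, taking the TX offset of 15 from the _OFF define earlier in the file) plus bits 19-22 (the four AXI errors added above), exactly the entries of the port_ecc_axi_error[] table; CHL_INT2_MSK's change from 0x8ffffbff to 0x8ffffbfe clears bit 0 to unmask the IDAF timeout (CHL_INT2_SL_IDAF_TOUT_CONF_OFF). A quick standalone check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* bits flipped to 0 are newly enabled interrupt sources */
            printf("%#x\n", 0xffffffffu ^ 0xff857fffu); /* 0x7a8000: 15,17,19-22 */
            printf("%#x\n", 0x8ffffbffu ^ 0x8ffffbfeu); /* 0x1: bit 0 */
            return 0;
    }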
@@ -2356,6 +2366,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
2356 ts->resp = SAS_TASK_COMPLETE; 2366 ts->resp = SAS_TASK_COMPLETE;
2357 2367
2358 if (unlikely(aborted)) { 2368 if (unlikely(aborted)) {
2369 dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
2359 ts->stat = SAS_ABORTED_TASK; 2370 ts->stat = SAS_ABORTED_TASK;
2360 spin_lock_irqsave(&hisi_hba->lock, flags); 2371 spin_lock_irqsave(&hisi_hba->lock, flags);
2361 hisi_sas_slot_task_free(hisi_hba, task, slot); 2372 hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -2400,6 +2411,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
2400 (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { 2411 (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
2401 u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK) 2412 u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
2402 >> CMPLT_HDR_ERR_PHASE_OFF; 2413 >> CMPLT_HDR_ERR_PHASE_OFF;
2414 u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
2403 2415
2404 /* Analyse error happens on which phase TX or RX */ 2416 /* Analyse error happens on which phase TX or RX */
2405 if (ERR_ON_TX_PHASE(err_phase)) 2417 if (ERR_ON_TX_PHASE(err_phase))
@@ -2407,6 +2419,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
2407 else if (ERR_ON_RX_PHASE(err_phase)) 2419 else if (ERR_ON_RX_PHASE(err_phase))
2408 slot_err_v2_hw(hisi_hba, task, slot, 2); 2420 slot_err_v2_hw(hisi_hba, task, slot, 2);
2409 2421
2422 if (ts->stat != SAS_DATA_UNDERRUN)
2423 dev_info(dev, "erroneous completion iptt=%d task=%p "
2424 "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
2425 "Error info: 0x%x 0x%x 0x%x 0x%x\n",
2426 slot->idx, task,
2427 complete_hdr->dw0, complete_hdr->dw1,
2428 complete_hdr->act, complete_hdr->dw3,
2429 error_info[0], error_info[1],
2430 error_info[2], error_info[3]);
2431
2410 if (unlikely(slot->abort)) 2432 if (unlikely(slot->abort))
2411 return ts->stat; 2433 return ts->stat;
2412 goto out; 2434 goto out;
@@ -2456,7 +2478,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
2456 } 2478 }
2457 2479
2458 if (!slot->port->port_attached) { 2480 if (!slot->port->port_attached) {
2459 dev_err(dev, "slot complete: port %d has been removed\n", 2481 dev_warn(dev, "slot complete: port %d has been removed\n",
2460 slot->port->sas_port.id); 2482 slot->port->sas_port.id);
2461 ts->stat = SAS_PHY_DOWN; 2483 ts->stat = SAS_PHY_DOWN;
2462 } 2484 }
@@ -2517,7 +2539,7 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
2517 dw1 |= 1 << CMD_HDR_RESET_OFF; 2539 dw1 |= 1 << CMD_HDR_RESET_OFF;
2518 2540
2519 dw1 |= (hisi_sas_get_ata_protocol( 2541 dw1 |= (hisi_sas_get_ata_protocol(
2520 task->ata_task.fis.command, task->data_dir)) 2542 &task->ata_task.fis, task->data_dir))
2521 << CMD_HDR_FRAME_TYPE_OFF; 2543 << CMD_HDR_FRAME_TYPE_OFF;
2522 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 2544 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
2523 hdr->dw1 = cpu_to_le32(dw1); 2545 hdr->dw1 = cpu_to_le32(dw1);
@@ -2687,7 +2709,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
2687 if (!timer_pending(&hisi_hba->timer)) 2709 if (!timer_pending(&hisi_hba->timer))
2688 set_link_timer_quirk(hisi_hba); 2710 set_link_timer_quirk(hisi_hba);
2689 } 2711 }
2690 queue_work(hisi_hba->wq, &phy->phyup_ws); 2712 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
2691 2713
2692end: 2714end:
2693 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 2715 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -2713,10 +2735,12 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
2713 u32 phy_state, sl_ctrl, txid_auto; 2735 u32 phy_state, sl_ctrl, txid_auto;
2714 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2736 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
2715 struct hisi_sas_port *port = phy->port; 2737 struct hisi_sas_port *port = phy->port;
2738 struct device *dev = hisi_hba->dev;
2716 2739
2717 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); 2740 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
2718 2741
2719 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); 2742 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
2743 dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
2720 hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0); 2744 hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
2721 2745
2722 sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL); 2746 sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
@@ -2813,6 +2837,33 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
2813 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); 2837 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
2814} 2838}
2815 2839
2840static const struct hisi_sas_hw_error port_ecc_axi_error[] = {
2841 {
2842 .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF),
2843 .msg = "dmac_tx_ecc_bad_err",
2844 },
2845 {
2846 .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF),
2847 .msg = "dmac_rx_ecc_bad_err",
2848 },
2849 {
2850 .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
2851 .msg = "dma_tx_axi_wr_err",
2852 },
2853 {
2854 .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
2855 .msg = "dma_tx_axi_rd_err",
2856 },
2857 {
2858 .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
2859 .msg = "dma_rx_axi_wr_err",
2860 },
2861 {
2862 .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
2863 .msg = "dma_rx_axi_rd_err",
2864 },
2865};
2866
2816static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) 2867static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
2817{ 2868{
2818 struct hisi_hba *hisi_hba = p; 2869 struct hisi_hba *hisi_hba = p;
@@ -2829,40 +2880,55 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
2829 HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff; 2880 HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
2830 2881
2831 while (irq_msk) { 2882 while (irq_msk) {
2832 if (irq_msk & (1 << phy_no)) { 2883 u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
2833 u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, 2884 CHL_INT0);
2834 CHL_INT0); 2885 u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
2835 u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, 2886 CHL_INT1);
2836 CHL_INT1); 2887 u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
2837 u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, 2888 CHL_INT2);
2838 CHL_INT2); 2889
2839 2890 if ((irq_msk & (1 << phy_no)) && irq_value1) {
2840 if (irq_value1) { 2891 int i;
2841 if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
2842 CHL_INT1_DMAC_TX_ECC_ERR_MSK))
2843 panic("%s: DMAC RX/TX ecc bad error!\
2844 (0x%x)",
2845 dev_name(dev), irq_value1);
2846
2847 hisi_sas_phy_write32(hisi_hba, phy_no,
2848 CHL_INT1, irq_value1);
2849 }
2850 2892
2851 if (irq_value2) 2893 for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) {
2852 hisi_sas_phy_write32(hisi_hba, phy_no, 2894 const struct hisi_sas_hw_error *error =
2853 CHL_INT2, irq_value2); 2895 &port_ecc_axi_error[i];
2896
2897 if (!(irq_value1 & error->irq_msk))
2898 continue;
2899
2900 dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
2901 error->msg, phy_no, irq_value1);
2902 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2903 }
2854 2904
2905 hisi_sas_phy_write32(hisi_hba, phy_no,
2906 CHL_INT1, irq_value1);
2907 }
2855 2908
2856 if (irq_value0) { 2909 if ((irq_msk & (1 << phy_no)) && irq_value2) {
2857 if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) 2910 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
2858 phy_bcast_v2_hw(phy_no, hisi_hba);
2859 2911
2860 hisi_sas_phy_write32(hisi_hba, phy_no, 2912 if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
2861 CHL_INT0, irq_value0 2913 dev_warn(dev, "phy%d identify timeout\n",
2862 & (~CHL_INT0_HOTPLUG_TOUT_MSK) 2914 phy_no);
2863 & (~CHL_INT0_SL_PHY_ENABLE_MSK) 2915 hisi_sas_notify_phy_event(phy,
2864 & (~CHL_INT0_NOT_RDY_MSK)); 2916 HISI_PHYE_LINK_RESET);
2865 } 2917 }
2918
2919 hisi_sas_phy_write32(hisi_hba, phy_no,
2920 CHL_INT2, irq_value2);
2921 }
2922
2923 if ((irq_msk & (1 << phy_no)) && irq_value0) {
2924 if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
2925 phy_bcast_v2_hw(phy_no, hisi_hba);
2926
2927 hisi_sas_phy_write32(hisi_hba, phy_no,
2928 CHL_INT0, irq_value0
2929 & (~CHL_INT0_HOTPLUG_TOUT_MSK)
2930 & (~CHL_INT0_SL_PHY_ENABLE_MSK)
2931 & (~CHL_INT0_NOT_RDY_MSK));
2866 } 2932 }
2867 irq_msk &= ~(1 << phy_no); 2933 irq_msk &= ~(1 << phy_no);
2868 phy_no++; 2934 phy_no++;
@@ -2906,7 +2972,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
2906 val = hisi_sas_read32(hisi_hba, ecc_error->reg); 2972 val = hisi_sas_read32(hisi_hba, ecc_error->reg);
2907 val &= ecc_error->msk; 2973 val &= ecc_error->msk;
2908 val >>= ecc_error->shift; 2974 val >>= ecc_error->shift;
2909 dev_warn(dev, ecc_error->msg, irq_value, val); 2975 dev_err(dev, ecc_error->msg, irq_value, val);
2910 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2976 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2911 } 2977 }
2912 } 2978 }
@@ -3015,12 +3081,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
3015 for (; sub->msk || sub->msg; sub++) { 3081 for (; sub->msk || sub->msg; sub++) {
3016 if (!(err_value & sub->msk)) 3082 if (!(err_value & sub->msk))
3017 continue; 3083 continue;
3018 dev_warn(dev, "%s (0x%x) found!\n", 3084 dev_err(dev, "%s (0x%x) found!\n",
3019 sub->msg, irq_value); 3085 sub->msg, irq_value);
3020 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3086 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
3021 } 3087 }
3022 } else { 3088 } else {
3023 dev_warn(dev, "%s (0x%x) found!\n", 3089 dev_err(dev, "%s (0x%x) found!\n",
3024 axi_error->msg, irq_value); 3090 axi_error->msg, irq_value);
3025 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3091 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
3026 } 3092 }
@@ -3206,7 +3272,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
3206 phy->identify.device_type = SAS_SATA_DEV; 3272 phy->identify.device_type = SAS_SATA_DEV;
3207 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); 3273 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
3208 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; 3274 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
3209 queue_work(hisi_hba->wq, &phy->phyup_ws); 3275 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
3210 3276
3211end: 3277end:
3212 hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); 3278 hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
@@ -3392,7 +3458,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
3392 3458
3393 udelay(10); 3459 udelay(10);
3394 if (cnt++ > 10) { 3460 if (cnt++ > 10) {
3395 dev_info(dev, "wait for axi bus to become idle timed out!\n"); 3461 dev_err(dev, "wait for axi bus to become idle timed out!\n");
3396 return -1; 3462 return -1;
3397 } 3463 }
3398 } 3464 }
@@ -3408,6 +3474,44 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
3408 return 0; 3474 return 0;
3409} 3475}
3410 3476
3477static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
3478 u8 reg_index, u8 reg_count, u8 *write_data)
3479{
3480 struct device *dev = hisi_hba->dev;
3481 int phy_no, count;
3482
3483 if (!hisi_hba->sgpio_regs)
3484 return -EOPNOTSUPP;
3485
3486 switch (reg_type) {
3487 case SAS_GPIO_REG_TX:
3488 count = reg_count * 4;
3489 count = min(count, hisi_hba->n_phy);
3490
3491 for (phy_no = 0; phy_no < count; phy_no++) {
3492 /*
3493 * GPIO_TX[n] register has the highest numbered drive
3494 * of the four in the first byte and the lowest
3495 * numbered drive in the fourth byte.
3496 * See SFF-8485 Rev. 0.7 Table 24.
3497 */
3498 void __iomem *reg_addr = hisi_hba->sgpio_regs +
3499 reg_index * 4 + phy_no;
3500 int data_idx = phy_no + 3 - (phy_no % 4) * 2;
3501
3502 writeb(write_data[data_idx], reg_addr);
3503 }
3504
3505 break;
3506 default:
3507 dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
3508 reg_type);
3509 return -EINVAL;
3510 }
3511
3512 return 0;
3513}
3514
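The data_idx expression encodes the SFF-8485 ordering from the comment: within each aligned group of four drives the byte order is simply reversed, so phys 0-7 read their LED state from write_data[] indices 3, 2, 1, 0, 7, 6, 5, 4. A throwaway check of the mapping (not driver code):

    #include <stdio.h>

    int main(void)
    {
            for (int phy_no = 0; phy_no < 8; phy_no++)
                    printf("phy %d -> write_data[%d]\n",
                           phy_no, phy_no + 3 - (phy_no % 4) * 2);
            return 0;
    }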
3411static const struct hisi_sas_hw hisi_sas_v2_hw = { 3515static const struct hisi_sas_hw hisi_sas_v2_hw = {
3412 .hw_init = hisi_sas_v2_init, 3516 .hw_init = hisi_sas_v2_init,
3413 .setup_itct = setup_itct_v2_hw, 3517 .setup_itct = setup_itct_v2_hw,
@@ -3415,6 +3519,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
3415 .alloc_dev = alloc_dev_quirk_v2_hw, 3519 .alloc_dev = alloc_dev_quirk_v2_hw,
3416 .sl_notify = sl_notify_v2_hw, 3520 .sl_notify = sl_notify_v2_hw,
3417 .get_wideport_bitmap = get_wideport_bitmap_v2_hw, 3521 .get_wideport_bitmap = get_wideport_bitmap_v2_hw,
3522 .clear_itct = clear_itct_v2_hw,
3418 .free_device = free_device_v2_hw, 3523 .free_device = free_device_v2_hw,
3419 .prep_smp = prep_smp_v2_hw, 3524 .prep_smp = prep_smp_v2_hw,
3420 .prep_ssp = prep_ssp_v2_hw, 3525 .prep_ssp = prep_ssp_v2_hw,
@@ -3434,6 +3539,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
3434 .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr), 3539 .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
3435 .soft_reset = soft_reset_v2_hw, 3540 .soft_reset = soft_reset_v2_hw,
3436 .get_phys_state = get_phys_state_v2_hw, 3541 .get_phys_state = get_phys_state_v2_hw,
3542 .write_gpio = write_gpio_v2_hw,
3437}; 3543};
3438 3544
3439static int hisi_sas_v2_probe(struct platform_device *pdev) 3545static int hisi_sas_v2_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 19b1f2ffec17..a1f18689729a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -140,6 +140,7 @@
140#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) 140#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
141#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) 141#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
142#define STP_LINK_TIMER (PORT_BASE + 0x120) 142#define STP_LINK_TIMER (PORT_BASE + 0x120)
143#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
143#define CON_CFG_DRIVER (PORT_BASE + 0x130) 144#define CON_CFG_DRIVER (PORT_BASE + 0x130)
144#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) 145#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
145#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138) 146#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
@@ -165,6 +166,8 @@
165#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21 166#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
166#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 167#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
167#define CHL_INT2 (PORT_BASE + 0x1bc) 168#define CHL_INT2 (PORT_BASE + 0x1bc)
169#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
170#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
168#define CHL_INT0_MSK (PORT_BASE + 0x1c0) 171#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
169#define CHL_INT1_MSK (PORT_BASE + 0x1c4) 172#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
170#define CHL_INT2_MSK (PORT_BASE + 0x1c8) 173#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -204,6 +207,13 @@
204#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8 207#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
205#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF) 208#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
206 209
210/* RAS registers need init */
211#define RAS_BASE (0x6000)
212#define SAS_RAS_INTR0 (RAS_BASE)
213#define SAS_RAS_INTR1 (RAS_BASE + 0x04)
214#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
215#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
216
207/* HW dma structures */ 217/* HW dma structures */
208/* Delivery queue header */ 218/* Delivery queue header */
209/* dw0 */ 219/* dw0 */
@@ -422,7 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
422 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); 432 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
423 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); 433 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
424 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff); 434 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
425 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); 435 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
426 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); 436 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
427 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); 437 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
428 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); 438 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
@@ -496,6 +506,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
496 506
497 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI, 507 hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
498 upper_32_bits(hisi_hba->initial_fis_dma)); 508 upper_32_bits(hisi_hba->initial_fis_dma));
509
510 /* RAS registers init */
511 hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
512 hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
499} 513}
500 514
501static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 515static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -588,7 +602,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
588 (0x1ULL << ITCT_HDR_RTOLT_OFF)); 602 (0x1ULL << ITCT_HDR_RTOLT_OFF));
589} 603}
590 604
591static void free_device_v3_hw(struct hisi_hba *hisi_hba, 605static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
592 struct hisi_sas_device *sas_dev) 606 struct hisi_sas_device *sas_dev)
593{ 607{
594 DECLARE_COMPLETION_ONSTACK(completion); 608 DECLARE_COMPLETION_ONSTACK(completion);
@@ -1033,7 +1047,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1033 dw1 |= 1 << CMD_HDR_RESET_OFF; 1047 dw1 |= 1 << CMD_HDR_RESET_OFF;
1034 1048
1035 dw1 |= (hisi_sas_get_ata_protocol( 1049 dw1 |= (hisi_sas_get_ata_protocol(
1036 task->ata_task.fis.command, task->data_dir)) 1050 &task->ata_task.fis, task->data_dir))
1037 << CMD_HDR_FRAME_TYPE_OFF; 1051 << CMD_HDR_FRAME_TYPE_OFF;
1038 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 1052 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
1039 1053
@@ -1138,7 +1152,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1138 struct dev_to_host_fis *fis; 1152 struct dev_to_host_fis *fis;
1139 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; 1153 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
1140 1154
1141 dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); 1155 dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
1142 initial_fis = &hisi_hba->initial_fis[phy_no]; 1156 initial_fis = &hisi_hba->initial_fis[phy_no];
1143 fis = &initial_fis->fis; 1157 fis = &initial_fis->fis;
1144 sas_phy->oob_mode = SATA_OOB_MODE; 1158 sas_phy->oob_mode = SATA_OOB_MODE;
@@ -1181,7 +1195,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1181 1195
1182 phy->port_id = port_id; 1196 phy->port_id = port_id;
1183 phy->phy_attached = 1; 1197 phy->phy_attached = 1;
1184 queue_work(hisi_hba->wq, &phy->phyup_ws); 1198 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
1185 1199
1186end: 1200end:
1187 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 1201 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -1322,7 +1336,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1322 if (!(irq_value1 & error->irq_msk)) 1336 if (!(irq_value1 & error->irq_msk))
1323 continue; 1337 continue;
1324 1338
1325 dev_warn(dev, "%s error (phy%d 0x%x) found!\n", 1339 dev_err(dev, "%s error (phy%d 0x%x) found!\n",
1326 error->msg, phy_no, irq_value1); 1340 error->msg, phy_no, irq_value1);
1327 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1341 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1328 } 1342 }
@@ -1331,9 +1345,31 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1331 CHL_INT1, irq_value1); 1345 CHL_INT1, irq_value1);
1332 } 1346 }
1333 1347
1334 if (irq_msk & (8 << (phy_no * 4)) && irq_value2) 1348 if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
1349 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1350
1351 if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
1352 dev_warn(dev, "phy%d identify timeout\n",
1353 phy_no);
1354 hisi_sas_notify_phy_event(phy,
1355 HISI_PHYE_LINK_RESET);
1356
1357 }
1358
1359 if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
1360 u32 reg_value = hisi_sas_phy_read32(hisi_hba,
1361 phy_no, STP_LINK_TIMEOUT_STATE);
1362
1363 dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
1364 phy_no, reg_value);
1365 if (reg_value & BIT(4))
1366 hisi_sas_notify_phy_event(phy,
1367 HISI_PHYE_LINK_RESET);
1368 }
1369
1335 hisi_sas_phy_write32(hisi_hba, phy_no, 1370 hisi_sas_phy_write32(hisi_hba, phy_no,
1336 CHL_INT2, irq_value2); 1371 CHL_INT2, irq_value2);
1372 }
1337 1373
1338 1374
1339 if (irq_msk & (2 << (phy_no * 4)) && irq_value0) { 1375 if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
@@ -1432,12 +1468,12 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
1432 if (!(err_value & sub->msk)) 1468 if (!(err_value & sub->msk))
1433 continue; 1469 continue;
1434 1470
1435 dev_warn(dev, "%s error (0x%x) found!\n", 1471 dev_err(dev, "%s error (0x%x) found!\n",
1436 sub->msg, irq_value); 1472 sub->msg, irq_value);
1437 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1473 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1438 } 1474 }
1439 } else { 1475 } else {
1440 dev_warn(dev, "%s error (0x%x) found!\n", 1476 dev_err(dev, "%s error (0x%x) found!\n",
1441 error->msg, irq_value); 1477 error->msg, irq_value);
1442 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1478 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1443 } 1479 }
@@ -1542,6 +1578,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1542 memset(ts, 0, sizeof(*ts)); 1578 memset(ts, 0, sizeof(*ts));
1543 ts->resp = SAS_TASK_COMPLETE; 1579 ts->resp = SAS_TASK_COMPLETE;
1544 if (unlikely(aborted)) { 1580 if (unlikely(aborted)) {
1581 dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
1545 ts->stat = SAS_ABORTED_TASK; 1582 ts->stat = SAS_ABORTED_TASK;
1546 spin_lock_irqsave(&hisi_hba->lock, flags); 1583 spin_lock_irqsave(&hisi_hba->lock, flags);
1547 hisi_sas_slot_task_free(hisi_hba, task, slot); 1584 hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -1583,7 +1620,18 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1583 1620
1584 /* check for erroneous completion */ 1621 /* check for erroneous completion */
1585 if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { 1622 if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
1623 u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
1624
1586 slot_err_v3_hw(hisi_hba, task, slot); 1625 slot_err_v3_hw(hisi_hba, task, slot);
1626 if (ts->stat != SAS_DATA_UNDERRUN)
1627 dev_info(dev, "erroneous completion iptt=%d task=%p "
1628 "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
1629 "Error info: 0x%x 0x%x 0x%x 0x%x\n",
1630 slot->idx, task,
1631 complete_hdr->dw0, complete_hdr->dw1,
1632 complete_hdr->act, complete_hdr->dw3,
1633 error_info[0], error_info[1],
1634 error_info[2], error_info[3]);
1587 if (unlikely(slot->abort)) 1635 if (unlikely(slot->abort))
1588 return ts->stat; 1636 return ts->stat;
1589 goto out; 1637 goto out;
@@ -1628,7 +1676,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1628 } 1676 }
1629 1677
1630 if (!slot->port->port_attached) { 1678 if (!slot->port->port_attached) {
1631 dev_err(dev, "slot complete: port %d has been removed\n", 1679 dev_warn(dev, "slot complete: port %d has been removed\n",
1632 slot->port->sas_port.id); 1680 slot->port->sas_port.id);
1633 ts->stat = SAS_PHY_DOWN; 1681 ts->stat = SAS_PHY_DOWN;
1634 } 1682 }
@@ -1653,9 +1701,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
1653 struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; 1701 struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
1654 struct hisi_hba *hisi_hba = cq->hisi_hba; 1702 struct hisi_hba *hisi_hba = cq->hisi_hba;
1655 struct hisi_sas_slot *slot; 1703 struct hisi_sas_slot *slot;
1656 struct hisi_sas_itct *itct;
1657 struct hisi_sas_complete_v3_hdr *complete_queue; 1704 struct hisi_sas_complete_v3_hdr *complete_queue;
1658 u32 rd_point = cq->rd_point, wr_point, dev_id; 1705 u32 rd_point = cq->rd_point, wr_point;
1659 int queue = cq->id; 1706 int queue = cq->id;
1660 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 1707 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
1661 1708
@@ -1671,38 +1718,11 @@ static void cq_tasklet_v3_hw(unsigned long val)
1671 1718
1672 complete_hdr = &complete_queue[rd_point]; 1719 complete_hdr = &complete_queue[rd_point];
1673 1720
1674 /* Check for NCQ completion */ 1721 iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
1675 if (complete_hdr->act) { 1722 slot = &hisi_hba->slot_info[iptt];
1676 u32 act_tmp = complete_hdr->act; 1723 slot->cmplt_queue_slot = rd_point;
1677 int ncq_tag_count = ffs(act_tmp); 1724 slot->cmplt_queue = queue;
1678 1725 slot_complete_v3_hw(hisi_hba, slot);
1679 dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
1680 CMPLT_HDR_DEV_ID_OFF;
1681 itct = &hisi_hba->itct[dev_id];
1682
1683 /* The NCQ tags are held in the itct header */
1684 while (ncq_tag_count) {
1685 __le64 *ncq_tag = &itct->qw4_15[0];
1686
1687 ncq_tag_count -= 1;
1688 iptt = (ncq_tag[ncq_tag_count / 5]
1689 >> (ncq_tag_count % 5) * 12) & 0xfff;
1690
1691 slot = &hisi_hba->slot_info[iptt];
1692 slot->cmplt_queue_slot = rd_point;
1693 slot->cmplt_queue = queue;
1694 slot_complete_v3_hw(hisi_hba, slot);
1695
1696 act_tmp &= ~(1 << ncq_tag_count);
1697 ncq_tag_count = ffs(act_tmp);
1698 }
1699 } else {
1700 iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
1701 slot = &hisi_hba->slot_info[iptt];
1702 slot->cmplt_queue_slot = rd_point;
1703 slot->cmplt_queue = queue;
1704 slot_complete_v3_hw(hisi_hba, slot);
1705 }
1706 1726
1707 if (++rd_point >= HISI_SAS_QUEUE_SLOTS) 1727 if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
1708 rd_point = 0; 1728 rd_point = 0;
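For the record, the deleted branch existed because older completion headers did not carry an IPTT for NCQ commands: the tags were packed five per 64-bit qword in the ITCT, 12 bits each, and the ffs() loop over the ACT bitmap unpacked them one at a time. v3 hardware reports the IPTT in dw1 of every completion, NCQ or not, so the direct path above is now the only path. The old extraction, isolated into a helper for clarity (the deleted code shifted the raw __le64 directly; a byte-order-safe version would convert first, as here):

    #include <linux/types.h>

    /* tag n lives in qword n/5, at bit offset (n%5)*12, 12 bits wide */
    static u32 ncq_iptt(const __le64 *qw4_15, int n)
    {
            return (le64_to_cpu(qw4_15[n / 5]) >> ((n % 5) * 12)) & 0xfff;
    }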
@@ -1951,7 +1971,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
1951 .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW, 1971 .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
1952 .get_wideport_bitmap = get_wideport_bitmap_v3_hw, 1972 .get_wideport_bitmap = get_wideport_bitmap_v3_hw,
1953 .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), 1973 .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
1954 .free_device = free_device_v3_hw, 1974 .clear_itct = clear_itct_v3_hw,
1955 .sl_notify = sl_notify_v3_hw, 1975 .sl_notify = sl_notify_v3_hw,
1956 .prep_ssp = prep_ssp_v3_hw, 1976 .prep_ssp = prep_ssp_v3_hw,
1957 .prep_smp = prep_smp_v3_hw, 1977 .prep_smp = prep_smp_v3_hw,
@@ -2157,21 +2177,243 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
2157 scsi_host_put(shost); 2177 scsi_host_put(shost);
2158} 2178}
2159 2179
2180static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
2181 { .irq_msk = BIT(19), .msg = "HILINK_INT" },
2182 { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
2183 { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
2184 { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
2185 { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
2186 { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
2187 { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
2188 { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
2189 { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
2190 { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
2191 { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
2192 { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
2193 { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
2194};
2195
2196static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
2197 { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
2198 { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
2199 { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
2200 { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
2201 { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
2202 { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
2203 { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
2204 { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
2205 { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
2206 { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
2207 { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
2208 { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
2209 { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
2210 { .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
2211 { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
2212 { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
2213 { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
2214 { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
2215 { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
2216 { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
2217 { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
2218 { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
2219 { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
2220 { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
2221 { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
2222 { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
2223 { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
2224 { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
2225 { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
2226 { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
2227 { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
2228};
2229
2230static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
2231{
2232 struct device *dev = hisi_hba->dev;
2233 const struct hisi_sas_hw_error *ras_error;
2234 bool need_reset = false;
2235 u32 irq_value;
2236 int i;
2237
2238 irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
2239 for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
2240 ras_error = &sas_ras_intr0_nfe[i];
2241 if (ras_error->irq_msk & irq_value) {
2242 dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
2243 ras_error->msg, irq_value);
2244 need_reset = true;
2245 }
2246 }
2247 hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
2248
2249 irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
2250 for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
2251 ras_error = &sas_ras_intr1_nfe[i];
2252 if (ras_error->irq_msk & irq_value) {
2253 dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
2254 ras_error->msg, irq_value);
2255 need_reset = true;
2256 }
2257 }
2258 hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
2259
2260 return need_reset;
2261}
2262
2263static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
2264 pci_channel_state_t state)
2265{
2266 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
2267 struct hisi_hba *hisi_hba = sha->lldd_ha;
2268 struct device *dev = hisi_hba->dev;
2269
2270 dev_info(dev, "PCI error: detected callback, state(%d)!\n", state);
2271 if (state == pci_channel_io_perm_failure)
2272 return PCI_ERS_RESULT_DISCONNECT;
2273
2274 if (process_non_fatal_error_v3_hw(hisi_hba))
2275 return PCI_ERS_RESULT_NEED_RESET;
2276
2277 return PCI_ERS_RESULT_CAN_RECOVER;
2278}
2279
2280static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
2281{
2282 return PCI_ERS_RESULT_RECOVERED;
2283}
2284
2285static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
2286{
2287 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
2288 struct hisi_hba *hisi_hba = sha->lldd_ha;
2289 struct device *dev = hisi_hba->dev;
2290 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
2291
2292 dev_info(dev, "PCI error: slot reset callback!\n");
2293 queue_work(hisi_hba->wq, &r.work);
2294 wait_for_completion(r.completion);
2295 if (r.done)
2296 return PCI_ERS_RESULT_RECOVERED;
2297
2298 return PCI_ERS_RESULT_DISCONNECT;
2299}
2300
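These three callbacks plug into the standard PCI AER recovery sequence: the core calls error_detected() first, and a NEED_RESET answer makes it reset the slot and call slot_reset(), which here reuses the same synchronous reset work as hisi_sas_clear_nexus_ha(). The decision logic in error_detected() condenses to:

    /* sketch of the verdict computed by the handler above */
    static pci_ers_result_t verdict(struct hisi_hba *hisi_hba,
                                    pci_channel_state_t state)
    {
            if (state == pci_channel_io_perm_failure)
                    return PCI_ERS_RESULT_DISCONNECT;   /* link dead: give up */
            if (process_non_fatal_error_v3_hw(hisi_hba))
                    return PCI_ERS_RESULT_NEED_RESET;   /* latched RAS errors */
            return PCI_ERS_RESULT_CAN_RECOVER;
    }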
2160enum { 2301enum {
2161 /* instances of the controller */ 2302 /* instances of the controller */
2162 hip08, 2303 hip08,
2163}; 2304};
2164 2305
2306static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
2307{
2308 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
2309 struct hisi_hba *hisi_hba = sha->lldd_ha;
2310 struct device *dev = hisi_hba->dev;
2311 struct Scsi_Host *shost = hisi_hba->shost;
2312 u32 device_state, status;
2313 int rc;
2314 u32 reg_val;
2315 unsigned long flags;
2316
2317 if (!pdev->pm_cap) {
2318 dev_err(dev, "PCI PM not supported\n");
2319 return -ENODEV;
2320 }
2321
2322 set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
2323 scsi_block_requests(shost);
2324 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
2325 flush_workqueue(hisi_hba->wq);
2326 /* disable DQ/PHY/bus */
2327 interrupt_disable_v3_hw(hisi_hba);
2328 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
2329 hisi_sas_kill_tasklets(hisi_hba);
2330
2331 hisi_sas_stop_phys(hisi_hba);
2332
2333 reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
2334 AM_CTRL_GLOBAL);
2335 reg_val |= 0x1;
2336 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
2337 AM_CTRL_GLOBAL, reg_val);
2338
2339 /* wait until bus idle */
2340 rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
2341 AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
2342 if (rc) {
2343 dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
2344 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
2345 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
2346 scsi_unblock_requests(shost);
2347 return rc;
2348 }
2349
2350 hisi_sas_init_mem(hisi_hba);
2351
2352 device_state = pci_choose_state(pdev, state);
2353 dev_warn(dev, "entering power state [D%d]\n",
2354 device_state);
2355 pci_save_state(pdev);
2356 pci_disable_device(pdev);
2357 pci_set_power_state(pdev, device_state);
2358
2359 spin_lock_irqsave(&hisi_hba->lock, flags);
2360 hisi_sas_release_tasks(hisi_hba);
2361 spin_unlock_irqrestore(&hisi_hba->lock, flags);
2362
2363 sas_suspend_ha(sha);
2364 return 0;
2365}
2366
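The AXI-quiesce step in the suspend path relies on readl_poll_timeout() from <linux/iopoll.h>: re-read AM_CURR_TRANS_RETURN every 10 microseconds until it reads 0x3 (all outstanding transactions retired), returning -ETIMEDOUT after 100 microseconds. Its shape, as used above:

    #include <linux/iopoll.h>

    u32 status;
    /* readl_poll_timeout(addr, val, break_condition, sleep_us, timeout_us) */
    int rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
                                AM_CURR_TRANS_RETURN,
                                status, status == 0x3, 10, 100);
    if (rc)         /* -ETIMEDOUT: the bus never went idle */
            /* undo the quiesce and fail the suspend, as above */;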
2367static int hisi_sas_v3_resume(struct pci_dev *pdev)
2368{
2369 struct sas_ha_struct *sha = pci_get_drvdata(pdev);
2370 struct hisi_hba *hisi_hba = sha->lldd_ha;
2371 struct Scsi_Host *shost = hisi_hba->shost;
2372 struct device *dev = hisi_hba->dev;
2373 unsigned int rc;
2374 u32 device_state = pdev->current_state;
2375
2376 dev_warn(dev, "resuming from power state [D%d]\n",
2377 device_state);
2378 pci_set_power_state(pdev, PCI_D0);
2379 pci_enable_wake(pdev, PCI_D0, 0);
2380 pci_restore_state(pdev);
2381 rc = pci_enable_device(pdev);
2382 if (rc)
2383 dev_err(dev, "enable device failed during resume (%d)\n", rc);
2384
2385 pci_set_master(pdev);
2386 scsi_unblock_requests(shost);
2387 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
2388
2389 sas_prep_resume_ha(sha);
2390 init_reg_v3_hw(hisi_hba);
2391 hisi_hba->hw->phys_init(hisi_hba);
2392 sas_resume_ha(sha);
2393 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
2394
2395 return 0;
2396}
2397
2165static const struct pci_device_id sas_v3_pci_table[] = { 2398static const struct pci_device_id sas_v3_pci_table[] = {
2166 { PCI_VDEVICE(HUAWEI, 0xa230), hip08 }, 2399 { PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
2167 {} 2400 {}
2168}; 2401};
2169 2402
2403static const struct pci_error_handlers hisi_sas_err_handler = {
2404 .error_detected = hisi_sas_error_detected_v3_hw,
2405 .mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
2406 .slot_reset = hisi_sas_slot_reset_v3_hw,
2407};
2408
2170static struct pci_driver sas_v3_pci_driver = { 2409static struct pci_driver sas_v3_pci_driver = {
2171 .name = DRV_NAME, 2410 .name = DRV_NAME,
2172 .id_table = sas_v3_pci_table, 2411 .id_table = sas_v3_pci_table,
2173 .probe = hisi_sas_v3_probe, 2412 .probe = hisi_sas_v3_probe,
2174 .remove = hisi_sas_v3_remove, 2413 .remove = hisi_sas_v3_remove,
2414 .suspend = hisi_sas_v3_suspend,
2415 .resume = hisi_sas_v3_resume,
2416 .err_handler = &hisi_sas_err_handler,
2175}; 2417};
2176 2418
2177module_pci_driver(sas_v3_pci_driver); 2419module_pci_driver(sas_v3_pci_driver);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fe3a0da3ec97..57bf43e34863 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev)
 
 	scsi_proc_hostdir_rm(shost->hostt);
 
+	/* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+	rcu_barrier();
+
 	if (shost->tmf_work_q)
 		destroy_workqueue(shost->tmf_work_q);
 	if (shost->ehandler)
@@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev)
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);
 
+	destroy_rcu_head(&shost->rcu);
+
 	if (shost->shost_state == SHOST_CREATED) {
 		/*
 		 * Free the shost_dev device name here if scsi_host_alloc()
@@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	INIT_LIST_HEAD(&shost->starved_list);
 	init_waitqueue_head(&shost->host_wait);
 	mutex_init(&shost->scan_mutex);
+	init_rcu_head(&shost->rcu);
 
 	index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
 	if (index < 0)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 287e5eb0723f..87b260e403ec 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3518,7 +3518,7 @@ out:
 
 	if (rc != IO_OK)
 		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
-			"Error, could not get enclosure information\n");
+			"Error, could not get enclosure information");
 }
 
 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
@@ -4619,21 +4619,13 @@ sglist_finished:
 	return 0;
 }
 
-#define BUFLEN 128
 static inline void warn_zero_length_transfer(struct ctlr_info *h,
 						u8 *cdb, int cdb_len,
 						const char *func)
 {
-	char buf[BUFLEN];
-	int outlen;
-	int i;
-
-	outlen = scnprintf(buf, BUFLEN,
-			"%s: Blocking zero-length request: CDB:", func);
-	for (i = 0; i < cdb_len; i++)
-		outlen += scnprintf(buf+outlen, BUFLEN - outlen,
-				"%02hhx", cdb[i]);
-	dev_warn(&h->pdev->dev, "%s\n", buf);
+	dev_warn(&h->pdev->dev,
+		 "%s: Blocking zero-length request: CDB:%*phN\n",
+		 func, cdb_len, cdb);
 }
 
 #define IO_ACCEL_INELIGIBLE 1
@@ -8223,8 +8215,6 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
 
 		if (!device)
 			continue;
-		if (!device->scsi3addr)
-			continue;
 		if (!hpsa_vpd_page_supported(h, device->scsi3addr,
 						HPSA_VPD_LV_IOACCEL_STATUS))
 			continue;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 0d2f7eb3acb6..b1b1d3a3b173 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -181,7 +181,7 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
 		break;
 	default:
 		break;
-	};
+	}
 }
 
 /**
@@ -220,7 +220,7 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
 	default:
 		break;
 
-	};
+	}
 }
 
 #else
@@ -464,7 +464,7 @@ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
 	default:
 		vhost->state = state;
 		break;
-	};
+	}
 
 	return rc;
 }
@@ -500,7 +500,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 			break;
 		default:
 			break;
-		};
+		}
 		break;
 	case IBMVFC_HOST_ACTION_TGT_INIT:
 		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
@@ -515,7 +515,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 		default:
 			vhost->action = action;
 			break;
-		};
+		}
 		break;
 	case IBMVFC_HOST_ACTION_LOGO:
 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -526,7 +526,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 	default:
 		vhost->action = action;
 		break;
-	};
+	}
 }
 
 /**
@@ -1601,7 +1601,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
 	case IBMVFC_ACTIVE:
 		result = 0;
 		break;
-	};
+	}
 
 	return result;
 }
@@ -1856,7 +1856,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
 		break;
 	default:
 		return -ENOTSUPP;
-	};
+	}
 
 	if (port_id == -1)
 		return -EINVAL;
@@ -2661,7 +2661,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 			vhost->delay_init = 1;
 			__ibmvfc_reset_host(vhost);
 			break;
-		};
+		}
 
 		break;
 	case IBMVFC_AE_LINK_UP:
@@ -2715,7 +2715,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
 	default:
 		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
 		break;
-	};
+	}
 }
 
 /**
@@ -3351,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
 			rsp->status, rsp->error, status);
 		break;
-	};
+	}
 
 	kref_put(&tgt->kref, ibmvfc_release_tgt);
 	ibmvfc_free_event(evt);
@@ -3451,7 +3451,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
 			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
 		break;
-	};
+	}
 
 	kref_put(&tgt->kref, ibmvfc_release_tgt);
 	ibmvfc_free_event(evt);
@@ -3522,7 +3522,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
 	default:
 		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
 		break;
-	};
+	}
 
 	if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
@@ -3626,7 +3626,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
 			ibmvfc_get_fc_type(fc_reason), fc_reason,
 			ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
 		break;
-	};
+	}
 
 	kref_put(&tgt->kref, ibmvfc_release_tgt);
 	ibmvfc_free_event(evt);
@@ -3838,7 +3838,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 			rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
 			rsp->fc_explain, status);
 		break;
-	};
+	}
 
 	kref_put(&tgt->kref, ibmvfc_release_tgt);
 	ibmvfc_free_event(evt);
@@ -4236,7 +4236,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
 	case IBMVFC_HOST_ACTION_REENABLE:
 	default:
 		break;
-	};
+	}
 
 	return 1;
 }
@@ -4464,7 +4464,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		break;
 	default:
 		break;
-	};
+	}
 
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
 }
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 2799a6b08f73..c3a76af9f5fa 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -122,7 +122,7 @@ static bool connection_broken(struct scsi_info *vscsi)
122 cpu_to_be64(buffer[MSG_HI]), 122 cpu_to_be64(buffer[MSG_HI]),
123 cpu_to_be64(buffer[MSG_LOW])); 123 cpu_to_be64(buffer[MSG_LOW]));
124 124
125 pr_debug("connection_broken: rc %ld\n", h_return_code); 125 dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
126 126
127 if (h_return_code == H_CLOSED) 127 if (h_return_code == H_CLOSED)
128 rc = true; 128 rc = true;
@@ -210,7 +210,7 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
210 } 210 }
211 } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS); 211 } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
212 212
213 pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc); 213 dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
214 214
215 return rc; 215 return rc;
216} 216}
@@ -291,9 +291,9 @@ static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
291 ibmvscsis_delete_client_info(vscsi, false); 291 ibmvscsis_delete_client_info(vscsi, false);
292 } 292 }
293 293
294 pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n", 294 dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
295 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags, 295 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
296 vscsi->phyp_acr_state); 296 vscsi->phyp_acr_state);
297 } 297 }
298 return rc; 298 return rc;
299} 299}
@@ -428,8 +428,8 @@ static void ibmvscsis_disconnect(struct work_struct *work)
428 vscsi->flags |= DISCONNECT_SCHEDULED; 428 vscsi->flags |= DISCONNECT_SCHEDULED;
429 vscsi->flags &= ~SCHEDULE_DISCONNECT; 429 vscsi->flags &= ~SCHEDULE_DISCONNECT;
430 430
431 pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags, 431 dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
432 vscsi->state); 432 vscsi->flags, vscsi->state);
433 433
434 /* 434 /*
435 * check which state we are in and see if we 435 * check which state we are in and see if we
@@ -540,13 +540,14 @@ static void ibmvscsis_disconnect(struct work_struct *work)
540 } 540 }
541 541
542 if (wait_idle) { 542 if (wait_idle) {
543 pr_debug("disconnect start wait, active %d, sched %d\n", 543 dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
544 (int)list_empty(&vscsi->active_q), 544 (int)list_empty(&vscsi->active_q),
545 (int)list_empty(&vscsi->schedule_q)); 545 (int)list_empty(&vscsi->schedule_q));
546 if (!list_empty(&vscsi->active_q) || 546 if (!list_empty(&vscsi->active_q) ||
547 !list_empty(&vscsi->schedule_q)) { 547 !list_empty(&vscsi->schedule_q)) {
548 vscsi->flags |= WAIT_FOR_IDLE; 548 vscsi->flags |= WAIT_FOR_IDLE;
549 pr_debug("disconnect flags 0x%x\n", vscsi->flags); 549 dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
550 vscsi->flags);
550 /* 551 /*
551 * This routine is can not be called with the interrupt 552 * This routine is can not be called with the interrupt
552 * lock held. 553 * lock held.
@@ -555,7 +556,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
555 wait_for_completion(&vscsi->wait_idle); 556 wait_for_completion(&vscsi->wait_idle);
556 spin_lock_bh(&vscsi->intr_lock); 557 spin_lock_bh(&vscsi->intr_lock);
557 } 558 }
558 pr_debug("disconnect stop wait\n"); 559 dev_dbg(&vscsi->dev, "disconnect stop wait\n");
559 560
560 ibmvscsis_adapter_idle(vscsi); 561 ibmvscsis_adapter_idle(vscsi);
561 } 562 }
@@ -597,8 +598,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
597 598
598 vscsi->flags |= flag_bits; 599 vscsi->flags |= flag_bits;
599 600
600 pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n", 601 dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
601 new_state, flag_bits, vscsi->flags, vscsi->state); 602 new_state, flag_bits, vscsi->flags, vscsi->state);
602 603
603 if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) { 604 if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
604 vscsi->flags |= SCHEDULE_DISCONNECT; 605 vscsi->flags |= SCHEDULE_DISCONNECT;
@@ -648,8 +649,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
648 } 649 }
649 } 650 }
650 651
651 pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", 652 dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
652 vscsi->flags, vscsi->new_state); 653 vscsi->flags, vscsi->new_state);
653} 654}
654 655
655/** 656/**
@@ -724,7 +725,8 @@ static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
724 break; 725 break;
725 726
726 case H_CLOSED: 727 case H_CLOSED:
727 pr_warn("init_msg: failed to send, rc %ld\n", rc); 728 dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
729 rc);
728 rc = 0; 730 rc = 0;
729 break; 731 break;
730 } 732 }
@@ -768,7 +770,7 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
768{ 770{
769 long rc = ADAPT_SUCCESS; 771 long rc = ADAPT_SUCCESS;
770 772
771 pr_debug("init_msg: state 0x%hx\n", vscsi->state); 773 dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
772 774
773 rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, 775 rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
774 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, 776 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
@@ -776,10 +778,10 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
776 if (rc == H_SUCCESS) { 778 if (rc == H_SUCCESS) {
777 vscsi->client_data.partition_number = 779 vscsi->client_data.partition_number =
778 be64_to_cpu(*(u64 *)vscsi->map_buf); 780 be64_to_cpu(*(u64 *)vscsi->map_buf);
779 pr_debug("init_msg, part num %d\n", 781 dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
780 vscsi->client_data.partition_number); 782 vscsi->client_data.partition_number);
781 } else { 783 } else {
782 pr_debug("init_msg h_vioctl rc %ld\n", rc); 784 dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
783 rc = ADAPT_SUCCESS; 785 rc = ADAPT_SUCCESS;
784 } 786 }
785 787
@@ -813,7 +815,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
813 if (rc == H_SUCCESS) 815 if (rc == H_SUCCESS)
814 vscsi->flags |= PREP_FOR_SUSPEND_ENABLED; 816 vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
815 else if (rc != H_NOT_FOUND) 817 else if (rc != H_NOT_FOUND)
816 pr_err("Error from Enable Prepare for Suspend: %ld\n", rc); 818 dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
819 rc);
817 820
818 vscsi->flags &= PRESERVE_FLAG_FIELDS; 821 vscsi->flags &= PRESERVE_FLAG_FIELDS;
819 vscsi->rsp_q_timer.timer_pops = 0; 822 vscsi->rsp_q_timer.timer_pops = 0;
@@ -822,8 +825,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
822 825
823 rc = vio_enable_interrupts(vscsi->dma_dev); 826 rc = vio_enable_interrupts(vscsi->dma_dev);
824 if (rc) { 827 if (rc) {
825 pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n", 828 dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
826 rc); 829 rc);
827 return rc; 830 return rc;
828 } 831 }
829 832
@@ -883,7 +886,7 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
883 int bytes; 886 int bytes;
884 long rc = ADAPT_SUCCESS; 887 long rc = ADAPT_SUCCESS;
885 888
886 pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); 889 dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
887 890
888 /* don't reset, the client did it for us */ 891 /* don't reset, the client did it for us */
889 if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { 892 if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
@@ -906,7 +909,8 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
906 } 909 }
907 910
908 if (rc != ADAPT_SUCCESS) { 911 if (rc != ADAPT_SUCCESS) {
909 pr_debug("reset_queue: reg_crq rc %ld\n", rc); 912 dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
913 rc);
910 914
911 vscsi->state = ERR_DISCONNECTED; 915 vscsi->state = ERR_DISCONNECTED;
912 vscsi->flags |= RESPONSE_Q_DOWN; 916 vscsi->flags |= RESPONSE_Q_DOWN;
@@ -985,14 +989,15 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
985 /* See if there is a Resume event in the queue */ 989 /* See if there is a Resume event in the queue */
986 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; 990 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
987 991
988 pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n", 992 dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
989 vscsi->flags, vscsi->state, (int)crq->valid); 993 vscsi->flags, vscsi->state, (int)crq->valid);
990 994
991 if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) { 995 if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
992 rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0, 996 rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
993 0, 0); 997 0, 0);
994 if (rc) { 998 if (rc) {
995 pr_err("Ready for Suspend Vioctl failed: %ld\n", rc); 999 dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
1000 rc);
996 rc = 0; 1001 rc = 0;
997 } 1002 }
998 } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) && 1003 } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
@@ -1012,7 +1017,7 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
1012 1017
1013 if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) || 1018 if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
1014 (crq->format != RESUME_FROM_SUSP))) 1019 (crq->format != RESUME_FROM_SUSP)))
1015 pr_err("Invalid element in CRQ after Prepare for Suspend"); 1020 dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
1016 } 1021 }
1017 1022
1018 vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED); 1023 vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
@@ -1036,8 +1041,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
1036{ 1041{
1037 long rc = ADAPT_SUCCESS; 1042 long rc = ADAPT_SUCCESS;
1038 1043
1039 pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n", 1044 dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
1040 (int)crq->format, vscsi->flags, vscsi->state); 1045 (int)crq->format, vscsi->flags, vscsi->state);
1041 1046
1042 switch (crq->format) { 1047 switch (crq->format) {
1043 case MIGRATED: 1048 case MIGRATED:
@@ -1073,14 +1078,14 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
1073 !list_empty(&vscsi->schedule_q) || 1078 !list_empty(&vscsi->schedule_q) ||
1074 !list_empty(&vscsi->waiting_rsp) || 1079 !list_empty(&vscsi->waiting_rsp) ||
1075 !list_empty(&vscsi->active_q)) { 1080 !list_empty(&vscsi->active_q)) {
1076 pr_debug("debit %d, sched %d, wait %d, active %d\n", 1081 dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
1077 vscsi->debit, 1082 vscsi->debit,
1078 (int)list_empty(&vscsi->schedule_q), 1083 (int)list_empty(&vscsi->schedule_q),
1079 (int)list_empty(&vscsi->waiting_rsp), 1084 (int)list_empty(&vscsi->waiting_rsp),
1080 (int)list_empty(&vscsi->active_q)); 1085 (int)list_empty(&vscsi->active_q));
1081 pr_warn("connection lost with outstanding work\n"); 1086 dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
1082 } else { 1087 } else {
1083 pr_debug("trans_event: SRP Processing, but no outstanding work\n"); 1088 dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
1084 } 1089 }
1085 1090
1086 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 1091 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
@@ -1097,8 +1102,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
1097 break; 1102 break;
1098 1103
1099 case PREPARE_FOR_SUSPEND: 1104 case PREPARE_FOR_SUSPEND:
1100 pr_debug("Prep for Suspend, crq status = 0x%x\n", 1105 dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
1101 (int)crq->status); 1106 (int)crq->status);
1102 switch (vscsi->state) { 1107 switch (vscsi->state) {
1103 case ERR_DISCONNECTED: 1108 case ERR_DISCONNECTED:
1104 case WAIT_CONNECTION: 1109 case WAIT_CONNECTION:
@@ -1119,15 +1124,15 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
1119 case ERR_DISCONNECT: 1124 case ERR_DISCONNECT:
1120 case ERR_DISCONNECT_RECONNECT: 1125 case ERR_DISCONNECT_RECONNECT:
1121 case WAIT_IDLE: 1126 case WAIT_IDLE:
1122 pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n", 1127 dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
1123 vscsi->state); 1128 vscsi->state);
1124 break; 1129 break;
1125 } 1130 }
1126 break; 1131 break;
1127 1132
1128 case RESUME_FROM_SUSP: 1133 case RESUME_FROM_SUSP:
1129 pr_debug("Resume from Suspend, crq status = 0x%x\n", 1134 dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
1130 (int)crq->status); 1135 (int)crq->status);
1131 if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) { 1136 if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
1132 vscsi->flags |= PREP_FOR_SUSPEND_ABORTED; 1137 vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
1133 } else { 1138 } else {
@@ -1152,8 +1157,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
1152 1157
1153 rc = vscsi->flags & SCHEDULE_DISCONNECT; 1158 rc = vscsi->flags & SCHEDULE_DISCONNECT;
1154 1159
1155 pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n", 1160 dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
1156 vscsi->flags, vscsi->state, rc); 1161 vscsi->flags, vscsi->state, rc);
1157 1162
1158 return rc; 1163 return rc;
1159} 1164}
@@ -1175,8 +1180,8 @@ static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
1175 bool ack = true; 1180 bool ack = true;
1176 volatile u8 valid; 1181 volatile u8 valid;
1177 1182
1178 pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n", 1183 dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
1179 vscsi->flags, vscsi->state, vscsi->cmd_q.index); 1184 vscsi->flags, vscsi->state, vscsi->cmd_q.index);
1180 1185
1181 rc = vscsi->flags & SCHEDULE_DISCONNECT; 1186 rc = vscsi->flags & SCHEDULE_DISCONNECT;
1182 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; 1187 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
@@ -1204,7 +1209,7 @@ poll_work:
1204 * if a tranport event has occurred leave 1209 * if a tranport event has occurred leave
1205 * everything but transport events on the queue 1210 * everything but transport events on the queue
1206 */ 1211 */
1207 pr_debug("poll_cmd_q, ignoring\n"); 1212 dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
1208 1213
1209 /* 1214 /*
1210 * need to decrement the queue index so we can 1215 * need to decrement the queue index so we can
@@ -1233,7 +1238,7 @@ poll_work:
1233 if (ack) { 1238 if (ack) {
1234 vio_enable_interrupts(vscsi->dma_dev); 1239 vio_enable_interrupts(vscsi->dma_dev);
1235 ack = false; 1240 ack = false;
1236 pr_debug("poll_cmd_q, reenabling interrupts\n"); 1241 dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
1237 } 1242 }
1238 valid = crq->valid; 1243 valid = crq->valid;
1239 dma_rmb(); 1244 dma_rmb();
@@ -1241,7 +1246,7 @@ poll_work:
1241 goto poll_work; 1246 goto poll_work;
1242 } 1247 }
1243 1248
1244 pr_debug("Leaving poll_cmd_q: rc %ld\n", rc); 1249 dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
1245} 1250}
1246 1251
1247/** 1252/**
@@ -1258,9 +1263,9 @@ static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
1258{ 1263{
1259 struct ibmvscsis_cmd *cmd, *nxt; 1264 struct ibmvscsis_cmd *cmd, *nxt;
1260 1265
1261 pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n", 1266 dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
1262 (int)list_empty(&vscsi->waiting_rsp), 1267 (int)list_empty(&vscsi->waiting_rsp),
1263 vscsi->rsp_q_timer.started); 1268 vscsi->rsp_q_timer.started);
1264 1269
1265 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) { 1270 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1266 list_del(&cmd->list); 1271 list_del(&cmd->list);
@@ -1317,8 +1322,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1317 int free_qs = false; 1322 int free_qs = false;
1318 long rc = 0; 1323 long rc = 0;
1319 1324
1320 pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags, 1325 dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
1321 vscsi->state); 1326 vscsi->flags, vscsi->state);
1322 1327
1323 /* Only need to free qs if we're disconnecting from client */ 1328 /* Only need to free qs if we're disconnecting from client */
1324 if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT) 1329 if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
@@ -1336,7 +1341,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1336 break; 1341 break;
1337 case ERR_DISCONNECT_RECONNECT: 1342 case ERR_DISCONNECT_RECONNECT:
1338 ibmvscsis_reset_queue(vscsi); 1343 ibmvscsis_reset_queue(vscsi);
1339 pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags); 1344 dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
1345 vscsi->flags);
1340 break; 1346 break;
1341 1347
1342 case ERR_DISCONNECT: 1348 case ERR_DISCONNECT:
@@ -1347,8 +1353,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1347 vscsi->state = ERR_DISCONNECTED; 1353 vscsi->state = ERR_DISCONNECTED;
1348 else 1354 else
1349 vscsi->state = WAIT_ENABLED; 1355 vscsi->state = WAIT_ENABLED;
1350 pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n", 1356 dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
1351 vscsi->flags, vscsi->state); 1357 vscsi->flags, vscsi->state);
1352 break; 1358 break;
1353 1359
1354 case WAIT_IDLE: 1360 case WAIT_IDLE:
@@ -1370,15 +1376,15 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1370 vscsi->flags &= ~DISCONNECT_SCHEDULED; 1376 vscsi->flags &= ~DISCONNECT_SCHEDULED;
1371 } 1377 }
1372 1378
1373 pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n", 1379 dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
1374 vscsi->flags, vscsi->state); 1380 vscsi->flags, vscsi->state);
1375 ibmvscsis_poll_cmd_q(vscsi); 1381 ibmvscsis_poll_cmd_q(vscsi);
1376 break; 1382 break;
1377 1383
1378 case ERR_DISCONNECTED: 1384 case ERR_DISCONNECTED:
1379 vscsi->flags &= ~DISCONNECT_SCHEDULED; 1385 vscsi->flags &= ~DISCONNECT_SCHEDULED;
1380 pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n", 1386 dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
1381 vscsi->flags, vscsi->state); 1387 vscsi->flags, vscsi->state);
1382 break; 1388 break;
1383 1389
1384 default: 1390 default:
@@ -1419,13 +1425,13 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
1419 vscsi->phyp_acr_state = 0; 1425 vscsi->phyp_acr_state = 0;
1420 vscsi->phyp_acr_flags = 0; 1426 vscsi->phyp_acr_flags = 0;
1421 1427
1422 pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n", 1428 dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
1423 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags, 1429 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
1424 vscsi->phyp_acr_state); 1430 vscsi->phyp_acr_state);
1425 } 1431 }
1426 1432
1427 pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n", 1433 dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
1428 vscsi->flags, vscsi->state, vscsi->new_state); 1434 vscsi->flags, vscsi->state, vscsi->new_state);
1429} 1435}
1430 1436
1431/** 1437/**
@@ -1464,8 +1470,8 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
1464 cmd->init_time = mftb(); 1470 cmd->init_time = mftb();
1465 iue->remote_token = crq->IU_data_ptr; 1471 iue->remote_token = crq->IU_data_ptr;
1466 iue->iu_len = len; 1472 iue->iu_len = len;
1467 pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n", 1473 dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
1468 be64_to_cpu(crq->IU_data_ptr), cmd->init_time); 1474 be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
1469 break; 1475 break;
1470 case H_PERMISSION: 1476 case H_PERMISSION:
1471 if (connection_broken(vscsi)) 1477 if (connection_broken(vscsi))
@@ -1536,10 +1542,10 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1536 if (connection_broken(vscsi)) 1542 if (connection_broken(vscsi))
1537 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); 1543 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
1538 } 1544 }
1539 pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n", 1545 dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
1540 rc); 1546 rc);
1541 pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n", 1547 dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
1542 be64_to_cpu(mad->buffer), vscsi->flags, flag_bits); 1548 be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
1543 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 1549 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1544 flag_bits); 1550 flag_bits);
1545 goto free_dma; 1551 goto free_dma;
@@ -1595,7 +1601,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1595 1601
1596free_dma: 1602free_dma:
1597 dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token); 1603 dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
1598 pr_debug("Leaving adapter_info, rc %ld\n", rc); 1604 dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
1599 1605
1600 return rc; 1606 return rc;
1601} 1607}
@@ -1629,7 +1635,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1629 */ 1635 */
1630 min_len = offsetof(struct capabilities, migration); 1636 min_len = offsetof(struct capabilities, migration);
1631 if ((olen < min_len) || (olen > PAGE_SIZE)) { 1637 if ((olen < min_len) || (olen > PAGE_SIZE)) {
1632 pr_warn("cap_mad: invalid len %d\n", olen); 1638 dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
1633 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); 1639 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
1634 return 0; 1640 return 0;
1635 } 1641 }
@@ -1654,9 +1660,9 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1654 common = (struct mad_capability_common *)&cap->migration; 1660 common = (struct mad_capability_common *)&cap->migration;
1655 1661
1656 while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) { 1662 while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
1657 pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n", 1663 dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
1658 len, be32_to_cpu(common->cap_type), 1664 len, be32_to_cpu(common->cap_type),
1659 be16_to_cpu(common->length)); 1665 be16_to_cpu(common->length));
1660 1666
1661 cap_len = be16_to_cpu(common->length); 1667 cap_len = be16_to_cpu(common->length);
1662 if (cap_len > len) { 1668 if (cap_len > len) {
@@ -1673,7 +1679,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1673 1679
1674 switch (common->cap_type) { 1680 switch (common->cap_type) {
1675 default: 1681 default:
1676 pr_debug("cap_mad: unsupported capability\n"); 1682 dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
1677 common->server_support = 0; 1683 common->server_support = 0;
1678 flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED); 1684 flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
1679 cap->flags &= ~flag; 1685 cap->flags &= ~flag;
@@ -1693,8 +1699,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1693 be64_to_cpu(mad->buffer)); 1699 be64_to_cpu(mad->buffer));
1694 1700
1695 if (rc != H_SUCCESS) { 1701 if (rc != H_SUCCESS) {
1696 pr_debug("cap_mad: failed to copy to client, rc %ld\n", 1702 dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
1697 rc); 1703 rc);
1698 1704
1699 if (rc == H_PERMISSION) { 1705 if (rc == H_PERMISSION) {
1700 if (connection_broken(vscsi)) 1706 if (connection_broken(vscsi))
@@ -1702,8 +1708,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1702 CLIENT_FAILED); 1708 CLIENT_FAILED);
1703 } 1709 }
1704 1710
1705 pr_warn("cap_mad: error copying data to client, rc %ld\n", 1711 dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
1706 rc); 1712 rc);
1707 ibmvscsis_post_disconnect(vscsi, 1713 ibmvscsis_post_disconnect(vscsi,
1708 ERR_DISCONNECT_RECONNECT, 1714 ERR_DISCONNECT_RECONNECT,
1709 flag_bits); 1715 flag_bits);
@@ -1712,8 +1718,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1712 1718
1713 dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token); 1719 dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
1714 1720
1715 pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n", 1721 dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
1716 rc, vscsi->client_cap); 1722 rc, vscsi->client_cap);
1717 1723
1718 return rc; 1724 return rc;
1719} 1725}
@@ -1749,7 +1755,7 @@ static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1749 vscsi->fast_fail = true; 1755 vscsi->fast_fail = true;
1750 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS); 1756 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1751 } else { 1757 } else {
1752 pr_warn("fast fail mad sent after login\n"); 1758 dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
1753 mad->status = cpu_to_be16(VIOSRP_MAD_FAILED); 1759 mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
1754 } 1760 }
1755 break; 1761 break;
@@ -1809,9 +1815,9 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
1809 */ 1815 */
1810 if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) || 1816 if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
1811 (vscsi->state == SRP_PROCESSING)) { 1817 (vscsi->state == SRP_PROCESSING)) {
1812 pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n", 1818 dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
1813 vscsi->flags, (int)vscsi->rsp_q_timer.started, 1819 vscsi->flags, (int)vscsi->rsp_q_timer.started,
1814 vscsi->rsp_q_timer.timer_pops); 1820 vscsi->rsp_q_timer.timer_pops);
1815 1821
1816 /* 1822 /*
1817 * Check if the timer is running; if it 1823 * Check if the timer is running; if it
@@ -1947,8 +1953,9 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1947 be64_to_cpu(msg_hi), 1953 be64_to_cpu(msg_hi),
1948 be64_to_cpu(cmd->rsp.tag)); 1954 be64_to_cpu(cmd->rsp.tag));
1949 1955
1950 pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n", 1956 dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
1951 cmd, be64_to_cpu(cmd->rsp.tag), rc); 1957 cmd, be64_to_cpu(cmd->rsp.tag),
1958 rc);
1952 1959
1953 /* if all ok free up the command 1960 /* if all ok free up the command
1954 * element resources 1961 * element resources
@@ -2003,7 +2010,8 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
2003 list_add_tail(&cmd->list, &vscsi->waiting_rsp); 2010 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2004 ibmvscsis_send_messages(vscsi); 2011 ibmvscsis_send_messages(vscsi);
2005 } else { 2012 } else {
2006 pr_debug("Error sending mad response, rc %ld\n", rc); 2013 dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
2014 rc);
2007 if (rc == H_PERMISSION) { 2015 if (rc == H_PERMISSION) {
2008 if (connection_broken(vscsi)) 2016 if (connection_broken(vscsi))
2009 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); 2017 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
@@ -2039,8 +2047,8 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
2039 * expecting a response. 2047 * expecting a response.
2040 */ 2048 */
2041 case WAIT_CONNECTION: 2049 case WAIT_CONNECTION:
2042 pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n", 2050 dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
2043 vscsi->flags); 2051 vscsi->flags);
2044 return ADAPT_SUCCESS; 2052 return ADAPT_SUCCESS;
2045 2053
2046 case SRP_PROCESSING: 2054 case SRP_PROCESSING:
@@ -2075,12 +2083,12 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
2075 if (!rc) { 2083 if (!rc) {
2076 mad = (struct mad_common *)&vio_iu(iue)->mad; 2084 mad = (struct mad_common *)&vio_iu(iue)->mad;
2077 2085
2078 pr_debug("mad: type %d\n", be32_to_cpu(mad->type)); 2086 dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
2079 2087
2080 rc = ibmvscsis_process_mad(vscsi, iue); 2088 rc = ibmvscsis_process_mad(vscsi, iue);
2081 2089
2082 pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status), 2090 dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
2083 rc); 2091 be16_to_cpu(mad->status), rc);
2084 2092
2085 if (!rc) 2093 if (!rc)
2086 ibmvscsis_send_mad_resp(vscsi, cmd, crq); 2094 ibmvscsis_send_mad_resp(vscsi, cmd, crq);
@@ -2088,7 +2096,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
2088 ibmvscsis_free_cmd_resources(vscsi, cmd); 2096 ibmvscsis_free_cmd_resources(vscsi, cmd);
2089 } 2097 }
2090 2098
2091 pr_debug("Leaving mad, rc %ld\n", rc); 2099 dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
2092 return rc; 2100 return rc;
2093} 2101}
2094 2102
@@ -2211,16 +2219,17 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
2211{ 2219{
2212 char *name = tport->tport_name; 2220 char *name = tport->tport_name;
2213 struct ibmvscsis_nexus *nexus; 2221 struct ibmvscsis_nexus *nexus;
2222 struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
2214 int rc; 2223 int rc;
2215 2224
2216 if (tport->ibmv_nexus) { 2225 if (tport->ibmv_nexus) {
2217 pr_debug("tport->ibmv_nexus already exists\n"); 2226 dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
2218 return 0; 2227 return 0;
2219 } 2228 }
2220 2229
2221 nexus = kzalloc(sizeof(*nexus), GFP_KERNEL); 2230 nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
2222 if (!nexus) { 2231 if (!nexus) {
2223 pr_err("Unable to allocate struct ibmvscsis_nexus\n"); 2232 dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
2224 return -ENOMEM; 2233 return -ENOMEM;
2225 } 2234 }
2226 2235
@@ -2316,7 +2325,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
2316 cmd->rsp.format = VIOSRP_SRP_FORMAT; 2325 cmd->rsp.format = VIOSRP_SRP_FORMAT;
2317 cmd->rsp.tag = req->tag; 2326 cmd->rsp.tag = req->tag;
2318 2327
2319 pr_debug("srp_login: reason 0x%x\n", reason); 2328 dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
2320 2329
2321 if (reason) 2330 if (reason)
2322 rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason); 2331 rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
@@ -2333,7 +2342,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
2333 ibmvscsis_free_cmd_resources(vscsi, cmd); 2342 ibmvscsis_free_cmd_resources(vscsi, cmd);
2334 } 2343 }
2335 2344
2336 pr_debug("Leaving srp_login, rc %ld\n", rc); 2345 dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
2337 return rc; 2346 return rc;
2338} 2347}
2339 2348
@@ -2415,8 +2424,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
2415 2424
2416 case SRP_TSK_MGMT: 2425 case SRP_TSK_MGMT:
2417 tsk = &vio_iu(iue)->srp.tsk_mgmt; 2426 tsk = &vio_iu(iue)->srp.tsk_mgmt;
2418 pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, 2427 dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
2419 tsk->tag); 2428 tsk->tag, tsk->tag);
2420 cmd->rsp.tag = tsk->tag; 2429 cmd->rsp.tag = tsk->tag;
2421 vscsi->debit += 1; 2430 vscsi->debit += 1;
2422 cmd->type = TASK_MANAGEMENT; 2431 cmd->type = TASK_MANAGEMENT;
@@ -2425,8 +2434,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
2425 break; 2434 break;
2426 2435
2427 case SRP_CMD: 2436 case SRP_CMD:
2428 pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, 2437 dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
2429 srp->tag); 2438 srp->tag, srp->tag);
2430 cmd->rsp.tag = srp->tag; 2439 cmd->rsp.tag = srp->tag;
2431 vscsi->debit += 1; 2440 vscsi->debit += 1;
2432 cmd->type = SCSI_CDB; 2441 cmd->type = SCSI_CDB;
@@ -2603,7 +2612,7 @@ static int read_dma_window(struct scsi_info *vscsi)
2603 "ibm,my-dma-window", 2612 "ibm,my-dma-window",
2604 NULL); 2613 NULL);
2605 if (!dma_window) { 2614 if (!dma_window) {
2606 pr_err("Couldn't find ibm,my-dma-window property\n"); 2615 dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
2607 return -1; 2616 return -1;
2608 } 2617 }
2609 2618
@@ -2613,7 +2622,7 @@ static int read_dma_window(struct scsi_info *vscsi)
2613 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells", 2622 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2614 NULL); 2623 NULL);
2615 if (!prop) { 2624 if (!prop) {
2616 pr_warn("Couldn't find ibm,#dma-address-cells property\n"); 2625 dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
2617 dma_window++; 2626 dma_window++;
2618 } else { 2627 } else {
2619 dma_window += be32_to_cpu(*prop); 2628 dma_window += be32_to_cpu(*prop);
@@ -2622,7 +2631,7 @@ static int read_dma_window(struct scsi_info *vscsi)
2622 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells", 2631 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2623 NULL); 2632 NULL);
2624 if (!prop) { 2633 if (!prop) {
2625 pr_warn("Couldn't find ibm,#dma-size-cells property\n"); 2634 dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
2626 dma_window++; 2635 dma_window++;
2627 } else { 2636 } else {
2628 dma_window += be32_to_cpu(*prop); 2637 dma_window += be32_to_cpu(*prop);
@@ -2808,8 +2817,8 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2808 2817
2809 srp_tsk->lun.scsi_lun[0] &= 0x3f; 2818 srp_tsk->lun.scsi_lun[0] &= 0x3f;
2810 2819
2811 pr_debug("calling submit_tmr, func %d\n", 2820 dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
2812 srp_tsk->tsk_mgmt_func); 2821 srp_tsk->tsk_mgmt_func);
2813 rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL, 2822 rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
2814 scsilun_to_int(&srp_tsk->lun), srp_tsk, 2823 scsilun_to_int(&srp_tsk->lun), srp_tsk,
2815 tcm_type, GFP_KERNEL, tag_to_abort, 0); 2824 tcm_type, GFP_KERNEL, tag_to_abort, 0);
@@ -3113,8 +3122,8 @@ static long srp_build_response(struct scsi_info *vscsi,
3113 if (cmd->type == SCSI_CDB) { 3122 if (cmd->type == SCSI_CDB) {
3114 rsp->status = ibmvscsis_fast_fail(vscsi, cmd); 3123 rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
3115 if (rsp->status) { 3124 if (rsp->status) {
3116 pr_debug("build_resp: cmd %p, scsi status %d\n", cmd, 3125 dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
3117 (int)rsp->status); 3126 cmd, (int)rsp->status);
3118 ibmvscsis_determine_resid(se_cmd, rsp); 3127 ibmvscsis_determine_resid(se_cmd, rsp);
3119 if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) { 3128 if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
3120 rsp->sense_data_len = 3129 rsp->sense_data_len =
@@ -3127,7 +3136,8 @@ static long srp_build_response(struct scsi_info *vscsi,
3127 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 3136 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3128 UCSOLNT_RESP_SHIFT; 3137 UCSOLNT_RESP_SHIFT;
3129 } else if (cmd->flags & CMD_FAST_FAIL) { 3138 } else if (cmd->flags & CMD_FAST_FAIL) {
3130 pr_debug("build_resp: cmd %p, fast fail\n", cmd); 3139 dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
3140 cmd);
3131 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 3141 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3132 UCSOLNT_RESP_SHIFT; 3142 UCSOLNT_RESP_SHIFT;
3133 } else { 3143 } else {
@@ -3340,7 +3350,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
3340 3350
3341 spin_lock_bh(&vscsi->intr_lock); 3351 spin_lock_bh(&vscsi->intr_lock);
3342 3352
3343 pr_debug("got interrupt\n"); 3353 dev_dbg(&vscsi->dev, "got interrupt\n");
3344 3354
3345 /* 3355 /*
3346 * if we are in a path where we are waiting for all pending commands 3356 * if we are in a path where we are waiting for all pending commands
@@ -3350,8 +3360,8 @@ static void ibmvscsis_handle_crq(unsigned long data)
3350 if (TARGET_STOP(vscsi)) { 3360 if (TARGET_STOP(vscsi)) {
3351 vio_enable_interrupts(vscsi->dma_dev); 3361 vio_enable_interrupts(vscsi->dma_dev);
3352 3362
3353 pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n", 3363 dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
3354 vscsi->flags, vscsi->state); 3364 vscsi->flags, vscsi->state);
3355 spin_unlock_bh(&vscsi->intr_lock); 3365 spin_unlock_bh(&vscsi->intr_lock);
3356 return; 3366 return;
3357 } 3367 }
@@ -3414,20 +3424,20 @@ cmd_work:
3414 if (ack) { 3424 if (ack) {
3415 vio_enable_interrupts(vscsi->dma_dev); 3425 vio_enable_interrupts(vscsi->dma_dev);
3416 ack = false; 3426 ack = false;
3417 pr_debug("handle_crq, reenabling interrupts\n"); 3427 dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
3418 } 3428 }
3419 valid = crq->valid; 3429 valid = crq->valid;
3420 dma_rmb(); 3430 dma_rmb();
3421 if (valid) 3431 if (valid)
3422 goto cmd_work; 3432 goto cmd_work;
3423 } else { 3433 } else {
3424 pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n", 3434 dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
3425 vscsi->flags, vscsi->state, vscsi->cmd_q.index); 3435 vscsi->flags, vscsi->state, vscsi->cmd_q.index);
3426 } 3436 }
3427 3437
3428 pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n", 3438 dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
3429 (int)list_empty(&vscsi->schedule_q), vscsi->flags, 3439 (int)list_empty(&vscsi->schedule_q), vscsi->flags,
3430 vscsi->state); 3440 vscsi->state);
3431 3441
3432 spin_unlock_bh(&vscsi->intr_lock); 3442 spin_unlock_bh(&vscsi->intr_lock);
3433} 3443}
@@ -3443,7 +3453,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3443 vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL); 3453 vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
3444 if (!vscsi) { 3454 if (!vscsi) {
3445 rc = -ENOMEM; 3455 rc = -ENOMEM;
3446 pr_err("probe: allocation of adapter failed\n"); 3456 dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
3447 return rc; 3457 return rc;
3448 } 3458 }
3449 3459
@@ -3456,14 +3466,14 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3456 snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s", 3466 snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
3457 dev_name(&vdev->dev)); 3467 dev_name(&vdev->dev));
3458 3468
3459 pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name); 3469 dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
3460 3470
3461 rc = read_dma_window(vscsi); 3471 rc = read_dma_window(vscsi);
3462 if (rc) 3472 if (rc)
3463 goto free_adapter; 3473 goto free_adapter;
3464 pr_debug("Probe: liobn 0x%x, riobn 0x%x\n", 3474 dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
3465 vscsi->dds.window[LOCAL].liobn, 3475 vscsi->dds.window[LOCAL].liobn,
3466 vscsi->dds.window[REMOTE].liobn); 3476 vscsi->dds.window[REMOTE].liobn);
3467 3477
3468 strcpy(vscsi->eye, "VSCSI "); 3478 strcpy(vscsi->eye, "VSCSI ");
3469 strncat(vscsi->eye, vdev->name, MAX_EYE); 3479 strncat(vscsi->eye, vdev->name, MAX_EYE);
@@ -3541,8 +3551,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3541 * client can connect" and the client isn't activated yet. 3551 * client can connect" and the client isn't activated yet.
3542 * We'll make the call again when he sends an init msg. 3552 * We'll make the call again when he sends an init msg.
3543 */ 3553 */
3544 pr_debug("probe hrc %ld, client partition num %d\n", 3554 dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
3545 hrc, vscsi->client_data.partition_number); 3555 hrc, vscsi->client_data.partition_number);
3546 3556
3547 tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq, 3557 tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
3548 (unsigned long)vscsi); 3558 (unsigned long)vscsi);
@@ -3602,7 +3612,7 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
3602{ 3612{
3603 struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev); 3613 struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
3604 3614
3605 pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); 3615 dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
3606 3616
3607 spin_lock_bh(&vscsi->intr_lock); 3617 spin_lock_bh(&vscsi->intr_lock);
3608 ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0); 3618 ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
@@ -3766,14 +3776,16 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3766 * attempt an srp_transfer_data. 3776 * attempt an srp_transfer_data.
3767 */ 3777 */
3768 if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { 3778 if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
3769 pr_err("write_pending failed since: %d\n", vscsi->flags); 3779 dev_err(&vscsi->dev, "write_pending failed since: %d\n",
3780 vscsi->flags);
3770 return -EIO; 3781 return -EIO;
3782
3771 } 3783 }
3772 3784
3773 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 3785 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
3774 1, 1); 3786 1, 1);
3775 if (rc) { 3787 if (rc) {
3776 pr_err("srp_transfer_data() failed: %d\n", rc); 3788 dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
3777 return -EIO; 3789 return -EIO;
3778 } 3790 }
3779 /* 3791 /*
@@ -3811,7 +3823,7 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
 	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
 			       1);
 	if (rc) {
-		pr_err("srp_transfer_data failed: %d\n", rc);
+		dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
 		sd = se_cmd->sense_buffer;
 		se_cmd->scsi_sense_length = 18;
 		memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
@@ -3834,7 +3846,7 @@ static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
 	struct scsi_info *vscsi = cmd->adapter;
 	uint len;
 
-	pr_debug("queue_status %p\n", se_cmd);
+	dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);
 
 	srp_build_response(vscsi, cmd, &len);
 	cmd->rsp.format = SRP_FORMAT;
@@ -3854,8 +3866,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
 	u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
 	uint len;
 
-	pr_debug("queue_tm_rsp %p, status %d\n",
+	dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
 		se_cmd, (int)se_cmd->se_tmr_req->response);
 
 	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
 	    cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
@@ -3877,8 +3889,12 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
 
 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
 {
-	pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
-		 se_cmd, se_cmd->tag);
+	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+						 se_cmd);
+	struct scsi_info *vscsi = cmd->adapter;
+
+	dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
+		se_cmd, se_cmd->tag);
 }
 
 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
@@ -3886,12 +3902,14 @@ static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
 					   const char *name)
 {
 	struct ibmvscsis_tport *tport;
+	struct scsi_info *vscsi;
 
 	tport = ibmvscsis_lookup_port(name);
 	if (tport) {
+		vscsi = container_of(tport, struct scsi_info, tport);
 		tport->tport_proto_id = SCSI_PROTOCOL_SRP;
-		pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
+		dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
 			name, tport, tport->tport_proto_id);
 		return &tport->tport_wwn;
 	}
 
@@ -3903,9 +3921,10 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
 	struct ibmvscsis_tport *tport = container_of(wwn,
 						     struct ibmvscsis_tport,
 						     tport_wwn);
+	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
 
-	pr_debug("drop_tport(%s)\n",
+	dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
 		config_item_name(&tport->tport_wwn.wwn_group.cg_item));
 }
 
 static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
@@ -3990,12 +4009,12 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
 
 	rc = kstrtoul(page, 0, &tmp);
 	if (rc < 0) {
-		pr_err("Unable to extract srpt_tpg_store_enable\n");
+		dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
 		return -EINVAL;
 	}
 
 	if ((tmp != 0) && (tmp != 1)) {
-		pr_err("Illegal value for srpt_tpg_store_enable\n");
+		dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
 		return -EINVAL;
 	}
 
@@ -4004,8 +4023,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
 		tport->enabled = true;
 		lrc = ibmvscsis_enable_change_state(vscsi);
 		if (lrc)
-			pr_err("enable_change_state failed, rc %ld state %d\n",
+			dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
 				lrc, vscsi->state);
 		spin_unlock_bh(&vscsi->intr_lock);
 	} else {
 		spin_lock_bh(&vscsi->intr_lock);
@@ -4015,7 +4034,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
 		spin_unlock_bh(&vscsi->intr_lock);
 	}
 
-	pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
+	dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
+		vscsi->state);
 
 	return count;
 }
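The ibmvscsis hunks above are a mechanical pr_err()/pr_debug() to dev_err()/dev_dbg() conversion: each message is now tied to the adapter's struct device, so a machine with several adapters gets attributable log lines. A standalone C sketch of the idea, with hypothetical names in place of the kernel's device model:

#include <stdio.h>
#include <stdarg.h>

struct adapter {
        const char *name;               /* stands in for struct device */
};

/* Per-device logger: prefixes every message, like dev_err() does. */
static void adapter_err(const struct adapter *a, const char *fmt, ...)
{
        va_list ap;

        fprintf(stderr, "%s: ", a->name);
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

int main(void)
{
        struct adapter a = { .name = "ibmvscsis0" };

        adapter_err(&a, "srp_transfer_data failed: %d\n", -5);
        return 0;
}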
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cc0187965eee..e07dd990e585 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9653,8 +9653,8 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
 			if (i == 0) {
 				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
 				ioa_cfg->hrrq[i].min_cmd_id = 0;
-				ioa_cfg->hrrq[i].max_cmd_id =
-				    (entries_each_hrrq - 1);
+				ioa_cfg->hrrq[i].max_cmd_id =
+					(entries_each_hrrq - 1);
 			} else {
 				entries_each_hrrq =
 					IPR_NUM_BASE_CMD_BLKS/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4d934d6c3e13..6198559abbd8 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -307,6 +307,7 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
 
 /**
  * iscsi_sw_tcp_xmit - TCP transmit
+ * @conn: iscsi connection
  **/
 static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
 {
@@ -357,6 +358,7 @@ error:
 
 /**
  * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ * @conn: iscsi connection
  */
 static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
 {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 9c50d2d9f27c..15a2fef51e38 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 	 */
 	switch (session->state) {
 	case ISCSI_STATE_FAILED:
+		/*
+		 * cmds should fail during shutdown, if the session
+		 * state is bad, allowing completion to happen
+		 */
+		if (unlikely(system_state != SYSTEM_RUNNING)) {
+			reason = FAILURE_SESSION_FAILED;
+			sc->result = DID_NO_CONNECT << 16;
+			break;
+		}
 	case ISCSI_STATE_IN_RECOVERY:
 		reason = FAILURE_SESSION_IN_RECOVERY;
 		sc->result = DID_IMM_RETRY << 16;
@@ -1979,6 +1988,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 
 	if (session->state != ISCSI_STATE_LOGGED_IN) {
 		/*
+		 * During shutdown, if session is prematurely disconnected,
+		 * recovery won't happen and there will be hung cmds. Not
+		 * handling cmds would trigger EH, also bad in this case.
+		 * Instead, handle cmd, allow completion to happen and let
+		 * upper layer to deal with the result.
+		 */
+		if (unlikely(system_state != SYSTEM_RUNNING)) {
+			sc->result = DID_NO_CONNECT << 16;
+			ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+			rc = BLK_EH_HANDLED;
+			goto done;
+		}
+		/*
 		 * We are probably in the middle of iscsi recovery so let
 		 * that complete and handle the error.
 		 */
@@ -2082,7 +2104,7 @@ done:
 		task->last_timeout = jiffies;
 	spin_unlock(&session->frwd_lock);
 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
-		     "timer reset" : "nh");
+		     "timer reset" : "shutdown or nh");
 	return rc;
 }
 EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
@@ -2722,8 +2744,10 @@ static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
  * @iscsit: iscsi transport template
  * @shost: scsi host
  * @cmds_max: session can queue
+ * @dd_size: private driver data size, added to session allocation size
 * @cmd_task_size: LLD task private data size
 * @initial_cmdsn: initial CmdSN
+ * @id: target ID to add to this session
 *
 * This can be used by software iscsi_transports that allocate
 * a session per scsi host.
@@ -2951,7 +2975,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_setup);
 
 /**
  * iscsi_conn_teardown - teardown iscsi connection
- * cls_conn: iscsi class connection
+ * @cls_conn: iscsi class connection
 *
 * TODO: we may need to make this into a two step process
 * like scsi-mls remove + put host
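The libiscsi hunks above share one pattern: once the system leaves SYSTEM_RUNNING, new and timed-out commands are completed immediately with DID_NO_CONNECT instead of being parked for a session recovery that will never run, which previously hung sd_shutdown. A minimal standalone model of that fast-fail decision (names hypothetical, not the libiscsi API):

#include <stdbool.h>
#include <stdio.h>

enum result { OK, NO_CONNECT, RETRY };

static bool system_shutting_down;       /* stands in for system_state */

static enum result queue_cmd(bool session_failed)
{
        if (session_failed) {
                if (system_shutting_down)
                        return NO_CONNECT;  /* complete now; EH would hang */
                return RETRY;               /* normal path: wait for recovery */
        }
        return OK;
}

int main(void)
{
        system_shutting_down = true;
        printf("result=%d\n", queue_cmd(true));   /* prints result=1 */
        return 0;
}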
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 63a1d69ff515..369ef8f23b24 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -798,6 +798,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
 /**
  * iscsi_tcp_hdr_recv_done - process PDU header
+ * @tcp_conn: iSCSI TCP connection
+ * @segment: the buffer segment being processed
  *
  * This is the callback invoked when the PDU header has
  * been received. If the header is followed by additional
@@ -876,9 +878,10 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
  * @conn: iscsi connection
  * @skb: network buffer with header and/or data segment
  * @offset: offset in skb
- * @offload: bool indicating if transfer was offloaded
+ * @offloaded: bool indicating if transfer was offloaded
+ * @status: iscsi TCP status result
  *
- * Will return status of transfer in status. And will return
+ * Will return status of transfer in @status. And will return
  * number of bytes copied.
  */
 int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
@@ -955,9 +958,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
 
 /**
  * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
- * @conn: iscsi connection
  * @task: scsi command task
- * @sc: scsi command
 */
 int iscsi_tcp_task_init(struct iscsi_task *task)
 {
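These libiscsi_tcp hunks only reconcile kernel-doc comments with the real function signatures: stale parameters are dropped and missing @name lines are added. The convention they enforce looks like this, on an invented function:

struct widget;

/**
 * frob_widget - adjust a widget's frobnication level
 * @w: widget to adjust
 * @level: new level to apply
 *
 * Every parameter gets a matching @name line directly under the
 * one-line summary; scripts/kernel-doc warns when this list drifts
 * out of sync with the actual argument list.
 */
int frob_widget(struct widget *w, int level);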
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 70be4425ae0b..2b3637b40dde 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -730,7 +730,6 @@ int sas_discover_sata(struct domain_device *dev)
 	if (res)
 		return res;
 
-	sas_discover_event(dev->port, DISCE_PROBE);
 	return 0;
 }
 
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 60de66252fa2..e4fd078e4175 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -212,13 +212,9 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
 	}
 }
 
-static void sas_probe_devices(struct work_struct *work)
+static void sas_probe_devices(struct asd_sas_port *port)
 {
 	struct domain_device *dev, *n;
-	struct sas_discovery_event *ev = to_sas_discovery_event(work);
-	struct asd_sas_port *port = ev->port;
-
-	clear_bit(DISCE_PROBE, &port->disc.pending);
 
 	/* devices must be domain members before link recovery and probe */
 	list_for_each_entry(dev, &port->disco_list, disco_list_node) {
@@ -294,7 +290,6 @@ int sas_discover_end_dev(struct domain_device *dev)
 	res = sas_notify_lldd_dev_found(dev);
 	if (res)
 		return res;
-	sas_discover_event(dev->port, DISCE_PROBE);
 
 	return 0;
 }
@@ -353,13 +348,9 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
 	sas_put_device(dev);
 }
 
-static void sas_destruct_devices(struct work_struct *work)
+void sas_destruct_devices(struct asd_sas_port *port)
 {
 	struct domain_device *dev, *n;
-	struct sas_discovery_event *ev = to_sas_discovery_event(work);
-	struct asd_sas_port *port = ev->port;
-
-	clear_bit(DISCE_DESTRUCT, &port->disc.pending);
 
 	list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
 		list_del_init(&dev->disco_list_node);
@@ -370,6 +361,16 @@ static void sas_destruct_devices(struct work_struct *work)
 	}
 }
 
+static void sas_destruct_ports(struct asd_sas_port *port)
+{
+	struct sas_port *sas_port, *p;
+
+	list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
+		list_del_init(&sas_port->del_list);
+		sas_port_delete(sas_port);
+	}
+}
+
 void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
 {
 	if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
@@ -384,7 +385,6 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
 	if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
 		sas_rphy_unlink(dev->rphy);
 		list_move_tail(&dev->disco_list_node, &port->destroy_list);
-		sas_discover_event(dev->port, DISCE_DESTRUCT);
 	}
 }
 
@@ -490,6 +490,8 @@ static void sas_discover_domain(struct work_struct *work)
 		port->port_dev = NULL;
 	}
 
+	sas_probe_devices(port);
+
 	SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
 		    task_pid_nr(current), error);
 }
@@ -523,6 +525,10 @@ static void sas_revalidate_domain(struct work_struct *work)
 		    port->id, task_pid_nr(current), res);
  out:
 	mutex_unlock(&ha->disco_mutex);
+
+	sas_destruct_devices(port);
+	sas_destruct_ports(port);
+	sas_probe_devices(port);
 }
 
 /* ---------- Events ---------- */
@@ -534,7 +540,7 @@ static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
 	 * workqueue, or known to be submitted from a context that is
 	 * not racing against draining
 	 */
-	scsi_queue_work(ha->core.shost, &sw->work);
+	queue_work(ha->disco_q, &sw->work);
 }
 
 static void sas_chain_event(int event, unsigned long *pending,
@@ -578,10 +584,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
 	static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
 		[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
 		[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
-		[DISCE_PROBE] = sas_probe_devices,
 		[DISCE_SUSPEND] = sas_suspend_devices,
 		[DISCE_RESUME] = sas_resume_devices,
-		[DISCE_DESTRUCT] = sas_destruct_devices,
 	};
 
 	disc->pending = 0;
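The sas_discover.c rework above is the core of the libsas hotplug change: DISCE_PROBE and DISCE_DESTRUCT stop being chained work items that could race with further discovery, and instead run synchronously, in a fixed order, at the end of discovery or revalidation. A runnable model of the new control flow (all names invented for illustration):

#include <stdio.h>

struct port { int id; };

static void rediscover(struct port *p)       { printf("port%d: rediscover\n", p->id); }
static void destruct_devices(struct port *p) { printf("port%d: destruct devices\n", p->id); }
static void destruct_ports(struct port *p)   { printf("port%d: destruct ports\n", p->id); }
static void probe_devices(struct port *p)    { printf("port%d: probe devices\n", p->id); }

/* Fixed, synchronous ordering replacing the old DISCE_* event hops:
 * flush removals first, then stale ports, then attach new devices. */
static void revalidate(struct port *p)
{
        rediscover(p);
        destruct_devices(p);
        destruct_ports(p);
        probe_devices(p);
}

int main(void)
{
        struct port p = { 0 };

        revalidate(&p);
        return 0;
}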
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 0bb9eefc08c8..ae923eb6de95 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -29,7 +29,8 @@
 
 int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
 {
-	int rc = 0;
+	/* it's added to the defer_q when draining so return succeed */
+	int rc = 1;
 
 	if (!test_bit(SAS_HA_REGISTERED, &ha->state))
 		return 0;
@@ -39,24 +40,20 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
 		if (list_empty(&sw->drain_node))
 			list_add_tail(&sw->drain_node, &ha->defer_q);
 	} else
-		rc = scsi_queue_work(ha->core.shost, &sw->work);
+		rc = queue_work(ha->event_q, &sw->work);
 
 	return rc;
 }
 
-static int sas_queue_event(int event, unsigned long *pending,
-			   struct sas_work *work,
+static int sas_queue_event(int event, struct sas_work *work,
 			   struct sas_ha_struct *ha)
 {
-	int rc = 0;
+	unsigned long flags;
+	int rc;
 
-	if (!test_and_set_bit(event, pending)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&ha->lock, flags);
-		rc = sas_queue_work(ha, work);
-		spin_unlock_irqrestore(&ha->lock, flags);
-	}
+	spin_lock_irqsave(&ha->lock, flags);
+	rc = sas_queue_work(ha, work);
+	spin_unlock_irqrestore(&ha->lock, flags);
 
 	return rc;
 }
@@ -64,21 +61,25 @@ static int sas_queue_event(int event, struct sas_work *work,
 
 void __sas_drain_work(struct sas_ha_struct *ha)
 {
-	struct workqueue_struct *wq = ha->core.shost->work_q;
 	struct sas_work *sw, *_sw;
+	int ret;
 
 	set_bit(SAS_HA_DRAINING, &ha->state);
 	/* flush submitters */
 	spin_lock_irq(&ha->lock);
 	spin_unlock_irq(&ha->lock);
 
-	drain_workqueue(wq);
+	drain_workqueue(ha->event_q);
+	drain_workqueue(ha->disco_q);
 
 	spin_lock_irq(&ha->lock);
 	clear_bit(SAS_HA_DRAINING, &ha->state);
 	list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
 		list_del_init(&sw->drain_node);
-		sas_queue_work(ha, sw);
+		ret = sas_queue_work(ha, sw);
+		if (ret != 1)
+			sas_free_event(to_asd_sas_event(&sw->work));
+
 	}
 	spin_unlock_irq(&ha->lock);
 }
@@ -115,33 +116,78 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
 		struct asd_sas_port *port = ha->sas_port[i];
 		const int ev = DISCE_REVALIDATE_DOMAIN;
 		struct sas_discovery *d = &port->disc;
+		struct asd_sas_phy *sas_phy;
 
 		if (!test_and_clear_bit(ev, &d->pending))
 			continue;
 
-		sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
+		if (list_empty(&port->phy_list))
+			continue;
+
+		sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
+				port_phy_el);
+		ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
 	}
 	mutex_unlock(&ha->disco_mutex);
 }
 
+
+static void sas_port_event_worker(struct work_struct *work)
+{
+	struct asd_sas_event *ev = to_asd_sas_event(work);
+
+	sas_port_event_fns[ev->event](work);
+	sas_free_event(ev);
+}
+
+static void sas_phy_event_worker(struct work_struct *work)
+{
+	struct asd_sas_event *ev = to_asd_sas_event(work);
+
+	sas_phy_event_fns[ev->event](work);
+	sas_free_event(ev);
+}
+
 static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
 {
+	struct asd_sas_event *ev;
 	struct sas_ha_struct *ha = phy->ha;
+	int ret;
 
 	BUG_ON(event >= PORT_NUM_EVENTS);
 
-	return sas_queue_event(event, &phy->port_events_pending,
-			       &phy->port_events[event].work, ha);
+	ev = sas_alloc_event(phy);
+	if (!ev)
+		return -ENOMEM;
+
+	INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+
+	ret = sas_queue_event(event, &ev->work, ha);
+	if (ret != 1)
+		sas_free_event(ev);
+
+	return ret;
 }
 
 int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
 {
+	struct asd_sas_event *ev;
 	struct sas_ha_struct *ha = phy->ha;
+	int ret;
 
 	BUG_ON(event >= PHY_NUM_EVENTS);
 
-	return sas_queue_event(event, &phy->phy_events_pending,
-			       &phy->phy_events[event].work, ha);
+	ev = sas_alloc_event(phy);
+	if (!ev)
+		return -ENOMEM;
+
+	INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+
+	ret = sas_queue_event(event, &ev->work, ha);
+	if (ret != 1)
+		sas_free_event(ev);
+
+	return ret;
 }
 
 int sas_init_events(struct sas_ha_struct *sas_ha)
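The sas_event.c hunks above replace the old fixed per-phy event slots (where a second event of the same type was silently dropped while one was pending) with dynamically allocated events that the worker frees after handling, plus a per-phy counter so a flapping link cannot allocate without bound. A runnable userspace model of the allocate/count/threshold scheme (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define EVENT_THRES 32

struct phy   { int id; int event_nr; };
struct event { struct phy *phy; int type; };

static struct event *alloc_event(struct phy *phy)
{
        struct event *ev;

        if (phy->event_nr >= EVENT_THRES) {
                /* analogue of shutting down a phy that bursts events */
                fprintf(stderr, "phy%d bursting events, shut it down\n",
                        phy->id);
                return NULL;
        }
        ev = calloc(1, sizeof(*ev));
        if (ev) {
                ev->phy = phy;
                phy->event_nr++;        /* balanced in free_event() */
        }
        return ev;
}

static void free_event(struct event *ev)
{
        ev->phy->event_nr--;
        free(ev);
}

int main(void)
{
        struct phy phy = { .id = 0 };
        struct event *ev = alloc_event(&phy);

        if (ev)
                free_event(ev);         /* the worker does this after handling */
        return 0;
}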
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3183d63de4da..6a4f8198b78e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 	phy->phy->minimum_linkrate = dr->pmin_linkrate;
 	phy->phy->maximum_linkrate = dr->pmax_linkrate;
 	phy->phy->negotiated_linkrate = phy->linkrate;
+	phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
 
  skip:
 	if (new_phy)
@@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
 	res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
 			       resp, RPEL_RESP_SIZE);
 
-	if (!res)
+	if (res)
 		goto out;
 
 	phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
 	phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
 
  out:
+	kfree(req);
 	kfree(resp);
 	return res;
 
@@ -1914,7 +1916,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
 			sas_port_delete_phy(phy->port, phy->phy);
 		sas_device_set_phy(found, phy->port);
 		if (phy->port->num_phys == 0)
-			sas_port_delete(phy->port);
+			list_add_tail(&phy->port->del_list,
+				&parent->port->sas_port_del_list);
 		phy->port = NULL;
 	}
 }
@@ -2122,7 +2125,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
 	struct domain_device *dev = NULL;
 
 	res = sas_find_bcast_dev(port_dev, &dev);
-	while (res == 0 && dev) {
+	if (res == 0 && dev) {
 		struct expander_device *ex = &dev->ex_dev;
 		int i = 0, phy_id;
 
@@ -2134,9 +2137,6 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
 			res = sas_rediscover(dev, phy_id);
 			i = phy_id + 1;
 		} while (i < ex->num_phys);
-
-		dev = NULL;
-		res = sas_find_bcast_dev(port_dev, &dev);
 	}
 	return res;
 }
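Two small but genuine bugs are fixed in sas_smp_get_phy_events() above: the error check was inverted (it jumped to the exit path on success, so the counters were never parsed), and the request buffer was never freed. A standalone model of the corrected shape, with stand-in names for the SMP plumbing:

#include <stdlib.h>

static int execute(char *req, char *resp)
{
        (void)req;
        (void)resp;
        return 0;               /* 0 means success, like smp_execute_task() */
}

static int get_phy_events(void)
{
        int res = -1;
        char *req = calloc(1, 16);
        char *resp = calloc(1, 32);

        if (!req || !resp)
                goto out;

        res = execute(req, resp);
        if (res)                /* the fix: bail out on failure, not success */
                goto out;

        /* ... parse resp here ... */
out:
        free(req);              /* mirrors the added kfree(req): no leak */
        free(resp);
        return res;
}

int main(void)
{
        return get_phy_events();
}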
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 64fa6f53cb8b..c81a63b5dc71 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -39,6 +39,7 @@
 #include "../scsi_sas_internal.h"
 
 static struct kmem_cache *sas_task_cache;
+static struct kmem_cache *sas_event_cache;
 
 struct sas_task *sas_alloc_task(gfp_t flags)
 {
@@ -109,6 +110,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
 
 int sas_register_ha(struct sas_ha_struct *sas_ha)
 {
+	char name[64];
 	int error = 0;
 
 	mutex_init(&sas_ha->disco_mutex);
@@ -122,6 +124,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
 	INIT_LIST_HEAD(&sas_ha->defer_q);
 	INIT_LIST_HEAD(&sas_ha->eh_dev_q);
 
+	sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;
+
 	error = sas_register_phys(sas_ha);
 	if (error) {
 		printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
@@ -140,10 +144,24 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
 		goto Undo_ports;
 	}
 
+	error = -ENOMEM;
+	snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
+	sas_ha->event_q = create_singlethread_workqueue(name);
+	if (!sas_ha->event_q)
+		goto Undo_ports;
+
+	snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
+	sas_ha->disco_q = create_singlethread_workqueue(name);
+	if (!sas_ha->disco_q)
+		goto Undo_event_q;
+
 	INIT_LIST_HEAD(&sas_ha->eh_done_q);
 	INIT_LIST_HEAD(&sas_ha->eh_ata_q);
 
 	return 0;
+
+Undo_event_q:
+	destroy_workqueue(sas_ha->event_q);
 Undo_ports:
 	sas_unregister_ports(sas_ha);
 Undo_phys:
@@ -174,6 +192,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
 	__sas_drain_work(sas_ha);
 	mutex_unlock(&sas_ha->drain_mutex);
 
+	destroy_workqueue(sas_ha->disco_q);
+	destroy_workqueue(sas_ha->event_q);
+
 	return 0;
 }
 
@@ -364,8 +385,6 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
 		struct asd_sas_phy *phy = ha->sas_phy[i];
 
 		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-		phy->port_events_pending = 0;
-		phy->phy_events_pending = 0;
 		phy->frame_rcvd_size = 0;
 	}
 }
@@ -537,6 +556,37 @@ static struct sas_function_template sft = {
 	.smp_handler = sas_smp_handler,
 };
 
+static inline ssize_t phy_event_threshold_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
+}
+
+static inline ssize_t phy_event_threshold_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+	sha->event_thres = simple_strtol(buf, NULL, 10);
+
+	/* threshold cannot be set too small */
+	if (sha->event_thres < 32)
+		sha->event_thres = 32;
+
+	return count;
+}
+
+DEVICE_ATTR(phy_event_threshold,
+	S_IRUGO|S_IWUSR,
+	phy_event_threshold_show,
+	phy_event_threshold_store);
+EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);
+
 struct scsi_transport_template *
 sas_domain_attach_transport(struct sas_domain_function_template *dft)
 {
@@ -555,20 +605,71 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
 }
 EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
 
+
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+{
+	struct asd_sas_event *event;
+	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+	struct sas_ha_struct *sas_ha = phy->ha;
+	struct sas_internal *i =
+		to_sas_internal(sas_ha->core.shost->transportt);
+
+	event = kmem_cache_zalloc(sas_event_cache, flags);
+	if (!event)
+		return NULL;
+
+	atomic_inc(&phy->event_nr);
+
+	if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
+		if (i->dft->lldd_control_phy) {
+			if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
+				sas_printk("The phy%02d bursting events, shut it down.\n",
+					phy->id);
+				sas_notify_phy_event(phy, PHYE_SHUTDOWN);
+			}
+		} else {
+			/* Do not support PHY control, stop allocating events */
+			WARN_ONCE(1, "PHY control not supported.\n");
+			kmem_cache_free(sas_event_cache, event);
+			atomic_dec(&phy->event_nr);
+			event = NULL;
+		}
+	}
+
+	return event;
+}
+
+void sas_free_event(struct asd_sas_event *event)
+{
+	struct asd_sas_phy *phy = event->phy;
+
+	kmem_cache_free(sas_event_cache, event);
+	atomic_dec(&phy->event_nr);
+}
+
 /* ---------- SAS Class register/unregister ---------- */
 
 static int __init sas_class_init(void)
 {
 	sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
 	if (!sas_task_cache)
-		return -ENOMEM;
+		goto out;
+
+	sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
+	if (!sas_event_cache)
+		goto free_task_kmem;
 
 	return 0;
+free_task_kmem:
+	kmem_cache_destroy(sas_task_cache);
+out:
+	return -ENOMEM;
 }
 
 static void __exit sas_class_exit(void)
 {
 	kmem_cache_destroy(sas_task_cache);
+	kmem_cache_destroy(sas_event_cache);
 }
 
 MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
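With a second cache (sas_event_cache) and two workqueues in play, the init paths above switch from an early return to the usual goto-unwind ladder, so each failure frees exactly what was already set up. A compact runnable model of that pattern (invented names, malloc standing in for kmem_cache/workqueue creation):

#include <stdlib.h>

static void *task_cache;
static void *event_cache;

static int class_init(void)
{
        task_cache = malloc(64);
        if (!task_cache)
                goto out;

        event_cache = malloc(64);
        if (!event_cache)
                goto free_task;         /* unwind only what succeeded */

        return 0;

free_task:
        free(task_cache);
out:
        return -1;                      /* stands in for -ENOMEM */
}

static void class_exit(void)
{
        free(task_cache);
        free(event_cache);
}

int main(void)
{
        if (class_init())
                return 1;
        class_exit();
        return 0;
}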
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index c07e08136491..50e12d662ffe 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -61,6 +61,9 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
 int sas_register_phys(struct sas_ha_struct *sas_ha);
 void sas_unregister_phys(struct sas_ha_struct *sas_ha);
 
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
+void sas_free_event(struct asd_sas_event *event);
+
 int sas_register_ports(struct sas_ha_struct *sas_ha);
 void sas_unregister_ports(struct sas_ha_struct *sas_ha);
 
@@ -98,6 +101,10 @@ int sas_try_ata_reset(struct asd_sas_phy *phy);
 void sas_hae_reset(struct work_struct *work);
 
 void sas_free_device(struct kref *kref);
+void sas_destruct_devices(struct asd_sas_port *port);
+
+extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
+extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
 
 #ifdef CONFIG_SCSI_SAS_HOST_SMP
 extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index cdee446c29e1..bf3e1b979ca6 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -35,7 +35,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
 	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
+	phy->in_shutdown = 0;
 	phy->error = 0;
 	sas_deform_port(phy, 1);
 }
@@ -45,7 +45,7 @@ static void sas_phye_oob_done(struct work_struct *work)
 	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
+	phy->in_shutdown = 0;
 	phy->error = 0;
 }
 
@@ -58,8 +58,6 @@ static void sas_phye_oob_error(struct work_struct *work)
 	struct sas_internal *i =
 		to_sas_internal(sas_ha->core.shost->transportt);
 
-	clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
-
 	sas_deform_port(phy, 1);
 
 	if (!port && phy->enabled && i->dft->lldd_control_phy) {
@@ -88,8 +86,6 @@ static void sas_phye_spinup_hold(struct work_struct *work)
 	struct sas_internal *i =
 		to_sas_internal(sas_ha->core.shost->transportt);
 
-	clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
-
 	phy->error = 0;
 	i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
 }
@@ -99,8 +95,6 @@ static void sas_phye_resume_timeout(struct work_struct *work)
 	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
-
 	/* phew, lldd got the phy back in the nick of time */
 	if (!phy->suspended) {
 		dev_info(&phy->phy->dev, "resume timeout cancelled\n");
@@ -113,45 +107,41 @@ static void sas_phye_resume_timeout(struct work_struct *work)
 }
 
 
+static void sas_phye_shutdown(struct work_struct *work)
+{
+	struct asd_sas_event *ev = to_asd_sas_event(work);
+	struct asd_sas_phy *phy = ev->phy;
+	struct sas_ha_struct *sas_ha = phy->ha;
+	struct sas_internal *i =
+		to_sas_internal(sas_ha->core.shost->transportt);
+
+	if (phy->enabled) {
+		int ret;
+
+		phy->error = 0;
+		phy->enabled = 0;
+		ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
+		if (ret)
+			sas_printk("lldd disable phy%02d returned %d\n",
+				phy->id, ret);
+	} else
+		sas_printk("phy%02d is not enabled, cannot shutdown\n",
+			phy->id);
+}
+
 /* ---------- Phy class registration ---------- */
 
 int sas_register_phys(struct sas_ha_struct *sas_ha)
 {
 	int i;
 
-	static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
-		[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
-		[PHYE_OOB_DONE] = sas_phye_oob_done,
-		[PHYE_OOB_ERROR] = sas_phye_oob_error,
-		[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
-		[PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
-
-	};
-
-	static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
-		[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
-		[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
-		[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
-		[PORTE_TIMER_EVENT] = sas_porte_timer_event,
-		[PORTE_HARD_RESET] = sas_porte_hard_reset,
-	};
-
 	/* Now register the phys. */
 	for (i = 0; i < sas_ha->num_phys; i++) {
-		int k;
 		struct asd_sas_phy *phy = sas_ha->sas_phy[i];
 
 		phy->error = 0;
+		atomic_set(&phy->event_nr, 0);
 		INIT_LIST_HEAD(&phy->port_phy_el);
-		for (k = 0; k < PORT_NUM_EVENTS; k++) {
-			INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
-			phy->port_events[k].phy = phy;
-		}
-
-		for (k = 0; k < PHY_NUM_EVENTS; k++) {
-			INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
-			phy->phy_events[k].phy = phy;
-		}
 
 		phy->port = NULL;
 		phy->ha = sas_ha;
@@ -179,3 +169,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
 
 	return 0;
 }
+
+const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
+	[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
+	[PHYE_OOB_DONE] = sas_phye_oob_done,
+	[PHYE_OOB_ERROR] = sas_phye_oob_error,
+	[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+	[PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
+	[PHYE_SHUTDOWN] = sas_phye_shutdown,
+};
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index d3c5297c6c89..f07e55d3aa73 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -66,6 +66,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
 		rc = sas_notify_lldd_dev_found(dev);
 		if (rc) {
 			sas_unregister_dev(port, dev);
+			sas_destruct_devices(port);
 			continue;
 		}
 
@@ -192,6 +193,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
 		si->dft->lldd_port_formed(phy);
 
 	sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+	flush_workqueue(sas_ha->disco_q);
 }
 
 /**
@@ -219,6 +221,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
 
 	if (port->num_phys == 1) {
 		sas_unregister_domain_devices(port, gone);
+		sas_destruct_devices(port);
 		sas_port_delete(port->port);
 		port->port = NULL;
 	} else {
@@ -261,8 +264,6 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
 	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
-
 	sas_form_port(phy);
 }
 
@@ -273,14 +274,15 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
 	unsigned long flags;
 	u32 prim;
 
-	clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
-
 	spin_lock_irqsave(&phy->sas_prim_lock, flags);
 	prim = phy->sas_prim;
 	spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
 
 	SAS_DPRINTK("broadcast received: %d\n", prim);
 	sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
+
+	if (phy->port)
+		flush_workqueue(phy->port->ha->disco_q);
 }
 
 void sas_porte_link_reset_err(struct work_struct *work)
@@ -288,8 +290,6 @@ void sas_porte_link_reset_err(struct work_struct *work)
 	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
-
 	sas_deform_port(phy, 1);
 }
 
@@ -298,8 +298,6 @@ void sas_porte_timer_event(struct work_struct *work)
 	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
-
 	sas_deform_port(phy, 1);
 }
 
@@ -308,8 +306,6 @@ void sas_porte_hard_reset(struct work_struct *work)
 	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
-	clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
-
 	sas_deform_port(phy, 1);
 }
 
@@ -323,6 +319,7 @@ static void sas_init_port(struct asd_sas_port *port,
 	INIT_LIST_HEAD(&port->dev_list);
 	INIT_LIST_HEAD(&port->disco_list);
 	INIT_LIST_HEAD(&port->destroy_list);
+	INIT_LIST_HEAD(&port->sas_port_del_list);
 	spin_lock_init(&port->phy_list_lock);
 	INIT_LIST_HEAD(&port->phy_list);
 	port->ha = sas_ha;
@@ -353,3 +350,11 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
 			sas_deform_port(sas_ha->sas_phy[i], 0);
 
 }
+
+const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
+	[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
+	[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
+	[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
+	[PORTE_TIMER_EVENT] = sas_porte_timer_event,
+	[PORTE_HARD_RESET] = sas_porte_hard_reset,
+};
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index c9406852c3e9..6de9681ace82 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -27,6 +27,7 @@
 #include <linux/firmware.h>
 #include <linux/export.h>
 #include <linux/ctype.h>
+#include <linux/kernel.h>
 
 #include "sas_internal.h"
 
@@ -959,21 +960,6 @@ void sas_target_destroy(struct scsi_target *starget)
 	sas_put_device(found_dev);
 }
 
-static void sas_parse_addr(u8 *sas_addr, const char *p)
-{
-	int i;
-	for (i = 0; i < SAS_ADDR_SIZE; i++) {
-		u8 h, l;
-		if (!*p)
-			break;
-		h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
-		p++;
-		l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
-		p++;
-		sas_addr[i] = (h<<4) | l;
-	}
-}
-
 #define SAS_STRING_ADDR_SIZE 16
 
 int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
@@ -990,7 +976,9 @@ int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
 		goto out;
 	}
 
-	sas_parse_addr(addr, fw->data);
+	res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
+	if (res)
+		goto out;
 
 out:
 	release_firmware(fw);
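The hunk above retires the open-coded sas_parse_addr(), which silently produced garbage for non-hex input, in favor of the shared hex2bin() helper from lib/hexdump.c, which rejects invalid characters. A userspace equivalent of what that helper does, as a sketch:

#include <ctype.h>
#include <stdio.h>

static int hex_to_bin(char ch)
{
        if (ch >= '0' && ch <= '9')
                return ch - '0';
        ch = tolower(ch);
        if (ch >= 'a' && ch <= 'f')
                return ch - 'a' + 10;
        return -1;              /* reject non-hex, unlike the old parser */
}

static int hex2bin(unsigned char *dst, const char *src, size_t count)
{
        while (count--) {
                int hi = hex_to_bin(*src++);
                int lo = hex_to_bin(*src++);

                if (hi < 0 || lo < 0)
                        return -1;
                *dst++ = (hi << 4) | lo;
        }
        return 0;
}

int main(void)
{
        unsigned char addr[8];

        /* 16 hex chars -> 8-byte SAS address */
        if (!hex2bin(addr, "5000C50012345678", sizeof(addr)))
                printf("first byte: 0x%02X\n", addr[0]);
        return 0;
}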
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 231302273257..61fb46da05d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -55,9 +55,10 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
 #define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
+#define LPFC_MIN_SG_SEG_CNT	32	/* sg element count per scsi cmnd */
 #define LPFC_MAX_SGL_SEG_CNT	512	/* SGL element count per scsi cmnd */
 #define LPFC_MAX_BPL_SEG_CNT	4096	/* BPL element count per scsi cmnd */
-#define LPFC_MAX_NVME_SEG_CNT	128	/* max SGL element cnt per NVME cmnd */
+#define LPFC_MAX_NVME_SEG_CNT	256	/* max SGL element cnt per NVME cmnd */
 
 #define LPFC_MAX_SGE_SIZE	0x80000000 /* Maximum data allowed in a SGE */
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
@@ -705,7 +706,6 @@ struct lpfc_hba {
 					 * capability
 					 */
 #define HBA_NVME_IOQ_FLUSH      0x80000 /* NVME IO queues flushed. */
-#define NVME_XRI_ABORT_EVENT	0x100000
 
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
@@ -945,6 +945,8 @@ struct lpfc_hba {
 	struct list_head lpfc_nvme_buf_list_get;
 	struct list_head lpfc_nvme_buf_list_put;
 	uint32_t total_nvme_bufs;
+	uint32_t get_nvme_bufs;
+	uint32_t put_nvme_bufs;
 	struct list_head lpfc_iocb_list;
 	uint32_t total_iocbq_bufs;
 	struct list_head active_rrq_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 82f6e219ee34..d188fb565a32 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -148,6 +148,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvme_fc_local_port *localport;
+	struct lpfc_nvme_lport *lport;
 	struct lpfc_nodelist *ndlp;
 	struct nvme_fc_remote_port *nrport;
 	uint64_t data1, data2, data3, tot;
@@ -198,10 +199,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		}
 
 		len += snprintf(buf+len, PAGE_SIZE-len,
-				"LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+				"LS: Xmt %08x Drop %08x Cmpl %08x\n",
 				atomic_read(&tgtp->xmt_ls_rsp),
 				atomic_read(&tgtp->xmt_ls_drop),
-				atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+				atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"LS: RSP Abort %08x xb %08x Err %08x\n",
+				atomic_read(&tgtp->xmt_ls_rsp_aborted),
+				atomic_read(&tgtp->xmt_ls_rsp_xb_set),
 				atomic_read(&tgtp->xmt_ls_rsp_error));
 
 		len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,6 +242,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				atomic_read(&tgtp->xmt_fcp_rsp_drop));
 
 		len += snprintf(buf+len, PAGE_SIZE-len,
+				"FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+				atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+				atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+				atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+		len += snprintf(buf + len, PAGE_SIZE - len,
 				"ABORT: Xmt %08x Cmpl %08x\n",
 				atomic_read(&tgtp->xmt_fcp_abort),
 				atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -271,6 +283,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				wwn_to_u64(vport->fc_portname.u.wwn));
 		return len;
 	}
+	lport = (struct lpfc_nvme_lport *)localport->private;
 	len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
 
 	spin_lock_irq(shost->host_lock);
@@ -347,9 +360,16 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 
 	len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
 	len += snprintf(buf+len, PAGE_SIZE-len,
-			"LS: Xmt %016x Cmpl %016x\n",
+			"LS: Xmt %010x Cmpl %010x Abort %08x\n",
 			atomic_read(&phba->fc4NvmeLsRequests),
-			atomic_read(&phba->fc4NvmeLsCmpls));
+			atomic_read(&phba->fc4NvmeLsCmpls),
+			atomic_read(&lport->xmt_ls_abort));
+
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
+			atomic_read(&lport->xmt_ls_err),
+			atomic_read(&lport->cmpl_ls_xb),
+			atomic_read(&lport->cmpl_ls_err));
 
 	tot = atomic_read(&phba->fc4NvmeIoCmpls);
 	data1 = atomic_read(&phba->fc4NvmeInputRequests);
@@ -360,8 +380,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			data1, data2, data3);
 
 	len += snprintf(buf+len, PAGE_SIZE-len,
-			" Cmpl %016llx Outstanding %016llx\n",
-			tot, (data1 + data2 + data3) - tot);
+			" noxri %08x nondlp %08x qdepth %08x "
+			"wqerr %08x\n",
+			atomic_read(&lport->xmt_fcp_noxri),
+			atomic_read(&lport->xmt_fcp_bad_ndlp),
+			atomic_read(&lport->xmt_fcp_qdepth),
+			atomic_read(&lport->xmt_fcp_wqerr));
+
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			" Cmpl %016llx Outstanding %016llx Abort %08x\n",
+			tot, ((data1 + data2 + data3) - tot),
+			atomic_read(&lport->xmt_fcp_abort));
+
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"FCP CMPL: xb %08x Err %08x\n",
+			atomic_read(&lport->cmpl_fcp_xb),
+			atomic_read(&lport->cmpl_fcp_err));
 	return len;
 }
 
@@ -3366,12 +3400,13 @@ LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
 
 /*
  * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 0  driver will calcualte optimal number of RQ pairs
  * lpfc_nvmet_mrq = 1  use a single RQ pair
 * lpfc_nvmet_mrq >= 2  use specified RQ pairs for MRQ
 *
 */
 LPFC_ATTR_R(nvmet_mrq,
-	    1, 1, 16,
+	    LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
 	    "Specify number of RQ pairs for processing NVMET cmds");
 
 /*
@@ -5139,7 +5174,7 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
  * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
 * and will be limited to 512 if BlockGuard is enabled under SLI3.
 */
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
 	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
 /*
@@ -6362,6 +6397,9 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 			phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
 		}
 
+		if (!phba->cfg_nvmet_mrq)
+			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
 		/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
 		if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
 			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
@@ -6369,10 +6407,13 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 				"6018 Adjust lpfc_nvmet_mrq to %d\n",
 				phba->cfg_nvmet_mrq);
 		}
+		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
+
 	} else {
 		/* Not NVME Target mode. Turn off Target parameters. */
 		phba->nvmet_support = 0;
-		phba->cfg_nvmet_mrq = 0;
+		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
 		phba->cfg_nvmet_fb_size = 0;
 	}
 
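The LPFC_ATTR_R() changes above follow the kernel's bounded-module-parameter pattern: each parameter carries an explicit default/min/max triple, and dependent values are clamped afterwards (lpfc_nvmet_mrq is capped at LPFC_NVMET_MRQ_MAX, sg_seg_cnt gains a true minimum). A minimal model of that clamping, with invented names and illustrative numbers rather than lpfc's actual defaults:

#include <stdio.h>

struct param { int def, min, max, val; };

static void param_set(struct param *p, int v)
{
        p->val = (v < p->min) ? p->min : (v > p->max) ? p->max : v;
}

int main(void)
{
        struct param sg_seg_cnt = { .def = 64, .min = 32, .max = 4096 };

        param_set(&sg_seg_cnt, 8);                  /* too small: clamped */
        printf("sg_seg_cnt=%d\n", sg_seg_cnt.val);  /* prints 32 */
        return 0;
}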
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4e858b38529a..559f9aa0ed08 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -254,6 +254,8 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
 			   struct lpfc_nvmet_ctxbuf *ctxp);
 int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 			       struct fc_frame_header *fc_hdr);
+void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba);
+void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba);
 void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
 				     uint16_t);
 int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f77673ab4a84..9d20d2c208c7 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -471,6 +471,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
471 "Parse GID_FTrsp: did:x%x flg:x%x x%x", 471 "Parse GID_FTrsp: did:x%x flg:x%x x%x",
472 Did, ndlp->nlp_flag, vport->fc_flag); 472 Did, ndlp->nlp_flag, vport->fc_flag);
473 473
474 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
474 /* By default, the driver expects to support FCP FC4 */ 475 /* By default, the driver expects to support FCP FC4 */
475 if (fc4_type == FC_TYPE_FCP) 476 if (fc4_type == FC_TYPE_FCP)
476 ndlp->nlp_fc4_type |= NLP_FC4_FCP; 477 ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -685,6 +686,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
685 lpfc_els_flush_rscn(vport); 686 lpfc_els_flush_rscn(vport);
686 goto out; 687 goto out;
687 } 688 }
689
690 spin_lock_irq(shost->host_lock);
691 if (vport->fc_flag & FC_RSCN_DEFERRED) {
692 vport->fc_flag &= ~FC_RSCN_DEFERRED;
693 spin_unlock_irq(shost->host_lock);
694
695 /*
696 * Skip processing the NS response
697 * Re-issue the NS cmd
698 */
699 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
700 "0151 Process Deferred RSCN Data: x%x x%x\n",
701 vport->fc_flag, vport->fc_rscn_id_cnt);
702 lpfc_els_handle_rscn(vport);
703
704 goto out;
705 }
706 spin_unlock_irq(shost->host_lock);
707
688 if (irsp->ulpStatus) { 708 if (irsp->ulpStatus) {
689 /* Check for retry */ 709 /* Check for retry */
690 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 710 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
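Editor's note: the GID_FT completion hunk above checks FC_RSCN_DEFERRED under the host lock, clears it, drops the lock, and re-drives lpfc_els_handle_rscn() rather than parsing a name-server response that a newer RSCN has already invalidated (the old clear-site in lpfc_els_rcv_rscn is removed further down). The test-and-clear-then-act shape, as a generic kernel-style sketch:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define FLAG_RSCN_DEFERRED 0x1  /* stands in for FC_RSCN_DEFERRED */

    struct port {
            spinlock_t lock;
            unsigned int flags;
    };

    /* True when a deferred RSCN was pending; the flag is consumed here. */
    static bool claim_deferred_rscn(struct port *p)
    {
            bool pending = false;

            spin_lock_irq(&p->lock);
            if (p->flags & FLAG_RSCN_DEFERRED) {
                    p->flags &= ~FLAG_RSCN_DEFERRED;
                    pending = true; /* caller re-issues RSCN handling */
            }
            spin_unlock_irq(&p->lock);
            return pending;
    }

Consuming the flag inside the lock guarantees exactly one path re-drives the RSCN even if several completions race.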
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2bf5ad3b1512..17ea3bb04266 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -750,6 +750,8 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
750 struct lpfc_hba *phba = vport->phba; 750 struct lpfc_hba *phba = vport->phba;
751 struct lpfc_nvmet_tgtport *tgtp; 751 struct lpfc_nvmet_tgtport *tgtp;
752 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; 752 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
753 struct nvme_fc_local_port *localport;
754 struct lpfc_nvme_lport *lport;
753 uint64_t tot, data1, data2, data3; 755 uint64_t tot, data1, data2, data3;
754 int len = 0; 756 int len = 0;
755 int cnt; 757 int cnt;
@@ -775,10 +777,15 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
775 } 777 }
776 778
777 len += snprintf(buf + len, size - len, 779 len += snprintf(buf + len, size - len,
778 "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n", 780 "LS: Xmt %08x Drop %08x Cmpl %08x\n",
779 atomic_read(&tgtp->xmt_ls_rsp), 781 atomic_read(&tgtp->xmt_ls_rsp),
780 atomic_read(&tgtp->xmt_ls_drop), 782 atomic_read(&tgtp->xmt_ls_drop),
781 atomic_read(&tgtp->xmt_ls_rsp_cmpl), 783 atomic_read(&tgtp->xmt_ls_rsp_cmpl));
784
785 len += snprintf(buf + len, size - len,
786 "LS: RSP Abort %08x xb %08x Err %08x\n",
787 atomic_read(&tgtp->xmt_ls_rsp_aborted),
788 atomic_read(&tgtp->xmt_ls_rsp_xb_set),
782 atomic_read(&tgtp->xmt_ls_rsp_error)); 789 atomic_read(&tgtp->xmt_ls_rsp_error));
783 790
784 len += snprintf(buf + len, size - len, 791 len += snprintf(buf + len, size - len,
@@ -812,6 +819,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
812 atomic_read(&tgtp->xmt_fcp_rsp_drop)); 819 atomic_read(&tgtp->xmt_fcp_rsp_drop));
813 820
814 len += snprintf(buf + len, size - len, 821 len += snprintf(buf + len, size - len,
822 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
823 atomic_read(&tgtp->xmt_fcp_rsp_aborted),
824 atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
825 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
826
827 len += snprintf(buf + len, size - len,
815 "ABORT: Xmt %08x Cmpl %08x\n", 828 "ABORT: Xmt %08x Cmpl %08x\n",
816 atomic_read(&tgtp->xmt_fcp_abort), 829 atomic_read(&tgtp->xmt_fcp_abort),
817 atomic_read(&tgtp->xmt_fcp_abort_cmpl)); 830 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -885,8 +898,38 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
885 data1, data2, data3); 898 data1, data2, data3);
886 899
887 len += snprintf(buf + len, size - len, 900 len += snprintf(buf + len, size - len,
888 " Cmpl %016llx Outstanding %016llx\n", 901 " Cmpl %016llx Outstanding %016llx\n",
889 tot, (data1 + data2 + data3) - tot); 902 tot, (data1 + data2 + data3) - tot);
903
904 localport = vport->localport;
905 if (!localport)
906 return len;
907 lport = (struct lpfc_nvme_lport *)localport->private;
908 if (!lport)
909 return len;
910
911 len += snprintf(buf + len, size - len,
912 "LS Xmt Err: Abrt %08x Err %08x "
913 "Cmpl Err: xb %08x Err %08x\n",
914 atomic_read(&lport->xmt_ls_abort),
915 atomic_read(&lport->xmt_ls_err),
916 atomic_read(&lport->cmpl_ls_xb),
917 atomic_read(&lport->cmpl_ls_err));
918
919 len += snprintf(buf + len, size - len,
920 "FCP Xmt Err: noxri %06x nondlp %06x "
921 "qdepth %06x wqerr %06x Abrt %06x\n",
922 atomic_read(&lport->xmt_fcp_noxri),
923 atomic_read(&lport->xmt_fcp_bad_ndlp),
924 atomic_read(&lport->xmt_fcp_qdepth),
925 atomic_read(&lport->xmt_fcp_wqerr),
926 atomic_read(&lport->xmt_fcp_abort));
927
928 len += snprintf(buf + len, size - len,
929 "FCP Cmpl Err: xb %08x Err %08x\n",
930 atomic_read(&lport->cmpl_fcp_xb),
931 atomic_read(&lport->cmpl_fcp_err));
932
890 } 933 }
891 934
892 return len; 935 return len;
@@ -3213,7 +3256,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
3213 return 1; 3256 return 1;
3214 } 3257 }
3215 3258
3216 if (eqidx < phba->cfg_nvmet_mrq) { 3259 if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
3217 /* NVMET CQset */ 3260 /* NVMET CQset */
3218 qp = phba->sli4_hba.nvmet_cqset[eqidx]; 3261 qp = phba->sli4_hba.nvmet_cqset[eqidx];
3219 *len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len); 3262 *len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
@@ -3246,7 +3289,7 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
3246 3289
3247 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3290 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
3248 "\n%s EQ info: EQ-STAT[max:x%x noE:x%x " 3291 "\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
3249 "bs:x%x proc:x%llx eqd %d]\n", 3292 "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
3250 eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3, 3293 eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
3251 (unsigned long long)qp->q_cnt_4, qp->q_mode); 3294 (unsigned long long)qp->q_cnt_4, qp->q_mode);
3252 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, 3295 len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
@@ -3366,6 +3409,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
3366 if (len >= max_cnt) 3409 if (len >= max_cnt)
3367 goto too_big; 3410 goto too_big;
3368 3411
3412 qp = phba->sli4_hba.hdr_rq;
3413 len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
3414 "ELS RQpair", pbuffer, len);
3415 if (len >= max_cnt)
3416 goto too_big;
3417
3369 /* Slow-path NVME LS response CQ */ 3418 /* Slow-path NVME LS response CQ */
3370 qp = phba->sli4_hba.nvmels_cq; 3419 qp = phba->sli4_hba.nvmels_cq;
3371 len = __lpfc_idiag_print_cq(qp, "NVME LS", 3420 len = __lpfc_idiag_print_cq(qp, "NVME LS",
@@ -3383,12 +3432,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
3383 if (len >= max_cnt) 3432 if (len >= max_cnt)
3384 goto too_big; 3433 goto too_big;
3385 3434
3386 qp = phba->sli4_hba.hdr_rq;
3387 len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
3388 "RQpair", pbuffer, len);
3389 if (len >= max_cnt)
3390 goto too_big;
3391
3392 goto out; 3435 goto out;
3393 } 3436 }
3394 3437
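Editor's note: the debugfs hunks above extend the nvmestat report with the usual lpfc append idiom -- snprintf(buf + len, size - len, ...) accumulating into one buffer -- and bail out early when vport->localport or its private lport pointer is NULL. A stand-alone sketch of the idiom with a truncation guard added; note that snprintf returns the would-be length, which is why later kernels prefer scnprintf for exactly this pattern:

    #include <stdio.h>

    /* Append fmt/val into buf; safe even after an earlier truncation. */
    static int buf_append(char *buf, int len, int size,
                          const char *fmt, unsigned int val)
    {
            int n;

            if (len >= size)        /* buffer already full: stop writing */
                    return len;
            n = snprintf(buf + len, size - len, fmt, val);
            return n < 0 ? len : len + n;
    }

    /* Usage: len = buf_append(buf, len, size, "ABORT: Xmt %08x\n", aborts); */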
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f9a566eaef04..5a7547f9d8d8 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -134,6 +134,8 @@ struct lpfc_nodelist {
134 struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ 134 struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
135 uint32_t fc4_prli_sent; 135 uint32_t fc4_prli_sent;
136 uint32_t upcall_flags; 136 uint32_t upcall_flags;
137#define NLP_WAIT_FOR_UNREG 0x1
138
137 uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ 139 uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
138#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ 140#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
139}; 141};
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 39d5b146202e..234c7c015982 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -858,6 +858,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
858 vport->fc_flag |= FC_PT2PT; 858 vport->fc_flag |= FC_PT2PT;
859 spin_unlock_irq(shost->host_lock); 859 spin_unlock_irq(shost->host_lock);
860 860
861 /* If we are pt2pt with another NPort, force NPIV off! */
862 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
863
861 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 864 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
862 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { 865 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
863 lpfc_unregister_fcf_prep(phba); 866 lpfc_unregister_fcf_prep(phba);
@@ -916,28 +919,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
916 spin_lock_irq(shost->host_lock); 919 spin_lock_irq(shost->host_lock);
917 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 920 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
918 spin_unlock_irq(shost->host_lock); 921 spin_unlock_irq(shost->host_lock);
919 } else 922
923 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
924 if (!mbox)
925 goto fail;
926
927 lpfc_config_link(phba, mbox);
928
929 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
930 mbox->vport = vport;
931 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
932 if (rc == MBX_NOT_FINISHED) {
933 mempool_free(mbox, phba->mbox_mem_pool);
934 goto fail;
935 }
936 } else {
920 /* This side will wait for the PLOGI, decrement ndlp reference 937 /* This side will wait for the PLOGI, decrement ndlp reference
921 * count indicating that ndlp can be released when other 938 * count indicating that ndlp can be released when other
922 * references to it are done. 939 * references to it are done.
923 */ 940 */
924 lpfc_nlp_put(ndlp); 941 lpfc_nlp_put(ndlp);
925 942
926 /* If we are pt2pt with another NPort, force NPIV off! */ 943 /* Start discovery - this should just do CLEAR_LA */
927 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 944 lpfc_disc_start(vport);
928
929 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
930 if (!mbox)
931 goto fail;
932
933 lpfc_config_link(phba, mbox);
934
935 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
936 mbox->vport = vport;
937 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
938 if (rc == MBX_NOT_FINISHED) {
939 mempool_free(mbox, phba->mbox_mem_pool);
940 goto fail;
941 } 945 }
942 946
943 return 0; 947 return 0;
@@ -1030,30 +1034,31 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1030 1034
1031stop_rr_fcf_flogi: 1035stop_rr_fcf_flogi:
1032 /* FLOGI failure */ 1036 /* FLOGI failure */
1033 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1037 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1034 "2858 FLOGI failure Status:x%x/x%x TMO:x%x " 1038 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1035 "Data x%x x%x\n", 1039 IOERR_LOOP_OPEN_FAILURE)))
1036 irsp->ulpStatus, irsp->un.ulpWord[4], 1040 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1037 irsp->ulpTimeout, phba->hba_flag, 1041 "2858 FLOGI failure Status:x%x/x%x "
1038 phba->fcf.fcf_flag); 1042 "TMO:x%x Data x%x x%x\n",
1043 irsp->ulpStatus, irsp->un.ulpWord[4],
1044 irsp->ulpTimeout, phba->hba_flag,
1045 phba->fcf.fcf_flag);
1039 1046
1040 /* Check for retry */ 1047 /* Check for retry */
1041 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 1048 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1042 goto out; 1049 goto out;
1043 1050
1044 /* FLOGI failure */
1045 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1046 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
1047 irsp->ulpStatus, irsp->un.ulpWord[4],
1048 irsp->ulpTimeout);
1049
1050
1051 /* If this is not a loop open failure, bail out */ 1051 /* If this is not a loop open failure, bail out */
1052 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 1052 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1053 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 1053 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1054 IOERR_LOOP_OPEN_FAILURE))) 1054 IOERR_LOOP_OPEN_FAILURE)))
1055 goto flogifail; 1055 goto flogifail;
1056 1056
1057 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
1058 "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
1059 irsp->ulpStatus, irsp->un.ulpWord[4],
1060 irsp->ulpTimeout);
1061
1057 /* FLOGI failed, so there is no fabric */ 1062 /* FLOGI failed, so there is no fabric */
1058 spin_lock_irq(shost->host_lock); 1063 spin_lock_irq(shost->host_lock);
1059 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 1064 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1670,6 +1675,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1670 1675
1671 /* Two ndlps cannot have the same did on the nodelist */ 1676 /* Two ndlps cannot have the same did on the nodelist */
1672 ndlp->nlp_DID = keepDID; 1677 ndlp->nlp_DID = keepDID;
1678 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1673 if (phba->sli_rev == LPFC_SLI_REV4 && 1679 if (phba->sli_rev == LPFC_SLI_REV4 &&
1674 active_rrqs_xri_bitmap) 1680 active_rrqs_xri_bitmap)
1675 memcpy(ndlp->active_rrqs_xri_bitmap, 1681 memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -2088,6 +2094,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2088 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2094 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2089 spin_lock_irq(shost->host_lock); 2095 spin_lock_irq(shost->host_lock);
2090 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2096 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2097
2098 /* Driver supports multiple FC4 types. Counters matter. */
2099 vport->fc_prli_sent--;
2100 ndlp->fc4_prli_sent--;
2091 spin_unlock_irq(shost->host_lock); 2101 spin_unlock_irq(shost->host_lock);
2092 2102
2093 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2103 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -2095,9 +2105,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2095 irsp->ulpStatus, irsp->un.ulpWord[4], 2105 irsp->ulpStatus, irsp->un.ulpWord[4],
2096 ndlp->nlp_DID); 2106 ndlp->nlp_DID);
2097 2107
2098 /* Ddriver supports multiple FC4 types. Counters matter. */
2099 vport->fc_prli_sent--;
2100
2101 /* PRLI completes to NPort <nlp_DID> */ 2108 /* PRLI completes to NPort <nlp_DID> */
2102 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2109 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2103 "0103 PRLI completes to NPort x%06x " 2110 "0103 PRLI completes to NPort x%06x "
@@ -2111,7 +2118,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2111 2118
2112 if (irsp->ulpStatus) { 2119 if (irsp->ulpStatus) {
2113 /* Check for retry */ 2120 /* Check for retry */
2114 ndlp->fc4_prli_sent--;
2115 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2121 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2116 /* ELS command is being retried */ 2122 /* ELS command is being retried */
2117 goto out; 2123 goto out;
@@ -2190,6 +2196,15 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2190 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2196 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
2191 local_nlp_type = ndlp->nlp_fc4_type; 2197 local_nlp_type = ndlp->nlp_fc4_type;
2192 2198
2199 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2200 * fields here before any of them can complete.
2201 */
2202 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2203 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2204 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2205 ndlp->nlp_flag &= ~NLP_FIRSTBURST;
2206 ndlp->nvme_fb_size = 0;
2207
2193 send_next_prli: 2208 send_next_prli:
2194 if (local_nlp_type & NLP_FC4_FCP) { 2209 if (local_nlp_type & NLP_FC4_FCP) {
2195 /* Payload is 4 + 16 = 20 x14 bytes. */ 2210 /* Payload is 4 + 16 = 20 x14 bytes. */
@@ -2298,6 +2313,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2298 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2313 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2299 spin_lock_irq(shost->host_lock); 2314 spin_lock_irq(shost->host_lock);
2300 ndlp->nlp_flag |= NLP_PRLI_SND; 2315 ndlp->nlp_flag |= NLP_PRLI_SND;
2316
2317 /* The vport counters are used for lpfc_scan_finished, but
2318 * the ndlp is used to track outstanding PRLIs for different
2319 * FC4 types.
2320 */
2321 vport->fc_prli_sent++;
2322 ndlp->fc4_prli_sent++;
2301 spin_unlock_irq(shost->host_lock); 2323 spin_unlock_irq(shost->host_lock);
2302 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2324 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2303 IOCB_ERROR) { 2325 IOCB_ERROR) {
@@ -2308,12 +2330,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2308 return 1; 2330 return 1;
2309 } 2331 }
2310 2332
2311 /* The vport counters are used for lpfc_scan_finished, but
2312 * the ndlp is used to track outstanding PRLIs for different
2313 * FC4 types.
2314 */
2315 vport->fc_prli_sent++;
2316 ndlp->fc4_prli_sent++;
2317 2333
2318 /* The driver supports 2 FC4 types. Make sure 2334 /* The driver supports 2 FC4 types. Make sure
2319 * a PRLI is issued for all types before exiting. 2335 * a PRLI is issued for all types before exiting.
@@ -2951,8 +2967,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2951 /* This will cause the callback-function lpfc_cmpl_els_cmd to 2967 /* This will cause the callback-function lpfc_cmpl_els_cmd to
2952 * trigger the release of node. 2968 * trigger the release of node.
2953 */ 2969 */
2954 2970 if (!(vport->fc_flag & FC_PT2PT))
2955 lpfc_nlp_put(ndlp); 2971 lpfc_nlp_put(ndlp);
2956 return 0; 2972 return 0;
2957} 2973}
2958 2974
@@ -6172,9 +6188,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6172 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6188 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6173 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6189 /* send RECOVERY event for ALL nodes that match RSCN payload */
6174 lpfc_rscn_recovery_check(vport); 6190 lpfc_rscn_recovery_check(vport);
6175 spin_lock_irq(shost->host_lock);
6176 vport->fc_flag &= ~FC_RSCN_DEFERRED;
6177 spin_unlock_irq(shost->host_lock);
6178 return 0; 6191 return 0;
6179 } 6192 }
6180 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6193 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6849,7 +6862,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6849 return 1; 6862 return 1;
6850 6863
6851 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6864 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6852 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6865 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6853 pcmd += sizeof(uint32_t); /* Skip past command */ 6866 pcmd += sizeof(uint32_t); /* Skip past command */
6854 6867
6855 /* use the command's xri in the response */ 6868 /* use the command's xri in the response */
@@ -8060,13 +8073,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8060 rjt_exp = LSEXP_NOTHING_MORE; 8073 rjt_exp = LSEXP_NOTHING_MORE;
8061 break; 8074 break;
8062 } 8075 }
8063
8064 /* NVMET accepts NVME PRLI only. Reject FCP PRLI */
8065 if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
8066 rjt_err = LSRJT_CMD_UNSUPPORTED;
8067 rjt_exp = LSEXP_REQ_UNSUPPORTED;
8068 break;
8069 }
8070 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 8076 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
8071 break; 8077 break;
8072 case ELS_CMD_LIRR: 8078 case ELS_CMD_LIRR:
@@ -8149,9 +8155,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8149 lpfc_nlp_put(ndlp); 8155 lpfc_nlp_put(ndlp);
8150 break; 8156 break;
8151 case ELS_CMD_REC: 8157 case ELS_CMD_REC:
8152 /* receive this due to exchange closed */ 8158 /* receive this due to exchange closed */
8153 rjt_err = LSRJT_UNABLE_TPC; 8159 rjt_err = LSRJT_UNABLE_TPC;
8154 rjt_exp = LSEXP_INVALID_OX_RX; 8160 rjt_exp = LSEXP_INVALID_OX_RX;
8155 break; 8161 break;
8156 default: 8162 default:
8157 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8163 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
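Editor's note: across the lpfc_els.c hunks, the PRLI accounting moves: vport->fc_prli_sent and ndlp->fc4_prli_sent are now raised under the host lock before lpfc_sli_issue_iocb() and dropped unconditionally in lpfc_cmpl_els_prli(), instead of being raised after a successful issue and decremented only on error status. Counting before submission closes the window in which a fast completion could decrement a counter that was never incremented. A generic sketch of the pattern (the failure-path rollback here is the sketch's own bookkeeping, not lifted from the patch):

    #include <linux/spinlock.h>

    struct tracker {
            spinlock_t lock;
            int outstanding;
    };

    static int hw_submit(void *req)
    {
            (void)req;
            return 0;       /* pretend the hardware accepted the request */
    }

    static int submit_counted(struct tracker *t, void *req)
    {
            spin_lock_irq(&t->lock);
            t->outstanding++;               /* count first... */
            spin_unlock_irq(&t->lock);

            if (hw_submit(req)) {           /* ...so a failed issue must undo it */
                    spin_lock_irq(&t->lock);
                    t->outstanding--;
                    spin_unlock_irq(&t->lock);
                    return -1;
            }
            return 0;       /* the completion handler decrements on its path */
    }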
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2bafde2b7cfe..b159a5c4e388 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -640,8 +640,6 @@ lpfc_work_done(struct lpfc_hba *phba)
640 lpfc_handle_rrq_active(phba); 640 lpfc_handle_rrq_active(phba);
641 if (phba->hba_flag & FCP_XRI_ABORT_EVENT) 641 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
642 lpfc_sli4_fcp_xri_abort_event_proc(phba); 642 lpfc_sli4_fcp_xri_abort_event_proc(phba);
643 if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
644 lpfc_sli4_nvme_xri_abort_event_proc(phba);
645 if (phba->hba_flag & ELS_XRI_ABORT_EVENT) 643 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
646 lpfc_sli4_els_xri_abort_event_proc(phba); 644 lpfc_sli4_els_xri_abort_event_proc(phba);
647 if (phba->hba_flag & ASYNC_EVENT) 645 if (phba->hba_flag & ASYNC_EVENT)
@@ -4178,12 +4176,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4178 4176
4179 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { 4177 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4180 vport->phba->nport_event_cnt++; 4178 vport->phba->nport_event_cnt++;
4181 if (vport->phba->nvmet_support == 0) 4179 if (vport->phba->nvmet_support == 0) {
4182 /* Start devloss */ 4180 /* Start devloss if target. */
4183 lpfc_nvme_unregister_port(vport, ndlp); 4181 if (ndlp->nlp_type & NLP_NVME_TARGET)
4184 else 4182 lpfc_nvme_unregister_port(vport, ndlp);
4183 } else {
4185 /* NVMET has no upcall. */ 4184 /* NVMET has no upcall. */
4186 lpfc_nlp_put(ndlp); 4185 lpfc_nlp_put(ndlp);
4186 }
4187 } 4187 }
4188 } 4188 }
4189 4189
@@ -4207,11 +4207,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4207 ndlp->nlp_fc4_type & NLP_FC4_NVME) { 4207 ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4208 if (vport->phba->nvmet_support == 0) { 4208 if (vport->phba->nvmet_support == 0) {
4209 /* Register this rport with the transport. 4209 /* Register this rport with the transport.
4210 * Initiators take the NDLP ref count in 4210 * Only NVME Target Rports are registered with
4211 * the register. 4211 * the transport.
4212 */ 4212 */
4213 vport->phba->nport_event_cnt++; 4213 if (ndlp->nlp_type & NLP_NVME_TARGET) {
4214 lpfc_nvme_register_port(vport, ndlp); 4214 vport->phba->nport_event_cnt++;
4215 lpfc_nvme_register_port(vport, ndlp);
4216 }
4215 } else { 4217 } else {
4216 /* Just take an NDLP ref count since the 4218 /* Just take an NDLP ref count since the
4217 * target does not register rports. 4219 * target does not register rports.
@@ -5838,9 +5840,12 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
5838 if (filter(ndlp, param)) { 5840 if (filter(ndlp, param)) {
5839 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5841 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5840 "3185 FIND node filter %p DID " 5842 "3185 FIND node filter %p DID "
5841 "Data: x%p x%x x%x\n", 5843 "ndlp %p did x%x flg x%x st x%x "
5844 "xri x%x type x%x rpi x%x\n",
5842 filter, ndlp, ndlp->nlp_DID, 5845 filter, ndlp, ndlp->nlp_DID,
5843 ndlp->nlp_flag); 5846 ndlp->nlp_flag, ndlp->nlp_state,
5847 ndlp->nlp_xri, ndlp->nlp_type,
5848 ndlp->nlp_rpi);
5844 return ndlp; 5849 return ndlp;
5845 } 5850 }
5846 } 5851 }
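Editor's note: the lpfc_hbadisc.c hunks above gate both sides of rport handling on the node's type bits -- only NLP_NVME_TARGET nodes are registered with or unregistered from the NVME transport, while NVMET simply drops its node reference -- and fatten the FIND-node trace with state, XRI, type, and RPI. The gating shape, sketched stand-alone (the bit value is an assumption):

    #include <stdbool.h>

    #define NLP_NVME_TARGET 0x20    /* assumed bit value, not the driver's */

    static void transport_register(void)
    {
            /* stand-in for lpfc_nvme_register_port() */
    }

    static void maybe_register(unsigned int nlp_type, bool nvmet_support)
    {
            if (nvmet_support)
                    return;         /* NVMET keeps a node ref, no rport */
            if (nlp_type & NLP_NVME_TARGET)
                    transport_register();
    }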
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2b145966c73f..73c2f6971d2b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1122,6 +1122,7 @@ struct cq_context {
1122#define LPFC_CQ_CNT_256 0x0 1122#define LPFC_CQ_CNT_256 0x0
1123#define LPFC_CQ_CNT_512 0x1 1123#define LPFC_CQ_CNT_512 0x1
1124#define LPFC_CQ_CNT_1024 0x2 1124#define LPFC_CQ_CNT_1024 0x2
1125#define LPFC_CQ_CNT_WORD7 0x3
1125 uint32_t word1; 1126 uint32_t word1;
1126#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */ 1127#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
1127#define lpfc_cq_eq_id_MASK 0x000000FF 1128#define lpfc_cq_eq_id_MASK 0x000000FF
@@ -1129,7 +1130,7 @@ struct cq_context {
1129#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */ 1130#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
1130#define lpfc_cq_eq_id_2_MASK 0x0000FFFF 1131#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
1131#define lpfc_cq_eq_id_2_WORD word1 1132#define lpfc_cq_eq_id_2_WORD word1
1132 uint32_t reserved0; 1133 uint32_t lpfc_cq_context_count; /* Version 2 Only */
1133 uint32_t reserved1; 1134 uint32_t reserved1;
1134}; 1135};
1135 1136
@@ -1193,6 +1194,9 @@ struct lpfc_mbx_cq_create_set {
1193#define lpfc_mbx_cq_create_set_arm_SHIFT 31 1194#define lpfc_mbx_cq_create_set_arm_SHIFT 31
1194#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001 1195#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001
1195#define lpfc_mbx_cq_create_set_arm_WORD word2 1196#define lpfc_mbx_cq_create_set_arm_WORD word2
1197#define lpfc_mbx_cq_create_set_cq_cnt_SHIFT 16
1198#define lpfc_mbx_cq_create_set_cq_cnt_MASK 0x00007FFF
1199#define lpfc_mbx_cq_create_set_cq_cnt_WORD word2
1196#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0 1200#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0
1197#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF 1201#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF
1198#define lpfc_mbx_cq_create_set_num_cq_WORD word2 1202#define lpfc_mbx_cq_create_set_num_cq_WORD word2
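Editor's note: the lpfc_hw4.h hunk above follows the driver's SHIFT/MASK/WORD convention for register bitfields -- here a 15-bit version-2 CQ entry count in word2, plus the LPFC_CQ_CNT_WORD7 encoding that says the count lives in a separate word. A self-contained sketch of how such a triple packs and unpacks a field (helper names are illustrative; lpfc routes these through its bf_set()/bf_get() macros):

    #include <stdint.h>

    /* 15-bit count field occupying bits 16..30 of word2 */
    #define cq_cnt_SHIFT 16
    #define cq_cnt_MASK  0x00007FFF

    static inline void set_cq_cnt(uint32_t *word2, uint32_t val)
    {
            *word2 = (*word2 & ~((uint32_t)cq_cnt_MASK << cq_cnt_SHIFT)) |
                     ((val & cq_cnt_MASK) << cq_cnt_SHIFT);
    }

    static inline uint32_t get_cq_cnt(uint32_t word2)
    {
            return (word2 >> cq_cnt_SHIFT) & cq_cnt_MASK;
    }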
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2b7ea7e53e12..f539c554588c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1034,6 +1034,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1034 LIST_HEAD(nvmet_aborts); 1034 LIST_HEAD(nvmet_aborts);
1035 unsigned long iflag = 0; 1035 unsigned long iflag = 0;
1036 struct lpfc_sglq *sglq_entry = NULL; 1036 struct lpfc_sglq *sglq_entry = NULL;
1037 int cnt;
1037 1038
1038 1039
1039 lpfc_sli_hbqbuf_free_all(phba); 1040 lpfc_sli_hbqbuf_free_all(phba);
@@ -1090,11 +1091,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1090 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 1091 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1091 1092
1092 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 1093 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1094 cnt = 0;
1093 list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) { 1095 list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
1094 psb->pCmd = NULL; 1096 psb->pCmd = NULL;
1095 psb->status = IOSTAT_SUCCESS; 1097 psb->status = IOSTAT_SUCCESS;
1098 cnt++;
1096 } 1099 }
1097 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); 1100 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
1101 phba->put_nvme_bufs += cnt;
1098 list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put); 1102 list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
1099 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); 1103 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
1100 1104
@@ -3339,6 +3343,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
3339 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3343 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3340 &phba->lpfc_nvme_buf_list_put, list) { 3344 &phba->lpfc_nvme_buf_list_put, list) {
3341 list_del(&lpfc_ncmd->list); 3345 list_del(&lpfc_ncmd->list);
3346 phba->put_nvme_bufs--;
3342 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, 3347 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3343 lpfc_ncmd->dma_handle); 3348 lpfc_ncmd->dma_handle);
3344 kfree(lpfc_ncmd); 3349 kfree(lpfc_ncmd);
@@ -3350,6 +3355,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
3350 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 3355 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3351 &phba->lpfc_nvme_buf_list_get, list) { 3356 &phba->lpfc_nvme_buf_list_get, list) {
3352 list_del(&lpfc_ncmd->list); 3357 list_del(&lpfc_ncmd->list);
3358 phba->get_nvme_bufs--;
3353 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, 3359 dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3354 lpfc_ncmd->dma_handle); 3360 lpfc_ncmd->dma_handle);
3355 kfree(lpfc_ncmd); 3361 kfree(lpfc_ncmd);
@@ -3754,9 +3760,11 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
3754 uint16_t i, lxri, els_xri_cnt; 3760 uint16_t i, lxri, els_xri_cnt;
3755 uint16_t nvme_xri_cnt, nvme_xri_max; 3761 uint16_t nvme_xri_cnt, nvme_xri_max;
3756 LIST_HEAD(nvme_sgl_list); 3762 LIST_HEAD(nvme_sgl_list);
3757 int rc; 3763 int rc, cnt;
3758 3764
3759 phba->total_nvme_bufs = 0; 3765 phba->total_nvme_bufs = 0;
3766 phba->get_nvme_bufs = 0;
3767 phba->put_nvme_bufs = 0;
3760 3768
3761 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 3769 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3762 return 0; 3770 return 0;
@@ -3780,6 +3788,9 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
3780 spin_lock(&phba->nvme_buf_list_put_lock); 3788 spin_lock(&phba->nvme_buf_list_put_lock);
3781 list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list); 3789 list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
3782 list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list); 3790 list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
3791 cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
3792 phba->get_nvme_bufs = 0;
3793 phba->put_nvme_bufs = 0;
3783 spin_unlock(&phba->nvme_buf_list_put_lock); 3794 spin_unlock(&phba->nvme_buf_list_put_lock);
3784 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3795 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3785 3796
@@ -3824,6 +3835,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
3824 spin_lock_irq(&phba->nvme_buf_list_get_lock); 3835 spin_lock_irq(&phba->nvme_buf_list_get_lock);
3825 spin_lock(&phba->nvme_buf_list_put_lock); 3836 spin_lock(&phba->nvme_buf_list_put_lock);
3826 list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get); 3837 list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
3838 phba->get_nvme_bufs = cnt;
3827 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); 3839 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
3828 spin_unlock(&phba->nvme_buf_list_put_lock); 3840 spin_unlock(&phba->nvme_buf_list_put_lock);
3829 spin_unlock_irq(&phba->nvme_buf_list_get_lock); 3841 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -5609,8 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5609 /* Initialize the NVME buffer list used by driver for NVME IO */ 5621 /* Initialize the NVME buffer list used by driver for NVME IO */
5610 spin_lock_init(&phba->nvme_buf_list_get_lock); 5622 spin_lock_init(&phba->nvme_buf_list_get_lock);
5611 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get); 5623 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
5624 phba->get_nvme_bufs = 0;
5612 spin_lock_init(&phba->nvme_buf_list_put_lock); 5625 spin_lock_init(&phba->nvme_buf_list_put_lock);
5613 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); 5626 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
5627 phba->put_nvme_bufs = 0;
5614 } 5628 }
5615 5629
5616 /* Initialize the fabric iocb list */ 5630 /* Initialize the fabric iocb list */
@@ -5806,6 +5820,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5806 struct lpfc_mqe *mqe; 5820 struct lpfc_mqe *mqe;
5807 int longs; 5821 int longs;
5808 int fof_vectors = 0; 5822 int fof_vectors = 0;
5823 int extra;
5809 uint64_t wwn; 5824 uint64_t wwn;
5810 5825
5811 phba->sli4_hba.num_online_cpu = num_online_cpus(); 5826 phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5860,13 +5875,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5860 */ 5875 */
5861 5876
5862 /* 5877 /*
5878 * 1 for cmd, 1 for rsp, NVME adds an extra one
5879 * for boundary conditions in its max_sgl_segment template.
5880 */
5881 extra = 2;
5882 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
5883 extra++;
5884
5885 /*
5863 * It doesn't matter what family our adapter is in, we are 5886 * It doesn't matter what family our adapter is in, we are
5864 * limited to 2 Pages, 512 SGEs, for our SGL. 5887 * limited to 2 Pages, 512 SGEs, for our SGL.
5865 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 5888 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
5866 */ 5889 */
5867 max_buf_size = (2 * SLI4_PAGE_SIZE); 5890 max_buf_size = (2 * SLI4_PAGE_SIZE);
5868 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) 5891 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
5869 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; 5892 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
5870 5893
5871 /* 5894 /*
5872 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size 5895 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5922,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5899 */ 5922 */
5900 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5923 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5901 sizeof(struct fcp_rsp) + 5924 sizeof(struct fcp_rsp) +
5902 ((phba->cfg_sg_seg_cnt + 2) * 5925 ((phba->cfg_sg_seg_cnt + extra) *
5903 sizeof(struct sli4_sge)); 5926 sizeof(struct sli4_sge));
5904 5927
5905 /* Total SGEs for scsi_sg_list */ 5928 /* Total SGEs for scsi_sg_list */
5906 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5929 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
5907 5930
5908 /* 5931 /*
5909 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only 5932 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
5910 * need to post 1 page for the SGL. 5933 * need to post 1 page for the SGL.
5911 */ 5934 */
5912 } 5935 }
@@ -5947,9 +5970,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5947 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); 5970 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
5948 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 5971 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
5949 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); 5972 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
5950
5951 /* Fast-path XRI aborted CQ Event work queue list */
5952 INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
5953 } 5973 }
5954 5974
5955 /* This abort list used by worker thread */ 5975 /* This abort list used by worker thread */
@@ -7936,8 +7956,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
7936 phba->cfg_fcp_io_channel = io_channel; 7956 phba->cfg_fcp_io_channel = io_channel;
7937 if (phba->cfg_nvme_io_channel > io_channel) 7957 if (phba->cfg_nvme_io_channel > io_channel)
7938 phba->cfg_nvme_io_channel = io_channel; 7958 phba->cfg_nvme_io_channel = io_channel;
7939 if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq) 7959 if (phba->nvmet_support) {
7940 phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; 7960 if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
7961 phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
7962 }
7963 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7964 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7941 7965
7942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7943 "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n", 7967 "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -7958,10 +7982,10 @@ static int
7958lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) 7982lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
7959{ 7983{
7960 struct lpfc_queue *qdesc; 7984 struct lpfc_queue *qdesc;
7961 int cnt;
7962 7985
7963 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7986 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
7964 phba->sli4_hba.cq_ecount); 7987 phba->sli4_hba.cq_esize,
7988 LPFC_CQE_EXP_COUNT);
7965 if (!qdesc) { 7989 if (!qdesc) {
7966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7967 "0508 Failed allocate fast-path NVME CQ (%d)\n", 7991 "0508 Failed allocate fast-path NVME CQ (%d)\n",
@@ -7970,8 +7994,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
7970 } 7994 }
7971 phba->sli4_hba.nvme_cq[wqidx] = qdesc; 7995 phba->sli4_hba.nvme_cq[wqidx] = qdesc;
7972 7996
7973 cnt = LPFC_NVME_WQSIZE; 7997 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
7974 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt); 7998 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
7975 if (!qdesc) { 7999 if (!qdesc) {
7976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8000 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7977 "0509 Failed allocate fast-path NVME WQ (%d)\n", 8001 "0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -7987,11 +8011,18 @@ static int
7987lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) 8011lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
7988{ 8012{
7989 struct lpfc_queue *qdesc; 8013 struct lpfc_queue *qdesc;
7990 uint32_t wqesize;
7991 8014
7992 /* Create Fast Path FCP CQs */ 8015 /* Create Fast Path FCP CQs */
7993 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 8016 if (phba->fcp_embed_io)
7994 phba->sli4_hba.cq_ecount); 8017 /* Increase the CQ size when WQEs contain an embedded cdb */
8018 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8019 phba->sli4_hba.cq_esize,
8020 LPFC_CQE_EXP_COUNT);
8021
8022 else
8023 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8024 phba->sli4_hba.cq_esize,
8025 phba->sli4_hba.cq_ecount);
7995 if (!qdesc) { 8026 if (!qdesc) {
7996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7997 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); 8028 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
@@ -8000,9 +8031,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8000 phba->sli4_hba.fcp_cq[wqidx] = qdesc; 8031 phba->sli4_hba.fcp_cq[wqidx] = qdesc;
8001 8032
8002 /* Create Fast Path FCP WQs */ 8033 /* Create Fast Path FCP WQs */
8003 wqesize = (phba->fcp_embed_io) ? 8034 if (phba->fcp_embed_io)
8004 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 8035 /* Increase the WQ size when WQEs contain an embedded cdb */
8005 qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount); 8036 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8037 LPFC_WQE128_SIZE,
8038 LPFC_WQE_EXP_COUNT);
8039 else
8040 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8041 phba->sli4_hba.wq_esize,
8042 phba->sli4_hba.wq_ecount);
8006 if (!qdesc) { 8043 if (!qdesc) {
8007 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8008 "0503 Failed allocate fast-path FCP WQ (%d)\n", 8045 "0503 Failed allocate fast-path FCP WQ (%d)\n",
@@ -8173,7 +8210,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8173 /* Create HBA Event Queues (EQs) */ 8210 /* Create HBA Event Queues (EQs) */
8174 for (idx = 0; idx < io_channel; idx++) { 8211 for (idx = 0; idx < io_channel; idx++) {
8175 /* Create EQs */ 8212 /* Create EQs */
8176 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 8213 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8214 phba->sli4_hba.eq_esize,
8177 phba->sli4_hba.eq_ecount); 8215 phba->sli4_hba.eq_ecount);
8178 if (!qdesc) { 8216 if (!qdesc) {
8179 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8217 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8196,8 +8234,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8196 if (phba->nvmet_support) { 8234 if (phba->nvmet_support) {
8197 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8235 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8198 qdesc = lpfc_sli4_queue_alloc(phba, 8236 qdesc = lpfc_sli4_queue_alloc(phba,
8199 phba->sli4_hba.cq_esize, 8237 LPFC_DEFAULT_PAGE_SIZE,
8200 phba->sli4_hba.cq_ecount); 8238 phba->sli4_hba.cq_esize,
8239 phba->sli4_hba.cq_ecount);
8201 if (!qdesc) { 8240 if (!qdesc) {
8202 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8203 "3142 Failed allocate NVME " 8242 "3142 Failed allocate NVME "
@@ -8213,7 +8252,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8213 */ 8252 */
8214 8253
8215 /* Create slow-path Mailbox Command Complete Queue */ 8254 /* Create slow-path Mailbox Command Complete Queue */
8216 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 8255 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8256 phba->sli4_hba.cq_esize,
8217 phba->sli4_hba.cq_ecount); 8257 phba->sli4_hba.cq_ecount);
8218 if (!qdesc) { 8258 if (!qdesc) {
8219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8223,7 +8263,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8223 phba->sli4_hba.mbx_cq = qdesc; 8263 phba->sli4_hba.mbx_cq = qdesc;
8224 8264
8225 /* Create slow-path ELS Complete Queue */ 8265 /* Create slow-path ELS Complete Queue */
8226 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 8266 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8267 phba->sli4_hba.cq_esize,
8227 phba->sli4_hba.cq_ecount); 8268 phba->sli4_hba.cq_ecount);
8228 if (!qdesc) { 8269 if (!qdesc) {
8229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8270 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8239,7 +8280,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8239 8280
8240 /* Create Mailbox Command Queue */ 8281 /* Create Mailbox Command Queue */
8241 8282
8242 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 8283 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8284 phba->sli4_hba.mq_esize,
8243 phba->sli4_hba.mq_ecount); 8285 phba->sli4_hba.mq_ecount);
8244 if (!qdesc) { 8286 if (!qdesc) {
8245 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8253,7 +8295,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8253 */ 8295 */
8254 8296
8255 /* Create slow-path ELS Work Queue */ 8297 /* Create slow-path ELS Work Queue */
8256 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 8298 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8299 phba->sli4_hba.wq_esize,
8257 phba->sli4_hba.wq_ecount); 8300 phba->sli4_hba.wq_ecount);
8258 if (!qdesc) { 8301 if (!qdesc) {
8259 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8302 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8265,7 +8308,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8265 8308
8266 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8309 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8267 /* Create NVME LS Complete Queue */ 8310 /* Create NVME LS Complete Queue */
8268 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 8311 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8312 phba->sli4_hba.cq_esize,
8269 phba->sli4_hba.cq_ecount); 8313 phba->sli4_hba.cq_ecount);
8270 if (!qdesc) { 8314 if (!qdesc) {
8271 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8315 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8275,7 +8319,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8275 phba->sli4_hba.nvmels_cq = qdesc; 8319 phba->sli4_hba.nvmels_cq = qdesc;
8276 8320
8277 /* Create NVME LS Work Queue */ 8321 /* Create NVME LS Work Queue */
8278 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 8322 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8323 phba->sli4_hba.wq_esize,
8279 phba->sli4_hba.wq_ecount); 8324 phba->sli4_hba.wq_ecount);
8280 if (!qdesc) { 8325 if (!qdesc) {
8281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8291,7 +8336,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8291 */ 8336 */
8292 8337
8293 /* Create Receive Queue for header */ 8338 /* Create Receive Queue for header */
8294 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 8339 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8340 phba->sli4_hba.rq_esize,
8295 phba->sli4_hba.rq_ecount); 8341 phba->sli4_hba.rq_ecount);
8296 if (!qdesc) { 8342 if (!qdesc) {
8297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8301,7 +8347,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8301 phba->sli4_hba.hdr_rq = qdesc; 8347 phba->sli4_hba.hdr_rq = qdesc;
8302 8348
8303 /* Create Receive Queue for data */ 8349 /* Create Receive Queue for data */
8304 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 8350 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
8351 phba->sli4_hba.rq_esize,
8305 phba->sli4_hba.rq_ecount); 8352 phba->sli4_hba.rq_ecount);
8306 if (!qdesc) { 8353 if (!qdesc) {
8307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8314,6 +8361,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8314 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { 8361 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8315 /* Create NVMET Receive Queue for header */ 8362 /* Create NVMET Receive Queue for header */
8316 qdesc = lpfc_sli4_queue_alloc(phba, 8363 qdesc = lpfc_sli4_queue_alloc(phba,
8364 LPFC_DEFAULT_PAGE_SIZE,
8317 phba->sli4_hba.rq_esize, 8365 phba->sli4_hba.rq_esize,
8318 LPFC_NVMET_RQE_DEF_COUNT); 8366 LPFC_NVMET_RQE_DEF_COUNT);
8319 if (!qdesc) { 8367 if (!qdesc) {
@@ -8339,6 +8387,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8339 8387
8340 /* Create NVMET Receive Queue for data */ 8388 /* Create NVMET Receive Queue for data */
8341 qdesc = lpfc_sli4_queue_alloc(phba, 8389 qdesc = lpfc_sli4_queue_alloc(phba,
8390 LPFC_DEFAULT_PAGE_SIZE,
8342 phba->sli4_hba.rq_esize, 8391 phba->sli4_hba.rq_esize,
8343 LPFC_NVMET_RQE_DEF_COUNT); 8392 LPFC_NVMET_RQE_DEF_COUNT);
8344 if (!qdesc) { 8393 if (!qdesc) {
@@ -8437,13 +8486,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
8437 /* Release NVME CQ mapping array */ 8486 /* Release NVME CQ mapping array */
8438 lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map); 8487 lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
8439 8488
8440 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, 8489 if (phba->nvmet_support) {
8441 phba->cfg_nvmet_mrq); 8490 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
8491 phba->cfg_nvmet_mrq);
8442 8492
8443 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr, 8493 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
8444 phba->cfg_nvmet_mrq); 8494 phba->cfg_nvmet_mrq);
8445 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data, 8495 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
8446 phba->cfg_nvmet_mrq); 8496 phba->cfg_nvmet_mrq);
8497 }
8447 8498
8448 /* Release mailbox command work queue */ 8499 /* Release mailbox command work queue */
8449 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq); 8500 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -8514,6 +8565,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
8514 qidx, (uint32_t)rc); 8565 qidx, (uint32_t)rc);
8515 return rc; 8566 return rc;
8516 } 8567 }
8568 cq->chann = qidx;
8517 8569
8518 if (qtype != LPFC_MBOX) { 8570 if (qtype != LPFC_MBOX) {
8519 /* Setup nvme_cq_map for fast lookup */ 8571 /* Setup nvme_cq_map for fast lookup */
@@ -8533,6 +8585,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
8533 /* no need to tear down cq - caller will do so */ 8585 /* no need to tear down cq - caller will do so */
8534 return rc; 8586 return rc;
8535 } 8587 }
8588 wq->chann = qidx;
8536 8589
8537 /* Bind this CQ/WQ to the NVME ring */ 8590 /* Bind this CQ/WQ to the NVME ring */
8538 pring = wq->pring; 8591 pring = wq->pring;
@@ -8773,6 +8826,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
8773 "rc = 0x%x\n", (uint32_t)rc); 8826 "rc = 0x%x\n", (uint32_t)rc);
8774 goto out_destroy; 8827 goto out_destroy;
8775 } 8828 }
8829 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
8830
8776 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8831 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8777 "6090 NVMET CQ setup: cq-id=%d, " 8832 "6090 NVMET CQ setup: cq-id=%d, "
8778 "parent eq-id=%d\n", 8833 "parent eq-id=%d\n",
@@ -8994,19 +9049,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
8994 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 9049 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
8995 lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]); 9050 lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
8996 9051
8997 /* Unset NVMET MRQ queue */ 9052 if (phba->nvmet_support) {
8998 if (phba->sli4_hba.nvmet_mrq_hdr) { 9053 /* Unset NVMET MRQ queue */
8999 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9054 if (phba->sli4_hba.nvmet_mrq_hdr) {
9000 lpfc_rq_destroy(phba, 9055 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9056 lpfc_rq_destroy(
9057 phba,
9001 phba->sli4_hba.nvmet_mrq_hdr[qidx], 9058 phba->sli4_hba.nvmet_mrq_hdr[qidx],
9002 phba->sli4_hba.nvmet_mrq_data[qidx]); 9059 phba->sli4_hba.nvmet_mrq_data[qidx]);
9003 } 9060 }
9004 9061
9005 /* Unset NVMET CQ Set complete queue */ 9062 /* Unset NVMET CQ Set complete queue */
9006 if (phba->sli4_hba.nvmet_cqset) { 9063 if (phba->sli4_hba.nvmet_cqset) {
9007 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) 9064 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
9008 lpfc_cq_destroy(phba, 9065 lpfc_cq_destroy(
9009 phba->sli4_hba.nvmet_cqset[qidx]); 9066 phba, phba->sli4_hba.nvmet_cqset[qidx]);
9067 }
9010 } 9068 }
9011 9069
9012 /* Unset FCP response complete queue */ 9070 /* Unset FCP response complete queue */
@@ -9175,11 +9233,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
9175 /* Pending ELS XRI abort events */ 9233 /* Pending ELS XRI abort events */
9176 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 9234 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
9177 &cqelist); 9235 &cqelist);
9178 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9179 /* Pending NVME XRI abort events */
9180 list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
9181 &cqelist);
9182 }
9183 /* Pending async events */ 9236
9184 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 9237 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
9185 &cqelist); 9238 &cqelist);
@@ -9421,44 +9474,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
9421 lpfc_sli4_bar0_register_memmap(phba, if_type); 9474 lpfc_sli4_bar0_register_memmap(phba, if_type);
9422 } 9475 }
9423 9476
9424 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 9477 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
9425 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 9478 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
9426 /* 9479 /*
9427 * Map SLI4 if type 0 HBA Control Register base to a kernel 9480 * Map SLI4 if type 0 HBA Control Register base to a
9428 * virtual address and setup the registers. 9481 * kernel virtual address and setup the registers.
9429 */ 9482 */
9430 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 9483 phba->pci_bar1_map = pci_resource_start(pdev,
9431 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 9484 PCI_64BIT_BAR2);
9432 phba->sli4_hba.ctrl_regs_memmap_p = 9485 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
9433 ioremap(phba->pci_bar1_map, bar1map_len); 9486 phba->sli4_hba.ctrl_regs_memmap_p =
9434 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 9487 ioremap(phba->pci_bar1_map,
9435 dev_printk(KERN_ERR, &pdev->dev, 9488 bar1map_len);
9436 "ioremap failed for SLI4 HBA control registers.\n"); 9489 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
9490 dev_err(&pdev->dev,
9491 "ioremap failed for SLI4 HBA "
9492 "control registers.\n");
9493 error = -ENOMEM;
9494 goto out_iounmap_conf;
9495 }
9496 phba->pci_bar2_memmap_p =
9497 phba->sli4_hba.ctrl_regs_memmap_p;
9498 lpfc_sli4_bar1_register_memmap(phba);
9499 } else {
9500 error = -ENOMEM;
9437 goto out_iounmap_conf; 9501 goto out_iounmap_conf;
9438 } 9502 }
9439 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
9440 lpfc_sli4_bar1_register_memmap(phba);
9441 } 9503 }
9442 9504
9443 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 9505 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
9444 (pci_resource_start(pdev, PCI_64BIT_BAR4))) { 9506 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
9445 /* 9507 /*
9446 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 9508 * Map SLI4 if type 0 HBA Doorbell Register base to
9447 * virtual address and setup the registers. 9509 * a kernel virtual address and setup the registers.
9448 */ 9510 */
9449 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 9511 phba->pci_bar2_map = pci_resource_start(pdev,
9450 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 9512 PCI_64BIT_BAR4);
9451 phba->sli4_hba.drbl_regs_memmap_p = 9513 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
9452 ioremap(phba->pci_bar2_map, bar2map_len); 9514 phba->sli4_hba.drbl_regs_memmap_p =
9453 if (!phba->sli4_hba.drbl_regs_memmap_p) { 9515 ioremap(phba->pci_bar2_map,
9454 dev_printk(KERN_ERR, &pdev->dev, 9516 bar2map_len);
9455 "ioremap failed for SLI4 HBA doorbell registers.\n"); 9517 if (!phba->sli4_hba.drbl_regs_memmap_p) {
9456 goto out_iounmap_ctrl; 9518 dev_err(&pdev->dev,
9457 } 9519 "ioremap failed for SLI4 HBA"
9458 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 9520 " doorbell registers.\n");
9459 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 9521 error = -ENOMEM;
9460 if (error) 9522 goto out_iounmap_ctrl;
9523 }
9524 phba->pci_bar4_memmap_p =
9525 phba->sli4_hba.drbl_regs_memmap_p;
9526 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
9527 if (error)
9528 goto out_iounmap_all;
9529 } else {
9530 error = -ENOMEM;
9461 goto out_iounmap_all; 9531 goto out_iounmap_all;
9532 }
9462 } 9533 }
9463 9534
9464 return 0; 9535 return 0;
@@ -10093,6 +10164,16 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
10093 int fcp_xri_cmpl = 1; 10164 int fcp_xri_cmpl = 1;
10094 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 10165 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
10095 10166
10167 /* Driver just aborted IOs during the hba_unset process. Pause
10168 * here to give the HBA time to complete the IO and get entries
10169 * into the abts lists.
10170 */
10171 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
10172
10173 /* Wait for NVME pending IO to flush back to transport. */
10174 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
10175 lpfc_nvme_wait_for_io_drain(phba);
10176
10096 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) 10177 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10097 fcp_xri_cmpl = 10178 fcp_xri_cmpl =
10098 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 10179 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
@@ -10369,7 +10450,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10369 !phba->nvme_support) { 10450 !phba->nvme_support) {
10370 phba->nvme_support = 0; 10451 phba->nvme_support = 0;
10371 phba->nvmet_support = 0; 10452 phba->nvmet_support = 0;
10372 phba->cfg_nvmet_mrq = 0; 10453 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
10373 phba->cfg_nvme_io_channel = 0; 10454 phba->cfg_nvme_io_channel = 0;
10374 phba->io_channel_irqs = phba->cfg_fcp_io_channel; 10455 phba->io_channel_irqs = phba->cfg_fcp_io_channel;
10375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, 10456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
@@ -11616,6 +11697,10 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
11616 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 11697 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
11617 lpfc_sli_flush_fcp_rings(phba); 11698 lpfc_sli_flush_fcp_rings(phba);
11618 11699
11700 /* Flush the outstanding NVME IOs if fc4 type enabled. */
11701 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11702 lpfc_sli_flush_nvme_rings(phba);
11703
11619 /* stop all timers */ 11704 /* stop all timers */
11620 lpfc_stop_hba_timers(phba); 11705 lpfc_stop_hba_timers(phba);
11621 11706
@@ -11647,6 +11732,10 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
11647 11732
11648 /* Clean up all driver's outstanding SCSI I/Os */ 11733 /* Clean up all driver's outstanding SCSI I/Os */
11649 lpfc_sli_flush_fcp_rings(phba); 11734 lpfc_sli_flush_fcp_rings(phba);
11735
11736 /* Flush the outstanding NVME IOs if fc4 type enabled. */
11737 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11738 lpfc_sli_flush_nvme_rings(phba);
11650} 11739}
11651 11740
11652/** 11741/**
@@ -12138,10 +12227,10 @@ int
12138lpfc_fof_queue_create(struct lpfc_hba *phba) 12227lpfc_fof_queue_create(struct lpfc_hba *phba)
12139{ 12228{
12140 struct lpfc_queue *qdesc; 12229 struct lpfc_queue *qdesc;
12141 uint32_t wqesize;
12142 12230
12143 /* Create FOF EQ */ 12231 /* Create FOF EQ */
12144 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 12232 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
12233 phba->sli4_hba.eq_esize,
12145 phba->sli4_hba.eq_ecount); 12234 phba->sli4_hba.eq_ecount);
12146 if (!qdesc) 12235 if (!qdesc)
12147 goto out_error; 12236 goto out_error;
@@ -12151,7 +12240,15 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
12151 if (phba->cfg_fof) { 12240 if (phba->cfg_fof) {
12152 12241
12153 /* Create OAS CQ */ 12242 /* Create OAS CQ */
12154 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 12243 if (phba->fcp_embed_io)
12244 qdesc = lpfc_sli4_queue_alloc(phba,
12245 LPFC_EXPANDED_PAGE_SIZE,
12246 phba->sli4_hba.cq_esize,
12247 LPFC_CQE_EXP_COUNT);
12248 else
12249 qdesc = lpfc_sli4_queue_alloc(phba,
12250 LPFC_DEFAULT_PAGE_SIZE,
12251 phba->sli4_hba.cq_esize,
12155 phba->sli4_hba.cq_ecount); 12252 phba->sli4_hba.cq_ecount);
12156 if (!qdesc) 12253 if (!qdesc)
12157 goto out_error; 12254 goto out_error;
@@ -12159,11 +12256,16 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
 		phba->sli4_hba.oas_cq = qdesc;

 		/* Create OAS WQ */
-		wqesize = (phba->fcp_embed_io) ?
-				LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-		qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
-					      phba->sli4_hba.wq_ecount);
-
+		if (phba->fcp_embed_io)
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      LPFC_EXPANDED_PAGE_SIZE,
+						      LPFC_WQE128_SIZE,
+						      LPFC_WQE_EXP_COUNT);
+		else
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      LPFC_DEFAULT_PAGE_SIZE,
+						      phba->sli4_hba.wq_esize,
+						      phba->sli4_hba.wq_ecount);
 		if (!qdesc)
 			goto out_error;

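
The lpfc_fof_queue_create() hunks above repeat one pattern: when phba->fcp_embed_io is set, the queue is created with 128-byte WQEs on the expanded page size so the larger entries keep a usable depth; otherwise the default page size and standard entry size are used. A minimal standalone sketch of that geometry selection follows; queue_alloc() and the constants are invented for the sketch and are not the lpfc API.

#include <stdio.h>
#include <stdlib.h>

#define DEFAULT_PAGE_SIZE	4096	/* assumed stand-in for LPFC_DEFAULT_PAGE_SIZE */
#define EXPANDED_PAGE_SIZE	16384	/* assumed stand-in for LPFC_EXPANDED_PAGE_SIZE */

struct queue {
	size_t page_size;
	size_t entry_size;
	size_t entry_count;
};

/* Model of the allocator: record the requested geometry. */
static struct queue *queue_alloc(size_t page_size, size_t esize, size_t ecount)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	q->page_size = page_size;
	q->entry_size = esize;
	q->entry_count = ecount;
	return q;
}

int main(void)
{
	int fcp_embed_io = 1;	/* flag that selects 128-byte WQEs */
	struct queue *wq;

	if (fcp_embed_io)	/* bigger entries need bigger pages */
		wq = queue_alloc(EXPANDED_PAGE_SIZE, 128, 256);
	else
		wq = queue_alloc(DEFAULT_PAGE_SIZE, 64, 256);
	if (!wq)
		return 1;
	printf("WQ: %zu x %zu-byte entries on %zu-byte pages\n",
	       wq->entry_count, wq->entry_size, wq->page_size);
	free(wq);
	return 0;
}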
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b6957d944b9a..d841aa42f607 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -390,6 +390,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		break;
 	}

+	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+
 	/* Check for Nport to NPort pt2pt protocol */
 	if ((vport->fc_flag & FC_PT2PT) &&
 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -727,6 +732,41 @@ out:
 	return 0;
 }

+static uint32_t
+lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    struct lpfc_iocbq *cmdiocb)
+{
+	struct ls_rjt stat;
+	uint32_t *payload;
+	uint32_t cmd;
+
+	payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+	cmd = *payload;
+	if (vport->phba->nvmet_support) {
+		/* Must be a NVME PRLI */
+		if (cmd == ELS_CMD_PRLI)
+			goto out;
+	} else {
+		/* Initiator mode. */
+		if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
+			goto out;
+	}
+	return 1;
+out:
+	lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+			 "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
+			 "state x%x flags x%x\n",
+			 cmd, ndlp->nlp_rpi, ndlp->nlp_state,
+			 ndlp->nlp_flag);
+	memset(&stat, 0, sizeof(struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+			    ndlp, NULL);
+	return 0;
+}
+
 static void
 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	      struct lpfc_iocbq *cmdiocb)
@@ -742,9 +782,6 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	lp = (uint32_t *) pcmd->virt;
 	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));

-	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
-	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
 	if ((npr->prliType == PRLI_FCP_TYPE) ||
 	    (npr->prliType == PRLI_NVME_TYPE)) {
 		if (npr->initiatorFunc) {
@@ -769,8 +806,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	 * type. Target mode does not issue gft_id so doesn't get
 	 * the fc4 type set until now.
 	 */
-	if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
+	if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
 		ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	}
+	if (npr->prliType == PRLI_FCP_TYPE)
+		ndlp->nlp_fc4_type |= NLP_FC4_FCP;
 	}
 	if (rport) {
 		/* We need to update the rport role values */
@@ -1373,7 +1414,8 @@ lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

-	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+		lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
 	return ndlp->nlp_state;
 }

@@ -1544,6 +1586,9 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
 	struct ls_rjt stat;

+	if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
+		return ndlp->nlp_state;
+	}
 	if (vport->phba->nvmet_support) {
 		/* NVME Target mode. Handle and respond to the PRLI and
 		 * transition to UNMAPPED provided the RPI has completed
@@ -1552,28 +1597,22 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
 		if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 			lpfc_rcv_prli(vport, ndlp, cmdiocb);
 			lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
-			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 		} else {
 			/* RPI registration has not completed. Reject the PRLI
 			 * to prevent an illegal state transition when the
 			 * rpi registration does complete.
 			 */
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
-					 "6115 NVMET ndlp rpi %d state "
-					 "unknown, state x%x flags x%08x\n",
-					 ndlp->nlp_rpi, ndlp->nlp_state,
-					 ndlp->nlp_flag);
 			memset(&stat, 0, sizeof(struct ls_rjt));
-			stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-			stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
 			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
 					    ndlp, NULL);
+			return ndlp->nlp_state;
 		}
 	} else {
 		/* Initiator mode. */
 		lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
 	}
-
 	return ndlp->nlp_state;
 }

@@ -1819,6 +1858,8 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

+	if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+		return ndlp->nlp_state;
 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
 	return ndlp->nlp_state;
 }
@@ -1922,13 +1963,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		return ndlp->nlp_state;
 	}

-	/* Check out PRLI rsp */
-	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
-	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-
-	/* NVME or FCP first burst must be negotiated for each PRLI. */
-	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
-	ndlp->nvme_fb_size = 0;
 	if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
 	    (npr->prliType == PRLI_FCP_TYPE)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -1945,8 +1979,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		if (npr->Retry)
 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;

-		/* PRLI completed. Decrement count. */
-		ndlp->fc4_prli_sent--;
 	} else if (nvpr &&
 		   (bf_get_be32(prli_acc_rsp_code, nvpr) ==
 		    PRLI_REQ_EXECUTED) &&
@@ -1991,8 +2023,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			     be32_to_cpu(nvpr->word5),
 			     ndlp->nlp_flag, ndlp->nlp_fcp_info,
 			     ndlp->nlp_type);
-		/* PRLI completed. Decrement count. */
-		ndlp->fc4_prli_sent--;
 	}
 	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
 	    (vport->port_type == LPFC_NPIV_PORT) &&
@@ -2016,7 +2046,8 @@ out_err:
 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
 		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
-		else
+		else if (ndlp->nlp_type &
+			 (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 	} else
 		lpfc_printf_vlog(vport,
@@ -2241,6 +2272,9 @@ lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

+	if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+		return ndlp->nlp_state;
+
 	lpfc_rcv_prli(vport, ndlp, cmdiocb);
 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
 	return ndlp->nlp_state;
@@ -2310,6 +2344,8 @@ lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

+	if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+		return ndlp->nlp_state;
 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
 	return ndlp->nlp_state;
 }
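
Every PRLI receive handler in the node state machine now runs lpfc_rcv_prli_support_check() first and leaves the node state untouched when the check sends an LS_RJT. A simplified model of that gate; the opcodes and structs are invented stand-ins for the ELS definitions, not the lpfc values:

#include <stdio.h>

enum { CMD_PRLI = 0x20, CMD_NVMEPRLI = 0x28 };	/* illustrative opcodes */

struct port {
	int nvmet_support;	/* target mode */
	int nvmei_support;	/* initiator-side NVME enabled */
};

/* Model of lpfc_rcv_prli_support_check(): 1 = handle, 0 = rejected. */
static int prli_support_check(const struct port *p, unsigned int cmd)
{
	if (p->nvmet_support) {
		if (cmd != CMD_PRLI)	/* targets only accept FCP PRLI */
			goto reject;
	} else if (!p->nvmei_support && cmd == CMD_NVMEPRLI) {
		goto reject;		/* NVME PRLI without NVME support */
	}
	return 1;
reject:
	printf("reject PRLI x%x (LS_RJT, command unsupported)\n", cmd);
	return 0;
}

static int rcv_prli_handler(struct port *p, unsigned int cmd, int state)
{
	if (!prli_support_check(p, cmd))
		return state;	/* leave node state unchanged */
	printf("accept PRLI x%x\n", cmd);
	return state;
}

int main(void)
{
	struct port initiator = { .nvmet_support = 0, .nvmei_support = 0 };

	rcv_prli_handler(&initiator, CMD_NVMEPRLI, 5);	/* rejected */
	rcv_prli_handler(&initiator, CMD_PRLI, 5);	/* accepted */
	return 0;
}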
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 517ae570e507..81e3a4f10c3c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -57,11 +57,13 @@
 /* NVME initiator-based functions */

 static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		  int expedite);

 static void
 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);

+static struct nvme_fc_port_template lpfc_nvme_template;

 /**
  * lpfc_nvme_create_queue -
@@ -88,6 +90,9 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
 	struct lpfc_nvme_qhandle *qhandle;
 	char *str;

+	if (!pnvme_lport->private)
+		return -ENOMEM;
+
 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
 	vport = lport->vport;
 	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
@@ -140,6 +145,9 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_vport *vport;

+	if (!pnvme_lport->private)
+		return;
+
 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
 	vport = lport->vport;

@@ -154,6 +162,10 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
 {
 	struct lpfc_nvme_lport *lport = localport->private;

+	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
+			 "6173 localport %p delete complete\n",
+			 lport);
+
 	/* release any threads waiting for the unreg to complete */
 	complete(&lport->lport_unreg_done);
 }
@@ -189,16 +201,19 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 	 * calling state machine to remove the node.
 	 */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			"6146 remoteport delete complete %p\n",
+			"6146 remoteport delete of remoteport %p\n",
 			remoteport);
+	spin_lock_irq(&vport->phba->hbalock);
 	ndlp->nrport = NULL;
+	spin_unlock_irq(&vport->phba->hbalock);
+
+	/* Remove original register reference. The host transport
+	 * won't reference this rport/remoteport any further.
+	 */
 	lpfc_nlp_put(ndlp);

  rport_err:
-	/* This call has to execute as long as the rport is valid.
-	 * Release any threads waiting for the unreg to complete.
-	 */
-	complete(&rport->rport_unreg_done);
+	return;
 }

 static void
@@ -206,6 +221,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 			  struct lpfc_wcqe_complete *wcqe)
 {
 	struct lpfc_vport *vport = cmdwqe->vport;
+	struct lpfc_nvme_lport *lport;
 	uint32_t status;
 	struct nvmefc_ls_req *pnvme_lsreq;
 	struct lpfc_dmabuf *buf_ptr;
@@ -215,6 +231,13 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,

 	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
 	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+	if (status) {
+		lport = (struct lpfc_nvme_lport *)vport->localport->private;
+		if (bf_get(lpfc_wcqe_c_xb, wcqe))
+			atomic_inc(&lport->cmpl_ls_xb);
+		atomic_inc(&lport->cmpl_ls_err);
+	}
+
 	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 			 "6047 nvme cmpl Enter "
@@ -419,6 +442,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 	if (vport->load_flag & FC_UNLOADING)
 		return -ENODEV;

+	if (vport->load_flag & FC_UNLOADING)
+		return -ENODEV;
+
 	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -490,6 +516,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
 				ndlp, 2, 30, 0);
 	if (ret != WQE_SUCCESS) {
+		atomic_inc(&lport->xmt_ls_err);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 				 "6052 EXIT. issue ls wqe failed lport %p, "
 				 "rport %p lsreq%p Status %x DID %x\n",
@@ -534,6 +561,9 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
 	vport = lport->vport;
 	phba = vport->phba;

+	if (vport->load_flag & FC_UNLOADING)
+		return;
+
 	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
 	if (!ndlp) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -571,6 +601,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,

 	/* Abort the targeted IOs and remove them from the abort list. */
 	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
+		atomic_inc(&lport->xmt_ls_abort);
 		spin_lock_irq(&phba->hbalock);
 		list_del_init(&wqe->dlist);
 		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
@@ -774,8 +805,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	struct lpfc_nvme_rport *rport;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
+	struct lpfc_nvme_lport *lport;
 	unsigned long flags;
-	uint32_t code;
+	uint32_t code, status;
 	uint16_t cid, sqhd, data;
 	uint32_t *ptr;

@@ -790,10 +822,17 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,

 	nCmd = lpfc_ncmd->nvmeCmd;
 	rport = lpfc_ncmd->nrport;
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	if (status) {
+		lport = (struct lpfc_nvme_lport *)vport->localport->private;
+		if (bf_get(lpfc_wcqe_c_xb, wcqe))
+			atomic_inc(&lport->cmpl_fcp_xb);
+		atomic_inc(&lport->cmpl_fcp_err);
+	}

 	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
 			 lpfc_ncmd->cur_iocbq.sli4_xritag,
-			 bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
+			 status, wcqe->parameter);
 	/*
 	 * Catch race where our node has transitioned, but the
 	 * transport is still transitioning.
@@ -851,8 +890,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 			nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
 			nCmd->transferred_length = nCmd->payload_length;
 	} else {
-		lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
-			    LPFC_IOCB_STATUS_MASK);
+		lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
 		lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

 		/* For NVME, the only failure path that results in an
@@ -946,10 +984,13 @@ out_err:
 	freqpriv->nvme_buf = NULL;

 	/* NVME targets need completion held off until the abort exchange
-	 * completes.
+	 * completes unless the NVME Rport is getting unregistered.
 	 */
-	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+
+	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
 		nCmd->done(nCmd);
+		lpfc_ncmd->nvmeCmd = NULL;
+	}

 	spin_lock_irqsave(&phba->hbalock, flags);
 	lpfc_ncmd->nrport = NULL;
@@ -1149,7 +1190,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,

 	first_data_sgl = sgl;
 	lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
-	if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
+	if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 			"6058 Too many sg segments from "
 			"NVME Transport. Max %d, "
@@ -1239,6 +1280,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 			struct nvmefc_fcp_req *pnvme_fcreq)
 {
 	int ret = 0;
+	int expedite = 0;
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
@@ -1246,13 +1288,30 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	struct lpfc_nvme_buf *lpfc_ncmd;
 	struct lpfc_nvme_rport *rport;
 	struct lpfc_nvme_qhandle *lpfc_queue_info;
-	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+	struct lpfc_nvme_fcpreq_priv *freqpriv;
+	struct nvme_common_command *sqe;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint64_t start = 0;
 #endif

+	/* Validate pointers. LLDD fault handling with transport does
+	 * have timing races.
+	 */
 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	if (unlikely(!lport)) {
+		ret = -EINVAL;
+		goto out_fail;
+	}
+
 	vport = lport->vport;
+
+	if (unlikely(!hw_queue_handle)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+				 "6129 Fail Abort, NULL hw_queue_handle\n");
+		ret = -EINVAL;
+		goto out_fail;
+	}
+
 	phba = vport->phba;

 	if (vport->load_flag & FC_UNLOADING) {
@@ -1260,16 +1319,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 		goto out_fail;
 	}

-	/* Validate pointers. */
-	if (!pnvme_lport || !pnvme_rport || !freqpriv) {
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
-				 "6117 No Send:IO submit ptrs NULL, lport %p, "
-				 "rport %p fcreq_priv %p\n",
-				 pnvme_lport, pnvme_rport, freqpriv);
+	if (vport->load_flag & FC_UNLOADING) {
 		ret = -ENODEV;
 		goto out_fail;
 	}

+	freqpriv = pnvme_fcreq->private;
+	if (unlikely(!freqpriv)) {
+		ret = -EINVAL;
+		goto out_fail;
+	}
+
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->ktime_on)
 		start = ktime_get_ns();
@@ -1293,6 +1353,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
 				 "6066 Missing node for DID %x\n",
 				 pnvme_rport->port_id);
+		atomic_inc(&lport->xmt_fcp_bad_ndlp);
 		ret = -ENODEV;
 		goto out_fail;
 	}
@@ -1306,21 +1367,36 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1306 "IO. State x%x, Type x%x\n", 1367 "IO. State x%x, Type x%x\n",
1307 rport, pnvme_rport->port_id, 1368 rport, pnvme_rport->port_id,
1308 ndlp->nlp_state, ndlp->nlp_type); 1369 ndlp->nlp_state, ndlp->nlp_type);
1370 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1309 ret = -ENODEV; 1371 ret = -ENODEV;
1310 goto out_fail; 1372 goto out_fail;
1311 1373
1312 } 1374 }
1313 1375
1376 /* Currently only NVME Keep alive commands should be expedited
1377 * if the driver runs out of a resource. These should only be
1378 * issued on the admin queue, qidx 0
1379 */
1380 if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1381 sqe = &((struct nvme_fc_cmd_iu *)
1382 pnvme_fcreq->cmdaddr)->sqe.common;
1383 if (sqe->opcode == nvme_admin_keep_alive)
1384 expedite = 1;
1385 }
1386
1314 /* The node is shared with FCP IO, make sure the IO pending count does 1387 /* The node is shared with FCP IO, make sure the IO pending count does
1315 * not exceed the programmed depth. 1388 * not exceed the programmed depth.
1316 */ 1389 */
1317 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { 1390 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1391 !expedite) {
1392 atomic_inc(&lport->xmt_fcp_qdepth);
1318 ret = -EBUSY; 1393 ret = -EBUSY;
1319 goto out_fail; 1394 goto out_fail;
1320 } 1395 }
1321 1396
1322 lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp); 1397 lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
1323 if (lpfc_ncmd == NULL) { 1398 if (lpfc_ncmd == NULL) {
1399 atomic_inc(&lport->xmt_fcp_noxri);
1324 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1400 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1325 "6065 driver's buffer pool is empty, " 1401 "6065 driver's buffer pool is empty, "
1326 "IO failed\n"); 1402 "IO failed\n");
@@ -1373,6 +1449,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,

 	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
 	if (ret) {
+		atomic_inc(&lport->xmt_fcp_wqerr);
 		atomic_dec(&ndlp->cmd_pending);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6113 FCP could not issue WQE err %x "
@@ -1473,19 +1550,36 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
-	struct lpfc_nvme_rport *rport;
 	struct lpfc_nvme_buf *lpfc_nbuf;
 	struct lpfc_iocbq *abts_buf;
 	struct lpfc_iocbq *nvmereq_wqe;
-	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+	struct lpfc_nvme_fcpreq_priv *freqpriv;
 	union lpfc_wqe *abts_wqe;
 	unsigned long flags;
 	int ret_val;

+	/* Validate pointers. LLDD fault handling with transport does
+	 * have timing races.
+	 */
 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
-	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+	if (unlikely(!lport))
+		return;
+
 	vport = lport->vport;
+
+	if (unlikely(!hw_queue_handle)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+				 "6129 Fail Abort, HW Queue Handle NULL.\n");
+		return;
+	}
+
 	phba = vport->phba;
+	freqpriv = pnvme_fcreq->private;
+
+	if (unlikely(!freqpriv))
+		return;
+	if (vport->load_flag & FC_UNLOADING)
+		return;

 	/* Announce entry to new IO submit field. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
@@ -1552,6 +1646,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 		return;
 	}

+	atomic_inc(&lport->xmt_fcp_abort);
 	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
 			 nvmereq_wqe->sli4_xritag,
 			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
@@ -1931,6 +2026,8 @@ lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
 	spin_lock(&phba->nvme_buf_list_put_lock);
 	list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
 	list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
+	phba->get_nvme_bufs = 0;
+	phba->put_nvme_bufs = 0;
 	spin_unlock(&phba->nvme_buf_list_put_lock);
 	spin_unlock_irq(&phba->nvme_buf_list_get_lock);

@@ -2067,6 +2164,20 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 	return num_posted;
 }

+static inline struct lpfc_nvme_buf *
+lpfc_nvme_buf(struct lpfc_hba *phba)
+{
+	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &phba->lpfc_nvme_buf_list_get, list) {
+		list_del_init(&lpfc_ncmd->list);
+		phba->get_nvme_bufs--;
+		return lpfc_ncmd;
+	}
+	return NULL;
+}
+
 /**
  * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
  * @phba: The HBA for which this call is being executed.
@@ -2079,35 +2190,27 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
  * Pointer to lpfc_nvme_buf - Success
 **/
 static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		  int expedite)
 {
-	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+	struct lpfc_nvme_buf *lpfc_ncmd = NULL;
 	unsigned long iflag = 0;
-	int found = 0;

 	spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
-	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &phba->lpfc_nvme_buf_list_get, list) {
-		list_del_init(&lpfc_ncmd->list);
-		found = 1;
-		break;
-	}
-	if (!found) {
+	if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+		lpfc_ncmd = lpfc_nvme_buf(phba);
+	if (!lpfc_ncmd) {
 		spin_lock(&phba->nvme_buf_list_put_lock);
 		list_splice(&phba->lpfc_nvme_buf_list_put,
 			    &phba->lpfc_nvme_buf_list_get);
+		phba->get_nvme_bufs += phba->put_nvme_bufs;
 		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+		phba->put_nvme_bufs = 0;
 		spin_unlock(&phba->nvme_buf_list_put_lock);
-		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-					 &phba->lpfc_nvme_buf_list_get, list) {
-			list_del_init(&lpfc_ncmd->list);
-			found = 1;
-			break;
-		}
+		if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+			lpfc_ncmd = lpfc_nvme_buf(phba);
 	}
 	spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
-	if (!found)
-		return NULL;
 	return lpfc_ncmd;
 }

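
The rewritten lpfc_get_nvme_buf()/lpfc_release_nvme_buf() pair keeps running counts on the two free lists so the last LPFC_NVME_EXPEDITE_XRICNT buffers stay reserved for expedited commands. A single-threaded model of the same policy follows (the driver does this under the two list locks, elided here); the reserve value 8 mirrors the patch, everything else is invented:

#include <stdio.h>
#include <stdlib.h>

#define EXPEDITE_RESERVE	8	/* stand-in for LPFC_NVME_EXPEDITE_XRICNT */

struct buf { struct buf *next; };

/* Two counted free lists: a cheap "put" side refills the "get" side. */
struct pool {
	struct buf *get_list, *put_list;
	int get_cnt, put_cnt;
};

static struct buf *take(struct pool *p)
{
	struct buf *b = p->get_list;

	if (b) {
		p->get_list = b->next;
		p->get_cnt--;
	}
	return b;
}

/* Model of lpfc_get_nvme_buf(): ordinary IO may not drain the last
 * EXPEDITE_RESERVE buffers; expedited (keep-alive) requests may. */
static struct buf *pool_get(struct pool *p, int expedite)
{
	struct buf *b = NULL;

	if (p->get_cnt > EXPEDITE_RESERVE || expedite)
		b = take(p);
	if (!b) {			/* refill from the put side once */
		while (p->put_list) {
			struct buf *m = p->put_list;

			p->put_list = m->next;
			m->next = p->get_list;
			p->get_list = m;
		}
		p->get_cnt += p->put_cnt;
		p->put_cnt = 0;
		if (p->get_cnt > EXPEDITE_RESERVE || expedite)
			b = take(p);
	}
	return b;
}

/* Model of lpfc_release_nvme_buf(): return to the put side, count it. */
static void pool_put(struct pool *p, struct buf *b)
{
	b->next = p->put_list;
	p->put_list = b;
	p->put_cnt++;
}

int main(void)
{
	struct pool p = { 0 };
	struct buf bufs[10];
	int i;

	for (i = 0; i < 10; i++)
		pool_put(&p, &bufs[i]);
	for (i = 0; pool_get(&p, 0); i++)
		;
	printf("plain gets served: %d (reserve kept: %d)\n", i, p.get_cnt);
	printf("expedited get: %p\n", (void *)pool_get(&p, 1));
	return 0;
}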
@@ -2145,6 +2248,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 		lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
 		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
 		list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
+		phba->put_nvme_bufs++;
 		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
 	}
 }
@@ -2221,6 +2325,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	lport->vport = vport;
 	vport->nvmei_support = 1;

+	atomic_set(&lport->xmt_fcp_noxri, 0);
+	atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
+	atomic_set(&lport->xmt_fcp_qdepth, 0);
+	atomic_set(&lport->xmt_fcp_wqerr, 0);
+	atomic_set(&lport->xmt_fcp_abort, 0);
+	atomic_set(&lport->xmt_ls_abort, 0);
+	atomic_set(&lport->xmt_ls_err, 0);
+	atomic_set(&lport->cmpl_fcp_xb, 0);
+	atomic_set(&lport->cmpl_fcp_err, 0);
+	atomic_set(&lport->cmpl_ls_xb, 0);
+	atomic_set(&lport->cmpl_ls_err, 0);
+
 	/* Don't post more new bufs if repost already recovered
 	 * the nvme sgls.
 	 */
@@ -2234,6 +2350,47 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	return ret;
 }

+/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
+ *
+ * The driver has to wait for the host nvme transport to callback
+ * indicating the localport has successfully unregistered all
+ * resources. Since this is an uninterruptible wait, loop every ten
+ * seconds and print a message indicating no progress.
+ *
+ * An uninterruptible wait is used because of the risk of transport-to-
+ * driver state mismatch.
+ */
+void
+lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
+			   struct lpfc_nvme_lport *lport)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	u32 wait_tmo;
+	int ret;
+
+	/* Host transport has to clean up and confirm requiring an indefinite
+	 * wait. Print a message if a 10 second wait expires and renew the
+	 * wait. This is unexpected.
+	 */
+	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+	while (true) {
+		ret = wait_for_completion_timeout(&lport->lport_unreg_done,
+						  wait_tmo);
+		if (unlikely(!ret)) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+					 "6176 Lport %p Localport %p wait "
+					 "timed out. Renewing.\n",
+					 lport, vport->localport);
+			continue;
+		}
+		break;
+	}
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+			 "6177 Lport %p Localport %p Complete Success\n",
+			 lport, vport->localport);
+#endif
+}
+
 /**
  * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
  * @pnvme: pointer to lpfc nvme data structure.
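
lpfc_nvme_lport_unreg_wait() replaces the old 5-jiffy timeout with an indefinite wait that is renewed every LPFC_NVME_WAIT_TMO seconds, logging each expiry so a hung unregister is visible rather than silent. A tiny model of that renewable loop; wait_stub() stands in for wait_for_completion_timeout() and simply completes on the third ten-second window:

#include <stdio.h>

/* Stub: returns 0 on timeout, nonzero once the callback has fired. */
static int wait_stub(void)
{
	static int calls;

	return ++calls >= 3;
}

int main(void)
{
	/* Renew an uninterruptible timed wait forever, logging each
	 * expiry, then report success once the completion arrives. */
	for (;;) {
		if (!wait_stub()) {
			printf("wait timed out, renewing\n");
			continue;
		}
		break;
	}
	printf("localport unregister complete\n");
	return 0;
}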
@@ -2268,7 +2425,11 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	 */
 	init_completion(&lport->lport_unreg_done);
 	ret = nvme_fc_unregister_localport(localport);
-	wait_for_completion_timeout(&lport->lport_unreg_done, 5);
+
+	/* Wait for completion.  This either blocks
+	 * indefinitely or succeeds
+	 */
+	lpfc_nvme_lport_unreg_wait(vport, lport);

 	/* Regardless of the unregister upcall response, clear
 	 * nvmei_support. All rports are unregistered and the
@@ -2365,6 +2526,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)

 	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
 	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+	if (!ndlp->nrport)
+		lpfc_nlp_get(ndlp);
+
 	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
 	if (!ret) {
 		/* If the ndlp already has an nrport, this is just
@@ -2373,23 +2537,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		 */
 		rport = remote_port->private;
 		if (ndlp->nrport) {
-			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
-					 LOG_NVME_DISC,
-					 "6014 Rebinding lport to "
-					 "rport wwpn 0x%llx, "
-					 "Data: x%x x%x x%x x%06x\n",
-					 remote_port->port_name,
-					 remote_port->port_id,
-					 remote_port->port_role,
-					 ndlp->nlp_type,
-					 ndlp->nlp_DID);
+			if (ndlp->nrport == remote_port->private) {
+				/* Same remoteport.  Just reuse. */
+				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+						 LOG_NVME_DISC,
+						 "6014 Rebinding lport to "
+						 "remoteport %p wwpn 0x%llx, "
+						 "Data: x%x x%x %p x%x x%06x\n",
+						 remote_port,
+						 remote_port->port_name,
+						 remote_port->port_id,
+						 remote_port->port_role,
+						 ndlp,
+						 ndlp->nlp_type,
+						 ndlp->nlp_DID);
+				return 0;
+			}
 			prev_ndlp = rport->ndlp;

-			/* Sever the ndlp<->rport connection before dropping
-			 * the ndlp ref from register.
+			/* Sever the ndlp<->rport association
+			 * before dropping the ndlp ref from
+			 * register.
 			 */
+			spin_lock_irq(&vport->phba->hbalock);
 			ndlp->nrport = NULL;
+			spin_unlock_irq(&vport->phba->hbalock);
 			rport->ndlp = NULL;
+			rport->remoteport = NULL;
 			if (prev_ndlp)
 				lpfc_nlp_put(ndlp);
 		}
@@ -2397,19 +2571,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		/* Clean bind the rport to the ndlp. */
 		rport->remoteport = remote_port;
 		rport->lport = lport;
-		rport->ndlp = lpfc_nlp_get(ndlp);
-		if (!rport->ndlp)
-			return -1;
+		rport->ndlp = ndlp;
+		spin_lock_irq(&vport->phba->hbalock);
 		ndlp->nrport = rport;
+		spin_unlock_irq(&vport->phba->hbalock);
 		lpfc_printf_vlog(vport, KERN_INFO,
 				 LOG_NVME_DISC | LOG_NODE,
 				 "6022 Binding new rport to "
-				 "lport %p Rport WWNN 0x%llx, "
+				 "lport %p Remoteport %p WWNN 0x%llx, "
 				 "Rport WWPN 0x%llx DID "
-				 "x%06x Role x%x\n",
-				 lport,
+				 "x%06x Role x%x, ndlp %p\n",
+				 lport, remote_port,
 				 rpinfo.node_name, rpinfo.port_name,
-				 rpinfo.port_id, rpinfo.port_role);
+				 rpinfo.port_id, rpinfo.port_role,
+				 ndlp);
 	} else {
 		lpfc_printf_vlog(vport, KERN_ERR,
 				 LOG_NVME_DISC | LOG_NODE,
@@ -2473,20 +2648,20 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	/* Sanity check ndlp type. Only call for NVME ports. Don't
 	 * clear any rport state until the transport calls back.
 	 */
-	if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
-		init_completion(&rport->rport_unreg_done);

+	if (ndlp->nlp_type & NLP_NVME_TARGET) {
 		/* No concern about the role change on the nvme remoteport.
 		 * The transport will update it.
 		 */
+		ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
 		ret = nvme_fc_unregister_remoteport(remoteport);
 		if (ret != 0) {
+			lpfc_nlp_put(ndlp);
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
 					 "6167 NVME unregister failed %d "
 					 "port_state x%x\n",
 					 ret, remoteport->port_state);
 		}
-
 	}
 	return;

@@ -2545,8 +2720,11 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
 		 * before the abort exchange command fully completes.
 		 * Once completed, it is available via the put list.
 		 */
-		nvme_cmd = lpfc_ncmd->nvmeCmd;
-		nvme_cmd->done(nvme_cmd);
+		if (lpfc_ncmd->nvmeCmd) {
+			nvme_cmd = lpfc_ncmd->nvmeCmd;
+			nvme_cmd->done(nvme_cmd);
+			lpfc_ncmd->nvmeCmd = NULL;
+		}
 		lpfc_release_nvme_buf(phba, lpfc_ncmd);
 		return;
 	}
@@ -2558,3 +2736,45 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2558 "6312 XRI Aborted xri x%x not found\n", xri); 2736 "6312 XRI Aborted xri x%x not found\n", xri);
2559 2737
2560} 2738}
2739
2740/**
2741 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2742 * @phba: Pointer to HBA context object.
2743 *
2744 * This function flushes all wqes in the nvme rings and frees all resources
2745 * in the txcmplq. This function does not issue abort wqes for the IO
2746 * commands in txcmplq, they will just be returned with
2747 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
2748 * slot has been permanently disabled.
2749 **/
2750void
2751lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2752{
2753 struct lpfc_sli_ring *pring;
2754 u32 i, wait_cnt = 0;
2755
2756 if (phba->sli_rev < LPFC_SLI_REV4)
2757 return;
2758
2759 /* Cycle through all NVME rings and make sure all outstanding
2760 * WQEs have been removed from the txcmplqs.
2761 */
2762 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
2763 pring = phba->sli4_hba.nvme_wq[i]->pring;
2764
2765 /* Retrieve everything on the txcmplq */
2766 while (!list_empty(&pring->txcmplq)) {
2767 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2768 wait_cnt++;
2769
2770 /* The sleep is 10mS. Every ten seconds,
2771 * dump a message. Something is wrong.
2772 */
2773 if ((wait_cnt % 1000) == 0) {
2774 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2775 "6178 NVME IO not empty, "
2776 "cnt %d\n", wait_cnt);
2777 }
2778 }
2779 }
2780}
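
lpfc_nvme_wait_for_io_drain() polls every txcmplq at 10 ms intervals and emits a warning every 1000 polls (ten seconds) while entries remain. A userspace model of that drain loop, with a plain counter standing in for the list and the sleep elided:

#include <stdio.h>

int main(void)
{
	/* Poll until the pretend completion queue drains, warning every
	 * 1000 polls so a stuck drain shows up in the log. */
	int outstanding = 2500;		/* pretend WQEs on the txcmplq */
	unsigned int wait_cnt = 0;

	while (outstanding > 0) {
		/* msleep(10) would go here in the driver */
		outstanding--;		/* hardware retires one entry */
		wait_cnt++;
		if ((wait_cnt % 1000) == 0)
			printf("IO not drained yet, cnt %u\n", wait_cnt);
	}
	printf("drained after %u polls\n", wait_cnt);
	return 0;
}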
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index d192bb268f99..e79f8f75758c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -22,10 +22,12 @@
  ********************************************************************/

 #define LPFC_NVME_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
-#define LPFC_NVME_WQSIZE		256

 #define LPFC_NVME_ERSP_LEN		0x20

+#define LPFC_NVME_WAIT_TMO		10
+#define LPFC_NVME_EXPEDITE_XRICNT	8
+
 struct lpfc_nvme_qhandle {
 	uint32_t index;		/* WQ index to use */
 	uint32_t qidx;		/* queue index passed to create */
@@ -36,7 +38,18 @@ struct lpfc_nvme_qhandle {
 struct lpfc_nvme_lport {
 	struct lpfc_vport *vport;
 	struct completion lport_unreg_done;
-	/* Add sttats counters here */
+	/* Add stats counters here */
+	atomic_t xmt_fcp_noxri;
+	atomic_t xmt_fcp_bad_ndlp;
+	atomic_t xmt_fcp_qdepth;
+	atomic_t xmt_fcp_wqerr;
+	atomic_t xmt_fcp_abort;
+	atomic_t xmt_ls_abort;
+	atomic_t xmt_ls_err;
+	atomic_t cmpl_fcp_xb;
+	atomic_t cmpl_fcp_err;
+	atomic_t cmpl_ls_xb;
+	atomic_t cmpl_ls_err;
 };

 struct lpfc_nvme_rport {
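
The new lport counters are plain atomics: one atomic increment on each error or abort path, read back without locking when statistics are dumped (the related debugfs code reads them with atomic_read()). A minimal C11 model of that pattern; the struct and function names here are invented:

#include <stdatomic.h>
#include <stdio.h>

/* Model of the lport counters: atomics bumped on the IO paths. */
struct lport_stats {
	atomic_uint xmt_fcp_noxri;
	atomic_uint cmpl_fcp_err;
};

static void io_failed_no_buffer(struct lport_stats *s)
{
	atomic_fetch_add(&s->xmt_fcp_noxri, 1);	/* hot path: one add */
}

static void dump(struct lport_stats *s)
{
	/* Snapshot without locking, as a stats dump would. */
	printf("noxri %u cmpl_err %u\n",
	       atomic_load(&s->xmt_fcp_noxri),
	       atomic_load(&s->cmpl_fcp_err));
}

int main(void)
{
	struct lport_stats s = { 0 };

	io_failed_no_buffer(&s);
	io_failed_no_buffer(&s);
	dump(&s);
	return 0;
}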
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 84cf1b9079f7..8dbf5c9d51aa 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -38,6 +38,7 @@

 #include <../drivers/nvme/host/nvme.h>
 #include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>

 #include "lpfc_version.h"
 #include "lpfc_hw4.h"
@@ -126,10 +127,17 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,

 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

-	if (status)
-		atomic_inc(&tgtp->xmt_ls_rsp_error);
-	else
-		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+	if (tgtp) {
+		if (status) {
+			atomic_inc(&tgtp->xmt_ls_rsp_error);
+			if (status == IOERR_ABORT_REQUESTED)
+				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
+			if (bf_get(lpfc_wcqe_c_xb, wcqe))
+				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
+		} else {
+			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+		}
+	}

 out:
 	rsp = &ctxp->ctx.ls_req;
@@ -218,6 +226,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 	ctxp->entry_cnt = 1;
 	ctxp->flag = 0;
 	ctxp->ctxbuf = ctx_buf;
+	ctxp->rqb_buffer = (void *)nvmebuf;
 	spin_lock_init(&ctxp->ctxlock);

 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -253,6 +262,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 		return;
 	}

+	/* Processing of FCP command is deferred */
+	if (rc == -EOVERFLOW) {
+		lpfc_nvmeio_data(phba,
+				 "NVMET RCV BUSY: xri x%x sz %d "
+				 "from %06x\n",
+				 oxid, size, sid);
+		/* defer repost rcv buffer till .defer_rcv callback */
+		ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		return;
+	}
 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -519,8 +539,11 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	if (status) {
 		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
 		rsp->transferred_length = 0;
-		if (tgtp)
+		if (tgtp) {
 			atomic_inc(&tgtp->xmt_fcp_rsp_error);
+			if (status == IOERR_ABORT_REQUESTED)
+				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
+		}

 		logerr = LOG_NVME_IOERR;

@@ -528,6 +551,8 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
 			ctxp->flag |= LPFC_NVMET_XBUSY;
 			logerr |= LOG_NVME_ABTS;
+			if (tgtp)
+				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

 		} else {
 			ctxp->flag &= ~LPFC_NVMET_XBUSY;
@@ -635,6 +660,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
 	if (phba->pport->load_flag & FC_UNLOADING)
 		return -ENODEV;

+	if (phba->pport->load_flag & FC_UNLOADING)
+		return -ENODEV;
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
 			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

@@ -721,6 +749,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		goto aerr;
 	}

+	if (phba->pport->load_flag & FC_UNLOADING) {
+		rc = -ENODEV;
+		goto aerr;
+	}
+
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (ctxp->ts_cmd_nvme) {
 		if (rsp->op == NVMET_FCOP_RSP)
@@ -823,6 +856,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
 	if (phba->pport->load_flag & FC_UNLOADING)
 		return;

+	if (phba->pport->load_flag & FC_UNLOADING)
+		return;
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
 			ctxp->oxid, ctxp->flag, ctxp->state);
@@ -910,7 +946,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,

 	tgtp = phba->targetport->private;
 	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
-	lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+	if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+	else
+		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+	ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
 }

 static struct nvmet_fc_target_template lpfc_tgttemplate = {
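
LPFC_NVMET_DEFER_RCV_REPOST records, at receive time, which free path a deferred buffer must take when the transport later calls .defer_rcv: repost straight to the hardware RQ, or return to the driver's buffer pool. A small model of that one-shot flag; the types are stand-ins for the lpfc structures:

#include <stdio.h>

#define DEFER_RCV_REPOST	0x20	/* mirrors LPFC_NVMET_DEFER_RCV_REPOST */

struct ctx { unsigned int flag; };

/* Model of the defer_rcv callback: the flag picks the free path. */
static void defer_rcv(struct ctx *c)
{
	if (c->flag & DEFER_RCV_REPOST)
		printf("repost buffer straight to the hardware RQ\n");
	else
		printf("return buffer to the driver's free pool\n");
	c->flag &= ~DEFER_RCV_REPOST;	/* one-shot: clear after use */
}

int main(void)
{
	struct ctx c = { .flag = DEFER_RCV_REPOST };

	defer_rcv(&c);	/* repost path */
	defer_rcv(&c);	/* pool path on any later call */
	return 0;
}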
@@ -1216,6 +1256,8 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 	atomic_set(&tgtp->xmt_ls_rsp, 0);
 	atomic_set(&tgtp->xmt_ls_drop, 0);
 	atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+	atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
+	atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
 	atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
 	atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
 	atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
@@ -1228,7 +1270,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 	atomic_set(&tgtp->xmt_fcp_release, 0);
 	atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
 	atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+	atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
+	atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
 	atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+	atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
 	atomic_set(&tgtp->xmt_fcp_abort, 0);
 	atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
 	atomic_set(&tgtp->xmt_abort_unsol, 0);
@@ -1270,6 +1315,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_nodelist *ndlp;
 	unsigned long iflag = 0;
 	int rrq_empty = 0;
@@ -1280,6 +1326,12 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,

 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 		return;
+
+	if (phba->targetport) {
+		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
+	}
+
 	spin_lock_irqsave(&phba->hbalock, iflag);
 	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 	list_for_each_entry_safe(ctxp, next_ctxp,
@@ -1682,6 +1734,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	ctxp->entry_cnt = 1;
 	ctxp->flag = 0;
 	ctxp->ctxbuf = ctx_buf;
+	ctxp->rqb_buffer = (void *)nvmebuf;
 	spin_lock_init(&ctxp->ctxlock);

 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1715,6 +1768,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,

 	/* Process FCP command */
 	if (rc == 0) {
+		ctxp->rqb_buffer = NULL;
 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
 		return;
@@ -1726,10 +1780,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1726 "NVMET RCV BUSY: xri x%x sz %d from %06x\n", 1780 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
1727 oxid, size, sid); 1781 oxid, size, sid);
1728 /* defer reposting rcv buffer till .defer_rcv callback */ 1782 /* defer reposting rcv buffer till .defer_rcv callback */
1729 ctxp->rqb_buffer = nvmebuf; 1783 ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
1730 atomic_inc(&tgtp->rcv_fcp_cmd_out); 1784 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1731 return; 1785 return;
1732 } 1786 }
1787 ctxp->rqb_buffer = nvmebuf;
1733 1788
1734 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 1789 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1735 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1790 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1992,7 +2047,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 		return NULL;
 	}

-	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
+	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 			"6109 NVMET prep FCP wqe: seg cnt err: "
 			"NPORT x%x oxid x%x ste %d cnt %d\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 25a65b0bb7f3..5b32c9e4d4ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -25,6 +25,10 @@
25#define LPFC_NVMET_RQE_DEF_COUNT 512 25#define LPFC_NVMET_RQE_DEF_COUNT 512
26#define LPFC_NVMET_SUCCESS_LEN 12 26#define LPFC_NVMET_SUCCESS_LEN 12
27 27
28#define LPFC_NVMET_MRQ_OFF 0xffff
29#define LPFC_NVMET_MRQ_AUTO 0
30#define LPFC_NVMET_MRQ_MAX 16
31
28/* Used for NVME Target */ 32/* Used for NVME Target */
29struct lpfc_nvmet_tgtport { 33struct lpfc_nvmet_tgtport {
30 struct lpfc_hba *phba; 34 struct lpfc_hba *phba;
@@ -43,6 +47,8 @@ struct lpfc_nvmet_tgtport {
43 47
44 /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */ 48 /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
45 atomic_t xmt_ls_rsp_error; 49 atomic_t xmt_ls_rsp_error;
50 atomic_t xmt_ls_rsp_aborted;
51 atomic_t xmt_ls_rsp_xb_set;
46 atomic_t xmt_ls_rsp_cmpl; 52 atomic_t xmt_ls_rsp_cmpl;
47 53
48 /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */ 54 /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
@@ -60,12 +66,15 @@ struct lpfc_nvmet_tgtport {
60 atomic_t xmt_fcp_rsp; 66 atomic_t xmt_fcp_rsp;
61 67
62 /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */ 68 /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
69 atomic_t xmt_fcp_rsp_xb_set;
63 atomic_t xmt_fcp_rsp_cmpl; 70 atomic_t xmt_fcp_rsp_cmpl;
64 atomic_t xmt_fcp_rsp_error; 71 atomic_t xmt_fcp_rsp_error;
72 atomic_t xmt_fcp_rsp_aborted;
65 atomic_t xmt_fcp_rsp_drop; 73 atomic_t xmt_fcp_rsp_drop;
66 74
67 75
68 /* Stats counters - lpfc_nvmet_xmt_fcp_abort */ 76 /* Stats counters - lpfc_nvmet_xmt_fcp_abort */
77 atomic_t xmt_fcp_xri_abort_cqe;
69 atomic_t xmt_fcp_abort; 78 atomic_t xmt_fcp_abort;
70 atomic_t xmt_fcp_abort_cmpl; 79 atomic_t xmt_fcp_abort_cmpl;
71 atomic_t xmt_abort_sol; 80 atomic_t xmt_abort_sol;
@@ -122,6 +131,7 @@ struct lpfc_nvmet_rcv_ctx {
122#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */ 131#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
123#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ 132#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
124#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ 133#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
134#define LPFC_NVMET_DEFER_RCV_REPOST 0x20 /* repost to RQ on defer rcv */
125 struct rqb_dmabuf *rqb_buffer; 135 struct rqb_dmabuf *rqb_buffer;
126 struct lpfc_nvmet_ctxbuf *ctxbuf; 136 struct lpfc_nvmet_ctxbuf *ctxbuf;
127 137
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index aecd2399005d..5f5528a12308 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
475 struct lpfc_rqe *temp_hrqe; 475 struct lpfc_rqe *temp_hrqe;
476 struct lpfc_rqe *temp_drqe; 476 struct lpfc_rqe *temp_drqe;
477 struct lpfc_register doorbell; 477 struct lpfc_register doorbell;
478 int put_index; 478 int hq_put_index;
479 int dq_put_index;
479 480
480 /* sanity check on queue memory */ 481 /* sanity check on queue memory */
481 if (unlikely(!hq) || unlikely(!dq)) 482 if (unlikely(!hq) || unlikely(!dq))
482 return -ENOMEM; 483 return -ENOMEM;
483 put_index = hq->host_index; 484 hq_put_index = hq->host_index;
484 temp_hrqe = hq->qe[put_index].rqe; 485 dq_put_index = dq->host_index;
485 temp_drqe = dq->qe[dq->host_index].rqe; 486 temp_hrqe = hq->qe[hq_put_index].rqe;
487 temp_drqe = dq->qe[dq_put_index].rqe;
486 488
487 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 489 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
488 return -EINVAL; 490 return -EINVAL;
489 if (put_index != dq->host_index) 491 if (hq_put_index != dq_put_index)
490 return -EINVAL; 492 return -EINVAL;
491 /* If the host has not yet processed the next entry then we are done */ 493 /* If the host has not yet processed the next entry then we are done */
492 if (((put_index + 1) % hq->entry_count) == hq->hba_index) 494 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
493 return -EBUSY; 495 return -EBUSY;
494 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 496 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
495 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 497 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
496 498
497 /* Update the host index to point to the next slot */ 499 /* Update the host index to point to the next slot */
498 hq->host_index = ((put_index + 1) % hq->entry_count); 500 hq->host_index = ((hq_put_index + 1) % hq->entry_count);
499 dq->host_index = ((dq->host_index + 1) % dq->entry_count); 501 dq->host_index = ((dq_put_index + 1) % dq->entry_count);
500 hq->RQ_buf_posted++; 502 hq->RQ_buf_posted++;
501 503
502 /* Ring The Header Receive Queue Doorbell */ 504 /* Ring The Header Receive Queue Doorbell */
@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
517 } 519 }
518 writel(doorbell.word0, hq->db_regaddr); 520 writel(doorbell.word0, hq->db_regaddr);
519 } 521 }
520 return put_index; 522 return hq_put_index;
521} 523}
522 524
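Splitting put_index into hq_put_index and dq_put_index in the hunk above makes the lockstep invariant explicit: the header and data receive queues must advance together, and the ring is full when the next put would collide with the HBA's index. A generic, self-contained sketch of that paired ring put (not the lpfc code itself):

    struct ring {
            unsigned int put;       /* producer (host) index */
            unsigned int get;       /* consumer (hba) index */
            unsigned int count;     /* entries in the ring */
    };

    /* Returns the slot written, or a negative error, mirroring the
     * -EINVAL/-EBUSY structure of the function above. */
    static int paired_ring_put(struct ring *hq, struct ring *dq)
    {
            unsigned int slot = hq->put;

            if (slot != dq->put)                    /* lockstep broken */
                    return -1;
            if (((slot + 1) % hq->count) == hq->get)
                    return -2;                      /* ring is full */
            /* ... copy header and data entries into the slot ... */
            hq->put = (slot + 1) % hq->count;
            dq->put = (dq->put + 1) % dq->count;
            return (int)slot;
    }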
523/** 525/**
@@ -12318,41 +12320,6 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12318} 12320}
12319 12321
12320/** 12322/**
12321 * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
12322 * @phba: pointer to lpfc hba data structure.
12323 *
12324 * This routine is invoked by the worker thread to process all the pending
12325 * SLI4 NVME abort XRI events.
12326 **/
12327void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
12328{
12329 struct lpfc_cq_event *cq_event;
12330
12331 /* First, declare the fcp xri abort event has been handled */
12332 spin_lock_irq(&phba->hbalock);
12333 phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
12334 spin_unlock_irq(&phba->hbalock);
12335 /* Now, handle all the fcp xri abort events */
12336 while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
12337 /* Get the first event from the head of the event queue */
12338 spin_lock_irq(&phba->hbalock);
12339 list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
12340 cq_event, struct lpfc_cq_event, list);
12341 spin_unlock_irq(&phba->hbalock);
12342 /* Notify aborted XRI for NVME work queue */
12343 if (phba->nvmet_support) {
12344 lpfc_sli4_nvmet_xri_aborted(phba,
12345 &cq_event->cqe.wcqe_axri);
12346 } else {
12347 lpfc_sli4_nvme_xri_aborted(phba,
12348 &cq_event->cqe.wcqe_axri);
12349 }
12350 /* Free the event processed back to the free pool */
12351 lpfc_sli4_cq_event_release(phba, cq_event);
12352 }
12353}
12354
12355/**
12356 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12323 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12357 * @phba: pointer to lpfc hba data structure. 12324 * @phba: pointer to lpfc hba data structure.
12358 * 12325 *
@@ -12548,6 +12515,24 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12548 return irspiocbq; 12515 return irspiocbq;
12549} 12516}
12550 12517
12518inline struct lpfc_cq_event *
12519lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
12520{
12521 struct lpfc_cq_event *cq_event;
12522
12523 /* Allocate a new internal CQ_EVENT entry */
12524 cq_event = lpfc_sli4_cq_event_alloc(phba);
12525 if (!cq_event) {
12526 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12527 "0602 Failed to alloc CQ_EVENT entry\n");
12528 return NULL;
12529 }
12530
12531 /* Move the CQE into the event */
12532 memcpy(&cq_event->cqe, entry, size);
12533 return cq_event;
12534}
12535
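The new lpfc_cq_event_setup() folds the alloc-log-copy sequence that used to be open-coded at each call site into one helper, so callers reduce to a single NULL check (as the two hunks below show). A generic userspace rendering of the same factor-out, with invented names:

    #include <stdlib.h>
    #include <string.h>

    struct event {
            unsigned char cqe[64];  /* large enough for the copied entry */
    };

    static struct event *event_setup(const void *entry, size_t size)
    {
            struct event *ev = malloc(sizeof(*ev));

            if (!ev || size > sizeof(ev->cqe)) {
                    free(ev);       /* free(NULL) is a no-op */
                    return NULL;    /* caller logs and returns false */
            }
            memcpy(ev->cqe, entry, size);
            return ev;
    }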
12551/** 12536/**
12552 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event 12537 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
12553 * @phba: Pointer to HBA context object. 12538 * @phba: Pointer to HBA context object.
@@ -12569,16 +12554,9 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12569 "word2:x%x, word3:x%x\n", mcqe->word0, 12554 "word2:x%x, word3:x%x\n", mcqe->word0,
12570 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 12555 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12571 12556
12572 /* Allocate a new internal CQ_EVENT entry */ 12557 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
12573 cq_event = lpfc_sli4_cq_event_alloc(phba); 12558 if (!cq_event)
12574 if (!cq_event) {
12575 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12576 "0394 Failed to allocate CQ_EVENT entry\n");
12577 return false; 12559 return false;
12578 }
12579
12580 /* Move the CQE into an asynchronous event entry */
12581 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
12582 spin_lock_irqsave(&phba->hbalock, iflags); 12560 spin_lock_irqsave(&phba->hbalock, iflags);
12583 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 12561 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12584 /* Set the async event flag */ 12562 /* Set the async event flag */
@@ -12824,18 +12802,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12824 struct lpfc_cq_event *cq_event; 12802 struct lpfc_cq_event *cq_event;
12825 unsigned long iflags; 12803 unsigned long iflags;
12826 12804
12827 /* Allocate a new internal CQ_EVENT entry */
12828 cq_event = lpfc_sli4_cq_event_alloc(phba);
12829 if (!cq_event) {
12830 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12831 "0602 Failed to allocate CQ_EVENT entry\n");
12832 return false;
12833 }
12834
12835 /* Move the CQE into the proper xri abort event list */
12836 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12837 switch (cq->subtype) { 12805 switch (cq->subtype) {
12838 case LPFC_FCP: 12806 case LPFC_FCP:
12807 cq_event = lpfc_cq_event_setup(
12808 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12809 if (!cq_event)
12810 return false;
12839 spin_lock_irqsave(&phba->hbalock, iflags); 12811 spin_lock_irqsave(&phba->hbalock, iflags);
12840 list_add_tail(&cq_event->list, 12812 list_add_tail(&cq_event->list,
12841 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 12813 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
@@ -12844,7 +12816,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12844 spin_unlock_irqrestore(&phba->hbalock, iflags); 12816 spin_unlock_irqrestore(&phba->hbalock, iflags);
12845 workposted = true; 12817 workposted = true;
12846 break; 12818 break;
12819 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
12847 case LPFC_ELS: 12820 case LPFC_ELS:
12821 cq_event = lpfc_cq_event_setup(
12822 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12823 if (!cq_event)
12824 return false;
12848 spin_lock_irqsave(&phba->hbalock, iflags); 12825 spin_lock_irqsave(&phba->hbalock, iflags);
12849 list_add_tail(&cq_event->list, 12826 list_add_tail(&cq_event->list,
12850 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 12827 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -12854,13 +12831,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12854 workposted = true; 12831 workposted = true;
12855 break; 12832 break;
12856 case LPFC_NVME: 12833 case LPFC_NVME:
12857 spin_lock_irqsave(&phba->hbalock, iflags); 12834 /* Notify aborted XRI for NVME work queue */
12858 list_add_tail(&cq_event->list, 12835 if (phba->nvmet_support)
12859 &phba->sli4_hba.sp_nvme_xri_aborted_work_queue); 12836 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
12860 /* Set the nvme xri abort event flag */ 12837 else
12861 phba->hba_flag |= NVME_XRI_ABORT_EVENT; 12838 lpfc_sli4_nvme_xri_aborted(phba, wcqe);
12862 spin_unlock_irqrestore(&phba->hbalock, iflags); 12839
12863 workposted = true; 12840 workposted = false;
12864 break; 12841 break;
12865 default: 12842 default:
12866 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12843 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12868,7 +12845,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12868 "%08x %08x %08x %08x\n", 12845 "%08x %08x %08x %08x\n",
12869 cq->subtype, wcqe->word0, wcqe->parameter, 12846 cq->subtype, wcqe->word0, wcqe->parameter,
12870 wcqe->word2, wcqe->word3); 12847 wcqe->word2, wcqe->word3);
12871 lpfc_sli4_cq_event_release(phba, cq_event);
12872 workposted = false; 12848 workposted = false;
12873 break; 12849 break;
12874 } 12850 }
@@ -12913,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12913 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12914 "2537 Receive Frame Truncated!!\n"); 12890 "2537 Receive Frame Truncated!!\n");
12915 case FC_STATUS_RQ_SUCCESS: 12891 case FC_STATUS_RQ_SUCCESS:
12916 lpfc_sli4_rq_release(hrq, drq);
12917 spin_lock_irqsave(&phba->hbalock, iflags); 12892 spin_lock_irqsave(&phba->hbalock, iflags);
12893 lpfc_sli4_rq_release(hrq, drq);
12918 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 12894 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12919 if (!dma_buf) { 12895 if (!dma_buf) {
12920 hrq->RQ_no_buf_found++; 12896 hrq->RQ_no_buf_found++;
@@ -13316,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13316 "6126 Receive Frame Truncated!!\n"); 13292 "6126 Receive Frame Truncated!!\n");
13317 /* Drop thru */ 13293 /* Drop thru */
13318 case FC_STATUS_RQ_SUCCESS: 13294 case FC_STATUS_RQ_SUCCESS:
13319 lpfc_sli4_rq_release(hrq, drq);
13320 spin_lock_irqsave(&phba->hbalock, iflags); 13295 spin_lock_irqsave(&phba->hbalock, iflags);
13296 lpfc_sli4_rq_release(hrq, drq);
13321 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13297 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13322 if (!dma_buf) { 13298 if (!dma_buf) {
13323 hrq->RQ_no_buf_found++; 13299 hrq->RQ_no_buf_found++;
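Both receive-queue hunks above move lpfc_sli4_rq_release() inside the hbalock critical section, presumably so that releasing the RQ indices and dequeuing the matching buffer are observed atomically by other CPUs. A userspace analogue of the corrected ordering (ring_release() and buf_list_get() are hypothetical stand-ins):

    #include <pthread.h>

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

    static void ring_release(void) { /* advance hba indices (stub) */ }
    static void *buf_list_get(void) { return (void *)0; /* stub */ }

    static void *rq_consume(void)
    {
            void *buf;

            pthread_mutex_lock(&q_lock);
            ring_release();         /* was previously done unlocked */
            buf = buf_list_get();
            pthread_mutex_unlock(&q_lock);
            return buf;             /* NULL maps to RQ_no_buf_found++ */
    }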
@@ -13919,7 +13895,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
13919 while (!list_empty(&queue->page_list)) { 13895 while (!list_empty(&queue->page_list)) {
13920 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 13896 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
13921 list); 13897 list);
13922 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 13898 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
13923 dmabuf->virt, dmabuf->phys); 13899 dmabuf->virt, dmabuf->phys);
13924 kfree(dmabuf); 13900 kfree(dmabuf);
13925 } 13901 }
@@ -13938,6 +13914,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
13938/** 13914/**
13939 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 13915 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
13940 * @phba: The HBA that this queue is being created on. 13916 * @phba: The HBA that this queue is being created on.
13917 * @page_size: The size of a queue page
13941 * @entry_size: The size of each queue entry for this queue. 13918 * @entry_size: The size of each queue entry for this queue.
13942 * @entry_count: The number of entries that this queue will handle. 13919
13943 * 13920 *
@@ -13946,8 +13923,8 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
13946 * queue on the HBA. 13923 * queue on the HBA.
13947 **/ 13924 **/
13948struct lpfc_queue * 13925struct lpfc_queue *
13949lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 13926lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
13950 uint32_t entry_count) 13927 uint32_t entry_size, uint32_t entry_count)
13951{ 13928{
13952 struct lpfc_queue *queue; 13929 struct lpfc_queue *queue;
13953 struct lpfc_dmabuf *dmabuf; 13930 struct lpfc_dmabuf *dmabuf;
@@ -13956,7 +13933,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13956 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13933 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13957 13934
13958 if (!phba->sli4_hba.pc_sli4_params.supported) 13935 if (!phba->sli4_hba.pc_sli4_params.supported)
13959 hw_page_size = SLI4_PAGE_SIZE; 13936 hw_page_size = page_size;
13960 13937
13961 queue = kzalloc(sizeof(struct lpfc_queue) + 13938 queue = kzalloc(sizeof(struct lpfc_queue) +
13962 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 13939 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
@@ -13973,6 +13950,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13973 INIT_LIST_HEAD(&queue->wq_list); 13950 INIT_LIST_HEAD(&queue->wq_list);
13974 INIT_LIST_HEAD(&queue->page_list); 13951 INIT_LIST_HEAD(&queue->page_list);
13975 INIT_LIST_HEAD(&queue->child_list); 13952 INIT_LIST_HEAD(&queue->child_list);
13953
13954 /* Set queue parameters now. If the system cannot provide memory
13955 * resources, the free routine needs to know what was allocated.
13956 */
13957 queue->entry_size = entry_size;
13958 queue->entry_count = entry_count;
13959 queue->page_size = hw_page_size;
13960 queue->phba = phba;
13961
13976 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 13962 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
13977 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 13963 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
13978 if (!dmabuf) 13964 if (!dmabuf)
@@ -13994,9 +13980,6 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13994 queue->qe[total_qe_count].address = dma_pointer; 13980 queue->qe[total_qe_count].address = dma_pointer;
13995 } 13981 }
13996 } 13982 }
13997 queue->entry_size = entry_size;
13998 queue->entry_count = entry_count;
13999 queue->phba = phba;
14000 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); 13983 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14001 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); 13984 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14002 13985
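The relocation above is justified by the new in-code comment: entry_size, entry_count, page_size and phba are recorded before the page loop so that, if an allocation fails partway, the free routine can consult queue->page_size (which lpfc_sli4_queue_free() now uses instead of the SLI4_PAGE_SIZE constant). A self-contained sketch of the same init-before-allocate discipline:

    #include <stdlib.h>

    struct queue {
            size_t page_size;
            size_t page_count;
            void **pages;
    };

    static void queue_free(struct queue *q)
    {
            size_t i;

            if (!q)
                    return;
            for (i = 0; i < q->page_count && q->pages && q->pages[i]; i++)
                    free(q->pages[i]);  /* sizing fields already valid */
            free(q->pages);
            free(q);
    }

    static struct queue *queue_alloc(size_t page_size, size_t page_count)
    {
            struct queue *q = calloc(1, sizeof(*q));
            size_t i;

            if (!q)
                    return NULL;
            q->page_size = page_size;   /* set before any page is allocated */
            q->page_count = page_count;
            q->pages = calloc(page_count, sizeof(void *));
            if (!q->pages)
                    goto fail;
            for (i = 0; i < page_count; i++) {
                    q->pages[i] = calloc(1, page_size);
                    if (!q->pages[i])
                            goto fail;
            }
            return q;
    fail:
            queue_free(q);      /* safe on a partially built queue */
            return NULL;
    }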
@@ -14299,7 +14282,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14299 if (!cq || !eq) 14282 if (!cq || !eq)
14300 return -ENODEV; 14283 return -ENODEV;
14301 if (!phba->sli4_hba.pc_sli4_params.supported) 14284 if (!phba->sli4_hba.pc_sli4_params.supported)
14302 hw_page_size = SLI4_PAGE_SIZE; 14285 hw_page_size = cq->page_size;
14303 14286
14304 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14287 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14305 if (!mbox) 14288 if (!mbox)
@@ -14318,8 +14301,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14318 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14301 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14319 phba->sli4_hba.pc_sli4_params.cqv); 14302 phba->sli4_hba.pc_sli4_params.cqv);
14320 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14303 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14321 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */ 14304 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14322 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); 14305 (cq->page_size / SLI4_PAGE_SIZE));
14323 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14306 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14324 eq->queue_id); 14307 eq->queue_id);
14325 } else { 14308 } else {
@@ -14327,6 +14310,18 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14327 eq->queue_id); 14310 eq->queue_id);
14328 } 14311 }
14329 switch (cq->entry_count) { 14312 switch (cq->entry_count) {
14313 case 2048:
14314 case 4096:
14315 if (phba->sli4_hba.pc_sli4_params.cqv ==
14316 LPFC_Q_CREATE_VERSION_2) {
14317 cq_create->u.request.context.lpfc_cq_context_count =
14318 cq->entry_count;
14319 bf_set(lpfc_cq_context_count,
14320 &cq_create->u.request.context,
14321 LPFC_CQ_CNT_WORD7);
14322 break;
14323 }
14324 /* Fall Thru */
14330 default: 14325 default:
14331 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14326 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14332 "0361 Unsupported CQ count: " 14327 "0361 Unsupported CQ count: "
@@ -14352,7 +14347,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14352 break; 14347 break;
14353 } 14348 }
14354 list_for_each_entry(dmabuf, &cq->page_list, list) { 14349 list_for_each_entry(dmabuf, &cq->page_list, list) {
14355 memset(dmabuf->virt, 0, hw_page_size); 14350 memset(dmabuf->virt, 0, cq->page_size);
14356 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14351 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14357 putPaddrLow(dmabuf->phys); 14352 putPaddrLow(dmabuf->phys);
14358 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14353 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -14433,8 +14428,6 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14433 numcq = phba->cfg_nvmet_mrq; 14428 numcq = phba->cfg_nvmet_mrq;
14434 if (!cqp || !eqp || !numcq) 14429 if (!cqp || !eqp || !numcq)
14435 return -ENODEV; 14430 return -ENODEV;
14436 if (!phba->sli4_hba.pc_sli4_params.supported)
14437 hw_page_size = SLI4_PAGE_SIZE;
14438 14431
14439 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14432 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14440 if (!mbox) 14433 if (!mbox)
@@ -14465,6 +14458,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14465 status = -ENOMEM; 14458 status = -ENOMEM;
14466 goto out; 14459 goto out;
14467 } 14460 }
14461 if (!phba->sli4_hba.pc_sli4_params.supported)
14462 hw_page_size = cq->page_size;
14468 14463
14469 switch (idx) { 14464 switch (idx) {
14470 case 0: 14465 case 0:
@@ -14482,6 +14477,19 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14482 bf_set(lpfc_mbx_cq_create_set_num_cq, 14477 bf_set(lpfc_mbx_cq_create_set_num_cq,
14483 &cq_set->u.request, numcq); 14478 &cq_set->u.request, numcq);
14484 switch (cq->entry_count) { 14479 switch (cq->entry_count) {
14480 case 2048:
14481 case 4096:
14482 if (phba->sli4_hba.pc_sli4_params.cqv ==
14483 LPFC_Q_CREATE_VERSION_2) {
14484 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14485 &cq_set->u.request,
14486 cq->entry_count);
14487 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14488 &cq_set->u.request,
14489 LPFC_CQ_CNT_WORD7);
14490 break;
14491 }
14492 /* Fall Thru */
14485 default: 14493 default:
14486 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14494 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14487 "3118 Bad CQ count. (%d)\n", 14495 "3118 Bad CQ count. (%d)\n",
@@ -14578,6 +14586,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14578 cq->host_index = 0; 14586 cq->host_index = 0;
14579 cq->hba_index = 0; 14587 cq->hba_index = 0;
14580 cq->entry_repost = LPFC_CQ_REPOST; 14588 cq->entry_repost = LPFC_CQ_REPOST;
14589 cq->chann = idx;
14581 14590
14582 rc = 0; 14591 rc = 0;
14583 list_for_each_entry(dmabuf, &cq->page_list, list) { 14592 list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14872,12 +14881,13 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
14872 void __iomem *bar_memmap_p; 14881 void __iomem *bar_memmap_p;
14873 uint32_t db_offset; 14882 uint32_t db_offset;
14874 uint16_t pci_barset; 14883 uint16_t pci_barset;
14884 uint8_t wq_create_version;
14875 14885
14876 /* sanity check on queue memory */ 14886 /* sanity check on queue memory */
14877 if (!wq || !cq) 14887 if (!wq || !cq)
14878 return -ENODEV; 14888 return -ENODEV;
14879 if (!phba->sli4_hba.pc_sli4_params.supported) 14889 if (!phba->sli4_hba.pc_sli4_params.supported)
14880 hw_page_size = SLI4_PAGE_SIZE; 14890 hw_page_size = wq->page_size;
14881 14891
14882 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14892 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14883 if (!mbox) 14893 if (!mbox)
@@ -14898,7 +14908,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
14898 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14908 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14899 phba->sli4_hba.pc_sli4_params.wqv); 14909 phba->sli4_hba.pc_sli4_params.wqv);
14900 14910
14901 switch (phba->sli4_hba.pc_sli4_params.wqv) { 14911 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
14912 wq_create_version = LPFC_Q_CREATE_VERSION_1;
14913 else
14914 wq_create_version = LPFC_Q_CREATE_VERSION_0;
14915
14916 switch (wq_create_version) {
14902 case LPFC_Q_CREATE_VERSION_0: 14917 case LPFC_Q_CREATE_VERSION_0:
14903 switch (wq->entry_size) { 14918 switch (wq->entry_size) {
14904 default: 14919 default:
@@ -14956,7 +14971,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
14956 } 14971 }
14957 bf_set(lpfc_mbx_wq_create_page_size, 14972 bf_set(lpfc_mbx_wq_create_page_size,
14958 &wq_create->u.request_1, 14973 &wq_create->u.request_1,
14959 LPFC_WQ_PAGE_SIZE_4096); 14974 (wq->page_size / SLI4_PAGE_SIZE));
14960 page = wq_create->u.request_1.page; 14975 page = wq_create->u.request_1.page;
14961 break; 14976 break;
14962 default: 14977 default:
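Two related changes in this hunk: the WQ create version is derived from the LPFC_WQ_SZ128_SUPPORT capability bit rather than taken directly from the firmware's wqv field, and the version-1 mailbox now encodes the queue page size in units of 4 KiB instead of the hard-coded LPFC_WQ_PAGE_SIZE_4096. A toy illustration of that encoding (the capability bit value below is made up for the example):

    #include <stdio.h>

    #define SLI4_PAGE_SIZE      4096u
    #define WQ_SZ128_SUPPORT    0x2u   /* illustrative bit, not the real mask */

    int main(void)
    {
            unsigned int wqsize = WQ_SZ128_SUPPORT;  /* as reported by FW */
            unsigned int page_size = 16384;          /* expanded page size */
            unsigned int version = (wqsize & WQ_SZ128_SUPPORT) ? 1 : 0;

            /* Version 1 programs page size as a multiple of 4 KiB pages. */
            printf("create version %u, page-size field %u\n",
                   version, page_size / SLI4_PAGE_SIZE);
            return 0;
    }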
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 13b8f4d4da34..81fb58e59e60 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -161,7 +161,6 @@ struct lpfc_queue {
161#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */ 161#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
162 uint32_t queue_id; /* Queue ID assigned by the hardware */ 162 uint32_t queue_id; /* Queue ID assigned by the hardware */
163 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ 163 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
164 uint32_t page_count; /* Number of pages allocated for this queue */
165 uint32_t host_index; /* The host's index for putting or getting */ 164 uint32_t host_index; /* The host's index for putting or getting */
166 uint32_t hba_index; /* The last known hba index for get or put */ 165 uint32_t hba_index; /* The last known hba index for get or put */
167 166
@@ -169,6 +168,11 @@ struct lpfc_queue {
169 struct lpfc_rqb *rqbp; /* ptr to RQ buffers */ 168 struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
170 169
171 uint32_t q_mode; 170 uint32_t q_mode;
171 uint16_t page_count; /* Number of pages allocated for this queue */
172 uint16_t page_size; /* size of page allocated for this queue */
173#define LPFC_EXPANDED_PAGE_SIZE 16384
174#define LPFC_DEFAULT_PAGE_SIZE 4096
175 uint16_t chann; /* IO channel this queue is associated with */
172 uint16_t db_format; 176 uint16_t db_format;
173#define LPFC_DB_RING_FORMAT 0x01 177#define LPFC_DB_RING_FORMAT 0x01
174#define LPFC_DB_LIST_FORMAT 0x02 178#define LPFC_DB_LIST_FORMAT 0x02
@@ -366,9 +370,9 @@ struct lpfc_bmbx {
366 370
367#define LPFC_EQE_DEF_COUNT 1024 371#define LPFC_EQE_DEF_COUNT 1024
368#define LPFC_CQE_DEF_COUNT 1024 372#define LPFC_CQE_DEF_COUNT 1024
373#define LPFC_CQE_EXP_COUNT 4096
369#define LPFC_WQE_DEF_COUNT 256 374#define LPFC_WQE_DEF_COUNT 256
370#define LPFC_WQE128_DEF_COUNT 128 375#define LPFC_WQE_EXP_COUNT 1024
371#define LPFC_WQE128_MAX_COUNT 256
372#define LPFC_MQE_DEF_COUNT 16 376#define LPFC_MQE_DEF_COUNT 16
373#define LPFC_RQE_DEF_COUNT 512 377#define LPFC_RQE_DEF_COUNT 512
374 378
@@ -668,7 +672,6 @@ struct lpfc_sli4_hba {
668 struct list_head sp_asynce_work_queue; 672 struct list_head sp_asynce_work_queue;
669 struct list_head sp_fcp_xri_aborted_work_queue; 673 struct list_head sp_fcp_xri_aborted_work_queue;
670 struct list_head sp_els_xri_aborted_work_queue; 674 struct list_head sp_els_xri_aborted_work_queue;
671 struct list_head sp_nvme_xri_aborted_work_queue;
672 struct list_head sp_unsol_work_queue; 675 struct list_head sp_unsol_work_queue;
673 struct lpfc_sli4_link link_state; 676 struct lpfc_sli4_link link_state;
674 struct lpfc_sli4_lnk_info lnk_info; 677 struct lpfc_sli4_lnk_info lnk_info;
@@ -769,7 +772,7 @@ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
769 772
770void lpfc_sli4_hba_reset(struct lpfc_hba *); 773void lpfc_sli4_hba_reset(struct lpfc_hba *);
771struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, 774struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
772 uint32_t); 775 uint32_t, uint32_t);
773void lpfc_sli4_queue_free(struct lpfc_queue *); 776void lpfc_sli4_queue_free(struct lpfc_queue *);
774int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); 777int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
775int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, 778int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
@@ -820,7 +823,6 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
820int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, 823int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
821 void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); 824 void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
822void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); 825void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
823void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
824void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); 826void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
825void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, 827void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
826 struct sli4_wcqe_xri_aborted *); 828 struct sli4_wcqe_xri_aborted *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e0181371af09..c232bf0e8998 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
20 * included with this package. * 20 * included with this package. *
21 *******************************************************************/ 21 *******************************************************************/
22 22
23#define LPFC_DRIVER_VERSION "11.4.0.4" 23#define LPFC_DRIVER_VERSION "11.4.0.6"
24#define LPFC_DRIVER_NAME "lpfc" 24#define LPFC_DRIVER_NAME "lpfc"
25 25
26/* Used for SLI 2/3 */ 26/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index f5a36ccb8606..ba6503f37756 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
35/* 35/*
36 * MegaRAID SAS Driver meta data 36 * MegaRAID SAS Driver meta data
37 */ 37 */
38#define MEGASAS_VERSION "07.703.05.00-rc1" 38#define MEGASAS_VERSION "07.704.04.00-rc1"
39#define MEGASAS_RELDATE "October 5, 2017" 39#define MEGASAS_RELDATE "December 7, 2017"
40 40
41/* 41/*
42 * Device IDs 42 * Device IDs
@@ -197,6 +197,7 @@ enum MFI_CMD_OP {
197 MFI_CMD_ABORT = 0x6, 197 MFI_CMD_ABORT = 0x6,
198 MFI_CMD_SMP = 0x7, 198 MFI_CMD_SMP = 0x7,
199 MFI_CMD_STP = 0x8, 199 MFI_CMD_STP = 0x8,
200 MFI_CMD_NVME = 0x9,
200 MFI_CMD_OP_COUNT, 201 MFI_CMD_OP_COUNT,
201 MFI_CMD_INVALID = 0xff 202 MFI_CMD_INVALID = 0xff
202}; 203};
@@ -230,7 +231,7 @@ enum MFI_CMD_OP {
230/* 231/*
231 * Global functions 232 * Global functions
232 */ 233 */
233extern u8 MR_ValidateMapInfo(struct megasas_instance *instance); 234extern u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id);
234 235
235 236
236/* 237/*
@@ -1352,7 +1353,13 @@ struct megasas_ctrl_info {
1352 1353
1353 struct { 1354 struct {
1354 #if defined(__BIG_ENDIAN_BITFIELD) 1355 #if defined(__BIG_ENDIAN_BITFIELD)
1355 u16 reserved:8; 1356 u16 reserved:2;
1357 u16 support_nvme_passthru:1;
1358 u16 support_pl_debug_info:1;
1359 u16 support_flash_comp_info:1;
1360 u16 support_host_info:1;
1361 u16 support_dual_fw_update:1;
1362 u16 support_ssc_rev3:1;
1356 u16 fw_swaps_bbu_vpd_info:1; 1363 u16 fw_swaps_bbu_vpd_info:1;
1357 u16 support_pd_map_target_id:1; 1364 u16 support_pd_map_target_id:1;
1358 u16 support_ses_ctrl_in_multipathcfg:1; 1365 u16 support_ses_ctrl_in_multipathcfg:1;
@@ -1377,7 +1384,19 @@ struct megasas_ctrl_info {
1377 * provide the data in little endian order 1384 * provide the data in little endian order
1378 */ 1385 */
1379 u16 fw_swaps_bbu_vpd_info:1; 1386 u16 fw_swaps_bbu_vpd_info:1;
1380 u16 reserved:8; 1387 u16 support_ssc_rev3:1;
1388 /* FW supports CacheCade 3.0, only one SSCD creation allowed */
1389 u16 support_dual_fw_update:1;
1390 /* FW supports dual firmware update feature */
1391 u16 support_host_info:1;
1392 /* FW supports MR_DCMD_CTRL_HOST_INFO_SET/GET */
1393 u16 support_flash_comp_info:1;
1394 /* FW supports MR_DCMD_CTRL_FLASH_COMP_INFO_GET */
1395 u16 support_pl_debug_info:1;
1396 /* FW supports retrieval of PL debug information through apps */
1397 u16 support_nvme_passthru:1;
1398 /* FW supports NVMe passthru commands */
1399 u16 reserved:2;
1381 #endif 1400 #endif
1382 } adapter_operations4; 1401 } adapter_operations4;
1383 u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */ 1402 u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
@@ -1630,7 +1649,8 @@ union megasas_sgl_frame {
1630typedef union _MFI_CAPABILITIES { 1649typedef union _MFI_CAPABILITIES {
1631 struct { 1650 struct {
1632#if defined(__BIG_ENDIAN_BITFIELD) 1651#if defined(__BIG_ENDIAN_BITFIELD)
1633 u32 reserved:18; 1652 u32 reserved:17;
1653 u32 support_nvme_passthru:1;
1634 u32 support_64bit_mode:1; 1654 u32 support_64bit_mode:1;
1635 u32 support_pd_map_target_id:1; 1655 u32 support_pd_map_target_id:1;
1636 u32 support_qd_throttling:1; 1656 u32 support_qd_throttling:1;
@@ -1660,7 +1680,8 @@ typedef union _MFI_CAPABILITIES {
1660 u32 support_qd_throttling:1; 1680 u32 support_qd_throttling:1;
1661 u32 support_pd_map_target_id:1; 1681 u32 support_pd_map_target_id:1;
1662 u32 support_64bit_mode:1; 1682 u32 support_64bit_mode:1;
1663 u32 reserved:18; 1683 u32 support_nvme_passthru:1;
1684 u32 reserved:17;
1664#endif 1685#endif
1665 } mfi_capabilities; 1686 } mfi_capabilities;
1666 __le32 reg; 1687 __le32 reg;
@@ -2188,7 +2209,6 @@ struct megasas_instance {
2188 struct megasas_evt_detail *evt_detail; 2209 struct megasas_evt_detail *evt_detail;
2189 dma_addr_t evt_detail_h; 2210 dma_addr_t evt_detail_h;
2190 struct megasas_cmd *aen_cmd; 2211 struct megasas_cmd *aen_cmd;
2191 struct mutex hba_mutex;
2192 struct semaphore ioctl_sem; 2212 struct semaphore ioctl_sem;
2193 2213
2194 struct Scsi_Host *host; 2214 struct Scsi_Host *host;
@@ -2269,6 +2289,7 @@ struct megasas_instance {
2269 u32 nvme_page_size; 2289 u32 nvme_page_size;
2270 u8 adapter_type; 2290 u8 adapter_type;
2271 bool consistent_mask_64bit; 2291 bool consistent_mask_64bit;
2292 bool support_nvme_passthru;
2272}; 2293};
2273struct MR_LD_VF_MAP { 2294struct MR_LD_VF_MAP {
2274 u32 size; 2295 u32 size;
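The adapter_operations4 and MFI_CAPABILITIES hunks above carve the new support_nvme_passthru bit (and its neighbors) out of the reserved field in mirror image between the __BIG_ENDIAN_BITFIELD branch and the little-endian branch, because C bitfield allocation order follows byte order on these ABIs while the firmware's wire format stays little endian. A minimal mirrored declaration (hypothetical 16-bit example):

    struct caps {
    #if defined(__BIG_ENDIAN_BITFIELD)
            unsigned short reserved:14;
            unsigned short feature_b:1;
            unsigned short feature_a:1;  /* lowest wire bit declared last */
    #else
            unsigned short feature_a:1;  /* lowest wire bit declared first */
            unsigned short feature_b:1;
            unsigned short reserved:14;
    #endif
    };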
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2c8cfa27909d..2791141bd035 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -181,6 +181,7 @@ static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
181static u32 support_poll_for_event; 181static u32 support_poll_for_event;
182u32 megasas_dbg_lvl; 182u32 megasas_dbg_lvl;
183static u32 support_device_change; 183static u32 support_device_change;
184static bool support_nvme_encapsulation;
184 185
185/* define lock for aen poll */ 186/* define lock for aen poll */
186spinlock_t poll_aen_lock; 187spinlock_t poll_aen_lock;
@@ -1952,7 +1953,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
1952 } 1953 }
1953 } 1954 }
1954 1955
1955 mutex_lock(&instance->hba_mutex); 1956 mutex_lock(&instance->reset_mutex);
1956 /* Send DCMD to Firmware and cache the information */ 1957 /* Send DCMD to Firmware and cache the information */
1957 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) 1958 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1958 megasas_get_pd_info(instance, sdev); 1959 megasas_get_pd_info(instance, sdev);
@@ -1966,7 +1967,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
1966 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; 1967 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1967 megasas_set_static_target_properties(sdev, is_target_prop); 1968 megasas_set_static_target_properties(sdev, is_target_prop);
1968 1969
1969 mutex_unlock(&instance->hba_mutex); 1970 mutex_unlock(&instance->reset_mutex);
1970 1971
1971 /* This sdev property may change post OCR */ 1972 /* This sdev property may change post OCR */
1972 megasas_set_dynamic_target_properties(sdev); 1973 megasas_set_dynamic_target_properties(sdev);
@@ -3122,6 +3123,16 @@ megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr
3122 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding)); 3123 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3123} 3124}
3124 3125
3126static ssize_t
3127megasas_fw_cmds_outstanding_show(struct device *cdev,
3128 struct device_attribute *attr, char *buf)
3129{
3130 struct Scsi_Host *shost = class_to_shost(cdev);
3131 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3132
3133 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3134}
3135
3125static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR, 3136static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3126 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store); 3137 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3127static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO, 3138static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
@@ -3132,6 +3143,8 @@ static DEVICE_ATTR(page_size, S_IRUGO,
3132 megasas_page_size_show, NULL); 3143 megasas_page_size_show, NULL);
3133static DEVICE_ATTR(ldio_outstanding, S_IRUGO, 3144static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3134 megasas_ldio_outstanding_show, NULL); 3145 megasas_ldio_outstanding_show, NULL);
3146static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
3147 megasas_fw_cmds_outstanding_show, NULL);
3135 3148
3136struct device_attribute *megaraid_host_attrs[] = { 3149struct device_attribute *megaraid_host_attrs[] = {
3137 &dev_attr_fw_crash_buffer_size, 3150 &dev_attr_fw_crash_buffer_size,
@@ -3139,6 +3152,7 @@ struct device_attribute *megaraid_host_attrs[] = {
3139 &dev_attr_fw_crash_state, 3152 &dev_attr_fw_crash_state,
3140 &dev_attr_page_size, 3153 &dev_attr_page_size,
3141 &dev_attr_ldio_outstanding, 3154 &dev_attr_ldio_outstanding,
3155 &dev_attr_fw_cmds_outstanding,
3142 NULL, 3156 NULL,
3143}; 3157};
3144 3158
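The new fw_cmds_outstanding attribute follows the stock pattern for a read-only Scsi_Host attribute: a show() handler that snapshots an atomic counter, a DEVICE_ATTR() declaration, and an entry in the host attrs table. The generic shape, with my_instance and counter as hypothetical names (kernel fragment; headers and table wiring omitted as in the hunk above):

    static ssize_t foo_show(struct device *cdev,
                            struct device_attribute *attr, char *buf)
    {
            struct Scsi_Host *shost = class_to_shost(cdev);
            struct my_instance *inst = (struct my_instance *)shost->hostdata;

            return snprintf(buf, PAGE_SIZE, "%d\n",
                            atomic_read(&inst->counter));
    }
    static DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL);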
@@ -3321,6 +3335,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3321 3335
3322 case MFI_CMD_SMP: 3336 case MFI_CMD_SMP:
3323 case MFI_CMD_STP: 3337 case MFI_CMD_STP:
3338 case MFI_CMD_NVME:
3324 megasas_complete_int_cmd(instance, cmd); 3339 megasas_complete_int_cmd(instance, cmd);
3325 break; 3340 break;
3326 3341
@@ -3331,10 +3346,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3331 && (cmd->frame->dcmd.mbox.b[1] == 1)) { 3346 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3332 fusion->fast_path_io = 0; 3347 fusion->fast_path_io = 0;
3333 spin_lock_irqsave(instance->host->host_lock, flags); 3348 spin_lock_irqsave(instance->host->host_lock, flags);
3349 status = cmd->frame->hdr.cmd_status;
3334 instance->map_update_cmd = NULL; 3350 instance->map_update_cmd = NULL;
3335 if (cmd->frame->hdr.cmd_status != 0) { 3351 if (status != MFI_STAT_OK) {
3336 if (cmd->frame->hdr.cmd_status != 3352 if (status != MFI_STAT_NOT_FOUND)
3337 MFI_STAT_NOT_FOUND)
3338 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n", 3353 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3339 cmd->frame->hdr.cmd_status); 3354 cmd->frame->hdr.cmd_status);
3340 else { 3355 else {
@@ -3344,8 +3359,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3344 flags); 3359 flags);
3345 break; 3360 break;
3346 } 3361 }
3347 } else 3362 }
3348 instance->map_id++; 3363
3349 megasas_return_cmd(instance, cmd); 3364 megasas_return_cmd(instance, cmd);
3350 3365
3351 /* 3366 /*
@@ -3353,10 +3368,14 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3353 * Validate Map will set proper value. 3368 * Validate Map will set proper value.
3354 * Meanwhile all IOs will go as LD IO. 3369 * Meanwhile all IOs will go as LD IO.
3355 */ 3370 */
3356 if (MR_ValidateMapInfo(instance)) 3371 if (status == MFI_STAT_OK &&
3372 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3373 instance->map_id++;
3357 fusion->fast_path_io = 1; 3374 fusion->fast_path_io = 1;
3358 else 3375 } else {
3359 fusion->fast_path_io = 0; 3376 fusion->fast_path_io = 0;
3377 }
3378
3360 megasas_sync_map_info(instance); 3379 megasas_sync_map_info(instance);
3361 spin_unlock_irqrestore(instance->host->host_lock, 3380 spin_unlock_irqrestore(instance->host->host_lock,
3362 flags); 3381 flags);
@@ -4677,10 +4696,12 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4677 sizeof(struct megasas_ctrl_info)); 4696 sizeof(struct megasas_ctrl_info));
4678 4697
4679 if ((instance->adapter_type != MFI_SERIES) && 4698 if ((instance->adapter_type != MFI_SERIES) &&
4680 !instance->mask_interrupts) 4699 !instance->mask_interrupts) {
4681 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4700 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4682 else 4701 } else {
4683 ret = megasas_issue_polled(instance, cmd); 4702 ret = megasas_issue_polled(instance, cmd);
4703 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4704 }
4684 4705
4685 switch (ret) { 4706 switch (ret) {
4686 case DCMD_SUCCESS: 4707 case DCMD_SUCCESS:
@@ -4702,6 +4723,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4702 ci->adapterOperations3.useSeqNumJbodFP; 4723 ci->adapterOperations3.useSeqNumJbodFP;
4703 instance->support_morethan256jbod = 4724 instance->support_morethan256jbod =
4704 ci->adapter_operations4.support_pd_map_target_id; 4725 ci->adapter_operations4.support_pd_map_target_id;
4726 instance->support_nvme_passthru =
4727 ci->adapter_operations4.support_nvme_passthru;
4705 4728
4706 /*Check whether controller is iMR or MR */ 4729 /*Check whether controller is iMR or MR */
4707 instance->is_imr = (ci->memory_size ? 0 : 1); 4730 instance->is_imr = (ci->memory_size ? 0 : 1);
@@ -4718,6 +4741,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4718 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 4741 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4719 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 4742 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4720 instance->secure_jbod_support ? "Yes" : "No"); 4743 instance->secure_jbod_support ? "Yes" : "No");
4744 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
4745 instance->support_nvme_passthru ? "Yes" : "No");
4721 break; 4746 break;
4722 4747
4723 case DCMD_TIMEOUT: 4748 case DCMD_TIMEOUT:
@@ -5387,7 +5412,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5387 } 5412 }
5388 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { 5413 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5389 fusion->stream_detect_by_ld[i] = 5414 fusion->stream_detect_by_ld[i] =
5390 kmalloc(sizeof(struct LD_STREAM_DETECT), 5415 kzalloc(sizeof(struct LD_STREAM_DETECT),
5391 GFP_KERNEL); 5416 GFP_KERNEL);
5392 if (!fusion->stream_detect_by_ld[i]) { 5417 if (!fusion->stream_detect_by_ld[i]) {
5393 dev_err(&instance->pdev->dev, 5418 dev_err(&instance->pdev->dev,
@@ -5432,7 +5457,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5432 ctrl_info->adapterOperations2.supportUnevenSpans; 5457 ctrl_info->adapterOperations2.supportUnevenSpans;
5433 if (instance->UnevenSpanSupport) { 5458 if (instance->UnevenSpanSupport) {
5434 struct fusion_context *fusion = instance->ctrl_context; 5459 struct fusion_context *fusion = instance->ctrl_context;
5435 if (MR_ValidateMapInfo(instance)) 5460 if (MR_ValidateMapInfo(instance, instance->map_id))
5436 fusion->fast_path_io = 1; 5461 fusion->fast_path_io = 1;
5437 else 5462 else
5438 fusion->fast_path_io = 0; 5463 fusion->fast_path_io = 0;
@@ -5581,6 +5606,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
5581 struct megasas_dcmd_frame *dcmd; 5606 struct megasas_dcmd_frame *dcmd;
5582 struct megasas_evt_log_info *el_info; 5607 struct megasas_evt_log_info *el_info;
5583 dma_addr_t el_info_h = 0; 5608 dma_addr_t el_info_h = 0;
5609 int ret;
5584 5610
5585 cmd = megasas_get_cmd(instance); 5611 cmd = megasas_get_cmd(instance);
5586 5612
@@ -5613,26 +5639,29 @@ megasas_get_seq_num(struct megasas_instance *instance,
5613 megasas_set_dma_settings(instance, dcmd, el_info_h, 5639 megasas_set_dma_settings(instance, dcmd, el_info_h,
5614 sizeof(struct megasas_evt_log_info)); 5640 sizeof(struct megasas_evt_log_info));
5615 5641
5616 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) == 5642 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5617 DCMD_SUCCESS) { 5643 if (ret != DCMD_SUCCESS) {
5618 /* 5644 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5619 * Copy the data back into callers buffer 5645 __func__, __LINE__);
5620 */ 5646 goto dcmd_failed;
5621 eli->newest_seq_num = el_info->newest_seq_num; 5647 }
5622 eli->oldest_seq_num = el_info->oldest_seq_num;
5623 eli->clear_seq_num = el_info->clear_seq_num;
5624 eli->shutdown_seq_num = el_info->shutdown_seq_num;
5625 eli->boot_seq_num = el_info->boot_seq_num;
5626 } else
5627 dev_err(&instance->pdev->dev, "DCMD failed "
5628 "from %s\n", __func__);
5629 5648
5649 /*
5650 * Copy the data back into callers buffer
5651 */
5652 eli->newest_seq_num = el_info->newest_seq_num;
5653 eli->oldest_seq_num = el_info->oldest_seq_num;
5654 eli->clear_seq_num = el_info->clear_seq_num;
5655 eli->shutdown_seq_num = el_info->shutdown_seq_num;
5656 eli->boot_seq_num = el_info->boot_seq_num;
5657
5658dcmd_failed:
5630 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 5659 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5631 el_info, el_info_h); 5660 el_info, el_info_h);
5632 5661
5633 megasas_return_cmd(instance, cmd); 5662 megasas_return_cmd(instance, cmd);
5634 5663
5635 return 0; 5664 return ret;
5636} 5665}
5637 5666
5638/** 5667/**
@@ -6346,7 +6375,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
6346 spin_lock_init(&instance->stream_lock); 6375 spin_lock_init(&instance->stream_lock);
6347 spin_lock_init(&instance->completion_lock); 6376 spin_lock_init(&instance->completion_lock);
6348 6377
6349 mutex_init(&instance->hba_mutex);
6350 mutex_init(&instance->reset_mutex); 6378 mutex_init(&instance->reset_mutex);
6351 6379
6352 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 6380 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@@ -6704,6 +6732,7 @@ megasas_resume(struct pci_dev *pdev)
6704 */ 6732 */
6705 6733
6706 atomic_set(&instance->fw_outstanding, 0); 6734 atomic_set(&instance->fw_outstanding, 0);
6735 atomic_set(&instance->ldio_outstanding, 0);
6707 6736
6708 /* Now re-enable MSI-X */ 6737 /* Now re-enable MSI-X */
6709 if (instance->msix_vectors) { 6738 if (instance->msix_vectors) {
@@ -6822,7 +6851,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
6822 u32 pd_seq_map_sz; 6851 u32 pd_seq_map_sz;
6823 6852
6824 instance = pci_get_drvdata(pdev); 6853 instance = pci_get_drvdata(pdev);
6825 instance->unload = 1;
6826 host = instance->host; 6854 host = instance->host;
6827 fusion = instance->ctrl_context; 6855 fusion = instance->ctrl_context;
6828 6856
@@ -6833,6 +6861,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
6833 if (instance->fw_crash_state != UNAVAILABLE) 6861 if (instance->fw_crash_state != UNAVAILABLE)
6834 megasas_free_host_crash_buffer(instance); 6862 megasas_free_host_crash_buffer(instance);
6835 scsi_remove_host(instance->host); 6863 scsi_remove_host(instance->host);
6864 instance->unload = 1;
6836 6865
6837 if (megasas_wait_for_adapter_operational(instance)) 6866 if (megasas_wait_for_adapter_operational(instance))
6838 goto skip_firing_dcmds; 6867 goto skip_firing_dcmds;
@@ -7087,7 +7116,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7087 return -EINVAL; 7116 return -EINVAL;
7088 } 7117 }
7089 7118
7090 if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) { 7119 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
7120 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
7121 !instance->support_nvme_passthru)) {
7091 dev_err(&instance->pdev->dev, 7122 dev_err(&instance->pdev->dev,
7092 "Received invalid ioctl command 0x%x\n", 7123 "Received invalid ioctl command 0x%x\n",
7093 ioc->frame.hdr.cmd); 7124 ioc->frame.hdr.cmd);
@@ -7301,9 +7332,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7301 struct megasas_iocpacket *ioc; 7332 struct megasas_iocpacket *ioc;
7302 struct megasas_instance *instance; 7333 struct megasas_instance *instance;
7303 int error; 7334 int error;
7304 int i;
7305 unsigned long flags;
7306 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7307 7335
7308 ioc = memdup_user(user_ioc, sizeof(*ioc)); 7336 ioc = memdup_user(user_ioc, sizeof(*ioc));
7309 if (IS_ERR(ioc)) 7337 if (IS_ERR(ioc))
@@ -7315,10 +7343,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7315 goto out_kfree_ioc; 7343 goto out_kfree_ioc;
7316 } 7344 }
7317 7345
7318 /* Adjust ioctl wait time for VF mode */
7319 if (instance->requestorId)
7320 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7321
7322 /* Block ioctls in VF mode */ 7346 /* Block ioctls in VF mode */
7323 if (instance->requestorId && !allow_vf_ioctls) { 7347 if (instance->requestorId && !allow_vf_ioctls) {
7324 error = -ENODEV; 7348 error = -ENODEV;
@@ -7341,32 +7365,10 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7341 goto out_kfree_ioc; 7365 goto out_kfree_ioc;
7342 } 7366 }
7343 7367
7344 for (i = 0; i < wait_time; i++) { 7368 if (megasas_wait_for_adapter_operational(instance)) {
7345
7346 spin_lock_irqsave(&instance->hba_lock, flags);
7347 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7348 spin_unlock_irqrestore(&instance->hba_lock, flags);
7349 break;
7350 }
7351 spin_unlock_irqrestore(&instance->hba_lock, flags);
7352
7353 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
7354 dev_notice(&instance->pdev->dev, "waiting"
7355 "for controller reset to finish\n");
7356 }
7357
7358 msleep(1000);
7359 }
7360
7361 spin_lock_irqsave(&instance->hba_lock, flags);
7362 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7363 spin_unlock_irqrestore(&instance->hba_lock, flags);
7364
7365 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7366 error = -ENODEV; 7369 error = -ENODEV;
7367 goto out_up; 7370 goto out_up;
7368 } 7371 }
7369 spin_unlock_irqrestore(&instance->hba_lock, flags);
7370 7372
7371 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 7373 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7372out_up: 7374out_up:
@@ -7382,9 +7384,6 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7382 struct megasas_instance *instance; 7384 struct megasas_instance *instance;
7383 struct megasas_aen aen; 7385 struct megasas_aen aen;
7384 int error; 7386 int error;
7385 int i;
7386 unsigned long flags;
7387 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7388 7387
7389 if (file->private_data != file) { 7388 if (file->private_data != file) {
7390 printk(KERN_DEBUG "megasas: fasync_helper was not " 7389 printk(KERN_DEBUG "megasas: fasync_helper was not "
@@ -7408,32 +7407,8 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7408 return -ENODEV; 7407 return -ENODEV;
7409 } 7408 }
7410 7409
7411 for (i = 0; i < wait_time; i++) { 7410 if (megasas_wait_for_adapter_operational(instance))
7412
7413 spin_lock_irqsave(&instance->hba_lock, flags);
7414 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7415 spin_unlock_irqrestore(&instance->hba_lock,
7416 flags);
7417 break;
7418 }
7419
7420 spin_unlock_irqrestore(&instance->hba_lock, flags);
7421
7422 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
7423 dev_notice(&instance->pdev->dev, "waiting for"
7424 "controller reset to finish\n");
7425 }
7426
7427 msleep(1000);
7428 }
7429
7430 spin_lock_irqsave(&instance->hba_lock, flags);
7431 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7432 spin_unlock_irqrestore(&instance->hba_lock, flags);
7433 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7434 return -ENODEV; 7411 return -ENODEV;
7435 }
7436 spin_unlock_irqrestore(&instance->hba_lock, flags);
7437 7412
7438 mutex_lock(&instance->reset_mutex); 7413 mutex_lock(&instance->reset_mutex);
7439 error = megasas_register_aen(instance, aen.seq_num, 7414 error = megasas_register_aen(instance, aen.seq_num,
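Both ioctl entry points shed their open-coded one-second poll loops (the hba_lock sampling plus the periodic "waiting for controller reset" notices) in favor of a single megasas_wait_for_adapter_operational() call. The helper's internals are not part of this diff, but the loops it replaces had this general shape (generic, userspace rendering):

    #include <stdbool.h>

    static int wait_until_operational(bool (*is_operational)(void),
                                      void (*sleep_1s)(void),
                                      unsigned int timeout_secs)
    {
            unsigned int i;

            for (i = 0; i < timeout_secs; i++) {
                    if (is_operational())
                            return 0;
                    sleep_1s();     /* old loop: msleep(1000) */
            }
            return -1;              /* caller maps this to -ENODEV */
    }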
@@ -7613,6 +7588,14 @@ static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
7613} 7588}
7614static DRIVER_ATTR_RW(dbg_lvl); 7589static DRIVER_ATTR_RW(dbg_lvl);
7615 7590
7591static ssize_t
7592support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
7593{
7594 return sprintf(buf, "%u\n", support_nvme_encapsulation);
7595}
7596
7597static DRIVER_ATTR_RO(support_nvme_encapsulation);
7598
7616static inline void megasas_remove_scsi_device(struct scsi_device *sdev) 7599static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7617{ 7600{
7618 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); 7601 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@@ -7801,6 +7784,7 @@ static int __init megasas_init(void)
7801 7784
7802 support_poll_for_event = 2; 7785 support_poll_for_event = 2;
7803 support_device_change = 1; 7786 support_device_change = 1;
7787 support_nvme_encapsulation = true;
7804 7788
7805 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); 7789 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7806 7790
@@ -7850,8 +7834,17 @@ static int __init megasas_init(void)
7850 if (rval) 7834 if (rval)
7851 goto err_dcf_support_device_change; 7835 goto err_dcf_support_device_change;
7852 7836
7837 rval = driver_create_file(&megasas_pci_driver.driver,
7838 &driver_attr_support_nvme_encapsulation);
7839 if (rval)
7840 goto err_dcf_support_nvme_encapsulation;
7841
7853 return rval; 7842 return rval;
7854 7843
7844err_dcf_support_nvme_encapsulation:
7845 driver_remove_file(&megasas_pci_driver.driver,
7846 &driver_attr_support_device_change);
7847
7855err_dcf_support_device_change: 7848err_dcf_support_device_change:
7856 driver_remove_file(&megasas_pci_driver.driver, 7849 driver_remove_file(&megasas_pci_driver.driver,
7857 &driver_attr_dbg_lvl); 7850 &driver_attr_dbg_lvl);
@@ -7884,6 +7877,8 @@ static void __exit megasas_exit(void)
7884 driver_remove_file(&megasas_pci_driver.driver, 7877 driver_remove_file(&megasas_pci_driver.driver,
7885 &driver_attr_release_date); 7878 &driver_attr_release_date);
7886 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); 7879 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7880 driver_remove_file(&megasas_pci_driver.driver,
7881 &driver_attr_support_nvme_encapsulation);
7887 7882
7888 pci_unregister_driver(&megasas_pci_driver); 7883 pci_unregister_driver(&megasas_pci_driver);
7889 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); 7884 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index bfad9bfc313f..59ecbb3b53b5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
168/* 168/*
169 * This function will Populate Driver Map using firmware raid map 169 * This function will Populate Driver Map using firmware raid map
170 */ 170 */
171void MR_PopulateDrvRaidMap(struct megasas_instance *instance) 171static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
172{ 172{
173 struct fusion_context *fusion = instance->ctrl_context; 173 struct fusion_context *fusion = instance->ctrl_context;
174 struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; 174 struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
@@ -181,7 +181,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
181 181
182 182
183 struct MR_DRV_RAID_MAP_ALL *drv_map = 183 struct MR_DRV_RAID_MAP_ALL *drv_map =
184 fusion->ld_drv_map[(instance->map_id & 1)]; 184 fusion->ld_drv_map[(map_id & 1)];
185 struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; 185 struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
186 void *raid_map_data = NULL; 186 void *raid_map_data = NULL;
187 187
@@ -190,7 +190,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
190 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN)); 190 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
191 191
192 if (instance->max_raid_mapsize) { 192 if (instance->max_raid_mapsize) {
193 fw_map_dyn = fusion->ld_map[(instance->map_id & 1)]; 193 fw_map_dyn = fusion->ld_map[(map_id & 1)];
194 desc_table = 194 desc_table =
195 (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset)); 195 (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
196 if (desc_table != fw_map_dyn->raid_map_desc_table) 196 if (desc_table != fw_map_dyn->raid_map_desc_table)
@@ -255,11 +255,11 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
255 255
256 } else if (instance->supportmax256vd) { 256 } else if (instance->supportmax256vd) {
257 fw_map_ext = 257 fw_map_ext =
258 (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)]; 258 (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
259 ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount); 259 ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
260 if (ld_count > MAX_LOGICAL_DRIVES_EXT) { 260 if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
261 			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n"); 261 			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
262 return; 262 return 1;
263 } 263 }
264 264
265 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); 265 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
@@ -282,9 +282,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
282 cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT)); 282 cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
283 } else { 283 } else {
284 fw_map_old = (struct MR_FW_RAID_MAP_ALL *) 284 fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
285 fusion->ld_map[(instance->map_id & 1)]; 285 fusion->ld_map[(map_id & 1)];
286 pFwRaidMap = &fw_map_old->raidMap; 286 pFwRaidMap = &fw_map_old->raidMap;
287 ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount); 287 ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
288 if (ld_count > MAX_LOGICAL_DRIVES) {
289 dev_dbg(&instance->pdev->dev,
290 				"LD count exposed in RAID map is not valid\n");
291 return 1;
292 }
293
288 pDrvRaidMap->totalSize = pFwRaidMap->totalSize; 294 pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
289 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); 295 pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
290 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; 296 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
@@ -300,12 +306,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
300 sizeof(struct MR_DEV_HANDLE_INFO) * 306 sizeof(struct MR_DEV_HANDLE_INFO) *
301 MAX_RAIDMAP_PHYSICAL_DEVICES); 307 MAX_RAIDMAP_PHYSICAL_DEVICES);
302 } 308 }
309
310 return 0;
303} 311}
304 312
305/* 313/*
306 * This function will validate Map info data provided by FW 314 * This function will validate Map info data provided by FW
307 */ 315 */
308u8 MR_ValidateMapInfo(struct megasas_instance *instance) 316u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
309{ 317{
310 struct fusion_context *fusion; 318 struct fusion_context *fusion;
311 struct MR_DRV_RAID_MAP_ALL *drv_map; 319 struct MR_DRV_RAID_MAP_ALL *drv_map;
@@ -317,11 +325,11 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
317 u16 ld; 325 u16 ld;
318 u32 expected_size; 326 u32 expected_size;
319 327
320 328 if (MR_PopulateDrvRaidMap(instance, map_id))
321 MR_PopulateDrvRaidMap(instance); 329 return 0;
322 330
323 fusion = instance->ctrl_context; 331 fusion = instance->ctrl_context;
324 drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; 332 drv_map = fusion->ld_drv_map[(map_id & 1)];
325 pDrvRaidMap = &drv_map->raidMap; 333 pDrvRaidMap = &drv_map->raidMap;
326 334
327 lbInfo = fusion->load_balance_info; 335 lbInfo = fusion->load_balance_info;
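
Taken together, the megaraid_sas_fp.c hunks change MR_PopulateDrvRaidMap() from void to int so that an out-of-range firmware LD count (now also checked in the old-format branch) propagates upward, and MR_ValidateMapInfo() fails validation instead of continuing with a partially populated map. The core idea reduced to a sketch, with placeholder structures (example_fw_map/example_drv_map are illustrative):

	#include <linux/kernel.h>

	struct example_fw_map  { __le16 ld_count; /* ... */ };
	struct example_drv_map { u16 ld_count;    /* ... */ };

	/* Sketch only: bounds-check firmware-supplied counts before copying. */
	static int example_populate(struct example_drv_map *dst,
				    const struct example_fw_map *src, u16 max_lds)
	{
		u16 ld_count = le16_to_cpu(src->ld_count);

		if (ld_count > max_lds)
			return 1;	/* caller treats nonzero as "map not valid" */

		dst->ld_count = ld_count;
		/* ... copy ld_count per-LD entries ... */
		return 0;
	}
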
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 65dc4fea6352..073ced07e662 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -983,7 +983,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
983 MFI_CAPABILITIES *drv_ops; 983 MFI_CAPABILITIES *drv_ops;
984 u32 scratch_pad_2; 984 u32 scratch_pad_2;
985 unsigned long flags; 985 unsigned long flags;
986 struct timeval tv; 986 ktime_t time;
987 bool cur_fw_64bit_dma_capable; 987 bool cur_fw_64bit_dma_capable;
988 988
989 fusion = instance->ctrl_context; 989 fusion = instance->ctrl_context;
@@ -1042,13 +1042,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1042 IOCInitMessage->HostMSIxVectors = instance->msix_vectors; 1042 IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
1043 IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; 1043 IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
1044 1044
1045 do_gettimeofday(&tv); 1045 time = ktime_get_real();
1046 /* Convert to milliseconds as per FW requirement */ 1046 /* Convert to milliseconds as per FW requirement */
1047 IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) + 1047 IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time));
1048 (tv.tv_usec / 1000));
1049 1048
1050 init_frame = (struct megasas_init_frame *)cmd->frame; 1049 init_frame = (struct megasas_init_frame *)cmd->frame;
1051 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 1050 memset(init_frame, 0, IOC_INIT_FRAME_SIZE);
1052 1051
1053 frame_hdr = &cmd->frame->hdr; 1052 frame_hdr = &cmd->frame->hdr;
1054 frame_hdr->cmd_status = 0xFF; 1053 frame_hdr->cmd_status = 0xFF;
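
This is one of the 32-bit time conversions mentioned in the merge message: do_gettimeofday() fills a struct timeval whose tv_sec overflows in 2038 on 32-bit systems, while ktime_get_real() returns a 64-bit ktime_t and ktime_to_ms() yields the millisecond value the firmware expects. The pattern in isolation (function name is illustrative):

	#include <linux/ktime.h>

	/* Sketch only: y2038-safe wall-clock timestamp in milliseconds. */
	static u64 example_timestamp_ms(void)
	{
		ktime_t now = ktime_get_real();	/* 64-bit ns since the epoch */

		return ktime_to_ms(now);
	}
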
@@ -1080,6 +1079,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1080 1079
1081 drv_ops->mfi_capabilities.support_qd_throttling = 1; 1080 drv_ops->mfi_capabilities.support_qd_throttling = 1;
1082 drv_ops->mfi_capabilities.support_pd_map_target_id = 1; 1081 drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
1082 drv_ops->mfi_capabilities.support_nvme_passthru = 1;
1083 1083
1084 if (instance->consistent_mask_64bit) 1084 if (instance->consistent_mask_64bit)
1085 drv_ops->mfi_capabilities.support_64bit_mode = 1; 1085 drv_ops->mfi_capabilities.support_64bit_mode = 1;
@@ -1320,7 +1320,7 @@ megasas_get_map_info(struct megasas_instance *instance)
1320 1320
1321 fusion->fast_path_io = 0; 1321 fusion->fast_path_io = 0;
1322 if (!megasas_get_ld_map_info(instance)) { 1322 if (!megasas_get_ld_map_info(instance)) {
1323 if (MR_ValidateMapInfo(instance)) { 1323 if (MR_ValidateMapInfo(instance, instance->map_id)) {
1324 fusion->fast_path_io = 1; 1324 fusion->fast_path_io = 1;
1325 return 0; 1325 return 0;
1326 } 1326 }
@@ -1603,7 +1603,7 @@ static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
1603 1603
1604 fusion = instance->ctrl_context; 1604 fusion = instance->ctrl_context;
1605 1605
1606 cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL); 1606 cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
1607 1607
1608 if (!cmd) { 1608 if (!cmd) {
1609 dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n", 1609 dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
@@ -2664,16 +2664,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2664 praid_context = &io_request->RaidContext; 2664 praid_context = &io_request->RaidContext;
2665 2665
2666 if (instance->adapter_type == VENTURA_SERIES) { 2666 if (instance->adapter_type == VENTURA_SERIES) {
2667 spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
2668 megasas_stream_detect(instance, cmd, &io_info);
2669 spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
2670 /* In ventura if stream detected for a read and it is read ahead
2671 * capable make this IO as LDIO
2672 */
2673 if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
2674 io_info.isRead && io_info.ra_capable)
2675 fp_possible = false;
2676
2677 /* FP for Optimal raid level 1. 2667 /* FP for Optimal raid level 1.
2678 * All large RAID-1 writes (> 32 KiB, both WT and WB modes) 2668 * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
2679 * are built by the driver as LD I/Os. 2669 * are built by the driver as LD I/Os.
@@ -2699,6 +2689,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2699 } 2689 }
2700 } 2690 }
2701 2691
2692 if (!fp_possible ||
2693 (io_info.isRead && io_info.ra_capable)) {
2694 spin_lock_irqsave(&instance->stream_lock,
2695 spinlock_flags);
2696 megasas_stream_detect(instance, cmd, &io_info);
2697 spin_unlock_irqrestore(&instance->stream_lock,
2698 spinlock_flags);
2699 		/* In Ventura, if a stream is detected for a read and it is
2700 		 * read-ahead capable, treat this IO as an LDIO
2701 */
2702 if (is_stream_detected(&io_request->RaidContext.raid_context_g35))
2703 fp_possible = false;
2704 }
2705
2702 /* If raid is NULL, set CPU affinity to default CPU0 */ 2706 /* If raid is NULL, set CPU affinity to default CPU0 */
2703 if (raid) 2707 if (raid)
2704 megasas_set_raidflag_cpu_affinity(praid_context, 2708 megasas_set_raidflag_cpu_affinity(praid_context,
@@ -3953,6 +3957,8 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
3953 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 3957 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3954 u16 smid; 3958 u16 smid;
3955 bool refire_cmd = 0; 3959 bool refire_cmd = 0;
3960 u8 result;
3961 u32 opcode = 0;
3956 3962
3957 fusion = instance->ctrl_context; 3963 fusion = instance->ctrl_context;
3958 3964
@@ -3963,29 +3969,53 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
3963 cmd_fusion = fusion->cmd_list[j]; 3969 cmd_fusion = fusion->cmd_list[j];
3964 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 3970 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
3965 smid = le16_to_cpu(cmd_mfi->context.smid); 3971 smid = le16_to_cpu(cmd_mfi->context.smid);
3972 result = REFIRE_CMD;
3966 3973
3967 if (!smid) 3974 if (!smid)
3968 continue; 3975 continue;
3969 3976
3970 /* Do not refire shutdown command */ 3977 req_desc = megasas_get_request_descriptor(instance, smid - 1);
3971 if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) == 3978
3972 MR_DCMD_CTRL_SHUTDOWN) { 3979 switch (cmd_mfi->frame->hdr.cmd) {
3973 cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK; 3980 case MFI_CMD_DCMD:
3974 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 3981 opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode);
3975 continue; 3982 /* Do not refire shutdown command */
3983 if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
3984 cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
3985 result = COMPLETE_CMD;
3986 break;
3987 }
3988
3989 refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) &&
3990 (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3991 !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
3992
3993 if (!refire_cmd)
3994 result = RETURN_CMD;
3995
3996 break;
3997 case MFI_CMD_NVME:
3998 if (!instance->support_nvme_passthru) {
3999 cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
4000 result = COMPLETE_CMD;
4001 }
4002
4003 break;
4004 default:
4005 break;
3976 } 4006 }
3977 4007
3978 req_desc = megasas_get_request_descriptor 4008 switch (result) {
3979 (instance, smid - 1); 4009 case REFIRE_CMD:
3980 refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
3981 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
3982 (cmd_mfi->frame->dcmd.opcode !=
3983 cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
3984 && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
3985 if (refire_cmd)
3986 megasas_fire_cmd_fusion(instance, req_desc); 4010 megasas_fire_cmd_fusion(instance, req_desc);
3987 else 4011 break;
4012 case RETURN_CMD:
3988 megasas_return_cmd(instance, cmd_mfi); 4013 megasas_return_cmd(instance, cmd_mfi);
4014 break;
4015 case COMPLETE_CMD:
4016 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
4017 break;
4018 }
3989 } 4019 }
3990} 4020}
3991 4021
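
The refire rework above splits one dense conditional into classify-then-act: the first switch inspects the MFI frame and picks REFIRE_CMD, RETURN_CMD or COMPLETE_CMD (defaulting to refire), and the second switch performs the chosen action in one place, so a new frame type such as MFI_CMD_NVME only needs a classification arm. The shape, stripped of driver detail (all values below are placeholders):

	#include <linux/types.h>

	enum example_action { EX_REFIRE = 1, EX_COMPLETE, EX_RETURN };

	/* Sketch only: decide first, act second. */
	static enum example_action example_classify(u8 frame_cmd, u32 opcode,
						    bool nvme_supported)
	{
		switch (frame_cmd) {
		case 0x05:			/* placeholder: a DCMD-style frame */
			if (opcode == 0x01050000)	/* placeholder: shutdown opcode */
				return EX_COMPLETE;	/* finish locally, never refire */
			return EX_REFIRE;
		case 0x09:			/* placeholder: an NVMe frame */
			return nvme_supported ? EX_REFIRE : EX_COMPLETE;
		default:
			return EX_REFIRE;
		}
	}

Keeping the action switch separate means each outcome (fire, return to the pool, complete) is written once rather than once per frame type.
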
@@ -4625,8 +4655,6 @@ transition_to_ready:
4625 continue; 4655 continue;
4626 } 4656 }
4627 4657
4628 megasas_refire_mgmt_cmd(instance);
4629
4630 if (megasas_get_ctrl_info(instance)) { 4658 if (megasas_get_ctrl_info(instance)) {
4631 dev_info(&instance->pdev->dev, 4659 dev_info(&instance->pdev->dev,
4632 "Failed from %s %d\n", 4660 "Failed from %s %d\n",
@@ -4635,6 +4663,9 @@ transition_to_ready:
4635 retval = FAILED; 4663 retval = FAILED;
4636 goto out; 4664 goto out;
4637 } 4665 }
4666
4667 megasas_refire_mgmt_cmd(instance);
4668
4638 /* Reset load balance info */ 4669 /* Reset load balance info */
4639 if (fusion->load_balance_info) 4670 if (fusion->load_balance_info)
4640 memset(fusion->load_balance_info, 0, 4671 memset(fusion->load_balance_info, 0,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 1814d79cb98d..8e5ebee6517f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1344,6 +1344,12 @@ union desc_value {
1344 } u; 1344 } u;
1345}; 1345};
1346 1346
1347enum CMD_RET_VALUES {
1348 REFIRE_CMD = 1,
1349 COMPLETE_CMD = 2,
1350 RETURN_CMD = 3,
1351};
1352
1347void megasas_free_cmds_fusion(struct megasas_instance *instance); 1353void megasas_free_cmds_fusion(struct megasas_instance *instance);
1348int megasas_ioc_init_fusion(struct megasas_instance *instance); 1354int megasas_ioc_init_fusion(struct megasas_instance *instance);
1349u8 megasas_get_map_info(struct megasas_instance *instance); 1355u8 megasas_get_map_info(struct megasas_instance *instance);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8027de465d47..13d6e4ec3022 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -888,6 +888,22 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
888 return 1; 888 return 1;
889} 889}
890 890
891static struct scsiio_tracker *
892_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
893{
894 struct scsi_cmnd *cmd;
895
896 if (WARN_ON(!smid) ||
897 WARN_ON(smid >= ioc->hi_priority_smid))
898 return NULL;
899
900 cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
901 if (cmd)
902 return scsi_cmd_priv(cmd);
903
904 return NULL;
905}
906
891/** 907/**
892 * _base_get_cb_idx - obtain the callback index 908 * _base_get_cb_idx - obtain the callback index
893 * @ioc: per adapter object 909 * @ioc: per adapter object
@@ -899,19 +915,25 @@ static u8
899_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) 915_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
900{ 916{
901 int i; 917 int i;
902 u8 cb_idx; 918 u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
919 u8 cb_idx = 0xFF;
903 920
904 if (smid < ioc->hi_priority_smid) { 921 if (smid < ioc->hi_priority_smid) {
905 i = smid - 1; 922 struct scsiio_tracker *st;
906 cb_idx = ioc->scsi_lookup[i].cb_idx; 923
924 if (smid < ctl_smid) {
925 st = _get_st_from_smid(ioc, smid);
926 if (st)
927 cb_idx = st->cb_idx;
928 } else if (smid == ctl_smid)
929 cb_idx = ioc->ctl_cb_idx;
907 } else if (smid < ioc->internal_smid) { 930 } else if (smid < ioc->internal_smid) {
908 i = smid - ioc->hi_priority_smid; 931 i = smid - ioc->hi_priority_smid;
909 cb_idx = ioc->hpr_lookup[i].cb_idx; 932 cb_idx = ioc->hpr_lookup[i].cb_idx;
910 } else if (smid <= ioc->hba_queue_depth) { 933 } else if (smid <= ioc->hba_queue_depth) {
911 i = smid - ioc->internal_smid; 934 i = smid - ioc->internal_smid;
912 cb_idx = ioc->internal_lookup[i].cb_idx; 935 cb_idx = ioc->internal_lookup[i].cb_idx;
913 } else 936 }
914 cb_idx = 0xFF;
915 return cb_idx; 937 return cb_idx;
916} 938}
917 939
@@ -1287,14 +1309,16 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1287/** 1309/**
1288 * _base_get_chain_buffer_tracker - obtain chain tracker 1310 * _base_get_chain_buffer_tracker - obtain chain tracker
1289 * @ioc: per adapter object 1311 * @ioc: per adapter object
1290 * @smid: smid associated to an IO request 1312 * @scmd: SCSI command of the IO request
1291 * 1313 *
1292 * Returns chain tracker (from ioc->free_chain_list) 1314 * Returns chain tracker (from ioc->free_chain_list)
1293 */ 1315 */
1294static struct chain_tracker * 1316static struct chain_tracker *
1295_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid) 1317_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
1318 struct scsi_cmnd *scmd)
1296{ 1319{
1297 struct chain_tracker *chain_req; 1320 struct chain_tracker *chain_req;
1321 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
1298 unsigned long flags; 1322 unsigned long flags;
1299 1323
1300 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1324 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1307,8 +1331,7 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1307 chain_req = list_entry(ioc->free_chain_list.next, 1331 chain_req = list_entry(ioc->free_chain_list.next,
1308 struct chain_tracker, tracker_list); 1332 struct chain_tracker, tracker_list);
1309 list_del_init(&chain_req->tracker_list); 1333 list_del_init(&chain_req->tracker_list);
1310 list_add_tail(&chain_req->tracker_list, 1334 list_add_tail(&chain_req->tracker_list, &st->chain_list);
1311 &ioc->scsi_lookup[smid - 1].chain_list);
1312 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 1335 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1313 return chain_req; 1336 return chain_req;
1314} 1337}
@@ -1923,7 +1946,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1923 1946
1924 /* initializing the chain flags and pointers */ 1947 /* initializing the chain flags and pointers */
1925 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; 1948 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
1926 chain_req = _base_get_chain_buffer_tracker(ioc, smid); 1949 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
1927 if (!chain_req) 1950 if (!chain_req)
1928 return -1; 1951 return -1;
1929 chain = chain_req->chain_buffer; 1952 chain = chain_req->chain_buffer;
@@ -1963,7 +1986,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1963 sges_in_segment--; 1986 sges_in_segment--;
1964 } 1987 }
1965 1988
1966 chain_req = _base_get_chain_buffer_tracker(ioc, smid); 1989 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
1967 if (!chain_req) 1990 if (!chain_req)
1968 return -1; 1991 return -1;
1969 chain = chain_req->chain_buffer; 1992 chain = chain_req->chain_buffer;
@@ -2066,7 +2089,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2066 } 2089 }
2067 2090
2068 /* initializing the pointers */ 2091 /* initializing the pointers */
2069 chain_req = _base_get_chain_buffer_tracker(ioc, smid); 2092 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2070 if (!chain_req) 2093 if (!chain_req)
2071 return -1; 2094 return -1;
2072 chain = chain_req->chain_buffer; 2095 chain = chain_req->chain_buffer;
@@ -2097,7 +2120,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2097 sges_in_segment--; 2120 sges_in_segment--;
2098 } 2121 }
2099 2122
2100 chain_req = _base_get_chain_buffer_tracker(ioc, smid); 2123 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2101 if (!chain_req) 2124 if (!chain_req)
2102 return -1; 2125 return -1;
2103 chain = chain_req->chain_buffer; 2126 chain = chain_req->chain_buffer;
@@ -2742,7 +2765,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2742void * 2765void *
2743mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2766mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2744{ 2767{
2745 return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl); 2768 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
2746} 2769}
2747 2770
2748/** 2771/**
@@ -2755,7 +2778,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2755dma_addr_t 2778dma_addr_t
2756mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) 2779mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2757{ 2780{
2758 return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma; 2781 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
2759} 2782}
2760 2783
2761/** 2784/**
@@ -2822,26 +2845,15 @@ u16
2822mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, 2845mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
2823 struct scsi_cmnd *scmd) 2846 struct scsi_cmnd *scmd)
2824{ 2847{
2825 unsigned long flags; 2848 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
2826 struct scsiio_tracker *request; 2849 unsigned int tag = scmd->request->tag;
2827 u16 smid; 2850 u16 smid;
2828 2851
2829 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 2852 smid = tag + 1;
2830 if (list_empty(&ioc->free_list)) {
2831 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2832 pr_err(MPT3SAS_FMT "%s: smid not available\n",
2833 ioc->name, __func__);
2834 return 0;
2835 }
2836
2837 request = list_entry(ioc->free_list.next,
2838 struct scsiio_tracker, tracker_list);
2839 request->scmd = scmd;
2840 request->cb_idx = cb_idx; 2853 request->cb_idx = cb_idx;
2841 smid = request->smid;
2842 request->msix_io = _base_get_msix_index(ioc); 2854 request->msix_io = _base_get_msix_index(ioc);
2843 list_del(&request->tracker_list); 2855 request->smid = smid;
2844 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 2856 INIT_LIST_HEAD(&request->chain_list);
2845 return smid; 2857 return smid;
2846} 2858}
2847 2859
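
With the internal free list gone, the SCSI I/O smid now comes straight from the block-layer tag that the midlayer already assigned to the command; tags count from 0 while smid 0 means "no request", hence the +1. In isolation (helper names are illustrative):

	#include <linux/types.h>

	/* Sketch only: block-layer tags are 0-based, smid 0 is reserved. */
	static inline u16 example_tag_to_smid(unsigned int tag)
	{
		return (u16)(tag + 1);
	}

	static inline unsigned int example_smid_to_tag(u16 smid)
	{
		return smid - 1;	/* caller guarantees smid != 0 */
	}
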
@@ -2874,6 +2886,35 @@ mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2874 return smid; 2886 return smid;
2875} 2887}
2876 2888
2889static void
2890_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
2891{
2892 /*
2893 	 * See the _wait_for_commands_to_complete() call with regard to this code.
2894 */
2895 if (ioc->shost_recovery && ioc->pending_io_count) {
2896 ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
2897 if (ioc->pending_io_count == 0)
2898 wake_up(&ioc->reset_wq);
2899 }
2900}
2901
2902void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
2903 struct scsiio_tracker *st)
2904{
2905 if (WARN_ON(st->smid == 0))
2906 return;
2907 st->cb_idx = 0xFF;
2908 st->direct_io = 0;
2909 if (!list_empty(&st->chain_list)) {
2910 unsigned long flags;
2911
2912 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2913 list_splice_init(&st->chain_list, &ioc->free_chain_list);
2914 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2915 }
2916}
2917
2877/** 2918/**
2878 * mpt3sas_base_free_smid - put smid back on free_list 2919 * mpt3sas_base_free_smid - put smid back on free_list
2879 * @ioc: per adapter object 2920 * @ioc: per adapter object
@@ -2886,37 +2927,22 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2886{ 2927{
2887 unsigned long flags; 2928 unsigned long flags;
2888 int i; 2929 int i;
2889 struct chain_tracker *chain_req, *next;
2890 2930
2891 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2892 if (smid < ioc->hi_priority_smid) { 2931 if (smid < ioc->hi_priority_smid) {
2893 /* scsiio queue */ 2932 struct scsiio_tracker *st;
2894 i = smid - 1;
2895 if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
2896 list_for_each_entry_safe(chain_req, next,
2897 &ioc->scsi_lookup[i].chain_list, tracker_list) {
2898 list_del_init(&chain_req->tracker_list);
2899 list_add(&chain_req->tracker_list,
2900 &ioc->free_chain_list);
2901 }
2902 }
2903 ioc->scsi_lookup[i].cb_idx = 0xFF;
2904 ioc->scsi_lookup[i].scmd = NULL;
2905 ioc->scsi_lookup[i].direct_io = 0;
2906 list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
2907 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2908 2933
2909 /* 2934 st = _get_st_from_smid(ioc, smid);
2910 * See _wait_for_commands_to_complete() call with regards 2935 if (!st) {
2911 * to this code. 2936 _base_recovery_check(ioc);
2912 */ 2937 return;
2913 if (ioc->shost_recovery && ioc->pending_io_count) {
2914 if (ioc->pending_io_count == 1)
2915 wake_up(&ioc->reset_wq);
2916 ioc->pending_io_count--;
2917 } 2938 }
2939 mpt3sas_base_clear_st(ioc, st);
2940 _base_recovery_check(ioc);
2918 return; 2941 return;
2919 } else if (smid < ioc->internal_smid) { 2942 }
2943
2944 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2945 if (smid < ioc->internal_smid) {
2920 /* hi-priority */ 2946 /* hi-priority */
2921 i = smid - ioc->hi_priority_smid; 2947 i = smid - ioc->hi_priority_smid;
2922 ioc->hpr_lookup[i].cb_idx = 0xFF; 2948 ioc->hpr_lookup[i].cb_idx = 0xFF;
@@ -3789,13 +3815,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3789 3815
3790 if (ioc->pcie_sgl_dma_pool) { 3816 if (ioc->pcie_sgl_dma_pool) {
3791 for (i = 0; i < ioc->scsiio_depth; i++) { 3817 for (i = 0; i < ioc->scsiio_depth; i++) {
3792 if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) 3818 dma_pool_free(ioc->pcie_sgl_dma_pool,
3793 pci_pool_free(ioc->pcie_sgl_dma_pool, 3819 ioc->pcie_sg_lookup[i].pcie_sgl,
3794 ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl, 3820 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
3795 ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
3796 } 3821 }
3797 if (ioc->pcie_sgl_dma_pool) 3822 if (ioc->pcie_sgl_dma_pool)
3798 pci_pool_destroy(ioc->pcie_sgl_dma_pool); 3823 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
3799 } 3824 }
3800 3825
3801 if (ioc->config_page) { 3826 if (ioc->config_page) {
@@ -3806,10 +3831,6 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3806 ioc->config_page, ioc->config_page_dma); 3831 ioc->config_page, ioc->config_page_dma);
3807 } 3832 }
3808 3833
3809 if (ioc->scsi_lookup) {
3810 free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
3811 ioc->scsi_lookup = NULL;
3812 }
3813 kfree(ioc->hpr_lookup); 3834 kfree(ioc->hpr_lookup);
3814 kfree(ioc->internal_lookup); 3835 kfree(ioc->internal_lookup);
3815 if (ioc->chain_lookup) { 3836 if (ioc->chain_lookup) {
@@ -4110,16 +4131,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4110 ioc->name, (unsigned long long) ioc->request_dma)); 4131 ioc->name, (unsigned long long) ioc->request_dma));
4111 total_sz += sz; 4132 total_sz += sz;
4112 4133
4113 sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
4114 ioc->scsi_lookup_pages = get_order(sz);
4115 ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
4116 GFP_KERNEL, ioc->scsi_lookup_pages);
4117 if (!ioc->scsi_lookup) {
4118 pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
4119 ioc->name, (int)sz);
4120 goto out;
4121 }
4122
4123 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n", 4134 dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
4124 ioc->name, ioc->request, ioc->scsiio_depth)); 4135 ioc->name, ioc->request, ioc->scsiio_depth));
4125 4136
@@ -4202,23 +4213,29 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4202 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE); 4213 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
4203 nvme_blocks_needed++; 4214 nvme_blocks_needed++;
4204 4215
4216 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
4217 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
4218 if (!ioc->pcie_sg_lookup) {
4219 pr_info(MPT3SAS_FMT
4220 "PCIe SGL lookup: kzalloc failed\n", ioc->name);
4221 goto out;
4222 }
4205 sz = nvme_blocks_needed * ioc->page_size; 4223 sz = nvme_blocks_needed * ioc->page_size;
4206 ioc->pcie_sgl_dma_pool = 4224 ioc->pcie_sgl_dma_pool =
4207 pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0); 4225 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
4208 if (!ioc->pcie_sgl_dma_pool) { 4226 if (!ioc->pcie_sgl_dma_pool) {
4209 pr_info(MPT3SAS_FMT 4227 pr_info(MPT3SAS_FMT
4210 "PCIe SGL pool: pci_pool_create failed\n", 4228 "PCIe SGL pool: dma_pool_create failed\n",
4211 ioc->name); 4229 ioc->name);
4212 goto out; 4230 goto out;
4213 } 4231 }
4214 for (i = 0; i < ioc->scsiio_depth; i++) { 4232 for (i = 0; i < ioc->scsiio_depth; i++) {
4215 ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl = 4233 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
4216 pci_pool_alloc(ioc->pcie_sgl_dma_pool, 4234 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
4217 GFP_KERNEL, 4235 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4218 &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma); 4236 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
4219 if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
4220 pr_info(MPT3SAS_FMT 4237 pr_info(MPT3SAS_FMT
4221 "PCIe SGL pool: pci_pool_alloc failed\n", 4238 "PCIe SGL pool: dma_pool_alloc failed\n",
4222 ioc->name); 4239 ioc->name);
4223 goto out; 4240 goto out;
4224 } 4241 }
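
The pci_pool_* calls replaced above were thin wrappers around the dma_pool API (and have since been removed from the kernel), so this hunk converts to dma_pool_create()/dma_pool_alloc()/dma_pool_free() directly. A self-contained sketch of that life cycle, with placeholder sizes and names:

	#include <linux/dmapool.h>

	/* Sketch only: one DMA-coherent buffer from a named pool. */
	static int example_pool_use(struct device *dev)
	{
		struct dma_pool *pool;
		dma_addr_t dma;
		void *vaddr;

		pool = dma_pool_create("example pool", dev, 4096, 16, 0);
		if (!pool)
			return -ENOMEM;

		vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
		if (!vaddr) {
			dma_pool_destroy(pool);
			return -ENOMEM;
		}

		/* ... hand 'dma' to the hardware, access 'vaddr' from the CPU ... */

		dma_pool_free(pool, vaddr, dma);
		dma_pool_destroy(pool);
		return 0;
	}
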
@@ -5766,19 +5783,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
5766 kfree(delayed_event_ack); 5783 kfree(delayed_event_ack);
5767 } 5784 }
5768 5785
5769 /* initialize the scsi lookup free list */
5770 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 5786 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
5771 INIT_LIST_HEAD(&ioc->free_list);
5772 smid = 1;
5773 for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
5774 INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
5775 ioc->scsi_lookup[i].cb_idx = 0xFF;
5776 ioc->scsi_lookup[i].smid = smid;
5777 ioc->scsi_lookup[i].scmd = NULL;
5778 ioc->scsi_lookup[i].direct_io = 0;
5779 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
5780 &ioc->free_list);
5781 }
5782 5787
5783 /* hi-priority queue */ 5788 /* hi-priority queue */
5784 INIT_LIST_HEAD(&ioc->hpr_free_list); 5789 INIT_LIST_HEAD(&ioc->hpr_free_list);
@@ -6292,15 +6297,13 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
6292 * _wait_for_commands_to_complete - reset controller 6297 * _wait_for_commands_to_complete - reset controller
6293 * @ioc: Pointer to MPT_ADAPTER structure 6298 * @ioc: Pointer to MPT_ADAPTER structure
6294 * 6299 *
6295 * This function waiting(3s) for all pending commands to complete 6300 * This function waits 10s for all pending commands to complete
6296 * prior to putting controller in reset. 6301 * prior to putting controller in reset.
6297 */ 6302 */
6298static void 6303static void
6299_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) 6304_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
6300{ 6305{
6301 u32 ioc_state; 6306 u32 ioc_state;
6302 unsigned long flags;
6303 u16 i;
6304 6307
6305 ioc->pending_io_count = 0; 6308 ioc->pending_io_count = 0;
6306 6309
@@ -6309,11 +6312,7 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
6309 return; 6312 return;
6310 6313
6311 /* pending command count */ 6314 /* pending command count */
6312 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 6315 ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
6313 for (i = 0; i < ioc->scsiio_depth; i++)
6314 if (ioc->scsi_lookup[i].cb_idx != 0xFF)
6315 ioc->pending_io_count++;
6316 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
6317 6316
6318 if (!ioc->pending_io_count) 6317 if (!ioc->pending_io_count)
6319 return; 6318 return;
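
_wait_for_commands_to_complete() above stops walking the private lookup array under a lock and instead snapshots the midlayer's own in-flight counter; _base_recovery_check() re-reads the same counter as commands drain and wakes the reset waiter when it hits zero. In this kernel generation shost->host_busy is an atomic_t, so the read is simply (wrapper name illustrative):

	#include <scsi/scsi_host.h>

	/* Sketch only: commands the midlayer currently has in flight. */
	static unsigned int example_pending_io(struct Scsi_Host *shost)
	{
		return atomic_read(&shost->host_busy);
	}
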
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 60f42ca3954f..789bc421424b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -772,20 +772,17 @@ struct chain_tracker {
772/** 772/**
773 * struct scsiio_tracker - scsi mf request tracker 773 * struct scsiio_tracker - scsi mf request tracker
774 * @smid: system message id 774 * @smid: system message id
775 * @scmd: scsi request pointer
776 * @cb_idx: callback index 775 * @cb_idx: callback index
777 * @direct_io: To indicate whether I/O is direct (WARPDRIVE) 776 * @direct_io: To indicate whether I/O is direct (WARPDRIVE)
778 * @tracker_list: list of free request (ioc->free_list) 777 * @chain_list: list of associated firmware chain trackers
779 * @msix_io: IO's msix 778 * @msix_io: IO's msix
780 */ 779 */
781struct scsiio_tracker { 780struct scsiio_tracker {
782 u16 smid; 781 u16 smid;
783 struct scsi_cmnd *scmd;
784 u8 cb_idx; 782 u8 cb_idx;
785 u8 direct_io; 783 u8 direct_io;
786 struct pcie_sg_list pcie_sg_list; 784 struct pcie_sg_list pcie_sg_list;
787 struct list_head chain_list; 785 struct list_head chain_list;
788 struct list_head tracker_list;
789 u16 msix_io; 786 u16 msix_io;
790}; 787};
791 788
@@ -1248,10 +1245,8 @@ struct MPT3SAS_ADAPTER {
1248 u8 *request; 1245 u8 *request;
1249 dma_addr_t request_dma; 1246 dma_addr_t request_dma;
1250 u32 request_dma_sz; 1247 u32 request_dma_sz;
1251 struct scsiio_tracker *scsi_lookup; 1248 struct pcie_sg_list *pcie_sg_lookup;
1252 ulong scsi_lookup_pages;
1253 spinlock_t scsi_lookup_lock; 1249 spinlock_t scsi_lookup_lock;
1254 struct list_head free_list;
1255 int pending_io_count; 1250 int pending_io_count;
1256 wait_queue_head_t reset_wq; 1251 wait_queue_head_t reset_wq;
1257 1252
@@ -1270,6 +1265,7 @@ struct MPT3SAS_ADAPTER {
1270 u16 chains_needed_per_io; 1265 u16 chains_needed_per_io;
1271 u32 chain_depth; 1266 u32 chain_depth;
1272 u16 chain_segment_sz; 1267 u16 chain_segment_sz;
1268 u16 chains_per_prp_buffer;
1273 1269
1274 /* hi-priority queue */ 1270 /* hi-priority queue */
1275 u16 hi_priority_smid; 1271 u16 hi_priority_smid;
@@ -1401,7 +1397,9 @@ void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
1401/* hi-priority queue */ 1397/* hi-priority queue */
1402u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); 1398u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
1403u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, 1399u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
1404 struct scsi_cmnd *scmd); 1400 struct scsi_cmnd *scmd);
1401void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
1402 struct scsiio_tracker *st);
1405 1403
1406u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); 1404u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
1407void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid); 1405void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1437,16 +1435,16 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
1437 1435
1438 1436
1439/* scsih shared API */ 1437/* scsih shared API */
1438struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
1439 u16 smid);
1440u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, 1440u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
1441 u32 reply); 1441 u32 reply);
1442void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); 1442void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
1443 1443
1444int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, 1444int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
1445 uint channel, uint id, uint lun, u8 type, u16 smid_task, 1445 u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
1446 ulong timeout);
1447int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, 1446int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
1448 uint channel, uint id, uint lun, u8 type, u16 smid_task, 1447 u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
1449 ulong timeout);
1450 1448
1451void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); 1449void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
1452void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); 1450void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
@@ -1613,14 +1611,9 @@ void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
1613u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc); 1611u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc);
1614void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, 1612void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
1615 struct _raid_device *raid_device); 1613 struct _raid_device *raid_device);
1616u8
1617mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid);
1618void
1619mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
1620void 1614void
1621mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, 1615mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
1622 struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request, 1616 struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
1623 u16 smid);
1624 1617
1625/* NCQ Prio Handling Check */ 1618/* NCQ Prio Handling Check */
1626bool scsih_ncq_prio_supp(struct scsi_device *sdev); 1619bool scsih_ncq_prio_supp(struct scsi_device *sdev);
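
The header hunks above only work because of one scsi-mq facility: a host template that sets .cmd_size gets that many bytes co-allocated with every scsi_cmnd, and scsi_cmd_priv() returns that region, which is why scsiio_tracker can drop its scmd back-pointer and free-list linkage. A hedged sketch of the mechanism (this shows the generic midlayer contract, not mpt3sas's actual template, and the template below is not complete enough to register):

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	struct example_tracker {	/* per-command driver data */
		u16 smid;
		u8 cb_idx;
	};

	static struct scsi_host_template example_template = {
		.name		= "example",
		/* midlayer co-allocates this much private space per command */
		.cmd_size	= sizeof(struct example_tracker),
	};

	static void example_mark_free(struct scsi_cmnd *scmd)
	{
		struct example_tracker *st = scsi_cmd_priv(scmd);

		st->cb_idx = 0xFF;	/* the driver's "slot not in flight" marker */
	}
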
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 537de1b7e8e5..9cddc3074cd1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -567,11 +567,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
567 Mpi2SCSITaskManagementRequest_t *tm_request) 567 Mpi2SCSITaskManagementRequest_t *tm_request)
568{ 568{
569 u8 found = 0; 569 u8 found = 0;
570 u16 i; 570 u16 smid;
571 u16 handle; 571 u16 handle;
572 struct scsi_cmnd *scmd; 572 struct scsi_cmnd *scmd;
573 struct MPT3SAS_DEVICE *priv_data; 573 struct MPT3SAS_DEVICE *priv_data;
574 unsigned long flags;
575 Mpi2SCSITaskManagementReply_t *tm_reply; 574 Mpi2SCSITaskManagementReply_t *tm_reply;
576 u32 sz; 575 u32 sz;
577 u32 lun; 576 u32 lun;
@@ -587,11 +586,11 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
587 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); 586 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
588 587
589 handle = le16_to_cpu(tm_request->DevHandle); 588 handle = le16_to_cpu(tm_request->DevHandle);
590 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 589 for (smid = ioc->scsiio_depth; smid && !found; smid--) {
591 for (i = ioc->scsiio_depth; i && !found; i--) { 590 struct scsiio_tracker *st;
592 scmd = ioc->scsi_lookup[i - 1].scmd; 591
593 if (scmd == NULL || scmd->device == NULL || 592 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
594 scmd->device->hostdata == NULL) 593 if (!scmd)
595 continue; 594 continue;
596 if (lun != scmd->device->lun) 595 if (lun != scmd->device->lun)
597 continue; 596 continue;
@@ -600,10 +599,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
600 continue; 599 continue;
601 if (priv_data->sas_target->handle != handle) 600 if (priv_data->sas_target->handle != handle)
602 continue; 601 continue;
603 tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid); 602 st = scsi_cmd_priv(scmd);
603 tm_request->TaskMID = cpu_to_le16(st->smid);
604 found = 1; 604 found = 1;
605 } 605 }
606 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
607 606
608 if (!found) { 607 if (!found) {
609 dctlprintk(ioc, pr_info(MPT3SAS_FMT 608 dctlprintk(ioc, pr_info(MPT3SAS_FMT
@@ -724,14 +723,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
724 goto out; 723 goto out;
725 } 724 }
726 } else { 725 } else {
727 726 /* Use first reserved smid for passthrough ioctls */
728 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL); 727 smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
729 if (!smid) {
730 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
731 ioc->name, __func__);
732 ret = -EAGAIN;
733 goto out;
734 }
735 } 728 }
736 729
737 ret = 0; 730 ret = 0;
@@ -1081,8 +1074,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
1081 le16_to_cpu(mpi_request->FunctionDependent1)); 1074 le16_to_cpu(mpi_request->FunctionDependent1));
1082 mpt3sas_halt_firmware(ioc); 1075 mpt3sas_halt_firmware(ioc);
1083 mpt3sas_scsih_issue_locked_tm(ioc, 1076 mpt3sas_scsih_issue_locked_tm(ioc,
1084 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 1077 le16_to_cpu(mpi_request->FunctionDependent1), 0,
1085 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30); 1078 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
1086 } else 1079 } else
1087 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 1080 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1088 } 1081 }
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b258f210120a..74fca184dba9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1445,156 +1445,31 @@ _scsih_is_nvme_device(u32 device_info)
1445} 1445}
1446 1446
1447/** 1447/**
1448 * _scsih_scsi_lookup_get - returns scmd entry 1448 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1449 * @ioc: per adapter object
1450 * @smid: system request message index
1451 *
1452 * Returns the smid stored scmd pointer.
1453 */
1454static struct scsi_cmnd *
1455_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1456{
1457 return ioc->scsi_lookup[smid - 1].scmd;
1458}
1459
1460/**
1461 * __scsih_scsi_lookup_get_clear - returns scmd entry without
1462 * holding any lock.
1463 * @ioc: per adapter object 1449 * @ioc: per adapter object
1464 * @smid: system request message index 1450 * @smid: system request message index
1465 * 1451 *
1466 * Returns the smid stored scmd pointer. 1452 * Returns the smid stored scmd pointer.
1467 * Then will dereference the stored scmd pointer. 1453 * Then will dereference the stored scmd pointer.
1468 */ 1454 */
1469static inline struct scsi_cmnd * 1455struct scsi_cmnd *
1470__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, 1456mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1471 u16 smid)
1472{ 1457{
1473 struct scsi_cmnd *scmd = NULL; 1458 struct scsi_cmnd *scmd = NULL;
1459 struct scsiio_tracker *st;
1474 1460
1475 swap(scmd, ioc->scsi_lookup[smid - 1].scmd); 1461 if (smid > 0 &&
1476 1462 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1477 return scmd; 1463 u32 unique_tag = smid - 1;
1478}
1479
1480/**
1481 * _scsih_scsi_lookup_get_clear - returns scmd entry
1482 * @ioc: per adapter object
1483 * @smid: system request message index
1484 *
1485 * Returns the smid stored scmd pointer.
1486 * Then will dereference the stored scmd pointer.
1487 */
1488static inline struct scsi_cmnd *
1489_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1490{
1491 unsigned long flags;
1492 struct scsi_cmnd *scmd;
1493
1494 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1495 scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
1496 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1497
1498 return scmd;
1499}
1500 1464
1501/** 1465 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1502 * _scsih_scsi_lookup_find_by_scmd - scmd lookup 1466 if (scmd) {
1503 * @ioc: per adapter object 1467 st = scsi_cmd_priv(scmd);
1504 * @smid: system request message index 1468 if (st->cb_idx == 0xFF)
1505 * @scmd: pointer to scsi command object 1469 scmd = NULL;
1506 * Context: This function will acquire ioc->scsi_lookup_lock.
1507 *
1508 * This will search for a scmd pointer in the scsi_lookup array,
1509 * returning the relevant smid. A returned value of zero means invalid.
1510 */
1511static u16
1512_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
1513 *scmd)
1514{
1515 u16 smid;
1516 unsigned long flags;
1517 int i;
1518
1519 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1520 smid = 0;
1521 for (i = 0; i < ioc->scsiio_depth; i++) {
1522 if (ioc->scsi_lookup[i].scmd == scmd) {
1523 smid = ioc->scsi_lookup[i].smid;
1524 goto out;
1525 }
1526 }
1527 out:
1528 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1529 return smid;
1530}
1531
1532/**
1533 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1534 * @ioc: per adapter object
1535 * @id: target id
1536 * @channel: channel
1537 * Context: This function will acquire ioc->scsi_lookup_lock.
1538 *
1539 * This will search for a matching channel:id in the scsi_lookup array,
1540 * returning 1 if found.
1541 */
1542static u8
1543_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1544 int channel)
1545{
1546 u8 found;
1547 unsigned long flags;
1548 int i;
1549
1550 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1551 found = 0;
1552 for (i = 0 ; i < ioc->scsiio_depth; i++) {
1553 if (ioc->scsi_lookup[i].scmd &&
1554 (ioc->scsi_lookup[i].scmd->device->id == id &&
1555 ioc->scsi_lookup[i].scmd->device->channel == channel)) {
1556 found = 1;
1557 goto out;
1558 }
1559 }
1560 out:
1561 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1562 return found;
1563}
1564
1565/**
1566 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1567 * @ioc: per adapter object
1568 * @id: target id
1569 * @lun: lun number
1570 * @channel: channel
1571 * Context: This function will acquire ioc->scsi_lookup_lock.
1572 *
1573 * This will search for a matching channel:id:lun in the scsi_lookup array,
1574 * returning 1 if found.
1575 */
1576static u8
1577_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1578 unsigned int lun, int channel)
1579{
1580 u8 found;
1581 unsigned long flags;
1582 int i;
1583
1584 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1585 found = 0;
1586 for (i = 0 ; i < ioc->scsiio_depth; i++) {
1587 if (ioc->scsi_lookup[i].scmd &&
1588 (ioc->scsi_lookup[i].scmd->device->id == id &&
1589 ioc->scsi_lookup[i].scmd->device->channel == channel &&
1590 ioc->scsi_lookup[i].scmd->device->lun == lun)) {
1591 found = 1;
1592 goto out;
1593 } 1470 }
1594 } 1471 }
1595 out: 1472 return scmd;
1596 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1597 return found;
1598} 1473}
1599 1474
1600/** 1475/**
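
mpt3sas_scsih_scsi_lookup_get() above is the reverse mapping, smid back to an active command, done through the block layer instead of a private array; the cb_idx == 0xFF test filters out tags that are allocated but not actually in flight. Reduced to its essentials (names are illustrative, and the private-data layout is assumed, per the tracker sketch earlier):

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>
	#include <scsi/scsi_tcq.h>

	struct example_tracker { u16 smid; u8 cb_idx; };	/* assumed layout */

	/* Sketch only: resolve a 1-based smid to its in-flight command, or NULL. */
	static struct scsi_cmnd *example_lookup(struct Scsi_Host *shost, u16 smid)
	{
		struct scsi_cmnd *scmd;
		struct example_tracker *st;

		if (!smid)
			return NULL;

		scmd = scsi_host_find_tag(shost, smid - 1);	/* tag = smid - 1 */
		if (!scmd)
			return NULL;

		st = scsi_cmd_priv(scmd);
		return (st->cb_idx == 0xFF) ? NULL : scmd;
	}
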
@@ -2727,32 +2602,30 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2727/** 2602/**
2728 * mpt3sas_scsih_issue_tm - main routine for sending tm requests 2603 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2729 * @ioc: per adapter struct 2604 * @ioc: per adapter struct
2730 * @device_handle: device handle 2605 * @handle: device handle
2731 * @channel: the channel assigned by the OS
2732 * @id: the id assigned by the OS
2733 * @lun: lun number 2606 * @lun: lun number
2734 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2607 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2735 * @smid_task: smid assigned to the task 2608 * @smid_task: smid assigned to the task
2609 * @msix_task: MSIX table index supplied by the OS
2736 * @timeout: timeout in seconds 2610 * @timeout: timeout in seconds
2737 * Context: user 2611 * Context: user
2738 * 2612 *
2739 * A generic API for sending task management requests to firmware. 2613 * A generic API for sending task management requests to firmware.
2740 * 2614 *
2741 * The callback index is set inside `ioc->tm_cb_idx`. 2615 * The callback index is set inside `ioc->tm_cb_idx`.
2616 * The caller is responsible for checking for outstanding commands.
2742 * 2617 *
2743 * Return SUCCESS or FAILED. 2618 * Return SUCCESS or FAILED.
2744 */ 2619 */
2745int 2620int
2746mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, 2621mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2747 uint id, uint lun, u8 type, u16 smid_task, ulong timeout) 2622 u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
2748{ 2623{
2749 Mpi2SCSITaskManagementRequest_t *mpi_request; 2624 Mpi2SCSITaskManagementRequest_t *mpi_request;
2750 Mpi2SCSITaskManagementReply_t *mpi_reply; 2625 Mpi2SCSITaskManagementReply_t *mpi_reply;
2751 u16 smid = 0; 2626 u16 smid = 0;
2752 u32 ioc_state; 2627 u32 ioc_state;
2753 struct scsiio_tracker *scsi_lookup = NULL;
2754 int rc; 2628 int rc;
2755 u16 msix_task = 0;
2756 2629
2757 lockdep_assert_held(&ioc->tm_cmds.mutex); 2630 lockdep_assert_held(&ioc->tm_cmds.mutex);
2758 2631
@@ -2791,9 +2664,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
2791 return FAILED; 2664 return FAILED;
2792 } 2665 }
2793 2666
2794 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
2795 scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
2796
2797 dtmprintk(ioc, pr_info(MPT3SAS_FMT 2667 dtmprintk(ioc, pr_info(MPT3SAS_FMT
2798 "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n", 2668 "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
2799 ioc->name, handle, type, smid_task)); 2669 ioc->name, handle, type, smid_task));
@@ -2809,11 +2679,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
2809 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); 2679 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2810 mpt3sas_scsih_set_tm_flag(ioc, handle); 2680 mpt3sas_scsih_set_tm_flag(ioc, handle);
2811 init_completion(&ioc->tm_cmds.done); 2681 init_completion(&ioc->tm_cmds.done);
2812 if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
2813 (scsi_lookup->msix_io < ioc->reply_queue_count))
2814 msix_task = scsi_lookup->msix_io;
2815 else
2816 msix_task = 0;
2817 ioc->put_smid_hi_priority(ioc, smid, msix_task); 2682 ioc->put_smid_hi_priority(ioc, smid, msix_task);
2818 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); 2683 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2819 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { 2684 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2847,35 +2712,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
2847 sizeof(Mpi2SCSITaskManagementRequest_t)/4); 2712 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2848 } 2713 }
2849 } 2714 }
2850 2715 rc = SUCCESS;
2851 switch (type) {
2852 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
2853 rc = SUCCESS;
2854 if (scsi_lookup->scmd == NULL)
2855 break;
2856 rc = FAILED;
2857 break;
2858
2859 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2860 if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
2861 rc = FAILED;
2862 else
2863 rc = SUCCESS;
2864 break;
2865 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2866 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2867 if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
2868 rc = FAILED;
2869 else
2870 rc = SUCCESS;
2871 break;
2872 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
2873 rc = SUCCESS;
2874 break;
2875 default:
2876 rc = FAILED;
2877 break;
2878 }
2879 2716
2880out: 2717out:
2881 mpt3sas_scsih_clear_tm_flag(ioc, handle); 2718 mpt3sas_scsih_clear_tm_flag(ioc, handle);
@@ -2884,13 +2721,13 @@ out:
2884} 2721}
2885 2722
2886int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, 2723int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2887 uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout) 2724 u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
2888{ 2725{
2889 int ret; 2726 int ret;
2890 2727
2891 mutex_lock(&ioc->tm_cmds.mutex); 2728 mutex_lock(&ioc->tm_cmds.mutex);
2892 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type, 2729 ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2893 smid_task, timeout); 2730 msix_task, timeout);
2894 mutex_unlock(&ioc->tm_cmds.mutex); 2731 mutex_unlock(&ioc->tm_cmds.mutex);
2895 2732
2896 return ret; 2733 return ret;
@@ -2989,7 +2826,7 @@ scsih_abort(struct scsi_cmnd *scmd)
2989{ 2826{
2990 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2827 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2991 struct MPT3SAS_DEVICE *sas_device_priv_data; 2828 struct MPT3SAS_DEVICE *sas_device_priv_data;
2992 u16 smid; 2829 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2993 u16 handle; 2830 u16 handle;
2994 int r; 2831 int r;
2995 2832
@@ -3007,9 +2844,8 @@ scsih_abort(struct scsi_cmnd *scmd)
3007 goto out; 2844 goto out;
3008 } 2845 }
3009 2846
3010 /* search for the command */ 2847 /* check for completed command */
3011 smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd); 2848 if (st == NULL || st->cb_idx == 0xFF) {
3012 if (!smid) {
3013 scmd->result = DID_RESET << 16; 2849 scmd->result = DID_RESET << 16;
3014 r = SUCCESS; 2850 r = SUCCESS;
3015 goto out; 2851 goto out;
@@ -3027,10 +2863,12 @@ scsih_abort(struct scsi_cmnd *scmd)
3027 mpt3sas_halt_firmware(ioc); 2863 mpt3sas_halt_firmware(ioc);
3028 2864
3029 handle = sas_device_priv_data->sas_target->handle; 2865 handle = sas_device_priv_data->sas_target->handle;
3030 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, 2866 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
3031 scmd->device->id, scmd->device->lun, 2867 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3032 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30); 2868 st->smid, st->msix_io, 30);
3033 2869 /* Command must be cleared after abort */
2870 if (r == SUCCESS && st->cb_idx != 0xFF)
2871 r = FAILED;
3034 out: 2872 out:
3035 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", 2873 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
3036 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2874 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3086,10 +2924,11 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
3086 goto out; 2924 goto out;
3087 } 2925 }
3088 2926
3089 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, 2927 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
3090 scmd->device->id, scmd->device->lun, 2928 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30);
3091 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30); 2929 /* Check for busy commands after reset */
3092 2930 if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
2931 r = FAILED;
3093 out: 2932 out:
3094 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", 2933 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
3095 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2934 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3148,10 +2987,11 @@ scsih_target_reset(struct scsi_cmnd *scmd)
3148 goto out; 2987 goto out;
3149 } 2988 }
3150 2989
3151 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, 2990 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
3152 scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 2991 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
3153 30); 2992 /* Check for busy commands after reset */
3154 2993 if (r == SUCCESS && atomic_read(&starget->target_busy))
2994 r = FAILED;
3155 out: 2995 out:
3156 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", 2996 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
3157 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2997 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -4600,16 +4440,18 @@ static void
4600_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) 4440_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4601{ 4441{
4602 struct scsi_cmnd *scmd; 4442 struct scsi_cmnd *scmd;
4443 struct scsiio_tracker *st;
4603 u16 smid; 4444 u16 smid;
4604 u16 count = 0; 4445 int count = 0;
4605 4446
4606 for (smid = 1; smid <= ioc->scsiio_depth; smid++) { 4447 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4607 scmd = _scsih_scsi_lookup_get_clear(ioc, smid); 4448 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4608 if (!scmd) 4449 if (!scmd)
4609 continue; 4450 continue;
4610 count++; 4451 count++;
4611 _scsih_set_satl_pending(scmd, false); 4452 _scsih_set_satl_pending(scmd, false);
4612 mpt3sas_base_free_smid(ioc, smid); 4453 st = scsi_cmd_priv(scmd);
4454 mpt3sas_base_clear_st(ioc, st);
4613 scsi_dma_unmap(scmd); 4455 scsi_dma_unmap(scmd);
4614 if (ioc->pci_error_recovery) 4456 if (ioc->pci_error_recovery)
4615 scmd->result = DID_NO_CONNECT << 16; 4457 scmd->result = DID_NO_CONNECT << 16;
@@ -4758,19 +4600,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4758 return 0; 4600 return 0;
4759 } 4601 }
4760 4602
4761 /*
4762 * Bug work around for firmware SATL handling. The loop
4763 * is based on atomic operations and ensures consistency
4764 * since we're lockless at this point
4765 */
4766 do {
4767 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
4768 scmd->result = SAM_STAT_BUSY;
4769 scmd->scsi_done(scmd);
4770 return 0;
4771 }
4772 } while (_scsih_set_satl_pending(scmd, true));
4773
4774 sas_target_priv_data = sas_device_priv_data->sas_target; 4603 sas_target_priv_data = sas_device_priv_data->sas_target;
4775 4604
4776 /* invalid device handle */ 4605 /* invalid device handle */
@@ -4796,6 +4625,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4796 sas_device_priv_data->block) 4625 sas_device_priv_data->block)
4797 return SCSI_MLQUEUE_DEVICE_BUSY; 4626 return SCSI_MLQUEUE_DEVICE_BUSY;
4798 4627
4628 /*
4629 * Bug work around for firmware SATL handling. The loop
4630 * is based on atomic operations and ensures consistency
4631 * since we're lockless at this point
4632 */
4633 do {
4634 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
4635 scmd->result = SAM_STAT_BUSY;
4636 scmd->scsi_done(scmd);
4637 return 0;
4638 }
4639 } while (_scsih_set_satl_pending(scmd, true));
4640
4799 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4641 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4800 mpi_control = MPI2_SCSIIO_CONTROL_READ; 4642 mpi_control = MPI2_SCSIIO_CONTROL_READ;
4801 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 4643 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
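
Moving the SATL busy-loop below the handle/block checks matters because those checks return early (DID_NO_CONNECT completion or a SCSI_MLQUEUE_DEVICE_BUSY requeue); with the old ordering an ATA passthrough command could set the pending bit and then bail out on such a path with nothing left to clear it, stalling later passthrough commands. The same reasoning adds _scsih_set_satl_pending(scmd, false) to the failure exits further down. The guard itself is a plain atomic claim/release (helper names are illustrative; the driver's _scsih_set_satl_pending() is assumed to behave like the claim below for ATA commands):

	#include <linux/bitops.h>

	/* Sketch only: at most one "special" command outstanding per device. */
	static bool example_try_claim(unsigned long *pending)
	{
		/* old bit value nonzero => someone already holds the claim */
		return !test_and_set_bit(0, pending);
	}

	static void example_release(unsigned long *pending)
	{
		clear_bit(0, pending);
	}

Every error path reached after a successful claim must perform the release, exactly as the new _scsih_set_satl_pending(scmd, false) calls do.
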
@@ -4823,6 +4665,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4823 if (!smid) { 4665 if (!smid) {
4824 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 4666 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4825 ioc->name, __func__); 4667 ioc->name, __func__);
4668 _scsih_set_satl_pending(scmd, false);
4826 goto out; 4669 goto out;
4827 } 4670 }
4828 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4671 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -4854,6 +4697,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4854 pcie_device = sas_target_priv_data->pcie_dev; 4697 pcie_device = sas_target_priv_data->pcie_dev;
4855 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) { 4698 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4856 mpt3sas_base_free_smid(ioc, smid); 4699 mpt3sas_base_free_smid(ioc, smid);
4700 _scsih_set_satl_pending(scmd, false);
4857 goto out; 4701 goto out;
4858 } 4702 }
4859 } else 4703 } else
@@ -4862,7 +4706,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4862 raid_device = sas_target_priv_data->raid_device; 4706 raid_device = sas_target_priv_data->raid_device;
4863 if (raid_device && raid_device->direct_io_enabled) 4707 if (raid_device && raid_device->direct_io_enabled)
4864 mpt3sas_setup_direct_io(ioc, scmd, 4708 mpt3sas_setup_direct_io(ioc, scmd,
4865 raid_device, mpi_request, smid); 4709 raid_device, mpi_request);
4866 4710
4867 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { 4711 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4868 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { 4712 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@ -5330,6 +5174,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5330 Mpi25SCSIIORequest_t *mpi_request; 5174 Mpi25SCSIIORequest_t *mpi_request;
5331 Mpi2SCSIIOReply_t *mpi_reply; 5175 Mpi2SCSIIOReply_t *mpi_reply;
5332 struct scsi_cmnd *scmd; 5176 struct scsi_cmnd *scmd;
5177 struct scsiio_tracker *st;
5333 u16 ioc_status; 5178 u16 ioc_status;
5334 u32 xfer_cnt; 5179 u32 xfer_cnt;
5335 u8 scsi_state; 5180 u8 scsi_state;
@@ -5337,16 +5182,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5337 u32 log_info; 5182 u32 log_info;
5338 struct MPT3SAS_DEVICE *sas_device_priv_data; 5183 struct MPT3SAS_DEVICE *sas_device_priv_data;
5339 u32 response_code = 0; 5184 u32 response_code = 0;
5340 unsigned long flags;
5341 5185
5342 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 5186 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5343 5187
5344 if (ioc->broadcast_aen_busy || ioc->pci_error_recovery || 5188 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5345 ioc->got_task_abort_from_ioctl)
5346 scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
5347 else
5348 scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
5349
5350 if (scmd == NULL) 5189 if (scmd == NULL)
5351 return 1; 5190 return 1;
5352 5191
@@ -5371,13 +5210,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5371 * WARPDRIVE: If direct_io is set then it is directIO, 5210 * WARPDRIVE: If direct_io is set then it is directIO,
5372 * the failed direct I/O should be redirected to volume 5211 * the failed direct I/O should be redirected to volume
5373 */ 5212 */
5374 if (mpt3sas_scsi_direct_io_get(ioc, smid) && 5213 st = scsi_cmd_priv(scmd);
5214 if (st->direct_io &&
5375 ((ioc_status & MPI2_IOCSTATUS_MASK) 5215 ((ioc_status & MPI2_IOCSTATUS_MASK)
5376 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) { 5216 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5377 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 5217 st->direct_io = 0;
5378 ioc->scsi_lookup[smid - 1].scmd = scmd;
5379 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5380 mpt3sas_scsi_direct_io_set(ioc, smid, 0);
5381 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 5218 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5382 mpi_request->DevHandle = 5219 mpi_request->DevHandle =
5383 cpu_to_le16(sas_device_priv_data->sas_target->handle); 5220 cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5555,9 +5392,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5555 out: 5392 out:
5556 5393
5557 scsi_dma_unmap(scmd); 5394 scsi_dma_unmap(scmd);
5558 5395 mpt3sas_base_free_smid(ioc, smid);
5559 scmd->scsi_done(scmd); 5396 scmd->scsi_done(scmd);
5560 return 1; 5397 return 0;
5561} 5398}
5562 5399
5563/** 5400/**
@@ -7211,7 +7048,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7211 * Context: user. 7048 * Context: user.
7212 * 7049 *
7213 */ 7050 */
7214static int 7051static void
7215_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, 7052_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7216 struct fw_event_work *fw_event) 7053 struct fw_event_work *fw_event)
7217{ 7054{
@@ -7221,7 +7058,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7221 u8 link_rate, prev_link_rate; 7058 u8 link_rate, prev_link_rate;
7222 unsigned long flags; 7059 unsigned long flags;
7223 int rc; 7060 int rc;
7224 int requeue_event;
7225 Mpi26EventDataPCIeTopologyChangeList_t *event_data = 7061 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
7226 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data; 7062 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
7227 struct _pcie_device *pcie_device; 7063 struct _pcie_device *pcie_device;
@@ -7231,12 +7067,12 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7231 7067
7232 if (ioc->shost_recovery || ioc->remove_host || 7068 if (ioc->shost_recovery || ioc->remove_host ||
7233 ioc->pci_error_recovery) 7069 ioc->pci_error_recovery)
7234 return 0; 7070 return;
7235 7071
7236 if (fw_event->ignore) { 7072 if (fw_event->ignore) {
7237 dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n", 7073 dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
7238 ioc->name)); 7074 ioc->name));
7239 return 0; 7075 return;
7240 } 7076 }
7241 7077
7242 /* handle siblings events */ 7078 /* handle siblings events */
@@ -7244,10 +7080,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7244 if (fw_event->ignore) { 7080 if (fw_event->ignore) {
7245 dewtprintk(ioc, pr_info(MPT3SAS_FMT 7081 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7246 "ignoring switch event\n", ioc->name)); 7082 "ignoring switch event\n", ioc->name));
7247 return 0; 7083 return;
7248 } 7084 }
7249 if (ioc->remove_host || ioc->pci_error_recovery) 7085 if (ioc->remove_host || ioc->pci_error_recovery)
7250 return 0; 7086 return;
7251 reason_code = event_data->PortEntry[i].PortStatus; 7087 reason_code = event_data->PortEntry[i].PortStatus;
7252 handle = 7088 handle =
7253 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 7089 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
@@ -7316,7 +7152,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7316 break; 7152 break;
7317 } 7153 }
7318 } 7154 }
7319 return requeue_event;
7320} 7155}
7321 7156
7322/** 7157/**
@@ -7502,6 +7337,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7502{ 7337{
7503 struct scsi_cmnd *scmd; 7338 struct scsi_cmnd *scmd;
7504 struct scsi_device *sdev; 7339 struct scsi_device *sdev;
7340 struct scsiio_tracker *st;
7505 u16 smid, handle; 7341 u16 smid, handle;
7506 u32 lun; 7342 u32 lun;
7507 struct MPT3SAS_DEVICE *sas_device_priv_data; 7343 struct MPT3SAS_DEVICE *sas_device_priv_data;
@@ -7543,9 +7379,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7543 for (smid = 1; smid <= ioc->scsiio_depth; smid++) { 7379 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7544 if (ioc->shost_recovery) 7380 if (ioc->shost_recovery)
7545 goto out; 7381 goto out;
7546 scmd = _scsih_scsi_lookup_get(ioc, smid); 7382 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7547 if (!scmd) 7383 if (!scmd)
7548 continue; 7384 continue;
7385 st = scsi_cmd_priv(scmd);
7549 sdev = scmd->device; 7386 sdev = scmd->device;
7550 sas_device_priv_data = sdev->hostdata; 7387 sas_device_priv_data = sdev->hostdata;
7551 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) 7388 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
@@ -7567,8 +7404,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7567 goto out; 7404 goto out;
7568 7405
7569 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 7406 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7570 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, 7407 r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7571 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30); 7408 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7409 st->msix_io, 30);
7572 if (r == FAILED) { 7410 if (r == FAILED) {
7573 sdev_printk(KERN_WARNING, sdev, 7411 sdev_printk(KERN_WARNING, sdev,
7574 "mpt3sas_scsih_issue_tm: FAILED when sending " 7412 "mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -7607,10 +7445,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7607 if (ioc->shost_recovery) 7445 if (ioc->shost_recovery)
7608 goto out_no_lock; 7446 goto out_no_lock;
7609 7447
7610 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, 7448 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7611 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 7449 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7612 30); 7450 st->msix_io, 30);
7613 if (r == FAILED) { 7451 if (r == FAILED || st->cb_idx != 0xFF) {
7614 sdev_printk(KERN_WARNING, sdev, 7452 sdev_printk(KERN_WARNING, sdev,
7615 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " 7453 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7616 "scmd(%p)\n", scmd); 7454 "scmd(%p)\n", scmd);
@@ -10416,6 +10254,7 @@ static struct scsi_host_template mpt2sas_driver_template = {
10416 .shost_attrs = mpt3sas_host_attrs, 10254 .shost_attrs = mpt3sas_host_attrs,
10417 .sdev_attrs = mpt3sas_dev_attrs, 10255 .sdev_attrs = mpt3sas_dev_attrs,
10418 .track_queue_depth = 1, 10256 .track_queue_depth = 1,
10257 .cmd_size = sizeof(struct scsiio_tracker),
10419}; 10258};
10420 10259
10421/* raid transport support for SAS 2.0 HBA devices */ 10260/* raid transport support for SAS 2.0 HBA devices */
@@ -10454,6 +10293,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
10454 .shost_attrs = mpt3sas_host_attrs, 10293 .shost_attrs = mpt3sas_host_attrs,
10455 .sdev_attrs = mpt3sas_dev_attrs, 10294 .sdev_attrs = mpt3sas_dev_attrs,
10456 .track_queue_depth = 1, 10295 .track_queue_depth = 1,
10296 .cmd_size = sizeof(struct scsiio_tracker),
10457}; 10297};
10458 10298
10459/* raid transport support for SAS 3.0 HBA devices */ 10299/* raid transport support for SAS 3.0 HBA devices */
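[annotation] Both host templates now set .cmd_size, the hook that makes the driver's old scsi_lookup[] array and its lock removable: the SCSI midlayer over-allocates every scsi_cmnd by cmd_size bytes, and scsi_cmd_priv() hands back that trailing region. A sketch of the pattern, with the tracker fields abbreviated:

    #include <scsi/scsi_host.h>
    #include <scsi/scsi_cmnd.h>

    struct scsiio_tracker {          /* fields abbreviated */
        u16 smid;
        u8  cb_idx;
        u8  direct_io;
    };

    static struct scsi_host_template example_template = {
        /* midlayer reserves this much per-command driver data */
        .cmd_size = sizeof(struct scsiio_tracker),
    };

    static void mark_direct_io(struct scsi_cmnd *scmd)
    {
        struct scsiio_tracker *st = scsi_cmd_priv(scmd);

        st->direct_io = 1;           /* no lookup table, no lock */
    }

Because the tracker lives and dies with its scsi_cmnd, pairing an smid with a command no longer needs scsi_lookup_lock, which is why the _scsih_io_done() hunk above could drop its spin_lock_irqsave()/spin_unlock_irqrestore() pair.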
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index ced7d9f6274c..6bfcee4757e0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -261,33 +261,6 @@ out_error:
261} 261}
262 262
263/** 263/**
264 * mpt3sas_scsi_direct_io_get - returns direct io flag
265 * @ioc: per adapter object
266 * @smid: system request message index
267 *
268 * Returns the smid stored scmd pointer.
269 */
270inline u8
271mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
272{
273 return ioc->scsi_lookup[smid - 1].direct_io;
274}
275
276/**
277 * mpt3sas_scsi_direct_io_set - sets direct io flag
278 * @ioc: per adapter object
279 * @smid: system request message index
280 * @direct_io: Zero or non-zero value to set in the direct_io flag
281 *
282 * Returns Nothing.
283 */
284inline void
285mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
286{
287 ioc->scsi_lookup[smid - 1].direct_io = direct_io;
288}
289
290/**
291 * mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O 264 * mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
292 * @ioc: per adapter object 265 * @ioc: per adapter object
293 * @scmd: pointer to scsi command object 266 * @scmd: pointer to scsi command object
@@ -299,12 +272,12 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
299 */ 272 */
300void 273void
301mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, 274mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
302 struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request, 275 struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request)
303 u16 smid)
304{ 276{
305 sector_t v_lba, p_lba, stripe_off, column, io_size; 277 sector_t v_lba, p_lba, stripe_off, column, io_size;
306 u32 stripe_sz, stripe_exp; 278 u32 stripe_sz, stripe_exp;
307 u8 num_pds, cmd = scmd->cmnd[0]; 279 u8 num_pds, cmd = scmd->cmnd[0];
280 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
308 281
309 if (cmd != READ_10 && cmd != WRITE_10 && 282 if (cmd != READ_10 && cmd != WRITE_10 &&
310 cmd != READ_16 && cmd != WRITE_16) 283 cmd != READ_16 && cmd != WRITE_16)
@@ -340,5 +313,5 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
340 else 313 else
341 put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]); 314 put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
342 315
343 mpt3sas_scsi_direct_io_set(ioc, smid, 1); 316 st->direct_io = 1;
344} 317}
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e58be98430b0..201c8de1853d 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5216,7 +5216,7 @@ static unsigned short pmcraid_get_minor(void)
5216{ 5216{
5217 int minor; 5217 int minor;
5218 5218
5219 minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor)); 5219 minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
5220 __set_bit(minor, pmcraid_minor); 5220 __set_bit(minor, pmcraid_minor);
5221 return minor; 5221 return minor;
5222} 5222}
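[annotation] The pmcraid change fixes a units bug: find_first_zero_bit() takes its search limit in bits, while sizeof() yields bytes, so the old call scanned only one eighth of the minor-number bitmap. A sketch, with PMCRAID_MAX_ADAPTERS assumed to be the intended bit count:

    #include <linux/bitops.h>

    #define PMCRAID_MAX_ADAPTERS 1024    /* assumed value */
    static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);

    static unsigned short pmcraid_get_minor(void)
    {
        unsigned short minor;

        /* The limit is in bits; sizeof(pmcraid_minor) is 128 (bytes)
         * here and would silently cap the search at minor 127. */
        minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
        __set_bit(minor, pmcraid_minor);
        return minor;
    }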
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 7be5823ab036..ee86a0c62dbf 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -724,6 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
724 return 0; 724 return 0;
725 } 725 }
726 cmd->SCp.phase++; 726 cmd->SCp.phase++;
727 /* fall through */
727 728
728 case 3: /* Phase 3 - Ready to accept a command */ 729 case 3: /* Phase 3 - Ready to accept a command */
729 w_ctr(ppb, 0x0c); 730 w_ctr(ppb, 0x0c);
@@ -733,6 +734,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
733 if (!ppa_send_command(cmd)) 734 if (!ppa_send_command(cmd))
734 return 0; 735 return 0;
735 cmd->SCp.phase++; 736 cmd->SCp.phase++;
737 /* fall through */
736 738
737 case 4: /* Phase 4 - Setup scatter/gather buffers */ 739 case 4: /* Phase 4 - Setup scatter/gather buffers */
738 if (scsi_bufflen(cmd)) { 740 if (scsi_bufflen(cmd)) {
@@ -746,6 +748,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
746 } 748 }
747 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; 749 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
748 cmd->SCp.phase++; 750 cmd->SCp.phase++;
751 /* fall through */
749 752
750 case 5: /* Phase 5 - Data transfer stage */ 753 case 5: /* Phase 5 - Data transfer stage */
751 w_ctr(ppb, 0x0c); 754 w_ctr(ppb, 0x0c);
@@ -758,6 +761,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
758 if (retv == 0) 761 if (retv == 0)
759 return 1; 762 return 1;
760 cmd->SCp.phase++; 763 cmd->SCp.phase++;
764 /* fall through */
761 765
762 case 6: /* Phase 6 - Read status/message */ 766 case 6: /* Phase 6 - Read status/message */
763 cmd->result = DID_OK << 16; 767 cmd->result = DID_OK << 16;
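[annotation] The ppa.c hunks only add comments, but meaningful ones: /* fall through */ is the annotation GCC's -Wimplicit-fallthrough recognizes, documenting that each phase of the state machine deliberately runs into the next. A reduced sketch of the idiom (helper names hypothetical):

    switch (cmd->SCp.phase) {
    case 3:                          /* send the command */
        if (!send_command(cmd))
            return 0;                /* not finished; poll again later */
        cmd->SCp.phase++;
        /* fall through */
    case 4:                          /* set up data buffers */
        setup_buffers(cmd);
        cmd->SCp.phase++;
        /* fall through */
    case 5:                          /* transfer data */
        transfer_data(cmd);
        break;
    }

Without the comment the compiler cannot tell an intentional fall-through from a forgotten break.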
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7c0064500cc5..4809debc6110 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3126,6 +3126,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
3126 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); 3126 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
3127 if (!qedf->cmd_mgr) { 3127 if (!qedf->cmd_mgr) {
3128 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n"); 3128 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
3129 rc = -ENOMEM;
3129 goto err5; 3130 goto err5;
3130 } 3131 }
3131 3132
@@ -3149,6 +3150,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
3149 create_workqueue(host_buf); 3150 create_workqueue(host_buf);
3150 if (!qedf->ll2_recv_wq) { 3151 if (!qedf->ll2_recv_wq) {
3151 QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n"); 3152 QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
3153 rc = -ENOMEM;
3152 goto err7; 3154 goto err7;
3153 } 3155 }
3154 3156
@@ -3192,6 +3194,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
3192 if (!qedf->timer_work_queue) { 3194 if (!qedf->timer_work_queue) {
3193 QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " 3195 QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
3194 "workqueue.\n"); 3196 "workqueue.\n");
3197 rc = -ENOMEM;
3195 goto err7; 3198 goto err7;
3196 } 3199 }
3197 3200
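[annotation] All three __qedf_probe() fixes are instances of the same bug: rc still held its earlier success value, so an allocation failure jumped to the unwind label yet the function returned 0. A minimal sketch of the corrected shape (names hypothetical):

    extern void *alloc_cmd_mgr(void);    /* stand-in for the real allocator */

    static int example_probe(void)
    {
        int rc = 0;
        void *mgr;

        mgr = alloc_cmd_mgr();
        if (!mgr) {
            rc = -ENOMEM;            /* without this, the caller sees success */
            goto err;
        }
        return 0;
    err:
        /* unwind anything allocated before the failure */
        return rc;
    }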
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index bd302d3cb9af..20a9259304f2 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -198,7 +198,7 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
198 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; 198 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
199 199
200 qedi_cmd = task->dd_data; 200 qedi_cmd = task->dd_data;
201 qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL); 201 qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
202 if (!qedi_cmd->tmf_resp_buf) { 202 if (!qedi_cmd->tmf_resp_buf) {
203 QEDI_ERR(&qedi->dbg_ctx, 203 QEDI_ERR(&qedi->dbg_ctx,
204 "Failed to allocate resp buf, cid=0x%x\n", 204 "Failed to allocate resp buf, cid=0x%x\n",
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cccc34adc0e0..58596d17f7d9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -998,7 +998,9 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
998 998
999 ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p); 999 ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
1000 if (ret) 1000 if (ret)
1001 continue; 1001 QEDI_WARN(&qedi->dbg_ctx,
1002 "Dropping CQE 0x%x for cid=0x%x.\n",
1003 que->cq_cons_idx, cqe->cqe_common.conn_id);
1002 1004
1003 que->cq_cons_idx++; 1005 que->cq_cons_idx++;
1004 if (que->cq_cons_idx == QEDI_CQ_SIZE) 1006 if (que->cq_cons_idx == QEDI_CQ_SIZE)
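[annotation] The qedi_main.c change stops skipping the bookkeeping when a CQE cannot be queued: the old continue also skipped the consumer-index update, wedging the completion queue for good, whereas now the CQE is logged and consumed anyway. The shape of the fix, simplified (warn_dropped_cqe() stands in for the QEDI_WARN() call):

    ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
    if (ret)
        /* cannot deliver (e.g. connection recovery): log it, but
         * fall through so the CQE is still consumed */
        warn_dropped_cqe(que->cq_cons_idx, cqe);

    que->cq_cons_idx++;                  /* always advances now */
    if (que->cq_cons_idx == QEDI_CQ_SIZE)
        que->cq_cons_idx = 0;            /* ring wrap */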
@@ -1268,16 +1270,14 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
1268 } 1270 }
1269 1271
1270 /* Allocate list of PBL pages */ 1272 /* Allocate list of PBL pages */
1271 qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev, 1273 qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
1272 PAGE_SIZE, 1274 &qedi->bdq_pbl_list_dma,
1273 &qedi->bdq_pbl_list_dma, 1275 GFP_KERNEL);
1274 GFP_KERNEL);
1275 if (!qedi->bdq_pbl_list) { 1276 if (!qedi->bdq_pbl_list) {
1276 QEDI_ERR(&qedi->dbg_ctx, 1277 QEDI_ERR(&qedi->dbg_ctx,
1277 "Could not allocate list of PBL pages.\n"); 1278 "Could not allocate list of PBL pages.\n");
1278 return -ENOMEM; 1279 return -ENOMEM;
1279 } 1280 }
1280 memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
1281 1281
1282 /* 1282 /*
1283 * Now populate PBL list with pages that contain pointers to the 1283 * Now populate PBL list with pages that contain pointers to the
@@ -1367,11 +1367,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1367 (qedi->global_queues[i]->cq_pbl_size + 1367 (qedi->global_queues[i]->cq_pbl_size +
1368 (QEDI_PAGE_SIZE - 1)); 1368 (QEDI_PAGE_SIZE - 1));
1369 1369
1370 qedi->global_queues[i]->cq = 1370 qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
1371 dma_alloc_coherent(&qedi->pdev->dev, 1371 qedi->global_queues[i]->cq_mem_size,
1372 qedi->global_queues[i]->cq_mem_size, 1372 &qedi->global_queues[i]->cq_dma,
1373 &qedi->global_queues[i]->cq_dma, 1373 GFP_KERNEL);
1374 GFP_KERNEL);
1375 1374
1376 if (!qedi->global_queues[i]->cq) { 1375 if (!qedi->global_queues[i]->cq) {
1377 QEDI_WARN(&qedi->dbg_ctx, 1376 QEDI_WARN(&qedi->dbg_ctx,
@@ -1379,14 +1378,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1379 status = -ENOMEM; 1378 status = -ENOMEM;
1380 goto mem_alloc_failure; 1379 goto mem_alloc_failure;
1381 } 1380 }
1382 memset(qedi->global_queues[i]->cq, 0, 1381 qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
1383 qedi->global_queues[i]->cq_mem_size); 1382 qedi->global_queues[i]->cq_pbl_size,
1384 1383 &qedi->global_queues[i]->cq_pbl_dma,
1385 qedi->global_queues[i]->cq_pbl = 1384 GFP_KERNEL);
1386 dma_alloc_coherent(&qedi->pdev->dev,
1387 qedi->global_queues[i]->cq_pbl_size,
1388 &qedi->global_queues[i]->cq_pbl_dma,
1389 GFP_KERNEL);
1390 1385
1391 if (!qedi->global_queues[i]->cq_pbl) { 1386 if (!qedi->global_queues[i]->cq_pbl) {
1392 QEDI_WARN(&qedi->dbg_ctx, 1387 QEDI_WARN(&qedi->dbg_ctx,
@@ -1394,8 +1389,6 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1394 status = -ENOMEM; 1389 status = -ENOMEM;
1395 goto mem_alloc_failure; 1390 goto mem_alloc_failure;
1396 } 1391 }
1397 memset(qedi->global_queues[i]->cq_pbl, 0,
1398 qedi->global_queues[i]->cq_pbl_size);
1399 1392
1400 /* Create PBL */ 1393 /* Create PBL */
1401 num_pages = qedi->global_queues[i]->cq_mem_size / 1394 num_pages = qedi->global_queues[i]->cq_mem_size /
@@ -1456,25 +1449,22 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
1456 ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); 1449 ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
1457 ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; 1450 ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
1458 1451
1459 ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, 1452 ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
1460 &ep->sq_dma, GFP_KERNEL); 1453 &ep->sq_dma, GFP_KERNEL);
1461 if (!ep->sq) { 1454 if (!ep->sq) {
1462 QEDI_WARN(&qedi->dbg_ctx, 1455 QEDI_WARN(&qedi->dbg_ctx,
1463 "Could not allocate send queue.\n"); 1456 "Could not allocate send queue.\n");
1464 rval = -ENOMEM; 1457 rval = -ENOMEM;
1465 goto out; 1458 goto out;
1466 } 1459 }
1467 memset(ep->sq, 0, ep->sq_mem_size); 1460 ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
1468 1461 &ep->sq_pbl_dma, GFP_KERNEL);
1469 ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
1470 &ep->sq_pbl_dma, GFP_KERNEL);
1471 if (!ep->sq_pbl) { 1462 if (!ep->sq_pbl) {
1472 QEDI_WARN(&qedi->dbg_ctx, 1463 QEDI_WARN(&qedi->dbg_ctx,
1473 "Could not allocate send queue PBL.\n"); 1464 "Could not allocate send queue PBL.\n");
1474 rval = -ENOMEM; 1465 rval = -ENOMEM;
1475 goto out_free_sq; 1466 goto out_free_sq;
1476 } 1467 }
1477 memset(ep->sq_pbl, 0, ep->sq_pbl_size);
1478 1468
1479 /* Create PBL */ 1469 /* Create PBL */
1480 num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE; 1470 num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
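[annotation] Every allocation touched in this file (and the qla2xxx hunks below) gets the same mechanical conversion: dma_zalloc_coherent() returns zeroed memory, so the dma_alloc_coherent()/memset() pair collapses into one call. Sketch:

    #include <linux/dma-mapping.h>

    static void *alloc_ring(struct device *dev, size_t size, dma_addr_t *dma)
    {
        /* before: p = dma_alloc_coherent(...); memset(p, 0, size); */
        return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
    }

The vzalloc() change in qla2x00_optrom_setup() below is the vmalloc-side twin of the same cleanup.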
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9ce28c4f9812..89a4999fa631 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1843,14 +1843,13 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1843 if (qla2x00_reset_active(vha)) 1843 if (qla2x00_reset_active(vha))
1844 goto done; 1844 goto done;
1845 1845
1846 stats = dma_alloc_coherent(&ha->pdev->dev, 1846 stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
1847 sizeof(*stats), &stats_dma, GFP_KERNEL); 1847 &stats_dma, GFP_KERNEL);
1848 if (!stats) { 1848 if (!stats) {
1849 ql_log(ql_log_warn, vha, 0x707d, 1849 ql_log(ql_log_warn, vha, 0x707d,
1850 "Failed to allocate memory for stats.\n"); 1850 "Failed to allocate memory for stats.\n");
1851 goto done; 1851 goto done;
1852 } 1852 }
1853 memset(stats, 0, sizeof(*stats));
1854 1853
1855 rval = QLA_FUNCTION_FAILED; 1854 rval = QLA_FUNCTION_FAILED;
1856 if (IS_FWI2_CAPABLE(ha)) { 1855 if (IS_FWI2_CAPABLE(ha)) {
@@ -2170,6 +2169,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
2170 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, 2169 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
2171 vha->gnl.ldma); 2170 vha->gnl.ldma);
2172 2171
2172 vfree(vha->scan.l);
2173
2173 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { 2174 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
2174 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) 2175 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
2175 ql_log(ql_log_warn, vha, 0x7087, 2176 ql_log(ql_log_warn, vha, 0x7087,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index e3ac7078d2aa..e2d5d3ca0f57 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1435,7 +1435,7 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1435 ha->optrom_state = QLA_SREADING; 1435 ha->optrom_state = QLA_SREADING;
1436 } 1436 }
1437 1437
1438 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 1438 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
1439 if (!ha->optrom_buffer) { 1439 if (!ha->optrom_buffer) {
1440 ql_log(ql_log_warn, vha, 0x7059, 1440 ql_log(ql_log_warn, vha, 0x7059,
1441 "Read: Unable to allocate memory for optrom retrieval " 1441 "Read: Unable to allocate memory for optrom retrieval "
@@ -1445,7 +1445,6 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1445 return -ENOMEM; 1445 return -ENOMEM;
1446 } 1446 }
1447 1447
1448 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1449 return 0; 1448 return 0;
1450} 1449}
1451 1450
@@ -2314,16 +2313,14 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2314 if (!IS_FWI2_CAPABLE(ha)) 2313 if (!IS_FWI2_CAPABLE(ha))
2315 return -EPERM; 2314 return -EPERM;
2316 2315
2317 stats = dma_alloc_coherent(&ha->pdev->dev, 2316 stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
2318 sizeof(*stats), &stats_dma, GFP_KERNEL); 2317 &stats_dma, GFP_KERNEL);
2319 if (!stats) { 2318 if (!stats) {
2320 ql_log(ql_log_warn, vha, 0x70e2, 2319 ql_log(ql_log_warn, vha, 0x70e2,
2321 "Failed to allocate memory for stats.\n"); 2320 "Failed to allocate memory for stats.\n");
2322 return -ENOMEM; 2321 return -ENOMEM;
2323 } 2322 }
2324 2323
2325 memset(stats, 0, sizeof(*stats));
2326
2327 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options); 2324 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2328 2325
2329 if (rval == QLA_SUCCESS) { 2326 if (rval == QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 01a9b8971e88..be7d6824581a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -246,8 +246,8 @@
246 * There is no correspondence between an N-PORT id and an AL_PA. Therefore the 246 * There is no correspondence between an N-PORT id and an AL_PA. Therefore the
247 * valid range of an N-PORT id is 0 through 0x7ef. 247 * valid range of an N-PORT id is 0 through 0x7ef.
248 */ 248 */
249#define NPH_LAST_HANDLE 0x7ef 249#define NPH_LAST_HANDLE 0x7ee
250#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */ 250#define NPH_MGMT_SERVER 0x7ef /* FFFFEF */
251#define NPH_SNS 0x7fc /* FFFFFC */ 251#define NPH_SNS 0x7fc /* FFFFFC */
252#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */ 252#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
253#define NPH_F_PORT 0x7fe /* FFFFFE */ 253#define NPH_F_PORT 0x7fe /* FFFFFE */
@@ -288,6 +288,8 @@ struct name_list_extended {
288#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */ 288#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
289#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ 289#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
290#define FW_DEF_EXCHANGES_CNT 2048 290#define FW_DEF_EXCHANGES_CNT 2048
291#define FW_MAX_EXCHANGES_CNT (32 * 1024)
292#define REDUCE_EXCHANGES_CNT (8 * 1024)
291 293
292struct req_que; 294struct req_que;
293struct qla_tgt_sess; 295struct qla_tgt_sess;
@@ -315,6 +317,29 @@ struct srb_cmd {
315/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ 317/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
316#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) 318#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
317 319
320/*
321 * 24 bit port ID type definition.
322 */
323typedef union {
324 uint32_t b24 : 24;
325
326 struct {
327#ifdef __BIG_ENDIAN
328 uint8_t domain;
329 uint8_t area;
330 uint8_t al_pa;
331#elif defined(__LITTLE_ENDIAN)
332 uint8_t al_pa;
333 uint8_t area;
334 uint8_t domain;
335#else
336#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
337#endif
338 uint8_t rsvd_1;
339 } b;
340} port_id_t;
341#define INVALID_PORT_ID 0xFFFFFF
342
318struct els_logo_payload { 343struct els_logo_payload {
319 uint8_t opcode; 344 uint8_t opcode;
320 uint8_t rsvd[3]; 345 uint8_t rsvd[3];
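[annotation] port_id_t moves up in the header (its old definition is deleted further down) so the new id member of struct ct_arg can use it. The type is the driver's 24-bit N_Port ID idiom: a b24 bitfield for whole-value handling overlaid on endian-ordered domain/area/al_pa bytes. A usage sketch:

    static bool same_port(port_id_t a, port_id_t b)
    {
        return a.b24 == b.b24;           /* one 24-bit compare */
    }

    static void fill_ct_port_id(uint8_t dst[3], port_id_t id)
    {
        /* CT payloads carry the ID big-endian, domain first, matching
         * the rft_id request built later in this patch. */
        dst[0] = id.b.domain;
        dst[1] = id.b.area;
        dst[2] = id.b.al_pa;
    }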
@@ -338,6 +363,7 @@ struct ct_arg {
338 u32 rsp_size; 363 u32 rsp_size;
339 void *req; 364 void *req;
340 void *rsp; 365 void *rsp;
366 port_id_t id;
341}; 367};
342 368
343/* 369/*
@@ -416,6 +442,7 @@ struct srb_iocb {
416 struct { 442 struct {
417 uint32_t cmd_hndl; 443 uint32_t cmd_hndl;
418 __le16 comp_status; 444 __le16 comp_status;
445 __le16 req_que_no;
419 struct completion comp; 446 struct completion comp;
420 } abt; 447 } abt;
421 struct ct_arg ctarg; 448 struct ct_arg ctarg;
@@ -448,6 +475,10 @@ struct srb_iocb {
448 uint32_t timeout_sec; 475 uint32_t timeout_sec;
449 struct list_head entry; 476 struct list_head entry;
450 } nvme; 477 } nvme;
478 struct {
479 u16 cmd;
480 u16 vp_index;
481 } ctrlvp;
451 } u; 482 } u;
452 483
453 struct timer_list timer; 484 struct timer_list timer;
@@ -476,6 +507,8 @@ struct srb_iocb {
476#define SRB_NVME_CMD 19 507#define SRB_NVME_CMD 19
477#define SRB_NVME_LS 20 508#define SRB_NVME_LS 20
478#define SRB_PRLI_CMD 21 509#define SRB_PRLI_CMD 21
510#define SRB_CTRL_VP 22
511#define SRB_PRLO_CMD 23
479 512
480enum { 513enum {
481 TYPE_SRB, 514 TYPE_SRB,
@@ -499,8 +532,12 @@ typedef struct srb {
499 const char *name; 532 const char *name;
500 int iocbs; 533 int iocbs;
501 struct qla_qpair *qpair; 534 struct qla_qpair *qpair;
535 struct list_head elem;
502 u32 gen1; /* scratch */ 536 u32 gen1; /* scratch */
503 u32 gen2; /* scratch */ 537 u32 gen2; /* scratch */
538 int rc;
539 int retry_count;
540 struct completion comp;
504 union { 541 union {
505 struct srb_iocb iocb_cmd; 542 struct srb_iocb iocb_cmd;
506 struct bsg_job *bsg_job; 543 struct bsg_job *bsg_job;
@@ -2164,28 +2201,6 @@ struct imm_ntfy_from_isp {
2164#define REQUEST_ENTRY_SIZE (sizeof(request_t)) 2201#define REQUEST_ENTRY_SIZE (sizeof(request_t))
2165 2202
2166 2203
2167/*
2168 * 24 bit port ID type definition.
2169 */
2170typedef union {
2171 uint32_t b24 : 24;
2172
2173 struct {
2174#ifdef __BIG_ENDIAN
2175 uint8_t domain;
2176 uint8_t area;
2177 uint8_t al_pa;
2178#elif defined(__LITTLE_ENDIAN)
2179 uint8_t al_pa;
2180 uint8_t area;
2181 uint8_t domain;
2182#else
2183#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
2184#endif
2185 uint8_t rsvd_1;
2186 } b;
2187} port_id_t;
2188#define INVALID_PORT_ID 0xFFFFFF
2189 2204
2190/* 2205/*
2191 * Switch info gathering structure. 2206 * Switch info gathering structure.
@@ -2257,14 +2272,17 @@ struct ct_sns_desc {
2257 2272
2258enum discovery_state { 2273enum discovery_state {
2259 DSC_DELETED, 2274 DSC_DELETED,
2275 DSC_GNN_ID,
2260 DSC_GID_PN, 2276 DSC_GID_PN,
2261 DSC_GNL, 2277 DSC_GNL,
2262 DSC_LOGIN_PEND, 2278 DSC_LOGIN_PEND,
2263 DSC_LOGIN_FAILED, 2279 DSC_LOGIN_FAILED,
2264 DSC_GPDB, 2280 DSC_GPDB,
2281 DSC_GFPN_ID,
2265 DSC_GPSC, 2282 DSC_GPSC,
2266 DSC_UPD_FCPORT, 2283 DSC_UPD_FCPORT,
2267 DSC_LOGIN_COMPLETE, 2284 DSC_LOGIN_COMPLETE,
2285 DSC_ADISC,
2268 DSC_DELETE_PEND, 2286 DSC_DELETE_PEND,
2269}; 2287};
2270 2288
@@ -2290,7 +2308,9 @@ enum fcport_mgt_event {
2290 FCME_GPDB_DONE, 2308 FCME_GPDB_DONE,
2291 FCME_GPNID_DONE, 2309 FCME_GPNID_DONE,
2292 FCME_GFFID_DONE, 2310 FCME_GFFID_DONE,
2293 FCME_DELETE_DONE, 2311 FCME_ADISC_DONE,
2312 FCME_GNNID_DONE,
2313 FCME_GFPNID_DONE,
2294}; 2314};
2295 2315
2296enum rscn_addr_format { 2316enum rscn_addr_format {
@@ -2315,6 +2335,7 @@ typedef struct fc_port {
2315 2335
2316 unsigned int conf_compl_supported:1; 2336 unsigned int conf_compl_supported:1;
2317 unsigned int deleted:2; 2337 unsigned int deleted:2;
2338 unsigned int free_pending:1;
2318 unsigned int local:1; 2339 unsigned int local:1;
2319 unsigned int logout_on_delete:1; 2340 unsigned int logout_on_delete:1;
2320 unsigned int logo_ack_needed:1; 2341 unsigned int logo_ack_needed:1;
@@ -2323,6 +2344,7 @@ typedef struct fc_port {
2323 unsigned int login_pause:1; 2344 unsigned int login_pause:1;
2324 unsigned int login_succ:1; 2345 unsigned int login_succ:1;
2325 unsigned int query:1; 2346 unsigned int query:1;
2347 unsigned int id_changed:1;
2326 2348
2327 struct work_struct nvme_del_work; 2349 struct work_struct nvme_del_work;
2328 struct completion nvme_del_done; 2350 struct completion nvme_del_done;
@@ -2434,6 +2456,7 @@ static const char * const port_state_str[] = {
2434#define FCF_FCP2_DEVICE BIT_2 2456#define FCF_FCP2_DEVICE BIT_2
2435#define FCF_ASYNC_SENT BIT_3 2457#define FCF_ASYNC_SENT BIT_3
2436#define FCF_CONF_COMP_SUPPORTED BIT_4 2458#define FCF_CONF_COMP_SUPPORTED BIT_4
2459#define FCF_ASYNC_ACTIVE BIT_5
2437 2460
2438/* No loop ID flag. */ 2461/* No loop ID flag. */
2439#define FC_NO_LOOP_ID 0x1000 2462#define FC_NO_LOOP_ID 0x1000
@@ -2470,6 +2493,11 @@ static const char * const port_state_str[] = {
2470#define GA_NXT_REQ_SIZE (16 + 4) 2493#define GA_NXT_REQ_SIZE (16 + 4)
2471#define GA_NXT_RSP_SIZE (16 + 620) 2494#define GA_NXT_RSP_SIZE (16 + 620)
2472 2495
2496#define GPN_FT_CMD 0x172
2497#define GPN_FT_REQ_SIZE (16 + 4)
2498#define GNN_FT_CMD 0x173
2499#define GNN_FT_REQ_SIZE (16 + 4)
2500
2473#define GID_PT_CMD 0x1A1 2501#define GID_PT_CMD 0x1A1
2474#define GID_PT_REQ_SIZE (16 + 4) 2502#define GID_PT_REQ_SIZE (16 + 4)
2475 2503
@@ -2725,6 +2753,13 @@ struct ct_sns_req {
2725 } port_id; 2753 } port_id;
2726 2754
2727 struct { 2755 struct {
2756 uint8_t reserved;
2757 uint8_t domain;
2758 uint8_t area;
2759 uint8_t port_type;
2760 } gpn_ft;
2761
2762 struct {
2728 uint8_t port_type; 2763 uint8_t port_type;
2729 uint8_t domain; 2764 uint8_t domain;
2730 uint8_t area; 2765 uint8_t area;
@@ -2837,6 +2872,27 @@ struct ct_sns_gid_pt_data {
2837 uint8_t port_id[3]; 2872 uint8_t port_id[3];
2838}; 2873};
2839 2874
2875/* It's the same for both GPN_FT and GNN_FT */
2876struct ct_sns_gpnft_rsp {
2877 struct {
2878 struct ct_cmd_hdr header;
2879 uint16_t response;
2880 uint16_t residual;
2881 uint8_t fragment_id;
2882 uint8_t reason_code;
2883 uint8_t explanation_code;
2884 uint8_t vendor_unique;
2885 };
2886 /* Assume the largest number of targets for the union */
2887 struct ct_sns_gpn_ft_data {
2888 u8 control_byte;
2889 u8 port_id[3];
2890 u32 reserved;
2891 u8 port_name[8];
2892 } entries[1];
2893};
2894
2895/* CT command response */
2840struct ct_sns_rsp { 2896struct ct_sns_rsp {
2841 struct ct_rsp_hdr header; 2897 struct ct_rsp_hdr header;
2842 2898
@@ -2912,6 +2968,33 @@ struct ct_sns_pkt {
2912 } p; 2968 } p;
2913}; 2969};
2914 2970
2971struct ct_sns_gpnft_pkt {
2972 union {
2973 struct ct_sns_req req;
2974 struct ct_sns_gpnft_rsp rsp;
2975 } p;
2976};
2977
2978enum scan_flags_t {
2979 SF_SCANNING = BIT_0,
2980 SF_QUEUED = BIT_1,
2981};
2982
2983struct fab_scan_rp {
2984 port_id_t id;
2985 u8 port_name[8];
2986 u8 node_name[8];
2987};
2988
2989struct fab_scan {
2990 struct fab_scan_rp *l;
2991 u32 size;
2992 u16 scan_retry;
2993#define MAX_SCAN_RETRIES 5
2994 enum scan_flags_t scan_flags;
2995 struct delayed_work scan_work;
2996};
2997
2915/* 2998/*
2916 * SNS command structures -- for 2200 compatibility. 2999 * SNS command structures -- for 2200 compatibility.
2917 */ 3000 */
@@ -3117,7 +3200,7 @@ enum qla_work_type {
3117 QLA_EVT_AENFX, 3200 QLA_EVT_AENFX,
3118 QLA_EVT_GIDPN, 3201 QLA_EVT_GIDPN,
3119 QLA_EVT_GPNID, 3202 QLA_EVT_GPNID,
3120 QLA_EVT_GPNID_DONE, 3203 QLA_EVT_UNMAP,
3121 QLA_EVT_NEW_SESS, 3204 QLA_EVT_NEW_SESS,
3122 QLA_EVT_GPDB, 3205 QLA_EVT_GPDB,
3123 QLA_EVT_PRLI, 3206 QLA_EVT_PRLI,
@@ -3125,6 +3208,15 @@ enum qla_work_type {
3125 QLA_EVT_UPD_FCPORT, 3208 QLA_EVT_UPD_FCPORT,
3126 QLA_EVT_GNL, 3209 QLA_EVT_GNL,
3127 QLA_EVT_NACK, 3210 QLA_EVT_NACK,
3211 QLA_EVT_RELOGIN,
3212 QLA_EVT_ASYNC_PRLO,
3213 QLA_EVT_ASYNC_PRLO_DONE,
3214 QLA_EVT_GPNFT,
3215 QLA_EVT_GPNFT_DONE,
3216 QLA_EVT_GNNFT_DONE,
3217 QLA_EVT_GNNID,
3218 QLA_EVT_GFPNID,
3219 QLA_EVT_SP_RETRY,
3128}; 3220};
3129 3221
3130 3222
@@ -3166,7 +3258,9 @@ struct qla_work_evt {
3166 struct { 3258 struct {
3167 port_id_t id; 3259 port_id_t id;
3168 u8 port_name[8]; 3260 u8 port_name[8];
3261 u8 node_name[8];
3169 void *pla; 3262 void *pla;
3263 u8 fc4_type;
3170 } new_sess; 3264 } new_sess;
3171 struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */ 3265 struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */
3172 fc_port_t *fcport; 3266 fc_port_t *fcport;
@@ -3177,6 +3271,9 @@ struct qla_work_evt {
3177 u8 iocb[IOCB_SIZE]; 3271 u8 iocb[IOCB_SIZE];
3178 int type; 3272 int type;
3179 } nack; 3273 } nack;
3274 struct {
3275 u8 fc4_type;
3276 } gpnft;
3180 } u; 3277 } u;
3181}; 3278};
3182 3279
@@ -3433,10 +3530,6 @@ struct qlt_hw_data {
3433 3530
3434#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ 3531#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
3435 3532
3436#define QLA_EARLY_LINKUP(_ha) \
3437 ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
3438 _ha->flags.fw_started && !_ha->flags.fw_init_done)
3439
3440/* 3533/*
3441 * Qlogic host adapter specific data structure. 3534 * Qlogic host adapter specific data structure.
3442*/ 3535*/
@@ -3494,8 +3587,10 @@ struct qla_hw_data {
3494 3587
3495 uint32_t detected_lr_sfp:1; 3588 uint32_t detected_lr_sfp:1;
3496 uint32_t using_lr_setting:1; 3589 uint32_t using_lr_setting:1;
3590 uint32_t rida_fmt2:1;
3497 } flags; 3591 } flags;
3498 3592
3593 uint16_t max_exchg;
3499 uint16_t long_range_distance; /* 32G & above */ 3594 uint16_t long_range_distance; /* 32G & above */
3500#define LR_DISTANCE_5K 1 3595#define LR_DISTANCE_5K 1
3501#define LR_DISTANCE_10K 0 3596#define LR_DISTANCE_10K 0
@@ -3713,6 +3808,8 @@ struct qla_hw_data {
3713 (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) 3808 (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
3714#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \ 3809#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
3715 (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) 3810 (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
3811#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
3812 IS_QLA83XX(ha) || IS_QLA27XX(ha))
3716 3813
3717 /* HBA serial number */ 3814 /* HBA serial number */
3718 uint8_t serial0; 3815 uint8_t serial0;
@@ -3795,7 +3892,7 @@ struct qla_hw_data {
3795 int exchoffld_size; 3892 int exchoffld_size;
3796 int exchoffld_count; 3893 int exchoffld_count;
3797 3894
3798 void *swl; 3895 void *swl;
3799 3896
3800 /* These are used by mailbox operations. */ 3897 /* These are used by mailbox operations. */
3801 uint16_t mailbox_out[MAILBOX_REGISTER_COUNT]; 3898 uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -4107,6 +4204,7 @@ typedef struct scsi_qla_host {
4107#define LOOP_READY 5 4204#define LOOP_READY 5
4108#define LOOP_DEAD 6 4205#define LOOP_DEAD 6
4109 4206
4207 unsigned long relogin_jif;
4110 unsigned long dpc_flags; 4208 unsigned long dpc_flags;
4111#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */ 4209#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
4112#define RESET_ACTIVE 1 4210#define RESET_ACTIVE 1
@@ -4139,6 +4237,7 @@ typedef struct scsi_qla_host {
4139#define SET_ZIO_THRESHOLD_NEEDED 28 4237#define SET_ZIO_THRESHOLD_NEEDED 28
4140#define DETECT_SFP_CHANGE 29 4238#define DETECT_SFP_CHANGE 29
4141#define N2N_LOGIN_NEEDED 30 4239#define N2N_LOGIN_NEEDED 30
4240#define IOCB_WORK_ACTIVE 31
4142 4241
4143 unsigned long pci_flags; 4242 unsigned long pci_flags;
4144#define PFLG_DISCONNECTED 0 /* PCI device removed */ 4243#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4252,6 +4351,8 @@ typedef struct scsi_qla_host {
4252 uint8_t n2n_node_name[WWN_SIZE]; 4351 uint8_t n2n_node_name[WWN_SIZE];
4253 uint8_t n2n_port_name[WWN_SIZE]; 4352 uint8_t n2n_port_name[WWN_SIZE];
4254 uint16_t n2n_id; 4353 uint16_t n2n_id;
4354 struct list_head gpnid_list;
4355 struct fab_scan scan;
4255} scsi_qla_host_t; 4356} scsi_qla_host_t;
4256 4357
4257struct qla27xx_image_status { 4358struct qla27xx_image_status {
@@ -4511,6 +4612,16 @@ struct sff_8247_a0 {
4511#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \ 4612#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
4512 (IS_QLA27XX(_ha) || IS_QLA83XX(_ha))) 4613 (IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
4513 4614
4615#define SAVE_TOPO(_ha) { \
4616 if (_ha->current_topology) \
4617 _ha->prev_topology = _ha->current_topology; \
4618}
4619
4620#define N2N_TOPO(ha) \
4621 ((ha->prev_topology == ISP_CFG_N && !ha->current_topology) || \
4622 ha->current_topology == ISP_CFG_N || \
4623 !ha->current_topology)
4624
4514#include "qla_target.h" 4625#include "qla_target.h"
4515#include "qla_gbl.h" 4626#include "qla_gbl.h"
4516#include "qla_dbg.h" 4627#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index d231e7156134..0b190082aa8d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -127,21 +127,32 @@ static int
127qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) 127qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
128{ 128{
129 struct scsi_qla_host *vha = s->private; 129 struct scsi_qla_host *vha = s->private;
130 struct qla_hw_data *ha = vha->hw; 130 uint16_t mb[MAX_IOCB_MB_REG];
131 int rc;
132
133 rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
134 if (rc != QLA_SUCCESS) {
135 seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
136 } else {
137 seq_puts(s, "FW Resource count\n\n");
138 seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
139 seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
140 seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
141 seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
142 seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
143 seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
144 seq_printf(s, "MAX VP count[%d]\n", mb[11]);
145 seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
146 seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
147 mb[20]);
148 seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
149 mb[21]);
150 seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
151 mb[22]);
152 seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
153 mb[23]);
131 154
132 seq_puts(s, "FW Resource count\n\n"); 155 }
133 seq_printf(s, "Original TGT exchg count[%d]\n",
134 ha->orig_fw_tgt_xcb_count);
135 seq_printf(s, "current TGT exchg count[%d]\n",
136 ha->cur_fw_tgt_xcb_count);
137 seq_printf(s, "original Initiator Exchange count[%d]\n",
138 ha->orig_fw_xcb_count);
139 seq_printf(s, "Current Initiator Exchange count[%d]\n",
140 ha->cur_fw_xcb_count);
141 seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
142 seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
143 seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
144 seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
145 156
146 return 0; 157 return 0;
147} 158}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d5cef0727e72..5d8688e5bc7c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1392,7 +1392,7 @@ struct vp_rpt_id_entry_24xx {
1392 1392
1393 uint8_t port_name[8]; 1393 uint8_t port_name[8];
1394 uint8_t node_name[8]; 1394 uint8_t node_name[8];
1395 uint32_t remote_nport_id; 1395 uint8_t remote_nport_id[4];
1396 uint32_t reserved_5; 1396 uint32_t reserved_5;
1397 } f2; 1397 } f2;
1398 } u; 1398 } u;
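[annotation] Declaring remote_nport_id as uint8_t[4] rather than uint32_t matches the firmware layout of the field, three ID bytes plus a reserved byte, and lets consumers pick bytes directly instead of shifting a misdeclared u32. A hedged sketch of the consuming side, with the byte order assumed from the driver's little-endian port_id_t layout:

    port_id_t id;

    id.b.al_pa  = rptid_entry->u.f2.remote_nport_id[0];
    id.b.area   = rptid_entry->u.f2.remote_nport_id[1];
    id.b.domain = rptid_entry->u.f2.remote_nport_id[2];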
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index fa115c7433e5..e9295398050c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -66,6 +66,7 @@ extern void qla84xx_put_chip(struct scsi_qla_host *);
66extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *, 66extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
67 uint16_t *); 67 uint16_t *);
68extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *); 68extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
69extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
69extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *, 70extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
70 uint16_t *); 71 uint16_t *);
71extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t); 72extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
@@ -104,11 +105,18 @@ int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
104int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *); 105int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
105int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, 106int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
106 struct imm_ntfy_from_isp *, int); 107 struct imm_ntfy_from_isp *, int);
107int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, 108int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
108 void *); 109 void *, u8);
109int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); 110int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
110int qla24xx_detect_sfp(scsi_qla_host_t *vha); 111int qla24xx_detect_sfp(scsi_qla_host_t *vha);
111int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); 112int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
113void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
114 uint16_t *);
115extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
116 uint16_t *);
117extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
118 fc_port_t *, uint16_t *);
119
112/* 120/*
113 * Global Data in qla_os.c source file. 121 * Global Data in qla_os.c source file.
114 */ 122 */
@@ -148,6 +156,7 @@ extern int ql2xuctrlirq;
148extern int ql2xnvmeenable; 156extern int ql2xnvmeenable;
149extern int ql2xautodetectsfp; 157extern int ql2xautodetectsfp;
150extern int ql2xenablemsix; 158extern int ql2xenablemsix;
159extern int qla2xuseresexchforels;
151 160
152extern int qla2x00_loop_reset(scsi_qla_host_t *); 161extern int qla2x00_loop_reset(scsi_qla_host_t *);
153extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 162extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -203,6 +212,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
203 uint16_t *); 212 uint16_t *);
204int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); 213int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
205int qla24xx_async_abort_cmd(srb_t *); 214int qla24xx_async_abort_cmd(srb_t *);
215int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
206 216
207/* 217/*
208 * Global Functions in qla_mid.c source file. 218 * Global Functions in qla_mid.c source file.
@@ -494,6 +504,7 @@ int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
494 504
495extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *); 505extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
496extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t); 506extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
507int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int);
497 508
498/* 509/*
499 * Global Function Prototypes in qla_isr.c source file. 510 * Global Function Prototypes in qla_isr.c source file.
@@ -639,14 +650,26 @@ extern void qla2x00_free_fcport(fc_port_t *);
639 650
640extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *); 651extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
641extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *); 652extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
642void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*);
643void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *); 653void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
644 654
645int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *); 655int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
646int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); 656int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
657void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
647int qla2x00_mgmt_svr_login(scsi_qla_host_t *); 658int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
648void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea); 659void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
649int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport); 660int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
661int qla24xx_async_gpnft(scsi_qla_host_t *, u8);
662void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
663void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
664int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *);
665void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *);
666int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *);
667int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
668int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
669void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
670void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
671void qla_scan_work_fn(struct work_struct *);
672
650/* 673/*
651 * Global Function Prototypes in qla_attr.c source file. 674 * Global Function Prototypes in qla_attr.c source file.
652 */ 675 */
@@ -864,8 +887,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
864void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *, 887void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *,
865 struct fc_port *, enum qlt_plogi_link_t); 888 struct fc_port *, enum qlt_plogi_link_t);
866void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *); 889void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *);
867extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool); 890extern void qlt_schedule_sess_for_deletion(struct fc_port *);
868extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *);
869extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, 891extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
870 uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); 892 uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
871void qla24xx_delete_sess_fn(struct work_struct *); 893void qla24xx_delete_sess_fn(struct work_struct *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index bc3db6abc9a0..5bf9a59432f6 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -14,6 +14,10 @@ static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *); 14static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15static int qla2x00_sns_rft_id(scsi_qla_host_t *); 15static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16static int qla2x00_sns_rnn_id(scsi_qla_host_t *); 16static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20static int qla_async_rsnn_nn(scsi_qla_host_t *);
17 21
18/** 22/**
19 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query. 23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
@@ -175,6 +179,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
175 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 179 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
176 } 180 }
177 break; 181 break;
182 case CS_TIMEOUT:
183 rval = QLA_FUNCTION_TIMEOUT;
184 /* fall through */
178 default: 185 default:
179 ql_dbg(ql_dbg_disc, vha, 0x2033, 186 ql_dbg(ql_dbg_disc, vha, 0x2033,
180 "%s failed, completion status (%x) on port_id: " 187 "%s failed, completion status (%x) on port_id: "
@@ -508,6 +515,72 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
508 return (rval); 515 return (rval);
509} 516}
510 517
518static void qla2x00_async_sns_sp_done(void *s, int rc)
519{
520 struct srb *sp = s;
521 struct scsi_qla_host *vha = sp->vha;
522 struct ct_sns_pkt *ct_sns;
523 struct qla_work_evt *e;
524
525 sp->rc = rc;
526 if (rc == QLA_SUCCESS) {
527 ql_dbg(ql_dbg_disc, vha, 0x204f,
528 "Async done-%s exiting normally.\n",
529 sp->name);
530 } else if (rc == QLA_FUNCTION_TIMEOUT) {
531 ql_dbg(ql_dbg_disc, vha, 0x204f,
532 "Async done-%s timeout\n", sp->name);
533 } else {
534 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
535 memset(ct_sns, 0, sizeof(*ct_sns));
536 sp->retry_count++;
537 if (sp->retry_count > 3)
538 goto err;
539
540 ql_dbg(ql_dbg_disc, vha, 0x204f,
541 "Async done-%s fail rc %x. Retry count %d\n",
542 sp->name, rc, sp->retry_count);
543
544 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
545 if (!e)
546 goto err2;
547
548 del_timer(&sp->u.iocb_cmd.timer);
549 e->u.iosb.sp = sp;
550 qla2x00_post_work(vha, e);
551 return;
552 }
553
554err:
555 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
556err2:
557 if (!e) {
558 /* please ignore kernel warning. otherwise, we have mem leak. */
559 if (sp->u.iocb_cmd.u.ctarg.req) {
560 dma_free_coherent(&vha->hw->pdev->dev,
561 sizeof(struct ct_sns_pkt),
562 sp->u.iocb_cmd.u.ctarg.req,
563 sp->u.iocb_cmd.u.ctarg.req_dma);
564 sp->u.iocb_cmd.u.ctarg.req = NULL;
565 }
566
567 if (sp->u.iocb_cmd.u.ctarg.rsp) {
568 dma_free_coherent(&vha->hw->pdev->dev,
569 sizeof(struct ct_sns_pkt),
570 sp->u.iocb_cmd.u.ctarg.rsp,
571 sp->u.iocb_cmd.u.ctarg.rsp_dma);
572 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
573 }
574
575 sp->free(sp);
576
577 return;
578 }
579
580 e->u.iosb.sp = sp;
581 qla2x00_post_work(vha, e);
582}
583
511/** 584/**
512 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA. 585 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
513 * @ha: HA context 586 * @ha: HA context
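[annotation] qla2x00_async_sns_sp_done() is the shared completion for the new async SNS registrations. Its retry policy: on a non-timeout failure the stale CT response is zeroed and the srb is requeued through the work queue up to three times; on success, timeout, or retry exhaustion the coherent request/response buffers are torn down via QLA_EVT_UNMAP. Condensed sketch (helper names assumed):

    if (rc != QLA_SUCCESS && rc != QLA_FUNCTION_TIMEOUT &&
        ++sp->retry_count <= 3) {
        memset(ct_sns, 0, sizeof(*ct_sns));  /* drop the stale response */
        post_sp_retry_work(vha, sp);         /* QLA_EVT_SP_RETRY */
    } else {
        post_unmap_work(vha, sp);   /* QLA_EVT_UNMAP frees the coherent
                                     * req/rsp buffers and the srb */
    }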
@@ -517,57 +590,87 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
517int 590int
518qla2x00_rft_id(scsi_qla_host_t *vha) 591qla2x00_rft_id(scsi_qla_host_t *vha)
519{ 592{
520 int rval;
521 struct qla_hw_data *ha = vha->hw; 593 struct qla_hw_data *ha = vha->hw;
522 ms_iocb_entry_t *ms_pkt;
523 struct ct_sns_req *ct_req;
524 struct ct_sns_rsp *ct_rsp;
525 struct ct_arg arg;
526 594
527 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 595 if (IS_QLA2100(ha) || IS_QLA2200(ha))
528 return qla2x00_sns_rft_id(vha); 596 return qla2x00_sns_rft_id(vha);
529 597
530 arg.iocb = ha->ms_iocb; 598 return qla_async_rftid(vha, &vha->d_id);
531 arg.req_dma = ha->ct_sns_dma; 599}
532 arg.rsp_dma = ha->ct_sns_dma;
533 arg.req_size = RFT_ID_REQ_SIZE;
534 arg.rsp_size = RFT_ID_RSP_SIZE;
535 arg.nport_handle = NPH_SNS;
536 600
537 /* Issue RFT_ID */ 601static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
538 /* Prepare common MS IOCB */ 602{
539 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 603 int rval = QLA_MEMORY_ALLOC_FAILED;
604 struct ct_sns_req *ct_req;
605 srb_t *sp;
606 struct ct_sns_pkt *ct_sns;
607
608 if (!vha->flags.online)
609 goto done;
610
611 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
612 if (!sp)
613 goto done;
614
615 sp->type = SRB_CT_PTHRU_CMD;
616 sp->name = "rft_id";
617 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
618
619 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
620 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
621 GFP_KERNEL);
622 if (!sp->u.iocb_cmd.u.ctarg.req) {
623 ql_log(ql_log_warn, vha, 0xd041,
624 "%s: Failed to allocate ct_sns request.\n",
625 __func__);
626 goto done_free_sp;
627 }
628
629 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
630 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
631 GFP_KERNEL);
632 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
633 ql_log(ql_log_warn, vha, 0xd042,
634 "%s: Failed to allocate ct_sns request.\n",
635 __func__);
636 goto done_free_sp;
637 }
638 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
639 memset(ct_sns, 0, sizeof(*ct_sns));
640 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
540 641
541 /* Prepare CT request */ 642 /* Prepare CT request */
542 ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD, 643 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
543 RFT_ID_RSP_SIZE);
544 ct_rsp = &ha->ct_sns->p.rsp;
545 644
546 /* Prepare CT arguments -- port_id, FC-4 types */ 645 /* Prepare CT arguments -- port_id, FC-4 types */
547 ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain; 646 ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
548 ct_req->req.rft_id.port_id[1] = vha->d_id.b.area; 647 ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
549 ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa; 648 ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
550
551 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ 649 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
552 650
553 if (vha->flags.nvme_enabled) 651 if (vha->flags.nvme_enabled)
554 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */ 652 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
555 /* Execute MS IOCB */ 653
556 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 654 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
557 sizeof(ms_iocb_entry_t)); 655 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
656 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
657 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
658 sp->done = qla2x00_async_sns_sp_done;
659
660 rval = qla2x00_start_sp(sp);
558 if (rval != QLA_SUCCESS) { 661 if (rval != QLA_SUCCESS) {
559 /*EMPTY*/
560 ql_dbg(ql_dbg_disc, vha, 0x2043, 662 ql_dbg(ql_dbg_disc, vha, 0x2043,
561 "RFT_ID issue IOCB failed (%d).\n", rval); 663 "RFT_ID issue IOCB failed (%d).\n", rval);
562 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") != 664 goto done_free_sp;
563 QLA_SUCCESS) {
564 rval = QLA_FUNCTION_FAILED;
565 } else {
566 ql_dbg(ql_dbg_disc, vha, 0x2044,
567 "RFT_ID exiting normally.\n");
568 } 665 }
569 666 ql_dbg(ql_dbg_disc, vha, 0xffff,
570 return (rval); 667 "Async-%s - hdl=%x portid %06x.\n",
668 sp->name, sp->handle, d_id->b24);
669 return rval;
670done_free_sp:
671 sp->free(sp);
672done:
673 return rval;
571} 674}
572 675
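The registration path converted in this hunk and the three that follow (RFT_ID, RFF_ID, RNN_ID, RSNN_NN) all share one shape: get an SRB, allocate coherent request and response buffers, build the CT_IU, set sizes and callbacks, then hand off to qla2x00_start_sp(), unwinding through done_free_sp/done labels on failure. A stripped-down sketch of that allocate-build-submit-unwind idiom, with plain calloc standing in for dma_alloc_coherent and all names illustrative:

#include <stdlib.h>

struct ct_cmd {
	void *req, *rsp;
	size_t req_size, rsp_size;
};

/* Stand-in for qla2x00_start_sp(); 0 means the IOCB was queued. */
static int start(struct ct_cmd *c) { (void)c; return 0; }

static int async_register(size_t req_size, size_t rsp_size)
{
	int rval = -1;
	struct ct_cmd *c = calloc(1, sizeof(*c));	/* the SRB */

	if (!c)
		goto done;
	c->req = calloc(1, req_size);	/* driver: dma_alloc_coherent() */
	if (!c->req)
		goto done_free;
	c->rsp = calloc(1, rsp_size);
	if (!c->rsp)
		goto done_free;

	c->req_size = req_size;		/* CT_IU payload is built here */
	c->rsp_size = rsp_size;

	rval = start(c);
	if (rval == 0)
		return 0;		/* completion path owns the buffers */

done_free:
	free(c->rsp);			/* driver: dma_free_coherent() */
	free(c->req);
	free(c);
done:
	return rval;
}

int main(void) { return async_register(64, 16); }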
573/** 676/**
@@ -579,12 +682,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
579int 682int
580qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) 683qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
581{ 684{
582 int rval;
583 struct qla_hw_data *ha = vha->hw; 685 struct qla_hw_data *ha = vha->hw;
584 ms_iocb_entry_t *ms_pkt;
585 struct ct_sns_req *ct_req;
586 struct ct_sns_rsp *ct_rsp;
587 struct ct_arg arg;
588 686
589 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 687 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
590 ql_dbg(ql_dbg_disc, vha, 0x2046, 688 ql_dbg(ql_dbg_disc, vha, 0x2046,
@@ -592,47 +690,81 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
592 return (QLA_SUCCESS); 690 return (QLA_SUCCESS);
593 } 691 }
594 692
595 arg.iocb = ha->ms_iocb; 693 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
596 arg.req_dma = ha->ct_sns_dma; 694 FC4_TYPE_FCP_SCSI);
597 arg.rsp_dma = ha->ct_sns_dma; 695}
598 arg.req_size = RFF_ID_REQ_SIZE;
599 arg.rsp_size = RFF_ID_RSP_SIZE;
600 arg.nport_handle = NPH_SNS;
601 696
602 /* Issue RFF_ID */ 697static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
603 /* Prepare common MS IOCB */ 698 u8 fc4feature, u8 fc4type)
604 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 699{
700 int rval = QLA_MEMORY_ALLOC_FAILED;
701 struct ct_sns_req *ct_req;
702 srb_t *sp;
703 struct ct_sns_pkt *ct_sns;
605 704
606 /* Prepare CT request */ 705 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
607 ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD, 706 if (!sp)
608 RFF_ID_RSP_SIZE); 707 goto done;
609 ct_rsp = &ha->ct_sns->p.rsp;
610 708
611 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ 709 sp->type = SRB_CT_PTHRU_CMD;
612 ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain; 710 sp->name = "rff_id";
613 ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; 711 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
614 ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
615 712
616 qlt_rff_id(vha, ct_req); 713 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
714 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
715 GFP_KERNEL);
716 if (!sp->u.iocb_cmd.u.ctarg.req) {
717 ql_log(ql_log_warn, vha, 0xd041,
718 "%s: Failed to allocate ct_sns request.\n",
719 __func__);
720 goto done_free_sp;
721 }
722
723 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
724 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
725 GFP_KERNEL);
726 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
727 ql_log(ql_log_warn, vha, 0xd042,
728 "%s: Failed to allocate ct_sns request.\n",
729 __func__);
730 goto done_free_sp;
731 }
732 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
733 memset(ct_sns, 0, sizeof(*ct_sns));
734 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
617 735
618 ct_req->req.rff_id.fc4_type = type; /* SCSI - FCP */ 736 /* Prepare CT request */
737 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
619 738
620 /* Execute MS IOCB */ 739 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
621 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 740 ct_req->req.rff_id.port_id[0] = d_id->b.domain;
622 sizeof(ms_iocb_entry_t)); 741 ct_req->req.rff_id.port_id[1] = d_id->b.area;
742 ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
743 ct_req->req.rff_id.fc4_feature = fc4feature;
744 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
745
746 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
747 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
748 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
749 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
750 sp->done = qla2x00_async_sns_sp_done;
751
752 rval = qla2x00_start_sp(sp);
623 if (rval != QLA_SUCCESS) { 753 if (rval != QLA_SUCCESS) {
624 /*EMPTY*/
625 ql_dbg(ql_dbg_disc, vha, 0x2047, 754 ql_dbg(ql_dbg_disc, vha, 0x2047,
626 "RFF_ID issue IOCB failed (%d).\n", rval); 755 "RFF_ID issue IOCB failed (%d).\n", rval);
627 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") != 756 goto done_free_sp;
628 QLA_SUCCESS) {
629 rval = QLA_FUNCTION_FAILED;
630 } else {
631 ql_dbg(ql_dbg_disc, vha, 0x2048,
632 "RFF_ID exiting normally.\n");
633 } 757 }
634 758
635 return (rval); 759 ql_dbg(ql_dbg_disc, vha, 0xffff,
760 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
761 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
762 return rval;
763
764done_free_sp:
765 sp->free(sp);
766done:
767 return rval;
636} 768}
637 769
638/** 770/**
@@ -644,54 +776,85 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
644int 776int
645qla2x00_rnn_id(scsi_qla_host_t *vha) 777qla2x00_rnn_id(scsi_qla_host_t *vha)
646{ 778{
647 int rval;
648 struct qla_hw_data *ha = vha->hw; 779 struct qla_hw_data *ha = vha->hw;
649 ms_iocb_entry_t *ms_pkt;
650 struct ct_sns_req *ct_req;
651 struct ct_sns_rsp *ct_rsp;
652 struct ct_arg arg;
653 780
654 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 781 if (IS_QLA2100(ha) || IS_QLA2200(ha))
655 return qla2x00_sns_rnn_id(vha); 782 return qla2x00_sns_rnn_id(vha);
656 783
657 arg.iocb = ha->ms_iocb; 784 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
658 arg.req_dma = ha->ct_sns_dma; 785}
659 arg.rsp_dma = ha->ct_sns_dma;
660 arg.req_size = RNN_ID_REQ_SIZE;
661 arg.rsp_size = RNN_ID_RSP_SIZE;
662 arg.nport_handle = NPH_SNS;
663 786
664 /* Issue RNN_ID */ 787static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
665 /* Prepare common MS IOCB */ 788 u8 *node_name)
666 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 789{
790 int rval = QLA_MEMORY_ALLOC_FAILED;
791 struct ct_sns_req *ct_req;
792 srb_t *sp;
793 struct ct_sns_pkt *ct_sns;
794
795 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
796 if (!sp)
797 goto done;
798
799 sp->type = SRB_CT_PTHRU_CMD;
800 sp->name = "rnid";
801 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
802
803 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
804 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
805 GFP_KERNEL);
806 if (!sp->u.iocb_cmd.u.ctarg.req) {
807 ql_log(ql_log_warn, vha, 0xd041,
808 "%s: Failed to allocate ct_sns request.\n",
809 __func__);
810 goto done_free_sp;
811 }
812
813 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
814 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
815 GFP_KERNEL);
816 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
817 ql_log(ql_log_warn, vha, 0xd042,
818 "%s: Failed to allocate ct_sns request.\n",
819 __func__);
820 goto done_free_sp;
821 }
822 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
823 memset(ct_sns, 0, sizeof(*ct_sns));
824 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
667 825
668 /* Prepare CT request */ 826 /* Prepare CT request */
669 ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); 827 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
670 ct_rsp = &ha->ct_sns->p.rsp;
671 828
672 /* Prepare CT arguments -- port_id, node_name */ 829 /* Prepare CT arguments -- port_id, node_name */
673 ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain; 830 ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
674 ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area; 831 ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
675 ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa; 832 ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
676
677 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE); 833 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
678 834
679 /* Execute MS IOCB */ 835 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
680 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 836 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
681 sizeof(ms_iocb_entry_t)); 837 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
838
839 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
840 sp->done = qla2x00_async_sns_sp_done;
841
842 rval = qla2x00_start_sp(sp);
682 if (rval != QLA_SUCCESS) { 843 if (rval != QLA_SUCCESS) {
683 /*EMPTY*/
684 ql_dbg(ql_dbg_disc, vha, 0x204d, 844 ql_dbg(ql_dbg_disc, vha, 0x204d,
685 "RNN_ID issue IOCB failed (%d).\n", rval); 845 "RNN_ID issue IOCB failed (%d).\n", rval);
686 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") != 846 goto done_free_sp;
687 QLA_SUCCESS) {
688 rval = QLA_FUNCTION_FAILED;
689 } else {
690 ql_dbg(ql_dbg_disc, vha, 0x204e,
691 "RNN_ID exiting normally.\n");
692 } 847 }
848 ql_dbg(ql_dbg_disc, vha, 0xffff,
849 "Async-%s - hdl=%x portid %06x\n",
850 sp->name, sp->handle, d_id->b24);
693 851
694 return (rval); 852 return rval;
853
854done_free_sp:
855 sp->free(sp);
856done:
857 return rval;
695} 858}
696 859
697void 860void
@@ -718,12 +881,7 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
718int 881int
719qla2x00_rsnn_nn(scsi_qla_host_t *vha) 882qla2x00_rsnn_nn(scsi_qla_host_t *vha)
720{ 883{
721 int rval;
722 struct qla_hw_data *ha = vha->hw; 884 struct qla_hw_data *ha = vha->hw;
723 ms_iocb_entry_t *ms_pkt;
724 struct ct_sns_req *ct_req;
725 struct ct_sns_rsp *ct_rsp;
726 struct ct_arg arg;
727 885
728 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 886 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
729 ql_dbg(ql_dbg_disc, vha, 0x2050, 887 ql_dbg(ql_dbg_disc, vha, 0x2050,
@@ -731,22 +889,49 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
731 return (QLA_SUCCESS); 889 return (QLA_SUCCESS);
732 } 890 }
733 891
734 arg.iocb = ha->ms_iocb; 892 return qla_async_rsnn_nn(vha);
735 arg.req_dma = ha->ct_sns_dma; 893}
736 arg.rsp_dma = ha->ct_sns_dma;
737 arg.req_size = 0;
738 arg.rsp_size = RSNN_NN_RSP_SIZE;
739 arg.nport_handle = NPH_SNS;
740 894
741 /* Issue RSNN_NN */ 895static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
742 /* Prepare common MS IOCB */ 896{
743 /* Request size adjusted after CT preparation */ 897 int rval = QLA_MEMORY_ALLOC_FAILED;
744 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg); 898 struct ct_sns_req *ct_req;
899 srb_t *sp;
900 struct ct_sns_pkt *ct_sns;
901
902 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
903 if (!sp)
904 goto done;
905
906 sp->type = SRB_CT_PTHRU_CMD;
907 sp->name = "rsnn_nn";
908 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
909
910 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
911 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
912 GFP_KERNEL);
913 if (!sp->u.iocb_cmd.u.ctarg.req) {
914 ql_log(ql_log_warn, vha, 0xd041,
915 "%s: Failed to allocate ct_sns request.\n",
916 __func__);
917 goto done_free_sp;
918 }
919
920 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
921 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
922 GFP_KERNEL);
923 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
924 ql_log(ql_log_warn, vha, 0xd042,
925 "%s: Failed to allocate ct_sns request.\n",
926 __func__);
927 goto done_free_sp;
928 }
929 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
930 memset(ct_sns, 0, sizeof(*ct_sns));
931 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
745 932
746 /* Prepare CT request */ 933 /* Prepare CT request */
747 ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD, 934 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
748 RSNN_NN_RSP_SIZE);
749 ct_rsp = &ha->ct_sns->p.rsp;
750 935
751 /* Prepare CT arguments -- node_name, symbolic node_name, size */ 936 /* Prepare CT arguments -- node_name, symbolic node_name, size */
752 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE); 937 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
@@ -754,32 +939,33 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
754 /* Prepare the Symbolic Node Name */ 939 /* Prepare the Symbolic Node Name */
755 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name, 940 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
756 sizeof(ct_req->req.rsnn_nn.sym_node_name)); 941 sizeof(ct_req->req.rsnn_nn.sym_node_name));
757
758 /* Calculate SNN length */
759 ct_req->req.rsnn_nn.name_len = 942 ct_req->req.rsnn_nn.name_len =
760 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name); 943 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
761 944
762 /* Update MS IOCB request */
763 ms_pkt->req_bytecount =
764 cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len);
765 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
766 945
767 /* Execute MS IOCB */ 946 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
768 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 947 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
769 sizeof(ms_iocb_entry_t)); 948 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
949
950 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
951 sp->done = qla2x00_async_sns_sp_done;
952
953 rval = qla2x00_start_sp(sp);
770 if (rval != QLA_SUCCESS) { 954 if (rval != QLA_SUCCESS) {
771 /*EMPTY*/ 955 ql_dbg(ql_dbg_disc, vha, 0x2043,
772 ql_dbg(ql_dbg_disc, vha, 0x2051, 956 "RFT_ID issue IOCB failed (%d).\n", rval);
773 "RSNN_NN issue IOCB failed (%d).\n", rval); 957 goto done_free_sp;
774 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
775 QLA_SUCCESS) {
776 rval = QLA_FUNCTION_FAILED;
777 } else {
778 ql_dbg(ql_dbg_disc, vha, 0x2052,
779 "RSNN_NN exiting normally.\n");
780 } 958 }
959 ql_dbg(ql_dbg_disc, vha, 0xffff,
960 "Async-%s - hdl=%x.\n",
961 sp->name, sp->handle);
781 962
782 return (rval); 963 return rval;
964
965done_free_sp:
966 sp->free(sp);
967done:
968 return rval;
783} 969}
784 970
785/** 971/**
@@ -2790,15 +2976,20 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
2790 fc_port_t *fcport = ea->fcport; 2976 fc_port_t *fcport = ea->fcport;
2791 2977
2792 ql_dbg(ql_dbg_disc, vha, 0x201d, 2978 ql_dbg(ql_dbg_disc, vha, 0x201d,
2793 "%s %8phC login state %d\n", 2979 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2794 __func__, fcport->port_name, fcport->fw_login_state); 2980 __func__, fcport->port_name, fcport->disc_state,
2981 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
2982 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
2983
2984 if (fcport->disc_state == DSC_DELETE_PEND)
2985 return;
2795 2986
2796 if (ea->sp->gen2 != fcport->login_gen) { 2987 if (ea->sp->gen2 != fcport->login_gen) {
2797 /* PLOGI/PRLI/LOGO came in while cmd was out.*/ 2988 /* PLOGI/PRLI/LOGO came in while cmd was out.*/
2798 ql_dbg(ql_dbg_disc, vha, 0x201e, 2989 ql_dbg(ql_dbg_disc, vha, 0x201e,
2799 "%s %8phC generation changed rscn %d|%d login %d|%d \n", 2990 "%s %8phC generation changed rscn %d|%d n",
2800 __func__, fcport->port_name, fcport->last_rscn_gen, 2991 __func__, fcport->port_name, fcport->last_rscn_gen,
2801 fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen); 2992 fcport->rscn_gen);
2802 return; 2993 return;
2803 } 2994 }
2804 2995
@@ -2811,7 +3002,21 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
2811 /* cable plugged into the same place */ 3002 /* cable plugged into the same place */
2812 switch (vha->host->active_mode) { 3003 switch (vha->host->active_mode) {
2813 case MODE_TARGET: 3004 case MODE_TARGET:
2814 /* NOOP. let the other guy login to us.*/ 3005 if (fcport->fw_login_state ==
3006 DSC_LS_PRLI_COMP) {
3007 u16 data[2];
3008 /*
 3009 * A late RSCN was delivered; the
 3010 * remote port has already logged in.
3011 */
3012 ql_dbg(ql_dbg_disc, vha, 0x201f,
3013 "%s %d %8phC post adisc\n",
3014 __func__, __LINE__,
3015 fcport->port_name);
3016 data[0] = data[1] = 0;
3017 qla2x00_post_async_adisc_work(
3018 vha, fcport, data);
3019 }
2815 break; 3020 break;
2816 case MODE_INITIATOR: 3021 case MODE_INITIATOR:
2817 case MODE_DUAL: 3022 case MODE_DUAL:
@@ -2820,24 +3025,29 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
2820 "%s %d %8phC post %s\n", __func__, 3025 "%s %d %8phC post %s\n", __func__,
2821 __LINE__, fcport->port_name, 3026 __LINE__, fcport->port_name,
2822 (atomic_read(&fcport->state) == 3027 (atomic_read(&fcport->state) ==
2823 FCS_ONLINE) ? "gpdb" : "gnl"); 3028 FCS_ONLINE) ? "adisc" : "gnl");
2824 3029
2825 if (atomic_read(&fcport->state) == 3030 if (atomic_read(&fcport->state) ==
2826 FCS_ONLINE) 3031 FCS_ONLINE) {
2827 qla24xx_post_gpdb_work(vha, 3032 u16 data[2];
2828 fcport, PDO_FORCE_ADISC); 3033
2829 else 3034 data[0] = data[1] = 0;
3035 qla2x00_post_async_adisc_work(
3036 vha, fcport, data);
3037 } else {
2830 qla24xx_post_gnl_work(vha, 3038 qla24xx_post_gnl_work(vha,
2831 fcport); 3039 fcport);
3040 }
2832 break; 3041 break;
2833 } 3042 }
2834 } else { /* fcport->d_id.b24 != ea->id.b24 */ 3043 } else { /* fcport->d_id.b24 != ea->id.b24 */
2835 fcport->d_id.b24 = ea->id.b24; 3044 fcport->d_id.b24 = ea->id.b24;
2836 if (fcport->deleted == QLA_SESS_DELETED) { 3045 fcport->id_changed = 1;
3046 if (fcport->deleted != QLA_SESS_DELETED) {
2837 ql_dbg(ql_dbg_disc, vha, 0x2021, 3047 ql_dbg(ql_dbg_disc, vha, 0x2021,
2838 "%s %d %8phC post del sess\n", 3048 "%s %d %8phC post del sess\n",
2839 __func__, __LINE__, fcport->port_name); 3049 __func__, __LINE__, fcport->port_name);
2840 qlt_schedule_sess_for_deletion_lock(fcport); 3050 qlt_schedule_sess_for_deletion(fcport);
2841 } 3051 }
2842 } 3052 }
2843 } else { /* ea->sp->gen1 != fcport->rscn_gen */ 3053 } else { /* ea->sp->gen1 != fcport->rscn_gen */
@@ -2854,7 +3064,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
2854 ql_dbg(ql_dbg_disc, vha, 0x2042, 3064 ql_dbg(ql_dbg_disc, vha, 0x2042,
2855 "%s %d %8phC post del sess\n", __func__, 3065 "%s %d %8phC post del sess\n", __func__,
2856 __LINE__, fcport->port_name); 3066 __LINE__, fcport->port_name);
2857 qlt_schedule_sess_for_deletion_lock(fcport); 3067 qlt_schedule_sess_for_deletion(fcport);
2858 } else { 3068 } else {
2859 ql_dbg(ql_dbg_disc, vha, 0x2045, 3069 ql_dbg(ql_dbg_disc, vha, 0x2045,
2860 "%s %d %8phC login\n", __func__, __LINE__, 3070 "%s %d %8phC login\n", __func__, __LINE__,
@@ -2878,7 +3088,7 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
2878 u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id; 3088 u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
2879 struct event_arg ea; 3089 struct event_arg ea;
2880 3090
2881 fcport->flags &= ~FCF_ASYNC_SENT; 3091 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2882 3092
2883 memset(&ea, 0, sizeof(ea)); 3093 memset(&ea, 0, sizeof(ea));
2884 ea.fcport = fcport; 3094 ea.fcport = fcport;
@@ -2889,9 +3099,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
2889 ea.rc = res; 3099 ea.rc = res;
2890 ea.event = FCME_GIDPN_DONE; 3100 ea.event = FCME_GIDPN_DONE;
2891 3101
2892 ql_dbg(ql_dbg_disc, vha, 0x204f, 3102 if (res == QLA_FUNCTION_TIMEOUT) {
2893 "Async done-%s res %x, WWPN %8phC ID %3phC \n", 3103 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
2894 sp->name, res, fcport->port_name, id); 3104 "Async done-%s WWPN %8phC timed out.\n",
3105 sp->name, fcport->port_name);
3106 qla24xx_post_gidpn_work(sp->vha, fcport);
3107 sp->free(sp);
3108 return;
3109 } else if (res) {
3110 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3111 "Async done-%s fail res %x, WWPN %8phC\n",
3112 sp->name, res, fcport->port_name);
3113 } else {
3114 ql_dbg(ql_dbg_disc, vha, 0x204f,
3115 "Async done-%s good WWPN %8phC ID %3phC\n",
3116 sp->name, fcport->port_name, id);
3117 }
2895 3118
2896 qla2x00_fcport_event_handler(vha, &ea); 3119 qla2x00_fcport_event_handler(vha, &ea);
2897 3120
@@ -2904,16 +3127,16 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
2904 struct ct_sns_req *ct_req; 3127 struct ct_sns_req *ct_req;
2905 srb_t *sp; 3128 srb_t *sp;
2906 3129
2907 if (!vha->flags.online) 3130 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
2908 goto done; 3131 return rval;
2909 3132
2910 fcport->flags |= FCF_ASYNC_SENT;
2911 fcport->disc_state = DSC_GID_PN; 3133 fcport->disc_state = DSC_GID_PN;
2912 fcport->scan_state = QLA_FCPORT_SCAN; 3134 fcport->scan_state = QLA_FCPORT_SCAN;
2913 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 3135 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
2914 if (!sp) 3136 if (!sp)
2915 goto done; 3137 goto done;
2916 3138
3139 fcport->flags |= FCF_ASYNC_SENT;
2917 sp->type = SRB_CT_PTHRU_CMD; 3140 sp->type = SRB_CT_PTHRU_CMD;
2918 sp->name = "gidpn"; 3141 sp->name = "gidpn";
2919 sp->gen1 = fcport->rscn_gen; 3142 sp->gen1 = fcport->rscn_gen;
@@ -2954,8 +3177,8 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
2954 3177
2955done_free_sp: 3178done_free_sp:
2956 sp->free(sp); 3179 sp->free(sp);
2957done:
2958 fcport->flags &= ~FCF_ASYNC_SENT; 3180 fcport->flags &= ~FCF_ASYNC_SENT;
3181done:
2959 return rval; 3182 return rval;
2960} 3183}
2961 3184
@@ -2974,6 +3197,7 @@ int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2974 return QLA_FUNCTION_FAILED; 3197 return QLA_FUNCTION_FAILED;
2975 3198
2976 e->u.fcport.fcport = fcport; 3199 e->u.fcport.fcport = fcport;
3200 fcport->flags |= FCF_ASYNC_ACTIVE;
2977 return qla2x00_post_work(vha, e); 3201 return qla2x00_post_work(vha, e);
2978} 3202}
2979 3203
@@ -2986,9 +3210,39 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2986 return QLA_FUNCTION_FAILED; 3210 return QLA_FUNCTION_FAILED;
2987 3211
2988 e->u.fcport.fcport = fcport; 3212 e->u.fcport.fcport = fcport;
3213 fcport->flags |= FCF_ASYNC_ACTIVE;
2989 return qla2x00_post_work(vha, e); 3214 return qla2x00_post_work(vha, e);
2990} 3215}
2991 3216
3217void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
3218{
3219 struct fc_port *fcport = ea->fcport;
3220
3221 ql_dbg(ql_dbg_disc, vha, 0x20d8,
3222 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
3223 __func__, fcport->port_name, fcport->disc_state,
 3224 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
 3225 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
3226
3227 if (fcport->disc_state == DSC_DELETE_PEND)
3228 return;
3229
3230 if (ea->sp->gen2 != fcport->login_gen) {
3231 /* target side must have changed it. */
3232 ql_dbg(ql_dbg_disc, vha, 0x20d3,
3233 "%s %8phC generation changed\n",
3234 __func__, fcport->port_name);
3235 return;
3236 } else if (ea->sp->gen1 != fcport->rscn_gen) {
3237 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
3238 __func__, __LINE__, fcport->port_name);
3239 qla24xx_post_gidpn_work(vha, fcport);
3240 return;
3241 }
3242
3243 qla24xx_post_upd_fcport_work(vha, ea->fcport);
3244}
3245
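The gen1/gen2 comparisons in qla24xx_handle_gpsc_event() (and in the gidpn handler above) are a staleness test: each SRB snapshots the fcport's rscn_gen and login_gen at submit time, and the completion path compares the snapshots against the live counters to decide whether the result still describes the fabric. A self-contained sketch of that pattern, with illustrative types:

#include <stdio.h>

struct port { unsigned login_gen, rscn_gen; };
struct cmd  { unsigned gen1, gen2; };	/* snapshots taken at submit time */

/* Return 1 if the completion is still current, 0 if it went stale. */
static int completion_is_current(const struct cmd *c, const struct port *p)
{
	if (c->gen2 != p->login_gen)
		return 0;	/* a PLOGI/PRLI/LOGO raced with the command */
	if (c->gen1 != p->rscn_gen)
		return 0;	/* another RSCN arrived; redo the name query */
	return 1;
}

int main(void)
{
	struct port p = { .login_gen = 5, .rscn_gen = 2 };
	struct cmd  c = { .gen1 = 2, .gen2 = 5 };

	p.rscn_gen++;	/* an RSCN lands while the command is in flight */
	printf("current=%d\n", completion_is_current(&c, &p));
	return 0;
}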
2992static void qla24xx_async_gpsc_sp_done(void *s, int res) 3246static void qla24xx_async_gpsc_sp_done(void *s, int res)
2993{ 3247{
2994 struct srb *sp = s; 3248 struct srb *sp = s;
@@ -3004,7 +3258,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
3004 "Async done-%s res %x, WWPN %8phC \n", 3258 "Async done-%s res %x, WWPN %8phC \n",
3005 sp->name, res, fcport->port_name); 3259 sp->name, res, fcport->port_name);
3006 3260
3007 fcport->flags &= ~FCF_ASYNC_SENT; 3261 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3008 3262
3009 if (res == (DID_ERROR << 16)) { 3263 if (res == (DID_ERROR << 16)) {
3010 /* entry status error */ 3264 /* entry status error */
@@ -3055,6 +3309,7 @@ done:
3055 ea.event = FCME_GPSC_DONE; 3309 ea.event = FCME_GPSC_DONE;
3056 ea.rc = res; 3310 ea.rc = res;
3057 ea.fcport = fcport; 3311 ea.fcport = fcport;
3312 ea.sp = sp;
3058 qla2x00_fcport_event_handler(vha, &ea); 3313 qla2x00_fcport_event_handler(vha, &ea);
3059 3314
3060 sp->free(sp); 3315 sp->free(sp);
@@ -3066,14 +3321,14 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3066 struct ct_sns_req *ct_req; 3321 struct ct_sns_req *ct_req;
3067 srb_t *sp; 3322 srb_t *sp;
3068 3323
3069 if (!vha->flags.online) 3324 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3070 goto done; 3325 return rval;
3071 3326
3072 fcport->flags |= FCF_ASYNC_SENT;
3073 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 3327 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3074 if (!sp) 3328 if (!sp)
3075 goto done; 3329 goto done;
3076 3330
3331 fcport->flags |= FCF_ASYNC_SENT;
3077 sp->type = SRB_CT_PTHRU_CMD; 3332 sp->type = SRB_CT_PTHRU_CMD;
3078 sp->name = "gpsc"; 3333 sp->name = "gpsc";
3079 sp->gen1 = fcport->rscn_gen; 3334 sp->gen1 = fcport->rscn_gen;
@@ -3113,8 +3368,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3113 3368
3114done_free_sp: 3369done_free_sp:
3115 sp->free(sp); 3370 sp->free(sp);
3116done:
3117 fcport->flags &= ~FCF_ASYNC_SENT; 3371 fcport->flags &= ~FCF_ASYNC_SENT;
3372done:
3118 return rval; 3373 return rval;
3119} 3374}
3120 3375
@@ -3133,7 +3388,7 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3133 return qla2x00_post_work(vha, e); 3388 return qla2x00_post_work(vha, e);
3134} 3389}
3135 3390
3136void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp) 3391void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3137{ 3392{
3138 if (sp->u.iocb_cmd.u.ctarg.req) { 3393 if (sp->u.iocb_cmd.u.ctarg.req) {
3139 dma_free_coherent(&vha->hw->pdev->dev, 3394 dma_free_coherent(&vha->hw->pdev->dev,
@@ -3155,43 +3410,137 @@ void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
3155 3410
3156void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) 3411void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3157{ 3412{
3158 fc_port_t *fcport; 3413 fc_port_t *fcport, *conflict, *t;
3159 unsigned long flags; 3414 u16 data[2];
3160 3415
3161 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3416 ql_dbg(ql_dbg_disc, vha, 0xffff,
3162 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1); 3417 "%s %d port_id: %06x\n",
3163 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 3418 __func__, __LINE__, ea->id.b24);
3164 3419
3165 if (fcport) { 3420 if (ea->rc) {
3166 /* cable moved. just plugged in */ 3421 /* cable is disconnected */
3167 fcport->rscn_gen++; 3422 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3168 fcport->d_id = ea->id; 3423 if (fcport->d_id.b24 == ea->id.b24) {
3169 fcport->scan_state = QLA_FCPORT_FOUND; 3424 ql_dbg(ql_dbg_disc, vha, 0xffff,
3170 fcport->flags |= FCF_FABRIC_DEVICE; 3425 "%s %d %8phC DS %d\n",
3171 3426 __func__, __LINE__,
3172 switch (fcport->disc_state) { 3427 fcport->port_name,
3173 case DSC_DELETED: 3428 fcport->disc_state);
3174 ql_dbg(ql_dbg_disc, vha, 0x210d, 3429 fcport->scan_state = QLA_FCPORT_SCAN;
3175 "%s %d %8phC login\n", __func__, __LINE__, 3430 switch (fcport->disc_state) {
3176 fcport->port_name); 3431 case DSC_DELETED:
3177 qla24xx_fcport_handle_login(vha, fcport); 3432 case DSC_DELETE_PEND:
3178 break; 3433 break;
3179 case DSC_DELETE_PEND: 3434 default:
3180 break; 3435 ql_dbg(ql_dbg_disc, vha, 0xffff,
3181 default: 3436 "%s %d %8phC post del sess\n",
3182 ql_dbg(ql_dbg_disc, vha, 0x2064, 3437 __func__, __LINE__,
3183 "%s %d %8phC post del sess\n", 3438 fcport->port_name);
3184 __func__, __LINE__, fcport->port_name); 3439 qlt_schedule_sess_for_deletion(fcport);
3185 qlt_schedule_sess_for_deletion_lock(fcport); 3440 break;
3186 break; 3441 }
3442 }
3187 } 3443 }
3188 } else { 3444 } else {
3189 /* create new fcport */ 3445 /* cable is connected */
3190 ql_dbg(ql_dbg_disc, vha, 0x2065, 3446 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3191 "%s %d %8phC post new sess\n", 3447 if (fcport) {
3192 __func__, __LINE__, ea->port_name); 3448 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3449 list) {
3450 if ((conflict->d_id.b24 == ea->id.b24) &&
3451 (fcport != conflict)) {
 3452 /* Two fcports have conflicting Nport IDs, or
 3453 * an existing fcport's Nport ID conflicts
 3454 * with the new fcport.
3455 */
3456
3457 ql_dbg(ql_dbg_disc, vha, 0xffff,
3458 "%s %d %8phC DS %d\n",
3459 __func__, __LINE__,
3460 conflict->port_name,
3461 conflict->disc_state);
3462 conflict->scan_state = QLA_FCPORT_SCAN;
3463 switch (conflict->disc_state) {
3464 case DSC_DELETED:
3465 case DSC_DELETE_PEND:
3466 break;
3467 default:
3468 ql_dbg(ql_dbg_disc, vha, 0xffff,
3469 "%s %d %8phC post del sess\n",
3470 __func__, __LINE__,
3471 conflict->port_name);
3472 qlt_schedule_sess_for_deletion
3473 (conflict);
3474 break;
3475 }
3476 }
3477 }
3478
3479 fcport->rscn_gen++;
3480 fcport->scan_state = QLA_FCPORT_FOUND;
3481 fcport->flags |= FCF_FABRIC_DEVICE;
3482 switch (fcport->disc_state) {
3483 case DSC_LOGIN_COMPLETE:
 3484 /* Recheck that the session is still intact. */
3485 ql_dbg(ql_dbg_disc, vha, 0x210d,
3486 "%s %d %8phC revalidate session with ADISC\n",
3487 __func__, __LINE__, fcport->port_name);
3488 data[0] = data[1] = 0;
3489 qla2x00_post_async_adisc_work(vha, fcport,
3490 data);
3491 break;
3492 case DSC_DELETED:
3493 ql_dbg(ql_dbg_disc, vha, 0x210d,
3494 "%s %d %8phC login\n", __func__, __LINE__,
3495 fcport->port_name);
3496 fcport->d_id = ea->id;
3497 qla24xx_fcport_handle_login(vha, fcport);
3498 break;
3499 case DSC_DELETE_PEND:
3500 fcport->d_id = ea->id;
3501 break;
3502 default:
3503 fcport->d_id = ea->id;
3504 break;
3505 }
3506 } else {
3507 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3508 list) {
3509 if (conflict->d_id.b24 == ea->id.b24) {
 3510 /* Two fcports have conflicting Nport IDs, or
 3511 * an existing fcport's Nport ID conflicts
 3512 * with the new fcport.
3513 */
3514 ql_dbg(ql_dbg_disc, vha, 0xffff,
3515 "%s %d %8phC DS %d\n",
3516 __func__, __LINE__,
3517 conflict->port_name,
3518 conflict->disc_state);
3519
3520 conflict->scan_state = QLA_FCPORT_SCAN;
3521 switch (conflict->disc_state) {
3522 case DSC_DELETED:
3523 case DSC_DELETE_PEND:
3524 break;
3525 default:
3526 ql_dbg(ql_dbg_disc, vha, 0xffff,
3527 "%s %d %8phC post del sess\n",
3528 __func__, __LINE__,
3529 conflict->port_name);
3530 qlt_schedule_sess_for_deletion
3531 (conflict);
3532 break;
3533 }
3534 }
3535 }
3193 3536
3194 qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL); 3537 /* create new fcport */
3538 ql_dbg(ql_dbg_disc, vha, 0x2065,
3539 "%s %d %8phC post new sess\n",
3540 __func__, __LINE__, ea->port_name);
3541 qla24xx_post_newsess_work(vha, &ea->id,
3542 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
3543 }
3195 } 3544 }
3196} 3545}
3197 3546
@@ -3205,11 +3554,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
3205 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; 3554 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3206 struct event_arg ea; 3555 struct event_arg ea;
3207 struct qla_work_evt *e; 3556 struct qla_work_evt *e;
3557 unsigned long flags;
3208 3558
3209 ql_dbg(ql_dbg_disc, vha, 0x2066, 3559 if (res)
3210 "Async done-%s res %x ID %3phC. %8phC\n", 3560 ql_dbg(ql_dbg_disc, vha, 0x2066,
3211 sp->name, res, ct_req->req.port_id.port_id, 3561 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3212 ct_rsp->rsp.gpn_id.port_name); 3562 sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
3563 ct_rsp->rsp.gpn_id.port_name);
3564 else
3565 ql_dbg(ql_dbg_disc, vha, 0x2066,
3566 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3567 sp->name, sp->gen1, ct_req->req.port_id.port_id,
3568 ct_rsp->rsp.gpn_id.port_name);
3213 3569
3214 memset(&ea, 0, sizeof(ea)); 3570 memset(&ea, 0, sizeof(ea));
3215 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); 3571 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
@@ -3220,9 +3576,26 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
3220 ea.rc = res; 3576 ea.rc = res;
3221 ea.event = FCME_GPNID_DONE; 3577 ea.event = FCME_GPNID_DONE;
3222 3578
3579 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3580 list_del(&sp->elem);
3581 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3582
3583 if (res) {
3584 if (res == QLA_FUNCTION_TIMEOUT) {
3585 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3586 sp->free(sp);
3587 return;
3588 }
3589 } else if (sp->gen1) {
3590 /* There was another RSCN for this Nport ID */
3591 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3592 sp->free(sp);
3593 return;
3594 }
3595
3223 qla2x00_fcport_event_handler(vha, &ea); 3596 qla2x00_fcport_event_handler(vha, &ea);
3224 3597
3225 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE); 3598 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3226 if (!e) { 3599 if (!e) {
 3227 /* Please ignore the kernel warning; otherwise we leak memory. */ 3600 /* Please ignore the kernel warning; otherwise we leak memory. */
3228 if (sp->u.iocb_cmd.u.ctarg.req) { 3601 if (sp->u.iocb_cmd.u.ctarg.req) {
@@ -3253,8 +3626,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3253{ 3626{
3254 int rval = QLA_FUNCTION_FAILED; 3627 int rval = QLA_FUNCTION_FAILED;
3255 struct ct_sns_req *ct_req; 3628 struct ct_sns_req *ct_req;
3256 srb_t *sp; 3629 srb_t *sp, *tsp;
3257 struct ct_sns_pkt *ct_sns; 3630 struct ct_sns_pkt *ct_sns;
3631 unsigned long flags;
3258 3632
3259 if (!vha->flags.online) 3633 if (!vha->flags.online)
3260 goto done; 3634 goto done;
@@ -3265,8 +3639,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3265 3639
3266 sp->type = SRB_CT_PTHRU_CMD; 3640 sp->type = SRB_CT_PTHRU_CMD;
3267 sp->name = "gpnid"; 3641 sp->name = "gpnid";
3642 sp->u.iocb_cmd.u.ctarg.id = *id;
3643 sp->gen1 = 0;
3268 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 3644 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3269 3645
3646 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3647 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3648 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3649 tsp->gen1++;
3650 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3651 sp->free(sp);
3652 goto done;
3653 }
3654 }
3655 list_add_tail(&sp->elem, &vha->gpnid_list);
3656 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3657
3270 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, 3658 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3271 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, 3659 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3272 GFP_KERNEL); 3660 GFP_KERNEL);
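The gpnid_list walk added above coalesces in-flight GPN_ID queries: a second request for the same port ID just bumps gen1 on the outstanding SRB, whose completion then re-posts the query once, instead of issuing a duplicate command. A small standalone model of that test (the driver performs the walk under tgt.sess_lock; locking is omitted here):

#include <stdbool.h>
#include <stdint.h>

struct inflight {
	uint32_t id;	/* 24-bit port ID, as in port_id_t.b24 */
	unsigned gen;	/* mirrors sp->gen1 */
};

/* If a GPN_ID for this port ID is already outstanding, bump its
 * generation so its completion re-posts the query once, and report
 * that no duplicate command should be issued. */
static bool should_issue(struct inflight *list, int n, uint32_t id)
{
	for (int i = 0; i < n; i++) {
		if (list[i].id == id) {
			list[i].gen++;
			return false;
		}
	}
	return true;	/* caller appends itself to the list and issues */
}

int main(void)
{
	struct inflight list[1] = { { 0x010203, 0 } };

	return should_issue(list, 1, 0x010203);	/* exits 0: coalesced */
}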
@@ -3393,7 +3781,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3393 struct ct_sns_req *ct_req; 3781 struct ct_sns_req *ct_req;
3394 srb_t *sp; 3782 srb_t *sp;
3395 3783
3396 if (!vha->flags.online) 3784 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3397 return rval; 3785 return rval;
3398 3786
3399 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 3787 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -3441,3 +3829,720 @@ done_free_sp:
3441 fcport->flags &= ~FCF_ASYNC_SENT; 3829 fcport->flags &= ~FCF_ASYNC_SENT;
3442 return rval; 3830 return rval;
3443} 3831}
3832
 3833/* GPN_FT + GNN_FT */
3834static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3835{
3836 struct qla_hw_data *ha = vha->hw;
3837 scsi_qla_host_t *vp;
3838 unsigned long flags;
3839 u64 twwn;
3840 int rc = 0;
3841
3842 if (!ha->num_vhosts)
3843 return 0;
3844
3845 spin_lock_irqsave(&ha->vport_slock, flags);
3846 list_for_each_entry(vp, &ha->vp_list, list) {
3847 twwn = wwn_to_u64(vp->port_name);
3848 if (wwn == twwn) {
3849 rc = 1;
3850 break;
3851 }
3852 }
3853 spin_unlock_irqrestore(&ha->vport_slock, flags);
3854
3855 return rc;
3856}
3857
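qla2x00_is_a_vp() is a linear membership test over the adapter's own vport WWPNs, taken under vport_slock, so the fabric-scan results can skip NPIV ports that belong to this host. The same check modeled standalone, without the locking:

#include <stdint.h>
#include <stdbool.h>

/* Linear scan of this host's own vport WWNs, as qla2x00_is_a_vp()
 * does over ha->vp_list (there under vport_slock). */
static bool wwn_is_local_vport(uint64_t wwn, const uint64_t *vp_wwns, int n)
{
	for (int i = 0; i < n; i++)
		if (vp_wwns[i] == wwn)
			return true;
	return false;
}

int main(void)
{
	const uint64_t vports[2] = { 0x200000AA, 0x200000BB };

	return wwn_is_local_vport(0x200000CC, vports, 2);	/* exits 0 */
}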
3858void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3859{
3860 fc_port_t *fcport;
3861 u32 i, rc;
3862 bool found;
3863 u8 fc4type = sp->gen2;
3864 struct fab_scan_rp *rp;
3865 unsigned long flags;
3866
3867 ql_dbg(ql_dbg_disc, vha, 0xffff,
3868 "%s enter\n", __func__);
3869
3870 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3871 ql_dbg(ql_dbg_disc, vha, 0xffff,
3872 "%s scan stop due to chip reset %x/%x\n",
3873 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3874 goto out;
3875 }
3876
3877 rc = sp->rc;
3878 if (rc) {
3879 vha->scan.scan_retry++;
3880 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3881 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3882 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3883 } else {
3884 ql_dbg(ql_dbg_disc, vha, 0xffff,
3885 "Fabric scan failed on all retries.\n");
3886 }
3887 goto out;
3888 }
3889 vha->scan.scan_retry = 0;
3890
3891 list_for_each_entry(fcport, &vha->vp_fcports, list)
3892 fcport->scan_state = QLA_FCPORT_SCAN;
3893
3894 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3895 u64 wwn;
3896
3897 rp = &vha->scan.l[i];
3898 found = false;
3899
3900 wwn = wwn_to_u64(rp->port_name);
3901 if (wwn == 0)
3902 continue;
3903
3904 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3905 continue;
3906
3907 /* Bypass reserved domain fields. */
3908 if ((rp->id.b.domain & 0xf0) == 0xf0)
3909 continue;
3910
3911 /* Bypass virtual ports of the same host. */
3912 if (qla2x00_is_a_vp(vha, wwn))
3913 continue;
3914
3915 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3916 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3917 continue;
3918 fcport->scan_state = QLA_FCPORT_FOUND;
3919 fcport->d_id.b24 = rp->id.b24;
3920 found = true;
3921 /*
 3922 * If the device was not a fabric device before, make it one now.
3923 */
3924 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3925 qla2x00_clear_loop_id(fcport);
3926 fcport->flags |= FCF_FABRIC_DEVICE;
3927 }
3928 break;
3929 }
3930
3931 if (!found) {
3932 ql_dbg(ql_dbg_disc, vha, 0xffff,
3933 "%s %d %8phC post new sess\n",
3934 __func__, __LINE__, rp->port_name);
3935 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3936 rp->node_name, NULL, fc4type);
3937 }
3938 }
3939
3940 /*
 3941 * Log out all fabric devices previously marked lost, except FCP2 devices.
3942 */
3943 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3944 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3945 continue;
3946
3947 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3948 if ((qla_dual_mode_enabled(vha) ||
3949 qla_ini_mode_enabled(vha)) &&
3950 atomic_read(&fcport->state) == FCS_ONLINE) {
3951 qla2x00_mark_device_lost(vha, fcport,
3952 ql2xplogiabsentdevice, 0);
3953
3954 if (fcport->loop_id != FC_NO_LOOP_ID &&
3955 (fcport->flags & FCF_FCP2_DEVICE) == 0) {
3956 ql_dbg(ql_dbg_disc, vha, 0x20f0,
3957 "%s %d %8phC post del sess\n",
3958 __func__, __LINE__,
3959 fcport->port_name);
3960
3961 qlt_schedule_sess_for_deletion(fcport);
3962 continue;
3963 }
3964 }
3965 } else
3966 qla24xx_fcport_handle_login(vha, fcport);
3967 }
3968
3969out:
3970 qla24xx_sp_unmap(vha, sp);
3971 spin_lock_irqsave(&vha->work_lock, flags);
3972 vha->scan.scan_flags &= ~SF_SCANNING;
3973 spin_unlock_irqrestore(&vha->work_lock, flags);
3974}
3975
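qla24xx_async_gnnft_done() reconciles the name server's view with the driver's fcport list by mark-and-sweep: every port is first marked QLA_FCPORT_SCAN, ports matched against the GPN_FT/GNN_FT results flip to QLA_FCPORT_FOUND, and whatever is still marked SCAN afterwards is logged out (FCP2 devices excepted). A compact standalone model of that pass; the driver additionally filters its own WWPN, reserved domains, and local vports:

#include <stdint.h>

enum scan_state { SCAN, FOUND };

struct port { uint64_t wwpn; enum scan_state state; };

/* Mark every known port, sweep against the fabric's report, and
 * return how many ports were not seen (candidates for teardown). */
static int reconcile(struct port *ports, int nports,
		     const uint64_t *report, int nreport)
{
	int lost = 0;

	for (int i = 0; i < nports; i++)
		ports[i].state = SCAN;			/* mark */

	for (int r = 0; r < nreport; r++)		/* sweep */
		for (int i = 0; i < nports; i++)
			if (ports[i].wwpn == report[r])
				ports[i].state = FOUND;

	for (int i = 0; i < nports; i++)
		if (ports[i].state == SCAN)
			lost++;		/* driver schedules session deletion */
	return lost;
}

int main(void)
{
	struct port ports[2] = { { 0x1111, FOUND }, { 0x2222, FOUND } };
	const uint64_t report[1] = { 0x1111 };

	return reconcile(ports, 2, report, 1);	/* exits 1: one port lost */
}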
3976static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
3977{
3978 struct srb *sp = s;
3979 struct scsi_qla_host *vha = sp->vha;
3980 struct qla_work_evt *e;
3981 struct ct_sns_req *ct_req =
3982 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3983 struct ct_sns_gpnft_rsp *ct_rsp =
3984 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3985 struct ct_sns_gpn_ft_data *d;
3986 struct fab_scan_rp *rp;
3987 int i, j, k;
3988 u16 cmd = be16_to_cpu(ct_req->command);
3989
 3990 /* the gen2 field holds the fc4_type */
3991 ql_dbg(ql_dbg_disc, vha, 0xffff,
3992 "Async done-%s res %x FC4Type %x\n",
3993 sp->name, res, sp->gen2);
3994
3995 if (res) {
3996 unsigned long flags;
3997
3998 sp->free(sp);
3999 spin_lock_irqsave(&vha->work_lock, flags);
4000 vha->scan.scan_flags &= ~SF_SCANNING;
4001 vha->scan.scan_retry++;
4002 spin_unlock_irqrestore(&vha->work_lock, flags);
4003
4004 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
4005 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4006 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4007 qla2xxx_wake_dpc(vha);
4008 } else {
4009 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
4010 "Async done-%s rescan failed on all retries\n",
4011 sp->name);
4012 }
4013 return;
4014 }
4015
4016 if (!res) {
4017 port_id_t id;
4018 u64 wwn;
4019
4020 j = 0;
4021 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
4022 d = &ct_rsp->entries[i];
4023
4024 id.b.rsvd_1 = 0;
4025 id.b.domain = d->port_id[0];
4026 id.b.area = d->port_id[1];
4027 id.b.al_pa = d->port_id[2];
4028 wwn = wwn_to_u64(d->port_name);
4029
4030 if (id.b24 == 0 || wwn == 0)
4031 continue;
4032
4033 if (cmd == GPN_FT_CMD) {
4034 rp = &vha->scan.l[j];
4035 rp->id = id;
4036 memcpy(rp->port_name, d->port_name, 8);
4037 j++;
4038 } else {/* GNN_FT_CMD */
4039 for (k = 0; k < vha->hw->max_fibre_devices;
4040 k++) {
4041 rp = &vha->scan.l[k];
4042 if (id.b24 == rp->id.b24) {
4043 memcpy(rp->node_name,
4044 d->port_name, 8);
4045 break;
4046 }
4047 }
4048 }
4049 }
4050 }
4051
4052 if (cmd == GPN_FT_CMD)
4053 e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
4054 else
4055 e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
4056 if (!e) {
 4057 /* Please ignore the kernel warning; otherwise we leak memory. */
4058 if (sp->u.iocb_cmd.u.ctarg.req) {
4059 dma_free_coherent(&vha->hw->pdev->dev,
4060 sizeof(struct ct_sns_pkt),
4061 sp->u.iocb_cmd.u.ctarg.req,
4062 sp->u.iocb_cmd.u.ctarg.req_dma);
4063 sp->u.iocb_cmd.u.ctarg.req = NULL;
4064 }
4065 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4066 dma_free_coherent(&vha->hw->pdev->dev,
4067 sizeof(struct ct_sns_pkt),
4068 sp->u.iocb_cmd.u.ctarg.rsp,
4069 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4070 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4071 }
4072
4073 ql_dbg(ql_dbg_disc, vha, 0xffff,
4074 "Async done-%s unable to alloc work element\n",
4075 sp->name);
4076 sp->free(sp);
4077 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4078 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4079 return;
4080 }
4081
4082 sp->rc = res;
4083 e->u.iosb.sp = sp;
4084
4085 qla2x00_post_work(vha, e);
4086}
4087
4088/*
4089 * Get WWNN list for fc4_type
4090 *
 4091 * The same SRB is assumed to be re-used from the preceding GPN_FT
 4092 * to avoid freeing and re-allocating the DMA buffers.
4093 */
4094static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
4095 u8 fc4_type)
4096{
4097 int rval = QLA_FUNCTION_FAILED;
4098 struct ct_sns_req *ct_req;
4099 struct ct_sns_pkt *ct_sns;
4100
4101 if (!vha->flags.online) {
4102 vha->scan.scan_flags &= ~SF_SCANNING;
4103 goto done_free_sp;
4104 }
4105
4106 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
4107 ql_log(ql_log_warn, vha, 0xffff,
4108 "%s: req %p rsp %p are not setup\n",
4109 __func__, sp->u.iocb_cmd.u.ctarg.req,
4110 sp->u.iocb_cmd.u.ctarg.rsp);
4111 vha->scan.scan_flags &= ~SF_SCANNING;
4112 WARN_ON(1);
4113 goto done_free_sp;
4114 }
4115 sp->type = SRB_CT_PTHRU_CMD;
4116 sp->name = "gnnft";
4117 sp->gen1 = vha->hw->base_qpair->chip_reset;
4118 sp->gen2 = fc4_type;
4119 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4120
4121 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4122 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4123
4124 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4125 /* CT_IU preamble */
4126 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
4127 sp->u.iocb_cmd.u.ctarg.rsp_size);
4128
 4129 /* GNN_FT req (reuses the gpn_ft request layout) */
4130 ct_req->req.gpn_ft.port_type = fc4_type;
4131
4132 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
4133 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4134
4135 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4136 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4137
4138 rval = qla2x00_start_sp(sp);
4139 if (rval != QLA_SUCCESS)
4140 goto done_free_sp;
4141
4142 ql_dbg(ql_dbg_disc, vha, 0xffff,
4143 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4144 sp->handle, ct_req->req.gpn_ft.port_type);
4145 return rval;
4146
4147done_free_sp:
4148 if (sp->u.iocb_cmd.u.ctarg.req) {
4149 dma_free_coherent(&vha->hw->pdev->dev,
4150 sizeof(struct ct_sns_pkt),
4151 sp->u.iocb_cmd.u.ctarg.req,
4152 sp->u.iocb_cmd.u.ctarg.req_dma);
4153 sp->u.iocb_cmd.u.ctarg.req = NULL;
4154 }
4155 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4156 dma_free_coherent(&vha->hw->pdev->dev,
4157 sizeof(struct ct_sns_pkt),
4158 sp->u.iocb_cmd.u.ctarg.rsp,
4159 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4160 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4161 }
4162
4163 sp->free(sp);
4164
4165 return rval;
4166} /* GNNFT */
4167
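As the comment above qla24xx_async_gnnft() notes, the GNN_FT phase re-uses the SRB and DMA buffers allocated for GPN_FT: the buffers are zeroed and the request rebuilt rather than freed and re-allocated, since coherent DMA allocations are comparatively expensive. A sketch of that reuse step with illustrative names; the !req/!rsp guard mirrors the WARN_ON path above:

#include <stdlib.h>
#include <string.h>

struct scan_buf { void *req, *rsp; size_t req_size, rsp_size; };

/* Phase two (GNN_FT) reuses the buffers allocated for phase one
 * (GPN_FT): zero them and rewrite the request instead of freeing
 * and re-allocating coherent DMA memory. */
static int start_second_phase(struct scan_buf *b, size_t new_req_size)
{
	if (!b->req || !b->rsp)
		return -1;		/* setup bug; the driver WARNs here */
	memset(b->rsp, 0, b->rsp_size);
	memset(b->req, 0, b->req_size);
	b->req_size = new_req_size;	/* the GNN_FT request size differs */
	return 0;	/* caller rebuilds the CT_IU and resubmits the SRB */
}

int main(void)
{
	struct scan_buf b = { calloc(1, 64), calloc(1, 4096), 64, 4096 };
	int rc = start_second_phase(&b, 16);

	free(b.rsp);
	free(b.req);
	return rc;
}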
4168void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
4169{
4170 ql_dbg(ql_dbg_disc, vha, 0xffff,
4171 "%s enter\n", __func__);
4172 del_timer(&sp->u.iocb_cmd.timer);
4173 qla24xx_async_gnnft(vha, sp, sp->gen2);
4174}
4175
 4176/* Get WWPN list for a given fc4_type */
4177int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type)
4178{
4179 int rval = QLA_FUNCTION_FAILED;
4180 struct ct_sns_req *ct_req;
4181 srb_t *sp;
4182 struct ct_sns_pkt *ct_sns;
4183 u32 rspsz;
4184 unsigned long flags;
4185
4186 if (!vha->flags.online)
4187 return rval;
4188
4189 spin_lock_irqsave(&vha->work_lock, flags);
4190 if (vha->scan.scan_flags & SF_SCANNING) {
4191 spin_unlock_irqrestore(&vha->work_lock, flags);
4192 ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
4193 return rval;
4194 }
4195 vha->scan.scan_flags |= SF_SCANNING;
4196 spin_unlock_irqrestore(&vha->work_lock, flags);
4197
4198 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
4199 if (!sp) {
4200 vha->scan.scan_flags &= ~SF_SCANNING;
4201 return rval;
4202 }
4203
4204 sp->type = SRB_CT_PTHRU_CMD;
4205 sp->name = "gpnft";
4206 sp->gen1 = vha->hw->base_qpair->chip_reset;
4207 sp->gen2 = fc4_type;
4208 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4209
4210 sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(&vha->hw->pdev->dev,
4211 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
4212 GFP_KERNEL);
4213 if (!sp->u.iocb_cmd.u.ctarg.req) {
4214 ql_log(ql_log_warn, vha, 0xffff,
4215 "Failed to allocate ct_sns request.\n");
4216 vha->scan.scan_flags &= ~SF_SCANNING;
4217 goto done_free_sp;
4218 }
4219
4220 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4221 ((vha->hw->max_fibre_devices - 1) *
4222 sizeof(struct ct_sns_gpn_ft_data));
4223
4224 sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(&vha->hw->pdev->dev,
4225 rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
4226 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4227 ql_log(ql_log_warn, vha, 0xffff,
4228 "Failed to allocate ct_sns request.\n");
4229 vha->scan.scan_flags &= ~SF_SCANNING;
4230 goto done_free_sp;
4231 }
4232
4233 memset(vha->scan.l, 0, vha->scan.size);
4234
4235 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4236 /* CT_IU preamble */
4237 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4238
4239 /* GPN_FT req */
4240 ct_req->req.gpn_ft.port_type = fc4_type;
4241
4242 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
4243 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4244 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4245
4246 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4247 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4248
4249 rval = qla2x00_start_sp(sp);
4250 if (rval != QLA_SUCCESS) {
4251 vha->scan.scan_flags &= ~SF_SCANNING;
4252 goto done_free_sp;
4253 }
4254
4255 ql_dbg(ql_dbg_disc, vha, 0xffff,
4256 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4257 sp->handle, ct_req->req.gpn_ft.port_type);
4258 return rval;
4259
4260done_free_sp:
4261 if (sp->u.iocb_cmd.u.ctarg.req) {
4262 dma_free_coherent(&vha->hw->pdev->dev,
4263 sizeof(struct ct_sns_pkt),
4264 sp->u.iocb_cmd.u.ctarg.req,
4265 sp->u.iocb_cmd.u.ctarg.req_dma);
4266 sp->u.iocb_cmd.u.ctarg.req = NULL;
4267 }
4268 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4269 dma_free_coherent(&vha->hw->pdev->dev,
4270 sizeof(struct ct_sns_pkt),
4271 sp->u.iocb_cmd.u.ctarg.rsp,
4272 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4273 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4274 }
4275
4276 sp->free(sp);
4277
4278 return rval;
4279}
4280
4281void qla_scan_work_fn(struct work_struct *work)
4282{
4283 struct fab_scan *s = container_of(to_delayed_work(work),
4284 struct fab_scan, scan_work);
4285 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4286 scan);
4287 unsigned long flags;
4288
4289 ql_dbg(ql_dbg_disc, vha, 0xffff,
4290 "%s: schedule loop resync\n", __func__);
4291 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4292 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4293 qla2xxx_wake_dpc(vha);
4294 spin_lock_irqsave(&vha->work_lock, flags);
4295 vha->scan.scan_flags &= ~SF_QUEUED;
4296 spin_unlock_irqrestore(&vha->work_lock, flags);
4297}
4298
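qla_scan_work_fn() itself only flags a loop resync for the DPC thread and then clears SF_QUEUED under work_lock; SF_SCANNING separately gates the scan pipeline. A tiny model of that flag-gated scheduling, assuming (as the clear here implies) that the scheduling side sets SF_QUEUED before arming the delayed work:

#include <stdbool.h>

#define SF_SCANNING 0x01	/* a fabric scan is in flight */
#define SF_QUEUED   0x02	/* a deferred scan is already pending */

/* Coalesce scan requests: at most one deferred scan is queued at a
 * time, and callers that find a scan pending simply piggy-back on it. */
static bool maybe_queue_scan(unsigned *flags)
{
	if (*flags & (SF_SCANNING | SF_QUEUED))
		return false;	/* the driver tests this under work_lock */
	*flags |= SF_QUEUED;
	return true;		/* caller arms the delayed scan work */
}

int main(void)
{
	unsigned flags = 0;

	maybe_queue_scan(&flags);		/* arms the work */
	return maybe_queue_scan(&flags);	/* exits 0: coalesced */
}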
4299/* GNN_ID */
4300void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4301{
4302 qla24xx_post_gnl_work(vha, ea->fcport);
4303}
4304
4305static void qla2x00_async_gnnid_sp_done(void *s, int res)
4306{
4307 struct srb *sp = s;
4308 struct scsi_qla_host *vha = sp->vha;
4309 fc_port_t *fcport = sp->fcport;
4310 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4311 struct event_arg ea;
4312 u64 wwnn;
4313
4314 fcport->flags &= ~FCF_ASYNC_SENT;
4315 wwnn = wwn_to_u64(node_name);
4316 if (wwnn)
4317 memcpy(fcport->node_name, node_name, WWN_SIZE);
4318
4319 memset(&ea, 0, sizeof(ea));
4320 ea.fcport = fcport;
4321 ea.sp = sp;
4322 ea.rc = res;
4323 ea.event = FCME_GNNID_DONE;
4324
4325 ql_dbg(ql_dbg_disc, vha, 0x204f,
4326 "Async done-%s res %x, WWPN %8phC %8phC\n",
4327 sp->name, res, fcport->port_name, fcport->node_name);
4328
4329 qla2x00_fcport_event_handler(vha, &ea);
4330
4331 sp->free(sp);
4332}
4333
4334int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4335{
4336 int rval = QLA_FUNCTION_FAILED;
4337 struct ct_sns_req *ct_req;
4338 srb_t *sp;
4339
4340 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4341 return rval;
4342
4343 fcport->disc_state = DSC_GNN_ID;
4344 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4345 if (!sp)
4346 goto done;
4347
4348 fcport->flags |= FCF_ASYNC_SENT;
4349 sp->type = SRB_CT_PTHRU_CMD;
4350 sp->name = "gnnid";
4351 sp->gen1 = fcport->rscn_gen;
4352 sp->gen2 = fcport->login_gen;
4353
4354 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4355
4356 /* CT_IU preamble */
4357 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
4358 GNN_ID_RSP_SIZE);
4359
4360 /* GNN_ID req */
4361 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
4362 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4363 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4364
4365
4366 /* req & rsp use the same buffer */
4367 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4368 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4369 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4370 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4371 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4372 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4373 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4374
4375 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4376 sp->done = qla2x00_async_gnnid_sp_done;
4377
4378 rval = qla2x00_start_sp(sp);
4379 if (rval != QLA_SUCCESS)
4380 goto done_free_sp;
4381 ql_dbg(ql_dbg_disc, vha, 0xffff,
4382 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4383 sp->name, fcport->port_name,
4384 sp->handle, fcport->loop_id, fcport->d_id.b24);
4385 return rval;
4386
4387done_free_sp:
4388 sp->free(sp);
4389 fcport->flags &= ~FCF_ASYNC_SENT;
4390done:
4391 return rval;
4392}
4393
4394int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4395{
4396 struct qla_work_evt *e;
4397 int ls;
4398
4399 ls = atomic_read(&vha->loop_state);
4400 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4401 test_bit(UNLOADING, &vha->dpc_flags))
4402 return 0;
4403
4404 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
4405 if (!e)
4406 return QLA_FUNCTION_FAILED;
4407
4408 e->u.fcport.fcport = fcport;
4409 return qla2x00_post_work(vha, e);
4410}
4411
 4412/* GFPN_ID */
4413void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4414{
4415 fc_port_t *fcport = ea->fcport;
4416
4417 ql_dbg(ql_dbg_disc, vha, 0xffff,
4418 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4419 __func__, fcport->port_name, fcport->disc_state,
4420 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4421 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
4422
4423 if (fcport->disc_state == DSC_DELETE_PEND)
4424 return;
4425
4426 if (ea->sp->gen2 != fcport->login_gen) {
4427 /* target side must have changed it. */
4428 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4429 "%s %8phC generation changed\n",
4430 __func__, fcport->port_name);
4431 return;
4432 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4433 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
4434 __func__, __LINE__, fcport->port_name);
4435 qla24xx_post_gidpn_work(vha, fcport);
4436 return;
4437 }
4438
4439 qla24xx_post_gpsc_work(vha, fcport);
4440}
4441
4442static void qla2x00_async_gfpnid_sp_done(void *s, int res)
4443{
4444 struct srb *sp = s;
4445 struct scsi_qla_host *vha = sp->vha;
4446 fc_port_t *fcport = sp->fcport;
4447 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4448 struct event_arg ea;
4449 u64 wwn;
4450
4451 fcport->flags &= ~FCF_ASYNC_SENT;
4452 wwn = wwn_to_u64(fpn);
4453 if (wwn)
4454 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4455
4456 memset(&ea, 0, sizeof(ea));
4457 ea.fcport = fcport;
4458 ea.sp = sp;
4459 ea.rc = res;
4460 ea.event = FCME_GFPNID_DONE;
4461
4462 ql_dbg(ql_dbg_disc, vha, 0x204f,
4463 "Async done-%s res %x, WWPN %8phC %8phC\n",
4464 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4465
4466 qla2x00_fcport_event_handler(vha, &ea);
4467
4468 sp->free(sp);
4469}
4470
+int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	struct ct_sns_req *ct_req;
+	srb_t *sp;
+
+	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+		return rval;
+
+	fcport->disc_state = DSC_GFPN_ID;
+	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+	if (!sp)
+		goto done;
+
+	fcport->flags |= FCF_ASYNC_SENT;
+	sp->type = SRB_CT_PTHRU_CMD;
+	sp->name = "gfpnid";
+	sp->gen1 = fcport->rscn_gen;
+	sp->gen2 = fcport->login_gen;
+
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	/* CT_IU preamble */
+	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
+	    GFPN_ID_RSP_SIZE);
+
+	/* GFPN_ID req */
+	ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+	ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+	/* req & rsp use the same buffer */
+	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+	sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
+	sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
+	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla2x00_async_gfpnid_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0xffff,
+	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+	    sp->name, fcport->port_name,
+	    sp->handle, fcport->loop_id, fcport->d_id.b24);
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+	fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+	return rval;
+}
+
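The request writes the 24-bit FC address as three separate bytes (domain, area, al_pa), while the debug print at the end shows the same value packed as d_id.b24 with %06x. A small sketch of that packing — the struct below is illustrative, not the driver's port_id_t layout:

#include <stdio.h>
#include <stdint.h>

struct port_id {
	uint8_t domain, area, al_pa;	/* bytes sent in the GFPN_ID payload */
};

/* domain is the most significant byte of the 24-bit fabric address */
static uint32_t to_b24(struct port_id id)
{
	return ((uint32_t)id.domain << 16) | ((uint32_t)id.area << 8) | id.al_pa;
}

int main(void)
{
	struct port_id id = { .domain = 0x01, .area = 0x02, .al_pa = 0x0f };

	printf("portid %06x\n", to_b24(id));	/* prints "portid 01020f" */
	return 0;
}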
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_work_evt *e;
+	int ls;
+
+	ls = atomic_read(&vha->loop_state);
+	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+	    test_bit(UNLOADING, &vha->dpc_flags))
+		return 0;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.fcport.fcport = fcport;
+	return qla2x00_post_work(vha, e);
+}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1bafa043f9f1..aececf664654 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -41,6 +41,7 @@ static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
 	struct event_arg *);
 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
 	struct event_arg *);
+static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
 
 /* SRB Extensions ---------------------------------------------------------- */
 
@@ -58,7 +59,8 @@ qla2x00_sp_timeout(struct timer_list *t)
 	req->outstanding_cmds[sp->handle] = NULL;
 	iocb = &sp->u.iocb_cmd;
 	iocb->timeout(sp);
-	sp->free(sp);
+	if (sp->type != SRB_ELS_DCMD)
+		sp->free(sp);
 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 }
 
@@ -102,14 +104,21 @@ qla2x00_async_iocb_timeout(void *data)
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct event_arg ea;
 
-	ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
-	    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
-	    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+	if (fcport) {
+		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
 
-	fcport->flags &= ~FCF_ASYNC_SENT;
+		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+	} else {
+		pr_info("Async-%s timeout - hdl=%x.\n",
+		    sp->name, sp->handle);
+	}
 
 	switch (sp->type) {
 	case SRB_LOGIN_CMD:
+		if (!fcport)
+			break;
 		/* Retry as needed. */
 		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
 		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
@@ -123,6 +132,8 @@ qla2x00_async_iocb_timeout(void *data)
 		qla24xx_handle_plogi_done_event(fcport->vha, &ea);
 		break;
 	case SRB_LOGOUT_CMD:
+		if (!fcport)
+			break;
 		qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
 		break;
 	case SRB_CT_PTHRU_CMD:
@@ -130,6 +141,7 @@ qla2x00_async_iocb_timeout(void *data)
 	case SRB_NACK_PLOGI:
 	case SRB_NACK_PRLI:
 	case SRB_NACK_LOGO:
+	case SRB_CTRL_VP:
 		sp->done(sp, QLA_FUNCTION_TIMEOUT);
 		break;
 	}
@@ -146,7 +158,8 @@ qla2x00_async_login_sp_done(void *ptr, int res)
 	ql_dbg(ql_dbg_disc, vha, 0x20dd,
 	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
 
-	sp->fcport->flags &= ~FCF_ASYNC_SENT;
+	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
 	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
 		memset(&ea, 0, sizeof(ea));
 		ea.event = FCME_PLOGI_DONE;
@@ -173,11 +186,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 	if (!vha->flags.online)
 		goto done;
 
-	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
-	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
-		goto done;
-
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
@@ -185,8 +193,11 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 	fcport->flags |= FCF_ASYNC_SENT;
 	fcport->logout_completed = 0;
 
+	fcport->disc_state = DSC_LOGIN_PEND;
 	sp->type = SRB_LOGIN_CMD;
 	sp->name = "login";
+	sp->gen1 = fcport->rscn_gen;
+	sp->gen2 = fcport->login_gen;
 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 
@@ -201,7 +212,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
-		fcport->flags &= ~FCF_ASYNC_SENT;
 		fcport->flags |= FCF_LOGIN_NEEDED;
 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 		goto done_free_sp;
@@ -216,8 +226,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 
 done_free_sp:
 	sp->free(sp);
-done:
 	fcport->flags &= ~FCF_ASYNC_SENT;
+done:
 	return rval;
 }
 
@@ -227,7 +237,7 @@ qla2x00_async_logout_sp_done(void *ptr, int res)
 	srb_t *sp = ptr;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 
-	sp->fcport->flags &= ~FCF_ASYNC_SENT;
+	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 	if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
 		qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
 		    lio->u.logio.data);
@@ -239,9 +249,11 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	srb_t *sp;
 	struct srb_iocb *lio;
-	int rval;
+	int rval = QLA_FUNCTION_FAILED;
+
+	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+		return rval;
 
-	rval = QLA_FUNCTION_FAILED;
 	fcport->flags |= FCF_ASYNC_SENT;
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
@@ -272,16 +284,126 @@ done:
 	return rval;
 }
 
+void
+qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	/* Don't re-login in target mode */
+	if (!fcport->tgt_session)
+		qla2x00_mark_device_lost(vha, fcport, 1, 0);
+	qlt_logo_completion_handler(fcport, data[0]);
+}
+
+static void
+qla2x00_async_prlo_sp_done(void *s, int res)
+{
+	srb_t *sp = (srb_t *)s;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = sp->vha;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags))
+		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
+		    lio->u.logio.data);
+	sp->free(sp);
+}
+
+int
+qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	srb_t *sp;
+	struct srb_iocb *lio;
+	int rval;
+
+	rval = QLA_FUNCTION_FAILED;
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_PRLO_CMD;
+	sp->name = "prlo";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
+	lio->timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla2x00_async_prlo_sp_done;
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2070,
+	    "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	return rval;
+}
+
+static
+void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+	struct fc_port *fcport = ea->fcport;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20d2,
+	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+	    __func__, fcport->port_name, fcport->disc_state,
+	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
+		ql_dbg(ql_dbg_disc, vha, 0x2066,
+		    "%s %8phC: adisc fail: post delete\n",
+		    __func__, ea->fcport->port_name);
+		qlt_schedule_sess_for_deletion(ea->fcport);
+		return;
+	}
+
+	if (ea->fcport->disc_state == DSC_DELETE_PEND)
+		return;
+
+	if (ea->sp->gen2 != ea->fcport->login_gen) {
+		/* target side must have changed it. */
+		ql_dbg(ql_dbg_disc, vha, 0x20d3,
+		    "%s %8phC generation changed\n",
+		    __func__, ea->fcport->port_name);
+		return;
+	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
+		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+		    __func__, __LINE__, ea->fcport->port_name);
+		qla24xx_post_gidpn_work(vha, ea->fcport);
+		return;
+	}
+
+	__qla24xx_handle_gpdb_event(vha, ea);
+}
+
 static void
 qla2x00_async_adisc_sp_done(void *ptr, int res)
 {
 	srb_t *sp = ptr;
 	struct scsi_qla_host *vha = sp->vha;
+	struct event_arg ea;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 
-	if (!test_bit(UNLOADING, &vha->dpc_flags))
-		qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
-		    lio->u.logio.data);
+	ql_dbg(ql_dbg_disc, vha, 0x2066,
+	    "Async done-%s res %x %8phC\n",
+	    sp->name, res, sp->fcport->port_name);
+
+	memset(&ea, 0, sizeof(ea));
+	ea.event = FCME_ADISC_DONE;
+	ea.rc = res;
+	ea.data[0] = lio->u.logio.data[0];
+	ea.data[1] = lio->u.logio.data[1];
+	ea.iop[0] = lio->u.logio.iop[0];
+	ea.iop[1] = lio->u.logio.iop[1];
+	ea.fcport = sp->fcport;
+	ea.sp = sp;
+
+	qla2x00_fcport_event_handler(vha, &ea);
+
 	sp->free(sp);
 }
 
@@ -313,15 +435,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
 		goto done_free_sp;
 
 	ql_dbg(ql_dbg_disc, vha, 0x206f,
-	    "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
-	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
-	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
 	return rval;
 
 done_free_sp:
 	sp->free(sp);
 done:
 	fcport->flags &= ~FCF_ASYNC_SENT;
+	qla2x00_post_async_adisc_work(vha, fcport, data);
 	return rval;
 }
 
@@ -333,9 +455,19 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 	u16 i, n, found = 0, loop_id;
 	port_id_t id;
 	u64 wwn;
-	u8 opt = 0, current_login_state;
+	u16 data[2];
+	u8 current_login_state;
 
 	fcport = ea->fcport;
+	ql_dbg(ql_dbg_disc, vha, 0xffff,
+	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+	    __func__, fcport->port_name, fcport->disc_state,
+	    fcport->fw_login_state, ea->rc,
+	    fcport->login_gen, fcport->last_login_gen,
+	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+
+	if (fcport->disc_state == DSC_DELETE_PEND)
+		return;
 
 	if (ea->rc) { /* rval */
 		if (fcport->login_retry == 0) {
@@ -356,9 +488,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 		return;
 	} else if (fcport->last_login_gen != fcport->login_gen) {
 		ql_dbg(ql_dbg_disc, vha, 0x20e0,
-		    "%s %8phC login gen changed login %d|%d\n",
-		    __func__, fcport->port_name,
-		    fcport->last_login_gen, fcport->login_gen);
+		    "%s %8phC login gen changed\n",
+		    __func__, fcport->port_name);
 		return;
 	}
 
@@ -400,7 +531,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 		ql_dbg(ql_dbg_disc, vha, 0x20e3,
 		    "%s %d %8phC post del sess\n",
 		    __func__, __LINE__, fcport->port_name);
-		qlt_schedule_sess_for_deletion(fcport, 1);
+		qlt_schedule_sess_for_deletion(fcport);
 		return;
 	}
 
@@ -430,8 +561,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 		ql_dbg(ql_dbg_disc, vha, 0x20e4,
 		    "%s %d %8phC post gpdb\n",
 		    __func__, __LINE__, fcport->port_name);
-		opt = PDO_FORCE_ADISC;
-		qla24xx_post_gpdb_work(vha, fcport, opt);
+
+		if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+			fcport->port_type = FCT_INITIATOR;
+		else
+			fcport->port_type = FCT_TARGET;
+
+		data[0] = data[1] = 0;
+		qla2x00_post_async_adisc_work(vha, fcport, data);
 		break;
 	case DSC_LS_PORT_UNAVAIL:
 	default:
@@ -449,36 +586,29 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 
 	if (!found) {
 		/* fw has no record of this port */
-		if (fcport->loop_id == FC_NO_LOOP_ID) {
-			qla2x00_find_new_loop_id(vha, fcport);
-			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
-		} else {
-			for (i = 0; i < n; i++) {
-				e = &vha->gnl.l[i];
-				id.b.domain = e->port_id[0];
-				id.b.area = e->port_id[1];
-				id.b.al_pa = e->port_id[2];
-				id.b.rsvd_1 = 0;
-				loop_id = le16_to_cpu(e->nport_handle);
-
-				if (fcport->d_id.b24 == id.b24) {
-					conflict_fcport =
-						qla2x00_find_fcport_by_wwpn(vha,
-						    e->port_name, 0);
-
-					ql_dbg(ql_dbg_disc, vha, 0x20e6,
-					    "%s %d %8phC post del sess\n",
-					    __func__, __LINE__,
-					    conflict_fcport->port_name);
-					qlt_schedule_sess_for_deletion
-						(conflict_fcport, 1);
-				}
-
-				if (fcport->loop_id == loop_id) {
-					/* FW already picked this loop id for another fcport */
-					qla2x00_find_new_loop_id(vha, fcport);
-				}
+		for (i = 0; i < n; i++) {
+			e = &vha->gnl.l[i];
+			id.b.domain = e->port_id[0];
+			id.b.area = e->port_id[1];
+			id.b.al_pa = e->port_id[2];
+			id.b.rsvd_1 = 0;
+			loop_id = le16_to_cpu(e->nport_handle);
+
+			if (fcport->d_id.b24 == id.b24) {
+				conflict_fcport =
+					qla2x00_find_fcport_by_wwpn(vha,
+					    e->port_name, 0);
+				ql_dbg(ql_dbg_disc, vha, 0x20e6,
+				    "%s %d %8phC post del sess\n",
+				    __func__, __LINE__,
+				    conflict_fcport->port_name);
+				qlt_schedule_sess_for_deletion
+					(conflict_fcport);
 			}
+
+			/* FW already picked this loop id for another fcport */
+			if (fcport->loop_id == loop_id)
+				fcport->loop_id = FC_NO_LOOP_ID;
 		}
 		qla24xx_fcport_handle_login(vha, fcport);
 	}
@@ -496,6 +626,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
 	struct get_name_list_extended *e;
 	u64 wwn;
 	struct list_head h;
+	bool found = false;
 
 	ql_dbg(ql_dbg_disc, vha, 0x20e7,
 	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
@@ -539,12 +670,44 @@ qla24xx_async_gnl_sp_done(void *s, int res)
 
 	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
 		list_del_init(&fcport->gnl_entry);
-		fcport->flags &= ~FCF_ASYNC_SENT;
+		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 		ea.fcport = fcport;
 
 		qla2x00_fcport_event_handler(vha, &ea);
 	}
 
+	/* create new fcport if fw has knowledge of new sessions */
+	for (i = 0; i < n; i++) {
+		port_id_t id;
+		u64 wwnn;
+
+		e = &vha->gnl.l[i];
+		wwn = wwn_to_u64(e->port_name);
+
+		found = false;
+		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+			if (!memcmp((u8 *)&wwn, fcport->port_name,
+			    WWN_SIZE)) {
+				found = true;
+				break;
+			}
+		}
+
+		id.b.domain = e->port_id[2];
+		id.b.area = e->port_id[1];
+		id.b.al_pa = e->port_id[0];
+		id.b.rsvd_1 = 0;
+
+		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
+			ql_dbg(ql_dbg_disc, vha, 0x2065,
+			    "%s %d %8phC %06x post new sess\n",
+			    __func__, __LINE__, (u8 *)&wwn, id.b24);
+			wwnn = wwn_to_u64(e->node_name);
+			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
+			    (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
+		}
+	}
+
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
 	sp->free(sp);
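The loop added above walks the firmware's name list and posts session creation only for WWPNs the driver does not already track. The same scan-then-create shape, reduced to fixed arrays (hypothetical data, not driver structures):

#include <stdio.h>
#include <string.h>

#define WWN_SIZE 8

static const unsigned char known[][WWN_SIZE] = {
	{ 0x21, 0x00, 0x00, 0x24, 0xff, 0x7f, 0x8a, 0x1c },
};

/* mirror of the inner list_for_each_entry_safe() comparison */
static int is_known(const unsigned char *wwpn)
{
	size_t i;

	for (i = 0; i < sizeof(known) / sizeof(known[0]); i++)
		if (!memcmp(known[i], wwpn, WWN_SIZE))
			return 1;
	return 0;
}

int main(void)
{
	unsigned char fw_entry[WWN_SIZE] =
	    { 0x50, 0x00, 0x09, 0x73, 0x00, 0x18, 0x6a, 0x55 };

	if (!is_known(fw_entry))
		printf("post new sess for %02x...%02x\n",
		    fw_entry[0], fw_entry[WWN_SIZE - 1]);
	return 0;
}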
@@ -558,14 +721,13 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 	unsigned long flags;
 	u16 *mb;
 
-	if (!vha->flags.online)
-		goto done;
+	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+		return rval;
 
 	ql_dbg(ql_dbg_disc, vha, 0x20d9,
 	    "Async-gnlist WWPN %8phC \n", fcport->port_name);
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
-	fcport->flags |= FCF_ASYNC_SENT;
 	fcport->disc_state = DSC_GNL;
 	fcport->last_rscn_gen = fcport->rscn_gen;
 	fcport->last_login_gen = fcport->login_gen;
@@ -573,8 +735,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
 	if (vha->gnl.sent) {
 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-		rval = QLA_SUCCESS;
-		goto done;
+		return QLA_SUCCESS;
 	}
 	vha->gnl.sent = 1;
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -582,6 +743,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
+
+	fcport->flags |= FCF_ASYNC_SENT;
 	sp->type = SRB_MB_IOCB;
 	sp->name = "gnlist";
 	sp->gen1 = fcport->rscn_gen;
@@ -616,8 +779,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 done_free_sp:
 	sp->free(sp);
-done:
 	fcport->flags &= ~FCF_ASYNC_SENT;
+done:
 	return rval;
 }
 
@@ -630,6 +793,7 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 		return QLA_FUNCTION_FAILED;
 
 	e->u.fcport.fcport = fcport;
+	fcport->flags |= FCF_ASYNC_ACTIVE;
 	return qla2x00_post_work(vha, e);
 }
 
@@ -639,31 +803,18 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
 	struct srb *sp = s;
 	struct scsi_qla_host *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct port_database_24xx *pd;
 	fc_port_t *fcport = sp->fcport;
 	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
-	int rval = QLA_SUCCESS;
 	struct event_arg ea;
 
 	ql_dbg(ql_dbg_disc, vha, 0x20db,
 	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
 	    sp->name, res, fcport->port_name, mb[1], mb[2]);
 
-	fcport->flags &= ~FCF_ASYNC_SENT;
-
-	if (res) {
-		rval = res;
-		goto gpd_error_out;
-	}
-
-	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
-
-	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 
-gpd_error_out:
 	memset(&ea, 0, sizeof(ea));
 	ea.event = FCME_GPDB_DONE;
-	ea.rc = rval;
 	ea.fcport = fcport;
 	ea.sp = sp;
 
@@ -754,7 +905,6 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
-		fcport->flags &= ~FCF_ASYNC_SENT;
 		fcport->flags |= FCF_LOGIN_NEEDED;
 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 		goto done_free_sp;
@@ -783,6 +933,7 @@ int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 
 	e->u.fcport.fcport = fcport;
 	e->u.fcport.opt = opt;
+	fcport->flags |= FCF_ASYNC_ACTIVE;
 	return qla2x00_post_work(vha, e);
 }
 
@@ -796,16 +947,16 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 	struct port_database_24xx *pd;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!vha->flags.online)
-		goto done;
+	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+		return rval;
 
-	fcport->flags |= FCF_ASYNC_SENT;
 	fcport->disc_state = DSC_GPDB;
 
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
+	fcport->flags |= FCF_ASYNC_SENT;
 	sp->type = SRB_MB_IOCB;
 	sp->name = "gpdb";
 	sp->gen1 = fcport->rscn_gen;
@@ -851,47 +1002,17 @@ done_free_sp:
 	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
 
 	sp->free(sp);
-done:
 	fcport->flags &= ~FCF_ASYNC_SENT;
+done:
 	qla24xx_post_gpdb_work(vha, fcport, opt);
 	return rval;
 }
 
 static
-void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
 {
-	int rval = ea->rc;
-	fc_port_t *fcport = ea->fcport;
 	unsigned long flags;
 
-	fcport->flags &= ~FCF_ASYNC_SENT;
-
-	ql_dbg(ql_dbg_disc, vha, 0x20d2,
-	    "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
-	    fcport->disc_state, fcport->fw_login_state, rval);
-
-	if (ea->sp->gen2 != fcport->login_gen) {
-		/* target side must have changed it. */
-		ql_dbg(ql_dbg_disc, vha, 0x20d3,
-		    "%s %8phC generation changed rscn %d|%d login %d|%d \n",
-		    __func__, fcport->port_name, fcport->last_rscn_gen,
-		    fcport->rscn_gen, fcport->last_login_gen,
-		    fcport->login_gen);
-		return;
-	} else if (ea->sp->gen1 != fcport->rscn_gen) {
-		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
-		    __func__, __LINE__, fcport->port_name);
-		qla24xx_post_gidpn_work(vha, fcport);
-		return;
-	}
-
-	if (rval != QLA_SUCCESS) {
-		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
-		    __func__, __LINE__, fcport->port_name);
-		qlt_schedule_sess_for_deletion_lock(fcport);
-		return;
-	}
-
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	ea->fcport->login_gen++;
 	ea->fcport->deleted = 0;
@@ -905,47 +1026,157 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
 	    !vha->hw->flags.gpsc_supported) {
 		ql_dbg(ql_dbg_disc, vha, 0x20d6,
 		    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
-		    __func__, __LINE__, fcport->port_name,
+		    __func__, __LINE__, ea->fcport->port_name,
 		    vha->fcport_count);
 
-		qla24xx_post_upd_fcport_work(vha, fcport);
+		qla24xx_post_upd_fcport_work(vha, ea->fcport);
 	} else {
-		ql_dbg(ql_dbg_disc, vha, 0x20d7,
-		    "%s %d %8phC post gpsc fcp_cnt %d\n",
-		    __func__, __LINE__, fcport->port_name,
-		    vha->fcport_count);
-
-		qla24xx_post_gpsc_work(vha, fcport);
+		if (ea->fcport->id_changed) {
+			ea->fcport->id_changed = 0;
+			ql_dbg(ql_dbg_disc, vha, 0x20d7,
+			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
+			    __func__, __LINE__, ea->fcport->port_name,
+			    vha->fcport_count);
+			qla24xx_post_gfpnid_work(vha, ea->fcport);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x20d7,
+			    "%s %d %8phC post gpsc fcp_cnt %d\n",
+			    __func__, __LINE__, ea->fcport->port_name,
+			    vha->fcport_count);
+			qla24xx_post_gpsc_work(vha, ea->fcport);
+		}
 	}
+	} else if (ea->fcport->login_succ) {
+		/*
+		 * We have an existing session. A late RSCN delivery
+		 * must have triggered the session to be re-validate.
+		 * Session is still valid.
+		 */
+		ql_dbg(ql_dbg_disc, vha, 0x20d6,
+		    "%s %d %8phC session revalidate success\n",
+		    __func__, __LINE__, ea->fcport->port_name);
+		ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
 	}
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+}
+
+static
+void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+	fc_port_t *fcport = ea->fcport;
+	struct port_database_24xx *pd;
+	struct srb *sp = ea->sp;
+
+	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
+
+	fcport->flags &= ~FCF_ASYNC_SENT;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20d2,
+	    "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
+	    fcport->disc_state, pd->current_login_state, ea->rc);
+
+	if (fcport->disc_state == DSC_DELETE_PEND)
+		return;
+
+	switch (pd->current_login_state) {
+	case PDS_PRLI_COMPLETE:
+		__qla24xx_parse_gpdb(vha, fcport, pd);
+		break;
+	case PDS_PLOGI_PENDING:
+	case PDS_PLOGI_COMPLETE:
+	case PDS_PRLI_PENDING:
+	case PDS_PRLI2_PENDING:
+		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
+		    __func__, __LINE__, fcport->port_name);
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		return;
+	case PDS_LOGO_PENDING:
+	case PDS_PORT_UNAVAILABLE:
+	default:
+		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
+		    __func__, __LINE__, fcport->port_name);
+		qlt_schedule_sess_for_deletion(fcport);
+		return;
+	}
+	__qla24xx_handle_gpdb_event(vha, ea);
 } /* gpdb event */
 
-int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
-	if (fcport->login_retry == 0)
-		return 0;
+	u8 login = 0;
+	int rc;
 
-	if (fcport->scan_state != QLA_FCPORT_FOUND)
-		return 0;
+	if (qla_tgt_mode_enabled(vha))
+		return;
+
+	if (qla_dual_mode_enabled(vha)) {
+		if (N2N_TOPO(vha->hw)) {
+			u64 mywwn, wwn;
+
+			mywwn = wwn_to_u64(vha->port_name);
+			wwn = wwn_to_u64(fcport->port_name);
+			if (mywwn > wwn)
+				login = 1;
+			else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+			    && time_after_eq(jiffies,
+				fcport->plogi_nack_done_deadline))
+				login = 1;
+		} else {
+			login = 1;
+		}
+	} else {
+		/* initiator mode */
+		login = 1;
+	}
+
+	if (login) {
+		if (fcport->loop_id == FC_NO_LOOP_ID) {
+			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+			rc = qla2x00_find_new_loop_id(vha, fcport);
+			if (rc) {
+				ql_dbg(ql_dbg_disc, vha, 0x20e6,
+				    "%s %d %8phC post del sess - out of loopid\n",
+				    __func__, __LINE__, fcport->port_name);
+				fcport->scan_state = 0;
+				qlt_schedule_sess_for_deletion(fcport);
+				return;
+			}
+		}
+		ql_dbg(ql_dbg_disc, vha, 0x20bf,
+		    "%s %d %8phC post login\n",
+		    __func__, __LINE__, fcport->port_name);
+		qla2x00_post_async_login_work(vha, fcport, NULL);
+	}
+}
+
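qla_chk_n2n_b4_login() above encodes the point-to-point tie-break: with only two ports on an N2N link, the side holding the numerically larger WWPN initiates PLOGI so the two ends never cross login, and the lower side still proceeds once the peer's PLOGI-NACK deadline has passed. The core rule in isolation (a sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

/* higher WWPN wins the right to send PLOGI on an N2N link */
static int should_login(uint64_t my_wwpn, uint64_t remote_wwpn)
{
	return my_wwpn > remote_wwpn;
}

int main(void)
{
	uint64_t a = 0x2100002437a1b2c3ULL;
	uint64_t b = 0x2100002437a1b2c0ULL;

	printf("side A initiates: %d\n", should_login(a, b));	/* 1 */
	printf("side B initiates: %d\n", should_login(b, a));	/* 0 */
	return 0;
}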
+int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	u16 data[2];
+	u64 wwn;
 
 	ql_dbg(ql_dbg_disc, vha, 0x20d8,
-	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
+	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
 	    __func__, fcport->port_name, fcport->disc_state,
 	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
 	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
-	    fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
-	    fcport->loop_id);
+	    fcport->login_gen, fcport->login_retry,
+	    fcport->loop_id, fcport->scan_state);
 
-	fcport->login_retry--;
+	if (fcport->login_retry == 0)
+		return 0;
+
+	if (fcport->scan_state != QLA_FCPORT_FOUND)
+		return 0;
 
 	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
 	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
 		return 0;
 
 	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
-		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 			return 0;
+		}
 	}
 
 	/* for pure Target Mode. Login will not be initiated */
@@ -957,19 +1188,23 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 		return 0;
 	}
 
+	fcport->login_retry--;
+
 	switch (fcport->disc_state) {
 	case DSC_DELETED:
-		if (fcport->loop_id == FC_NO_LOOP_ID) {
+		wwn = wwn_to_u64(fcport->node_name);
+		if (wwn == 0) {
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+			    "%s %d %8phC post GNNID\n",
+			    __func__, __LINE__, fcport->port_name);
+			qla24xx_post_gnnid_work(vha, fcport);
+		} else if (fcport->loop_id == FC_NO_LOOP_ID) {
 			ql_dbg(ql_dbg_disc, vha, 0x20bd,
 			    "%s %d %8phC post gnl\n",
 			    __func__, __LINE__, fcport->port_name);
-			qla24xx_async_gnl(vha, fcport);
+			qla24xx_post_gnl_work(vha, fcport);
 		} else {
-			ql_dbg(ql_dbg_disc, vha, 0x20bf,
-			    "%s %d %8phC post login\n",
-			    __func__, __LINE__, fcport->port_name);
-			fcport->disc_state = DSC_LOGIN_PEND;
-			qla2x00_post_async_login_work(vha, fcport, NULL);
+			qla_chk_n2n_b4_login(vha, fcport);
 		}
 		break;
 
@@ -981,40 +1216,26 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 			break;
 		}
 
-		if (fcport->flags & FCF_FCP2_DEVICE) {
-			u8 opt = PDO_FORCE_ADISC;
-
-			ql_dbg(ql_dbg_disc, vha, 0x20c9,
-			    "%s %d %8phC post gpdb\n",
-			    __func__, __LINE__, fcport->port_name);
-
-			fcport->disc_state = DSC_GPDB;
-			qla24xx_post_gpdb_work(vha, fcport, opt);
-		} else {
-			ql_dbg(ql_dbg_disc, vha, 0x20cf,
-			    "%s %d %8phC post login\n",
-			    __func__, __LINE__, fcport->port_name);
-			fcport->disc_state = DSC_LOGIN_PEND;
-			qla2x00_post_async_login_work(vha, fcport, NULL);
-		}
-
+		qla_chk_n2n_b4_login(vha, fcport);
 		break;
 
 	case DSC_LOGIN_FAILED:
 		ql_dbg(ql_dbg_disc, vha, 0x20d0,
 		    "%s %d %8phC post gidpn\n",
 		    __func__, __LINE__, fcport->port_name);
-
-		qla24xx_post_gidpn_work(vha, fcport);
+		if (N2N_TOPO(vha->hw))
+			qla_chk_n2n_b4_login(vha, fcport);
+		else
+			qla24xx_post_gidpn_work(vha, fcport);
 		break;
 
 	case DSC_LOGIN_COMPLETE:
 		/* recheck login state */
 		ql_dbg(ql_dbg_disc, vha, 0x20d1,
-		    "%s %d %8phC post gpdb\n",
+		    "%s %d %8phC post adisc\n",
 		    __func__, __LINE__, fcport->port_name);
-
-		qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
+		data[0] = data[1] = 0;
+		qla2x00_post_async_adisc_work(vha, fcport, data);
 		break;
 
 	default:
@@ -1040,16 +1261,15 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
 	switch (fcport->disc_state) {
 	case DSC_DELETED:
 	case DSC_LOGIN_COMPLETE:
-		qla24xx_post_gidpn_work(fcport->vha, fcport);
+		qla24xx_post_gpnid_work(fcport->vha, &ea->id);
 		break;
-
 	default:
 		break;
 	}
 }
 
 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
-    u8 *port_name, void *pla)
+    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
 {
 	struct qla_work_evt *e;
 	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
@@ -1058,47 +1278,20 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
 
 	e->u.new_sess.id = *id;
 	e->u.new_sess.pla = pla;
+	e->u.new_sess.fc4_type = fc4_type;
 	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+	if (node_name)
+		memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
 
 	return qla2x00_post_work(vha, e);
 }
 
 static
-int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
-    struct event_arg *ea)
-{
-	fc_port_t *fcport = ea->fcport;
-
-	if (test_bit(UNLOADING, &vha->dpc_flags))
-		return 0;
-
-	switch (vha->host->active_mode) {
-	case MODE_INITIATOR:
-	case MODE_DUAL:
-		if (fcport->scan_state == QLA_FCPORT_FOUND)
-			qla24xx_fcport_handle_login(vha, fcport);
-		break;
-
-	case MODE_TARGET:
-	default:
-		/* no-op */
-		break;
-	}
-
-	return 0;
-}
-
-static
 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
     struct event_arg *ea)
 {
 	fc_port_t *fcport = ea->fcport;
 
-	if (fcport->scan_state != QLA_FCPORT_FOUND) {
-		fcport->login_retry++;
-		return;
-	}
-
 	ql_dbg(ql_dbg_disc, vha, 0x2102,
 	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
 	    __func__, fcport->port_name, fcport->disc_state,
@@ -1113,8 +1306,10 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
 		return;
 
 	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
-		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 			return;
+		}
 	}
 
 	if (fcport->flags & FCF_ASYNC_SENT) {
@@ -1132,7 +1327,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
 		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
 		    __func__, __LINE__, fcport->port_name);
 
-		qla24xx_async_gidpn(vha, fcport);
+		qla24xx_post_gidpn_work(vha, fcport);
 		return;
 	}
 
@@ -1141,16 +1336,16 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
 
 void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
 {
-	fc_port_t *fcport, *f, *tf;
+	fc_port_t *f, *tf;
 	uint32_t id = 0, mask, rid;
-	int rc;
+	unsigned long flags;
 
 	switch (ea->event) {
-	case FCME_RELOGIN:
 	case FCME_RSCN:
 	case FCME_GIDPN_DONE:
 	case FCME_GPSC_DONE:
 	case FCME_GPNID_DONE:
+	case FCME_GNNID_DONE:
 		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
 		    test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
 			return;
@@ -1171,20 +1366,15 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
 		return;
 	switch (ea->id.b.rsvd_1) {
 	case RSCN_PORT_ADDR:
-		fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
-		if (!fcport) {
-			/* cable moved */
-			rc = qla24xx_post_gpnid_work(vha, &ea->id);
-			if (rc) {
-				ql_log(ql_log_warn, vha, 0xd044,
-				    "RSCN GPNID work failed %02x%02x%02x\n",
-				    ea->id.b.domain, ea->id.b.area,
-				    ea->id.b.al_pa);
-			}
-		} else {
-			ea->fcport = fcport;
-			qla24xx_handle_rscn_event(fcport, ea);
-		}
+		spin_lock_irqsave(&vha->work_lock, flags);
+		if (vha->scan.scan_flags == 0) {
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+			    "%s: schedule\n", __func__);
+			vha->scan.scan_flags |= SF_QUEUED;
+			schedule_delayed_work(&vha->scan.scan_work, 5);
+		}
+		spin_unlock_irqrestore(&vha->work_lock, flags);
+
 		break;
 	case RSCN_AREA_ADDR:
 	case RSCN_DOM_ADDR:
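The RSCN_PORT_ADDR branch now debounces fabric change notifications: the first RSCN taken under work_lock sets SF_QUEUED and schedules one delayed scan, and further RSCNs landing before the scan runs are absorbed by the flag test. The shape of that guard, as a single-threaded stand-in (the driver does this under a spinlock):

#include <stdio.h>

#define SF_QUEUED 0x01

static unsigned int scan_flags;
static int scans_scheduled;

static void rscn_arrived(void)
{
	if (scan_flags == 0) {		/* only the first event queues a scan */
		scan_flags |= SF_QUEUED;
		scans_scheduled++;	/* stands in for schedule_delayed_work() */
	}
}

int main(void)
{
	rscn_arrived();
	rscn_arrived();			/* burst: absorbed by SF_QUEUED */
	rscn_arrived();
	printf("scans scheduled for 3 RSCNs: %d\n", scans_scheduled);
	return 0;
}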
@@ -1227,7 +1417,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
 		qla24xx_handle_gnl_done_event(vha, ea);
 		break;
 	case FCME_GPSC_DONE:
-		qla24xx_post_upd_fcport_work(vha, ea->fcport);
+		qla24xx_handle_gpsc_event(vha, ea);
 		break;
 	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
 		qla24xx_handle_plogi_done_event(vha, ea);
@@ -1244,8 +1434,14 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
 	case FCME_GFFID_DONE:
 		qla24xx_handle_gffid_event(vha, ea);
 		break;
-	case FCME_DELETE_DONE:
-		qla24xx_handle_delete_done_event(vha, ea);
+	case FCME_ADISC_DONE:
+		qla24xx_handle_adisc_event(vha, ea);
+		break;
+	case FCME_GNNID_DONE:
+		qla24xx_handle_gnnid_event(vha, ea);
+		break;
+	case FCME_GFPNID_DONE:
+		qla24xx_handle_gfpnid_event(vha, ea);
 		break;
 	default:
 		BUG_ON(1);
@@ -1327,6 +1523,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
 done_free_sp:
 	sp->free(sp);
+	sp->fcport->flags &= ~FCF_ASYNC_SENT;
 done:
 	return rval;
 }
@@ -1368,6 +1565,13 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
 	sp->name = "abort";
 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
 	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+
+	if (vha->flags.qpairs_available && cmd_sp->qpair)
+		abt_iocb->u.abt.req_que_no =
+			cpu_to_le16(cmd_sp->qpair->req->id);
+	else
+		abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+
 	sp->done = qla24xx_abort_sp_done;
 	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
 	init_completion(&abt_iocb->u.abt.comp);
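With multiqueue enabled, an abort IOCB must name the request queue that issued the command it is chasing, falling back to the base queue otherwise; the same selection is repeated in qla24xx_async_abort_command() below when scanning outstanding_cmds. The rule by itself, with illustrative types:

#include <stdio.h>

struct req_que { int id; };
struct qpair   { struct req_que *req; };
struct cmd_sp  { struct qpair *qpair; };

static int abort_req_que_no(const struct cmd_sp *cmd, int qpairs_available,
                            const struct req_que *base)
{
	if (qpairs_available && cmd->qpair)
		return cmd->qpair->req->id;	/* queue that owns the command */
	return base->id;			/* single-queue fallback */
}

int main(void)
{
	struct req_que base = { .id = 0 }, q3 = { .id = 3 };
	struct qpair qp = { .req = &q3 };
	struct cmd_sp cmd = { .qpair = &qp };

	printf("MQ on : que %d\n", abort_req_que_no(&cmd, 1, &base));
	printf("MQ off: que %d\n", abort_req_que_no(&cmd, 0, &base));
	return 0;
}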
@@ -1402,6 +1606,9 @@ qla24xx_async_abort_command(srb_t *sp)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = vha->req;
 
+	if (vha->flags.qpairs_available && sp->qpair)
+		req = sp->qpair->req;
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
 		if (req->outstanding_cmds[handle] == sp)
@@ -1452,6 +1659,42 @@ static void
 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 {
 	port_id_t cid;	/* conflict Nport id */
+	u16 lid;
+	struct fc_port *conflict_fcport;
+	unsigned long flags;
+	struct fc_port *fcport = ea->fcport;
+
+	ql_dbg(ql_dbg_disc, vha, 0xffff,
+	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
+	    __func__, fcport->port_name, fcport->disc_state,
+	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
+	    ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
+	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
+
+	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
+		ql_dbg(ql_dbg_disc, vha, 0x20ea,
+		    "%s %d %8phC Remote is trying to login\n",
+		    __func__, __LINE__, fcport->port_name);
+		return;
+	}
+
+	if (fcport->disc_state == DSC_DELETE_PEND)
+		return;
+
+	if (ea->sp->gen2 != fcport->login_gen) {
+		/* target side must have changed it. */
+		ql_dbg(ql_dbg_disc, vha, 0x20d3,
+		    "%s %8phC generation changed\n",
+		    __func__, fcport->port_name);
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		return;
+	} else if (ea->sp->gen1 != fcport->rscn_gen) {
+		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+		    __func__, __LINE__, fcport->port_name);
+		qla24xx_post_gidpn_work(vha, fcport);
+		return;
+	}
 
 	switch (ea->data[0]) {
 	case MBS_COMMAND_COMPLETE:
@@ -1467,11 +1710,19 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 			qla24xx_post_prli_work(vha, ea->fcport);
 		} else {
 			ql_dbg(ql_dbg_disc, vha, 0x20ea,
-			    "%s %d %8phC post gpdb\n",
-			    __func__, __LINE__, ea->fcport->port_name);
+			    "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+			    __func__, __LINE__, ea->fcport->port_name,
+			    ea->fcport->loop_id, ea->fcport->d_id.b24);
+
+			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+			ea->fcport->loop_id = FC_NO_LOOP_ID;
 			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
 			ea->fcport->logout_on_delete = 1;
 			ea->fcport->send_els_logo = 0;
+			ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
 			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
 		}
 		break;
@@ -1513,8 +1764,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 		    ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
 		    ea->fcport->d_id.b.al_pa);
 
-		qla2x00_clear_loop_id(ea->fcport);
-		qla24xx_post_gidpn_work(vha, ea->fcport);
+		lid = ea->iop[1] & 0xffff;
+		qlt_find_sess_invalidate_other(vha,
+		    wwn_to_u64(ea->fcport->port_name),
+		    ea->fcport->d_id, lid, &conflict_fcport);
+
+		if (conflict_fcport) {
+			/*
+			 * Another fcport share the same loop_id/nport id.
+			 * Conflict fcport needs to finish cleanup before this
+			 * fcport can proceed to login.
+			 */
+			conflict_fcport->conflict = ea->fcport;
+			ea->fcport->login_pause = 1;
+
+			ql_dbg(ql_dbg_disc, vha, 0x20ed,
+			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+			    __func__, __LINE__, ea->fcport->port_name,
+			    ea->fcport->d_id.b24, lid);
+			qla2x00_clear_loop_id(ea->fcport);
+			qla24xx_post_gidpn_work(vha, ea->fcport);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x20ed,
+			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
+			    __func__, __LINE__, ea->fcport->port_name,
+			    ea->fcport->d_id.b24, lid);
+
+			qla2x00_clear_loop_id(ea->fcport);
+			set_bit(lid, vha->hw->loop_id_map);
+			ea->fcport->loop_id = lid;
+			ea->fcport->keep_nport_handle = 0;
+			qlt_schedule_sess_for_deletion(ea->fcport);
+		}
 		break;
 	}
 	return;
@@ -2540,70 +2821,27 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
 	return rval;
 }
 
-void
-qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+static void
+qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
 {
 	int rval;
-	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
-	    eft_size, fce_size, mq_size;
 	dma_addr_t tc_dma;
 	void *tc;
 	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = ha->req_q_map[0];
-	struct rsp_que *rsp = ha->rsp_q_map[0];
 
-	if (ha->fw_dump) {
+	if (ha->eft) {
 		ql_dbg(ql_dbg_init, vha, 0x00bd,
-		    "Firmware dump already allocated.\n");
+		    "%s: Offload Mem is already allocated.\n",
+		    __func__);
 		return;
 	}
 
-	ha->fw_dumped = 0;
-	ha->fw_dump_cap_flags = 0;
-	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
-	req_q_size = rsp_q_size = 0;
-
-	if (IS_QLA27XX(ha))
-		goto try_fce;
-
-	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
-		fixed_size = sizeof(struct qla2100_fw_dump);
-	} else if (IS_QLA23XX(ha)) {
-		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
-		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
-		    sizeof(uint16_t);
-	} else if (IS_FWI2_CAPABLE(ha)) {
-		if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
-			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
-		else if (IS_QLA81XX(ha))
-			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
-		else if (IS_QLA25XX(ha))
-			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
-		else
-			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
-
-		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
-		    sizeof(uint32_t);
-		if (ha->mqenable) {
-			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
-				mq_size = sizeof(struct qla2xxx_mq_chain);
-			/*
-			 * Allocate maximum buffer size for all queues.
-			 * Resizing must be done at end-of-dump processing.
-			 */
-			mq_size += ha->max_req_queues *
-			    (req->length * sizeof(request_t));
-			mq_size += ha->max_rsp_queues *
-			    (rsp->length * sizeof(response_t));
-		}
-		if (ha->tgt.atio_ring)
-			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+	if (IS_FWI2_CAPABLE(ha)) {
 		/* Allocate memory for Fibre Channel Event Buffer. */
 		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
 		    !IS_QLA27XX(ha))
 			goto try_eft;
 
-try_fce:
 		if (ha->fce)
 			dma_free_coherent(&ha->pdev->dev,
 			    FCE_SIZE, ha->fce, ha->fce_dma);
@@ -2631,7 +2869,6 @@ try_fce:
 		ql_dbg(ql_dbg_init, vha, 0x00c0,
 		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
 
-		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
 		ha->flags.fce_enabled = 1;
 		ha->fce_dma = tc_dma;
 		ha->fce = tc;
@@ -2648,7 +2885,7 @@ try_eft:
 			ql_log(ql_log_warn, vha, 0x00c1,
 			    "Unable to allocate (%d KB) for EFT.\n",
 			    EFT_SIZE / 1024);
-			goto cont_alloc;
+			goto eft_err;
 		}
 
 		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
@@ -2657,17 +2894,76 @@ try_eft:
 			    "Unable to initialize EFT (%d).\n", rval);
 			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
 			    tc_dma);
-			goto cont_alloc;
+			goto eft_err;
 		}
 		ql_dbg(ql_dbg_init, vha, 0x00c3,
 		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
 
-		eft_size = EFT_SIZE;
 		ha->eft_dma = tc_dma;
 		ha->eft = tc;
 	}
 
-cont_alloc:
+eft_err:
+	return;
+}
+
+void
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+{
+	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+	    eft_size, fce_size, mq_size;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+	struct qla2xxx_fw_dump *fw_dump;
+
+	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+	req_q_size = rsp_q_size = 0;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+		fixed_size = sizeof(struct qla2100_fw_dump);
+	} else if (IS_QLA23XX(ha)) {
+		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
+		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+		    sizeof(uint16_t);
+	} else if (IS_FWI2_CAPABLE(ha)) {
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+		else if (IS_QLA81XX(ha))
+			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+		else if (IS_QLA25XX(ha))
+			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
+		else
+			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
+		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+		    sizeof(uint32_t);
+		if (ha->mqenable) {
+			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+				mq_size = sizeof(struct qla2xxx_mq_chain);
+			/*
+			 * Allocate maximum buffer size for all queues.
+			 * Resizing must be done at end-of-dump processing.
+			 */
+			mq_size += ha->max_req_queues *
+			    (req->length * sizeof(request_t));
+			mq_size += ha->max_rsp_queues *
+			    (rsp->length * sizeof(response_t));
+		}
+		if (ha->tgt.atio_ring)
+			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+		/* Allocate memory for Fibre Channel Event Buffer. */
+		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+		    !IS_QLA27XX(ha))
+			goto try_eft;
+
+		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+try_eft:
+		ql_dbg(ql_dbg_init, vha, 0x00c3,
+		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+		eft_size = EFT_SIZE;
+	}
+
 	if (IS_QLA27XX(ha)) {
 		if (!ha->fw_dump_template) {
 			ql_log(ql_log_warn, vha, 0x00ba,
@@ -2695,51 +2991,44 @@ cont_alloc:
2695 ha->exlogin_size; 2991 ha->exlogin_size;
2696 2992
2697allocate: 2993allocate:
2698 ha->fw_dump = vmalloc(dump_size); 2994 if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
2699 if (!ha->fw_dump) { 2995 fw_dump = vmalloc(dump_size);
2700 ql_log(ql_log_warn, vha, 0x00c4, 2996 if (!fw_dump) {
2701 "Unable to allocate (%d KB) for firmware dump.\n", 2997 ql_log(ql_log_warn, vha, 0x00c4,
2702 dump_size / 1024); 2998 "Unable to allocate (%d KB) for firmware dump.\n",
2703 2999 dump_size / 1024);
2704 if (ha->fce) { 3000 } else {
2705 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 3001 if (ha->fw_dump)
2706 ha->fce_dma); 3002 vfree(ha->fw_dump);
2707 ha->fce = NULL; 3003 ha->fw_dump = fw_dump;
2708 ha->fce_dma = 0; 3004
2709 } 3005 ha->fw_dump_len = dump_size;
2710 3006 ql_dbg(ql_dbg_init, vha, 0x00c5,
2711 if (ha->eft) { 3007 "Allocated (%d KB) for firmware dump.\n",
2712 dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft, 3008 dump_size / 1024);
2713 ha->eft_dma); 3009
2714 ha->eft = NULL; 3010 if (IS_QLA27XX(ha))
2715 ha->eft_dma = 0; 3011 return;
3012
3013 ha->fw_dump->signature[0] = 'Q';
3014 ha->fw_dump->signature[1] = 'L';
3015 ha->fw_dump->signature[2] = 'G';
3016 ha->fw_dump->signature[3] = 'C';
3017 ha->fw_dump->version = htonl(1);
3018
3019 ha->fw_dump->fixed_size = htonl(fixed_size);
3020 ha->fw_dump->mem_size = htonl(mem_size);
3021 ha->fw_dump->req_q_size = htonl(req_q_size);
3022 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3023
3024 ha->fw_dump->eft_size = htonl(eft_size);
3025 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
3026 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
3027
3028 ha->fw_dump->header_size =
3029 htonl(offsetof(struct qla2xxx_fw_dump, isp));
2716 } 3030 }
2717 return;
2718 } 3031 }
2719 ha->fw_dump_len = dump_size;
2720 ql_dbg(ql_dbg_init, vha, 0x00c5,
2721 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
2722
2723 if (IS_QLA27XX(ha))
2724 return;
2725
2726 ha->fw_dump->signature[0] = 'Q';
2727 ha->fw_dump->signature[1] = 'L';
2728 ha->fw_dump->signature[2] = 'G';
2729 ha->fw_dump->signature[3] = 'C';
2730 ha->fw_dump->version = htonl(1);
2731
2732 ha->fw_dump->fixed_size = htonl(fixed_size);
2733 ha->fw_dump->mem_size = htonl(mem_size);
2734 ha->fw_dump->req_q_size = htonl(req_q_size);
2735 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
2736
2737 ha->fw_dump->eft_size = htonl(eft_size);
2738 ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
2739 ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
2740
2741 ha->fw_dump->header_size =
2742 htonl(offsetof(struct qla2xxx_fw_dump, isp));
2743} 3032}
2744 3033
2745static int 3034static int
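
The allocate path in the hunk above no longer throws away the FCE/EFT buffers when vmalloc() fails; instead it reallocates the dump buffer only when the computed size differs from ha->fw_dump_len, and frees the old buffer only after the new allocation succeeds, so a failure leaves the previous dump intact. A minimal userspace sketch of that reallocate-on-resize pattern (names here are illustrative, not the driver's):

    #include <stdlib.h>

    struct dump {
        void   *buf;
        size_t  len;
    };

    /* Replace the buffer only when the required size changes; on
     * allocation failure the old buffer (and its contents) survive. */
    static int dump_resize(struct dump *d, size_t want)
    {
        void *nbuf;

        if (d->buf && d->len == want)
            return 0;                /* nothing to do */

        nbuf = calloc(1, want);
        if (!nbuf)
            return -1;               /* keep the old buffer */

        free(d->buf);
        d->buf = nbuf;
        d->len = want;
        return 0;
    }
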
@@ -3065,9 +3354,12 @@ enable_82xx_npiv:
3065 if (rval != QLA_SUCCESS) 3354 if (rval != QLA_SUCCESS)
3066 goto failed; 3355 goto failed;
3067 3356
3068 if (!fw_major_version && ql2xallocfwdump 3357 if (!fw_major_version && !(IS_P3P_TYPE(ha)))
3069 && !(IS_P3P_TYPE(ha))) 3358 qla2x00_alloc_offload_mem(vha);
3359
3360 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
3070 qla2x00_alloc_fw_dump(vha); 3361 qla2x00_alloc_fw_dump(vha);
3362
3071 } else { 3363 } else {
3072 goto failed; 3364 goto failed;
3073 } 3365 }
@@ -3278,6 +3570,12 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
3278 ha->fw_options[2] |= BIT_4; 3570 ha->fw_options[2] |= BIT_4;
3279 else 3571 else
3280 ha->fw_options[2] &= ~BIT_4; 3572 ha->fw_options[2] &= ~BIT_4;
3573
 3574 /* Reserve 1/2 of emergency exchanges for ELS. */
3575 if (qla2xuseresexchforels)
3576 ha->fw_options[2] |= BIT_8;
3577 else
3578 ha->fw_options[2] &= ~BIT_8;
3281 } 3579 }
3282 3580
3283 ql_dbg(ql_dbg_init, vha, 0x00e8, 3581 ql_dbg(ql_dbg_init, vha, 0x00e8,
@@ -3671,6 +3969,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
3671 struct qla_hw_data *ha = vha->hw; 3969 struct qla_hw_data *ha = vha->hw;
3672 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 3970 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3673 port_id_t id; 3971 port_id_t id;
3972 unsigned long flags;
3674 3973
3675 /* Get host addresses. */ 3974 /* Get host addresses. */
3676 rval = qla2x00_get_adapter_id(vha, 3975 rval = qla2x00_get_adapter_id(vha,
@@ -3752,7 +4051,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
3752 id.b.area = area; 4051 id.b.area = area;
3753 id.b.al_pa = al_pa; 4052 id.b.al_pa = al_pa;
3754 id.b.rsvd_1 = 0; 4053 id.b.rsvd_1 = 0;
4054 spin_lock_irqsave(&ha->hardware_lock, flags);
3755 qlt_update_host_map(vha, id); 4055 qlt_update_host_map(vha, id);
4056 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3756 4057
3757 if (!vha->flags.init_done) 4058 if (!vha->flags.init_done)
3758 ql_log(ql_log_info, vha, 0x2010, 4059 ql_log(ql_log_info, vha, 0x2010,
@@ -4293,6 +4594,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
4293 4594
4294 } else if (ha->current_topology == ISP_CFG_N) { 4595 } else if (ha->current_topology == ISP_CFG_N) {
4295 clear_bit(RSCN_UPDATE, &flags); 4596 clear_bit(RSCN_UPDATE, &flags);
4597 if (ha->flags.rida_fmt2) {
4598 /* With Rida Format 2, the login is already triggered.
4599 * We know who is on the other side of the wire.
 4600 * No need to log in just to find out, or to drop into
 4601 * qla2x00_configure_local_loop().
4602 */
4603 clear_bit(LOCAL_LOOP_UPDATE, &flags);
4604 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
4605 } else {
4606 if (qla_tgt_mode_enabled(vha)) {
4607 /* allow the other side to start the login */
4608 clear_bit(LOCAL_LOOP_UPDATE, &flags);
4609 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
4610 }
4611 }
4296 } else if (ha->current_topology == ISP_CFG_NL) { 4612 } else if (ha->current_topology == ISP_CFG_NL) {
4297 clear_bit(RSCN_UPDATE, &flags); 4613 clear_bit(RSCN_UPDATE, &flags);
4298 set_bit(LOCAL_LOOP_UPDATE, &flags); 4614 set_bit(LOCAL_LOOP_UPDATE, &flags);
@@ -4521,6 +4837,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4521 (uint8_t *)ha->gid_list, 4837 (uint8_t *)ha->gid_list,
4522 entries * sizeof(struct gid_list_info)); 4838 entries * sizeof(struct gid_list_info));
4523 4839
4840 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4841 fcport->scan_state = QLA_FCPORT_SCAN;
4842 }
4843
4524 /* Allocate temporary fcport for any new fcports discovered. */ 4844 /* Allocate temporary fcport for any new fcports discovered. */
4525 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 4845 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
4526 if (new_fcport == NULL) { 4846 if (new_fcport == NULL) {
@@ -4531,22 +4851,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4531 } 4851 }
4532 new_fcport->flags &= ~FCF_FABRIC_DEVICE; 4852 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
4533 4853
4534 /*
4535 * Mark local devices that were present with FCF_DEVICE_LOST for now.
4536 */
4537 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4538 if (atomic_read(&fcport->state) == FCS_ONLINE &&
4539 fcport->port_type != FCT_BROADCAST &&
4540 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
4541
4542 ql_dbg(ql_dbg_disc, vha, 0x2096,
4543 "Marking port lost loop_id=0x%04x.\n",
4544 fcport->loop_id);
4545
4546 qla2x00_mark_device_lost(vha, fcport, 0, 0);
4547 }
4548 }
4549
 4550 /* Initiate N2N login. */ 4854
4551 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { 4855 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
4552 rval = qla24xx_n2n_handle_login(vha, new_fcport); 4856 rval = qla24xx_n2n_handle_login(vha, new_fcport);
@@ -4589,6 +4893,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4589 new_fcport->d_id.b.area = area; 4893 new_fcport->d_id.b.area = area;
4590 new_fcport->d_id.b.al_pa = al_pa; 4894 new_fcport->d_id.b.al_pa = al_pa;
4591 new_fcport->loop_id = loop_id; 4895 new_fcport->loop_id = loop_id;
4896 new_fcport->scan_state = QLA_FCPORT_FOUND;
4592 4897
4593 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 4898 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
4594 if (rval2 != QLA_SUCCESS) { 4899 if (rval2 != QLA_SUCCESS) {
@@ -4620,13 +4925,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4620 fcport->d_id.b24 = new_fcport->d_id.b24; 4925 fcport->d_id.b24 = new_fcport->d_id.b24;
4621 memcpy(fcport->node_name, new_fcport->node_name, 4926 memcpy(fcport->node_name, new_fcport->node_name,
4622 WWN_SIZE); 4927 WWN_SIZE);
4623 4928 fcport->scan_state = QLA_FCPORT_FOUND;
4624 if (!fcport->login_succ) {
4625 vha->fcport_count++;
4626 fcport->login_succ = 1;
4627 fcport->disc_state = DSC_LOGIN_COMPLETE;
4628 }
4629
4630 found++; 4929 found++;
4631 break; 4930 break;
4632 } 4931 }
@@ -4637,11 +4936,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4637 4936
4638 /* Allocate a new replacement fcport. */ 4937 /* Allocate a new replacement fcport. */
4639 fcport = new_fcport; 4938 fcport = new_fcport;
4640 if (!fcport->login_succ) {
4641 vha->fcport_count++;
4642 fcport->login_succ = 1;
4643 fcport->disc_state = DSC_LOGIN_COMPLETE;
4644 }
4645 4939
4646 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4940 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4647 4941
@@ -4662,11 +4956,38 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4662 /* Base iIDMA settings on HBA port speed. */ 4956 /* Base iIDMA settings on HBA port speed. */
4663 fcport->fp_speed = ha->link_data_rate; 4957 fcport->fp_speed = ha->link_data_rate;
4664 4958
4665 qla2x00_update_fcport(vha, fcport);
4666
4667 found_devs++; 4959 found_devs++;
4668 } 4960 }
4669 4961
4962 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4963 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4964 break;
4965
4966 if (fcport->scan_state == QLA_FCPORT_SCAN) {
4967 if ((qla_dual_mode_enabled(vha) ||
4968 qla_ini_mode_enabled(vha)) &&
4969 atomic_read(&fcport->state) == FCS_ONLINE) {
4970 qla2x00_mark_device_lost(vha, fcport,
4971 ql2xplogiabsentdevice, 0);
4972 if (fcport->loop_id != FC_NO_LOOP_ID &&
4973 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
4974 fcport->port_type != FCT_INITIATOR &&
4975 fcport->port_type != FCT_BROADCAST) {
4976 ql_dbg(ql_dbg_disc, vha, 0x20f0,
4977 "%s %d %8phC post del sess\n",
4978 __func__, __LINE__,
4979 fcport->port_name);
4980
4981 qlt_schedule_sess_for_deletion(fcport);
4982 continue;
4983 }
4984 }
4985 }
4986
4987 if (fcport->scan_state == QLA_FCPORT_FOUND)
4988 qla24xx_fcport_handle_login(vha, fcport);
4989 }
4990
4670cleanup_allocation: 4991cleanup_allocation:
4671 kfree(new_fcport); 4992 kfree(new_fcport);
4672 4993
@@ -4920,9 +5241,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
4920 } 5241 }
4921 } 5242 }
4922 5243
4923 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4924 fcport->scan_state = QLA_FCPORT_SCAN;
4925 }
4926 5244
4927 /* Mark the time right before querying FW for connected ports. 5245 /* Mark the time right before querying FW for connected ports.
4928 * This process is long, asynchronous and by the time it's done, 5246 * This process is long, asynchronous and by the time it's done,
@@ -4932,7 +5250,17 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
4932 * will be newer than discovery_gen. */ 5250 * will be newer than discovery_gen. */
4933 qlt_do_generation_tick(vha, &discovery_gen); 5251 qlt_do_generation_tick(vha, &discovery_gen);
4934 5252
4935 rval = qla2x00_find_all_fabric_devs(vha); 5253 if (USE_ASYNC_SCAN(ha)) {
5254 rval = QLA_SUCCESS;
5255 rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
5256 if (rval)
5257 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5258 } else {
5259 list_for_each_entry(fcport, &vha->vp_fcports, list)
5260 fcport->scan_state = QLA_FCPORT_SCAN;
5261
5262 rval = qla2x00_find_all_fabric_devs(vha);
5263 }
4936 if (rval != QLA_SUCCESS) 5264 if (rval != QLA_SUCCESS)
4937 break; 5265 break;
4938 } while (0); 5266 } while (0);
@@ -5237,9 +5565,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
5237 "%s %d %8phC post del sess\n", 5565 "%s %d %8phC post del sess\n",
5238 __func__, __LINE__, 5566 __func__, __LINE__,
5239 fcport->port_name); 5567 fcport->port_name);
5240 5568 qlt_schedule_sess_for_deletion(fcport);
5241 qlt_schedule_sess_for_deletion_lock
5242 (fcport);
5243 continue; 5569 continue;
5244 } 5570 }
5245 } 5571 }
@@ -5974,6 +6300,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
5974 if (!(IS_P3P_TYPE(ha))) 6300 if (!(IS_P3P_TYPE(ha)))
5975 ha->isp_ops->reset_chip(vha); 6301 ha->isp_ops->reset_chip(vha);
5976 6302
6303 SAVE_TOPO(ha);
6304 ha->flags.rida_fmt2 = 0;
5977 ha->flags.n2n_ae = 0; 6305 ha->flags.n2n_ae = 0;
5978 ha->flags.lip_ae = 0; 6306 ha->flags.lip_ae = 0;
5979 ha->current_topology = 0; 6307 ha->current_topology = 0;
@@ -8173,9 +8501,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
8173 int ret = QLA_FUNCTION_FAILED; 8501 int ret = QLA_FUNCTION_FAILED;
8174 struct qla_hw_data *ha = qpair->hw; 8502 struct qla_hw_data *ha = qpair->hw;
8175 8503
8176 if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
8177 goto fail;
8178
8179 qpair->delete_in_progress = 1; 8504 qpair->delete_in_progress = 1;
8180 while (atomic_read(&qpair->ref_count)) 8505 while (atomic_read(&qpair->ref_count))
8181 msleep(500); 8506 msleep(500);
@@ -8183,6 +8508,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
8183 ret = qla25xx_delete_req_que(vha, qpair->req); 8508 ret = qla25xx_delete_req_que(vha, qpair->req);
8184 if (ret != QLA_SUCCESS) 8509 if (ret != QLA_SUCCESS)
8185 goto fail; 8510 goto fail;
8511
8186 ret = qla25xx_delete_rsp_que(vha, qpair->rsp); 8512 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
8187 if (ret != QLA_SUCCESS) 8513 if (ret != QLA_SUCCESS)
8188 goto fail; 8514 goto fail;
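
The guard dropped from qla2xxx_delete_qpair() above does not disappear: the qla_mid.c hunks later in this patch move the validity check into qla25xx_delete_req_que()/qla25xx_delete_rsp_que(), where each callee verifies its own qpairs_req_created/qpairs_rsp_created flag. A small sketch of that check-in-callee shape (stand-in types, not driver code):

    #include <stdbool.h>
    #include <stddef.h>

    struct queue { int id; };

    /* Callers no longer pre-validate; each delete helper verifies
     * that its own queue was actually created before tearing down. */
    static int delete_queue(struct queue *q, bool created)
    {
        if (!q || !created)
            return 0;            /* nothing to tear down */
        q->id = -1;              /* stand-in for the real teardown */
        return 0;
    }
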
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 17d2c20f1f75..4d32426393c7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -273,6 +273,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
273 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; 273 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
274 add_timer(&sp->u.iocb_cmd.timer); 274 add_timer(&sp->u.iocb_cmd.timer);
275 sp->free = qla2x00_sp_free; 275 sp->free = qla2x00_sp_free;
276 init_completion(&sp->comp);
276 if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD)) 277 if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
277 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); 278 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
278 if (sp->type == SRB_ELS_DCMD) 279 if (sp->type == SRB_ELS_DCMD)
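
With init_completion(&sp->comp) now done for every SRB in qla2x00_init_timer(), a submitter can sleep on the SRB and be woken from its ->done() callback, which is exactly how the reworked qla24xx_control_vp() below uses it. A standalone sketch of that submit/wait/complete handshake, with pthreads standing in for the kernel's completion API:

    #include <pthread.h>

    /* Userspace stand-in for struct completion. */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
    };

    static void init_completion(struct completion *c)
    {
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
    }

    static void complete(struct completion *c)    /* called from done() */
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }
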
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d810a447cb4a..1b62e943ec49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2158,7 +2158,9 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2158skip_cmd_array: 2158skip_cmd_array:
2159 /* Check for room on request queue. */ 2159 /* Check for room on request queue. */
2160 if (req->cnt < req_cnt + 2) { 2160 if (req->cnt < req_cnt + 2) {
2161 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) 2161 if (qpair->use_shadow_reg)
2162 cnt = *req->out_ptr;
2163 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2162 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); 2164 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2163 else if (IS_P3P_TYPE(ha)) 2165 else if (IS_P3P_TYPE(ha))
2164 cnt = RD_REG_DWORD(&reg->isp82.req_q_out); 2166 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2392,26 +2394,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
2392 srb_t *sp = data; 2394 srb_t *sp = data;
2393 fc_port_t *fcport = sp->fcport; 2395 fc_port_t *fcport = sp->fcport;
2394 struct scsi_qla_host *vha = sp->vha; 2396 struct scsi_qla_host *vha = sp->vha;
2395 struct qla_hw_data *ha = vha->hw;
2396 struct srb_iocb *lio = &sp->u.iocb_cmd; 2397 struct srb_iocb *lio = &sp->u.iocb_cmd;
2397 unsigned long flags = 0;
2398 2398
2399 ql_dbg(ql_dbg_io, vha, 0x3069, 2399 ql_dbg(ql_dbg_io, vha, 0x3069,
2400 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", 2400 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2401 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, 2401 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2402 fcport->d_id.b.al_pa); 2402 fcport->d_id.b.al_pa);
2403 2403
2404 /* Abort the exchange */
2405 spin_lock_irqsave(&ha->hardware_lock, flags);
2406 if (ha->isp_ops->abort_command(sp)) {
2407 ql_dbg(ql_dbg_io, vha, 0x3070,
2408 "mbx abort_command failed.\n");
2409 } else {
2410 ql_dbg(ql_dbg_io, vha, 0x3071,
2411 "mbx abort_command success.\n");
2412 }
2413 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2414
2415 complete(&lio->u.els_logo.comp); 2404 complete(&lio->u.els_logo.comp);
2416} 2405}
2417 2406
@@ -2631,7 +2620,7 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2631 struct scsi_qla_host *vha = sp->vha; 2620 struct scsi_qla_host *vha = sp->vha;
2632 2621
2633 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072, 2622 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
2634 "%s ELS hdl=%x, portid=%06x done %8pC\n", 2623 "%s ELS hdl=%x, portid=%06x done %8phC\n",
2635 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); 2624 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
2636 2625
2637 complete(&lio->u.els_plogi.comp); 2626 complete(&lio->u.els_plogi.comp);
@@ -3286,7 +3275,9 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3286 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 3275 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3287 abt_iocb->entry_type = ABORT_IOCB_TYPE; 3276 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3288 abt_iocb->entry_count = 1; 3277 abt_iocb->entry_count = 1;
3289 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 3278 abt_iocb->handle =
3279 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3280 aio->u.abt.cmd_hndl));
3290 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3281 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3291 abt_iocb->handle_to_abort = 3282 abt_iocb->handle_to_abort =
3292 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); 3283 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
@@ -3294,7 +3285,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3294 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 3285 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3295 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 3286 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3296 abt_iocb->vp_index = vha->vp_idx; 3287 abt_iocb->vp_index = vha->vp_idx;
3297 abt_iocb->req_que_no = cpu_to_le16(req->id); 3288 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3298 /* Send the command to the firmware */ 3289 /* Send the command to the firmware */
3299 wmb(); 3290 wmb();
3300} 3291}
@@ -3381,6 +3372,40 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3381 return rval; 3372 return rval;
3382} 3373}
3383 3374
3375static void
3376qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3377{
3378 int map, pos;
3379
3380 vce->entry_type = VP_CTRL_IOCB_TYPE;
3381 vce->handle = sp->handle;
3382 vce->entry_count = 1;
3383 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3384 vce->vp_count = cpu_to_le16(1);
3385
3386 /*
 3387 * The index map in firmware starts at 1, so decrement the index;
 3388 * this is OK because index 0 is never used.
3389 */
3390 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3391 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3392 vce->vp_idx_map[map] |= 1 << pos;
3393}
3394
3395static void
3396qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3397{
3398 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3399 logio->control_flags =
3400 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3401
3402 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3403 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3404 logio->port_id[1] = sp->fcport->d_id.b.area;
3405 logio->port_id[2] = sp->fcport->d_id.b.domain;
3406 logio->vp_index = sp->fcport->vha->vp_idx;
3407}
3408
3384int 3409int
3385qla2x00_start_sp(srb_t *sp) 3410qla2x00_start_sp(srb_t *sp)
3386{ 3411{
@@ -3459,6 +3484,12 @@ qla2x00_start_sp(srb_t *sp)
3459 case SRB_NACK_LOGO: 3484 case SRB_NACK_LOGO:
3460 qla2x00_send_notify_ack_iocb(sp, pkt); 3485 qla2x00_send_notify_ack_iocb(sp, pkt);
3461 break; 3486 break;
3487 case SRB_CTRL_VP:
3488 qla25xx_ctrlvp_iocb(sp, pkt);
3489 break;
3490 case SRB_PRLO_CMD:
3491 qla24xx_prlo_iocb(sp, pkt);
3492 break;
3462 default: 3493 default:
3463 break; 3494 break;
3464 } 3495 }
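
In qla25xx_ctrlvp_iocb() above, the map/pos arithmetic packs VP index N into bit (N - 1) of the vp_idx_map byte array, because firmware VP numbering starts at 1 while the bitmap starts at bit 0. A tiny check of that math, compilable as-is:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char vp_idx_map[16] = { 0 };
        int vp_index = 10;               /* firmware indices start at 1 */
        int map = (vp_index - 1) / 8;    /* byte 1 */
        int pos = (vp_index - 1) & 7;    /* bit 1  */

        vp_idx_map[map] |= 1 << pos;
        assert(vp_idx_map[1] == 0x02);
        printf("vp %d -> byte %d, bit %d\n", vp_index, map, pos);
        return 0;
    }
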
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2fd79129bb2a..14109d86c3f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -809,6 +809,7 @@ skip_rio:
809 break; 809 break;
810 810
811 case MBA_LOOP_DOWN: /* Loop Down Event */ 811 case MBA_LOOP_DOWN: /* Loop Down Event */
812 SAVE_TOPO(ha);
812 ha->flags.n2n_ae = 0; 813 ha->flags.n2n_ae = 0;
813 ha->flags.lip_ae = 0; 814 ha->flags.lip_ae = 0;
814 ha->current_topology = 0; 815 ha->current_topology = 0;
@@ -922,7 +923,6 @@ skip_rio:
922 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 923 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
923 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 924 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
924 925
925 ha->flags.gpsc_supported = 1;
926 vha->flags.management_server_logged_in = 0; 926 vha->flags.management_server_logged_in = 0;
927 break; 927 break;
928 928
@@ -1009,7 +1009,7 @@ skip_rio:
1009 if (qla_ini_mode_enabled(vha)) { 1009 if (qla_ini_mode_enabled(vha)) {
1010 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1010 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1011 fcport->logout_on_delete = 0; 1011 fcport->logout_on_delete = 0;
1012 qlt_schedule_sess_for_deletion_lock(fcport); 1012 qlt_schedule_sess_for_deletion(fcport);
1013 } 1013 }
1014 break; 1014 break;
1015 1015
@@ -1059,8 +1059,7 @@ global_port_update:
1059 * Mark all devices as missing so we will login again. 1059 * Mark all devices as missing so we will login again.
1060 */ 1060 */
1061 atomic_set(&vha->loop_state, LOOP_UP); 1061 atomic_set(&vha->loop_state, LOOP_UP);
1062 1062 vha->scan.scan_retry = 0;
1063 qla2x00_mark_all_devices_lost(vha, 1);
1064 1063
1065 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1064 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1066 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1065 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
@@ -1202,6 +1201,7 @@ global_port_update:
1202 qla2xxx_wake_dpc(vha); 1201 qla2xxx_wake_dpc(vha);
1203 } 1202 }
1204 } 1203 }
1204 /* fall through */
1205 case MBA_IDC_COMPLETE: 1205 case MBA_IDC_COMPLETE:
1206 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1206 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1207 complete(&ha->lb_portup_comp); 1207 complete(&ha->lb_portup_comp);
@@ -1574,7 +1574,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1574 /* borrowing sts_entry_24xx.comp_status. 1574 /* borrowing sts_entry_24xx.comp_status.
1575 same location as ct_entry_24xx.comp_status 1575 same location as ct_entry_24xx.comp_status
1576 */ 1576 */
1577 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 1577 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
1578 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1578 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
1579 sp->name); 1579 sp->name);
1580 sp->done(sp, res); 1580 sp->done(sp, res);
@@ -1769,7 +1769,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1769 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1769 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1770 qla2xxx_wake_dpc(vha); 1770 qla2xxx_wake_dpc(vha);
1771 } 1771 }
1772 /* drop through */ 1772 /* fall through */
1773 default: 1773 default:
1774 data[0] = MBS_COMMAND_ERROR; 1774 data[0] = MBS_COMMAND_ERROR;
1775 break; 1775 break;
@@ -1936,6 +1936,37 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
1936 sp->done(sp, ret); 1936 sp->done(sp, ret);
1937} 1937}
1938 1938
1939static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
1940 struct vp_ctrl_entry_24xx *vce)
1941{
1942 const char func[] = "CTRLVP-IOCB";
1943 srb_t *sp;
1944 int rval = QLA_SUCCESS;
1945
1946 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
1947 if (!sp)
1948 return;
1949
1950 if (vce->entry_status != 0) {
1951 ql_dbg(ql_dbg_vport, vha, 0x10c4,
1952 "%s: Failed to complete IOCB -- error status (%x)\n",
1953 sp->name, vce->entry_status);
1954 rval = QLA_FUNCTION_FAILED;
1955 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
1956 ql_dbg(ql_dbg_vport, vha, 0x10c5,
1957 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
1958 sp->name, le16_to_cpu(vce->comp_status),
1959 le16_to_cpu(vce->vp_idx_failed));
1960 rval = QLA_FUNCTION_FAILED;
1961 } else {
1962 ql_dbg(ql_dbg_vport, vha, 0x10c6,
1963 "Done %s.\n", __func__);
1964 }
1965
1966 sp->rc = rval;
1967 sp->done(sp, rval);
1968}
1969
1939/** 1970/**
1940 * qla2x00_process_response_queue() - Process response queue entries. 1971 * qla2x00_process_response_queue() - Process response queue entries.
1941 * @ha: SCSI driver HA context 1972 * @ha: SCSI driver HA context
@@ -2369,7 +2400,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2369 int res = 0; 2400 int res = 0;
2370 uint16_t state_flags = 0; 2401 uint16_t state_flags = 0;
2371 uint16_t retry_delay = 0; 2402 uint16_t retry_delay = 0;
2372 uint8_t no_logout = 0;
2373 2403
2374 sts = (sts_entry_t *) pkt; 2404 sts = (sts_entry_t *) pkt;
2375 sts24 = (struct sts_entry_24xx *) pkt; 2405 sts24 = (struct sts_entry_24xx *) pkt;
@@ -2640,7 +2670,6 @@ check_scsi_status:
2640 break; 2670 break;
2641 2671
2642 case CS_PORT_LOGGED_OUT: 2672 case CS_PORT_LOGGED_OUT:
2643 no_logout = 1;
2644 case CS_PORT_CONFIG_CHG: 2673 case CS_PORT_CONFIG_CHG:
2645 case CS_PORT_BUSY: 2674 case CS_PORT_BUSY:
2646 case CS_INCOMPLETE: 2675 case CS_INCOMPLETE:
@@ -2671,11 +2700,8 @@ check_scsi_status:
2671 port_state_str[atomic_read(&fcport->state)], 2700 port_state_str[atomic_read(&fcport->state)],
2672 comp_status); 2701 comp_status);
2673 2702
2674 if (no_logout)
2675 fcport->logout_on_delete = 0;
2676
2677 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2703 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2678 qlt_schedule_sess_for_deletion_lock(fcport); 2704 qlt_schedule_sess_for_deletion(fcport);
2679 } 2705 }
2680 2706
2681 break; 2707 break;
@@ -2972,9 +2998,9 @@ process_err:
2972 (response_t *)pkt); 2998 (response_t *)pkt);
2973 break; 2999 break;
2974 } else { 3000 } else {
2975 /* drop through */
2976 qlt_24xx_process_atio_queue(vha, 1); 3001 qlt_24xx_process_atio_queue(vha, 1);
2977 } 3002 }
3003 /* fall through */
2978 case ABTS_RESP_24XX: 3004 case ABTS_RESP_24XX:
2979 case CTIO_TYPE7: 3005 case CTIO_TYPE7:
2980 case CTIO_CRC2: 3006 case CTIO_CRC2:
@@ -3005,6 +3031,10 @@ process_err:
3005 qla24xx_mbx_iocb_entry(vha, rsp->req, 3031 qla24xx_mbx_iocb_entry(vha, rsp->req,
3006 (struct mbx_24xx_entry *)pkt); 3032 (struct mbx_24xx_entry *)pkt);
3007 break; 3033 break;
3034 case VP_CTRL_IOCB_TYPE:
3035 qla_ctrlvp_completed(vha, rsp->req,
3036 (struct vp_ctrl_entry_24xx *)pkt);
3037 break;
3008 default: 3038 default:
3009 /* Type Not Supported. */ 3039 /* Type Not Supported. */
3010 ql_dbg(ql_dbg_async, vha, 0x5042, 3040 ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cb717d47339f..7397aeddd96c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -17,6 +17,7 @@ static struct mb_cmd_name {
17 {MBC_GET_PORT_DATABASE, "GPDB"}, 17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"}, 18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"}, 19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
20}; 21};
21 22
22static const char *mb_to_str(uint16_t cmd) 23static const char *mb_to_str(uint16_t cmd)
@@ -3731,6 +3732,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3731 unsigned long flags; 3732 unsigned long flags;
3732 int found; 3733 int found;
3733 port_id_t id; 3734 port_id_t id;
3735 struct fc_port *fcport;
3734 3736
3735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3737 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3736 "Entered %s.\n", __func__); 3738 "Entered %s.\n", __func__);
@@ -3753,7 +3755,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3753 "Primary port id %02x%02x%02x.\n", 3755 "Primary port id %02x%02x%02x.\n",
3754 rptid_entry->port_id[2], rptid_entry->port_id[1], 3756 rptid_entry->port_id[2], rptid_entry->port_id[1],
3755 rptid_entry->port_id[0]); 3757 rptid_entry->port_id[0]);
3756 3758 ha->current_topology = ISP_CFG_NL;
3757 qlt_update_host_map(vha, id); 3759 qlt_update_host_map(vha, id);
3758 3760
3759 } else if (rptid_entry->format == 1) { 3761 } else if (rptid_entry->format == 1) {
@@ -3797,6 +3799,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3797 return; 3799 return;
3798 } 3800 }
3799 3801
3802 ha->flags.gpsc_supported = 1;
3803 ha->current_topology = ISP_CFG_F;
3800 /* buffer to buffer credit flag */ 3804 /* buffer to buffer credit flag */
3801 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 3805 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3802 3806
@@ -3862,6 +3866,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3862 rptid_entry->u.f2.port_name); 3866 rptid_entry->u.f2.port_name);
3863 3867
3864 /* N2N. direct connect */ 3868 /* N2N. direct connect */
3869 ha->current_topology = ISP_CFG_N;
3870 ha->flags.rida_fmt2 = 1;
3865 vha->d_id.b.domain = rptid_entry->port_id[2]; 3871 vha->d_id.b.domain = rptid_entry->port_id[2];
3866 vha->d_id.b.area = rptid_entry->port_id[1]; 3872 vha->d_id.b.area = rptid_entry->port_id[1];
3867 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 3873 vha->d_id.b.al_pa = rptid_entry->port_id[0];
@@ -3869,6 +3875,40 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3869 spin_lock_irqsave(&ha->vport_slock, flags); 3875 spin_lock_irqsave(&ha->vport_slock, flags);
3870 qlt_update_vp_map(vha, SET_AL_PA); 3876 qlt_update_vp_map(vha, SET_AL_PA);
3871 spin_unlock_irqrestore(&ha->vport_slock, flags); 3877 spin_unlock_irqrestore(&ha->vport_slock, flags);
3878
3879 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3880 fcport->scan_state = QLA_FCPORT_SCAN;
3881 }
3882
3883 fcport = qla2x00_find_fcport_by_wwpn(vha,
3884 rptid_entry->u.f2.port_name, 1);
3885
3886 if (fcport) {
3887 fcport->plogi_nack_done_deadline = jiffies + HZ;
3888 fcport->scan_state = QLA_FCPORT_FOUND;
3889 switch (fcport->disc_state) {
3890 case DSC_DELETED:
3891 ql_dbg(ql_dbg_disc, vha, 0x210d,
3892 "%s %d %8phC login\n",
3893 __func__, __LINE__, fcport->port_name);
3894 qla24xx_fcport_handle_login(vha, fcport);
3895 break;
3896 case DSC_DELETE_PEND:
3897 break;
3898 default:
3899 qlt_schedule_sess_for_deletion(fcport);
3900 break;
3901 }
3902 } else {
3903 id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
3904 id.b.area = rptid_entry->u.f2.remote_nport_id[1];
3905 id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
3906 qla24xx_post_newsess_work(vha, &id,
3907 rptid_entry->u.f2.port_name,
3908 rptid_entry->u.f2.node_name,
3909 NULL,
3910 FC4_TYPE_UNKNOWN);
3911 }
3872 } 3912 }
3873} 3913}
3874 3914
@@ -3945,83 +3985,6 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3945} 3985}
3946 3986
3947/* 3987/*
3948 * qla24xx_control_vp
3949 * Enable a virtual port for given host
3950 *
3951 * Input:
3952 * ha = adapter block pointer.
3953 * vhba = virtual adapter (unused)
3954 * index = index number for enabled VP
3955 *
3956 * Returns:
3957 * qla2xxx local function return status code.
3958 *
3959 * Context:
3960 * Kernel context.
3961 */
3962int
3963qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3964{
3965 int rval;
3966 int map, pos;
3967 struct vp_ctrl_entry_24xx *vce;
3968 dma_addr_t vce_dma;
3969 struct qla_hw_data *ha = vha->hw;
3970 int vp_index = vha->vp_idx;
3971 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3972
3973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3974 "Entered %s enabling index %d.\n", __func__, vp_index);
3975
3976 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3977 return QLA_PARAMETER_ERROR;
3978
3979 vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3980 if (!vce) {
3981 ql_log(ql_log_warn, vha, 0x10c2,
3982 "Failed to allocate VP control IOCB.\n");
3983 return QLA_MEMORY_ALLOC_FAILED;
3984 }
3985
3986 vce->entry_type = VP_CTRL_IOCB_TYPE;
3987 vce->entry_count = 1;
3988 vce->command = cpu_to_le16(cmd);
3989 vce->vp_count = cpu_to_le16(1);
3990
3991 /* index map in firmware starts with 1; decrement index
3992 * this is ok as we never use index 0
3993 */
3994 map = (vp_index - 1) / 8;
3995 pos = (vp_index - 1) & 7;
3996 mutex_lock(&ha->vport_lock);
3997 vce->vp_idx_map[map] |= 1 << pos;
3998 mutex_unlock(&ha->vport_lock);
3999
4000 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
4001 if (rval != QLA_SUCCESS) {
4002 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
4003 "Failed to issue VP control IOCB (%x).\n", rval);
4004 } else if (vce->entry_status != 0) {
4005 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
4006 "Failed to complete IOCB -- error status (%x).\n",
4007 vce->entry_status);
4008 rval = QLA_FUNCTION_FAILED;
4009 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
4010 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
4011 "Failed to complete IOCB -- completion status (%x).\n",
4012 le16_to_cpu(vce->comp_status));
4013 rval = QLA_FUNCTION_FAILED;
4014 } else {
4015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
4016 "Done %s.\n", __func__);
4017 }
4018
4019 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
4020
4021 return rval;
4022}
4023
4024/*
4025 * qla2x00_send_change_request 3988 * qla2x00_send_change_request
4026 * Receive or disable RSCN request from fabric controller 3989 * Receive or disable RSCN request from fabric controller
4027 * 3990 *
@@ -6160,8 +6123,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6160 } 6123 }
6161 6124
6162 /* Check for logged in state. */ 6125 /* Check for logged in state. */
6163 if (current_login_state != PDS_PRLI_COMPLETE && 6126 if (current_login_state != PDS_PRLI_COMPLETE) {
6164 last_login_state != PDS_PRLI_COMPLETE) {
6165 ql_dbg(ql_dbg_mbx, vha, 0x119a, 6127 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6166 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 6128 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6167 current_login_state, last_login_state, fcport->loop_id); 6129 current_login_state, last_login_state, fcport->loop_id);
@@ -6350,3 +6312,32 @@ qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6350 6312
6351 return rval; 6313 return rval;
6352} 6314}
6315
6316int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6317 uint16_t *out_mb, int out_mb_sz)
6318{
6319 int rval = QLA_FUNCTION_FAILED;
6320 mbx_cmd_t mc;
6321
6322 if (!vha->hw->flags.fw_started)
6323 goto done;
6324
6325 memset(&mc, 0, sizeof(mc));
6326 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6327
6328 rval = qla24xx_send_mb_cmd(vha, &mc);
6329 if (rval != QLA_SUCCESS) {
6330 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6331 "%s: fail\n", __func__);
6332 } else {
6333 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6334 memcpy(out_mb, mc.mb, out_mb_sz);
6335 else
6336 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6337
6338 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6339 "%s: done\n", __func__);
6340 }
6341done:
6342 return rval;
6343}
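
qla24xx_res_count_wait() above caps the copy-out at SIZEOF_IOCB_MB_REG, so a caller may pass a buffer shorter than the full mailbox register set. A hedged call-site sketch; which mailbox word carries which count is firmware-specific and not shown in this diff:

    uint16_t mb[8] = { 0 };

    if (qla24xx_res_count_wait(vha, mb, sizeof(mb)) == QLA_SUCCESS) {
        /* mb[] now holds the firmware resource counts; decode per
         * the firmware interface spec, which this patch does not show. */
    }
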
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index bd9f14bf7ac2..e965b16f21e3 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -50,10 +50,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
50 50
51 spin_lock_irqsave(&ha->vport_slock, flags); 51 spin_lock_irqsave(&ha->vport_slock, flags);
52 list_add_tail(&vha->list, &ha->vp_list); 52 list_add_tail(&vha->list, &ha->vp_list);
53 spin_unlock_irqrestore(&ha->vport_slock, flags);
53 54
55 spin_lock_irqsave(&ha->hardware_lock, flags);
54 qlt_update_vp_map(vha, SET_VP_IDX); 56 qlt_update_vp_map(vha, SET_VP_IDX);
55 57 spin_unlock_irqrestore(&ha->hardware_lock, flags);
56 spin_unlock_irqrestore(&ha->vport_slock, flags);
57 58
58 mutex_unlock(&ha->vport_lock); 59 mutex_unlock(&ha->vport_lock);
59 return vp_id; 60 return vp_id;
@@ -158,9 +159,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
158 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 159 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
159 160
160 /* Remove port id from vp target map */ 161 /* Remove port id from vp target map */
161 spin_lock_irqsave(&vha->hw->vport_slock, flags); 162 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
162 qlt_update_vp_map(vha, RESET_AL_PA); 163 qlt_update_vp_map(vha, RESET_AL_PA);
163 spin_unlock_irqrestore(&vha->hw->vport_slock, flags); 164 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
164 165
165 qla2x00_mark_vp_devices_dead(vha); 166 qla2x00_mark_vp_devices_dead(vha);
166 atomic_set(&vha->vp_state, VP_FAILED); 167 atomic_set(&vha->vp_state, VP_FAILED);
@@ -264,13 +265,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
264 case MBA_LIP_RESET: 265 case MBA_LIP_RESET:
265 case MBA_POINT_TO_POINT: 266 case MBA_POINT_TO_POINT:
266 case MBA_CHG_IN_CONNECTION: 267 case MBA_CHG_IN_CONNECTION:
267 case MBA_PORT_UPDATE:
268 case MBA_RSCN_UPDATE:
269 ql_dbg(ql_dbg_async, vha, 0x5024, 268 ql_dbg(ql_dbg_async, vha, 0x5024,
270 "Async_event for VP[%d], mb=0x%x vha=%p.\n", 269 "Async_event for VP[%d], mb=0x%x vha=%p.\n",
271 i, *mb, vha); 270 i, *mb, vha);
272 qla2x00_async_event(vha, rsp, mb); 271 qla2x00_async_event(vha, rsp, mb);
273 break; 272 break;
273 case MBA_PORT_UPDATE:
274 case MBA_RSCN_UPDATE:
275 if ((mb[3] & 0xff) == vha->vp_idx) {
276 ql_dbg(ql_dbg_async, vha, 0x5024,
277 "Async_event for VP[%d], mb=0x%x vha=%p\n",
278 i, *mb, vha);
279 qla2x00_async_event(vha, rsp, mb);
280 }
281 break;
274 } 282 }
275 283
276 spin_lock_irqsave(&ha->vport_slock, flags); 284 spin_lock_irqsave(&ha->vport_slock, flags);
@@ -319,8 +327,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
319 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012, 327 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
320 "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags); 328 "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
321 329
322 qla2x00_do_work(vha);
323
324 /* Check if Fw is ready to configure VP first */ 330 /* Check if Fw is ready to configure VP first */
325 if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) { 331 if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
326 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 332 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
@@ -343,15 +349,19 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
343 "FCPort update end.\n"); 349 "FCPort update end.\n");
344 } 350 }
345 351
346 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && 352 if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
347 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && 353 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
348 atomic_read(&vha->loop_state) != LOOP_DOWN) { 354 atomic_read(&vha->loop_state) != LOOP_DOWN) {
349 355
350 ql_dbg(ql_dbg_dpc, vha, 0x4018, 356 if (!vha->relogin_jif ||
351 "Relogin needed scheduled.\n"); 357 time_after_eq(jiffies, vha->relogin_jif)) {
352 qla2x00_relogin(vha); 358 vha->relogin_jif = jiffies + HZ;
353 ql_dbg(ql_dbg_dpc, vha, 0x4019, 359 clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
354 "Relogin needed end.\n"); 360
361 ql_dbg(ql_dbg_dpc, vha, 0x4018,
362 "Relogin needed scheduled.\n");
363 qla24xx_post_relogin_work(vha);
364 }
355 } 365 }
356 366
357 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && 367 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -475,7 +485,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
475 "Couldn't allocate vp_id.\n"); 485 "Couldn't allocate vp_id.\n");
476 goto create_vhost_failed; 486 goto create_vhost_failed;
477 } 487 }
478 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 488 vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
479 489
480 vha->dpc_flags = 0L; 490 vha->dpc_flags = 0L;
481 491
@@ -569,14 +579,16 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
569int 579int
570qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) 580qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
571{ 581{
572 int ret = -1; 582 int ret = QLA_SUCCESS;
573 583
574 if (req) { 584 if (req && vha->flags.qpairs_req_created) {
575 req->options |= BIT_0; 585 req->options |= BIT_0;
576 ret = qla25xx_init_req_que(vha, req); 586 ret = qla25xx_init_req_que(vha, req);
577 } 587 if (ret != QLA_SUCCESS)
578 if (ret == QLA_SUCCESS) 588 return QLA_FUNCTION_FAILED;
589
579 qla25xx_free_req_que(vha, req); 590 qla25xx_free_req_que(vha, req);
591 }
580 592
581 return ret; 593 return ret;
582} 594}
@@ -584,14 +596,16 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
584int 596int
585qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 597qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
586{ 598{
587 int ret = -1; 599 int ret = QLA_SUCCESS;
588 600
589 if (rsp) { 601 if (rsp && vha->flags.qpairs_rsp_created) {
590 rsp->options |= BIT_0; 602 rsp->options |= BIT_0;
591 ret = qla25xx_init_rsp_que(vha, rsp); 603 ret = qla25xx_init_rsp_que(vha, rsp);
592 } 604 if (ret != QLA_SUCCESS)
593 if (ret == QLA_SUCCESS) 605 return QLA_FUNCTION_FAILED;
606
594 qla25xx_free_rsp_que(vha, rsp); 607 qla25xx_free_rsp_que(vha, rsp);
608 }
595 609
596 return ret; 610 return ret;
597} 611}
@@ -884,3 +898,79 @@ que_failed:
884failed: 898failed:
885 return 0; 899 return 0;
886} 900}
901
902static void qla_ctrlvp_sp_done(void *s, int res)
903{
904 struct srb *sp = s;
905
906 complete(&sp->comp);
907 /* don't free sp here. Let the caller do the free */
908}
909
910/**
911 * qla24xx_control_vp() - Enable a virtual port for given host
912 * @vha: adapter block pointer
913 * @cmd: command type to be sent for enable virtual port
914 *
915 * Return: qla2xxx local function return status code.
916 */
917int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
918{
919 int rval = QLA_MEMORY_ALLOC_FAILED;
920 struct qla_hw_data *ha = vha->hw;
921 int vp_index = vha->vp_idx;
922 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
923 srb_t *sp;
924
925 ql_dbg(ql_dbg_vport, vha, 0x10c1,
926 "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);
927
928 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
929 return QLA_PARAMETER_ERROR;
930
931 sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
932 if (!sp)
933 goto done;
934
935 sp->type = SRB_CTRL_VP;
936 sp->name = "ctrl_vp";
937 sp->done = qla_ctrlvp_sp_done;
938 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
939 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
940 sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
941 sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
942
943 rval = qla2x00_start_sp(sp);
944 if (rval != QLA_SUCCESS) {
945 ql_dbg(ql_dbg_async, vha, 0xffff,
946 "%s: %s Failed submission. %x.\n",
947 __func__, sp->name, rval);
948 goto done_free_sp;
949 }
950
951 ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
952 sp->name, sp->handle);
953
954 wait_for_completion(&sp->comp);
955 rval = sp->rc;
956 switch (rval) {
957 case QLA_FUNCTION_TIMEOUT:
958 ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
959 __func__, sp->name, rval);
960 break;
961 case QLA_SUCCESS:
962 ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
963 __func__, sp->name);
964 goto done_free_sp;
965 default:
966 ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
967 __func__, sp->name, rval);
968 goto done_free_sp;
969 }
970done:
971 return rval;
972
973done_free_sp:
974 sp->free(sp);
975 return rval;
976}
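
The rewritten qla24xx_control_vp() is synchronous over an SRB: it queues a VP control IOCB, sleeps in wait_for_completion(&sp->comp), and returns the rc stashed by qla_ctrlvp_completed() in the ISR path. Callers must therefore run in process context. A call-site sketch; the VCE_COMMAND_ENABLE_VPS constant name is assumed from the existing firmware interface headers:

    /* Process context only: blocks until the IOCB completes or times out. */
    int ret = qla24xx_control_vp(vha, VCE_COMMAND_ENABLE_VPS);

    if (ret != QLA_SUCCESS)
        ql_log(ql_log_warn, vha, 0xffff,
            "enable VP %d failed (%x)\n", vha->vp_idx, ret);
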
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 0aa9c38bf347..525ac35a757b 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -11,8 +11,6 @@
11#include "qla_def.h" 11#include "qla_def.h"
12#include "qla_gbl.h" 12#include "qla_gbl.h"
13 13
14#include <linux/delay.h>
15
16#define TIMEOUT_100_MS 100 14#define TIMEOUT_100_MS 100
17 15
18static const uint32_t qla8044_reg_tbl[] = { 16static const uint32_t qla8044_reg_tbl[] = {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 46f2d0cf7c0d..12ee6e02d146 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -277,6 +277,12 @@ MODULE_PARM_DESC(ql2xenablemsix,
277 " 1 -- enable MSI-X interrupt mechanism.\n" 277 " 1 -- enable MSI-X interrupt mechanism.\n"
278 " 2 -- enable MSI interrupt mechanism.\n"); 278 " 2 -- enable MSI interrupt mechanism.\n");
279 279
280int qla2xuseresexchforels;
281module_param(qla2xuseresexchforels, int, 0444);
282MODULE_PARM_DESC(qla2xuseresexchforels,
283 "Reserve 1/2 of emergency exchanges for ELS.\n"
284 " 0 (default): disabled");
285
280/* 286/*
281 * SCSI host template entry points 287 * SCSI host template entry points
282 */ 288 */
@@ -294,7 +300,6 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
294 300
295static void qla2x00_clear_drv_active(struct qla_hw_data *); 301static void qla2x00_clear_drv_active(struct qla_hw_data *);
296static void qla2x00_free_device(scsi_qla_host_t *); 302static void qla2x00_free_device(scsi_qla_host_t *);
297static void qla83xx_disable_laser(scsi_qla_host_t *vha);
298static int qla2xxx_map_queues(struct Scsi_Host *shost); 303static int qla2xxx_map_queues(struct Scsi_Host *shost);
299static void qla2x00_destroy_deferred_work(struct qla_hw_data *); 304static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
300 305
@@ -1705,93 +1710,103 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1705 return QLA_SUCCESS; 1710 return QLA_SUCCESS;
1706} 1711}
1707 1712
1708void 1713static void
1709qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) 1714__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1710{ 1715{
1711 int que, cnt, status; 1716 int cnt, status;
1712 unsigned long flags; 1717 unsigned long flags;
1713 srb_t *sp; 1718 srb_t *sp;
1719 scsi_qla_host_t *vha = qp->vha;
1714 struct qla_hw_data *ha = vha->hw; 1720 struct qla_hw_data *ha = vha->hw;
1715 struct req_que *req; 1721 struct req_que *req;
1716 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 1722 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1717 struct qla_tgt_cmd *cmd; 1723 struct qla_tgt_cmd *cmd;
1718 uint8_t trace = 0; 1724 uint8_t trace = 0;
1719 1725
1720 spin_lock_irqsave(&ha->hardware_lock, flags); 1726 spin_lock_irqsave(qp->qp_lock_ptr, flags);
1721 for (que = 0; que < ha->max_req_queues; que++) { 1727 req = qp->req;
1722 req = ha->req_q_map[que]; 1728 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
1723 if (!req) 1729 sp = req->outstanding_cmds[cnt];
1724 continue; 1730 if (sp) {
1725 if (!req->outstanding_cmds) 1731 req->outstanding_cmds[cnt] = NULL;
1726 continue; 1732 if (sp->cmd_type == TYPE_SRB) {
1727 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1733 if (sp->type == SRB_NVME_CMD ||
1728 sp = req->outstanding_cmds[cnt]; 1734 sp->type == SRB_NVME_LS) {
1729 if (sp) { 1735 sp_get(sp);
1730 req->outstanding_cmds[cnt] = NULL; 1736 spin_unlock_irqrestore(qp->qp_lock_ptr,
1731 if (sp->cmd_type == TYPE_SRB) { 1737 flags);
1732 if (sp->type == SRB_NVME_CMD || 1738 qla_nvme_abort(ha, sp);
1733 sp->type == SRB_NVME_LS) { 1739 spin_lock_irqsave(qp->qp_lock_ptr,
1734 sp_get(sp); 1740 flags);
1735 spin_unlock_irqrestore( 1741 } else if (GET_CMD_SP(sp) &&
1736 &ha->hardware_lock, flags); 1742 !ha->flags.eeh_busy &&
1737 qla_nvme_abort(ha, sp); 1743 (!test_bit(ABORT_ISP_ACTIVE,
1738 spin_lock_irqsave( 1744 &vha->dpc_flags)) &&
1739 &ha->hardware_lock, flags); 1745 (sp->type == SRB_SCSI_CMD)) {
1740 } else if (GET_CMD_SP(sp) && 1746 /*
1741 !ha->flags.eeh_busy && 1747 * Don't abort commands in
1742 (!test_bit(ABORT_ISP_ACTIVE, 1748 * adapter during EEH
1743 &vha->dpc_flags)) && 1749 * recovery as it's not
1744 (sp->type == SRB_SCSI_CMD)) { 1750 * accessible/responding.
1745 /* 1751 *
1746 * Don't abort commands in 1752 * Get a reference to the sp
1747 * adapter during EEH 1753 * and drop the lock. The
1748 * recovery as it's not 1754 * reference ensures this
1749 * accessible/responding. 1755 * sp->done() call and not the
1750 * 1756 * call in qla2xxx_eh_abort()
1751 * Get a reference to the sp 1757 * ends the SCSI command (with
1752 * and drop the lock. The 1758 * result 'res').
1753 * reference ensures this 1759 */
1754 * sp->done() call and not the 1760 sp_get(sp);
1755 * call in qla2xxx_eh_abort() 1761 spin_unlock_irqrestore(qp->qp_lock_ptr,
1756 * ends the SCSI command (with 1762 flags);
1757 * result 'res'). 1763 status = qla2xxx_eh_abort(
1758 */ 1764 GET_CMD_SP(sp));
1759 sp_get(sp); 1765 spin_lock_irqsave(qp->qp_lock_ptr,
1760 spin_unlock_irqrestore( 1766 flags);
1761 &ha->hardware_lock, flags); 1767 /*
1762 status = qla2xxx_eh_abort( 1768 * Get rid of extra reference
1763 GET_CMD_SP(sp)); 1769 * if immediate exit from
1764 spin_lock_irqsave( 1770 * ql2xxx_eh_abort
1765 &ha->hardware_lock, flags); 1771 */
1766 /* 1772 if (status == FAILED &&
1767 * Get rid of extra reference 1773 (qla2x00_isp_reg_stat(ha)))
1768 * if immediate exit from 1774 atomic_dec(
1769 * ql2xxx_eh_abort 1775 &sp->ref_count);
1770 */
1771 if (status == FAILED &&
1772 (qla2x00_isp_reg_stat(ha)))
1773 atomic_dec(
1774 &sp->ref_count);
1775 }
1776 sp->done(sp, res);
1777 } else {
1778 if (!vha->hw->tgt.tgt_ops || !tgt ||
1779 qla_ini_mode_enabled(vha)) {
1780 if (!trace)
1781 ql_dbg(ql_dbg_tgt_mgt,
1782 vha, 0xf003,
1783 "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
1784 vha->dpc_flags);
1785 continue;
1786 }
1787 cmd = (struct qla_tgt_cmd *)sp;
1788 qlt_abort_cmd_on_host_reset(cmd->vha,
1789 cmd);
1790 } 1776 }
1777 sp->done(sp, res);
1778 } else {
1779 if (!vha->hw->tgt.tgt_ops || !tgt ||
1780 qla_ini_mode_enabled(vha)) {
1781 if (!trace)
1782 ql_dbg(ql_dbg_tgt_mgt,
1783 vha, 0xf003,
1784 "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
1785 vha->dpc_flags);
1786 continue;
1787 }
1788 cmd = (struct qla_tgt_cmd *)sp;
1789 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
1791 } 1790 }
1792 } 1791 }
1793 } 1792 }
1794 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1793 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
1794}
1795
1796void
1797qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1798{
1799 int que;
1800 struct qla_hw_data *ha = vha->hw;
1801
1802 __qla2x00_abort_all_cmds(ha->base_qpair, res);
1803
1804 for (que = 0; que < ha->max_qpairs; que++) {
1805 if (!ha->queue_pair_map[que])
1806 continue;
1807
1808 __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
1809 }
1795} 1810}
1796 1811
1797static int 1812static int
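
Both the NVMe and the SCSI abort paths in __qla2x00_abort_all_cmds() pin the SRB with sp_get() before dropping the qpair lock around a call that may block, then retake the lock; the extra reference guarantees the sp outlives the unlocked window. A userspace sketch of that pin/unlock/relock shape, with a pthread mutex standing in for qp_lock_ptr:

    #include <pthread.h>

    struct obj { int refs; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void slow_abort(struct obj *o) { (void)o; /* may block */ }

    static void abort_one(struct obj *o)
    {
        pthread_mutex_lock(&lock);
        o->refs++;                 /* sp_get() analog: pin across unlock */
        pthread_mutex_unlock(&lock);

        slow_abort(o);             /* e.g. the qla_nvme_abort() analog  */

        pthread_mutex_lock(&lock);
        o->refs--;                 /* reference released under the lock */
        pthread_mutex_unlock(&lock);
    }
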
@@ -2689,14 +2704,22 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
2689{ 2704{
2690 struct scsi_qla_host *vha = container_of(work, 2705 struct scsi_qla_host *vha = container_of(work,
2691 struct scsi_qla_host, iocb_work); 2706 struct scsi_qla_host, iocb_work);
2692 int cnt = 0; 2707 struct qla_hw_data *ha = vha->hw;
2708 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2709 int i = 20;
2710 unsigned long flags;
2693 2711
2694 while (!list_empty(&vha->work_list)) { 2712 if (test_bit(UNLOADING, &base_vha->dpc_flags))
2713 return;
2714
2715 while (!list_empty(&vha->work_list) && i > 0) {
2695 qla2x00_do_work(vha); 2716 qla2x00_do_work(vha);
2696 cnt++; 2717 i--;
2697 if (cnt > 10)
2698 break;
2699 } 2718 }
2719
2720 spin_lock_irqsave(&vha->work_lock, flags);
2721 clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
2722 spin_unlock_irqrestore(&vha->work_lock, flags);
2700} 2723}
2701 2724
2702/* 2725/*
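
The reworked qla2x00_iocb_work_fn() drains at most 20 work items per invocation and clears IOCB_WORK_ACTIVE under work_lock on exit, so a producer that queues more work afterwards can re-arm the work item without losing a wakeup. A standalone sketch of that bounded-drain-then-clear-flag shape:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool work_active;
    static int  pending;

    static void do_one_item(void) { pending--; }

    static void work_fn(void)
    {
        int budget = 20;                  /* bounded drain */

        while (pending > 0 && budget-- > 0)
            do_one_item();

        pthread_mutex_lock(&work_lock);
        work_active = false;              /* producer may re-queue now */
        pthread_mutex_unlock(&work_lock);
    }
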
@@ -2790,6 +2813,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2790 ha->init_cb_size = sizeof(init_cb_t); 2813 ha->init_cb_size = sizeof(init_cb_t);
2791 ha->link_data_rate = PORT_SPEED_UNKNOWN; 2814 ha->link_data_rate = PORT_SPEED_UNKNOWN;
2792 ha->optrom_size = OPTROM_SIZE_2300; 2815 ha->optrom_size = OPTROM_SIZE_2300;
2816 ha->max_exchg = FW_MAX_EXCHANGES_CNT;
2793 2817
2794 /* Assign ISP specific operations. */ 2818 /* Assign ISP specific operations. */
2795 if (IS_QLA2100(ha)) { 2819 if (IS_QLA2100(ha)) {
@@ -3011,9 +3035,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3011 base_vha = qla2x00_create_host(sht, ha); 3035 base_vha = qla2x00_create_host(sht, ha);
3012 if (!base_vha) { 3036 if (!base_vha) {
3013 ret = -ENOMEM; 3037 ret = -ENOMEM;
3014 qla2x00_mem_free(ha);
3015 qla2x00_free_req_que(ha, req);
3016 qla2x00_free_rsp_que(ha, rsp);
3017 goto probe_hw_failed; 3038 goto probe_hw_failed;
3018 } 3039 }
3019 3040
@@ -3023,7 +3044,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3023 host = base_vha->host; 3044 host = base_vha->host;
3024 base_vha->req = req; 3045 base_vha->req = req;
3025 if (IS_QLA2XXX_MIDTYPE(ha)) 3046 if (IS_QLA2XXX_MIDTYPE(ha))
3026 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 3047 base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
3027 else 3048 else
3028 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 3049 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
3029 base_vha->vp_idx; 3050 base_vha->vp_idx;
@@ -3074,7 +3095,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3074 /* Set up the irqs */ 3095 /* Set up the irqs */
3075 ret = qla2x00_request_irqs(ha, rsp); 3096 ret = qla2x00_request_irqs(ha, rsp);
3076 if (ret) 3097 if (ret)
3077 goto probe_init_failed; 3098 goto probe_hw_failed;
3078 3099
3079 /* Alloc arrays of request and response ring ptrs */ 3100 /* Alloc arrays of request and response ring ptrs */
3080 if (!qla2x00_alloc_queues(ha, req, rsp)) { 3101 if (!qla2x00_alloc_queues(ha, req, rsp)) {
@@ -3193,10 +3214,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3193 host->can_queue, base_vha->req, 3214 host->can_queue, base_vha->req,
3194 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3215 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3195 3216
3217 ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
3218
3196 if (ha->mqenable) { 3219 if (ha->mqenable) {
3197 bool mq = false; 3220 bool mq = false;
3198 bool startit = false; 3221 bool startit = false;
3199 ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
3200 3222
3201 if (QLA_TGT_MODE_ENABLED()) { 3223 if (QLA_TGT_MODE_ENABLED()) {
3202 mq = true; 3224 mq = true;
@@ -3390,6 +3412,9 @@ probe_failed:
3390 scsi_host_put(base_vha->host); 3412 scsi_host_put(base_vha->host);
3391 3413
3392probe_hw_failed: 3414probe_hw_failed:
3415 qla2x00_mem_free(ha);
3416 qla2x00_free_req_que(ha, req);
3417 qla2x00_free_rsp_que(ha, rsp);
3393 qla2x00_clear_drv_active(ha); 3418 qla2x00_clear_drv_active(ha);
3394 3419
3395iospace_config_failed: 3420iospace_config_failed:
@@ -3448,8 +3473,13 @@ qla2x00_shutdown(struct pci_dev *pdev)
3448 if (ha->eft) 3473 if (ha->eft)
3449 qla2x00_disable_eft_trace(vha); 3474 qla2x00_disable_eft_trace(vha);
3450 3475
3451 /* Stop currently executing firmware. */ 3476 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
3452 qla2x00_try_to_stop_firmware(vha); 3477 if (ha->flags.fw_started)
3478 qla2x00_abort_isp_cleanup(vha);
3479 } else {
3480 /* Stop currently executing firmware. */
3481 qla2x00_try_to_stop_firmware(vha);
3482 }
3453 3483
3454 /* Turn adapter off line */ 3484 /* Turn adapter off line */
3455 vha->flags.online = 0; 3485 vha->flags.online = 0;
@@ -3609,6 +3639,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
3609 dma_free_coherent(&ha->pdev->dev, 3639 dma_free_coherent(&ha->pdev->dev,
3610 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); 3640 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
3611 3641
3642 vfree(base_vha->scan.l);
3643
3612 if (IS_QLAFX00(ha)) 3644 if (IS_QLAFX00(ha))
3613 qlafx00_driver_shutdown(base_vha, 20); 3645 qlafx00_driver_shutdown(base_vha, 20);
3614 3646
@@ -3628,10 +3660,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
3628 3660
3629 qla84xx_put_chip(base_vha); 3661 qla84xx_put_chip(base_vha);
3630 3662
3631 /* Laser should be disabled only for ISP2031 */
3632 if (IS_QLA2031(ha))
3633 qla83xx_disable_laser(base_vha);
3634
3635 /* Disable timer */ 3663 /* Disable timer */
3636 if (base_vha->timer_active) 3664 if (base_vha->timer_active)
3637 qla2x00_stop_timer(base_vha); 3665 qla2x00_stop_timer(base_vha);
@@ -3692,8 +3720,16 @@ qla2x00_free_device(scsi_qla_host_t *vha)
3692 if (ha->eft) 3720 if (ha->eft)
3693 qla2x00_disable_eft_trace(vha); 3721 qla2x00_disable_eft_trace(vha);
3694 3722
3695 /* Stop currently executing firmware. */ 3723 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
3696 qla2x00_try_to_stop_firmware(vha); 3724 if (ha->flags.fw_started)
3725 qla2x00_abort_isp_cleanup(vha);
3726 } else {
3727 if (ha->flags.fw_started) {
3728 /* Stop currently executing firmware. */
3729 qla2x00_try_to_stop_firmware(vha);
3730 ha->flags.fw_started = 0;
3731 }
3732 }
3697 3733
3698 vha->flags.online = 0; 3734 vha->flags.online = 0;
3699 3735
@@ -3833,7 +3869,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
3833 3869
3834 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3870 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3835 fcport->scan_state = 0; 3871 fcport->scan_state = 0;
3836 qlt_schedule_sess_for_deletion_lock(fcport); 3872 qlt_schedule_sess_for_deletion(fcport);
3837 3873
3838 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx) 3874 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
3839 continue; 3875 continue;
@@ -4221,6 +4257,9 @@ qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
4221 u32 temp; 4257 u32 temp;
4222 *ret_cnt = FW_DEF_EXCHANGES_CNT; 4258 *ret_cnt = FW_DEF_EXCHANGES_CNT;
4223 4259
4260 if (max_cnt > vha->hw->max_exchg)
4261 max_cnt = vha->hw->max_exchg;
4262
4224 if (qla_ini_mode_enabled(vha)) { 4263 if (qla_ini_mode_enabled(vha)) {
4225 if (ql2xiniexchg > max_cnt) 4264 if (ql2xiniexchg > max_cnt)
4226 ql2xiniexchg = max_cnt; 4265 ql2xiniexchg = max_cnt;
@@ -4250,8 +4289,8 @@ int
4250qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha) 4289qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
4251{ 4290{
4252 int rval; 4291 int rval;
4253 u16 size, max_cnt; 4292 u16 size, max_cnt;
4254 u32 temp; 4293 u32 actual_cnt, totsz;
4255 struct qla_hw_data *ha = vha->hw; 4294 struct qla_hw_data *ha = vha->hw;
4256 4295
4257 if (!ha->flags.exchoffld_enabled) 4296 if (!ha->flags.exchoffld_enabled)
@@ -4268,16 +4307,19 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
4268 return rval; 4307 return rval;
4269 } 4308 }
4270 4309
4271 qla2x00_number_of_exch(vha, &temp, max_cnt); 4310 qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
4272 temp *= size; 4311 ql_log(ql_log_info, vha, 0xd014,
4312 "Actual exchange offload count: %d.\n", actual_cnt);
4313
4314 totsz = actual_cnt * size;
4273 4315
4274 if (temp != ha->exchoffld_size) { 4316 if (totsz != ha->exchoffld_size) {
4275 qla2x00_free_exchoffld_buffer(ha); 4317 qla2x00_free_exchoffld_buffer(ha);
4276 ha->exchoffld_size = temp; 4318 ha->exchoffld_size = totsz;
4277 4319
4278 ql_log(ql_log_info, vha, 0xd016, 4320 ql_log(ql_log_info, vha, 0xd016,
 4279 "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n", 4321 "Exchange offload: max_count=%d, actual count=%d, entry sz=0x%x, total sz=0x%x\n"
4280 max_cnt, size, temp); 4322 max_cnt, actual_cnt, size, totsz);
4281 4323
4282 ql_log(ql_log_info, vha, 0xd017, 4324 ql_log(ql_log_info, vha, 0xd017,
4283 "Exchange Buffers requested size = 0x%x\n", 4325 "Exchange Buffers requested size = 0x%x\n",
@@ -4288,7 +4330,21 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
4288 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL); 4330 ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
4289 if (!ha->exchoffld_buf) { 4331 if (!ha->exchoffld_buf) {
4290 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013, 4332 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
4291 "Failed to allocate memory for exchoffld_buf_dma.\n"); 4333 "Failed to allocate memory for Exchange Offload.\n");
4334
4335 if (ha->max_exchg >
4336 (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
4337 ha->max_exchg -= REDUCE_EXCHANGES_CNT;
4338 } else if (ha->max_exchg >
4339 (FW_DEF_EXCHANGES_CNT + 512)) {
4340 ha->max_exchg -= 512;
4341 } else {
4342 ha->flags.exchoffld_enabled = 0;
4343 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
4344 "Disabling Exchange offload due to lack of memory\n");
4345 }
4346 ha->exchoffld_size = 0;
4347
4292 return -ENOMEM; 4348 return -ENOMEM;
4293 } 4349 }
4294 } 4350 }
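
The new failure handling is a stepped back-off rather than a hard stop: each time the coherent allocation fails, ha->max_exchg is lowered (first by REDUCE_EXCHANGES_CNT, then by 512), so the next qla2x00_set_exchoffld_buffer() call asks the firmware for fewer exchanges, and offload is disabled only at the FW_DEF_EXCHANGES_CNT floor. The back-off in isolation (the constant values below are assumptions for the sketch, not the driver's definitions):

    #include <stdbool.h>

    #define FW_DEF_EXCHANGES_CNT   2048            /* assumed value */
    #define REDUCE_EXCHANGES_CNT   (8 * 1024)      /* assumed value */

    /* Returns true while a smaller retry is still worth attempting. */
    static bool reduce_exchange_target(unsigned int *max_exchg)
    {
            if (*max_exchg > FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT) {
                    *max_exchg -= REDUCE_EXCHANGES_CNT;     /* coarse step */
                    return true;
            }
            if (*max_exchg > FW_DEF_EXCHANGES_CNT + 512) {
                    *max_exchg -= 512;                      /* fine step */
                    return true;
            }
            return false;   /* at the floor: disable exchange offload */
    }
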
@@ -4514,6 +4570,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4514 INIT_LIST_HEAD(&vha->qp_list); 4570 INIT_LIST_HEAD(&vha->qp_list);
4515 INIT_LIST_HEAD(&vha->gnl.fcports); 4571 INIT_LIST_HEAD(&vha->gnl.fcports);
4516 INIT_LIST_HEAD(&vha->nvme_rport_list); 4572 INIT_LIST_HEAD(&vha->nvme_rport_list);
4573 INIT_LIST_HEAD(&vha->gpnid_list);
4574 INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
4517 4575
4518 spin_lock_init(&vha->work_lock); 4576 spin_lock_init(&vha->work_lock);
4519 spin_lock_init(&vha->cmd_list_lock); 4577 spin_lock_init(&vha->cmd_list_lock);
@@ -4531,6 +4589,19 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4531 return NULL; 4589 return NULL;
4532 } 4590 }
4533 4591
4592 /* todo: what about ext login? */
4593 vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
4594 vha->scan.l = vmalloc(vha->scan.size);
4595 if (!vha->scan.l) {
4596 ql_log(ql_log_fatal, vha, 0xd04a,
4597 "Alloc failed for scan database.\n");
4598 dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
4599 vha->gnl.l, vha->gnl.ldma);
4600 scsi_remove_host(vha->host);
4601 return NULL;
4602 }
4603 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
4604
4534 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 4605 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
4535 ql_dbg(ql_dbg_init, vha, 0x0041, 4606 ql_dbg(ql_dbg_init, vha, 0x0041,
4536 "Allocated the host=%p hw=%p vha=%p dev_name=%s", 4607 "Allocated the host=%p hw=%p vha=%p dev_name=%s",
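
Two allocation flavors are mixed on purpose in qla2x00_create_host(): the gnl buffer stays in dma_alloc_coherent() memory because firmware DMAs into it, while the new fabric-scan database is touched only by the CPU and can use vmalloc() (its matching vfree() is the one added to qla2x00_remove_one() above). A reduced sketch of the pairing and its failure unwind, with stand-in struct names:

    #include <linux/dma-mapping.h>
    #include <linux/vmalloc.h>

    struct tables {
            void        *gnl;       /* device-visible: coherent DMA */
            dma_addr_t   gnl_dma;
            size_t       gnl_size;
            void        *scan;      /* CPU-only: plain vmalloc */
            size_t       scan_size;
    };

    static int alloc_tables(struct device *dev, struct tables *t)
    {
            t->gnl = dma_alloc_coherent(dev, t->gnl_size, &t->gnl_dma,
                                        GFP_KERNEL);
            if (!t->gnl)
                    return -ENOMEM;

            t->scan = vmalloc(t->scan_size);    /* may be large; no DMA */
            if (!t->scan) {
                    /* unwind the earlier coherent allocation */
                    dma_free_coherent(dev, t->gnl_size, t->gnl, t->gnl_dma);
                    return -ENOMEM;
            }
            return 0;
    }
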
@@ -4566,15 +4637,18 @@ int
4566qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 4637qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
4567{ 4638{
4568 unsigned long flags; 4639 unsigned long flags;
4640 bool q = false;
4569 4641
4570 spin_lock_irqsave(&vha->work_lock, flags); 4642 spin_lock_irqsave(&vha->work_lock, flags);
4571 list_add_tail(&e->list, &vha->work_list); 4643 list_add_tail(&e->list, &vha->work_list);
4644
4645 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
4646 q = true;
4647
4572 spin_unlock_irqrestore(&vha->work_lock, flags); 4648 spin_unlock_irqrestore(&vha->work_lock, flags);
4573 4649
4574 if (QLA_EARLY_LINKUP(vha->hw)) 4650 if (q)
4575 schedule_work(&vha->iocb_work); 4651 queue_work(vha->hw->wq, &vha->iocb_work);
4576 else
4577 qla2xxx_wake_dpc(vha);
4578 4652
4579 return QLA_SUCCESS; 4653 return QLA_SUCCESS;
4580} 4654}
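
This rewrite makes event posting single-flight: the poster that flips IOCB_WORK_ACTIVE from 0 to 1 under work_lock queues iocb_work exactly once, and every later poster merely appends to work_list, so a burst of events costs one queue_work() instead of a dpc wakeup each. The same gate is reused from qla2x00_timer() further down; the worker is expected to clear the bit before draining the list. The pattern in isolation:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct poster {
            spinlock_t               lock;
            struct list_head         work_list;
            unsigned long            flags;  /* bit 0: run already queued */
            struct work_struct       work;
            struct workqueue_struct *wq;
    };

    static void post_event(struct poster *p, struct list_head *ev)
    {
            unsigned long irqflags;
            bool queue = false;

            spin_lock_irqsave(&p->lock, irqflags);
            list_add_tail(ev, &p->work_list);
            /* first poster wins; the rest piggyback on the pending run */
            if (!test_and_set_bit(0, &p->flags))
                    queue = true;
            spin_unlock_irqrestore(&p->lock, irqflags);

            if (queue)
                    queue_work(p->wq, &p->work);
    }
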
@@ -4623,6 +4697,7 @@ int qla2x00_post_async_##name##_work( \
4623 e->u.logio.data[0] = data[0]; \ 4697 e->u.logio.data[0] = data[0]; \
4624 e->u.logio.data[1] = data[1]; \ 4698 e->u.logio.data[1] = data[1]; \
4625 } \ 4699 } \
4700 fcport->flags |= FCF_ASYNC_ACTIVE; \
4626 return qla2x00_post_work(vha, e); \ 4701 return qla2x00_post_work(vha, e); \
4627} 4702}
4628 4703
@@ -4631,6 +4706,8 @@ qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
4631qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); 4706qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
4632qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 4707qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
4633qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE); 4708qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
4709qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
4710qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
4634 4711
4635int 4712int
4636qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 4713qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -4699,6 +4776,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4699 struct qlt_plogi_ack_t *pla = 4776 struct qlt_plogi_ack_t *pla =
4700 (struct qlt_plogi_ack_t *)e->u.new_sess.pla; 4777 (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
4701 uint8_t free_fcport = 0; 4778 uint8_t free_fcport = 0;
4779 u64 wwn;
4780
4781 ql_dbg(ql_dbg_disc, vha, 0xffff,
4782 "%s %d %8phC enter\n",
4783 __func__, __LINE__, e->u.new_sess.port_name);
4702 4784
4703 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4785 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4704 fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1); 4786 fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
@@ -4706,6 +4788,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4706 fcport->d_id = e->u.new_sess.id; 4788 fcport->d_id = e->u.new_sess.id;
4707 if (pla) { 4789 if (pla) {
4708 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 4790 fcport->fw_login_state = DSC_LS_PLOGI_PEND;
4791 memcpy(fcport->node_name,
4792 pla->iocb.u.isp24.u.plogi.node_name,
4793 WWN_SIZE);
4709 qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN); 4794 qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
4710 /* we took an extra ref_count to prevent PLOGI ACK when 4795 /* we took an extra ref_count to prevent PLOGI ACK when
4711 * fcport/sess has not been created. 4796 * fcport/sess has not been created.
@@ -4717,9 +4802,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4717 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 4802 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
4718 if (fcport) { 4803 if (fcport) {
4719 fcport->d_id = e->u.new_sess.id; 4804 fcport->d_id = e->u.new_sess.id;
4720 fcport->scan_state = QLA_FCPORT_FOUND;
4721 fcport->flags |= FCF_FABRIC_DEVICE; 4805 fcport->flags |= FCF_FABRIC_DEVICE;
4722 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 4806 fcport->fw_login_state = DSC_LS_PLOGI_PEND;
4807 if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI)
4808 fcport->fc4_type = FC4_TYPE_FCP_SCSI;
4723 4809
4724 memcpy(fcport->port_name, e->u.new_sess.port_name, 4810 memcpy(fcport->port_name, e->u.new_sess.port_name,
4725 WWN_SIZE); 4811 WWN_SIZE);
@@ -4734,7 +4820,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4734 } 4820 }
4735 4821
4736 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4822 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4737 /* search again to make sure one else got ahead */ 4823 /* search again to make sure no one else got ahead */
4738 tfcp = qla2x00_find_fcport_by_wwpn(vha, 4824 tfcp = qla2x00_find_fcport_by_wwpn(vha,
4739 e->u.new_sess.port_name, 1); 4825 e->u.new_sess.port_name, 1);
4740 if (tfcp) { 4826 if (tfcp) {
@@ -4748,20 +4834,82 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4748 } else { 4834 } else {
4749 list_add_tail(&fcport->list, &vha->vp_fcports); 4835 list_add_tail(&fcport->list, &vha->vp_fcports);
4750 4836
4751 if (pla) { 4837 }
4752 qlt_plogi_ack_link(vha, pla, fcport, 4838 if (pla) {
4753 QLT_PLOGI_LINK_SAME_WWN); 4839 qlt_plogi_ack_link(vha, pla, fcport,
4754 pla->ref_count--; 4840 QLT_PLOGI_LINK_SAME_WWN);
4755 } 4841 pla->ref_count--;
4756 } 4842 }
4757 } 4843 }
4758 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4844 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4759 4845
4760 if (fcport) { 4846 if (fcport) {
4761 if (pla) 4847 if (N2N_TOPO(vha->hw))
4848 fcport->flags &= ~FCF_FABRIC_DEVICE;
4849
4850 fcport->id_changed = 1;
4851 fcport->scan_state = QLA_FCPORT_FOUND;
4852 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
4853
4854 if (pla) {
4855 if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
4856 u16 wd3_lo;
4857
4858 fcport->fw_login_state = DSC_LS_PRLI_PEND;
4859 fcport->local = 0;
4860 fcport->loop_id =
4861 le16_to_cpu(
4862 pla->iocb.u.isp24.nport_handle);
4863 fcport->fw_login_state = DSC_LS_PRLI_PEND;
4864 wd3_lo =
4865 le16_to_cpu(
4866 pla->iocb.u.isp24.u.prli.wd3_lo);
4867
4868 if (wd3_lo & BIT_7)
4869 fcport->conf_compl_supported = 1;
4870
4871 if ((wd3_lo & BIT_4) == 0)
4872 fcport->port_type = FCT_INITIATOR;
4873 else
4874 fcport->port_type = FCT_TARGET;
4875 }
4762 qlt_plogi_ack_unref(vha, pla); 4876 qlt_plogi_ack_unref(vha, pla);
4763 else 4877 } else {
4764 qla24xx_async_gffid(vha, fcport); 4878 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4879 tfcp = qla2x00_find_fcport_by_nportid(vha,
4880 &e->u.new_sess.id, 1);
4881 if (tfcp && (tfcp != fcport)) {
4882 /*
 4883 * We have a conflicting fcport with the same NPort ID.
4884 */
4885 ql_dbg(ql_dbg_disc, vha, 0xffff,
4886 "%s %8phC found conflict b4 add. DS %d LS %d\n",
4887 __func__, tfcp->port_name, tfcp->disc_state,
4888 tfcp->fw_login_state);
4889
4890 switch (tfcp->disc_state) {
4891 case DSC_DELETED:
4892 break;
4893 case DSC_DELETE_PEND:
4894 fcport->login_pause = 1;
4895 tfcp->conflict = fcport;
4896 break;
4897 default:
4898 fcport->login_pause = 1;
4899 tfcp->conflict = fcport;
4900 qlt_schedule_sess_for_deletion(tfcp);
4901 break;
4902 }
4903 }
4904 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4905
4906 wwn = wwn_to_u64(fcport->node_name);
4907
4908 if (!wwn)
4909 qla24xx_async_gnnid(vha, fcport);
4910 else
4911 qla24xx_async_gnl(vha, fcport);
4912 }
4765 } 4913 }
4766 4914
4767 if (free_fcport) { 4915 if (free_fcport) {
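
The conflict handling added above deserves a close read: when another fcport already owns the requested NPort ID, the action depends on how far that port's deletion has progressed. An annotated restatement of the policy (same logic as the hunk, comments editorial):

    switch (tfcp->disc_state) {
    case DSC_DELETED:
            /* stale entry already torn down: safe to proceed */
            break;
    case DSC_DELETE_PEND:
            /* already on its way out: pause our login until it's gone */
            fcport->login_pause = 1;
            tfcp->conflict = fcport;
            break;
    default:
            /* live session holding our ID: evict it, then retry login */
            fcport->login_pause = 1;
            tfcp->conflict = fcport;
            qlt_schedule_sess_for_deletion(tfcp);
            break;
    }
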
@@ -4771,6 +4919,20 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4771 } 4919 }
4772} 4920}
4773 4921
4922static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
4923{
4924 struct srb *sp = e->u.iosb.sp;
4925 int rval;
4926
4927 rval = qla2x00_start_sp(sp);
4928 if (rval != QLA_SUCCESS) {
4929 ql_dbg(ql_dbg_disc, vha, 0x2043,
4930 "%s: %s: Re-issue IOCB failed (%d).\n",
4931 __func__, sp->name, rval);
4932 qla24xx_sp_unmap(vha, sp);
4933 }
4934}
4935
4774void 4936void
4775qla2x00_do_work(struct scsi_qla_host *vha) 4937qla2x00_do_work(struct scsi_qla_host *vha)
4776{ 4938{
@@ -4824,8 +4986,11 @@ qla2x00_do_work(struct scsi_qla_host *vha)
4824 case QLA_EVT_GPNID: 4986 case QLA_EVT_GPNID:
4825 qla24xx_async_gpnid(vha, &e->u.gpnid.id); 4987 qla24xx_async_gpnid(vha, &e->u.gpnid.id);
4826 break; 4988 break;
4827 case QLA_EVT_GPNID_DONE: 4989 case QLA_EVT_UNMAP:
4828 qla24xx_async_gpnid_done(vha, e->u.iosb.sp); 4990 qla24xx_sp_unmap(vha, e->u.iosb.sp);
4991 break;
4992 case QLA_EVT_RELOGIN:
4993 qla2x00_relogin(vha);
4829 break; 4994 break;
4830 case QLA_EVT_NEW_SESS: 4995 case QLA_EVT_NEW_SESS:
4831 qla24xx_create_new_sess(vha, e); 4996 qla24xx_create_new_sess(vha, e);
@@ -4849,6 +5014,30 @@ qla2x00_do_work(struct scsi_qla_host *vha)
4849 case QLA_EVT_NACK: 5014 case QLA_EVT_NACK:
4850 qla24xx_do_nack_work(vha, e); 5015 qla24xx_do_nack_work(vha, e);
4851 break; 5016 break;
5017 case QLA_EVT_ASYNC_PRLO:
5018 qla2x00_async_prlo(vha, e->u.logio.fcport);
5019 break;
5020 case QLA_EVT_ASYNC_PRLO_DONE:
5021 qla2x00_async_prlo_done(vha, e->u.logio.fcport,
5022 e->u.logio.data);
5023 break;
5024 case QLA_EVT_GPNFT:
5025 qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type);
5026 break;
5027 case QLA_EVT_GPNFT_DONE:
5028 qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
5029 break;
5030 case QLA_EVT_GNNFT_DONE:
5031 qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
5032 break;
5033 case QLA_EVT_GNNID:
5034 qla24xx_async_gnnid(vha, e->u.fcport.fcport);
5035 break;
5036 case QLA_EVT_GFPNID:
5037 qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
5038 break;
5039 case QLA_EVT_SP_RETRY:
5040 qla_sp_retry(vha, e);
4852 } 5041 }
4853 if (e->flags & QLA_EVT_FLAG_FREE) 5042 if (e->flags & QLA_EVT_FLAG_FREE)
4854 kfree(e); 5043 kfree(e);
@@ -4858,6 +5047,20 @@ qla2x00_do_work(struct scsi_qla_host *vha)
4858 } 5047 }
4859} 5048}
4860 5049
5050int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
5051{
5052 struct qla_work_evt *e;
5053
5054 e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
5055
5056 if (!e) {
5057 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5058 return QLA_FUNCTION_FAILED;
5059 }
5060
5061 return qla2x00_post_work(vha, e);
5062}
5063
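
Note the degraded path in qla24xx_post_relogin_work(): if the work event cannot be allocated, the RELOGIN_NEEDED dpc flag is re-armed so the dpc loop's next pass retries, instead of the relogin being dropped. The shape of that fallback, with stand-in names (the post callback is assumed to consume the event):

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    #define RETRY_NEEDED_BIT 0

    struct relogin_ctx { unsigned long dpc_flags; };

    static int post_or_rearm(struct relogin_ctx *c,
                             int (*post)(struct relogin_ctx *, void *))
    {
            void *e = kzalloc(32, GFP_KERNEL);  /* may fail under pressure */

            if (!e) {
                    /* fall back: the polling dpc loop retries later */
                    set_bit(RETRY_NEEDED_BIT, &c->dpc_flags);
                    return -ENOMEM;
            }
            return post(c, e);
    }
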
4861/* Relogins all the fcports of a vport 5064/* Relogins all the fcports of a vport
4862 * Context: dpc thread 5065 * Context: dpc thread
4863 */ 5066 */
@@ -4868,14 +5071,14 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
4868 struct event_arg ea; 5071 struct event_arg ea;
4869 5072
4870 list_for_each_entry(fcport, &vha->vp_fcports, list) { 5073 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4871 /* 5074 /*
4872 * If the port is not ONLINE then try to login 5075 * If the port is not ONLINE then try to login
4873 * to it if we haven't run out of retries. 5076 * to it if we haven't run out of retries.
4874 */ 5077 */
4875 if (atomic_read(&fcport->state) != FCS_ONLINE && 5078 if (atomic_read(&fcport->state) != FCS_ONLINE &&
4876 fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { 5079 fcport->login_retry &&
4877 fcport->login_retry--; 5080 !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) {
4878 if (fcport->flags & FCF_FABRIC_DEVICE) { 5081 if (vha->hw->current_topology != ISP_CFG_NL) {
4879 ql_dbg(ql_dbg_disc, fcport->vha, 0x2108, 5082 ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
4880 "%s %8phC DS %d LS %d\n", __func__, 5083 "%s %8phC DS %d LS %d\n", __func__,
4881 fcport->port_name, fcport->disc_state, 5084 fcport->port_name, fcport->disc_state,
@@ -4884,7 +5087,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
4884 ea.event = FCME_RELOGIN; 5087 ea.event = FCME_RELOGIN;
4885 ea.fcport = fcport; 5088 ea.fcport = fcport;
4886 qla2x00_fcport_event_handler(vha, &ea); 5089 qla2x00_fcport_event_handler(vha, &ea);
4887 } else { 5090 } else if (vha->hw->current_topology == ISP_CFG_NL) {
5091 fcport->login_retry--;
4888 status = qla2x00_local_device_login(vha, 5092 status = qla2x00_local_device_login(vha,
4889 fcport); 5093 fcport);
4890 if (status == QLA_SUCCESS) { 5094 if (status == QLA_SUCCESS) {
@@ -4912,6 +5116,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
4912 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 5116 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
4913 break; 5117 break;
4914 } 5118 }
5119
5120 ql_dbg(ql_dbg_disc, vha, 0x400e,
5121 "Relogin end.\n");
4915} 5122}
4916 5123
4917/* Schedule work on any of the dpc-workqueues */ 5124/* Schedule work on any of the dpc-workqueues */
@@ -5687,8 +5894,6 @@ qla2x00_do_dpc(void *data)
5687 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 5894 if (test_bit(UNLOADING, &base_vha->dpc_flags))
5688 break; 5895 break;
5689 5896
5690 qla2x00_do_work(base_vha);
5691
5692 if (IS_P3P_TYPE(ha)) { 5897 if (IS_P3P_TYPE(ha)) {
5693 if (IS_QLA8044(ha)) { 5898 if (IS_QLA8044(ha)) {
5694 if (test_and_clear_bit(ISP_UNRECOVERABLE, 5899 if (test_and_clear_bit(ISP_UNRECOVERABLE,
@@ -5867,16 +6072,19 @@ qla2x00_do_dpc(void *data)
5867 } 6072 }
5868 6073
5869 /* Retry each device up to login retry count */ 6074 /* Retry each device up to login retry count */
5870 if ((test_and_clear_bit(RELOGIN_NEEDED, 6075 if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
5871 &base_vha->dpc_flags)) &&
5872 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && 6076 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
5873 atomic_read(&base_vha->loop_state) != LOOP_DOWN) { 6077 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
5874 6078
5875 ql_dbg(ql_dbg_dpc, base_vha, 0x400d, 6079 if (!base_vha->relogin_jif ||
5876 "Relogin scheduled.\n"); 6080 time_after_eq(jiffies, base_vha->relogin_jif)) {
5877 qla2x00_relogin(base_vha); 6081 base_vha->relogin_jif = jiffies + HZ;
5878 ql_dbg(ql_dbg_dpc, base_vha, 0x400e, 6082 clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
5879 "Relogin end.\n"); 6083
6084 ql_dbg(ql_dbg_disc, base_vha, 0x400d,
6085 "Relogin scheduled.\n");
6086 qla24xx_post_relogin_work(base_vha);
6087 }
5880 } 6088 }
5881loop_resync_check: 6089loop_resync_check:
5882 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, 6090 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
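
relogin_jif makes relogin scheduling a once-per-second affair: RELOGIN_NEEDED is consumed only when jiffies has reached the stored deadline, which is then pushed HZ ahead. The jiffies throttling idiom on its own:

    #include <linux/jiffies.h>

    /* Run at most once per second; *deadline starts at 0. */
    static bool throttle_expired(unsigned long *deadline)
    {
            if (*deadline && !time_after_eq(jiffies, *deadline))
                    return false;           /* still inside the window */

            *deadline = jiffies + HZ;       /* open the next 1s window */
            return true;
    }
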
@@ -6135,8 +6343,17 @@ qla2x00_timer(struct timer_list *t)
6135 } 6343 }
6136 6344
6137 /* Process any deferred work. */ 6345 /* Process any deferred work. */
6138 if (!list_empty(&vha->work_list)) 6346 if (!list_empty(&vha->work_list)) {
6139 start_dpc++; 6347 unsigned long flags;
6348 bool q = false;
6349
6350 spin_lock_irqsave(&vha->work_lock, flags);
6351 if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
6352 q = true;
6353 spin_unlock_irqrestore(&vha->work_lock, flags);
6354 if (q)
6355 queue_work(vha->hw->wq, &vha->iocb_work);
6356 }
6140 6357
6141 /* 6358 /*
6142 * FC-NVME 6359 * FC-NVME
@@ -6580,37 +6797,16 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
6580 ha->flags.eeh_busy = 0; 6797 ha->flags.eeh_busy = 0;
6581} 6798}
6582 6799
6583static void
6584qla83xx_disable_laser(scsi_qla_host_t *vha)
6585{
6586 uint32_t reg, data, fn;
6587 struct qla_hw_data *ha = vha->hw;
6588 struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
6589
6590 /* pci func #/port # */
6591 ql_dbg(ql_dbg_init, vha, 0x004b,
6592 "Disabling Laser for hba: %p\n", vha);
6593
6594 fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
6595 (BIT_15|BIT_14|BIT_13|BIT_12));
6596
6597 fn = (fn >> 12);
6598
6599 if (fn & 1)
6600 reg = PORT_1_2031;
6601 else
6602 reg = PORT_0_2031;
6603
6604 data = LASER_OFF_2031;
6605
6606 qla83xx_wr_reg(vha, reg, data);
6607}
6608
6609static int qla2xxx_map_queues(struct Scsi_Host *shost) 6800static int qla2xxx_map_queues(struct Scsi_Host *shost)
6610{ 6801{
6802 int rc;
6611 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; 6803 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
6612 6804
6613 return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev); 6805 if (USER_CTRL_IRQ(vha->hw))
6806 rc = blk_mq_map_queues(&shost->tag_set);
6807 else
6808 rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
6809 return rc;
6614} 6810}
6615 6811
6616static const struct pci_error_handlers qla2xxx_err_handler = { 6812static const struct pci_error_handlers qla2xxx_err_handler = {
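
The map_queues change keys off the ql2xuctrlirq module parameter (its MODULE_PARM_DESC appears in the qla_target.c hunk below): when the administrator steers IRQs, the MSI-X affinity that blk_mq_pci_map_queues() would follow is meaningless, so the generic software spread is used instead. The new function, restated with editorial comments (USER_CTRL_IRQ() is the driver's macro):

    static int example_map_queues(struct Scsi_Host *shost)
    {
            scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;

            if (USER_CTRL_IRQ(vha->hw))
                    /* admin-placed IRQs: no PCI affinity to follow */
                    return blk_mq_map_queues(&shost->tag_set);

            /* default: spread hw queues along MSI-X vector affinity */
            return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
    }
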
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b4336e0cd85f..d2db86ea06b2 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2461,6 +2461,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2461 sec_mask = 0x1e000; 2461 sec_mask = 0x1e000;
2462 break; 2462 break;
2463 } 2463 }
2464 /* fall through */
2464 default: 2465 default:
2465 /* Default to 16 kb sector size. */ 2466 /* Default to 16 kb sector size. */
2466 rest_addr = 0x3fff; 2467 rest_addr = 0x3fff;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 18069edd4773..fc89af8fe256 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -75,7 +75,8 @@ MODULE_PARM_DESC(ql2xuctrlirq,
75 75
76int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 76int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
77 77
78static int temp_sam_status = SAM_STAT_BUSY; 78static int qla_sam_status = SAM_STAT_BUSY;
79static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
79 80
80/* 81/*
81 * From scsi/fc/fc_fcp.h 82 * From scsi/fc/fc_fcp.h
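
Splitting the old temp_sam_status in two separates the driver's own back-pressure from the target core's: SAM_STAT_BUSY asks the initiator to retry shortly, while SAM_STAT_TASK_SET_FULL reports that the task-set limit was hit. Later hunks switch qlt_send_busy() callers to qla_sam_status accordingly. Both values are SAM status bytes from the kernel's SCSI headers:

    #include <scsi/scsi_proto.h>

    /* SAM status bytes used for back-pressure:
     *   SAM_STAT_BUSY          (0x08) - transient, initiator retries
     *   SAM_STAT_TASK_SET_FULL (0x28) - command queue limit reached
     */
    static int qla_sam_status = SAM_STAT_BUSY;              /* driver-side */
    static int tc_sam_status  = SAM_STAT_TASK_SET_FULL;     /* target core */
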
@@ -208,7 +209,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
208 209
209 host = btree_lookup32(&vha->hw->tgt.host_map, key); 210 host = btree_lookup32(&vha->hw->tgt.host_map, key);
210 if (!host) 211 if (!host)
211 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, 212 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
212 "Unable to find host %06x\n", key); 213 "Unable to find host %06x\n", key);
213 214
214 return host; 215 return host;
@@ -309,17 +310,17 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
309 310
310 host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); 311 host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
311 if (host != NULL) { 312 if (host != NULL) {
312 ql_dbg(ql_dbg_async, vha, 0x502f, 313 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
313 "Requeuing unknown ATIO_TYPE7 %p\n", u); 314 "Requeuing unknown ATIO_TYPE7 %p\n", u);
314 qlt_24xx_atio_pkt(host, &u->atio, ha_locked); 315 qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
315 } else if (tgt->tgt_stop) { 316 } else if (tgt->tgt_stop) {
316 ql_dbg(ql_dbg_async, vha, 0x503a, 317 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
317 "Freeing unknown %s %p, because tgt is being stopped\n", 318 "Freeing unknown %s %p, because tgt is being stopped\n",
318 "ATIO_TYPE7", u); 319 "ATIO_TYPE7", u);
319 qlt_send_term_exchange(vha->hw->base_qpair, NULL, 320 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
320 &u->atio, ha_locked, 0); 321 &u->atio, ha_locked, 0);
321 } else { 322 } else {
322 ql_dbg(ql_dbg_async, vha, 0x503d, 323 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
323 "Reschedule u %p, vha %p, host %p\n", u, vha, host); 324 "Reschedule u %p, vha %p, host %p\n", u, vha, host);
324 if (!queued) { 325 if (!queued) {
325 queued = 1; 326 queued = 1;
@@ -450,6 +451,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
450 ql_dbg(ql_dbg_tgt, vha, 0xe073, 451 ql_dbg(ql_dbg_tgt, vha, 0xe073,
451 "qla_target(%d):%s: CRC2 Response pkt\n", 452 "qla_target(%d):%s: CRC2 Response pkt\n",
452 vha->vp_idx, __func__); 453 vha->vp_idx, __func__);
454 /* fall through */
453 case CTIO_TYPE7: 455 case CTIO_TYPE7:
454 { 456 {
455 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 457 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -606,7 +608,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
606 __func__, __LINE__, 608 __func__, __LINE__,
607 sp->fcport->port_name, 609 sp->fcport->port_name,
608 vha->fcport_count); 610 vha->fcport_count);
609 611 sp->fcport->disc_state = DSC_UPD_FCPORT;
610 qla24xx_post_upd_fcport_work(vha, sp->fcport); 612 qla24xx_post_upd_fcport_work(vha, sp->fcport);
611 } else { 613 } else {
612 ql_dbg(ql_dbg_disc, vha, 0x20f5, 614 ql_dbg(ql_dbg_disc, vha, 0x20f5,
@@ -665,7 +667,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
665 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); 667 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
666 668
667 sp->u.iocb_cmd.u.nack.ntfy = ntfy; 669 sp->u.iocb_cmd.u.nack.ntfy = ntfy;
668 670 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
669 sp->done = qla2x00_async_nack_sp_done; 671 sp->done = qla2x00_async_nack_sp_done;
670 672
671 rval = qla2x00_start_sp(sp); 673 rval = qla2x00_start_sp(sp);
@@ -861,7 +863,10 @@ void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
861 863
862 fcport->loop_id = loop_id; 864 fcport->loop_id = loop_id;
863 fcport->d_id = port_id; 865 fcport->d_id = port_id;
864 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI); 866 if (iocb->u.isp24.status_subcode == ELS_PLOGI)
867 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
868 else
869 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
865 870
866 list_for_each_entry(fcport, &vha->vp_fcports, list) { 871 list_for_each_entry(fcport, &vha->vp_fcports, list) {
867 if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla) 872 if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
@@ -890,6 +895,17 @@ qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
890 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], 895 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
891 pla->ref_count, pla, link); 896 pla->ref_count, pla, link);
892 897
898 if (link == QLT_PLOGI_LINK_CONFLICT) {
899 switch (sess->disc_state) {
900 case DSC_DELETED:
901 case DSC_DELETE_PEND:
902 pla->ref_count--;
903 return;
904 default:
905 break;
906 }
907 }
908
893 if (sess->plogi_link[link]) 909 if (sess->plogi_link[link])
894 qlt_plogi_ack_unref(vha, sess->plogi_link[link]); 910 qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
895 911
@@ -954,8 +970,9 @@ static void qlt_free_session_done(struct work_struct *work)
954 struct qla_hw_data *ha = vha->hw; 970 struct qla_hw_data *ha = vha->hw;
955 unsigned long flags; 971 unsigned long flags;
956 bool logout_started = false; 972 bool logout_started = false;
957 struct event_arg ea;
958 scsi_qla_host_t *base_vha; 973 scsi_qla_host_t *base_vha;
974 struct qlt_plogi_ack_t *own =
975 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
959 976
960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, 977 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
961 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" 978 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
@@ -971,19 +988,35 @@ static void qlt_free_session_done(struct work_struct *work)
971 988
972 logo.id = sess->d_id; 989 logo.id = sess->d_id;
973 logo.cmd_count = 0; 990 logo.cmd_count = 0;
991 sess->send_els_logo = 0;
974 qlt_send_first_logo(vha, &logo); 992 qlt_send_first_logo(vha, &logo);
975 } 993 }
976 994
977 if (sess->logout_on_delete) { 995 if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
978 int rc; 996 int rc;
979 997
980 rc = qla2x00_post_async_logout_work(vha, sess, NULL); 998 if (!own ||
981 if (rc != QLA_SUCCESS) 999 (own &&
982 ql_log(ql_log_warn, vha, 0xf085, 1000 (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
983 "Schedule logo failed sess %p rc %d\n", 1001 rc = qla2x00_post_async_logout_work(vha, sess,
984 sess, rc); 1002 NULL);
985 else 1003 if (rc != QLA_SUCCESS)
986 logout_started = true; 1004 ql_log(ql_log_warn, vha, 0xf085,
1005 "Schedule logo failed sess %p rc %d\n",
1006 sess, rc);
1007 else
1008 logout_started = true;
1009 } else if (own && (own->iocb.u.isp24.status_subcode ==
1010 ELS_PRLI) && ha->flags.rida_fmt2) {
1011 rc = qla2x00_post_async_prlo_work(vha, sess,
1012 NULL);
1013 if (rc != QLA_SUCCESS)
1014 ql_log(ql_log_warn, vha, 0xf085,
1015 "Schedule PRLO failed sess %p rc %d\n",
1016 sess, rc);
1017 else
1018 logout_started = true;
1019 }
987 } 1020 }
988 } 1021 }
989 1022
@@ -1007,7 +1040,7 @@ static void qlt_free_session_done(struct work_struct *work)
1007 } 1040 }
1008 1041
1009 ql_dbg(ql_dbg_disc, vha, 0xf087, 1042 ql_dbg(ql_dbg_disc, vha, 0xf087,
 1010 "%s: sess %p logout completed\n",__func__, sess); 1043 "%s: sess %p logout completed\n", __func__, sess);
1011 } 1044 }
1012 1045
1013 if (sess->logo_ack_needed) { 1046 if (sess->logo_ack_needed) {
@@ -1033,8 +1066,7 @@ static void qlt_free_session_done(struct work_struct *work)
1033 sess->login_succ = 0; 1066 sess->login_succ = 0;
1034 } 1067 }
1035 1068
1036 if (sess->chip_reset != ha->base_qpair->chip_reset) 1069 qla2x00_clear_loop_id(sess);
1037 qla2x00_clear_loop_id(sess);
1038 1070
1039 if (sess->conflict) { 1071 if (sess->conflict) {
1040 sess->conflict->login_pause = 0; 1072 sess->conflict->login_pause = 0;
@@ -1044,8 +1076,6 @@ static void qlt_free_session_done(struct work_struct *work)
1044 } 1076 }
1045 1077
1046 { 1078 {
1047 struct qlt_plogi_ack_t *own =
1048 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
1049 struct qlt_plogi_ack_t *con = 1079 struct qlt_plogi_ack_t *con =
1050 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; 1080 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
1051 struct imm_ntfy_from_isp *iocb; 1081 struct imm_ntfy_from_isp *iocb;
@@ -1076,6 +1106,7 @@ static void qlt_free_session_done(struct work_struct *work)
1076 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; 1106 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
1077 } 1107 }
1078 } 1108 }
1109
1079 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1110 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1080 1111
1081 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, 1112 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
@@ -1089,14 +1120,24 @@ static void qlt_free_session_done(struct work_struct *work)
1089 wake_up_all(&vha->fcport_waitQ); 1120 wake_up_all(&vha->fcport_waitQ);
1090 1121
1091 base_vha = pci_get_drvdata(ha->pdev); 1122 base_vha = pci_get_drvdata(ha->pdev);
1123
1124 sess->free_pending = 0;
1125
1092 if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) 1126 if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
1093 return; 1127 return;
1094 1128
1095 if (!tgt || !tgt->tgt_stop) { 1129 if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
1096 memset(&ea, 0, sizeof(ea)); 1130 switch (vha->host->active_mode) {
1097 ea.event = FCME_DELETE_DONE; 1131 case MODE_INITIATOR:
1098 ea.fcport = sess; 1132 case MODE_DUAL:
1099 qla2x00_fcport_event_handler(vha, &ea); 1133 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1134 qla2xxx_wake_dpc(vha);
1135 break;
1136 case MODE_TARGET:
1137 default:
1138 /* no-op */
1139 break;
1140 }
1100 } 1141 }
1101} 1142}
1102 1143
@@ -1104,11 +1145,20 @@ static void qlt_free_session_done(struct work_struct *work)
1104void qlt_unreg_sess(struct fc_port *sess) 1145void qlt_unreg_sess(struct fc_port *sess)
1105{ 1146{
1106 struct scsi_qla_host *vha = sess->vha; 1147 struct scsi_qla_host *vha = sess->vha;
1148 unsigned long flags;
1107 1149
1108 ql_dbg(ql_dbg_disc, sess->vha, 0x210a, 1150 ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
1109 "%s sess %p for deletion %8phC\n", 1151 "%s sess %p for deletion %8phC\n",
1110 __func__, sess, sess->port_name); 1152 __func__, sess, sess->port_name);
1111 1153
1154 spin_lock_irqsave(&sess->vha->work_lock, flags);
1155 if (sess->free_pending) {
1156 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1157 return;
1158 }
1159 sess->free_pending = 1;
1160 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1161
1112 if (sess->se_sess) 1162 if (sess->se_sess)
1113 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); 1163 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
1114 1164
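
free_pending, tested and set under work_lock, turns qlt_unreg_sess() into a single-winner claim: if a LOGO completion races driver teardown, only one caller pushes the session into the free path, and qlt_free_session_done() clears the flag once the teardown finishes. The idiom reduced to a helper:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Claim-once guard: exactly one concurrent caller wins. */
    static bool claim_teardown(spinlock_t *lock, unsigned int *pending)
    {
            unsigned long flags;
            bool won;

            spin_lock_irqsave(lock, flags);
            won = (*pending == 0);
            if (won)
                    *pending = 1;   /* cleared when teardown completes */
            spin_unlock_irqrestore(lock, flags);

            return won;
    }
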
@@ -1175,10 +1225,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
1175} 1225}
1176 1226
1177/* ha->tgt.sess_lock supposed to be held on entry */ 1227/* ha->tgt.sess_lock supposed to be held on entry */
1178void qlt_schedule_sess_for_deletion(struct fc_port *sess, 1228void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1179 bool immediate)
1180{ 1229{
1181 struct qla_tgt *tgt = sess->tgt; 1230 struct qla_tgt *tgt = sess->tgt;
1231 unsigned long flags;
1182 1232
1183 if (sess->disc_state == DSC_DELETE_PEND) 1233 if (sess->disc_state == DSC_DELETE_PEND)
1184 return; 1234 return;
@@ -1194,27 +1244,28 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
1194 return; 1244 return;
1195 } 1245 }
1196 1246
1197 sess->disc_state = DSC_DELETE_PEND;
1198
1199 if (sess->deleted == QLA_SESS_DELETED) 1247 if (sess->deleted == QLA_SESS_DELETED)
1200 sess->logout_on_delete = 0; 1248 sess->logout_on_delete = 0;
1201 1249
1250 spin_lock_irqsave(&sess->vha->work_lock, flags);
1251 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1252 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1253 return;
1254 }
1202 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; 1255 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
1256 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1257
1258 sess->disc_state = DSC_DELETE_PEND;
1259
1203 qla24xx_chk_fcp_state(sess); 1260 qla24xx_chk_fcp_state(sess);
1204 1261
1205 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 1262 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
1206 "Scheduling sess %p for deletion\n", sess); 1263 "Scheduling sess %p for deletion\n", sess);
1207 1264
1208 schedule_work(&sess->del_work); 1265 /* use cancel to push work element through before re-queue */
1209} 1266 cancel_work_sync(&sess->del_work);
1210 1267 INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
1211void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess) 1268 queue_work(sess->vha->hw->wq, &sess->del_work);
1212{
1213 unsigned long flags;
1214 struct qla_hw_data *ha = sess->vha->hw;
1215 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1216 qlt_schedule_sess_for_deletion(sess, 1);
1217 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1218} 1269}
1219 1270
1220/* ha->tgt.sess_lock supposed to be held on entry */ 1271/* ha->tgt.sess_lock supposed to be held on entry */
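
Two things changed in the scheduling above: the deleted flag is now tested and set under work_lock, so concurrent schedulers cannot double-queue a session, and the work item is flushed with cancel_work_sync() and re-initialized before queue_work(), so a session whose previous del_work has already run (or is still queued) can be re-scheduled cleanly on the driver workqueue. The re-queue step as a helper (process context assumed, since cancel_work_sync() may sleep):

    #include <linux/workqueue.h>

    static void resched_work(struct workqueue_struct *wq,
                             struct work_struct *w, work_func_t fn)
    {
            cancel_work_sync(w);    /* flush any prior queued/running run */
            INIT_WORK(w, fn);       /* re-arm with the handler */
            queue_work(wq, w);      /* then queue exactly one new run */
    }
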
@@ -1225,7 +1276,7 @@ static void qlt_clear_tgt_db(struct qla_tgt *tgt)
1225 1276
1226 list_for_each_entry(sess, &vha->vp_fcports, list) { 1277 list_for_each_entry(sess, &vha->vp_fcports, list) {
1227 if (sess->se_sess) 1278 if (sess->se_sess)
1228 qlt_schedule_sess_for_deletion(sess, 1); 1279 qlt_schedule_sess_for_deletion(sess);
1229 } 1280 }
1230 1281
1231 /* At this point tgt could be already dead */ 1282 /* At this point tgt could be already dead */
@@ -1400,7 +1451,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1400 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 1451 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1401 1452
1402 sess->local = 1; 1453 sess->local = 1;
1403 qlt_schedule_sess_for_deletion(sess, false); 1454 qlt_schedule_sess_for_deletion(sess);
1404 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1455 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1405} 1456}
1406 1457
@@ -1560,8 +1611,11 @@ static void qlt_release(struct qla_tgt *tgt)
1560 1611
1561 btree_destroy64(&tgt->lun_qpair_map); 1612 btree_destroy64(&tgt->lun_qpair_map);
1562 1613
1563 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target) 1614 if (vha->vp_idx)
1564 ha->tgt.tgt_ops->remove_target(vha); 1615 if (ha->tgt.tgt_ops &&
1616 ha->tgt.tgt_ops->remove_target &&
1617 vha->vha_tgt.target_lport_ptr)
1618 ha->tgt.tgt_ops->remove_target(vha);
1565 1619
1566 vha->vha_tgt.qla_tgt = NULL; 1620 vha->vha_tgt.qla_tgt = NULL;
1567 1621
@@ -1976,15 +2030,10 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1976 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, 2030 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
 1977 "qla_target(%d): task abort for non-existent session\n", 2031
1978 vha->vp_idx); 2032 vha->vp_idx);
1979 rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
1980 QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
1981
1982 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2033 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1983 2034
1984 if (rc != 0) { 2035 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
1985 qlt_24xx_send_abts_resp(ha->base_qpair, abts, 2036 false);
1986 FCP_TMF_REJECTED, false);
1987 }
1988 return; 2037 return;
1989 } 2038 }
1990 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2039 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2174,7 +2223,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2174 "TM response logo %phC status %#x state %#x", 2223 "TM response logo %phC status %#x state %#x",
2175 mcmd->sess->port_name, mcmd->fc_tm_rsp, 2224 mcmd->sess->port_name, mcmd->fc_tm_rsp,
2176 mcmd->flags); 2225 mcmd->flags);
2177 qlt_schedule_sess_for_deletion_lock(mcmd->sess); 2226 qlt_schedule_sess_for_deletion(mcmd->sess);
2178 } else { 2227 } else {
2179 qlt_send_notify_ack(vha->hw->base_qpair, 2228 qlt_send_notify_ack(vha->hw->base_qpair,
2180 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2229 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
@@ -3708,7 +3757,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3708 term = 1; 3757 term = 1;
3709 3758
3710 if (term) 3759 if (term)
3711 qlt_term_ctio_exchange(qpair, ctio, cmd, status); 3760 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
3712 3761
3713 return term; 3762 return term;
3714} 3763}
@@ -3869,7 +3918,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3869 "%s %d %8phC post del sess\n", 3918 "%s %d %8phC post del sess\n",
3870 __func__, __LINE__, cmd->sess->port_name); 3919 __func__, __LINE__, cmd->sess->port_name);
3871 3920
3872 qlt_schedule_sess_for_deletion_lock(cmd->sess); 3921 qlt_schedule_sess_for_deletion(cmd->sess);
3873 } 3922 }
3874 break; 3923 break;
3875 } 3924 }
@@ -4204,76 +4253,6 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4204 return cmd; 4253 return cmd;
4205} 4254}
4206 4255
4207static void qlt_create_sess_from_atio(struct work_struct *work)
4208{
4209 struct qla_tgt_sess_op *op = container_of(work,
4210 struct qla_tgt_sess_op, work);
4211 scsi_qla_host_t *vha = op->vha;
4212 struct qla_hw_data *ha = vha->hw;
4213 struct fc_port *sess;
4214 struct qla_tgt_cmd *cmd;
4215 unsigned long flags;
4216 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
4217
4218 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4219 list_del(&op->cmd_list);
4220 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4221
4222 if (op->aborted) {
4223 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
4224 "sess_op with tag %u is aborted\n",
4225 op->atio.u.isp24.exchange_addr);
4226 goto out_term;
4227 }
4228
4229 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
4230 "qla_target(%d): Unable to find wwn login"
4231 " (s_id %x:%x:%x), trying to create it manually\n",
4232 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
4233
4234 if (op->atio.u.raw.entry_count > 1) {
4235 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
4236 "Dropping multy entry atio %p\n", &op->atio);
4237 goto out_term;
4238 }
4239
4240 sess = qlt_make_local_sess(vha, s_id);
4241 /* sess has an extra creation ref. */
4242
4243 if (!sess)
4244 goto out_term;
4245 /*
4246 * Now obtain a pre-allocated session tag using the original op->atio
4247 * packet header, and dispatch into __qlt_do_work() using the existing
4248 * process context.
4249 */
4250 cmd = qlt_get_tag(vha, sess, &op->atio);
4251 if (!cmd) {
4252 struct qla_qpair *qpair = ha->base_qpair;
4253
4254 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
4255 qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
4256 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
4257
4258 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4259 ha->tgt.tgt_ops->put_sess(sess);
4260 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4261 kfree(op);
4262 return;
4263 }
4264
4265 /*
4266 * __qlt_do_work() will call qlt_put_sess() to release
4267 * the extra reference taken above by qlt_make_local_sess()
4268 */
4269 __qlt_do_work(cmd);
4270 kfree(op);
4271 return;
4272out_term:
4273 qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
4274 kfree(op);
4275}
4276
4277/* ha->hardware_lock supposed to be held on entry */ 4256/* ha->hardware_lock supposed to be held on entry */
4278static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 4257static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4279 struct atio_from_isp *atio) 4258 struct atio_from_isp *atio)
@@ -4283,31 +4262,23 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4283 struct fc_port *sess; 4262 struct fc_port *sess;
4284 struct qla_tgt_cmd *cmd; 4263 struct qla_tgt_cmd *cmd;
4285 unsigned long flags; 4264 unsigned long flags;
4265 port_id_t id;
4286 4266
4287 if (unlikely(tgt->tgt_stop)) { 4267 if (unlikely(tgt->tgt_stop)) {
4288 ql_dbg(ql_dbg_io, vha, 0x3061, 4268 ql_dbg(ql_dbg_io, vha, 0x3061,
4289 "New command while device %p is shutting down\n", tgt); 4269 "New command while device %p is shutting down\n", tgt);
4290 return -EFAULT; 4270 return -ENODEV;
4291 } 4271 }
4292 4272
4293 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 4273 id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
4294 if (unlikely(!sess)) { 4274 id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
4295 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op), 4275 id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
4296 GFP_ATOMIC); 4276 if (IS_SW_RESV_ADDR(id))
4297 if (!op) 4277 return -EBUSY;
4298 return -ENOMEM;
4299
4300 memcpy(&op->atio, atio, sizeof(*atio));
4301 op->vha = vha;
4302
4303 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4304 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
4305 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4306 4278
4307 INIT_WORK(&op->work, qlt_create_sess_from_atio); 4279 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4308 queue_work(qla_tgt_wq, &op->work); 4280 if (unlikely(!sess))
4309 return 0; 4281 return -EFAULT;
4310 }
4311 4282
4312 /* Another WWN used to have our s_id. Our PLOGI scheduled its 4283 /* Another WWN used to have our s_id. Our PLOGI scheduled its
4313 * session deletion, but it's still in sess_del_work wq */ 4284 * session deletion, but it's still in sess_del_work wq */
@@ -4336,7 +4307,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4336 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4307 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4337 ha->tgt.tgt_ops->put_sess(sess); 4308 ha->tgt.tgt_ops->put_sess(sess);
4338 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4309 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4339 return -ENOMEM; 4310 return -EBUSY;
4340 } 4311 }
4341 4312
4342 cmd->cmd_in_wq = 1; 4313 cmd->cmd_in_wq = 1;
@@ -4417,14 +4388,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4417{ 4388{
4418 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4389 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4419 struct qla_hw_data *ha = vha->hw; 4390 struct qla_hw_data *ha = vha->hw;
4420 struct qla_tgt *tgt;
4421 struct fc_port *sess; 4391 struct fc_port *sess;
4422 u64 unpacked_lun; 4392 u64 unpacked_lun;
4423 int fn; 4393 int fn;
4424 unsigned long flags; 4394 unsigned long flags;
4425 4395
4426 tgt = vha->vha_tgt.qla_tgt;
4427
4428 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 4396 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4429 4397
4430 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4398 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4435,15 +4403,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4435 unpacked_lun = 4403 unpacked_lun =
4436 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 4404 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4437 4405
4438 if (!sess) { 4406 if (sess == NULL || sess->deleted)
4439 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
4440 "qla_target(%d): task mgmt fn 0x%x for "
4441 "non-existant session\n", vha->vp_idx, fn);
4442 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
4443 sizeof(struct atio_from_isp));
4444 }
4445
4446 if (sess->deleted)
4447 return -EFAULT; 4407 return -EFAULT;
4448 4408
4449 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 4409 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
@@ -4574,7 +4534,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4574 * might have cleared it when requested this session 4534 * might have cleared it when requested this session
4575 * deletion, so don't touch it 4535 * deletion, so don't touch it
4576 */ 4536 */
4577 qlt_schedule_sess_for_deletion(other_sess, true); 4537 qlt_schedule_sess_for_deletion(other_sess);
4578 } else { 4538 } else {
4579 /* 4539 /*
4580 * Another wwn used to have our s_id/loop_id 4540 * Another wwn used to have our s_id/loop_id
@@ -4584,11 +4544,10 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4584 "Invalidating sess %p loop_id %d wwn %llx.\n", 4544 "Invalidating sess %p loop_id %d wwn %llx.\n",
4585 other_sess, other_sess->loop_id, other_wwn); 4545 other_sess, other_sess->loop_id, other_wwn);
4586 4546
4587
4588 other_sess->keep_nport_handle = 1; 4547 other_sess->keep_nport_handle = 1;
4589 *conflict_sess = other_sess; 4548 if (other_sess->disc_state != DSC_DELETED)
4590 qlt_schedule_sess_for_deletion(other_sess, 4549 *conflict_sess = other_sess;
4591 true); 4550 qlt_schedule_sess_for_deletion(other_sess);
4592 } 4551 }
4593 continue; 4552 continue;
4594 } 4553 }
@@ -4602,7 +4561,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4602 4561
4603 /* Same loop_id but different s_id 4562 /* Same loop_id but different s_id
4604 * Ok to kill and logout */ 4563 * Ok to kill and logout */
4605 qlt_schedule_sess_for_deletion(other_sess, true); 4564 qlt_schedule_sess_for_deletion(other_sess);
4606 } 4565 }
4607 } 4566 }
4608 4567
@@ -4652,6 +4611,138 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4652 return count; 4611 return count;
4653} 4612}
4654 4613
4614static int qlt_handle_login(struct scsi_qla_host *vha,
4615 struct imm_ntfy_from_isp *iocb)
4616{
4617 struct fc_port *sess = NULL, *conflict_sess = NULL;
4618 uint64_t wwn;
4619 port_id_t port_id;
4620 uint16_t loop_id, wd3_lo;
4621 int res = 0;
4622 struct qlt_plogi_ack_t *pla;
4623 unsigned long flags;
4624
4625 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4626
4627 port_id.b.domain = iocb->u.isp24.port_id[2];
4628 port_id.b.area = iocb->u.isp24.port_id[1];
4629 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4630 port_id.b.rsvd_1 = 0;
4631
4632 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4633
4634 /* Mark all stale commands sitting in qla_tgt_wq for deletion */
4635 abort_cmds_for_s_id(vha, &port_id);
4636
4637 if (wwn) {
4638 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4639 sess = qlt_find_sess_invalidate_other(vha, wwn,
4640 port_id, loop_id, &conflict_sess);
4641 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4642 }
4643
4644 if (IS_SW_RESV_ADDR(port_id)) {
4645 res = 1;
4646 goto out;
4647 }
4648
4649 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4650 if (!pla) {
4651 qlt_send_term_imm_notif(vha, iocb, 1);
4652 goto out;
4653 }
4654
4655 if (conflict_sess) {
4656 conflict_sess->login_gen++;
4657 qlt_plogi_ack_link(vha, pla, conflict_sess,
4658 QLT_PLOGI_LINK_CONFLICT);
4659 }
4660
4661 if (!sess) {
4662 pla->ref_count++;
4663 ql_dbg(ql_dbg_disc, vha, 0xffff,
4664 "%s %d %8phC post new sess\n",
4665 __func__, __LINE__, iocb->u.isp24.port_name);
4666 if (iocb->u.isp24.status_subcode == ELS_PLOGI)
4667 qla24xx_post_newsess_work(vha, &port_id,
4668 iocb->u.isp24.port_name,
4669 iocb->u.isp24.u.plogi.node_name,
4670 pla, FC4_TYPE_UNKNOWN);
4671 else
4672 qla24xx_post_newsess_work(vha, &port_id,
4673 iocb->u.isp24.port_name, NULL,
4674 pla, FC4_TYPE_UNKNOWN);
4675
4676 goto out;
4677 }
4678
4679 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4680 sess->d_id = port_id;
4681 sess->login_gen++;
4682
4683 if (iocb->u.isp24.status_subcode == ELS_PRLI) {
4684 sess->fw_login_state = DSC_LS_PRLI_PEND;
4685 sess->local = 0;
4686 sess->loop_id = loop_id;
4687 sess->d_id = port_id;
4688 sess->fw_login_state = DSC_LS_PRLI_PEND;
4689 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4690
4691 if (wd3_lo & BIT_7)
4692 sess->conf_compl_supported = 1;
4693
4694 if ((wd3_lo & BIT_4) == 0)
4695 sess->port_type = FCT_INITIATOR;
4696 else
4697 sess->port_type = FCT_TARGET;
4698
4699 } else
4700 sess->fw_login_state = DSC_LS_PLOGI_PEND;
4701
4702
4703 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4704 "%s %d %8phC DS %d\n",
4705 __func__, __LINE__, sess->port_name, sess->disc_state);
4706
4707 switch (sess->disc_state) {
4708 case DSC_DELETED:
4709 qlt_plogi_ack_unref(vha, pla);
4710 break;
4711
4712 default:
4713 /*
4714 * Under normal circumstances we want to release nport handle
4715 * during LOGO process to avoid nport handle leaks inside FW.
4716 * The exception is when LOGO is done while another PLOGI with
4717 * the same nport handle is waiting as might be the case here.
 4718 * Note: there is always a possibility of a race where session
4719 * deletion has already started for other reasons (e.g. ACL
4720 * removal) and now PLOGI arrives:
4721 * 1. if PLOGI arrived in FW after nport handle has been freed,
4722 * FW must have assigned this PLOGI a new/same handle and we
4723 * can proceed ACK'ing it as usual when session deletion
4724 * completes.
4725 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4726 * bit reached it, the handle has now been released. We'll
 4727 * get an error when we ACK this PLOGI. Nothing will be sent
 4728 * back to the initiator. The initiator should eventually retry
 4729 * PLOGI and the situation will correct itself.
4730 */
4731 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4732 (sess->d_id.b24 == port_id.b24));
4733
4734 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4735 "%s %d %8phC post del sess\n",
4736 __func__, __LINE__, sess->port_name);
4737
4738
4739 qlt_schedule_sess_for_deletion(sess);
4740 break;
4741 }
4742out:
4743 return res;
4744}
4745
4655/* 4746/*
 4656 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 4747
4657 */ 4748 */
@@ -4666,7 +4757,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4666 uint16_t loop_id; 4757 uint16_t loop_id;
4667 uint16_t wd3_lo; 4758 uint16_t wd3_lo;
4668 int res = 0; 4759 int res = 0;
4669 struct qlt_plogi_ack_t *pla;
4670 unsigned long flags; 4760 unsigned long flags;
4671 4761
4672 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4762 wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4690,88 +4780,32 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4690 */ 4780 */
4691 switch (iocb->u.isp24.status_subcode) { 4781 switch (iocb->u.isp24.status_subcode) {
4692 case ELS_PLOGI: 4782 case ELS_PLOGI:
4783 res = qlt_handle_login(vha, iocb);
4784 break;
4693 4785
4694 /* Mark all stale commands in qla_tgt_wq for deletion */ 4786 case ELS_PRLI:
4695 abort_cmds_for_s_id(vha, &port_id); 4787 if (N2N_TOPO(ha)) {
4696 4788 sess = qla2x00_find_fcport_by_wwpn(vha,
4697 if (wwn) { 4789 iocb->u.isp24.port_name, 1);
4698 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4699 sess = qlt_find_sess_invalidate_other(vha, wwn,
4700 port_id, loop_id, &conflict_sess);
4701 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4702 }
4703
4704 if (IS_SW_RESV_ADDR(port_id)) {
4705 res = 1;
4706 break;
4707 }
4708
4709 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4710 if (!pla) {
4711 qlt_send_term_imm_notif(vha, iocb, 1);
4712 break;
4713 }
4714
4715 res = 0;
4716 4790
4717 if (conflict_sess) { 4791 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
4718 conflict_sess->login_gen++; 4792 ql_dbg(ql_dbg_disc, vha, 0xffff,
4719 qlt_plogi_ack_link(vha, pla, conflict_sess, 4793 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
4720 QLT_PLOGI_LINK_CONFLICT); 4794 __func__, __LINE__,
4721 } 4795 iocb->u.isp24.port_name);
4796 qlt_send_term_imm_notif(vha, iocb, 1);
4797 break;
4798 }
4722 4799
4723 if (!sess) { 4800 res = qlt_handle_login(vha, iocb);
4724 pla->ref_count++;
4725 qla24xx_post_newsess_work(vha, &port_id,
4726 iocb->u.isp24.port_name, pla);
4727 res = 0;
4728 break; 4801 break;
4729 } 4802 }
4730 4803
4731 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); 4804 if (IS_SW_RESV_ADDR(port_id)) {
4732 sess->fw_login_state = DSC_LS_PLOGI_PEND; 4805 res = 1;
4733 sess->d_id = port_id;
4734 sess->login_gen++;
4735
4736 switch (sess->disc_state) {
4737 case DSC_DELETED:
4738 qlt_plogi_ack_unref(vha, pla);
4739 break;
4740
4741 default:
4742 /*
4743 * Under normal circumstances we want to release nport handle
4744 * during LOGO process to avoid nport handle leaks inside FW.
4745 * The exception is when LOGO is done while another PLOGI with
4746 * the same nport handle is waiting as might be the case here.
4747 * Note: there is always a possibily of a race where session
4748 * deletion has already started for other reasons (e.g. ACL
4749 * removal) and now PLOGI arrives:
4750 * 1. if PLOGI arrived in FW after nport handle has been freed,
4751 * FW must have assigned this PLOGI a new/same handle and we
4752 * can proceed ACK'ing it as usual when session deletion
4753 * completes.
4754 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4755 * bit reached it, the handle has now been released. We'll
4756 * get an error when we ACK this PLOGI. Nothing will be sent
4757 * back to initiator. Initiator should eventually retry
4758 * PLOGI and situation will correct itself.
4759 */
4760 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4761 (sess->d_id.b24 == port_id.b24));
4762
4763 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4764 "%s %d %8phC post del sess\n",
4765 __func__, __LINE__, sess->port_name);
4766
4767
4768 qlt_schedule_sess_for_deletion_lock(sess);
4769 break; 4806 break;
4770 } 4807 }
4771 4808
4772 break;
4773
4774 case ELS_PRLI:
4775 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4809 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4776 4810
4777 if (wwn) { 4811 if (wwn) {
@@ -4782,17 +4816,51 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4782 } 4816 }
4783 4817
4784 if (conflict_sess) { 4818 if (conflict_sess) {
4785 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, 4819 switch (conflict_sess->disc_state) {
4786 "PRLI with conflicting sess %p port %8phC\n", 4820 case DSC_DELETED:
4787 conflict_sess, conflict_sess->port_name); 4821 case DSC_DELETE_PEND:
4788 qlt_send_term_imm_notif(vha, iocb, 1); 4822 break;
4789 res = 0; 4823 default:
4790 break; 4824 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4825 "PRLI with conflicting sess %p port %8phC\n",
4826 conflict_sess, conflict_sess->port_name);
4827 conflict_sess->fw_login_state =
4828 DSC_LS_PORT_UNAVAIL;
4829 qlt_send_term_imm_notif(vha, iocb, 1);
4830 res = 0;
4831 break;
4832 }
4791 } 4833 }
4792 4834
4793 if (sess != NULL) { 4835 if (sess != NULL) {
4794 if (sess->fw_login_state != DSC_LS_PLOGI_PEND && 4836 bool delete = false;
4795 sess->fw_login_state != DSC_LS_PLOGI_COMP) { 4837 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4838 switch (sess->fw_login_state) {
4839 case DSC_LS_PLOGI_PEND:
4840 case DSC_LS_PLOGI_COMP:
4841 case DSC_LS_PRLI_COMP:
4842 break;
4843 default:
4844 delete = true;
4845 break;
4846 }
4847
4848 switch (sess->disc_state) {
4849 case DSC_LOGIN_PEND:
4850 case DSC_GPDB:
4851 case DSC_GPSC:
4852 case DSC_UPD_FCPORT:
4853 case DSC_LOGIN_COMPLETE:
4854 case DSC_ADISC:
4855 delete = false;
4856 break;
4857 default:
4858 break;
4859 }
4860
4861 if (delete) {
4862 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4863 flags);
4796 /* 4864 /*
4797 * Impatient initiator sent PRLI before last 4865 * Impatient initiator sent PRLI before last
4798 * PLOGI could finish. Will force him to re-try, 4866 * PLOGI could finish. Will force him to re-try,
@@ -4803,6 +4871,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4803 sess); 4871 sess);
4804 qlt_send_term_imm_notif(vha, iocb, 1); 4872 qlt_send_term_imm_notif(vha, iocb, 1);
4805 res = 0; 4873 res = 0;
4874 spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
4875 flags);
4806 break; 4876 break;
4807 } 4877 }
4808 4878
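
The PRLI path now decides eviction with two successive switches: the first marks the session for deletion when its fw_login_state is anything but PLOGI pending/complete or PRLI complete, and the second vetoes that while discovery is mid-flight (login pending, GPDB, GPSC, fcport update, login complete, ADISC), since tearing the session down at that point would fight the discovery state machine. Condensed restatement with editorial comments:

    bool delete = false;

    switch (sess->fw_login_state) {
    case DSC_LS_PLOGI_PEND:
    case DSC_LS_PLOGI_COMP:
    case DSC_LS_PRLI_COMP:
            break;                  /* login far enough along: keep */
    default:
            delete = true;          /* stale FW state: eviction candidate */
            break;
    }

    switch (sess->disc_state) {
    case DSC_LOGIN_PEND:
    case DSC_GPDB:
    case DSC_GPSC:
    case DSC_UPD_FCPORT:
    case DSC_LOGIN_COMPLETE:
    case DSC_ADISC:
            delete = false;         /* discovery in flight: do not evict */
            break;
    default:
            break;
    }
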
@@ -4826,6 +4896,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4826 sess->port_type = FCT_INITIATOR; 4896 sess->port_type = FCT_INITIATOR;
4827 else 4897 else
4828 sess->port_type = FCT_TARGET; 4898 sess->port_type = FCT_TARGET;
4899
4900 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4829 } 4901 }
4830 res = 1; /* send notify ack */ 4902 res = 1; /* send notify ack */
4831 4903
@@ -4863,7 +4935,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4863 res = 1; 4935 res = 1;
4864 break; 4936 break;
4865 } 4937 }
4866 /* drop through */ 4938 /* fall through */
4867 case ELS_LOGO: 4939 case ELS_LOGO:
4868 case ELS_PRLO: 4940 case ELS_PRLO:
4869 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4941 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4892,7 +4964,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4892 } else { 4964 } else {
4893 /* cmd did not go to upper layer. */ 4965 /* cmd did not go to upper layer. */
4894 if (sess) { 4966 if (sess) {
4895 qlt_schedule_sess_for_deletion_lock(sess); 4967 qlt_schedule_sess_for_deletion(sess);
4896 res = 0; 4968 res = 0;
4897 } 4969 }
4898 /* else logo will be ack */ 4970 /* else logo will be ack */
@@ -4930,6 +5002,10 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4930 break; 5002 break;
4931 } 5003 }
4932 5004
5005 ql_dbg(ql_dbg_disc, vha, 0xf026,
5006 "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
5007 vha->vp_idx, iocb->u.isp24.status_subcode, res);
5008
4933 return res; 5009 return res;
4934} 5010}
4935 5011
@@ -5320,7 +5396,6 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5320 struct atio_from_isp *atio, uint8_t ha_locked) 5396 struct atio_from_isp *atio, uint8_t ha_locked)
5321{ 5397{
5322 struct qla_hw_data *ha = vha->hw; 5398 struct qla_hw_data *ha = vha->hw;
5323 uint16_t status;
5324 unsigned long flags; 5399 unsigned long flags;
5325 5400
5326 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5401 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
@@ -5328,8 +5403,7 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5328 5403
5329 if (!ha_locked) 5404 if (!ha_locked)
5330 spin_lock_irqsave(&ha->hardware_lock, flags); 5405 spin_lock_irqsave(&ha->hardware_lock, flags);
5331 status = temp_sam_status; 5406 qlt_send_busy(qpair, atio, qla_sam_status);
5332 qlt_send_busy(qpair, atio, status);
5333 if (!ha_locked) 5407 if (!ha_locked)
5334 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5408 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5335 5409
@@ -5344,7 +5418,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5344 struct qla_hw_data *ha = vha->hw; 5418 struct qla_hw_data *ha = vha->hw;
5345 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5419 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5346 int rc; 5420 int rc;
5347 unsigned long flags; 5421 unsigned long flags = 0;
5348 5422
5349 if (unlikely(tgt == NULL)) { 5423 if (unlikely(tgt == NULL)) {
5350 ql_dbg(ql_dbg_tgt, vha, 0x3064, 5424 ql_dbg(ql_dbg_tgt, vha, 0x3064,
@@ -5368,8 +5442,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5368 "sending QUEUE_FULL\n", vha->vp_idx); 5442 "sending QUEUE_FULL\n", vha->vp_idx);
5369 if (!ha_locked) 5443 if (!ha_locked)
5370 spin_lock_irqsave(&ha->hardware_lock, flags); 5444 spin_lock_irqsave(&ha->hardware_lock, flags);
5371 qlt_send_busy(ha->base_qpair, atio, 5445 qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
5372 SAM_STAT_TASK_SET_FULL);
5373 if (!ha_locked) 5446 if (!ha_locked)
5374 spin_unlock_irqrestore(&ha->hardware_lock, 5447 spin_unlock_irqrestore(&ha->hardware_lock,
5375 flags); 5448 flags);
@@ -5388,42 +5461,37 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5388 rc = qlt_handle_task_mgmt(vha, atio); 5461 rc = qlt_handle_task_mgmt(vha, atio);
5389 } 5462 }
5390 if (unlikely(rc != 0)) { 5463 if (unlikely(rc != 0)) {
5391 if (rc == -ESRCH) { 5464 if (!ha_locked)
5392 if (!ha_locked) 5465 spin_lock_irqsave(&ha->hardware_lock, flags);
5393 spin_lock_irqsave(&ha->hardware_lock, 5466 switch (rc) {
5394 flags); 5467 case -ENODEV:
5395 5468 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5396#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5469 "qla_target: Unable to send command to target\n");
5397 qlt_send_busy(ha->base_qpair, atio, 5470 break;
5398 SAM_STAT_BUSY); 5471 case -EBADF:
5399#else 5472 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5473 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5400 qlt_send_term_exchange(ha->base_qpair, NULL, 5474 qlt_send_term_exchange(ha->base_qpair, NULL,
5401 atio, 1, 0); 5475 atio, 1, 0);
5402#endif 5476 break;
5403 if (!ha_locked) 5477 case -EBUSY:
5404 spin_unlock_irqrestore( 5478 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5405 &ha->hardware_lock, flags); 5479 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5406 } else { 5480 vha->vp_idx);
5407 if (tgt->tgt_stop) { 5481 qlt_send_busy(ha->base_qpair, atio,
5408 ql_dbg(ql_dbg_tgt, vha, 0xe059, 5482 tc_sam_status);
5409 "qla_target: Unable to send " 5483 break;
5410 "command to target for req, " 5484 default:
5411 "ignoring.\n"); 5485 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5412 } else { 5486 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5413 ql_dbg(ql_dbg_tgt, vha, 0xe05a, 5487 vha->vp_idx);
5414 "qla_target(%d): Unable to send " 5488 qlt_send_busy(ha->base_qpair, atio,
5415 "command to target, sending BUSY " 5489 qla_sam_status);
5416 "status.\n", vha->vp_idx); 5490 break;
5417 if (!ha_locked)
5418 spin_lock_irqsave(
5419 &ha->hardware_lock, flags);
5420 qlt_send_busy(ha->base_qpair,
5421 atio, SAM_STAT_BUSY);
5422 if (!ha_locked)
5423 spin_unlock_irqrestore(
5424 &ha->hardware_lock, flags);
5425 }
5426 } 5491 }
5492 if (!ha_locked)
5493 spin_unlock_irqrestore(&ha->hardware_lock,
5494 flags);
5427 } 5495 }
5428 break; 5496 break;
5429 5497
@@ -5506,27 +5574,31 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
5506 5574
5507 rc = qlt_handle_cmd_for_atio(vha, atio); 5575 rc = qlt_handle_cmd_for_atio(vha, atio);
5508 if (unlikely(rc != 0)) { 5576 if (unlikely(rc != 0)) {
5509 if (rc == -ESRCH) { 5577 switch (rc) {
5510#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5578 case -ENODEV:
5511 qlt_send_busy(rsp->qpair, atio, 0); 5579 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5512#else 5580 "qla_target: Unable to send command to target\n");
5513 qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0); 5581 break;
5514#endif 5582 case -EBADF:
5515 } else { 5583 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5516 if (tgt->tgt_stop) { 5584 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5517 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5585 qlt_send_term_exchange(rsp->qpair, NULL,
5518 "qla_target: Unable to send " 5586 atio, 1, 0);
5519 "command to target, sending TERM " 5587 break;
5520 "EXCHANGE for rsp\n"); 5588 case -EBUSY:
5521 qlt_send_term_exchange(rsp->qpair, NULL, 5589 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5522 atio, 1, 0); 5590 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5523 } else { 5591 vha->vp_idx);
5524 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5592 qlt_send_busy(rsp->qpair, atio,
5525 "qla_target(%d): Unable to send " 5593 tc_sam_status);
5526 "command to target, sending BUSY " 5594 break;
5527 "status\n", vha->vp_idx); 5595 default:
5528 qlt_send_busy(rsp->qpair, atio, 0); 5596 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5529 } 5597 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5598 vha->vp_idx);
5599 qlt_send_busy(rsp->qpair, atio,
5600 qla_sam_status);
5601 break;
5530 } 5602 }
5531 } 5603 }
5532 } 5604 }
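Both qlt_24xx_atio_pkt() and qlt_response_pkt() above now dispatch on the handler's return code instead of the old -ESRCH special case. A hedged restatement of the shared policy, with actions named after the calls visible in the hunks:

/* Illustrative summary of the rc handling in both hunks. */
switch (rc) {
case -ENODEV:	/* log only: no session/target to answer          */
	break;
case -EBADF:	/* qlt_send_term_exchange(): terminate the exchange */
	break;
case -EBUSY:	/* qlt_send_busy(..., tc_sam_status)              */
	break;
default:	/* qlt_send_busy(..., qla_sam_status)             */
	break;
}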
@@ -5755,7 +5827,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5755 unsigned long flags; 5827 unsigned long flags;
5756 u8 newfcport = 0; 5828 u8 newfcport = 0;
5757 5829
5758 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); 5830 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5759 if (!fcport) { 5831 if (!fcport) {
5760 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 5832 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
5761 "qla_target(%d): Allocation of tmp FC port failed", 5833 "qla_target(%d): Allocation of tmp FC port failed",
@@ -5784,6 +5856,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5784 tfcp->port_type = fcport->port_type; 5856 tfcp->port_type = fcport->port_type;
5785 tfcp->supported_classes = fcport->supported_classes; 5857 tfcp->supported_classes = fcport->supported_classes;
5786 tfcp->flags |= fcport->flags; 5858 tfcp->flags |= fcport->flags;
5859 tfcp->scan_state = QLA_FCPORT_FOUND;
5787 5860
5788 del = fcport; 5861 del = fcport;
5789 fcport = tfcp; 5862 fcport = tfcp;
@@ -6445,18 +6518,21 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6445 qlt_add_target(ha, vha); 6518 qlt_add_target(ha, vha);
6446} 6519}
6447 6520
6448void 6521u8
6449qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req) 6522qlt_rff_id(struct scsi_qla_host *vha)
6450{ 6523{
6524 u8 fc4_feature = 0;
6451 /* 6525 /*
6452 * FC-4 Feature bit 0 indicates target functionality to the name server. 6526 * FC-4 Feature bit 0 indicates target functionality to the name server.
6453 */ 6527 */
6454 if (qla_tgt_mode_enabled(vha)) { 6528 if (qla_tgt_mode_enabled(vha)) {
6455 ct_req->req.rff_id.fc4_feature = BIT_0; 6529 fc4_feature = BIT_0;
6456 } else if (qla_ini_mode_enabled(vha)) { 6530 } else if (qla_ini_mode_enabled(vha)) {
6457 ct_req->req.rff_id.fc4_feature = BIT_1; 6531 fc4_feature = BIT_1;
6458 } else if (qla_dual_mode_enabled(vha)) 6532 } else if (qla_dual_mode_enabled(vha))
6459 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; 6533 fc4_feature = BIT_0 | BIT_1;
6534
6535 return fc4_feature;
6460} 6536}
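With qlt_rff_id() now returning the FC-4 feature bits rather than writing them into the CT request itself, the caller performs the assignment. A minimal sketch of the new calling convention, using the field name from the old signature removed above:

/* Sketch of a caller under the new API; BIT_0 = target, BIT_1 = initiator. */
ct_req->req.rff_id.fc4_feature = qlt_rff_id(vha);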
6461 6537
6462/* 6538/*
@@ -6546,7 +6622,9 @@ void
6546qlt_24xx_config_rings(struct scsi_qla_host *vha) 6622qlt_24xx_config_rings(struct scsi_qla_host *vha)
6547{ 6623{
6548 struct qla_hw_data *ha = vha->hw; 6624 struct qla_hw_data *ha = vha->hw;
6549 struct init_cb_24xx *icb; 6625 struct qla_msix_entry *msix = &ha->msix_entries[2];
6626 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6627
6550 if (!QLA_TGT_MODE_ENABLED()) 6628 if (!QLA_TGT_MODE_ENABLED())
6551 return; 6629 return;
6552 6630
@@ -6554,19 +6632,28 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
6554 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); 6632 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
6555 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); 6633 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
6556 6634
6557 icb = (struct init_cb_24xx *)ha->init_cb; 6635 if (ha->flags.msix_enabled) {
6558 6636 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6559 if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) { 6637 if (IS_QLA2071(ha)) {
6560 struct qla_msix_entry *msix = &ha->msix_entries[2]; 6638 /* 4 ports Baker: Enable Interrupt Handshake */
6561 6639 icb->msix_atio = 0;
6562 icb->msix_atio = cpu_to_le16(msix->entry); 6640 icb->firmware_options_2 |= BIT_26;
6563 ql_dbg(ql_dbg_init, vha, 0xf072, 6641 } else {
6564 "Registering ICB vector 0x%x for atio que.\n", 6642 icb->msix_atio = cpu_to_le16(msix->entry);
6565 msix->entry); 6643 icb->firmware_options_2 &= ~BIT_26;
6566 } else if (ql2xenablemsix == 0) { 6644 }
6567 icb->firmware_options_2 |= cpu_to_le32(BIT_26); 6645 ql_dbg(ql_dbg_init, vha, 0xf072,
6568 ql_dbg(ql_dbg_init, vha, 0xf07f, 6646 "Registering ICB vector 0x%x for atio que.\n",
6569 "Registering INTx vector for ATIO.\n"); 6647 msix->entry);
6648 }
6649 } else {
6650 /* INTx|MSI */
6651 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6652 icb->msix_atio = 0;
6653 icb->firmware_options_2 |= BIT_26;
6654 ql_dbg(ql_dbg_init, vha, 0xf072,
6655 "%s: Use INTx for ATIOQ.\n", __func__);
6656 }
6570 } 6657 }
6571} 6658}
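The reworked ATIO-queue interrupt setup reduces to two paths once the IS_QLA83XX/IS_QLA27XX guard has passed; a hedged restatement (per the hunk, QLA2071 "Baker" boards use the interrupt handshake even when MSI-X is enabled):

/* Sketch of the selection logic above, inside the 83xx/27xx guard. */
if (ha->flags.msix_enabled && !IS_QLA2071(ha)) {
	icb->msix_atio = cpu_to_le16(msix->entry); /* dedicated vector */
	icb->firmware_options_2 &= ~BIT_26;        /* handshake off */
} else {
	icb->msix_atio = 0;                        /* INTx/MSI, or 2071 */
	icb->firmware_options_2 |= BIT_26;         /* handshake on */
}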
6572 6659
@@ -6574,6 +6661,7 @@ void
6574qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) 6661qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6575{ 6662{
6576 struct qla_hw_data *ha = vha->hw; 6663 struct qla_hw_data *ha = vha->hw;
6664 u32 tmp;
6577 6665
6578 if (!QLA_TGT_MODE_ENABLED()) 6666 if (!QLA_TGT_MODE_ENABLED())
6579 return; 6667 return;
@@ -6625,6 +6713,14 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6625 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6713 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6626 /* Enable target PRLI control */ 6714 /* Enable target PRLI control */
6627 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6715 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6716
6717 if (IS_QLA25XX(ha)) {
6718 /* Change Loop-prefer to Pt-Pt */
6719 tmp = ~(BIT_4|BIT_5|BIT_6);
6720 nv->firmware_options_2 &= cpu_to_le32(tmp);
6721 tmp = P2P << 4;
6722 nv->firmware_options_2 |= cpu_to_le32(tmp);
6723 }
6628 } else { 6724 } else {
6629 if (ha->tgt.saved_set) { 6725 if (ha->tgt.saved_set) {
6630 nv->exchange_count = ha->tgt.saved_exchange_count; 6726 nv->exchange_count = ha->tgt.saved_exchange_count;
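Bits 4-6 of firmware_options_2 carry the connection-mode preference, so the two statements added above clear that field and then program point-to-point. The same update, sketched with an explicit byte-order round trip (P2P is the driver's connection-mode constant):

/* Clear the connection-mode field (bits 4..6), then select Pt-Pt. */
u32 opts = le32_to_cpu(nv->firmware_options_2);

opts &= ~(BIT_4 | BIT_5 | BIT_6);
opts |= P2P << 4;
nv->firmware_options_2 = cpu_to_le32(opts);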
@@ -6679,6 +6775,7 @@ void
6679qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) 6775qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6680{ 6776{
6681 struct qla_hw_data *ha = vha->hw; 6777 struct qla_hw_data *ha = vha->hw;
6778 u32 tmp;
6682 6779
6683 if (!QLA_TGT_MODE_ENABLED()) 6780 if (!QLA_TGT_MODE_ENABLED())
6684 return; 6781 return;
@@ -6729,6 +6826,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6729 nv->host_p &= cpu_to_le32(~BIT_10); 6826 nv->host_p &= cpu_to_le32(~BIT_10);
6730 /* Enable target PRLI control */ 6827 /* Enable target PRLI control */
6731 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6828 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6829
6830 /* Change Loop-prefer to Pt-Pt */
6831 tmp = ~(BIT_4|BIT_5|BIT_6);
6832 nv->firmware_options_2 &= cpu_to_le32(tmp);
6833 tmp = P2P << 4;
6834 nv->firmware_options_2 |= cpu_to_le32(tmp);
6732 } else { 6835 } else {
6733 if (ha->tgt.saved_set) { 6836 if (ha->tgt.saved_set) {
6734 nv->exchange_count = ha->tgt.saved_exchange_count; 6837 nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6991,20 +7094,14 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
6991 7094
6992void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) 7095void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
6993{ 7096{
6994 unsigned long flags;
6995 struct qla_hw_data *ha = vha->hw;
6996 7097
6997 if (!vha->d_id.b24) { 7098 if (!vha->d_id.b24) {
6998 spin_lock_irqsave(&ha->vport_slock, flags);
6999 vha->d_id = id; 7099 vha->d_id = id;
7000 qlt_update_vp_map(vha, SET_AL_PA); 7100 qlt_update_vp_map(vha, SET_AL_PA);
7001 spin_unlock_irqrestore(&ha->vport_slock, flags);
7002 } else if (vha->d_id.b24 != id.b24) { 7101 } else if (vha->d_id.b24 != id.b24) {
7003 spin_lock_irqsave(&ha->vport_slock, flags);
7004 qlt_update_vp_map(vha, RESET_AL_PA); 7102 qlt_update_vp_map(vha, RESET_AL_PA);
7005 vha->d_id = id; 7103 vha->d_id = id;
7006 qlt_update_vp_map(vha, SET_AL_PA); 7104 qlt_update_vp_map(vha, SET_AL_PA);
7007 spin_unlock_irqrestore(&ha->vport_slock, flags);
7008 } 7105 }
7009} 7106}
7010 7107
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index aba58d3848a6..bb67b5a284a8 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -993,7 +993,7 @@ struct qla_tgt_prm {
993 993
994/* Check for Switch reserved address */ 994/* Check for Switch reserved address */
995#define IS_SW_RESV_ADDR(_s_id) \ 995#define IS_SW_RESV_ADDR(_s_id) \
996 ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc)) 996 ((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
997 997
998#define QLA_TGT_XMIT_DATA 1 998#define QLA_TGT_XMIT_DATA 1
999#define QLA_TGT_XMIT_STATUS 2 999#define QLA_TGT_XMIT_STATUS 2
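The widened IS_SW_RESV_ADDR() test now flags any address with domain 0xFF and an area byte in 0xF0-0xFF as switch-reserved, not just the 0xFC range matched before. Two illustrative port ids, assuming the driver's port_id_t layout (an editorial aid, not driver code):

port_id_t a = { .b = { .domain = 0xff, .area = 0xfc, .al_pa = 0x01 } };
port_id_t b = { .b = { .domain = 0xff, .area = 0xff, .al_pa = 0xfe } };

/* IS_SW_RESV_ADDR(a): true before and after this change.           */
/* IS_SW_RESV_ADDR(b): false before (area != 0xfc), true after,     */
/* since (0xff & 0xf0) == 0xf0.                                      */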
@@ -1072,7 +1072,7 @@ extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
1072extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *); 1072extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
1073extern void qlt_enable_vha(struct scsi_qla_host *); 1073extern void qlt_enable_vha(struct scsi_qla_host *);
1074extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *); 1074extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
1075extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *); 1075extern u8 qlt_rff_id(struct scsi_qla_host *);
1076extern void qlt_init_atio_q_entries(struct scsi_qla_host *); 1076extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
1077extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t); 1077extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
1078extern void qlt_24xx_config_rings(struct scsi_qla_host *); 1078extern void qlt_24xx_config_rings(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 733e8dcccf5c..731ca0d8520a 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -526,7 +526,8 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
526{ 526{
527 ql_dbg(ql_dbg_misc, vha, 0xd20c, 527 ql_dbg(ql_dbg_misc, vha, 0xd20c,
528 "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len); 528 "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
529 if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) { 529 switch (ent->t268.buf_type) {
530 case T268_BUF_TYPE_EXTD_TRACE:
530 if (vha->hw->eft) { 531 if (vha->hw->eft) {
531 if (buf) { 532 if (buf) {
532 ent->t268.buf_size = EFT_SIZE; 533 ent->t268.buf_size = EFT_SIZE;
@@ -538,10 +539,43 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
538 "%s: missing eft\n", __func__); 539 "%s: missing eft\n", __func__);
539 qla27xx_skip_entry(ent, buf); 540 qla27xx_skip_entry(ent, buf);
540 } 541 }
541 } else { 542 break;
542 ql_dbg(ql_dbg_misc, vha, 0xd02b, 543 case T268_BUF_TYPE_EXCH_BUFOFF:
544 if (vha->hw->exchoffld_buf) {
545 if (buf) {
546 ent->t268.buf_size = vha->hw->exchoffld_size;
547 ent->t268.start_addr =
548 vha->hw->exchoffld_buf_dma;
549 }
550 qla27xx_insertbuf(vha->hw->exchoffld_buf,
551 vha->hw->exchoffld_size, buf, len);
552 } else {
553 ql_dbg(ql_dbg_misc, vha, 0xd028,
554 "%s: missing exch offld\n", __func__);
555 qla27xx_skip_entry(ent, buf);
556 }
557 break;
558 case T268_BUF_TYPE_EXTD_LOGIN:
559 if (vha->hw->exlogin_buf) {
560 if (buf) {
561 ent->t268.buf_size = vha->hw->exlogin_size;
562 ent->t268.start_addr =
563 vha->hw->exlogin_buf_dma;
564 }
565 qla27xx_insertbuf(vha->hw->exlogin_buf,
566 vha->hw->exlogin_size, buf, len);
567 } else {
568 ql_dbg(ql_dbg_misc, vha, 0xd028,
569 "%s: missing ext login\n", __func__);
570 qla27xx_skip_entry(ent, buf);
571 }
572 break;
573
574 default:
575 ql_dbg(ql_dbg_async, vha, 0xd02b,
543 "%s: unknown buffer %x\n", __func__, ent->t268.buf_type); 576 "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
544 qla27xx_skip_entry(ent, buf); 577 qla27xx_skip_entry(ent, buf);
578 break;
545 } 579 }
546 580
547 return false; 581 return false;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b6ec02b96d3d..549bef9afddd 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "10.00.00.02-k" 10#define QLA2XXX_VERSION "10.00.00.05-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 10 12#define QLA_DRIVER_MAJOR_VER 10
13#define QLA_DRIVER_MINOR_VER 0 13#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3f82ea1b72dc..aadfeaac3898 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1635,16 +1635,13 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
1635 return rc; 1635 return rc;
1636 } 1636 }
1637 1637
1638 lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 1638 lport->lport_loopid_map = vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1639 65536);
1640 if (!lport->lport_loopid_map) { 1639 if (!lport->lport_loopid_map) {
1641 pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n", 1640 pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
1642 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); 1641 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1643 btree_destroy32(&lport->lport_fcport_map); 1642 btree_destroy32(&lport->lport_fcport_map);
1644 return -ENOMEM; 1643 return -ENOMEM;
1645 } 1644 }
1646 memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
1647 * 65536);
1648 pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n", 1645 pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
1649 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); 1646 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1650 return 0; 1647 return 0;
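The vzalloc() conversion above collapses an allocate-then-zero pair into a single call; the general idiom, sketched:

/* Before: separate allocation and memset of the 65536-entry map. */
map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
if (map)
	memset(map, 0, sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);

/* After: one call that returns zeroed memory, or NULL on failure. */
map = vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);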
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 5d6d158bbfd6..52b1a0bc93c9 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,15 +153,14 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
153 dma_addr_t sys_info_dma; 153 dma_addr_t sys_info_dma;
154 int status = QLA_ERROR; 154 int status = QLA_ERROR;
155 155
156 sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 156 sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
157 &sys_info_dma, GFP_KERNEL); 157 &sys_info_dma, GFP_KERNEL);
158 if (sys_info == NULL) { 158 if (sys_info == NULL) {
159 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 159 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
160 ha->host_no, __func__)); 160 ha->host_no, __func__));
161 161
162 goto exit_get_sys_info_no_free; 162 goto exit_get_sys_info_no_free;
163 } 163 }
164 memset(sys_info, 0, sizeof(*sys_info));
165 164
166 /* Get flash sys info */ 165 /* Get flash sys info */
167 if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO, 166 if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
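The same zeroing-allocator conversion is applied to the coherent DMA buffers throughout the qla4xxx hunks that follow. The idiom, sketched (dma_zalloc_coherent() was the zeroing wrapper available at the time of this series):

/* Before: allocate, then clear the buffer by hand. */
sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
			      &sys_info_dma, GFP_KERNEL);
if (sys_info)
	memset(sys_info, 0, sizeof(*sys_info));

/* After: allocation and zeroing in a single call. */
sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
			       &sys_info_dma, GFP_KERNEL);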
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 1da04f323d38..bda2e64ee5ca 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,15 +625,14 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
625 uint32_t mbox_sts[MBOX_REG_COUNT]; 625 uint32_t mbox_sts[MBOX_REG_COUNT];
626 int status = QLA_ERROR; 626 int status = QLA_ERROR;
627 627
628 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 628 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
629 sizeof(struct addr_ctrl_blk), 629 sizeof(struct addr_ctrl_blk),
630 &init_fw_cb_dma, GFP_KERNEL); 630 &init_fw_cb_dma, GFP_KERNEL);
631 if (init_fw_cb == NULL) { 631 if (init_fw_cb == NULL) {
632 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", 632 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
633 ha->host_no, __func__)); 633 ha->host_no, __func__));
634 goto exit_init_fw_cb_no_free; 634 goto exit_init_fw_cb_no_free;
635 } 635 }
636 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
637 636
638 /* Get Initialize Firmware Control Block. */ 637 /* Get Initialize Firmware Control Block. */
639 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 638 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
@@ -710,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
710 uint32_t mbox_cmd[MBOX_REG_COUNT]; 709 uint32_t mbox_cmd[MBOX_REG_COUNT];
711 uint32_t mbox_sts[MBOX_REG_COUNT]; 710 uint32_t mbox_sts[MBOX_REG_COUNT];
712 711
713 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 712 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
714 sizeof(struct addr_ctrl_blk), 713 sizeof(struct addr_ctrl_blk),
715 &init_fw_cb_dma, GFP_KERNEL); 714 &init_fw_cb_dma, GFP_KERNEL);
716 if (init_fw_cb == NULL) { 715 if (init_fw_cb == NULL) {
717 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, 716 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
718 __func__); 717 __func__);
@@ -720,7 +719,6 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
720 } 719 }
721 720
722 /* Get Initialize Firmware Control Block. */ 721 /* Get Initialize Firmware Control Block. */
723 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
724 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != 722 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
725 QLA_SUCCESS) { 723 QLA_SUCCESS) {
726 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", 724 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
@@ -1342,16 +1340,15 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
1342 uint32_t mbox_sts[MBOX_REG_COUNT]; 1340 uint32_t mbox_sts[MBOX_REG_COUNT];
1343 int status = QLA_ERROR; 1341 int status = QLA_ERROR;
1344 1342
1345 about_fw = dma_alloc_coherent(&ha->pdev->dev, 1343 about_fw = dma_zalloc_coherent(&ha->pdev->dev,
1346 sizeof(struct about_fw_info), 1344 sizeof(struct about_fw_info),
1347 &about_fw_dma, GFP_KERNEL); 1345 &about_fw_dma, GFP_KERNEL);
1348 if (!about_fw) { 1346 if (!about_fw) {
1349 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " 1347 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
1350 "for about_fw\n", __func__)); 1348 "for about_fw\n", __func__));
1351 return status; 1349 return status;
1352 } 1350 }
1353 1351
1354 memset(about_fw, 0, sizeof(struct about_fw_info));
1355 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 1352 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1356 memset(&mbox_sts, 0, sizeof(mbox_sts)); 1353 memset(&mbox_sts, 0, sizeof(mbox_sts));
1357 1354
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e91abb327745..968bd85610f8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4050,15 +4050,14 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
4050 dma_addr_t sys_info_dma; 4050 dma_addr_t sys_info_dma;
4051 int status = QLA_ERROR; 4051 int status = QLA_ERROR;
4052 4052
4053 sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 4053 sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
4054 &sys_info_dma, GFP_KERNEL); 4054 &sys_info_dma, GFP_KERNEL);
4055 if (sys_info == NULL) { 4055 if (sys_info == NULL) {
4056 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 4056 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
4057 ha->host_no, __func__)); 4057 ha->host_no, __func__));
4058 return status; 4058 return status;
4059 } 4059 }
4060 4060
4061 memset(sys_info, 0, sizeof(*sys_info));
4062 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 4061 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
4063 memset(&mbox_sts, 0, sizeof(mbox_sts)); 4062 memset(&mbox_sts, 0, sizeof(mbox_sts));
4064 4063
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2b8a8ce2a431..82e889bbe0ed 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2689,16 +2689,15 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
2689 uint32_t rem = len; 2689 uint32_t rem = len;
2690 struct nlattr *attr; 2690 struct nlattr *attr;
2691 2691
2692 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 2692 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
2693 sizeof(struct addr_ctrl_blk), 2693 sizeof(struct addr_ctrl_blk),
2694 &init_fw_cb_dma, GFP_KERNEL); 2694 &init_fw_cb_dma, GFP_KERNEL);
2695 if (!init_fw_cb) { 2695 if (!init_fw_cb) {
2696 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2696 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
2697 __func__); 2697 __func__);
2698 return -ENOMEM; 2698 return -ENOMEM;
2699 } 2699 }
2700 2700
2701 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
2702 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 2701 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
2703 memset(&mbox_sts, 0, sizeof(mbox_sts)); 2702 memset(&mbox_sts, 0, sizeof(mbox_sts));
2704 2703
@@ -4196,15 +4195,14 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
4196 sizeof(struct shadow_regs) + 4195 sizeof(struct shadow_regs) +
4197 MEM_ALIGN_VALUE + 4196 MEM_ALIGN_VALUE +
4198 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4197 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4199 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, 4198 ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
4200 &ha->queues_dma, GFP_KERNEL); 4199 &ha->queues_dma, GFP_KERNEL);
4201 if (ha->queues == NULL) { 4200 if (ha->queues == NULL) {
4202 ql4_printk(KERN_WARNING, ha, 4201 ql4_printk(KERN_WARNING, ha,
4203 "Memory Allocation failed - queues.\n"); 4202 "Memory Allocation failed - queues.\n");
4204 4203
4205 goto mem_alloc_error_exit; 4204 goto mem_alloc_error_exit;
4206 } 4205 }
4207 memset(ha->queues, 0, ha->queues_len);
4208 4206
4209 /* 4207 /*
4210 * As per RISC alignment requirements -- the bus-address must be a 4208 * As per RISC alignment requirements -- the bus-address must be a
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 40bc616cf8ab..90349498f686 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -12,7 +12,7 @@
12 12
13/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 13/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
14 * You may not alter any existing entry (although adding new ones is 14 * You may not alter any existing entry (although adding new ones is
15 * encouraged once assigned by ANSI/INCITS T10 15 * encouraged once assigned by ANSI/INCITS T10).
16 */ 16 */
17static const char *const scsi_device_types[] = { 17static const char *const scsi_device_types[] = {
18 "Direct-Access ", 18 "Direct-Access ",
@@ -39,7 +39,7 @@ static const char *const scsi_device_types[] = {
39}; 39};
40 40
41/** 41/**
42 * scsi_device_type - Return 17 char string indicating device type. 42 * scsi_device_type - Return 17-char string indicating device type.
43 * @type: type number to look up 43 * @type: type number to look up
44 */ 44 */
45const char *scsi_device_type(unsigned type) 45const char *scsi_device_type(unsigned type)
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(scsi_device_type);
59 * @scsilun: struct scsi_lun to be converted. 59 * @scsilun: struct scsi_lun to be converted.
60 * 60 *
61 * Description: 61 * Description:
62 * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered 62 * Convert @scsilun from a struct scsi_lun to a four-byte host byte-ordered
63 * integer, and return the result. The caller must check for 63 * integer, and return the result. The caller must check for
64 * truncation before using this function. 64 * truncation before using this function.
65 * 65 *
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(scsilun_to_int);
98 * back into the lun value. 98 * back into the lun value.
99 * 99 *
100 * Notes: 100 * Notes:
101 * Given an integer: 0x0b03d204, this function returns a 101 * Given an integer: 0x0b03d204, this function returns a
102 * struct scsi_lun of: d2 04 0b 03 00 00 00 00 102 * struct scsi_lun of: d2 04 0b 03 00 00 00 00
103 * 103 *
104 */ 104 */
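A worked round trip through the two helpers documented above, assuming the exported scsi_common API (scsilun_to_int() truncates to the first four LUN bytes, so callers must check for loss before trusting the result):

struct scsi_lun lun;
u64 out;

int_to_scsilun(0x0b03d204, &lun);
/* lun.scsi_lun[] now holds: d2 04 0b 03 00 00 00 00 */
out = scsilun_to_int(&lun);
/* out == 0x0b03d204 */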
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
221 221
222/** 222/**
223 * scsi_build_sense_buffer - build sense data in a buffer 223 * scsi_build_sense_buffer - build sense data in a buffer
224 * @desc: Sense format (non zero == descriptor format, 224 * @desc: Sense format (non-zero == descriptor format,
225 * 0 == fixed format) 225 * 0 == fixed format)
226 * @buf: Where to build sense data 226 * @buf: Where to build sense data
227 * @key: Sense key 227 * @key: Sense key
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(scsi_build_sense_buffer);
255 * @info: 64-bit information value to be set 255 * @info: 64-bit information value to be set
256 * 256 *
257 * Return value: 257 * Return value:
258 * 0 on success or EINVAL for invalid sense buffer length 258 * 0 on success or -EINVAL for invalid sense buffer length
259 **/ 259 **/
260int scsi_set_sense_information(u8 *buf, int buf_len, u64 info) 260int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
261{ 261{
@@ -305,7 +305,7 @@ EXPORT_SYMBOL(scsi_set_sense_information);
305 * @cd: command/data bit 305 * @cd: command/data bit
306 * 306 *
307 * Return value: 307 * Return value:
308 * 0 on success or EINVAL for invalid sense buffer length 308 * 0 on success or -EINVAL for invalid sense buffer length
309 */ 309 */
310int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd) 310int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd)
311{ 311{
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index e4f037f0f38b..a5986dae9020 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6,7 +6,7 @@
6 * anything out of the ordinary is seen. 6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8 * 8 *
9 * Copyright (C) 2001 - 2016 Douglas Gilbert 9 * Copyright (C) 2001 - 2017 Douglas Gilbert
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
@@ -61,8 +61,8 @@
61#include "scsi_logging.h" 61#include "scsi_logging.h"
62 62
63/* make sure inq_product_rev string corresponds to this version */ 63/* make sure inq_product_rev string corresponds to this version */
64#define SDEBUG_VERSION "1.86" 64#define SDEBUG_VERSION "0187" /* format to fit INQUIRY revision field */
65static const char *sdebug_version_date = "20160430"; 65static const char *sdebug_version_date = "20171202";
66 66
67#define MY_NAME "scsi_debug" 67#define MY_NAME "scsi_debug"
68 68
@@ -93,6 +93,7 @@ static const char *sdebug_version_date = "20160430";
93#define MISCOMPARE_VERIFY_ASC 0x1d 93#define MISCOMPARE_VERIFY_ASC 0x1d
94#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */ 94#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16 95#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96#define WRITE_ERROR_ASC 0xc
96 97
97/* Additional Sense Code Qualifier (ASCQ) */ 98/* Additional Sense Code Qualifier (ASCQ) */
98#define ACK_NAK_TO 0x3 99#define ACK_NAK_TO 0x3
@@ -105,6 +106,7 @@ static const char *sdebug_version_date = "20160430";
105 * (id 0) containing 1 logical unit (lun 0). That is 1 device. 106 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
106 */ 107 */
107#define DEF_ATO 1 108#define DEF_ATO 1
109#define DEF_CDB_LEN 10
108#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */ 110#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
109#define DEF_DEV_SIZE_MB 8 111#define DEF_DEV_SIZE_MB 8
110#define DEF_DIF 0 112#define DEF_DIF 0
@@ -161,12 +163,14 @@ static const char *sdebug_version_date = "20160430";
161#define SDEBUG_OPT_N_WCE 0x1000 163#define SDEBUG_OPT_N_WCE 0x1000
162#define SDEBUG_OPT_RESET_NOISE 0x2000 164#define SDEBUG_OPT_RESET_NOISE 0x2000
163#define SDEBUG_OPT_NO_CDB_NOISE 0x4000 165#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
166#define SDEBUG_OPT_HOST_BUSY 0x8000
164#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \ 167#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
165 SDEBUG_OPT_RESET_NOISE) 168 SDEBUG_OPT_RESET_NOISE)
166#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \ 169#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
167 SDEBUG_OPT_TRANSPORT_ERR | \ 170 SDEBUG_OPT_TRANSPORT_ERR | \
168 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \ 171 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
169 SDEBUG_OPT_SHORT_TRANSFER) 172 SDEBUG_OPT_SHORT_TRANSFER | \
173 SDEBUG_OPT_HOST_BUSY)
170/* When "every_nth" > 0 then modulo "every_nth" commands: 174/* When "every_nth" > 0 then modulo "every_nth" commands:
171 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set 175 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
172 * - a RECOVERED_ERROR is simulated on successful read and write 176 * - a RECOVERED_ERROR is simulated on successful read and write
@@ -232,7 +236,7 @@ static const char *sdebug_version_date = "20160430";
232#define F_M_ACCESS 0x800 /* media access */ 236#define F_M_ACCESS 0x800 /* media access */
233 237
234#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) 238#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
235#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW) 239#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
236#define FF_SA (F_SA_HIGH | F_SA_LOW) 240#define FF_SA (F_SA_HIGH | F_SA_LOW)
237 241
238#define SDEBUG_MAX_PARTS 4 242#define SDEBUG_MAX_PARTS 4
@@ -263,12 +267,18 @@ struct sdebug_host_info {
263#define to_sdebug_host(d) \ 267#define to_sdebug_host(d) \
264 container_of(d, struct sdebug_host_info, dev) 268 container_of(d, struct sdebug_host_info, dev)
265 269
270enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
271 SDEB_DEFER_WQ = 2};
272
266struct sdebug_defer { 273struct sdebug_defer {
267 struct hrtimer hrt; 274 struct hrtimer hrt;
268 struct execute_work ew; 275 struct execute_work ew;
269 int sqa_idx; /* index of sdebug_queue array */ 276 int sqa_idx; /* index of sdebug_queue array */
270 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */ 277 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
271 int issuing_cpu; 278 int issuing_cpu;
279 bool init_hrt;
280 bool init_wq;
281 enum sdeb_defer_type defer_t;
272}; 282};
273 283
274struct sdebug_queued_cmd { 284struct sdebug_queued_cmd {
@@ -282,6 +292,7 @@ struct sdebug_queued_cmd {
282 unsigned int inj_dif:1; 292 unsigned int inj_dif:1;
283 unsigned int inj_dix:1; 293 unsigned int inj_dix:1;
284 unsigned int inj_short:1; 294 unsigned int inj_short:1;
295 unsigned int inj_host_busy:1;
285}; 296};
286 297
287struct sdebug_queue { 298struct sdebug_queue {
@@ -304,8 +315,8 @@ struct opcode_info_t {
304 u32 flags; /* OR-ed set of SDEB_F_* */ 315 u32 flags; /* OR-ed set of SDEB_F_* */
305 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); 316 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
306 const struct opcode_info_t *arrp; /* num_attached elements or NULL */ 317 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
307 u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */ 318 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
308 /* ignore cdb bytes after position 15 */ 319 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
309}; 320};
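The revised len_mask comment can be read off any table entry; for example, a hedged reading of the READ(10) row in read_iarr below:

/* {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, ...}
 *   |
 *   +-- len_mask[0] == 10: READ(10) uses a 10-byte cdb.
 * The next nine values are per-byte masks of the bits that may be set
 * in cdb[1]..cdb[9]: e.g. 0x3f for cdb[6] (group number) and 0xc7 for
 * the control byte; the remaining entries are unused.
 */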
310 321
311/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */ 322/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
@@ -322,12 +333,12 @@ enum sdeb_opcode_index {
322 SDEB_I_READ = 9, /* 6, 10, 12, 16 */ 333 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
323 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */ 334 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
324 SDEB_I_START_STOP = 11, 335 SDEB_I_START_STOP = 11,
325 SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */ 336 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
326 SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */ 337 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
327 SDEB_I_MAINT_IN = 14, 338 SDEB_I_MAINT_IN = 14,
328 SDEB_I_MAINT_OUT = 15, 339 SDEB_I_MAINT_OUT = 15,
329 SDEB_I_VERIFY = 16, /* 10 only */ 340 SDEB_I_VERIFY = 16, /* 10 only */
330 SDEB_I_VARIABLE_LEN = 17, 341 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
331 SDEB_I_RESERVE = 18, /* 6, 10 */ 342 SDEB_I_RESERVE = 18, /* 6, 10 */
332 SDEB_I_RELEASE = 19, /* 6, 10 */ 343 SDEB_I_RELEASE = 19, /* 6, 10 */
333 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */ 344 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
@@ -340,7 +351,7 @@ enum sdeb_opcode_index {
340 SDEB_I_WRITE_SAME = 27, /* 10, 16 */ 351 SDEB_I_WRITE_SAME = 27, /* 10, 16 */
341 SDEB_I_SYNC_CACHE = 28, /* 10 only */ 352 SDEB_I_SYNC_CACHE = 28, /* 10 only */
342 SDEB_I_COMP_WRITE = 29, 353 SDEB_I_COMP_WRITE = 29,
343 SDEB_I_LAST_ELEMENT = 30, /* keep this last */ 354 SDEB_I_LAST_ELEMENT = 30, /* keep this last (previous + 1) */
344}; 355};
345 356
346 357
@@ -372,12 +383,12 @@ static const unsigned char opcode_ind_arr[256] = {
372 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0, 383 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
373 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0, 384 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
374 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0, 385 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
375 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT, 386 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
376/* 0xa0; 0xa0->0xbf: 12 byte cdbs */ 387/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
377 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN, 388 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
378 SDEB_I_MAINT_OUT, 0, 0, 0, 389 SDEB_I_MAINT_OUT, 0, 0, 0,
379 SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN, 390 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
380 0, 0, 0, 0, 391 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
381 0, 0, 0, 0, 0, 0, 0, 0, 392 0, 0, 0, 0, 0, 0, 0, 0,
382 0, 0, 0, 0, 0, 0, 0, 0, 393 0, 0, 0, 0, 0, 0, 0, 0,
383/* 0xc0; 0xc0->0xff: vendor specific */ 394/* 0xc0; 0xc0->0xff: vendor specific */
@@ -396,6 +407,7 @@ static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
396static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *); 407static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
397static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); 408static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
398static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); 409static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
410static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
399static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *); 411static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
400static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *); 412static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
401static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *); 413static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -409,72 +421,81 @@ static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
409static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); 421static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
410static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); 422static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
411 423
412static const struct opcode_info_t msense_iarr[1] = { 424/*
425 * The following are overflow arrays for cdbs that "hit" the same index in
426 * the opcode_info_arr array. The most time-sensitive (or commonly used) cdb
427 * should be placed in opcode_info_arr[]; the others should be placed here.
428 */
429static const struct opcode_info_t msense_iarr[] = {
413 {0, 0x1a, 0, F_D_IN, NULL, NULL, 430 {0, 0x1a, 0, F_D_IN, NULL, NULL,
414 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 431 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
415}; 432};
416 433
417static const struct opcode_info_t mselect_iarr[1] = { 434static const struct opcode_info_t mselect_iarr[] = {
418 {0, 0x15, 0, F_D_OUT, NULL, NULL, 435 {0, 0x15, 0, F_D_OUT, NULL, NULL,
419 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 436 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
420}; 437};
421 438
422static const struct opcode_info_t read_iarr[3] = { 439static const struct opcode_info_t read_iarr[] = {
423 {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */ 440 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
424 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 441 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
425 0, 0, 0, 0} }, 442 0, 0, 0, 0} },
426 {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */ 443 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
427 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 444 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
428 {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */ 445 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
429 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 446 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
430 0xc7, 0, 0, 0, 0} }, 447 0xc7, 0, 0, 0, 0} },
431}; 448};
432 449
433static const struct opcode_info_t write_iarr[3] = { 450static const struct opcode_info_t write_iarr[] = {
434 {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */ 451 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
435 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 452 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
436 0, 0, 0, 0} }, 453 0, 0, 0, 0, 0, 0} },
437 {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */ 454 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
438 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 455 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
439 {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */ 456 0, 0, 0} },
440 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, 457 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
441 0xc7, 0, 0, 0, 0} }, 458 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
459 0xbf, 0xc7, 0, 0, 0, 0} },
442}; 460};
443 461
444static const struct opcode_info_t sa_in_iarr[1] = { 462static const struct opcode_info_t sa_in_16_iarr[] = {
445 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL, 463 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
446 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 464 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
447 0xff, 0xff, 0xff, 0, 0xc7} }, 465 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
448}; 466};
449 467
450static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */ 468static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
451 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0, 469 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
452 NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa, 470 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
453 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */ 471 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
472 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
473 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
474 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
454}; 475};
455 476
456static const struct opcode_info_t maint_in_iarr[2] = { 477static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
457 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL, 478 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
458 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 479 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
459 0xc7, 0, 0, 0, 0} }, 480 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
460 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL, 481 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
461 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 482 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
462 0, 0} }, 483 0, 0} }, /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
463}; 484};
464 485
465static const struct opcode_info_t write_same_iarr[1] = { 486static const struct opcode_info_t write_same_iarr[] = {
466 {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL, 487 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
467 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 488 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
468 0xff, 0xff, 0xff, 0x1f, 0xc7} }, 489 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
469}; 490};
470 491
471static const struct opcode_info_t reserve_iarr[1] = { 492static const struct opcode_info_t reserve_iarr[] = {
472 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */ 493 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
473 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 494 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
474}; 495};
475 496
476static const struct opcode_info_t release_iarr[1] = { 497static const struct opcode_info_t release_iarr[] = {
477 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */ 498 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
478 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 499 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
479}; 500};
480 501
@@ -484,57 +505,67 @@ static const struct opcode_info_t release_iarr[1] = {
484 * REPORT SUPPORTED OPERATION CODES. */ 505 * REPORT SUPPORTED OPERATION CODES. */
485static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { 506static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
486/* 0 */ 507/* 0 */
487 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, 508 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
488 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 509 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
489 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, 510 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
490 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 511 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
491 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL, 512 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
492 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 513 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
493 0, 0} }, 514 0, 0} }, /* REPORT LUNS */
494 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL, 515 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
495 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 516 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
496 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */ 517 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
497 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 518 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
498 {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr, 519/* 5 */
499 {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 520 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
500 0} }, 521 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
501 {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr, 522 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
502 {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, 523 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
503 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, 524 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
525 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
526 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
504 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 527 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
505 0, 0, 0} }, 528 0, 0, 0} },
506 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, 529 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
507 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0, 530 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
508 0, 0} }, 531 0, 0} },
509 {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr, 532 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
510 {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 533 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
511 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */ 534 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
512/* 10 */ 535/* 10 */
513 {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr, 536 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
514 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 537 resp_write_dt0, write_iarr, /* WRITE(16) */
515 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */ 538 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
539 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* WRITE(16) */
516 {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */ 540 {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
517 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 541 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
518 {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr, 542 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
519 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 543 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
520 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */ 544 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
521 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */ 545 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
522 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 546 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
523 {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr, 547 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
524 {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 548 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
525 0} }, 549 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
550 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
551 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
552 0xff, 0, 0xc7, 0, 0, 0, 0} },
553/* 15 */
526 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ 554 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
527 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 555 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
528 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */ 556 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
529 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 557 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
530 0, 0, 0, 0, 0, 0} }, 558 0, 0, 0, 0, 0, 0} },
531 {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0, 559 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
532 vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0, 560 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
533 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */ 561 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
534 {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */ 562 0xff, 0xff} },
563 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
564 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
535 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 565 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
536 0} }, 566 0} },
537 {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */ 567 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
568 NULL, release_iarr, /* RELEASE(10) <no response function> */
538 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 569 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
539 0} }, 570 0} },
540/* 20 */ 571/* 20 */
@@ -546,23 +577,25 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
546 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 577 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
547 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */ 578 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
548 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 579 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
549 {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */ 580 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
550 {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, 581 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
551 {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10, 582/* 25 */
552 NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 583 {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
553 0, 0, 0, 0, 0, 0} }, 584 NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
585 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */
554 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL, 586 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
555 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 587 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
556 0, 0, 0, 0} }, /* WRITE_BUFFER */ 588 0, 0, 0, 0} }, /* WRITE_BUFFER */
557 {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10, 589 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
558 write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 590 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
559 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, 591 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
560 {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */ 592 0, 0, 0, 0, 0} },
561 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 593 {0, 0x35, 0, F_DELAY_OVERR | FF_MEDIA_IO, NULL, NULL, /* SYNC_CACHE */
594 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
562 0, 0, 0, 0} }, 595 0, 0, 0, 0} },
563 {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL, 596 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
564 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 597 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
565 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */ 598 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
566 599
567/* 30 */ 600/* 30 */
568 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */ 601 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
@@ -571,6 +604,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
571 604
572static int sdebug_add_host = DEF_NUM_HOST; 605static int sdebug_add_host = DEF_NUM_HOST;
573static int sdebug_ato = DEF_ATO; 606static int sdebug_ato = DEF_ATO;
607static int sdebug_cdb_len = DEF_CDB_LEN;
574static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */ 608static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
575static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB; 609static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
576static int sdebug_dif = DEF_DIF; 610static int sdebug_dif = DEF_DIF;
@@ -797,6 +831,61 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
797 /* return -ENOTTY; // correct return but upsets fdisk */ 831 /* return -ENOTTY; // correct return but upsets fdisk */
798} 832}
799 833
834static void config_cdb_len(struct scsi_device *sdev)
835{
836 switch (sdebug_cdb_len) {
837 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
838 sdev->use_10_for_rw = false;
839 sdev->use_16_for_rw = false;
840 sdev->use_10_for_ms = false;
841 break;
842 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
843 sdev->use_10_for_rw = true;
844 sdev->use_16_for_rw = false;
845 sdev->use_10_for_ms = false;
846 break;
847 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
848 sdev->use_10_for_rw = true;
849 sdev->use_16_for_rw = false;
850 sdev->use_10_for_ms = true;
851 break;
852 case 16:
853 sdev->use_10_for_rw = false;
854 sdev->use_16_for_rw = true;
855 sdev->use_10_for_ms = true;
856 break;
857 case 32: /* No knobs to suggest this so same as 16 for now */
858 sdev->use_10_for_rw = false;
859 sdev->use_16_for_rw = true;
860 sdev->use_10_for_ms = true;
861 break;
862 default:
863 pr_warn("unexpected cdb_len=%d, force to 10\n",
864 sdebug_cdb_len);
865 sdev->use_10_for_rw = true;
866 sdev->use_16_for_rw = false;
867 sdev->use_10_for_ms = false;
868 sdebug_cdb_len = 10;
869 break;
870 }
871}
872
873static void all_config_cdb_len(void)
874{
875 struct sdebug_host_info *sdbg_host;
876 struct Scsi_Host *shost;
877 struct scsi_device *sdev;
878
879 spin_lock(&sdebug_host_list_lock);
880 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
881 shost = sdbg_host->shost;
882 shost_for_each_device(sdev, shost) {
883 config_cdb_len(sdev);
884 }
885 }
886 spin_unlock(&sdebug_host_list_lock);
887}
888
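config_cdb_len() only records hints; the mid-layer and upper drivers pick the actual opcode when they build a command. A minimal sketch of how the three scsi_device flags set above map back to a read/write CDB size, ignoring the LBA and transfer-length limits that can force a larger CDB regardless (this helper is illustrative, not part of scsi_debug.c):

	#include <scsi/scsi_device.h>

	static int suggested_rw_cdb_len(const struct scsi_device *sdev)
	{
		if (sdev->use_16_for_rw)
			return 16;	/* READ(16)/WRITE(16) */
		if (sdev->use_10_for_rw)
			return 10;	/* READ(10)/WRITE(10) */
		return 6;		/* READ(6)/WRITE(6) */
	}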
800static void clear_luns_changed_on_target(struct sdebug_dev_info *devip) 889static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
801{ 890{
802 struct sdebug_host_info *sdhp; 891 struct sdebug_host_info *sdhp;
@@ -955,7 +1044,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
955 1044
956static char sdebug_inq_vendor_id[9] = "Linux "; 1045static char sdebug_inq_vendor_id[9] = "Linux ";
957static char sdebug_inq_product_id[17] = "scsi_debug "; 1046static char sdebug_inq_product_id[17] = "scsi_debug ";
958static char sdebug_inq_product_rev[5] = "0186"; /* version less '.' */ 1047static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
959/* Use some locally assigned NAAs for SAS addresses. */ 1048/* Use some locally assigned NAAs for SAS addresses. */
960static const u64 naa3_comp_a = 0x3222222000000000ULL; 1049static const u64 naa3_comp_a = 0x3222222000000000ULL;
961static const u64 naa3_comp_b = 0x3333333000000000ULL; 1050static const u64 naa3_comp_b = 0x3333333000000000ULL;
@@ -1411,6 +1500,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1411 memcpy(&arr[8], sdebug_inq_vendor_id, 8); 1500 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1412 memcpy(&arr[16], sdebug_inq_product_id, 16); 1501 memcpy(&arr[16], sdebug_inq_product_id, 16);
1413 memcpy(&arr[32], sdebug_inq_product_rev, 4); 1502 memcpy(&arr[32], sdebug_inq_product_rev, 4);
 1503	/* Use the Vendor Specific area to place the driver date in ASCII */
1504 memcpy(&arr[36], sdebug_version_date, 8);
1414 /* version descriptors (2 bytes each) follow */ 1505 /* version descriptors (2 bytes each) follow */
1415 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */ 1506 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1416 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */ 1507 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
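The offsets used above follow the standard INQUIRY data layout from SPC; an annotated sketch of the buffer as built here (the struct is only a map for reading the code, not a type the driver defines):

	struct inquiry_std_sketch {
		u8 hdr[8];           /* peripheral type, flags, version, extra length */
		char vendor_id[8];   /* bytes  8..15, "Linux   " by default */
		char product_id[16]; /* bytes 16..31, "scsi_debug      " */
		char product_rev[4]; /* bytes 32..35, now SDEBUG_VERSION */
		char vendor_spec[20];/* bytes 36..55; first 8 now hold the version date */
	};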
@@ -1900,7 +1991,7 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1900static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target) 1991static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1901{ /* Control mode page for mode_sense */ 1992{ /* Control mode page for mode_sense */
1902 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0, 1993 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1903 0, 0, 0, 0}; 1994 0, 0, 0, 0};
1904 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 1995 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1905 0, 0, 0x2, 0x4b}; 1996 0, 0, 0x2, 0x4b};
1906 1997
@@ -2077,13 +2168,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
2077 len = resp_disconnect_pg(ap, pcontrol, target); 2168 len = resp_disconnect_pg(ap, pcontrol, target);
2078 offset += len; 2169 offset += len;
2079 break; 2170 break;
2080 case 0x3: /* Format device page, direct access */ 2171 case 0x3: /* Format device page, direct access */
2081 if (is_disk) { 2172 if (is_disk) {
2082 len = resp_format_pg(ap, pcontrol, target); 2173 len = resp_format_pg(ap, pcontrol, target);
2083 offset += len; 2174 offset += len;
2084 } else 2175 } else
2085 bad_pcode = true; 2176 bad_pcode = true;
2086 break; 2177 break;
2087 case 0x8: /* Caching page, direct access */ 2178 case 0x8: /* Caching page, direct access */
2088 if (is_disk) { 2179 if (is_disk) {
2089 len = resp_caching_pg(ap, pcontrol, target); 2180 len = resp_caching_pg(ap, pcontrol, target);
@@ -2099,7 +2190,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
2099 if ((subpcode > 0x2) && (subpcode < 0xff)) { 2190 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2100 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); 2191 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2101 return check_condition_result; 2192 return check_condition_result;
2102 } 2193 }
2103 len = 0; 2194 len = 0;
2104 if ((0x0 == subpcode) || (0xff == subpcode)) 2195 if ((0x0 == subpcode) || (0xff == subpcode))
2105 len += resp_sas_sf_m_pg(ap + len, pcontrol, target); 2196 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2136,7 +2227,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
2136 } else { 2227 } else {
2137 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); 2228 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2138 return check_condition_result; 2229 return check_condition_result;
2139 } 2230 }
2140 break; 2231 break;
2141 default: 2232 default:
2142 bad_pcode = true; 2233 bad_pcode = true;
@@ -2172,8 +2263,8 @@ static int resp_mode_select(struct scsi_cmnd *scp,
2172 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); 2263 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2173 return check_condition_result; 2264 return check_condition_result;
2174 } 2265 }
2175 res = fetch_to_dev_buffer(scp, arr, param_len); 2266 res = fetch_to_dev_buffer(scp, arr, param_len);
2176 if (-1 == res) 2267 if (-1 == res)
2177 return DID_ERROR << 16; 2268 return DID_ERROR << 16;
2178 else if (sdebug_verbose && (res < param_len)) 2269 else if (sdebug_verbose && (res < param_len))
2179 sdev_printk(KERN_INFO, scp->device, 2270 sdev_printk(KERN_INFO, scp->device,
@@ -2239,8 +2330,8 @@ static int resp_temp_l_pg(unsigned char * arr)
2239 0x0, 0x1, 0x3, 0x2, 0x0, 65, 2330 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2240 }; 2331 };
2241 2332
2242 memcpy(arr, temp_l_pg, sizeof(temp_l_pg)); 2333 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2243 return sizeof(temp_l_pg); 2334 return sizeof(temp_l_pg);
2244} 2335}
2245 2336
2246static int resp_ie_l_pg(unsigned char * arr) 2337static int resp_ie_l_pg(unsigned char * arr)
@@ -2248,18 +2339,18 @@ static int resp_ie_l_pg(unsigned char * arr)
2248 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38, 2339 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2249 }; 2340 };
2250 2341
2251 memcpy(arr, ie_l_pg, sizeof(ie_l_pg)); 2342 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2252 if (iec_m_pg[2] & 0x4) { /* TEST bit set */ 2343 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2253 arr[4] = THRESHOLD_EXCEEDED; 2344 arr[4] = THRESHOLD_EXCEEDED;
2254 arr[5] = 0xff; 2345 arr[5] = 0xff;
2255 } 2346 }
2256 return sizeof(ie_l_pg); 2347 return sizeof(ie_l_pg);
2257} 2348}
2258 2349
2259#define SDEBUG_MAX_LSENSE_SZ 512 2350#define SDEBUG_MAX_LSENSE_SZ 512
2260 2351
2261static int resp_log_sense(struct scsi_cmnd * scp, 2352static int resp_log_sense(struct scsi_cmnd *scp,
2262 struct sdebug_dev_info * devip) 2353 struct sdebug_dev_info *devip)
2263{ 2354{
2264 int ppc, sp, pcode, subpcode, alloc_len, len, n; 2355 int ppc, sp, pcode, subpcode, alloc_len, len, n;
2265 unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; 2356 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
@@ -2353,8 +2444,8 @@ static int check_device_access_params(struct scsi_cmnd *scp,
2353} 2444}
2354 2445
2355/* Returns number of bytes copied or -1 if error. */ 2446/* Returns number of bytes copied or -1 if error. */
2356static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, 2447static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2357 bool do_write) 2448 u32 num, bool do_write)
2358{ 2449{
2359 int ret; 2450 int ret;
2360 u64 block, rest = 0; 2451 u64 block, rest = 0;
@@ -2380,14 +2471,15 @@ static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
2380 2471
2381 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, 2472 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2382 fake_storep + (block * sdebug_sector_size), 2473 fake_storep + (block * sdebug_sector_size),
2383 (num - rest) * sdebug_sector_size, 0, do_write); 2474 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2384 if (ret != (num - rest) * sdebug_sector_size) 2475 if (ret != (num - rest) * sdebug_sector_size)
2385 return ret; 2476 return ret;
2386 2477
2387 if (rest) { 2478 if (rest) {
2388 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, 2479 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2389 fake_storep, rest * sdebug_sector_size, 2480 fake_storep, rest * sdebug_sector_size,
2390 (num - rest) * sdebug_sector_size, do_write); 2481 sg_skip + ((num - rest) * sdebug_sector_size),
2482 do_write);
2391 } 2483 }
2392 2484
2393 return ret; 2485 return ret;
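do_device_access() now forwards sg_skip as sg_copy_buffer()'s offset argument, so one command's data buffer can be consumed in slices rather than always from byte 0. A fragment restating the semantics with the names from this hunk:

	/* sg_skip counts the bytes of the command's scatter-gather list that
	 * earlier slices already consumed; do_write selects the direction
	 * (true: sg list -> fake store, false: fake store -> sg list). */
	size_t done = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
				     fake_storep + block * sdebug_sector_size,
				     (num - rest) * sdebug_sector_size,
				     sg_skip, do_write);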
@@ -2648,7 +2740,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2648 } 2740 }
2649 } 2741 }
2650 2742
2651 ret = do_device_access(scp, lba, num, false); 2743 ret = do_device_access(scp, 0, lba, num, false);
2652 read_unlock_irqrestore(&atomic_rw, iflags); 2744 read_unlock_irqrestore(&atomic_rw, iflags);
2653 if (unlikely(ret == -1)) 2745 if (unlikely(ret == -1))
2654 return DID_ERROR << 16; 2746 return DID_ERROR << 16;
@@ -2936,7 +3028,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2936 } 3028 }
2937 } 3029 }
2938 3030
2939 ret = do_device_access(scp, lba, num, true); 3031 ret = do_device_access(scp, 0, lba, num, true);
2940 if (unlikely(scsi_debug_lbp())) 3032 if (unlikely(scsi_debug_lbp()))
2941 map_region(lba, num); 3033 map_region(lba, num);
2942 write_unlock_irqrestore(&atomic_rw, iflags); 3034 write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2970,6 +3062,173 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2970 return 0; 3062 return 0;
2971} 3063}
2972 3064
3065/*
3066 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3067 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3068 */
3069static int resp_write_scat(struct scsi_cmnd *scp,
3070 struct sdebug_dev_info *devip)
3071{
3072 u8 *cmd = scp->cmnd;
3073 u8 *lrdp = NULL;
3074 u8 *up;
3075 u8 wrprotect;
3076 u16 lbdof, num_lrd, k;
3077 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3078 u32 lb_size = sdebug_sector_size;
3079 u32 ei_lba;
3080 u64 lba;
3081 unsigned long iflags;
3082 int ret, res;
3083 bool is_16;
3084 static const u32 lrd_size = 32; /* + parameter list header size */
3085
3086 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3087 is_16 = false;
3088 wrprotect = (cmd[10] >> 5) & 0x7;
3089 lbdof = get_unaligned_be16(cmd + 12);
3090 num_lrd = get_unaligned_be16(cmd + 16);
3091 bt_len = get_unaligned_be32(cmd + 28);
3092 } else { /* that leaves WRITE SCATTERED(16) */
3093 is_16 = true;
3094 wrprotect = (cmd[2] >> 5) & 0x7;
3095 lbdof = get_unaligned_be16(cmd + 4);
3096 num_lrd = get_unaligned_be16(cmd + 8);
3097 bt_len = get_unaligned_be32(cmd + 10);
3098 if (unlikely(have_dif_prot)) {
3099 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3100 wrprotect) {
3101 mk_sense_invalid_opcode(scp);
3102 return illegal_condition_result;
3103 }
3104 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3105 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3106 wrprotect == 0)
3107 sdev_printk(KERN_ERR, scp->device,
3108 "Unprotected WR to DIF device\n");
3109 }
3110 }
3111 if ((num_lrd == 0) || (bt_len == 0))
3112 return 0; /* T10 says these do-nothings are not errors */
3113 if (lbdof == 0) {
3114 if (sdebug_verbose)
3115 sdev_printk(KERN_INFO, scp->device,
3116 "%s: %s: LB Data Offset field bad\n",
3117 my_name, __func__);
3118 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3119 return illegal_condition_result;
3120 }
3121 lbdof_blen = lbdof * lb_size;
3122 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3123 if (sdebug_verbose)
3124 sdev_printk(KERN_INFO, scp->device,
3125 "%s: %s: LBA range descriptors don't fit\n",
3126 my_name, __func__);
3127 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3128 return illegal_condition_result;
3129 }
3130 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3131 if (lrdp == NULL)
3132 return SCSI_MLQUEUE_HOST_BUSY;
3133 if (sdebug_verbose)
3134 sdev_printk(KERN_INFO, scp->device,
3135 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3136 my_name, __func__, lbdof_blen);
3137 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3138 if (res == -1) {
3139 ret = DID_ERROR << 16;
3140 goto err_out;
3141 }
3142
3143 write_lock_irqsave(&atomic_rw, iflags);
3144 sg_off = lbdof_blen;
 3145	/* Spec says the Buffer Transfer Length field counts LBs in the data-out buffer */
3146 cum_lb = 0;
3147 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3148 lba = get_unaligned_be64(up + 0);
3149 num = get_unaligned_be32(up + 8);
3150 if (sdebug_verbose)
3151 sdev_printk(KERN_INFO, scp->device,
3152 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3153 my_name, __func__, k, lba, num, sg_off);
3154 if (num == 0)
3155 continue;
3156 ret = check_device_access_params(scp, lba, num);
3157 if (ret)
3158 goto err_out_unlock;
3159 num_by = num * lb_size;
3160 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3161
3162 if ((cum_lb + num) > bt_len) {
3163 if (sdebug_verbose)
3164 sdev_printk(KERN_INFO, scp->device,
3165 "%s: %s: sum of blocks > data provided\n",
3166 my_name, __func__);
3167 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3168 0);
3169 ret = illegal_condition_result;
3170 goto err_out_unlock;
3171 }
3172
3173 /* DIX + T10 DIF */
3174 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3175 int prot_ret = prot_verify_write(scp, lba, num,
3176 ei_lba);
3177
3178 if (prot_ret) {
3179 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3180 prot_ret);
3181 ret = illegal_condition_result;
3182 goto err_out_unlock;
3183 }
3184 }
3185
3186 ret = do_device_access(scp, sg_off, lba, num, true);
3187 if (unlikely(scsi_debug_lbp()))
3188 map_region(lba, num);
3189 if (unlikely(-1 == ret)) {
3190 ret = DID_ERROR << 16;
3191 goto err_out_unlock;
3192 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3193 sdev_printk(KERN_INFO, scp->device,
3194 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3195 my_name, num_by, ret);
3196
3197 if (unlikely(sdebug_any_injecting_opt)) {
3198 struct sdebug_queued_cmd *sqcp =
3199 (struct sdebug_queued_cmd *)scp->host_scribble;
3200
3201 if (sqcp) {
3202 if (sqcp->inj_recovered) {
3203 mk_sense_buffer(scp, RECOVERED_ERROR,
3204 THRESHOLD_EXCEEDED, 0);
3205 ret = illegal_condition_result;
3206 goto err_out_unlock;
3207 } else if (sqcp->inj_dif) {
3208 /* Logical block guard check failed */
3209 mk_sense_buffer(scp, ABORTED_COMMAND,
3210 0x10, 1);
3211 ret = illegal_condition_result;
3212 goto err_out_unlock;
3213 } else if (sqcp->inj_dix) {
3214 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3215 0x10, 1);
3216 ret = illegal_condition_result;
3217 goto err_out_unlock;
3218 }
3219 }
3220 }
3221 sg_off += num_by;
3222 cum_lb += num;
3223 }
3224 ret = 0;
3225err_out_unlock:
3226 write_unlock_irqrestore(&atomic_rw, iflags);
3227err_out:
3228 kfree(lrdp);
3229 return ret;
3230}
3231
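resp_write_scat() expects a data-out buffer holding a parameter-list header followed by num_lrd fixed 32-byte LBA range descriptors (lrd_size above). From the get_unaligned_be*() offsets used in the loop, one descriptor decodes roughly as follows; the field names are illustrative:

	#include <linux/types.h>

	struct lba_range_desc_sketch {
		__be64 lba;          /* bytes  0..7:  starting LBA */
		__be32 num_blocks;   /* bytes  8..11: blocks to write; 0 means skip */
		__be32 exp_init_lba; /* bytes 12..15: ei_lba, 32-byte CDB form only */
		u8 reserved[16];     /* pads the descriptor out to 32 bytes */
	};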
2973static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, 3232static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
2974 u32 ei_lba, bool unmap, bool ndob) 3233 u32 ei_lba, bool unmap, bool ndob)
2975{ 3234{
@@ -3177,7 +3436,7 @@ static int resp_comp_write(struct scsi_cmnd *scp,
3177 * from data-in into arr. Safe (atomic) since write_lock held. */ 3436 * from data-in into arr. Safe (atomic) since write_lock held. */
3178 fake_storep_hold = fake_storep; 3437 fake_storep_hold = fake_storep;
3179 fake_storep = arr; 3438 fake_storep = arr;
3180 ret = do_device_access(scp, 0, dnum, true); 3439 ret = do_device_access(scp, 0, 0, dnum, true);
3181 fake_storep = fake_storep_hold; 3440 fake_storep = fake_storep_hold;
3182 if (ret == -1) { 3441 if (ret == -1) {
3183 retval = DID_ERROR << 16; 3442 retval = DID_ERROR << 16;
@@ -3495,6 +3754,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3495 struct scsi_cmnd *scp; 3754 struct scsi_cmnd *scp;
3496 struct sdebug_dev_info *devip; 3755 struct sdebug_dev_info *devip;
3497 3756
3757 sd_dp->defer_t = SDEB_DEFER_NONE;
3498 qc_idx = sd_dp->qc_idx; 3758 qc_idx = sd_dp->qc_idx;
3499 sqp = sdebug_q_arr + sd_dp->sqa_idx; 3759 sqp = sdebug_q_arr + sd_dp->sqa_idx;
3500 if (sdebug_statistics) { 3760 if (sdebug_statistics) {
@@ -3603,12 +3863,12 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3603 if (!sdbg_host) { 3863 if (!sdbg_host) {
3604 pr_err("Host info NULL\n"); 3864 pr_err("Host info NULL\n");
3605 return NULL; 3865 return NULL;
3606 } 3866 }
3607 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { 3867 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3608 if ((devip->used) && (devip->channel == sdev->channel) && 3868 if ((devip->used) && (devip->channel == sdev->channel) &&
3609 (devip->target == sdev->id) && 3869 (devip->target == sdev->id) &&
3610 (devip->lun == sdev->lun)) 3870 (devip->lun == sdev->lun))
3611 return devip; 3871 return devip;
3612 else { 3872 else {
3613 if ((!devip->used) && (!open_devip)) 3873 if ((!devip->used) && (!open_devip))
3614 open_devip = devip; 3874 open_devip = devip;
@@ -3660,6 +3920,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
3660 blk_queue_max_segment_size(sdp->request_queue, -1U); 3920 blk_queue_max_segment_size(sdp->request_queue, -1U);
3661 if (sdebug_no_uld) 3921 if (sdebug_no_uld)
3662 sdp->no_uld_attach = 1; 3922 sdp->no_uld_attach = 1;
3923 config_cdb_len(sdp);
3663 return 0; 3924 return 0;
3664} 3925}
3665 3926
@@ -3678,13 +3939,14 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3678 } 3939 }
3679} 3940}
3680 3941
3681static void stop_qc_helper(struct sdebug_defer *sd_dp) 3942static void stop_qc_helper(struct sdebug_defer *sd_dp,
3943 enum sdeb_defer_type defer_t)
3682{ 3944{
3683 if (!sd_dp) 3945 if (!sd_dp)
3684 return; 3946 return;
3685 if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0)) 3947 if (defer_t == SDEB_DEFER_HRT)
3686 hrtimer_cancel(&sd_dp->hrt); 3948 hrtimer_cancel(&sd_dp->hrt);
3687 else if (sdebug_jdelay < 0) 3949 else if (defer_t == SDEB_DEFER_WQ)
3688 cancel_work_sync(&sd_dp->ew.work); 3950 cancel_work_sync(&sd_dp->ew.work);
3689} 3951}
3690 3952
@@ -3694,6 +3956,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3694{ 3956{
3695 unsigned long iflags; 3957 unsigned long iflags;
3696 int j, k, qmax, r_qmax; 3958 int j, k, qmax, r_qmax;
3959 enum sdeb_defer_type l_defer_t;
3697 struct sdebug_queue *sqp; 3960 struct sdebug_queue *sqp;
3698 struct sdebug_queued_cmd *sqcp; 3961 struct sdebug_queued_cmd *sqcp;
3699 struct sdebug_dev_info *devip; 3962 struct sdebug_dev_info *devip;
@@ -3717,8 +3980,13 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3717 atomic_dec(&devip->num_in_q); 3980 atomic_dec(&devip->num_in_q);
3718 sqcp->a_cmnd = NULL; 3981 sqcp->a_cmnd = NULL;
3719 sd_dp = sqcp->sd_dp; 3982 sd_dp = sqcp->sd_dp;
3983 if (sd_dp) {
3984 l_defer_t = sd_dp->defer_t;
3985 sd_dp->defer_t = SDEB_DEFER_NONE;
3986 } else
3987 l_defer_t = SDEB_DEFER_NONE;
3720 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 3988 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3721 stop_qc_helper(sd_dp); 3989 stop_qc_helper(sd_dp, l_defer_t);
3722 clear_bit(k, sqp->in_use_bm); 3990 clear_bit(k, sqp->in_use_bm);
3723 return true; 3991 return true;
3724 } 3992 }
@@ -3733,6 +4001,7 @@ static void stop_all_queued(void)
3733{ 4001{
3734 unsigned long iflags; 4002 unsigned long iflags;
3735 int j, k; 4003 int j, k;
4004 enum sdeb_defer_type l_defer_t;
3736 struct sdebug_queue *sqp; 4005 struct sdebug_queue *sqp;
3737 struct sdebug_queued_cmd *sqcp; 4006 struct sdebug_queued_cmd *sqcp;
3738 struct sdebug_dev_info *devip; 4007 struct sdebug_dev_info *devip;
@@ -3751,8 +4020,13 @@ static void stop_all_queued(void)
3751 atomic_dec(&devip->num_in_q); 4020 atomic_dec(&devip->num_in_q);
3752 sqcp->a_cmnd = NULL; 4021 sqcp->a_cmnd = NULL;
3753 sd_dp = sqcp->sd_dp; 4022 sd_dp = sqcp->sd_dp;
4023 if (sd_dp) {
4024 l_defer_t = sd_dp->defer_t;
4025 sd_dp->defer_t = SDEB_DEFER_NONE;
4026 } else
4027 l_defer_t = SDEB_DEFER_NONE;
3754 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 4028 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3755 stop_qc_helper(sd_dp); 4029 stop_qc_helper(sd_dp, l_defer_t);
3756 clear_bit(k, sqp->in_use_bm); 4030 clear_bit(k, sqp->in_use_bm);
3757 spin_lock_irqsave(&sqp->qc_lock, iflags); 4031 spin_lock_irqsave(&sqp->qc_lock, iflags);
3758 } 4032 }
@@ -3848,8 +4122,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3848{ 4122{
3849 struct sdebug_host_info *sdbg_host; 4123 struct sdebug_host_info *sdbg_host;
3850 struct sdebug_dev_info *devip; 4124 struct sdebug_dev_info *devip;
3851 struct scsi_device * sdp; 4125 struct scsi_device *sdp;
3852 struct Scsi_Host * hp; 4126 struct Scsi_Host *hp;
3853 int k = 0; 4127 int k = 0;
3854 4128
3855 ++num_bus_resets; 4129 ++num_bus_resets;
@@ -3863,7 +4137,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3863 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); 4137 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3864 if (sdbg_host) { 4138 if (sdbg_host) {
3865 list_for_each_entry(devip, 4139 list_for_each_entry(devip,
3866 &sdbg_host->dev_info_list, 4140 &sdbg_host->dev_info_list,
3867 dev_list) { 4141 dev_list) {
3868 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); 4142 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3869 ++k; 4143 ++k;
@@ -3886,15 +4160,15 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3886 ++num_host_resets; 4160 ++num_host_resets;
3887 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts)) 4161 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
3888 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__); 4162 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3889 spin_lock(&sdebug_host_list_lock); 4163 spin_lock(&sdebug_host_list_lock);
3890 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { 4164 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3891 list_for_each_entry(devip, &sdbg_host->dev_info_list, 4165 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3892 dev_list) { 4166 dev_list) {
3893 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); 4167 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3894 ++k; 4168 ++k;
3895 } 4169 }
3896 } 4170 }
3897 spin_unlock(&sdebug_host_list_lock); 4171 spin_unlock(&sdebug_host_list_lock);
3898 stop_all_queued(); 4172 stop_all_queued();
3899 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts) 4173 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
3900 sdev_printk(KERN_INFO, SCpnt->device, 4174 sdev_printk(KERN_INFO, SCpnt->device,
@@ -3921,7 +4195,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
3921 sectors_per_part = (num_sectors - sdebug_sectors_per) 4195 sectors_per_part = (num_sectors - sdebug_sectors_per)
3922 / sdebug_num_parts; 4196 / sdebug_num_parts;
3923 heads_by_sects = sdebug_heads * sdebug_sectors_per; 4197 heads_by_sects = sdebug_heads * sdebug_sectors_per;
3924 starts[0] = sdebug_sectors_per; 4198 starts[0] = sdebug_sectors_per;
3925 for (k = 1; k < sdebug_num_parts; ++k) 4199 for (k = 1; k < sdebug_num_parts; ++k)
3926 starts[k] = ((k * sectors_per_part) / heads_by_sects) 4200 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3927 * heads_by_sects; 4201 * heads_by_sects;
@@ -3995,6 +4269,7 @@ static void setup_inject(struct sdebug_queue *sqp,
3995 sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts); 4269 sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
3996 sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts); 4270 sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
3997 sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts); 4271 sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4272 sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
3998} 4273}
3999 4274
4000/* Complete the processing of the thread that queued a SCSI command to this 4275/* Complete the processing of the thread that queued a SCSI command to this
@@ -4003,7 +4278,7 @@ static void setup_inject(struct sdebug_queue *sqp,
4003 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources. 4278 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4004 */ 4279 */
4005static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, 4280static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4006 int scsi_result, int delta_jiff) 4281 int scsi_result, int delta_jiff, int ndelay)
4007{ 4282{
4008 unsigned long iflags; 4283 unsigned long iflags;
4009 int k, num_in_q, qdepth, inject; 4284 int k, num_in_q, qdepth, inject;
@@ -4081,20 +4356,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4081 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 4356 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4082 if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt)) 4357 if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4083 setup_inject(sqp, sqcp); 4358 setup_inject(sqp, sqcp);
4084 if (delta_jiff > 0 || sdebug_ndelay > 0) { 4359 if (sd_dp == NULL) {
4360 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4361 if (sd_dp == NULL)
4362 return SCSI_MLQUEUE_HOST_BUSY;
4363 }
4364 if (delta_jiff > 0 || ndelay > 0) {
4085 ktime_t kt; 4365 ktime_t kt;
4086 4366
4087 if (delta_jiff > 0) { 4367 if (delta_jiff > 0) {
4088 struct timespec ts; 4368 kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4089
4090 jiffies_to_timespec(delta_jiff, &ts);
4091 kt = ktime_set(ts.tv_sec, ts.tv_nsec);
4092 } else 4369 } else
4093 kt = sdebug_ndelay; 4370 kt = ndelay;
4094 if (NULL == sd_dp) { 4371 if (!sd_dp->init_hrt) {
4095 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); 4372 sd_dp->init_hrt = true;
4096 if (NULL == sd_dp)
4097 return SCSI_MLQUEUE_HOST_BUSY;
4098 sqcp->sd_dp = sd_dp; 4373 sqcp->sd_dp = sd_dp;
4099 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, 4374 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4100 HRTIMER_MODE_REL_PINNED); 4375 HRTIMER_MODE_REL_PINNED);
@@ -4104,12 +4379,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4104 } 4379 }
4105 if (sdebug_statistics) 4380 if (sdebug_statistics)
4106 sd_dp->issuing_cpu = raw_smp_processor_id(); 4381 sd_dp->issuing_cpu = raw_smp_processor_id();
4382 sd_dp->defer_t = SDEB_DEFER_HRT;
4107 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED); 4383 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4108 } else { /* jdelay < 0, use work queue */ 4384 } else { /* jdelay < 0, use work queue */
4109 if (NULL == sd_dp) { 4385 if (!sd_dp->init_wq) {
4110 sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC); 4386 sd_dp->init_wq = true;
4111 if (NULL == sd_dp)
4112 return SCSI_MLQUEUE_HOST_BUSY;
4113 sqcp->sd_dp = sd_dp; 4387 sqcp->sd_dp = sd_dp;
4114 sd_dp->sqa_idx = sqp - sdebug_q_arr; 4388 sd_dp->sqa_idx = sqp - sdebug_q_arr;
4115 sd_dp->qc_idx = k; 4389 sd_dp->qc_idx = k;
@@ -4117,6 +4391,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4117 } 4391 }
4118 if (sdebug_statistics) 4392 if (sdebug_statistics)
4119 sd_dp->issuing_cpu = raw_smp_processor_id(); 4393 sd_dp->issuing_cpu = raw_smp_processor_id();
4394 sd_dp->defer_t = SDEB_DEFER_WQ;
4120 schedule_work(&sd_dp->ew.work); 4395 schedule_work(&sd_dp->ew.work);
4121 } 4396 }
4122 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && 4397 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
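The hrtimer path above converts the jiffies delay straight to nanoseconds instead of bouncing through a timespec. A quick arithmetic check of the new expression, assuming a HZ value that divides NSEC_PER_SEC evenly:

	/* With HZ = 250, one jiffy = NSEC_PER_SEC / HZ = 4,000,000 ns, so
	 * delta_jiff = 3 yields 12 ms; the same value the removed
	 * jiffies_to_timespec()/ktime_set() pair produced. */
	u64 ns = (u64)delta_jiff * (NSEC_PER_SEC / HZ);
	ktime_t kt = ns_to_ktime(ns);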
@@ -4141,6 +4416,7 @@ respond_in_thread: /* call back to mid-layer using invocation thread */
4141 */ 4416 */
4142module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR); 4417module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4143module_param_named(ato, sdebug_ato, int, S_IRUGO); 4418module_param_named(ato, sdebug_ato, int, S_IRUGO);
4419module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4144module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR); 4420module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4145module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR); 4421module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4146module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO); 4422module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
@@ -4198,6 +4474,7 @@ MODULE_VERSION(SDEBUG_VERSION);
4198 4474
4199MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); 4475MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4200MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); 4476MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4477MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
4201MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)"); 4478MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4202MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny"); 4479MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4203MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)"); 4480MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
@@ -4210,7 +4487,8 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4210MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)"); 4487MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4211MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")"); 4488MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
4212MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")"); 4489MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
4213MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"0186\")"); 4490MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
4491 SDEBUG_VERSION "\")");
4214MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); 4492MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4215MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); 4493MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4216MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); 4494MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
@@ -4360,9 +4638,6 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4360 } 4638 }
4361 } 4639 }
4362 if (res > 0) { 4640 if (res > 0) {
4363 /* make sure sdebug_defer instances get
4364 * re-allocated for new delay variant */
4365 free_all_queued();
4366 sdebug_jdelay = jdelay; 4641 sdebug_jdelay = jdelay;
4367 sdebug_ndelay = 0; 4642 sdebug_ndelay = 0;
4368 } 4643 }
@@ -4403,9 +4678,6 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4403 } 4678 }
4404 } 4679 }
4405 if (res > 0) { 4680 if (res > 0) {
4406 /* make sure sdebug_defer instances get
4407 * re-allocated for new delay variant */
4408 free_all_queued();
4409 sdebug_ndelay = ndelay; 4681 sdebug_ndelay = ndelay;
4410 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN 4682 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
4411 : DEF_JDELAY; 4683 : DEF_JDELAY;
@@ -4426,15 +4698,15 @@ static ssize_t opts_show(struct device_driver *ddp, char *buf)
4426static ssize_t opts_store(struct device_driver *ddp, const char *buf, 4698static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4427 size_t count) 4699 size_t count)
4428{ 4700{
4429 int opts; 4701 int opts;
4430 char work[20]; 4702 char work[20];
4431 4703
4432 if (1 == sscanf(buf, "%10s", work)) { 4704 if (sscanf(buf, "%10s", work) == 1) {
4433 if (0 == strncasecmp(work,"0x", 2)) { 4705 if (strncasecmp(work, "0x", 2) == 0) {
4434 if (1 == sscanf(&work[2], "%x", &opts)) 4706 if (kstrtoint(work + 2, 16, &opts) == 0)
4435 goto opts_done; 4707 goto opts_done;
4436 } else { 4708 } else {
4437 if (1 == sscanf(work, "%d", &opts)) 4709 if (kstrtoint(work, 10, &opts) == 0)
4438 goto opts_done; 4710 goto opts_done;
4439 } 4711 }
4440 } 4712 }
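An aside on the parser above: kstrtoint() with base 0 already auto-detects a "0x" or "0" prefix, so the two branches could in principle collapse into one call. Shown only as an illustration, not as what the patch does:

	int opts;

	if (kstrtoint(work, 0, &opts) == 0)	/* base 0 accepts "123" and "0x7b" */
		goto opts_done;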
@@ -4455,7 +4727,7 @@ static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4455static ssize_t ptype_store(struct device_driver *ddp, const char *buf, 4727static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4456 size_t count) 4728 size_t count)
4457{ 4729{
4458 int n; 4730 int n;
4459 4731
4460 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4732 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4461 sdebug_ptype = n; 4733 sdebug_ptype = n;
@@ -4472,7 +4744,7 @@ static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4472static ssize_t dsense_store(struct device_driver *ddp, const char *buf, 4744static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4473 size_t count) 4745 size_t count)
4474{ 4746{
4475 int n; 4747 int n;
4476 4748
4477 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4749 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4478 sdebug_dsense = n; 4750 sdebug_dsense = n;
@@ -4489,7 +4761,7 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4489static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, 4761static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4490 size_t count) 4762 size_t count)
4491{ 4763{
4492 int n; 4764 int n;
4493 4765
4494 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4766 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4495 n = (n > 0); 4767 n = (n > 0);
@@ -4522,7 +4794,7 @@ static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4522static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, 4794static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4523 size_t count) 4795 size_t count)
4524{ 4796{
4525 int n; 4797 int n;
4526 4798
4527 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4799 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4528 sdebug_no_lun_0 = n; 4800 sdebug_no_lun_0 = n;
@@ -4539,7 +4811,7 @@ static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4539static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, 4811static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4540 size_t count) 4812 size_t count)
4541{ 4813{
4542 int n; 4814 int n;
4543 4815
4544 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4816 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4545 sdebug_num_tgts = n; 4817 sdebug_num_tgts = n;
@@ -4569,7 +4841,7 @@ static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4569static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, 4841static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4570 size_t count) 4842 size_t count)
4571{ 4843{
4572 int nth; 4844 int nth;
4573 4845
4574 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) { 4846 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4575 sdebug_every_nth = nth; 4847 sdebug_every_nth = nth;
@@ -4591,7 +4863,7 @@ static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4591static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, 4863static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4592 size_t count) 4864 size_t count)
4593{ 4865{
4594 int n; 4866 int n;
4595 bool changed; 4867 bool changed;
4596 4868
4597 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4869 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4678,7 +4950,7 @@ static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4678static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, 4950static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4679 size_t count) 4951 size_t count)
4680{ 4952{
4681 int n; 4953 int n;
4682 bool changed; 4954 bool changed;
4683 4955
4684 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { 4956 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4884,6 +5156,24 @@ static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
4884} 5156}
4885static DRIVER_ATTR_RO(uuid_ctl); 5157static DRIVER_ATTR_RO(uuid_ctl);
4886 5158
5159static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5160{
5161 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5162}
5163static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5164 size_t count)
5165{
5166 int ret, n;
5167
5168 ret = kstrtoint(buf, 0, &n);
5169 if (ret)
5170 return ret;
5171 sdebug_cdb_len = n;
5172 all_config_cdb_len();
5173 return count;
5174}
5175static DRIVER_ATTR_RW(cdb_len);
5176
4887 5177
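The new cdb_len attribute is writable at runtime and re-applies the hints to every attached device through all_config_cdb_len(). An illustrative user-space poke, assuming the sysfs path named in the note below; the helper name is hypothetical:

	#include <stdio.h>

	static int set_sdebug_cdb_len(int len)
	{
		FILE *f = fopen("/sys/bus/pseudo/drivers/scsi_debug/cdb_len", "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", len);	/* one of 6, 10, 12, 16, 32 */
		return fclose(f);
	}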
4888/* Note: The following array creates attribute files in the 5178/* Note: The following array creates attribute files in the
4889 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these 5179 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -4923,6 +5213,7 @@ static struct attribute *sdebug_drv_attrs[] = {
4923 &driver_attr_ndelay.attr, 5213 &driver_attr_ndelay.attr,
4924 &driver_attr_strict.attr, 5214 &driver_attr_strict.attr,
4925 &driver_attr_uuid_ctl.attr, 5215 &driver_attr_uuid_ctl.attr,
5216 &driver_attr_cdb_len.attr,
4926 NULL, 5217 NULL,
4927}; 5218};
4928ATTRIBUTE_GROUPS(sdebug_drv); 5219ATTRIBUTE_GROUPS(sdebug_drv);
@@ -5113,12 +5404,12 @@ static int __init scsi_debug_init(void)
5113 host_to_add = sdebug_add_host; 5404 host_to_add = sdebug_add_host;
5114 sdebug_add_host = 0; 5405 sdebug_add_host = 0;
5115 5406
5116 for (k = 0; k < host_to_add; k++) { 5407 for (k = 0; k < host_to_add; k++) {
5117 if (sdebug_add_adapter()) { 5408 if (sdebug_add_adapter()) {
5118 pr_err("sdebug_add_adapter failed k=%d\n", k); 5409 pr_err("sdebug_add_adapter failed k=%d\n", k);
5119 break; 5410 break;
5120 } 5411 }
5121 } 5412 }
5122 5413
5123 if (sdebug_verbose) 5414 if (sdebug_verbose)
5124 pr_info("built %d host(s)\n", sdebug_add_host); 5415 pr_info("built %d host(s)\n", sdebug_add_host);
@@ -5161,53 +5452,53 @@ module_exit(scsi_debug_exit);
5161 5452
5162static void sdebug_release_adapter(struct device * dev) 5453static void sdebug_release_adapter(struct device * dev)
5163{ 5454{
5164 struct sdebug_host_info *sdbg_host; 5455 struct sdebug_host_info *sdbg_host;
5165 5456
5166 sdbg_host = to_sdebug_host(dev); 5457 sdbg_host = to_sdebug_host(dev);
5167 kfree(sdbg_host); 5458 kfree(sdbg_host);
5168} 5459}
5169 5460
5170static int sdebug_add_adapter(void) 5461static int sdebug_add_adapter(void)
5171{ 5462{
5172 int k, devs_per_host; 5463 int k, devs_per_host;
5173 int error = 0; 5464 int error = 0;
5174 struct sdebug_host_info *sdbg_host; 5465 struct sdebug_host_info *sdbg_host;
5175 struct sdebug_dev_info *sdbg_devinfo, *tmp; 5466 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5176 5467
5177 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); 5468 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5178 if (NULL == sdbg_host) { 5469 if (sdbg_host == NULL) {
5179 pr_err("out of memory at line %d\n", __LINE__); 5470 pr_err("out of memory at line %d\n", __LINE__);
5180 return -ENOMEM; 5471 return -ENOMEM;
5181 } 5472 }
5182 5473
5183 INIT_LIST_HEAD(&sdbg_host->dev_info_list); 5474 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5184 5475
5185 devs_per_host = sdebug_num_tgts * sdebug_max_luns; 5476 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5186 for (k = 0; k < devs_per_host; k++) { 5477 for (k = 0; k < devs_per_host; k++) {
5187 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); 5478 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5188 if (!sdbg_devinfo) { 5479 if (!sdbg_devinfo) {
5189 pr_err("out of memory at line %d\n", __LINE__); 5480 pr_err("out of memory at line %d\n", __LINE__);
5190 error = -ENOMEM; 5481 error = -ENOMEM;
5191 goto clean; 5482 goto clean;
5192 } 5483 }
5193 } 5484 }
5194 5485
5195 spin_lock(&sdebug_host_list_lock); 5486 spin_lock(&sdebug_host_list_lock);
5196 list_add_tail(&sdbg_host->host_list, &sdebug_host_list); 5487 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5197 spin_unlock(&sdebug_host_list_lock); 5488 spin_unlock(&sdebug_host_list_lock);
5198 5489
5199 sdbg_host->dev.bus = &pseudo_lld_bus; 5490 sdbg_host->dev.bus = &pseudo_lld_bus;
5200 sdbg_host->dev.parent = pseudo_primary; 5491 sdbg_host->dev.parent = pseudo_primary;
5201 sdbg_host->dev.release = &sdebug_release_adapter; 5492 sdbg_host->dev.release = &sdebug_release_adapter;
5202 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host); 5493 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5203 5494
5204 error = device_register(&sdbg_host->dev); 5495 error = device_register(&sdbg_host->dev);
5205 5496
5206 if (error) 5497 if (error)
5207 goto clean; 5498 goto clean;
5208 5499
5209 ++sdebug_add_host; 5500 ++sdebug_add_host;
5210 return error; 5501 return error;
5211 5502
5212clean: 5503clean:
5213 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, 5504 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
@@ -5217,20 +5508,20 @@ clean:
5217 } 5508 }
5218 5509
5219 kfree(sdbg_host); 5510 kfree(sdbg_host);
5220 return error; 5511 return error;
5221} 5512}
5222 5513
5223static void sdebug_remove_adapter(void) 5514static void sdebug_remove_adapter(void)
5224{ 5515{
5225 struct sdebug_host_info * sdbg_host = NULL; 5516 struct sdebug_host_info *sdbg_host = NULL;
5226 5517
5227 spin_lock(&sdebug_host_list_lock); 5518 spin_lock(&sdebug_host_list_lock);
5228 if (!list_empty(&sdebug_host_list)) { 5519 if (!list_empty(&sdebug_host_list)) {
5229 sdbg_host = list_entry(sdebug_host_list.prev, 5520 sdbg_host = list_entry(sdebug_host_list.prev,
5230 struct sdebug_host_info, host_list); 5521 struct sdebug_host_info, host_list);
5231 list_del(&sdbg_host->host_list); 5522 list_del(&sdbg_host->host_list);
5232 } 5523 }
5233 spin_unlock(&sdebug_host_list_lock); 5524 spin_unlock(&sdebug_host_list_lock);
5234 5525
5235 if (!sdbg_host) 5526 if (!sdbg_host)
5236 return; 5527 return;
@@ -5281,6 +5572,12 @@ static bool fake_timeout(struct scsi_cmnd *scp)
5281 return false; 5572 return false;
5282} 5573}
5283 5574
5575static bool fake_host_busy(struct scsi_cmnd *scp)
5576{
5577 return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5578 (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5579}
5580
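fake_host_busy() fires on the same every_nth cadence as the other injection options. A standalone restatement of the trigger; the zero guard is added here for the demo only, since the driver presumes every_nth is non-zero while an injection option is active:

	#include <stdbool.h>
	#include <stdlib.h>

	static bool nth_command_busy(long count, int every_nth)
	{
		if (every_nth == 0)
			return false;	/* demo-only guard against mod-by-zero */
		return (count % labs(every_nth)) == 0;	/* hits 0, N, 2N, ... */
	}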
5284static int scsi_debug_queuecommand(struct Scsi_Host *shost, 5581static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5285 struct scsi_cmnd *scp) 5582 struct scsi_cmnd *scp)
5286{ 5583{
@@ -5323,6 +5620,8 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5323 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, 5620 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
5324 b); 5621 b);
5325 } 5622 }
5623 if (fake_host_busy(scp))
5624 return SCSI_MLQUEUE_HOST_BUSY;
5326 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); 5625 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5327 if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl)) 5626 if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
5328 goto err_out; 5627 goto err_out;
@@ -5420,12 +5719,15 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5420 errsts = r_pfp(scp, devip); 5719 errsts = r_pfp(scp, devip);
5421 5720
5422fini: 5721fini:
5423 return schedule_resp(scp, devip, errsts, 5722 if (F_DELAY_OVERR & flags)
5424 ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay)); 5723 return schedule_resp(scp, devip, errsts, 0, 0);
5724 else
5725 return schedule_resp(scp, devip, errsts, sdebug_jdelay,
5726 sdebug_ndelay);
5425check_cond: 5727check_cond:
5426 return schedule_resp(scp, devip, check_condition_result, 0); 5728 return schedule_resp(scp, devip, check_condition_result, 0, 0);
5427err_out: 5729err_out:
5428 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0); 5730 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0, 0);
5429} 5731}
5430 5732
5431static struct scsi_host_template sdebug_driver_template = { 5733static struct scsi_host_template sdebug_driver_template = {
@@ -5484,7 +5786,7 @@ static int sdebug_driver_probe(struct device * dev)
5484 if (sdebug_mq_active) 5786 if (sdebug_mq_active)
5485 hpnt->nr_hw_queues = submit_queues; 5787 hpnt->nr_hw_queues = submit_queues;
5486 5788
5487 sdbg_host->shost = hpnt; 5789 sdbg_host->shost = hpnt;
5488 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host; 5790 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5489 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id)) 5791 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5490 hpnt->max_id = sdebug_num_tgts + 1; 5792 hpnt->max_id = sdebug_num_tgts + 1;
@@ -5542,12 +5844,12 @@ static int sdebug_driver_probe(struct device * dev)
5542 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts); 5844 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5543 if (sdebug_every_nth) /* need stats counters for every_nth */ 5845 if (sdebug_every_nth) /* need stats counters for every_nth */
5544 sdebug_statistics = true; 5846 sdebug_statistics = true;
5545 error = scsi_add_host(hpnt, &sdbg_host->dev); 5847 error = scsi_add_host(hpnt, &sdbg_host->dev);
5546 if (error) { 5848 if (error) {
5547 pr_err("scsi_add_host failed\n"); 5849 pr_err("scsi_add_host failed\n");
5548 error = -ENODEV; 5850 error = -ENODEV;
5549 scsi_host_put(hpnt); 5851 scsi_host_put(hpnt);
5550 } else 5852 } else
5551 scsi_scan_host(hpnt); 5853 scsi_scan_host(hpnt);
5552 5854
5553 return error; 5855 return error;
@@ -5555,7 +5857,7 @@ static int sdebug_driver_probe(struct device * dev)
5555 5857
5556static int sdebug_driver_remove(struct device * dev) 5858static int sdebug_driver_remove(struct device * dev)
5557{ 5859{
5558 struct sdebug_host_info *sdbg_host; 5860 struct sdebug_host_info *sdbg_host;
5559 struct sdebug_dev_info *sdbg_devinfo, *tmp; 5861 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5560 5862
5561 sdbg_host = to_sdebug_host(dev); 5863 sdbg_host = to_sdebug_host(dev);
@@ -5565,16 +5867,16 @@ static int sdebug_driver_remove(struct device * dev)
5565 return -ENODEV; 5867 return -ENODEV;
5566 } 5868 }
5567 5869
5568 scsi_remove_host(sdbg_host->shost); 5870 scsi_remove_host(sdbg_host->shost);
5569 5871
5570 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, 5872 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5571 dev_list) { 5873 dev_list) {
5572 list_del(&sdbg_devinfo->dev_list); 5874 list_del(&sdbg_devinfo->dev_list);
5573 kfree(sdbg_devinfo); 5875 kfree(sdbg_devinfo);
5574 } 5876 }
5575 5877
5576 scsi_host_put(sdbg_host->shost); 5878 scsi_host_put(sdbg_host->shost);
5577 return 0; 5879 return 0;
5578} 5880}
5579 5881
5580static int pseudo_lld_bus_match(struct device *dev, 5882static int pseudo_lld_bus_match(struct device *dev,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index dfb8da83fa50..f3b117246d47 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -108,8 +108,8 @@ static struct {
108 * seagate controller, which causes SCSI code to reset bus. 108 * seagate controller, which causes SCSI code to reset bus.
109 */ 109 */
110 {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */ 110 {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
111 {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */ 111 {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */
112 {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */ 112 {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */
113 {"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */ 113 {"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */
114 {"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */ 114 {"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */
115 {"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */ 115 {"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */
@@ -119,7 +119,7 @@ static struct {
119 {"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */ 119 {"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */
120 {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */ 120 {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
121 {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */ 121 {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
122 {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN}, 122 {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN},
123 {"transtec", "T5008", "0001", BLIST_NOREPORTLUN }, 123 {"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
124 {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */ 124 {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
125 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ 125 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
@@ -158,8 +158,8 @@ static struct {
158 {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */ 158 {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
159 {"DELL", "PV530F", NULL, BLIST_SPARSELUN}, 159 {"DELL", "PV530F", NULL, BLIST_SPARSELUN},
160 {"DELL", "PERCRAID", NULL, BLIST_FORCELUN}, 160 {"DELL", "PERCRAID", NULL, BLIST_FORCELUN},
161 {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ 161 {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */
162 {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ 162 {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */
163 {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 163 {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
164 {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2}, 164 {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
165 {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, 165 {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
@@ -181,15 +181,14 @@ static struct {
181 {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 181 {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
182 {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 182 {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
183 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ 183 {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
184 {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */ 184 {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
185 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, 185 {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
186 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, 186 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
187 {"HP", "C1557A", NULL, BLIST_FORCELUN}, 187 {"HP", "C1557A", NULL, BLIST_FORCELUN},
188 {"HP", "C3323-300", "4269", BLIST_NOTQ}, 188 {"HP", "C3323-300", "4269", BLIST_NOTQ},
189 {"HP", "C5713A", NULL, BLIST_NOREPORTLUN}, 189 {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
190 {"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 190 {"HP", "DF400", "*", BLIST_REPORTLUN2},
191 {"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 191 {"HP", "DF500", "*", BLIST_REPORTLUN2},
192 {"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
193 {"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 192 {"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
194 {"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 193 {"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
195 {"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 194 {"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -255,7 +254,6 @@ static struct {
255 {"ST650211", "CF", NULL, BLIST_RETRY_HWERROR}, 254 {"ST650211", "CF", NULL, BLIST_RETRY_HWERROR},
256 {"SUN", "T300", "*", BLIST_SPARSELUN}, 255 {"SUN", "T300", "*", BLIST_SPARSELUN},
257 {"SUN", "T4", "*", BLIST_SPARSELUN}, 256 {"SUN", "T4", "*", BLIST_SPARSELUN},
258 {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
259 {"Tornado-", "F4", "*", BLIST_NOREPORTLUN}, 257 {"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
260 {"TOSHIBA", "CDROM", NULL, BLIST_ISROM}, 258 {"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
261 {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM}, 259 {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
@@ -353,7 +351,8 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
353 * Returns: 0 OK, -error on failure. 351 * Returns: 0 OK, -error on failure.
354 **/ 352 **/
355int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, 353int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
356 char *strflags, blist_flags_t flags, int key) 354 char *strflags, blist_flags_t flags,
355 enum scsi_devinfo_key key)
357{ 356{
358 struct scsi_dev_info_list *devinfo; 357 struct scsi_dev_info_list *devinfo;
359 struct scsi_dev_info_list_table *devinfo_table = 358 struct scsi_dev_info_list_table *devinfo_table =
@@ -402,7 +401,7 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
402 * Returns: pointer to matching entry, or ERR_PTR on failure. 401 * Returns: pointer to matching entry, or ERR_PTR on failure.
403 **/ 402 **/
404static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, 403static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
405 const char *model, int key) 404 const char *model, enum scsi_devinfo_key key)
406{ 405{
407 struct scsi_dev_info_list *devinfo; 406 struct scsi_dev_info_list *devinfo;
408 struct scsi_dev_info_list_table *devinfo_table = 407 struct scsi_dev_info_list_table *devinfo_table =
@@ -485,7 +484,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
485 * 484 *
486 * Returns: 0 OK, -error on failure. 485 * Returns: 0 OK, -error on failure.
487 **/ 486 **/
488int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key) 487int scsi_dev_info_list_del_keyed(char *vendor, char *model,
488 enum scsi_devinfo_key key)
489{ 489{
490 struct scsi_dev_info_list *found; 490 struct scsi_dev_info_list *found;
491 491
@@ -587,20 +587,15 @@ blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
587blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev, 587blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
588 const unsigned char *vendor, 588 const unsigned char *vendor,
589 const unsigned char *model, 589 const unsigned char *model,
590 int key) 590 enum scsi_devinfo_key key)
591{ 591{
592 struct scsi_dev_info_list *devinfo; 592 struct scsi_dev_info_list *devinfo;
593 int err;
594 593
595 devinfo = scsi_dev_info_list_find(vendor, model, key); 594 devinfo = scsi_dev_info_list_find(vendor, model, key);
596 if (!IS_ERR(devinfo)) 595 if (!IS_ERR(devinfo))
597 return devinfo->flags; 596 return devinfo->flags;
598 597
599 err = PTR_ERR(devinfo); 598 /* key or device not found: return nothing */
600 if (err != -ENOENT)
601 return err;
602
603 /* nothing found, return nothing */
604 if (key != SCSI_DEVINFO_GLOBAL) 599 if (key != SCSI_DEVINFO_GLOBAL)
605 return 0; 600 return 0;
606 601
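The hunk above also fixes a subtle type confusion: the deleted lines propagated PTR_ERR(devinfo), a negative errno, through a function whose return type is blist_flags_t, so a caller could mistake an error code for device flags. A minimal sketch of the ERR_PTR() convention involved, with demo_list_find() as a hypothetical stand-in for the real lookup:

#include <linux/err.h>

struct demo_entry {
        unsigned long flags;
};

/* hypothetical lookup: returns a valid pointer or an ERR_PTR() errno */
struct demo_entry *demo_list_find(const char *vendor, const char *model);

static unsigned long demo_get_flags(const char *vendor, const char *model)
{
        struct demo_entry *e = demo_list_find(vendor, model);

        if (!IS_ERR(e))
                return e->flags;        /* found: report its flags */
        return 0;                       /* any failure: report no flags */
}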
@@ -774,7 +769,7 @@ void scsi_exit_devinfo(void)
774 * Adds the requested list, returns zero on success, -EEXIST if the 769 * Adds the requested list, returns zero on success, -EEXIST if the
775 * key is already registered to a list, or other error on failure. 770 * key is already registered to a list, or other error on failure.
776 */ 771 */
777int scsi_dev_info_add_list(int key, const char *name) 772int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name)
778{ 773{
779 struct scsi_dev_info_list_table *devinfo_table = 774 struct scsi_dev_info_list_table *devinfo_table =
780 scsi_devinfo_lookup_by_key(key); 775 scsi_devinfo_lookup_by_key(key);
@@ -806,7 +801,7 @@ EXPORT_SYMBOL(scsi_dev_info_add_list);
806 * frees the list itself. Returns 0 on success or -EINVAL if the key 801 * frees the list itself. Returns 0 on success or -EINVAL if the key
807 * can't be found. 802 * can't be found.
808 */ 803 */
809int scsi_dev_info_remove_list(int key) 804int scsi_dev_info_remove_list(enum scsi_devinfo_key key)
810{ 805{
811 struct list_head *lh, *lh_next; 806 struct list_head *lh, *lh_next;
812 struct scsi_dev_info_list_table *devinfo_table = 807 struct scsi_dev_info_list_table *devinfo_table =
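Across the scsi_devinfo.c hunks the bare int key parameter becomes enum scsi_devinfo_key (the enum itself is named in the scsi_priv.h hunk further down). A small sketch, using hypothetical demo_* names rather than the kernel API, of what the typed parameter buys: call sites document which key they pass, and the compiler can diagnose unhandled enumerators:

enum demo_devinfo_key {
        DEMO_DEVINFO_GLOBAL = 0,
        DEMO_DEVINFO_SPI,
};

static const char *demo_key_name(enum demo_devinfo_key key)
{
        switch (key) {
        case DEMO_DEVINFO_GLOBAL:
                return "global";
        case DEMO_DEVINFO_SPI:
                return "spi";
        }
        /* -Wswitch now flags any enumerator added but not handled above */
        return "unknown";
}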
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 2b785d09d5bd..b88b5dbbc444 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
56 {"IBM", "1815", "rdac", }, 56 {"IBM", "1815", "rdac", },
57 {"IBM", "1818", "rdac", }, 57 {"IBM", "1818", "rdac", },
58 {"IBM", "3526", "rdac", }, 58 {"IBM", "3526", "rdac", },
59 {"IBM", "3542", "rdac", },
60 {"IBM", "3552", "rdac", },
59 {"SGI", "TP9", "rdac", }, 61 {"SGI", "TP9", "rdac", },
60 {"SGI", "IS", "rdac", }, 62 {"SGI", "IS", "rdac", },
61 {"STK", "OPENstorage D280", "rdac", }, 63 {"STK", "OPENstorage", "rdac", },
62 {"STK", "FLEXLINE 380", "rdac", }, 64 {"STK", "FLEXLINE 380", "rdac", },
65 {"STK", "BladeCtlr", "rdac", },
63 {"SUN", "CSM", "rdac", }, 66 {"SUN", "CSM", "rdac", },
64 {"SUN", "LCSM100", "rdac", }, 67 {"SUN", "LCSM100", "rdac", },
65 {"SUN", "STK6580_6780", "rdac", }, 68 {"SUN", "STK6580_6780", "rdac", },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 62b56de38ae8..d042915ce895 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,9 +61,10 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
61static int scsi_try_to_abort_cmd(struct scsi_host_template *, 61static int scsi_try_to_abort_cmd(struct scsi_host_template *,
62 struct scsi_cmnd *); 62 struct scsi_cmnd *);
63 63
64/* called with shost->host_lock held */
65void scsi_eh_wakeup(struct Scsi_Host *shost) 64void scsi_eh_wakeup(struct Scsi_Host *shost)
66{ 65{
66 lockdep_assert_held(shost->host_lock);
67
67 if (atomic_read(&shost->host_busy) == shost->host_failed) { 68 if (atomic_read(&shost->host_busy) == shost->host_failed) {
68 trace_scsi_eh_wakeup(shost); 69 trace_scsi_eh_wakeup(shost);
69 wake_up_process(shost->ehandler); 70 wake_up_process(shost->ehandler);
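This hunk turns the old "called with shost->host_lock held" comment into lockdep_assert_held(), so the locking rule is checked rather than merely documented. A sketch with a hypothetical structure:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct demo_host {
        spinlock_t lock;
        unsigned int failed;
};

static void demo_wakeup(struct demo_host *h)
{
        /*
         * Splats under CONFIG_PROVE_LOCKING if a caller forgot the lock;
         * compiles away entirely on configurations without lockdep.
         */
        lockdep_assert_held(&h->lock);
        h->failed = 0;
}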
@@ -220,6 +221,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
220 } 221 }
221} 222}
222 223
224static void scsi_eh_inc_host_failed(struct rcu_head *head)
225{
226 struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
227 unsigned long flags;
228
229 spin_lock_irqsave(shost->host_lock, flags);
230 shost->host_failed++;
231 scsi_eh_wakeup(shost);
232 spin_unlock_irqrestore(shost->host_lock, flags);
233}
234
223/** 235/**
224 * scsi_eh_scmd_add - add scsi cmd to error handling. 236 * scsi_eh_scmd_add - add scsi cmd to error handling.
225 * @scmd: scmd to run eh on. 237 * @scmd: scmd to run eh on.
@@ -242,9 +254,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
242 254
243 scsi_eh_reset(scmd); 255 scsi_eh_reset(scmd);
244 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); 256 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
245 shost->host_failed++;
246 scsi_eh_wakeup(shost);
247 spin_unlock_irqrestore(shost->host_lock, flags); 257 spin_unlock_irqrestore(shost->host_lock, flags);
258 /*
259 * Ensure that all tasks observe the host state change before the
260 * host_failed change.
261 */
262 call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
248} 263}
249 264
250/** 265/**
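scsi_eh_scmd_add() now defers the host_failed increment and the wakeup to an RCU callback instead of performing them directly under the host lock, because call_rcu() guarantees the callback runs only after every RCU read-side critical section already in flight has ended. A hedged sketch of that hand-off, using a hypothetical demo_host that mirrors the shape of scsi_eh_inc_host_failed() above:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_host {
        struct rcu_head rcu;
        spinlock_t lock;
        unsigned int failed;
};

static void demo_inc_failed(struct rcu_head *head)
{
        struct demo_host *h = container_of(head, typeof(*h), rcu);
        unsigned long flags;

        spin_lock_irqsave(&h->lock, flags);
        h->failed++;            /* mirrors shost->host_failed++ */
        spin_unlock_irqrestore(&h->lock, flags);
}

static void demo_mark_failed(struct demo_host *h)
{
        /* deferred until all current rcu_read_lock() sections finish */
        call_rcu(&h->rcu, demo_inc_failed);
}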
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d9ca1dfab154..976c936029cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -164,7 +164,7 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
164 * for a requeue after completion, which should only occur in this 164 * for a requeue after completion, which should only occur in this
165 * file. 165 * file.
166 */ 166 */
167static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) 167static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
168{ 168{
169 struct scsi_device *device = cmd->device; 169 struct scsi_device *device = cmd->device;
170 struct request_queue *q = device->request_queue; 170 struct request_queue *q = device->request_queue;
@@ -220,7 +220,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
220 */ 220 */
221void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 221void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
222{ 222{
223 __scsi_queue_insert(cmd, reason, 1); 223 __scsi_queue_insert(cmd, reason, true);
224} 224}
225 225
226 226
@@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
318 cmd->cmd_len = scsi_command_size(cmd->cmnd); 318 cmd->cmd_len = scsi_command_size(cmd->cmnd);
319} 319}
320 320
321void scsi_device_unbusy(struct scsi_device *sdev) 321/*
322 * Decrement the host_busy counter and wake up the error handler if necessary.
 323 * A wakeup could otherwise be missed when shost->host_busy reaches
 324 * shost->host_failed: call_rcu() in scsi_eh_scmd_add(), combined with an
 325 * RCU read lock in this function, guarantees that this function either
 326 * finishes in its entirety before scsi_eh_scmd_add() increments the
 327 * host_failed counter, or observes the shost state change made by
 328 * scsi_eh_scmd_add().
329 */
330static void scsi_dec_host_busy(struct Scsi_Host *shost)
322{ 331{
323 struct Scsi_Host *shost = sdev->host;
324 struct scsi_target *starget = scsi_target(sdev);
325 unsigned long flags; 332 unsigned long flags;
326 333
334 rcu_read_lock();
327 atomic_dec(&shost->host_busy); 335 atomic_dec(&shost->host_busy);
328 if (starget->can_queue > 0) 336 if (unlikely(scsi_host_in_recovery(shost))) {
329 atomic_dec(&starget->target_busy);
330
331 if (unlikely(scsi_host_in_recovery(shost) &&
332 (shost->host_failed || shost->host_eh_scheduled))) {
333 spin_lock_irqsave(shost->host_lock, flags); 337 spin_lock_irqsave(shost->host_lock, flags);
334 scsi_eh_wakeup(shost); 338 if (shost->host_failed || shost->host_eh_scheduled)
339 scsi_eh_wakeup(shost);
335 spin_unlock_irqrestore(shost->host_lock, flags); 340 spin_unlock_irqrestore(shost->host_lock, flags);
336 } 341 }
342 rcu_read_unlock();
343}
344
345void scsi_device_unbusy(struct scsi_device *sdev)
346{
347 struct Scsi_Host *shost = sdev->host;
348 struct scsi_target *starget = scsi_target(sdev);
349
350 scsi_dec_host_busy(shost);
351
352 if (starget->can_queue > 0)
353 atomic_dec(&starget->target_busy);
337 354
338 atomic_dec(&sdev->device_busy); 355 atomic_dec(&sdev->device_busy);
339} 356}
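scsi_dec_host_busy() is the read side of that hand-off: the whole decrement-then-recheck sequence sits inside rcu_read_lock(), so it either completes before the deferred host_failed increment runs or observes the recovery state already set. A matching sketch, with demo_in_recovery() and demo_wakeup_locked() as hypothetical stand-ins:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_rd_host {
        spinlock_t lock;
        atomic_t busy;
        unsigned int failed;
};

bool demo_in_recovery(struct demo_rd_host *h);          /* hypothetical */
void demo_wakeup_locked(struct demo_rd_host *h);        /* hypothetical */

static void demo_unbusy(struct demo_rd_host *h)
{
        unsigned long flags;

        rcu_read_lock();        /* pairs with call_rcu() on the writer side */
        atomic_dec(&h->busy);
        if (unlikely(demo_in_recovery(h))) {
                spin_lock_irqsave(&h->lock, flags);
                if (h->failed)
                        demo_wakeup_locked(h);
                spin_unlock_irqrestore(&h->lock, flags);
        }
        rcu_read_unlock();
}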
@@ -998,11 +1015,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
998 break; 1015 break;
999 case ACTION_RETRY: 1016 case ACTION_RETRY:
1000 /* Retry the same command immediately */ 1017 /* Retry the same command immediately */
1001 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0); 1018 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
1002 break; 1019 break;
1003 case ACTION_DELAYED_RETRY: 1020 case ACTION_DELAYED_RETRY:
1004 /* Retry the same command after a delay */ 1021 /* Retry the same command after a delay */
1005 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); 1022 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
1006 break; 1023 break;
1007 } 1024 }
1008} 1025}
@@ -1128,7 +1145,7 @@ EXPORT_SYMBOL(scsi_init_io);
1128 * Called from inside blk_get_request() for pass-through requests and from 1145 * Called from inside blk_get_request() for pass-through requests and from
1129 * inside scsi_init_command() for filesystem requests. 1146 * inside scsi_init_command() for filesystem requests.
1130 */ 1147 */
1131void scsi_initialize_rq(struct request *rq) 1148static void scsi_initialize_rq(struct request *rq)
1132{ 1149{
1133 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 1150 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1134 1151
@@ -1136,7 +1153,6 @@ void scsi_initialize_rq(struct request *rq)
1136 cmd->jiffies_at_alloc = jiffies; 1153 cmd->jiffies_at_alloc = jiffies;
1137 cmd->retries = 0; 1154 cmd->retries = 0;
1138} 1155}
1139EXPORT_SYMBOL(scsi_initialize_rq);
1140 1156
1141/* Add a command to the list used by the aacraid and dpt_i2o drivers */ 1157/* Add a command to the list used by the aacraid and dpt_i2o drivers */
1142void scsi_add_cmd_to_list(struct scsi_cmnd *cmd) 1158void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
@@ -1532,7 +1548,7 @@ starved:
1532 list_add_tail(&sdev->starved_entry, &shost->starved_list); 1548 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1533 spin_unlock_irq(shost->host_lock); 1549 spin_unlock_irq(shost->host_lock);
1534out_dec: 1550out_dec:
1535 atomic_dec(&shost->host_busy); 1551 scsi_dec_host_busy(shost);
1536 return 0; 1552 return 0;
1537} 1553}
1538 1554
@@ -2020,7 +2036,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
2020 return BLK_STS_OK; 2036 return BLK_STS_OK;
2021 2037
2022out_dec_host_busy: 2038out_dec_host_busy:
2023 atomic_dec(&shost->host_busy); 2039 scsi_dec_host_busy(shost);
2024out_dec_target_busy: 2040out_dec_target_busy:
2025 if (scsi_target(sdev)->can_queue > 0) 2041 if (scsi_target(sdev)->can_queue > 0)
2026 atomic_dec(&scsi_target(sdev)->target_busy); 2042 atomic_dec(&scsi_target(sdev)->target_busy);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a5946cd64caa..99f1db5e467e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
45/* scsi_devinfo.c */ 45/* scsi_devinfo.c */
46 46
47/* list of keys for the lists */ 47/* list of keys for the lists */
48enum { 48enum scsi_devinfo_key {
49 SCSI_DEVINFO_GLOBAL = 0, 49 SCSI_DEVINFO_GLOBAL = 0,
50 SCSI_DEVINFO_SPI, 50 SCSI_DEVINFO_SPI,
51}; 51};
@@ -56,13 +56,15 @@ extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
56extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev, 56extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
57 const unsigned char *vendor, 57 const unsigned char *vendor,
58 const unsigned char *model, 58 const unsigned char *model,
59 int key); 59 enum scsi_devinfo_key key);
60extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, 60extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
61 char *model, char *strflags, 61 char *model, char *strflags,
62 blist_flags_t flags, int key); 62 blist_flags_t flags,
63extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key); 63 enum scsi_devinfo_key key);
64extern int scsi_dev_info_add_list(int key, const char *name); 64extern int scsi_dev_info_list_del_keyed(char *vendor, char *model,
65extern int scsi_dev_info_remove_list(int key); 65 enum scsi_devinfo_key key);
66extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name);
67extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key);
66 68
67extern int __init scsi_init_devinfo(void); 69extern int __init scsi_init_devinfo(void);
68extern void scsi_exit_devinfo(void); 70extern void scsi_exit_devinfo(void);
@@ -184,7 +186,6 @@ void scsi_dh_release_device(struct scsi_device *sdev);
184static inline void scsi_dh_add_device(struct scsi_device *sdev) { } 186static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
185static inline void scsi_dh_release_device(struct scsi_device *sdev) { } 187static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
186#endif 188#endif
187static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
188 189
189/* 190/*
190 * internal scsi timeout functions: for use by mid-layer and transport 191 * internal scsi timeout functions: for use by mid-layer and transport
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 26ce17178401..91b90f672d23 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1278,7 +1278,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
1278 if (error) { 1278 if (error) {
1279 sdev_printk(KERN_INFO, sdev, 1279 sdev_printk(KERN_INFO, sdev,
1280 "failed to add device: %d\n", error); 1280 "failed to add device: %d\n", error);
1281 scsi_dh_remove_device(sdev);
1282 return error; 1281 return error;
1283 } 1282 }
1284 1283
@@ -1287,7 +1286,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
1287 if (error) { 1286 if (error) {
1288 sdev_printk(KERN_INFO, sdev, 1287 sdev_printk(KERN_INFO, sdev,
1289 "failed to add class device: %d\n", error); 1288 "failed to add class device: %d\n", error);
1290 scsi_dh_remove_device(sdev);
1291 device_del(&sdev->sdev_gendev); 1289 device_del(&sdev->sdev_gendev);
1292 return error; 1290 return error;
1293 } 1291 }
@@ -1354,7 +1352,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
1354 bsg_unregister_queue(sdev->request_queue); 1352 bsg_unregister_queue(sdev->request_queue);
1355 device_unregister(&sdev->sdev_dev); 1353 device_unregister(&sdev->sdev_dev);
1356 transport_remove_device(dev); 1354 transport_remove_device(dev);
1357 scsi_dh_remove_device(sdev);
1358 device_del(dev); 1355 device_del(dev);
1359 } else 1356 } else
1360 put_device(&sdev->sdev_dev); 1357 put_device(&sdev->sdev_dev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4664024bd5d3..be3be0f9cb2d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -267,8 +267,8 @@ static const struct {
267 { FC_PORTSPEED_50GBIT, "50 Gbit" }, 267 { FC_PORTSPEED_50GBIT, "50 Gbit" },
268 { FC_PORTSPEED_100GBIT, "100 Gbit" }, 268 { FC_PORTSPEED_100GBIT, "100 Gbit" },
269 { FC_PORTSPEED_25GBIT, "25 Gbit" }, 269 { FC_PORTSPEED_25GBIT, "25 Gbit" },
270 { FC_PORTSPEED_64BIT, "64 Gbit" }, 270 { FC_PORTSPEED_64GBIT, "64 Gbit" },
271 { FC_PORTSPEED_128BIT, "128 Gbit" }, 271 { FC_PORTSPEED_128GBIT, "128 Gbit" },
272 { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, 272 { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
273}; 273};
274fc_bitfield_name_search(port_speed, fc_port_speed_names) 274fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a028ab3322a9..ce756d575aff 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2172,7 +2172,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
2172 } 2172 }
2173 /* Wait 1 second for next try */ 2173 /* Wait 1 second for next try */
2174 msleep(1000); 2174 msleep(1000);
2175 printk("."); 2175 printk(KERN_CONT ".");
2176 2176
2177 /* 2177 /*
2178 * Wait for USB flash devices with slow firmware. 2178 * Wait for USB flash devices with slow firmware.
@@ -2202,9 +2202,9 @@ sd_spinup_disk(struct scsi_disk *sdkp)
2202 2202
2203 if (spintime) { 2203 if (spintime) {
2204 if (scsi_status_is_good(the_result)) 2204 if (scsi_status_is_good(the_result))
2205 printk("ready\n"); 2205 printk(KERN_CONT "ready\n");
2206 else 2206 else
2207 printk("not responding...\n"); 2207 printk(KERN_CONT "not responding...\n");
2208 } 2208 }
2209} 2209}
2210 2210
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 11826c5c2dd4..62f04c0511cf 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -615,13 +615,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
615} 615}
616 616
617static void ses_match_to_enclosure(struct enclosure_device *edev, 617static void ses_match_to_enclosure(struct enclosure_device *edev,
618 struct scsi_device *sdev) 618 struct scsi_device *sdev,
619 int refresh)
619{ 620{
621 struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent);
620 struct efd efd = { 622 struct efd efd = {
621 .addr = 0, 623 .addr = 0,
622 }; 624 };
623 625
624 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); 626 if (refresh)
627 ses_enclosure_data_process(edev, edev_sdev, 0);
625 628
626 if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) 629 if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
627 efd.addr = sas_get_address(sdev); 630 efd.addr = sas_get_address(sdev);
@@ -652,7 +655,7 @@ static int ses_intf_add(struct device *cdev,
652 struct enclosure_device *prev = NULL; 655 struct enclosure_device *prev = NULL;
653 656
654 while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { 657 while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
655 ses_match_to_enclosure(edev, sdev); 658 ses_match_to_enclosure(edev, sdev, 1);
656 prev = edev; 659 prev = edev;
657 } 660 }
658 return -ENODEV; 661 return -ENODEV;
@@ -768,7 +771,7 @@ page2_not_supported:
768 shost_for_each_device(tmp_sdev, sdev->host) { 771 shost_for_each_device(tmp_sdev, sdev->host) {
769 if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev)) 772 if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
770 continue; 773 continue;
771 ses_match_to_enclosure(edev, tmp_sdev); 774 ses_match_to_enclosure(edev, tmp_sdev, 0);
772 } 775 }
773 776
774 return 0; 777 return 0;
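ses_match_to_enclosure() gains a refresh argument so the bulk rescan loop at the end of the hunk can skip re-reading enclosure pages it has just parsed, while the hot-add path still refreshes. The shape of the change, with hypothetical demo_* names:

struct demo_enclosure;
struct demo_dev;

void demo_refresh_pages(struct demo_enclosure *edev);   /* expensive I/O */
void demo_attach_if_match(struct demo_enclosure *edev, struct demo_dev *dev);

static void demo_match(struct demo_enclosure *edev, struct demo_dev *dev,
                       int refresh)
{
        /* hot-add passes refresh=1; the bulk rescan passes refresh=0 */
        if (refresh)
                demo_refresh_pages(edev);
        demo_attach_if_match(edev, dev);
}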
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
index 0f42a225a664..e6b779930230 100644
--- a/drivers/scsi/smartpqi/Makefile
+++ b/drivers/scsi/smartpqi/Makefile
@@ -1,3 +1,3 @@
1ccflags-y += -I. 1ccflags-y += -I.
2obj-m += smartpqi.o 2obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
3smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o 3smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index b141d7641a2e..6c399480783d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4712,7 +4712,7 @@ static ssize_t read_byte_cnt_show(struct device *dev,
4712static DEVICE_ATTR_RO(read_byte_cnt); 4712static DEVICE_ATTR_RO(read_byte_cnt);
4713 4713
4714/** 4714/**
4715 * read_us_show - return read us - overall time spent waiting on reads in ns. 4715 * read_ns_show - return read ns - overall time spent waiting on reads in ns.
4716 * @dev: struct device 4716 * @dev: struct device
4717 * @attr: attribute structure 4717 * @attr: attribute structure
4718 * @buf: buffer to return formatted data in 4718 * @buf: buffer to return formatted data in
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3b3d1d050cac..40fc7a590e81 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1834,8 +1834,10 @@ static int storvsc_probe(struct hv_device *device,
1834 fc_host_node_name(host) = stor_device->node_name; 1834 fc_host_node_name(host) = stor_device->node_name;
1835 fc_host_port_name(host) = stor_device->port_name; 1835 fc_host_port_name(host) = stor_device->port_name;
1836 stor_device->rport = fc_remote_port_add(host, 0, &ids); 1836 stor_device->rport = fc_remote_port_add(host, 0, &ids);
1837 if (!stor_device->rport) 1837 if (!stor_device->rport) {
1838 ret = -ENOMEM;
1838 goto err_out4; 1839 goto err_out4;
1840 }
1839 } 1841 }
1840#endif 1842#endif
1841 return 0; 1843 return 0;
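The storvsc hunk fixes a classic probe bug: the failure branch jumped to the unwind label without assigning ret, so the function could unwind yet still report success. A generic sketch of the rule that every error path must set a negative errno before the goto (the demo_* helpers are hypothetical):

#include <linux/errno.h>

void *demo_try_alloc(void);     /* hypothetical helpers */
void demo_unwind(void);

static int demo_probe(void)
{
        void *rport;
        int ret;

        rport = demo_try_alloc();
        if (!rport) {
                ret = -ENOMEM;  /* the assignment the patch adds */
                goto err_out;
        }
        return 0;

err_out:
        demo_unwind();
        return ret;
}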
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 277752b0fc6f..1a1b5d9fe514 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -157,6 +157,8 @@ enum {
157#define UTP_TRANSFER_REQ_LIST_READY 0x2 157#define UTP_TRANSFER_REQ_LIST_READY 0x2
158#define UTP_TASK_REQ_LIST_READY 0x4 158#define UTP_TASK_REQ_LIST_READY 0x4
159#define UIC_COMMAND_READY 0x8 159#define UIC_COMMAND_READY 0x8
160#define HOST_ERROR_INDICATOR 0x10
161#define DEVICE_ERROR_INDICATOR 0x20
160#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) 162#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
161 163
162#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\ 164#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
@@ -185,6 +187,10 @@ enum {
185/* UECDL - Host UIC Error Code Data Link Layer 3Ch */ 187/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
186#define UIC_DATA_LINK_LAYER_ERROR 0x80000000 188#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
187#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF 189#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
190#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
191#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
192#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
193#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
188#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 194#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
189#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001 195#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
190#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002 196#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
@@ -192,10 +198,20 @@ enum {
192/* UECN - Host UIC Error Code Network Layer 40h */ 198/* UECN - Host UIC Error Code Network Layer 40h */
193#define UIC_NETWORK_LAYER_ERROR 0x80000000 199#define UIC_NETWORK_LAYER_ERROR 0x80000000
194#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7 200#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
201#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
202#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
203#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
195 204
196/* UECT - Host UIC Error Code Transport Layer 44h */ 205/* UECT - Host UIC Error Code Transport Layer 44h */
197#define UIC_TRANSPORT_LAYER_ERROR 0x80000000 206#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
198#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F 207#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
208#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
209#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
210#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
211#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
212#define UIC_TRANSPORT_BAD_TC 0x10
213#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
214#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
199 215
200/* UECDME - Host UIC Error Code DME 48h */ 216/* UECDME - Host UIC Error Code DME 48h */
201#define UIC_DME_ERROR 0x80000000 217#define UIC_DME_ERROR 0x80000000
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 2a9da2e0ea6b..2ba2b7b47f41 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -803,7 +803,9 @@ static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd)
803static int wd719x_board_found(struct Scsi_Host *sh) 803static int wd719x_board_found(struct Scsi_Host *sh)
804{ 804{
805 struct wd719x *wd = shost_priv(sh); 805 struct wd719x *wd = shost_priv(sh);
806 char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" }; 806 static const char * const card_types[] = {
807 "Unknown card", "WD7193", "WD7197", "WD7296"
808 };
807 int ret; 809 int ret;
808 810
809 INIT_LIST_HEAD(&wd->active_scbs); 811 INIT_LIST_HEAD(&wd->active_scbs);
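The wd719x hunk makes the card-name table static const char * const: the array now lives in read-only data and is built once, instead of being re-initialized on the stack on every call with its strings reachable through writable pointers. A sketch of the idiom:

#include <linux/kernel.h>

static const char * const demo_card_types[] = {
        "Unknown card", "WD7193", "WD7197", "WD7296"
};

static const char *demo_card_name(unsigned int type)
{
        if (type >= ARRAY_SIZE(demo_card_types))
                type = 0;               /* fall back to "Unknown card" */
        return demo_card_types[type];
}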