commit     95bb335c0ebe96afe926387a1ef3a096bd884a82 (patch)
tree       56115332b4f2f7ef300c36248a6a7d20db2e639d
parent     1b4d0d8ea7b3cbd107f345ab766416f9b38ce66a (diff)
parent     9cccde93fed1ca988eb2fb17ab9194bf7b5ed1b0 (diff)
author     James Bottomley <James.Bottomley@suse.de>  2010-05-18 10:33:43 -0400
committer  James Bottomley <James.Bottomley@suse.de>  2010-05-18 10:37:41 -0400

[SCSI] Merge scsi-misc-2.6 into scsi-rc-fixes-2.6

Signed-off-by: James Bottomley <James.Bottomley@suse.de>
-rw-r--r-- MAINTAINERS | 23
-rw-r--r-- drivers/message/fusion/mptbase.c | 177
-rw-r--r-- drivers/message/fusion/mptbase.h | 5
-rw-r--r-- drivers/message/fusion/mptctl.c | 181
-rw-r--r-- drivers/message/fusion/mptfc.c | 22
-rw-r--r-- drivers/message/fusion/mptsas.c | 55
-rw-r--r-- drivers/message/fusion/mptsas.h | 2
-rw-r--r-- drivers/message/fusion/mptscsih.c | 27
-rw-r--r-- drivers/message/fusion/mptspi.c | 10
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 7
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 19
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 6
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 246
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.h | 11
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 108
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.h | 104
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 23
-rw-r--r-- drivers/scsi/3w-9xxx.c | 24
-rw-r--r-- drivers/scsi/3w-9xxx.h | 9
-rw-r--r-- drivers/scsi/3w-xxxx.c | 23
-rw-r--r-- drivers/scsi/3w-xxxx.h | 8
-rw-r--r-- drivers/scsi/Makefile | 1
-rw-r--r-- drivers/scsi/a2091.c | 309
-rw-r--r-- drivers/scsi/a2091.h | 42
-rw-r--r-- drivers/scsi/a3000.c | 285
-rw-r--r-- drivers/scsi/a3000.h | 46
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 67
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 4
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 18
-rw-r--r-- drivers/scsi/bfa/bfa_cb_ioim_macros.h | 29
-rw-r--r-- drivers/scsi/bfa/bfa_ioim.c | 22
-rw-r--r-- drivers/scsi/bfa/bfa_os_inc.h | 23
-rw-r--r-- drivers/scsi/bfa/bfad.c | 21
-rw-r--r-- drivers/scsi/bfa/bfad_attr.c | 201
-rw-r--r-- drivers/scsi/bfa/bfad_drv.h | 4
-rw-r--r-- drivers/scsi/bfa/bfad_im.c | 67
-rw-r--r-- drivers/scsi/bfa/bfad_im.h | 6
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_init.c | 11
-rw-r--r-- drivers/scsi/cxgb3i/cxgb3i_init.c | 4
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_emc.c | 2
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 194
-rw-r--r-- drivers/scsi/fcoe/libfcoe.c | 111
-rw-r--r-- drivers/scsi/fnic/fnic.h | 4
-rw-r--r-- drivers/scsi/fnic/fnic_fcs.c | 2
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 2
-rw-r--r-- drivers/scsi/gdth.c | 2
-rw-r--r-- drivers/scsi/gvp11.c | 565
-rw-r--r-- drivers/scsi/gvp11.h | 36
-rw-r--r-- drivers/scsi/hpsa.c | 8
-rw-r--r-- drivers/scsi/hpsa_cmd.h | 15
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 8
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.h | 1
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 6
-rw-r--r-- drivers/scsi/iscsi_tcp.h | 1
-rw-r--r-- drivers/scsi/libfc/fc_disc.c | 8
-rw-r--r-- drivers/scsi/libfc/fc_elsct.c | 2
-rw-r--r-- drivers/scsi/libfc/fc_exch.c | 51
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 103
-rw-r--r-- drivers/scsi/libfc/fc_libfc.h | 8
-rw-r--r-- drivers/scsi/libfc/fc_lport.c | 58
-rw-r--r-- drivers/scsi/libfc/fc_npiv.c | 7
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 195
-rw-r--r-- drivers/scsi/libiscsi_tcp.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 20
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 498
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 24
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 79
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 190
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 60
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 213
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 69
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 99
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 149
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 269
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 5
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 4
-rw-r--r-- drivers/scsi/mpt2sas/Kconfig | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_history.txt | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_init.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_tool.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.c | 90
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.h | 36
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_config.c | 8
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 46
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_ctl.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_debug.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1055
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_transport.c | 89
-rw-r--r-- drivers/scsi/mvme147.c | 169
-rw-r--r-- drivers/scsi/mvme147.h | 4
-rw-r--r-- drivers/scsi/mvsas/mv_64xx.c | 25
-rw-r--r-- drivers/scsi/mvsas/mv_94xx.c | 10
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 19
-rw-r--r-- drivers/scsi/mvsas/mv_sas.c | 201
-rw-r--r-- drivers/scsi/mvsas/mv_sas.h | 11
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 1
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 4
-rw-r--r-- drivers/scsi/pmcraid.c | 6
-rw-r--r-- drivers/scsi/qla2xxx/Makefile | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 772
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 1212
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.h | 135
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 61
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.h | 10
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 343
-rw-r--r-- drivers/scsi/qla2xxx/qla_fw.h | 106
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 126
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 724
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 21
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 879
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 540
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 266
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c | 3636
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.h | 889
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 543
-rw-r--r-- drivers/scsi/qla2xxx/qla_sup.c | 149
-rw-r--r-- drivers/scsi/qla4xxx/ql4_def.h | 48
-rw-r--r-- drivers/scsi/qla4xxx/ql4_fw.h | 46
-rw-r--r-- drivers/scsi/qla4xxx/ql4_glbl.h | 8
-rw-r--r-- drivers/scsi/qla4xxx/ql4_init.c | 374
-rw-r--r-- drivers/scsi/qla4xxx/ql4_iocb.c | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_isr.c | 23
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 337
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 135
-rw-r--r-- drivers/scsi/qla4xxx/ql4_version.h | 3
-rw-r--r-- drivers/scsi/scsi.c | 6
-rw-r--r-- drivers/scsi/scsi_debug.c | 89
-rw-r--r-- drivers/scsi/scsi_error.c | 19
-rw-r--r-- drivers/scsi/scsi_scan.c | 3
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 7
-rw-r--r-- drivers/scsi/scsi_trace.c | 284
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 30
-rw-r--r-- drivers/scsi/sd.c | 25
-rw-r--r-- drivers/scsi/wd33c93.c | 6
-rw-r--r-- drivers/scsi/wd33c93.h | 1
-rw-r--r-- include/linux/ftrace_event.h | 3
-rw-r--r-- include/scsi/Kbuild | 1
-rw-r--r-- include/scsi/fc/fc_fcp.h | 1
-rw-r--r-- include/scsi/fc_encode.h | 18
-rw-r--r-- include/scsi/libfc.h | 29
-rw-r--r-- include/scsi/libfcoe.h | 10
-rw-r--r-- include/scsi/scsi.h | 3
-rw-r--r-- include/scsi/scsi_transport_fc.h | 2
-rw-r--r-- include/trace/events/scsi.h | 345
-rw-r--r-- include/trace/ftrace.h | 3
-rw-r--r-- kernel/trace/trace_output.c | 16
159 files changed, 15290 insertions(+), 4139 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index d329b053a718..3c41de95a33d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -131,19 +131,12 @@ L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/typhoon*
 
-3W-9XXX SATA-RAID CONTROLLER DRIVER
-M: Adam Radford <linuxraid@amcc.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.amcc.com
-S: Supported
-F: drivers/scsi/3w-9xxx*
-
-3W-XXXX ATA-RAID CONTROLLER DRIVER
-M: Adam Radford <linuxraid@amcc.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.amcc.com
-S: Supported
-F: drivers/scsi/3w-xxxx*
+3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
+M: Adam Radford <linuxraid@lsi.com>
+L: linux-scsi@vger.kernel.org
+W: http://www.lsi.com
+S: Supported
+F: drivers/scsi/3w-*
 
 53C700 AND 53C700-66 SCSI DRIVER
 M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
@@ -4577,6 +4570,14 @@ S: Supported
 F: Documentation/scsi/LICENSE.qla2xxx
 F: drivers/scsi/qla2xxx/
 
+QLOGIC QLA4XXX iSCSI DRIVER
+M: Ravi Anand <ravi.anand@qlogic.com>
+M: Vikas Chaudhary <vikas.chaudhary@qlogic.com>
+M: iscsi-driver@qlogic.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/qla4xxx/
+
 QLOGIC QLA3XXX NETWORK DRIVER
 M: Ron Mercer <ron.mercer@qlogic.com>
 M: linux-driver@qlogic.com
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5382b5a44aff..a6a57011ba6c 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5064,7 +5064,7 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
 	if (!timeleft) {
 		printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
 		    ioc->name, __func__);
-		mpt_HardResetHandler(ioc, CAN_SLEEP);
+		mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
 		mpt_free_msg_frame(ioc, mf);
 	}
 	goto out;
@@ -6456,10 +6456,15 @@ out:
 		issue_hard_reset = 0;
 		printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
 		    ioc->name, __func__);
-		mpt_HardResetHandler(ioc, CAN_SLEEP);
+		if (retry_count == 0) {
+			if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
+				retry_count++;
+		} else
+			mpt_HardResetHandler(ioc, CAN_SLEEP);
+
 		mpt_free_msg_frame(ioc, mf);
 		/* attempt one retry for a timed out command */
-		if (!retry_count) {
+		if (retry_count < 2) {
 			printk(MYIOC_s_INFO_FMT
 			    "Attempting Retry Config request"
 			    " type 0x%x, page 0x%x,"
@@ -6904,6 +6909,172 @@ mpt_halt_firmware(MPT_ADAPTER *ioc)
 }
 EXPORT_SYMBOL(mpt_halt_firmware);
 
+/**
+ *	mpt_SoftResetHandler - Issues a less expensive reset
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ *	Returns 0 for SUCCESS or -1 if FAILED.
+ *
+ *	Message Unit Reset - instructs the IOC to reset the Reply Post and
+ *	Free FIFO's. All the Message Frames on Reply Free FIFO are discarded.
+ *	All posted buffers are freed, and event notification is turned off.
+ *	IOC doesnt reply to any outstanding request. This will transfer IOC
+ *	to READY state.
+ **/
+int
+mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+	int		 rc;
+	int		 ii;
+	u8		 cb_idx;
+	unsigned long	 flags;
+	u32		 ioc_state;
+	unsigned long	 time_count;
+
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n",
+		ioc->name));
+
+	ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+
+	if (mpt_fwfault_debug)
+		mpt_halt_firmware(ioc);
+
+	if (ioc_state == MPI_IOC_STATE_FAULT ||
+	    ioc_state == MPI_IOC_STATE_RESET) {
+		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "skipping, either in FAULT or RESET state!\n", ioc->name));
+		return -1;
+	}
+
+	if (ioc->bus_type == FC) {
+		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		    "skipping, because the bus type is FC!\n", ioc->name));
+		return -1;
+	}
+
+	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+	if (ioc->ioc_reset_in_progress) {
+		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+		return -1;
+	}
+	ioc->ioc_reset_in_progress = 1;
+	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+	rc = -1;
+
+	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+		if (MptResetHandlers[cb_idx])
+			mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
+	}
+
+	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+	if (ioc->taskmgmt_in_progress) {
+		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+		return -1;
+	}
+	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+	/* Disable reply interrupts (also blocks FreeQ) */
+	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+	ioc->active = 0;
+	time_count = jiffies;
+
+	rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
+
+	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+		if (MptResetHandlers[cb_idx])
+			mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
+	}
+
+	if (rc)
+		goto out;
+
+	ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+	if (ioc_state != MPI_IOC_STATE_READY)
+		goto out;
+
+	for (ii = 0; ii < 5; ii++) {
+		/* Get IOC facts! Allow 5 retries */
+		rc = GetIocFacts(ioc, sleepFlag,
+			MPT_HOSTEVENT_IOC_RECOVER);
+		if (rc == 0)
+			break;
+		if (sleepFlag == CAN_SLEEP)
+			msleep(100);
+		else
+			mdelay(100);
+	}
+	if (ii == 5)
+		goto out;
+
+	rc = PrimeIocFifos(ioc);
+	if (rc != 0)
+		goto out;
+
+	rc = SendIocInit(ioc, sleepFlag);
+	if (rc != 0)
+		goto out;
+
+	rc = SendEventNotification(ioc, 1, sleepFlag);
+	if (rc != 0)
+		goto out;
+
+	if (ioc->hard_resets < -1)
+		ioc->hard_resets++;
+
+	/*
+	 * At this point, we know soft reset succeeded.
+	 */
+
+	ioc->active = 1;
+	CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
+
+ out:
+	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+	ioc->ioc_reset_in_progress = 0;
+	ioc->taskmgmt_quiesce_io = 0;
+	ioc->taskmgmt_in_progress = 0;
+	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+	if (ioc->active) {	/* otherwise, hard reset coming */
+		for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+			if (MptResetHandlers[cb_idx])
+				mpt_signal_reset(cb_idx, ioc,
+					MPT_IOC_POST_RESET);
+		}
+	}
+
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+		"SoftResetHandler: completed (%d seconds): %s\n",
+		ioc->name, jiffies_to_msecs(jiffies - time_count)/1000,
+		((rc == 0) ? "SUCCESS" : "FAILED")));
+
+	return rc;
+}
+
+/**
+ *	mpt_Soft_Hard_ResetHandler - Try less expensive reset
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ *	Returns 0 for SUCCESS or -1 if FAILED.
+ *	Try for softreset first, only if it fails go for expensive
+ *	HardReset.
+ **/
+int
+mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag) {
+	int ret = -1;
+
+	ret = mpt_SoftResetHandler(ioc, sleepFlag);
+	if (ret == 0)
+		return ret;
+	ret = mpt_HardResetHandler(ioc, sleepFlag);
+	return ret;
+}
+EXPORT_SYMBOL(mpt_Soft_Hard_ResetHandler);
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  *	Reset Handling
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 9718c8f2e959..b613eb3d4706 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.14"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.14"
+#define MPT_LINUX_VERSION_COMMON	"3.04.15"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.15"
 #define WHAT_MAGIC_STRING	"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
@@ -940,6 +940,7 @@ extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
 extern u32	 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
 extern void	 mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
 extern int	 mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
+extern int	 mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
 extern int	 mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
 extern int	 mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
 extern void	 mpt_free_fw_memory(MPT_ADAPTER *ioc);
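
A minimal sketch (not part of the merge) of the escalation pattern the new export above enables: callers that used to go straight to mpt_HardResetHandler() on a timeout can call the combined handler, which tries the cheaper message-unit (soft) reset and falls back to a hard reset internally only if that fails. example_cmd_timeout is a hypothetical caller; the signatures match the declarations above.

static void example_cmd_timeout(MPT_ADAPTER *ioc)
{
	/* soft reset first; hard reset fallback happens inside the helper */
	if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
		printk(KERN_ERR "%s: reset escalation failed\n", ioc->name);
}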
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index caa8f568a41c..f06b29193b4e 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -128,7 +128,6 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
 		struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
 static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
 		struct buflist *buflist, MPT_ADAPTER *ioc);
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
 
 /*
  * Reset Handler cleanup function
@@ -275,45 +274,6 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
 	return 1;
 }
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* mptctl_timeout_expired
- *
- * Expecting an interrupt, however timed out.
- *
- */
-static void
-mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
-{
-	unsigned long flags;
-
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
-		ioc->name, __func__));
-
-	if (mpt_fwfault_debug)
-		mpt_halt_firmware(ioc);
-
-	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
-	if (ioc->ioc_reset_in_progress) {
-		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-		CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
-		mpt_free_msg_frame(ioc, mf);
-		return;
-	}
-	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-
-
-	if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
-		return;
-
-	/* Issue a reset for this device.
-	 * The IOC is not responding.
-	 */
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
-		ioc->name));
-	CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
-	mpt_HardResetHandler(ioc, CAN_SLEEP);
-	mpt_free_msg_frame(ioc, mf);
-}
-
 
 static int
 mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
@@ -343,12 +303,8 @@ mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 	return 0;
 }
 
-/* mptctl_bus_reset
- *
- * Bus reset code.
- *
- */
-static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
+static int
+mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
 {
 	MPT_FRAME_HDR	*mf;
 	SCSITaskMgmt_t	*pScsiTm;
@@ -359,13 +315,6 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
 	unsigned long	 time_count;
 	u16		 iocstatus;
 
-	/* bus reset is only good for SCSI IO, RAID PASSTHRU */
-	if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
-	    function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
-		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
-			"TaskMgmt, not SCSI_IO!!\n", ioc->name));
-		return -EPERM;
-	}
 
 	mutex_lock(&ioc->taskmgmt_cmds.mutex);
 	if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
@@ -375,15 +324,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
 
 	retval = 0;
 
-	/* Send request
-	 */
 	mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
 	if (mf == NULL) {
-		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
-			"TaskMgmt, no msg frames!!\n", ioc->name));
+		dtmprintk(ioc,
+			printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n",
+			ioc->name));
 		mpt_clear_taskmgmt_in_progress_flag(ioc);
 		retval = -ENOMEM;
-		goto mptctl_bus_reset_done;
+		goto tm_done;
 	}
 
 	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
@@ -392,10 +340,13 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
 	pScsiTm = (SCSITaskMgmt_t *) mf;
 	memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
 	pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
-	pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
-	pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
-	pScsiTm->TargetID = 0;
-	pScsiTm->Bus = 0;
+	pScsiTm->TaskType = tm_type;
+	if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) &&
+		(ioc->bus_type == FC))
+		pScsiTm->MsgFlags =
+			MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
+	pScsiTm->TargetID = target_id;
+	pScsiTm->Bus = bus_id;
 	pScsiTm->ChainOffset = 0;
 	pScsiTm->Reserved = 0;
 	pScsiTm->Reserved1 = 0;
@@ -413,17 +364,16 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
 		timeout = 30;
 		break;
 	case SPI:
 	default:
-		timeout = 2;
+		timeout = 10;
 		break;
 	}
 
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-		"TaskMgmt type=%d timeout=%ld\n",
-		ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
+	dtmprintk(ioc,
+		printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n",
+		ioc->name, tm_type, timeout));
 
 	INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
-	CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
 	time_count = jiffies;
 	if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
 	    (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
@@ -432,17 +382,20 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
 		retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
 		    sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
 		if (retval != 0) {
-			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+			dfailprintk(ioc,
+				printk(MYIOC_s_ERR_FMT
 				"TaskMgmt send_handshake FAILED!"
 				" (ioc %p, mf %p, rc=%d) \n", ioc->name,
 				ioc, mf, retval));
+			mpt_free_msg_frame(ioc, mf);
 			mpt_clear_taskmgmt_in_progress_flag(ioc);
-			goto mptctl_bus_reset_done;
+			goto tm_done;
 		}
 	}
 
 	/* Now wait for the command to complete */
 	ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
+
 	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
 		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 			"TaskMgmt failed\n", ioc->name));
@@ -452,14 +405,14 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
 			retval = 0;
 		else
 			retval = -1; /* return failure */
-		goto mptctl_bus_reset_done;
+		goto tm_done;
 	}
 
 	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
 		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 			"TaskMgmt failed\n", ioc->name));
 		retval = -1; /* return failure */
-		goto mptctl_bus_reset_done;
+		goto tm_done;
 	}
 
 	pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
@@ -467,7 +420,7 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
467 "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, " 420 "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
468 "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, " 421 "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
469 "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus, 422 "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
470 pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 423 pScsiTmReply->TargetID, tm_type,
471 le16_to_cpu(pScsiTmReply->IOCStatus), 424 le16_to_cpu(pScsiTmReply->IOCStatus),
472 le32_to_cpu(pScsiTmReply->IOCLogInfo), 425 le32_to_cpu(pScsiTmReply->IOCLogInfo),
473 pScsiTmReply->ResponseCode, 426 pScsiTmReply->ResponseCode,
@@ -485,13 +438,71 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
 		retval = -1; /* return failure */
 	}
 
-
- mptctl_bus_reset_done:
+ tm_done:
 	mutex_unlock(&ioc->taskmgmt_cmds.mutex);
 	CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
 	return retval;
 }
 
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptctl_timeout_expired
+ *
+ * Expecting an interrupt, however timed out.
+ *
+ */
+static void
+mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+	unsigned long flags;
+	int ret_val = -1;
+	SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf;
+	u8 function = mf->u.hdr.Function;
+
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
+		ioc->name, __func__));
+
+	if (mpt_fwfault_debug)
+		mpt_halt_firmware(ioc);
+
+	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+	if (ioc->ioc_reset_in_progress) {
+		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+		CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+		mpt_free_msg_frame(ioc, mf);
+		return;
+	}
+	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+
+	CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+
+	if (ioc->bus_type == SAS) {
+		if (function == MPI_FUNCTION_SCSI_IO_REQUEST)
+			ret_val = mptctl_do_taskmgmt(ioc,
+				MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+				scsi_req->Bus, scsi_req->TargetID);
+		else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
+			ret_val = mptctl_do_taskmgmt(ioc,
+				MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+				scsi_req->Bus, 0);
+		if (!ret_val)
+			return;
+	} else {
+		if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+		    (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH))
+			ret_val = mptctl_do_taskmgmt(ioc,
+				MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+				scsi_req->Bus, 0);
+		if (!ret_val)
+			return;
+	}
+
+	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n",
+		ioc->name));
+	mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+	mpt_free_msg_frame(ioc, mf);
+}
+
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /* mptctl_ioc_reset
@@ -1318,6 +1329,8 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
 	if (ioc->sh) {
 		shost_for_each_device(sdev, ioc->sh) {
 			vdevice = sdev->hostdata;
+			if (vdevice == NULL || vdevice->vtarget == NULL)
+				continue;
 			if (vdevice->vtarget->tflags &
 			    MPT_TARGET_FLAGS_RAID_COMPONENT)
 				continue;
@@ -1439,6 +1452,8 @@ mptctl_gettargetinfo (unsigned long arg)
 			if (!maxWordsLeft)
 				continue;
 			vdevice = sdev->hostdata;
+			if (vdevice == NULL || vdevice->vtarget == NULL)
+				continue;
 			if (vdevice->vtarget->tflags &
 			    MPT_TARGET_FLAGS_RAID_COMPONENT)
 				continue;
@@ -1967,6 +1982,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
 			struct scsi_target *starget = scsi_target(sdev);
 			VirtTarget *vtarget = starget->hostdata;
 
+			if (vtarget == NULL)
+				continue;
+
 			if ((pScsiReq->TargetID == vtarget->id) &&
 			    (pScsiReq->Bus == vtarget->channel) &&
 			    (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
@@ -2991,6 +3009,14 @@ static int __init mptctl_init(void)
 	}
 
 	mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
+	if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
+		printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
+		mpt_deregister(mptctl_id);
+		misc_deregister(&mptctl_miscdev);
+		err = -EBUSY;
+		goto out_fail;
+	}
+
 	mpt_reset_register(mptctl_id, mptctl_ioc_reset);
 	mpt_event_register(mptctl_id, mptctl_event_process);
 
2996 3022
@@ -3010,12 +3036,15 @@ static void mptctl_exit(void)
3010 printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n", 3036 printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
3011 mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor); 3037 mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
3012 3038
3039 /* De-register event handler from base module */
3040 mpt_event_deregister(mptctl_id);
3041
3013 /* De-register reset handler from base module */ 3042 /* De-register reset handler from base module */
3014 mpt_reset_deregister(mptctl_id); 3043 mpt_reset_deregister(mptctl_id);
3015 3044
3016 /* De-register callback handler from base module */ 3045 /* De-register callback handler from base module */
3046 mpt_deregister(mptctl_taskmgmt_id);
3017 mpt_deregister(mptctl_id); 3047 mpt_deregister(mptctl_id);
3018 mpt_reset_deregister(mptctl_taskmgmt_id);
3019 3048
3020 mpt_device_driver_deregister(MPTCTL_DRIVER); 3049 mpt_device_driver_deregister(MPTCTL_DRIVER);
3021 3050
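
A minimal sketch (not part of the merge) of the dispatch the reworked mptctl_timeout_expired() performs with the generalized helper above: on SAS, a timed-out plain SCSI I/O gets a target reset scoped to the failing device, while RAID passthrough and non-SAS buses fall back to a bus reset. demo_escalate is a hypothetical wrapper; the task types and the mptctl_do_taskmgmt() signature come from the hunks above.

static int demo_escalate(MPT_ADAPTER *ioc, SCSIIORequest_t *scsi_req)
{
	if (ioc->bus_type == SAS)
		/* narrow recovery: reset just the target that timed out */
		return mptctl_do_taskmgmt(ioc,
			MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
			scsi_req->Bus, scsi_req->TargetID);
	/* wider recovery: reset the whole bus */
	return mptctl_do_taskmgmt(ioc,
		MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, scsi_req->Bus, 0);
}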
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 33f7256055b1..b5f03ad81568 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -482,6 +482,7 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
 		if (vtarget) {
 			vtarget->id = pg0->CurrentTargetID;
 			vtarget->channel = pg0->CurrentBus;
+			vtarget->deleted = 0;
 		}
 	}
 	*((struct mptfc_rport_info **)rport->dd_data) = ri;
@@ -1092,6 +1093,8 @@ mptfc_setup_reset(struct work_struct *work)
 		container_of(work, MPT_ADAPTER, fc_setup_reset_work);
 	u64			pn;
 	struct mptfc_rport_info *ri;
+	struct scsi_target      *starget;
+	VirtTarget              *vtarget;
 
 	/* reset about to happen, delete (block) all rports */
 	list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1099,6 +1102,12 @@ mptfc_setup_reset(struct work_struct *work)
 			ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED;
 			fc_remote_port_delete(ri->rport);	/* won't sleep */
 			ri->rport = NULL;
+			starget = ri->starget;
+			if (starget) {
+				vtarget = starget->hostdata;
+				if (vtarget)
+					vtarget->deleted = 1;
+			}
 
 			pn = (u64)ri->pg0.WWPN.High << 32 |
 			     (u64)ri->pg0.WWPN.Low;
@@ -1119,6 +1128,8 @@ mptfc_rescan_devices(struct work_struct *work)
 	int			ii;
 	u64			pn;
 	struct mptfc_rport_info *ri;
+	struct scsi_target      *starget;
+	VirtTarget              *vtarget;
 
 	/* start by tagging all ports as missing */
 	list_for_each_entry(ri, &ioc->fc_rports, list) {
@@ -1146,6 +1157,12 @@ mptfc_rescan_devices(struct work_struct *work)
 				   MPT_RPORT_INFO_FLAGS_MISSING);
 			fc_remote_port_delete(ri->rport);	/* won't sleep */
 			ri->rport = NULL;
+			starget = ri->starget;
+			if (starget) {
+				vtarget = starget->hostdata;
+				if (vtarget)
+					vtarget->deleted = 1;
+			}
 
 			pn = (u64)ri->pg0.WWPN.High << 32 |
 			     (u64)ri->pg0.WWPN.Low;
@@ -1358,6 +1375,9 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 	unsigned long flags;
 	int rc=1;
 
+	if (ioc->bus_type != FC)
+		return 0;
+
 	devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
 			ioc->name, event));
 
@@ -1396,7 +1416,7 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 	unsigned long flags;
 
 	rc = mptscsih_ioc_reset(ioc,reset_phase);
-	if (rc == 0)
+	if ((ioc->bus_type != FC) || (!rc))
 		return rc;
 
 
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 76687126b573..ac000e83db0e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1894,7 +1894,7 @@ static struct scsi_host_template mptsas_driver_template = {
 	.module				= THIS_MODULE,
 	.proc_name			= "mptsas",
 	.proc_info			= mptscsih_proc_info,
-	.name				= "MPT SPI Host",
+	.name				= "MPT SAS Host",
 	.info				= mptscsih_info,
 	.queuecommand			= mptsas_qcmd,
 	.target_alloc			= mptsas_target_alloc,
@@ -2038,11 +2038,13 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
 
 	timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
 			10 * HZ);
-	if (!timeleft) {
-		/* On timeout reset the board */
+	if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		error = -ETIME;
 		mpt_free_msg_frame(ioc, mf);
-		mpt_HardResetHandler(ioc, CAN_SLEEP);
-		error = -ETIMEDOUT;
+		if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+			goto out_unlock;
+		if (!timeleft)
+			mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
 		goto out_unlock;
 	}
 
@@ -2223,11 +2225,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
 
 	timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
-	if (!timeleft) {
-		printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
-		/* On timeout reset the board */
-		mpt_HardResetHandler(ioc, CAN_SLEEP);
-		ret = -ETIMEDOUT;
+	if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+		ret = -ETIME;
+		mpt_free_msg_frame(ioc, mf);
+		mf = NULL;
+		if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+			goto unmap;
+		if (!timeleft)
+			mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
 		goto unmap;
 	}
 	mf = NULL;
@@ -2518,6 +2523,12 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
 	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 
 	error = mpt_config(ioc, &cfg);
+
+	if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+		error = -ENODEV;
+		goto out_free_consistent;
+	}
+
 	if (error)
 		goto out_free_consistent;
 
@@ -2594,14 +2605,14 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
 	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 
 	error = mpt_config(ioc, &cfg);
-	if (error)
-		goto out_free_consistent;
-
-	if (!buffer->NumPhys) {
+	if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
 		error = -ENODEV;
 		goto out_free_consistent;
 	}
 
+	if (error)
+		goto out_free_consistent;
+
 	/* save config data */
 	port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
 	port_info->phy_info = kcalloc(port_info->num_phys,
@@ -2677,7 +2688,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
 
 	if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
 		error = -ENODEV;
-		goto out;
+		goto out_free_consistent;
 	}
 
 	if (error)
@@ -2833,7 +2844,7 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
 		if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
 			goto out_free;
 		if (!timeleft)
-			mpt_HardResetHandler(ioc, CAN_SLEEP);
+			mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
 		goto out_free;
 	}
 
@@ -4098,6 +4109,7 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
 	cfg.pageAddr = (channel << 8) + id;
 	cfg.cfghdr.hdr = &hdr;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	if (mpt_config(ioc, &cfg) != 0)
 		goto out;
@@ -4717,7 +4729,7 @@ mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
 	if (issue_reset) {
 		printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
 		    ioc->name, __func__);
-		mpt_HardResetHandler(ioc, CAN_SLEEP);
+		mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
 	}
 	mptsas_free_fw_event(ioc, fw_event);
 }
@@ -4779,6 +4791,9 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
 	struct fw_event_work *fw_event;
 	unsigned long delay;
 
+	if (ioc->bus_type != SAS)
+		return 0;
+
 	/* events turned off due to host reset or driver unloading */
 	if (ioc->fw_events_off)
 		return 0;
@@ -5073,6 +5088,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
 	struct mptsas_portinfo *p, *n;
 	int i;
 
+	if (!ioc->sh) {
+		printk(MYIOC_s_INFO_FMT "IOC is in Target mode\n", ioc->name);
+		mpt_detach(pdev);
+		return;
+	}
+
 	mptsas_shutdown(pdev);
 
 	mptsas_del_device_components(ioc);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 953c2bfcf6aa..7b249edbda78 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -110,7 +110,7 @@ struct fw_event_work {
 	MPT_ADAPTER	*ioc;
 	u32			event;
 	u8			retries;
-	u8			event_data[1];
+	u8			__attribute__((aligned(4))) event_data[1];
 };
 
 struct mptsas_discovery_event {
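
The one-line mptsas.h change above is an alignment fix rather than a behavioral one: event_data[] is a trailing byte buffer that gets cast to wider MPI event structures, so forcing 4-byte alignment keeps those casts safe on strict-alignment architectures. A minimal illustration, assuming that usage (demo_event and DEMO_EVENT_DATA_32 are hypothetical names):

struct demo_event {
	u32 event;
	u8  retries;
	u8  __attribute__((aligned(4))) event_data[1];	/* starts on a 4-byte boundary */
};

/* legal only because event_data is explicitly 4-byte aligned */
#define DEMO_EVENT_DATA_32(ev)	((u32 *)(ev)->event_data)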
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6796597dcee0..7bd4c0fc23cc 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1149,11 +1149,6 @@ mptscsih_remove(struct pci_dev *pdev)
 	MPT_SCSI_HOST *hd;
 	int sz1;
 
-	if(!host) {
-		mpt_detach(pdev);
-		return;
-	}
-
 	scsi_remove_host(host);
 
 	if((hd = shost_priv(host)) == NULL)
@@ -1711,7 +1706,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
 	if (issue_hard_reset) {
 		printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
 			ioc->name, __func__);
-		retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+		retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
 		mpt_free_msg_frame(ioc, mf);
 	}
 
@@ -1728,6 +1723,7 @@ mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
 	case FC:
 		return 40;
 	case SAS:
+		return 30;
 	case SPI:
 	default:
 		return 10;
@@ -1777,7 +1773,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 		    ioc->name, SCpnt));
 		SCpnt->result = DID_NO_CONNECT << 16;
 		SCpnt->scsi_done(SCpnt);
-		retval = 0;
+		retval = SUCCESS;
 		goto out;
 	}
 
@@ -1792,6 +1788,17 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 		goto out;
 	}
 
+	/* Task aborts are not supported for volumes.
+	 */
+	if (vdevice->vtarget->raidVolume) {
+		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+			"task abort: raid volume (sc=%p)\n",
+			ioc->name, SCpnt));
+		SCpnt->result = DID_RESET << 16;
+		retval = FAILED;
+		goto out;
+	}
+
 	/* Find this command
 	 */
 	if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) {
@@ -1991,7 +1998,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
 	/* If our attempts to reset the host failed, then return a failed
 	 * status.  The host will be taken off line by the SCSI mid-layer.
 	 */
-	retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+	retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
 	if (retval < 0)
 		status = FAILED;
 	else
@@ -2344,6 +2351,8 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
 	starget = scsi_target(sdev);
 	vtarget = starget->hostdata;
 	vdevice = sdev->hostdata;
+	if (!vdevice)
+		return;
 
 	mptscsih_search_running_cmds(hd, vdevice);
 	vtarget->num_luns--;
@@ -3040,7 +3049,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 	if (!timeleft) {
 		printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
 		    ioc->name, __func__);
-		mpt_HardResetHandler(ioc, CAN_SLEEP);
+		mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
 		mpt_free_msg_frame(ioc, mf);
 	}
 	goto out;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index e44365193fdf..1abaa5d01ae3 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -210,6 +210,10 @@ mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
 	target->maxOffset = offset;
 	target->maxWidth = width;
 
+	spi_min_period(scsi_target(sdev)) = factor;
+	spi_max_offset(scsi_target(sdev)) = offset;
+	spi_max_width(scsi_target(sdev)) = width;
+
 	target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
 
 	/* Disable unused features.
@@ -558,6 +562,7 @@ static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
 	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 	cfg.dir = 0;
 	cfg.pageAddr = starget->id;
+	cfg.timeout = 60;
 
 	if (mpt_config(ioc, &cfg)) {
 		starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name);
@@ -1152,6 +1157,9 @@ mptspi_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 	u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
 	struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
 
+	if (ioc->bus_type != SPI)
+		return 0;
+
 	if (hd && event ==  MPI_EVENT_INTEGRATED_RAID) {
 		int reason
 			= (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
@@ -1283,6 +1291,8 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 	int rc;
 
 	rc = mptscsih_ioc_reset(ioc, reset_phase);
+	if ((ioc->bus_type != SPI) || (!rc))
+		return rc;
 
 	/* only try to do a renegotiation if we're properly set up
 	 * if we get an ioc fault on bringup, ioc->sh will be NULL */
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1e6183a86ce5..e331df2122f7 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -425,7 +425,8 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 {
 	while (atomic_read(&adapter->stat_miss) > 0)
 		if (zfcp_fsf_status_read(adapter->qdio)) {
-			if (atomic_read(&adapter->stat_miss) >= 16) {
+			if (atomic_read(&adapter->stat_miss) >=
+			    adapter->stat_read_buf_num) {
 				zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
 							NULL);
 				return 1;
@@ -545,6 +546,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 			       &zfcp_sysfs_adapter_attrs))
 		goto failed;
 
+	/* report size limit per scatter-gather segment */
+	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
 	if (!zfcp_adapter_scsi_register(adapter))
 		return adapter;
 
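
The zfcp_aux.c hunk above wires the adapter's struct device_dma_parameters into the ccw device, so the DMA and block layers can see the SBALE size limit when merging scatter-gather segments. A minimal sketch of how generic code picks that limit up, assuming this wiring (demo_seg_limit is a hypothetical helper; dma_get_max_seg_size() is the standard accessor):

#include <linux/dma-mapping.h>

static unsigned int demo_seg_limit(struct device *dev)
{
	/* returns ZFCP_QDIO_SBALE_LEN once dev->dma_parms is set as above */
	return dma_get_max_seg_size(dev);
}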
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7131c7db1f04..9fa1b064893e 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -44,23 +44,6 @@ struct zfcp_reqlist;
 /********************* SCSI SPECIFIC DEFINES *********************************/
 #define ZFCP_SCSI_ER_TIMEOUT                    (10*HZ)
 
-/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
-
-/* DMQ bug workaround: don't use last SBALE */
-#define ZFCP_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
-
-/* index of last SBALE (with respect to DMQ bug workaround) */
-#define ZFCP_LAST_SBALE_PER_SBAL	(ZFCP_MAX_SBALES_PER_SBAL - 1)
-
-/* max. number of (data buffer) SBALEs in largest SBAL chain */
-#define ZFCP_MAX_SBALES_PER_REQ		\
-	(FSF_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL - 2)
-	/* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
-
-#define ZFCP_MAX_SECTORS (ZFCP_MAX_SBALES_PER_REQ * 8)
-	/* max. number of (data buffer) SBALEs in largest SBAL chain
-	   multiplied with number of sectors per 4k block */
-
 /********************* FSF SPECIFIC DEFINES *********************************/
 
 /* ATTENTION: value must not be used by hardware */
@@ -181,6 +164,7 @@ struct zfcp_adapter {
 					     stack abort/command
 					     completion races */
 	atomic_t		stat_miss;	   /* # missing status reads*/
+	unsigned int		stat_read_buf_num;
 	struct work_struct	stat_work;
 	atomic_t		status;	   /* status of this adapter */
 	struct list_head	erp_ready_head;	/* error recovery for this
@@ -205,6 +189,7 @@ struct zfcp_adapter {
 	struct work_struct	scan_work;
 	struct service_level	service_level;
 	struct workqueue_struct	*work_queue;
+	struct device_dma_parameters dma_parms;
 };
 
 struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 0be5e7ea2828..e3dbeda97179 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -714,7 +714,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
 	if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
 		return ZFCP_ERP_FAILED;
 
-	atomic_set(&act->adapter->stat_miss, 16);
+	atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
 	if (zfcp_status_read_refill(act->adapter))
 		return ZFCP_ERP_FAILED;
 
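
Together with the zfcp_fsf.c hunk further down, this replaces the hard-coded 16 status-read buffers with a channel-reported value: the exchange-config response supplies status_read_buf_num, zfcp clamps it to a minimum of 16, and recovery then refills exactly that many buffers. A minimal sketch of the clamp, under that reading (demo_clamp is a hypothetical name; the driver uses max() inline):

static u16 demo_clamp(u16 reported)
{
	return reported > 16 ? reported : 16;	/* max(reported, (u16)16) */
}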
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 8786a79c7f8f..48a8f93b72f5 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #ifndef ZFCP_EXT_H
@@ -143,9 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
 /* zfcp_qdio.c */
 extern int zfcp_qdio_setup(struct zfcp_adapter *);
 extern void zfcp_qdio_destroy(struct zfcp_qdio *);
+extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
 extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
-extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
-				   struct zfcp_qdio_req *, unsigned long,
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
 				   struct scatterlist *, int);
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 2a1cbb74b99b..6f8ab43a4856 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -400,7 +400,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
 	struct zfcp_adapter *adapter = port->adapter;
 	int ret;
 
-	adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
+	adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
 	if (!adisc)
 		return -ENOMEM;
 
@@ -493,7 +493,7 @@ static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
 	if (!gpn_ft)
 		return NULL;
 
-	req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
+	req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
 	if (!req) {
 		kfree(gpn_ft);
 		gpn_ft = NULL;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b3b1d2f79398..9ac6a6e4a604 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -496,6 +496,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 
 	adapter->hydra_version = bottom->adapter_type;
 	adapter->timer_ticks = bottom->timer_interval;
+	adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16);
 
 	if (fc_host_permanent_port_name(shost) == -1)
 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
@@ -640,37 +641,6 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
 	}
 }
 
-static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
-{
-	struct zfcp_qdio_queue *req_q = &qdio->req_q;
-
-	spin_lock_bh(&qdio->req_q_lock);
-	if (atomic_read(&req_q->count))
-		return 1;
-	spin_unlock_bh(&qdio->req_q_lock);
-	return 0;
-}
-
-static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
-{
-	struct zfcp_adapter *adapter = qdio->adapter;
-	long ret;
-
-	spin_unlock_bh(&qdio->req_q_lock);
-	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
-			       zfcp_fsf_sbal_check(qdio), 5 * HZ);
-	if (ret > 0)
-		return 0;
-	if (!ret) {
-		atomic_inc(&qdio->req_q_full);
-		/* assume hanging outbound queue, try queue recovery */
-		zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
-	}
-
-	spin_lock_bh(&qdio->req_q_lock);
-	return -EIO;
-}
-
 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
 {
 	struct zfcp_fsf_req *req;
@@ -705,10 +675,9 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
 }
 
 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
-						u32 fsf_cmd, mempool_t *pool)
+						u32 fsf_cmd, u32 sbtype,
+						mempool_t *pool)
 {
-	struct qdio_buffer_element *sbale;
-	struct zfcp_qdio_queue *req_q = &qdio->req_q;
 	struct zfcp_adapter *adapter = qdio->adapter;
 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
 
@@ -725,14 +694,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
 	req->adapter = adapter;
 	req->fsf_command = fsf_cmd;
 	req->req_id = adapter->req_no;
-	req->qdio_req.sbal_number = 1;
-	req->qdio_req.sbal_first = req_q->first;
-	req->qdio_req.sbal_last = req_q->first;
-	req->qdio_req.sbale_curr = 1;
-
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].addr = (void *) req->req_id;
-	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
 
 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
 		if (likely(pool))
@@ -753,10 +714,11 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
 		req->qtcb->header.req_handle = req->req_id;
 		req->qtcb->header.fsf_command = req->fsf_command;
-		sbale[1].addr = (void *) req->qtcb;
-		sbale[1].length = sizeof(struct fsf_qtcb);
 	}
 
+	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
+			   req->qtcb, sizeof(struct fsf_qtcb));
+
 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
 		zfcp_fsf_req_free(req);
 		return ERR_PTR(-EIO);
@@ -803,24 +765,19 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	struct zfcp_adapter *adapter = qdio->adapter;
 	struct zfcp_fsf_req *req;
 	struct fsf_status_read_buffer *sr_buf;
-	struct qdio_buffer_element *sbale;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
 				  adapter->pool.status_read_req);
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
-	req->qdio_req.sbale_curr = 2;
-
 	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
 	if (!sr_buf) {
 		retval = -ENOMEM;
@@ -828,9 +785,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	}
 	memset(sr_buf, 0, sizeof(*sr_buf));
 	req->data = sr_buf;
-	sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
-	sbale->addr = (void *) sr_buf;
-	sbale->length = sizeof(*sr_buf);
+
+	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	retval = zfcp_fsf_req_send(req);
 	if (retval)
@@ -907,14 +864,14 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
 						struct zfcp_unit *unit)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.scsi_abort);
 	if (IS_ERR(req)) {
 		req = NULL;
@@ -925,9 +882,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
 			      ZFCP_STATUS_COMMON_UNBLOCKED)))
 		goto out_error_free;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->data = unit;
 	req->handler = zfcp_fsf_abort_fcp_command_handler;
@@ -996,21 +951,14 @@ skip_fsfstatus:
 	ct->handler(ct->handler_data);
 }
 
-static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
+static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
+					    struct zfcp_qdio_req *q_req,
 					    struct scatterlist *sg_req,
 					    struct scatterlist *sg_resp)
 {
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
-	sbale[2].addr = sg_virt(sg_req);
-	sbale[2].length = sg_req->length;
-	sbale[3].addr = sg_virt(sg_resp);
-	sbale[3].length = sg_resp->length;
-	sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
-}
-
-static int zfcp_fsf_one_sbal(struct scatterlist *sg)
-{
-	return sg_is_last(sg) && sg->length <= PAGE_SIZE;
+	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
+	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
+	zfcp_qdio_set_sbale_last(qdio, q_req);
 }
 
 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
@@ -1019,35 +967,34 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
 				       int max_sbals)
 {
 	struct zfcp_adapter *adapter = req->adapter;
-	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
-							       &req->qdio_req);
 	u32 feat = adapter->adapter_features;
 	int bytes;
 
 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
-		if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
+		if (!zfcp_qdio_sg_one_sbale(sg_req) ||
+		    !zfcp_qdio_sg_one_sbale(sg_resp))
 			return -EOPNOTSUPP;
 
-		zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+		zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+						sg_req, sg_resp);
 		return 0;
 	}
 
 	/* use single, unchained SBAL if it can hold the request */
-	if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
-		zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
+	if (zfcp_qdio_sg_one_sbale(sg_req) || zfcp_qdio_sg_one_sbale(sg_resp)) {
+		zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
+						sg_req, sg_resp);
 		return 0;
 	}
 
 	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
-					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_req, max_sbals);
 	if (bytes <= 0)
 		return -EIO;
 	req->qtcb->bottom.support.req_buf_length = bytes;
-	req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+	zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
 
 	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
-					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_resp, max_sbals);
 	req->qtcb->bottom.support.resp_buf_length = bytes;
 	if (bytes <= 0)
@@ -1091,10 +1038,11 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
 	int ret = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
+				  SBAL_FLAGS0_TYPE_WRITE_READ, pool);
 
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
@@ -1103,7 +1051,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
-				    FSF_MAX_SBALS_PER_REQ, timeout);
+				    ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
 	if (ret)
 		goto failed_send;
 
@@ -1187,10 +1135,11 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
 	int ret = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
+				  SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
 
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
@@ -1224,16 +1173,16 @@ out:
 
 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req;
 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
@@ -1242,9 +1191,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->qtcb->bottom.config.feature_selection =
 			FSF_FEATURE_CFDC |
@@ -1269,24 +1216,22 @@ out:
 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 				       struct fsf_qtcb_bottom_config *data)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out_unlock;
 
-	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  SBAL_FLAGS0_TYPE_READ, NULL);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out_unlock;
 	}
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 	req->handler = zfcp_fsf_exchange_config_data_handler;
 
 	req->qtcb->bottom.config.feature_selection =
@@ -1320,7 +1265,6 @@ out_unlock:
 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 {
 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
-	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
@@ -1328,10 +1272,11 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 		return -EOPNOTSUPP;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
@@ -1340,9 +1285,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->handler = zfcp_fsf_exchange_port_data_handler;
 	req->erp_action = erp_action;
@@ -1368,7 +1311,6 @@ out:
 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 				     struct fsf_qtcb_bottom_port *data)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	int retval = -EIO;
 
@@ -1376,10 +1318,11 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 		return -EOPNOTSUPP;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out_unlock;
 
-	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  SBAL_FLAGS0_TYPE_READ, NULL);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
@@ -1389,9 +1332,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 	if (data)
 		req->data = data;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->handler = zfcp_fsf_exchange_port_data_handler;
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1485,17 +1426,17 @@ out:
  */
 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_port *port = erp_action->port;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
@@ -1504,9 +1445,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->handler = zfcp_fsf_open_port_handler;
 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
@@ -1556,16 +1495,16 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
  */
 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
@@ -1574,9 +1513,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->handler = zfcp_fsf_close_port_handler;
 	req->data = erp_action->port;
@@ -1633,16 +1570,16 @@ out:
  */
 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
 	if (unlikely(IS_ERR(req))) {
@@ -1651,9 +1588,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->handler = zfcp_fsf_open_wka_port_handler;
 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
@@ -1688,16 +1623,16 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
  */
 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
 	if (unlikely(IS_ERR(req))) {
@@ -1706,9 +1641,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->handler = zfcp_fsf_close_wka_port_handler;
 	req->data = wka_port;
@@ -1782,16 +1715,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
  */
 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
@@ -1800,9 +1733,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->data = erp_action->port;
 	req->qtcb->header.port_handle = erp_action->port->handle;
@@ -1954,17 +1885,17 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
  */
 int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 	struct zfcp_qdio *qdio = adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+				  SBAL_FLAGS0_TYPE_READ,
 				  adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
@@ -1973,9 +1904,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->qtcb->header.port_handle = erp_action->port->handle;
 	req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
@@ -2041,16 +1970,16 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
  */
 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+				  SBAL_FLAGS0_TYPE_READ,
 				  qdio->adapter->pool.erp_req);
 
 	if (IS_ERR(req)) {
@@ -2059,9 +1988,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	req->qtcb->header.port_handle = erp_action->port->handle;
 	req->qtcb->header.lun_handle = erp_action->unit->handle;
@@ -2289,8 +2216,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 		goto out;
 	}
 
+	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
+		sbtype = SBAL_FLAGS0_TYPE_WRITE;
+
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
-				  adapter->pool.scsi_req);
+				  sbtype, adapter->pool.scsi_req);
 
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
@@ -2298,7 +2228,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 	}
 
 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-	get_device(&unit->dev);
 	req->unit = unit;
 	req->data = scsi_cmnd;
 	req->handler = zfcp_fsf_send_fcp_command_handler;
@@ -2323,20 +2252,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 		break;
 	case DMA_TO_DEVICE:
 		req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
-		sbtype = SBAL_FLAGS0_TYPE_WRITE;
 		break;
 	case DMA_BIDIRECTIONAL:
 		goto failed_scsi_cmnd;
 	}
 
+	get_device(&unit->dev);
+
 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
 
-	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
+	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
 					     scsi_sglist(scsi_cmnd),
-					     FSF_MAX_SBALS_PER_REQ);
+					     ZFCP_FSF_MAX_SBALS_PER_REQ);
 	if (unlikely(real_bytes < 0)) {
-		if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+		if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
 			dev_err(&adapter->ccw_device->dev,
 				"Oversize data package, unit 0x%016Lx "
 				"on port 0x%016Lx closed\n",
@@ -2371,7 +2301,6 @@ out:
  */
 struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	struct fcp_cmnd *fcp_cmnd;
 	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
@@ -2381,10 +2310,11 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
 		return NULL;
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+				  SBAL_FLAGS0_TYPE_WRITE,
 				  qdio->adapter->pool.scsi_req);
 
 	if (IS_ERR(req)) {
@@ -2401,9 +2331,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
-	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
 	zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
@@ -2432,7 +2360,6 @@ static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 					   struct zfcp_fsf_cfdc *fsf_cfdc)
 {
-	struct qdio_buffer_element *sbale;
 	struct zfcp_qdio *qdio = adapter->qdio;
 	struct zfcp_fsf_req *req = NULL;
 	struct fsf_qtcb_bottom_support *bottom;
@@ -2453,10 +2380,10 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 	}
 
 	spin_lock_bh(&qdio->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(qdio))
+	if (zfcp_qdio_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
+	req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
 	if (IS_ERR(req)) {
 		retval = -EPERM;
 		goto out;
@@ -2464,16 +2391,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 
 	req->handler = zfcp_fsf_control_file_handler;
 
-	sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
-	sbale[0].flags |= direction;
-
 	bottom = &req->qtcb->bottom.support;
 	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
 	bottom->option = fsf_cfdc->option;
 
 	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
-					direction, fsf_cfdc->sg,
-					FSF_MAX_SBALS_PER_REQ);
+					fsf_cfdc->sg,
+					ZFCP_FSF_MAX_SBALS_PER_REQ);
 	if (bytes != ZFCP_CFDC_MAX_SIZE) {
 		zfcp_fsf_req_free(req);
 		goto out;
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index b3de682b64cf..519083fd6e89 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #ifndef FSF_H
@@ -152,7 +152,12 @@
 #define FSF_CLASS_3			0x00000003
 
 /* SBAL chaining */
-#define FSF_MAX_SBALS_PER_REQ		36
+#define ZFCP_FSF_MAX_SBALS_PER_REQ	36
+
+/* max. number of (data buffer) SBALEs in largest SBAL chain
+ * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
+#define ZFCP_FSF_MAX_SBALES_PER_REQ \
+	(ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
 
 /* logging space behind QTCB */
 #define FSF_QTCB_LOG_SIZE		1024
@@ -361,7 +366,7 @@ struct fsf_qtcb_bottom_config {
 	u32 adapter_type;
 	u8 res0;
 	u8 peer_d_id[3];
-	u8 res1[2];
+	u16 status_read_buf_num;
 	u16 timer_interval;
 	u8 res2[9];
 	u8 s_id[3];
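The derived constant deserves a worked example. Assuming QDIO_MAX_ELEMENTS_PER_BUFFER is 16 (taken from the s390 QDIO headers, not shown in this diff), a chained request spans 36 SBALs of 15 usable SBALEs each, minus the two entries that carry the request ID and the QTCB:

#include <stdio.h>

/* assumption from <asm/qdio.h>: 16 SBALEs per SBAL */
#define QDIO_MAX_ELEMENTS_PER_BUFFER	16

/* mirrors the defines added in zfcp_qdio.h and above */
#define ZFCP_QDIO_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
#define ZFCP_FSF_MAX_SBALS_PER_REQ	36
#define ZFCP_FSF_MAX_SBALES_PER_REQ \
	(ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)

int main(void)
{
	/* 36 * 15 - 2 = 538 data SBALEs per request */
	printf("data SBALEs per request: %d\n", ZFCP_FSF_MAX_SBALES_PER_REQ);
	/* zfcp_scsi.c below sizes sg_tablesize and max_sectors from this */
	printf("max_sectors: %d\n", ZFCP_FSF_MAX_SBALES_PER_REQ * 8);
	return 0;
}
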
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbfa312a7f50..28117e130e2c 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@
  *
  * Setup and helper functions to access QDIO.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -151,8 +151,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
 }
 
 static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
-		     unsigned long sbtype)
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 {
 	struct qdio_buffer_element *sbale;
 
@@ -180,17 +179,16 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 
 	/* set storage-block type for new SBAL */
 	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
-	sbale->flags |= sbtype;
+	sbale->flags |= q_req->sbtype;
 
 	return sbale;
 }
 
 static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
-		     unsigned int sbtype)
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 {
-	if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
-		return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
+	if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL)
+		return zfcp_qdio_sbal_chain(qdio, q_req);
 	q_req->sbale_curr++;
 	return zfcp_qdio_sbale_curr(qdio, q_req);
 }
@@ -206,62 +204,38 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
 	zfcp_qdio_zero_sbals(sbal, first, count);
 }
 
-static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
-				struct zfcp_qdio_req *q_req,
-				unsigned int sbtype, void *start_addr,
-				unsigned int total_length)
-{
-	struct qdio_buffer_element *sbale;
-	unsigned long remaining, length;
-	void *addr;
-
-	/* split segment up */
-	for (addr = start_addr, remaining = total_length; remaining > 0;
-	     addr += length, remaining -= length) {
-		sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype);
-		if (!sbale) {
-			atomic_inc(&qdio->req_q_full);
-			zfcp_qdio_undo_sbals(qdio, q_req);
-			return -EINVAL;
-		}
-
-		/* new piece must not exceed next page boundary */
-		length = min(remaining,
-			     (PAGE_SIZE - ((unsigned long)addr &
-					   (PAGE_SIZE - 1))));
-		sbale->addr = addr;
-		sbale->length = length;
-	}
-	return 0;
-}
-
 /**
  * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
- * @fsf_req: request to be processed
- * @sbtype: SBALE flags
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
  * @sg: scatter-gather list
  * @max_sbals: upper bound for number of SBALs to be used
  * Returns: number of bytes, or error (negativ)
  */
 int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
-			    unsigned long sbtype, struct scatterlist *sg,
-			    int max_sbals)
+			    struct scatterlist *sg, int max_sbals)
 {
 	struct qdio_buffer_element *sbale;
-	int retval, bytes = 0;
+	int bytes = 0;
 
 	/* figure out last allowed SBAL */
 	zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
 
 	/* set storage-block type for this request */
 	sbale = zfcp_qdio_sbale_req(qdio, q_req);
-	sbale->flags |= sbtype;
+	sbale->flags |= q_req->sbtype;
 
 	for (; sg; sg = sg_next(sg)) {
-		retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
-					      sg_virt(sg), sg->length);
-		if (retval < 0)
-			return retval;
+		sbale = zfcp_qdio_sbale_next(qdio, q_req);
+		if (!sbale) {
+			atomic_inc(&qdio->req_q_full);
+			zfcp_qdio_undo_sbals(qdio, q_req);
+			return -EINVAL;
+		}
+
+		sbale->addr = sg_virt(sg);
+		sbale->length = sg->length;
+
 		bytes += sg->length;
 	}
 
@@ -272,6 +246,46 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 	return bytes;
 }
 
+static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+{
+	struct zfcp_qdio_queue *req_q = &qdio->req_q;
+
+	spin_lock_bh(&qdio->req_q_lock);
+	if (atomic_read(&req_q->count))
+		return 1;
+	spin_unlock_bh(&qdio->req_q_lock);
+	return 0;
+}
+
+/**
+ * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
+ * @qdio: pointer to struct zfcp_qdio
+ *
+ * The req_q_lock must be held by the caller of this function, and
+ * this function may only be called from process context; it will
+ * sleep when waiting for a free sbal.
+ *
+ * Returns: 0 on success, -EIO if there is no free sbal after waiting.
+ */
+int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+{
+	long ret;
+
+	spin_unlock_bh(&qdio->req_q_lock);
+	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+			       zfcp_qdio_sbal_check(qdio), 5 * HZ);
+	if (ret > 0)
+		return 0;
+	if (!ret) {
+		atomic_inc(&qdio->req_q_full);
+		/* assume hanging outbound queue, try queue recovery */
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
+	}
+
+	spin_lock_bh(&qdio->req_q_lock);
+	return -EIO;
+}
+
 /**
  * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
  * @qdio: pointer to struct zfcp_qdio
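Note the tightened contract of zfcp_qdio_sbals_from_sg(): the page-splitting loop is gone, so each scatterlist entry must now fit into a single SBALE. The zfcp_scsi.c hunk below makes the block layer uphold that by setting dma_boundary to ZFCP_QDIO_SBALE_LEN - 1. A stand-alone check of what that boundary mask guarantees (4096 as PAGE_SIZE on s390 is an assumption here):

#include <stdio.h>

#define ZFCP_QDIO_SBALE_LEN	4096UL	/* assumed PAGE_SIZE */

/* block-layer rule: a segment may not cross (addr | dma_boundary) */
static int crosses_boundary(unsigned long addr, unsigned long len)
{
	unsigned long mask = ZFCP_QDIO_SBALE_LEN - 1;

	return ((addr | mask) + 1) < addr + len;
}

int main(void)
{
	printf("%d\n", crosses_boundary(0x1f00, 0x100)); /* 0: fits one SBALE */
	printf("%d\n", crosses_boundary(0x1f00, 0x200)); /* 1: would need a split */
	return 0;
}
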
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 8cca54631e1e..138fba577b48 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -11,6 +11,14 @@
 
 #include <asm/qdio.h>
 
+#define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE
+
+/* DMQ bug workaround: don't use last SBALE */
+#define ZFCP_QDIO_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+
+/* index of last SBALE (with respect to DMQ bug workaround) */
+#define ZFCP_QDIO_LAST_SBALE_PER_SBAL	(ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
+
 /**
  * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
  * @sbal: qdio buffers
@@ -49,6 +57,7 @@ struct zfcp_qdio {
 
 /**
  * struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbtype: sbal type flags for sbale 0
  * @sbal_number: number of free sbals
  * @sbal_first: first sbal for this request
  * @sbal_last: last sbal for this request
@@ -59,6 +68,7 @@ struct zfcp_qdio {
  * @qdio_inb_usage: usage of inbound queue
  */
 struct zfcp_qdio_req {
+	u32 sbtype;
 	u8 sbal_number;
 	u8 sbal_first;
 	u8 sbal_last;
@@ -106,4 +116,98 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 			       q_req->sbale_curr);
 }
 
+/**
+ * zfcp_qdio_req_init - initialize qdio request
+ * @qdio: request queue where to start putting the request
+ * @q_req: the qdio request to start
+ * @req_id: The request id
+ * @sbtype: type flags to set for all sbals
+ * @data: First data block
+ * @len: Length of first data block
+ *
+ * This is the start of putting the request into the queue, the last
+ * step is passing the request to zfcp_qdio_send. The request queue
+ * lock must be held during the whole process from init to send.
+ */
+static inline
+void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+			unsigned long req_id, u32 sbtype, void *data, u32 len)
+{
+	struct qdio_buffer_element *sbale;
+
+	q_req->sbal_first = q_req->sbal_last = qdio->req_q.first;
+	q_req->sbal_number = 1;
+	q_req->sbtype = sbtype;
+
+	sbale = zfcp_qdio_sbale_req(qdio, q_req);
+	sbale->addr = (void *) req_id;
+	sbale->flags |= SBAL_FLAGS0_COMMAND;
+	sbale->flags |= sbtype;
+
+	q_req->sbale_curr = 1;
+	sbale++;
+	sbale->addr = data;
+	if (likely(data))
+		sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_queue_req
+ *
+ * This is only required for single sbal requests, calling it when
+ * wrapping around to the next sbal is a bug.
+ */
+static inline
+void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+			 void *data, u32 len)
+{
+	struct qdio_buffer_element *sbale;
+
+	BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL);
+	q_req->sbale_curr++;
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->addr = data;
+	sbale->length = len;
+}
+
+/**
+ * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_queue_req
+ */
+static inline
+void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
+			      struct zfcp_qdio_req *q_req)
+{
+	struct qdio_buffer_element *sbale;
+
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
+	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
+}
+
+/**
+ * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
+ * @sg: The scatterlist where to check the data size
+ *
+ * Returns: 1 when one sbale is enough for the data in the scatterlist,
+ * 0 if not.
+ */
+static inline
+int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
+{
+	return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
+}
+
+/**
+ * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
+ * @q_req: The current zfcp_qdio_req
+ */
+static inline
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
+{
+	q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
+}
+
 #endif /* ZFCP_QDIO_H */
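Taken together, these helpers give every FSF request a fixed build sequence. A condensed sketch of how the zfcp_fsf.c callers above drive it (kernel context assumed, the function name is hypothetical, error paths trimmed):

/* sketch only, not a standalone unit */
static int zfcp_example_send(struct zfcp_qdio *qdio, mempool_t *pool)
{
	struct zfcp_fsf_req *req;
	int ret = -EIO;

	spin_lock_bh(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))	/* may sleep; drops and retakes lock */
		goto out;

	/* zfcp_fsf_req_create() calls zfcp_qdio_req_init(), which fills
	 * SBALE 0 (request ID) and SBALE 1 (QTCB) with the sbtype flags */
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_FLAGS0_TYPE_READ, pool);
	if (IS_ERR(req))
		goto out;

	/* optionally zfcp_qdio_fill_next() for one extra data buffer,
	 * or zfcp_qdio_sbals_from_sg() for chained scatter-gather */
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	ret = zfcp_fsf_req_send(req);
out:
	spin_unlock_bh(&qdio->req_q_lock);
	return ret;
}
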
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 174b6d57d576..be5d2c60453d 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -175,7 +175,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	struct zfcp_fsf_req *old_req, *abrt_req;
 	unsigned long flags;
 	unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
-	int retval = SUCCESS;
+	int retval = SUCCESS, ret;
 	int retry = 3;
 	char *dbf_tag;
 
@@ -200,7 +200,9 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 			break;
 
 		zfcp_erp_wait(adapter);
-		fc_block_scsi_eh(scpnt);
+		ret = fc_block_scsi_eh(scpnt);
+		if (ret)
+			return ret;
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
 			zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -231,7 +233,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 	struct zfcp_unit *unit = scpnt->device->hostdata;
 	struct zfcp_adapter *adapter = unit->port->adapter;
 	struct zfcp_fsf_req *fsf_req = NULL;
-	int retval = SUCCESS;
+	int retval = SUCCESS, ret;
 	int retry = 3;
 
 	while (retry--) {
@@ -240,7 +242,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 			break;
 
 		zfcp_erp_wait(adapter);
-		fc_block_scsi_eh(scpnt);
+		ret = fc_block_scsi_eh(scpnt);
+		if (ret)
+			return ret;
+
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
 			zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -276,10 +281,13 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 {
 	struct zfcp_unit *unit = scpnt->device->hostdata;
 	struct zfcp_adapter *adapter = unit->port->adapter;
+	int ret;
 
 	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
 	zfcp_erp_wait(adapter);
-	fc_block_scsi_eh(scpnt);
+	ret = fc_block_scsi_eh(scpnt);
+	if (ret)
+		return ret;
 
 	return SUCCESS;
 }
@@ -669,11 +677,12 @@ struct zfcp_data zfcp_data = {
 		.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
 		.can_queue = 4096,
 		.this_id = -1,
-		.sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
+		.sg_tablesize = ZFCP_FSF_MAX_SBALES_PER_REQ,
 		.cmd_per_lun = 1,
 		.use_clustering = 1,
 		.sdev_attrs = zfcp_sysfs_sdev_attrs,
-		.max_sectors = (ZFCP_MAX_SBALES_PER_REQ * 8),
+		.max_sectors = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8),
+		.dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
 		.shost_attrs = zfcp_sysfs_shost_attrs,
 	},
 };
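All three eh handlers get the same treatment: fc_block_scsi_eh() waits while the rport is blocked and, in the accompanying scsi_transport_fc change (not part of this diff), returns either 0 or FAST_IO_FAIL; the handlers now propagate that instead of pressing on. The shape, reduced to its core (hypothetical helper, kernel context assumed):

static int zfcp_example_eh_step(struct scsi_cmnd *scpnt,
				struct zfcp_adapter *adapter)
{
	int ret;

	zfcp_erp_wait(adapter);
	ret = fc_block_scsi_eh(scpnt);	/* 0, or FAST_IO_FAIL */
	if (ret)
		return ret;		/* give up early on fast fail */
	return SUCCESS;
}
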
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index e9788f55ab13..1bb774becf25 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1,10 +1,11 @@
 /*
    3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@amcc.com>
-   Modifications By: Tom Couch <linuxraid@amcc.com>
+   Written By: Adam Radford <linuxraid@lsi.com>
+   Modifications By: Tom Couch <linuxraid@lsi.com>
 
    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
+   Copyright (C) 2010 LSI Corporation.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -40,10 +41,10 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@amcc.com
+   linuxraid@lsi.com
 
    For more information, goto:
-   http://www.amcc.com
+   http://www.lsi.com
 
    Note: This version of the driver does not contain a bundled firmware
    image.
@@ -77,6 +78,7 @@
                  Use pci_resource_len() for ioremap().
    2.26.02.012 - Add power management support.
    2.26.02.013 - Fix bug in twa_load_sgl().
+   2.26.02.014 - Force 60 second timeout default.
 */
 
 #include <linux/module.h>
@@ -102,14 +104,14 @@
 #include "3w-9xxx.h"
 
 /* Globals */
-#define TW_DRIVER_VERSION "2.26.02.013"
+#define TW_DRIVER_VERSION "2.26.02.014"
 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 static unsigned int twa_device_extension_count;
 static int twa_major = -1;
 extern struct timezone sys_tz;
 
 /* Module parameters */
-MODULE_AUTHOR ("AMCC");
+MODULE_AUTHOR ("LSI");
 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(TW_DRIVER_VERSION);
@@ -1990,6 +1992,15 @@ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
 	scsi_dma_unmap(cmd);
 } /* End twa_unmap_scsi_data() */
 
+/* This function gets called when a disk is coming on-line */
+static int twa_slave_configure(struct scsi_device *sdev)
+{
+	/* Force 60 second timeout */
+	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+
+	return 0;
+} /* End twa_slave_configure() */
+
 /* scsi_host_template initializer */
 static struct scsi_host_template driver_template = {
 	.module = THIS_MODULE,
@@ -1999,6 +2010,7 @@ static struct scsi_host_template driver_template = {
 	.bios_param = twa_scsi_biosparam,
 	.change_queue_depth = twa_change_queue_depth,
 	.can_queue = TW_Q_LENGTH-2,
+	.slave_configure = twa_slave_configure,
 	.this_id = -1,
 	.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
 	.max_sectors = TW_MAX_SECTORS,
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 2893eec78ed2..3343824855d0 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -1,10 +1,11 @@
1/* 1/*
2 3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux. 2 3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
3 3
4 Written By: Adam Radford <linuxraid@amcc.com> 4 Written By: Adam Radford <linuxraid@lsi.com>
5 Modifications By: Tom Couch <linuxraid@amcc.com> 5 Modifications By: Tom Couch <linuxraid@lsi.com>
6 6
7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation. 7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 Copyright (C) 2010 LSI Corporation.
8 9
9 This program is free software; you can redistribute it and/or modify 10 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 11 it under the terms of the GNU General Public License as published by
@@ -40,10 +41,10 @@
40 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 41 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
41 42
42 Bugs/Comments/Suggestions should be mailed to: 43 Bugs/Comments/Suggestions should be mailed to:
43 linuxraid@amcc.com 44 linuxraid@lsi.com
44 45
45 For more information, goto: 46 For more information, goto:
46 http://www.amcc.com 47 http://www.lsi.com
47*/ 48*/
48 49
49#ifndef _3W_9XXX_H 50#ifndef _3W_9XXX_H
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 5faf903ca8c8..d119a614bf7d 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,12 +1,12 @@
1/* 1/*
2 3w-xxxx.c -- 3ware Storage Controller device driver for Linux. 2 3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
3 3
4 Written By: Adam Radford <linuxraid@amcc.com> 4 Written By: Adam Radford <linuxraid@lsi.com>
5 Modifications By: Joel Jacobson <linux@3ware.com> 5 Modifications By: Joel Jacobson <linux@3ware.com>
6 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 Brad Strand <linux@3ware.com> 7 Brad Strand <linux@3ware.com>
8 8
9 Copyright (C) 1999-2009 3ware Inc. 9 Copyright (C) 1999-2010 3ware Inc.
10 10
11 Kernel compatibility By: Andre Hedrick <andre@suse.com> 11 Kernel compatibility By: Andre Hedrick <andre@suse.com>
12 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> 12 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -47,10 +47,10 @@
47 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 47 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
48 48
49 Bugs/Comments/Suggestions should be mailed to: 49 Bugs/Comments/Suggestions should be mailed to:
50 linuxraid@amcc.com 50 linuxraid@lsi.com
51 51
52 For more information, goto: 52 For more information, goto:
53 http://www.amcc.com 53 http://www.lsi.com
54 54
55 History 55 History
56 ------- 56 -------
@@ -194,6 +194,7 @@
194 1.26.02.002 - Free irq handler in __tw_shutdown(). 194 1.26.02.002 - Free irq handler in __tw_shutdown().
195 Turn on RCD bit for caching mode page. 195 Turn on RCD bit for caching mode page.
196 Serialize reset code. 196 Serialize reset code.
197 1.26.02.003 - Force 60 second timeout default.
197*/ 198*/
198 199
199#include <linux/module.h> 200#include <linux/module.h>
@@ -219,13 +220,13 @@
219#include "3w-xxxx.h" 220#include "3w-xxxx.h"
220 221
221/* Globals */ 222/* Globals */
222#define TW_DRIVER_VERSION "1.26.02.002" 223#define TW_DRIVER_VERSION "1.26.02.003"
223static TW_Device_Extension *tw_device_extension_list[TW_MAX_SLOT]; 224static TW_Device_Extension *tw_device_extension_list[TW_MAX_SLOT];
224static int tw_device_extension_count = 0; 225static int tw_device_extension_count = 0;
225static int twe_major = -1; 226static int twe_major = -1;
226 227
227/* Module parameters */ 228/* Module parameters */
228MODULE_AUTHOR("AMCC"); 229MODULE_AUTHOR("LSI");
229MODULE_DESCRIPTION("3ware Storage Controller Linux Driver"); 230MODULE_DESCRIPTION("3ware Storage Controller Linux Driver");
230MODULE_LICENSE("GPL"); 231MODULE_LICENSE("GPL");
231MODULE_VERSION(TW_DRIVER_VERSION); 232MODULE_VERSION(TW_DRIVER_VERSION);
@@ -2245,6 +2246,15 @@ static void tw_shutdown(struct pci_dev *pdev)
2245 __tw_shutdown(tw_dev); 2246 __tw_shutdown(tw_dev);
2246} /* End tw_shutdown() */ 2247} /* End tw_shutdown() */
2247 2248
2249/* This function gets called when a disk is coming online */
2250static int tw_slave_configure(struct scsi_device *sdev)
2251{
2252 /* Force 60 second timeout */
2253 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
2254
2255 return 0;
2256} /* End tw_slave_configure() */
2257
2248static struct scsi_host_template driver_template = { 2258static struct scsi_host_template driver_template = {
2249 .module = THIS_MODULE, 2259 .module = THIS_MODULE,
2250 .name = "3ware Storage Controller", 2260 .name = "3ware Storage Controller",
@@ -2253,6 +2263,7 @@ static struct scsi_host_template driver_template = {
2253 .bios_param = tw_scsi_biosparam, 2263 .bios_param = tw_scsi_biosparam,
2254 .change_queue_depth = tw_change_queue_depth, 2264 .change_queue_depth = tw_change_queue_depth,
2255 .can_queue = TW_Q_LENGTH-2, 2265 .can_queue = TW_Q_LENGTH-2,
2266 .slave_configure = tw_slave_configure,
2256 .this_id = -1, 2267 .this_id = -1,
2257 .sg_tablesize = TW_MAX_SGL_LENGTH, 2268 .sg_tablesize = TW_MAX_SGL_LENGTH,
2258 .max_sectors = TW_MAX_SECTORS, 2269 .max_sectors = TW_MAX_SECTORS,
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index a5a2ba2561d9..8b9f9d17e7fe 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,12 +1,12 @@
1/* 1/*
2 3w-xxxx.h -- 3ware Storage Controller device driver for Linux. 2 3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
3 3
4 Written By: Adam Radford <linuxraid@amcc.com> 4 Written By: Adam Radford <linuxraid@lsi.com>
5 Modifications By: Joel Jacobson <linux@3ware.com> 5 Modifications By: Joel Jacobson <linux@3ware.com>
6 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 Brad Strand <linux@3ware.com> 7 Brad Strand <linux@3ware.com>
8 8
9 Copyright (C) 1999-2009 3ware Inc. 9 Copyright (C) 1999-2010 3ware Inc.
10 10
11 Kernel compatiblity By: Andre Hedrick <andre@suse.com> 11 Kernel compatiblity By: Andre Hedrick <andre@suse.com>
12 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com> 12 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
@@ -45,10 +45,10 @@
45 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 45 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 46
47 Bugs/Comments/Suggestions should be mailed to: 47 Bugs/Comments/Suggestions should be mailed to:
48 linuxraid@amcc.com 48 linuxraid@lsi.com
49 49
50 For more information, goto: 50 For more information, goto:
51 http://www.amcc.com 51 http://www.lsi.com
52*/ 52*/
53 53
54#ifndef _3W_XXXX_H 54#ifndef _3W_XXXX_H
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 92a8c500b23d..1c7ac49be649 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -162,6 +162,7 @@ scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
162scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o 162scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
163scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o 163scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
164scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o 164scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
165scsi_mod-y += scsi_trace.o
165 166
166scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o 167scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
167 168
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index d8fe5b76fee0..308541ff85cf 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -19,186 +19,190 @@
19#include "wd33c93.h" 19#include "wd33c93.h"
20#include "a2091.h" 20#include "a2091.h"
21 21
22#include<linux/stat.h> 22#include <linux/stat.h>
23 23
24#define DMA(ptr) ((a2091_scsiregs *)((ptr)->base))
25#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
26 24
27static int a2091_release(struct Scsi_Host *instance); 25static int a2091_release(struct Scsi_Host *instance);
28 26
29static irqreturn_t a2091_intr (int irq, void *_instance) 27static irqreturn_t a2091_intr(int irq, void *data)
30{ 28{
31 unsigned long flags; 29 struct Scsi_Host *instance = data;
32 unsigned int status; 30 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
33 struct Scsi_Host *instance = (struct Scsi_Host *)_instance; 31 unsigned int status = regs->ISTR;
34 32 unsigned long flags;
35 status = DMA(instance)->ISTR; 33
36 if (!(status & (ISTR_INT_F|ISTR_INT_P)) || !(status & ISTR_INTS)) 34 if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
37 return IRQ_NONE; 35 return IRQ_NONE;
38 36
39 spin_lock_irqsave(instance->host_lock, flags); 37 spin_lock_irqsave(instance->host_lock, flags);
40 wd33c93_intr(instance); 38 wd33c93_intr(instance);
41 spin_unlock_irqrestore(instance->host_lock, flags); 39 spin_unlock_irqrestore(instance->host_lock, flags);
42 return IRQ_HANDLED; 40 return IRQ_HANDLED;
43} 41}
44 42
45static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 43static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
46{ 44{
47 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 45 struct Scsi_Host *instance = cmd->device->host;
48 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 46 struct WD33C93_hostdata *hdata = shost_priv(instance);
49 struct Scsi_Host *instance = cmd->device->host; 47 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
50 48 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
51 /* don't allow DMA if the physical address is bad */ 49 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
52 if (addr & A2091_XFER_MASK)
53 {
54 HDATA(instance)->dma_bounce_len = (cmd->SCp.this_residual + 511)
55 & ~0x1ff;
56 HDATA(instance)->dma_bounce_buffer =
57 kmalloc (HDATA(instance)->dma_bounce_len, GFP_KERNEL);
58
59 /* can't allocate memory; use PIO */
60 if (!HDATA(instance)->dma_bounce_buffer) {
61 HDATA(instance)->dma_bounce_len = 0;
62 return 1;
63 }
64
65 /* get the physical address of the bounce buffer */
66 addr = virt_to_bus(HDATA(instance)->dma_bounce_buffer);
67 50
68 /* the bounce buffer may not be in the first 16M of physmem */ 51 /* don't allow DMA if the physical address is bad */
69 if (addr & A2091_XFER_MASK) { 52 if (addr & A2091_XFER_MASK) {
70 /* we could use chipmem... maybe later */ 53 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
71 kfree (HDATA(instance)->dma_bounce_buffer); 54 hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len,
72 HDATA(instance)->dma_bounce_buffer = NULL; 55 GFP_KERNEL);
73 HDATA(instance)->dma_bounce_len = 0; 56
74 return 1; 57 /* can't allocate memory; use PIO */
58 if (!hdata->dma_bounce_buffer) {
59 hdata->dma_bounce_len = 0;
60 return 1;
61 }
62
63 /* get the physical address of the bounce buffer */
64 addr = virt_to_bus(hdata->dma_bounce_buffer);
65
66 /* the bounce buffer may not be in the first 16M of physmem */
67 if (addr & A2091_XFER_MASK) {
68 /* we could use chipmem... maybe later */
69 kfree(hdata->dma_bounce_buffer);
70 hdata->dma_bounce_buffer = NULL;
71 hdata->dma_bounce_len = 0;
72 return 1;
73 }
74
75 if (!dir_in) {
76 /* copy to bounce buffer for a write */
77 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr,
78 cmd->SCp.this_residual);
79 }
75 } 80 }
76 81
77 if (!dir_in) { 82 /* setup dma direction */
78 /* copy to bounce buffer for a write */ 83 if (!dir_in)
79 memcpy (HDATA(instance)->dma_bounce_buffer, 84 cntr |= CNTR_DDIR;
80 cmd->SCp.ptr, cmd->SCp.this_residual);
81 }
82 }
83 85
84 /* setup dma direction */ 86 /* remember direction */
85 if (!dir_in) 87 hdata->dma_dir = dir_in;
86 cntr |= CNTR_DDIR;
87 88
88 /* remember direction */ 89 regs->CNTR = cntr;
89 HDATA(cmd->device->host)->dma_dir = dir_in;
90 90
91 DMA(cmd->device->host)->CNTR = cntr; 91 /* setup DMA *physical* address */
92 regs->ACR = addr;
92 93
93 /* setup DMA *physical* address */ 94 if (dir_in) {
94 DMA(cmd->device->host)->ACR = addr; 95 /* invalidate any cache */
95 96 cache_clear(addr, cmd->SCp.this_residual);
96 if (dir_in){ 97 } else {
97 /* invalidate any cache */ 98 /* push any dirty cache */
98 cache_clear (addr, cmd->SCp.this_residual); 99 cache_push(addr, cmd->SCp.this_residual);
99 }else{ 100 }
100 /* push any dirty cache */ 101 /* start DMA */
101 cache_push (addr, cmd->SCp.this_residual); 102 regs->ST_DMA = 1;
102 }
103 /* start DMA */
104 DMA(cmd->device->host)->ST_DMA = 1;
105 103
106 /* return success */ 104 /* return success */
107 return 0; 105 return 0;
108} 106}
109 107
110static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 108static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
111 int status) 109 int status)
112{ 110{
113 /* disable SCSI interrupts */ 111 struct WD33C93_hostdata *hdata = shost_priv(instance);
114 unsigned short cntr = CNTR_PDMD; 112 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
115 113
116 if (!HDATA(instance)->dma_dir) 114 /* disable SCSI interrupts */
117 cntr |= CNTR_DDIR; 115 unsigned short cntr = CNTR_PDMD;
118 116
119 /* disable SCSI interrupts */ 117 if (!hdata->dma_dir)
120 DMA(instance)->CNTR = cntr; 118 cntr |= CNTR_DDIR;
121 119
122 /* flush if we were reading */ 120 /* disable SCSI interrupts */
123 if (HDATA(instance)->dma_dir) { 121 regs->CNTR = cntr;
124 DMA(instance)->FLUSH = 1; 122
125 while (!(DMA(instance)->ISTR & ISTR_FE_FLG)) 123 /* flush if we were reading */
126 ; 124 if (hdata->dma_dir) {
127 } 125 regs->FLUSH = 1;
128 126 while (!(regs->ISTR & ISTR_FE_FLG))
129 /* clear a possible interrupt */ 127 ;
130 DMA(instance)->CINT = 1; 128 }
131 129
132 /* stop DMA */ 130 /* clear a possible interrupt */
133 DMA(instance)->SP_DMA = 1; 131 regs->CINT = 1;
134 132
135 /* restore the CONTROL bits (minus the direction flag) */ 133 /* stop DMA */
136 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; 134 regs->SP_DMA = 1;
137 135
138 /* copy from a bounce buffer, if necessary */ 136 /* restore the CONTROL bits (minus the direction flag) */
139 if (status && HDATA(instance)->dma_bounce_buffer) { 137 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
140 if( HDATA(instance)->dma_dir ) 138
141 memcpy (SCpnt->SCp.ptr, 139 /* copy from a bounce buffer, if necessary */
142 HDATA(instance)->dma_bounce_buffer, 140 if (status && hdata->dma_bounce_buffer) {
143 SCpnt->SCp.this_residual); 141 if (hdata->dma_dir)
144 kfree (HDATA(instance)->dma_bounce_buffer); 142 memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer,
145 HDATA(instance)->dma_bounce_buffer = NULL; 143 SCpnt->SCp.this_residual);
146 HDATA(instance)->dma_bounce_len = 0; 144 kfree(hdata->dma_bounce_buffer);
147 } 145 hdata->dma_bounce_buffer = NULL;
146 hdata->dma_bounce_len = 0;
147 }
148} 148}
149 149
150static int __init a2091_detect(struct scsi_host_template *tpnt) 150static int __init a2091_detect(struct scsi_host_template *tpnt)
151{ 151{
152 static unsigned char called = 0; 152 static unsigned char called = 0;
153 struct Scsi_Host *instance; 153 struct Scsi_Host *instance;
154 unsigned long address; 154 unsigned long address;
155 struct zorro_dev *z = NULL; 155 struct zorro_dev *z = NULL;
156 wd33c93_regs regs; 156 wd33c93_regs wdregs;
157 int num_a2091 = 0; 157 a2091_scsiregs *regs;
158 158 struct WD33C93_hostdata *hdata;
159 if (!MACH_IS_AMIGA || called) 159 int num_a2091 = 0;
160 return 0; 160
161 called = 1; 161 if (!MACH_IS_AMIGA || called)
162 162 return 0;
163 tpnt->proc_name = "A2091"; 163 called = 1;
164 tpnt->proc_info = &wd33c93_proc_info; 164
165 165 tpnt->proc_name = "A2091";
166 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { 166 tpnt->proc_info = &wd33c93_proc_info;
167 if (z->id != ZORRO_PROD_CBM_A590_A2091_1 && 167
168 z->id != ZORRO_PROD_CBM_A590_A2091_2) 168 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
169 continue; 169 if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
170 address = z->resource.start; 170 z->id != ZORRO_PROD_CBM_A590_A2091_2)
171 if (!request_mem_region(address, 256, "wd33c93")) 171 continue;
172 continue; 172 address = z->resource.start;
173 173 if (!request_mem_region(address, 256, "wd33c93"))
174 instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata)); 174 continue;
175 if (instance == NULL) 175
176 goto release; 176 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
177 instance->base = ZTWO_VADDR(address); 177 if (instance == NULL)
178 instance->irq = IRQ_AMIGA_PORTS; 178 goto release;
179 instance->unique_id = z->slotaddr; 179 instance->base = ZTWO_VADDR(address);
180 DMA(instance)->DAWR = DAWR_A2091; 180 instance->irq = IRQ_AMIGA_PORTS;
181 regs.SASR = &(DMA(instance)->SASR); 181 instance->unique_id = z->slotaddr;
182 regs.SCMD = &(DMA(instance)->SCMD); 182 regs = (a2091_scsiregs *)(instance->base);
183 HDATA(instance)->no_sync = 0xff; 183 regs->DAWR = DAWR_A2091;
184 HDATA(instance)->fast = 0; 184 wdregs.SASR = &regs->SASR;
185 HDATA(instance)->dma_mode = CTRL_DMA; 185 wdregs.SCMD = &regs->SCMD;
186 wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10); 186 hdata = shost_priv(instance);
187 if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI", 187 hdata->no_sync = 0xff;
188 instance)) 188 hdata->fast = 0;
189 goto unregister; 189 hdata->dma_mode = CTRL_DMA;
190 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; 190 wd33c93_init(instance, wdregs, dma_setup, dma_stop,
191 num_a2091++; 191 WD33C93_FS_8_10);
192 continue; 192 if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
193 "A2091 SCSI", instance))
194 goto unregister;
195 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
196 num_a2091++;
197 continue;
193 198
194unregister: 199unregister:
195 scsi_unregister(instance); 200 scsi_unregister(instance);
196 wd33c93_release();
197release: 201release:
198 release_mem_region(address, 256); 202 release_mem_region(address, 256);
199 } 203 }
200 204
201 return num_a2091; 205 return num_a2091;
202} 206}
203 207
204static int a2091_bus_reset(struct scsi_cmnd *cmd) 208static int a2091_bus_reset(struct scsi_cmnd *cmd)
@@ -239,10 +243,11 @@ static struct scsi_host_template driver_template = {
239static int a2091_release(struct Scsi_Host *instance) 243static int a2091_release(struct Scsi_Host *instance)
240{ 244{
241#ifdef MODULE 245#ifdef MODULE
242 DMA(instance)->CNTR = 0; 246 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base);
247
248 regs->CNTR = 0;
243 release_mem_region(ZTWO_PADDR(instance->base), 256); 249 release_mem_region(ZTWO_PADDR(instance->base), 256);
244 free_irq(IRQ_AMIGA_PORTS, instance); 250 free_irq(IRQ_AMIGA_PORTS, instance);
245 wd33c93_release();
246#endif 251#endif
247 return 1; 252 return 1;
248} 253}
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 252528f2672e..1c3daa1fd754 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -12,38 +12,38 @@
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14#ifndef CMD_PER_LUN 14#ifndef CMD_PER_LUN
15#define CMD_PER_LUN 2 15#define CMD_PER_LUN 2
16#endif 16#endif
17 17
18#ifndef CAN_QUEUE 18#ifndef CAN_QUEUE
19#define CAN_QUEUE 16 19#define CAN_QUEUE 16
20#endif 20#endif
21 21
22/* 22/*
23 * if the transfer address ANDed with this results in a non-zero 23 * if the transfer address ANDed with this results in a non-zero
24 * result, then we can't use DMA. 24 * result, then we can't use DMA.
25 */ 25 */
26#define A2091_XFER_MASK (0xff000001) 26#define A2091_XFER_MASK (0xff000001)
27 27
28typedef struct { 28typedef struct {
29 unsigned char pad1[64]; 29 unsigned char pad1[64];
30 volatile unsigned short ISTR; 30 volatile unsigned short ISTR;
31 volatile unsigned short CNTR; 31 volatile unsigned short CNTR;
32 unsigned char pad2[60]; 32 unsigned char pad2[60];
33 volatile unsigned int WTC; 33 volatile unsigned int WTC;
34 volatile unsigned long ACR; 34 volatile unsigned long ACR;
35 unsigned char pad3[6]; 35 unsigned char pad3[6];
36 volatile unsigned short DAWR; 36 volatile unsigned short DAWR;
37 unsigned char pad4; 37 unsigned char pad4;
38 volatile unsigned char SASR; 38 volatile unsigned char SASR;
39 unsigned char pad5; 39 unsigned char pad5;
40 volatile unsigned char SCMD; 40 volatile unsigned char SCMD;
41 unsigned char pad6[76]; 41 unsigned char pad6[76];
42 volatile unsigned short ST_DMA; 42 volatile unsigned short ST_DMA;
43 volatile unsigned short SP_DMA; 43 volatile unsigned short SP_DMA;
44 volatile unsigned short CINT; 44 volatile unsigned short CINT;
45 unsigned char pad7[2]; 45 unsigned char pad7[2];
46 volatile unsigned short FLUSH; 46 volatile unsigned short FLUSH;
47} a2091_scsiregs; 47} a2091_scsiregs;
48 48
49#define DAWR_A2091 (3) 49#define DAWR_A2091 (3)
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index c35fc55f1c96..bc6eb69f5fd0 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -19,26 +19,25 @@
19#include "wd33c93.h" 19#include "wd33c93.h"
20#include "a3000.h" 20#include "a3000.h"
21 21
22#include<linux/stat.h> 22#include <linux/stat.h>
23 23
24#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base)) 24
25#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata)) 25#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base))
26 26
27static struct Scsi_Host *a3000_host = NULL; 27static struct Scsi_Host *a3000_host = NULL;
28 28
29static int a3000_release(struct Scsi_Host *instance); 29static int a3000_release(struct Scsi_Host *instance);
30 30
31static irqreturn_t a3000_intr (int irq, void *dummy) 31static irqreturn_t a3000_intr(int irq, void *dummy)
32{ 32{
33 unsigned long flags; 33 unsigned long flags;
34 unsigned int status = DMA(a3000_host)->ISTR; 34 unsigned int status = DMA(a3000_host)->ISTR;
35 35
36 if (!(status & ISTR_INT_P)) 36 if (!(status & ISTR_INT_P))
37 return IRQ_NONE; 37 return IRQ_NONE;
38 if (status & ISTR_INTS) 38 if (status & ISTR_INTS) {
39 {
40 spin_lock_irqsave(a3000_host->host_lock, flags); 39 spin_lock_irqsave(a3000_host->host_lock, flags);
41 wd33c93_intr (a3000_host); 40 wd33c93_intr(a3000_host);
42 spin_unlock_irqrestore(a3000_host->host_lock, flags); 41 spin_unlock_irqrestore(a3000_host->host_lock, flags);
43 return IRQ_HANDLED; 42 return IRQ_HANDLED;
44 } 43 }
@@ -48,162 +47,165 @@ static irqreturn_t a3000_intr (int irq, void *dummy)
48 47
49static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 48static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
50{ 49{
51 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 50 struct WD33C93_hostdata *hdata = shost_priv(a3000_host);
52 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 51 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
53 52 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
54 /* 53
55 * if the physical address has the wrong alignment, or if 54 /*
56 * physical address is bad, or if it is a write and at the 55 * if the physical address has the wrong alignment, or if
57 * end of a physical memory chunk, then allocate a bounce 56 * physical address is bad, or if it is a write and at the
58 * buffer 57 * end of a physical memory chunk, then allocate a bounce
59 */ 58 * buffer
60 if (addr & A3000_XFER_MASK) 59 */
61 { 60 if (addr & A3000_XFER_MASK) {
62 HDATA(a3000_host)->dma_bounce_len = (cmd->SCp.this_residual + 511) 61 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
63 & ~0x1ff; 62 hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len,
64 HDATA(a3000_host)->dma_bounce_buffer = 63 GFP_KERNEL);
65 kmalloc (HDATA(a3000_host)->dma_bounce_len, GFP_KERNEL); 64
66 65 /* can't allocate memory; use PIO */
67 /* can't allocate memory; use PIO */ 66 if (!hdata->dma_bounce_buffer) {
68 if (!HDATA(a3000_host)->dma_bounce_buffer) { 67 hdata->dma_bounce_len = 0;
69 HDATA(a3000_host)->dma_bounce_len = 0; 68 return 1;
70 return 1; 69 }
71 } 70
72 71 if (!dir_in) {
73 if (!dir_in) { 72 /* copy to bounce buffer for a write */
74 /* copy to bounce buffer for a write */ 73 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr,
75 memcpy (HDATA(a3000_host)->dma_bounce_buffer, 74 cmd->SCp.this_residual);
76 cmd->SCp.ptr, cmd->SCp.this_residual); 75 }
76
77 addr = virt_to_bus(hdata->dma_bounce_buffer);
77 } 78 }
78 79
79 addr = virt_to_bus(HDATA(a3000_host)->dma_bounce_buffer); 80 /* setup dma direction */
80 } 81 if (!dir_in)
82 cntr |= CNTR_DDIR;
81 83
82 /* setup dma direction */ 84 /* remember direction */
83 if (!dir_in) 85 hdata->dma_dir = dir_in;
84 cntr |= CNTR_DDIR;
85 86
86 /* remember direction */ 87 DMA(a3000_host)->CNTR = cntr;
87 HDATA(a3000_host)->dma_dir = dir_in;
88 88
89 DMA(a3000_host)->CNTR = cntr; 89 /* setup DMA *physical* address */
90 DMA(a3000_host)->ACR = addr;
90 91
91 /* setup DMA *physical* address */ 92 if (dir_in) {
92 DMA(a3000_host)->ACR = addr; 93 /* invalidate any cache */
93 94 cache_clear(addr, cmd->SCp.this_residual);
94 if (dir_in) 95 } else {
95 /* invalidate any cache */ 96 /* push any dirty cache */
96 cache_clear (addr, cmd->SCp.this_residual); 97 cache_push(addr, cmd->SCp.this_residual);
97 else 98 }
98 /* push any dirty cache */
99 cache_push (addr, cmd->SCp.this_residual);
100 99
101 /* start DMA */ 100 /* start DMA */
102 mb(); /* make sure setup is completed */ 101 mb(); /* make sure setup is completed */
103 DMA(a3000_host)->ST_DMA = 1; 102 DMA(a3000_host)->ST_DMA = 1;
104 mb(); /* make sure DMA has started before next IO */ 103 mb(); /* make sure DMA has started before next IO */
105 104
106 /* return success */ 105 /* return success */
107 return 0; 106 return 0;
108} 107}
109 108
110static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 109static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
111 int status) 110 int status)
112{ 111{
113 /* disable SCSI interrupts */ 112 struct WD33C93_hostdata *hdata = shost_priv(instance);
114 unsigned short cntr = CNTR_PDMD; 113
115 114 /* disable SCSI interrupts */
116 if (!HDATA(instance)->dma_dir) 115 unsigned short cntr = CNTR_PDMD;
117 cntr |= CNTR_DDIR; 116
118 117 if (!hdata->dma_dir)
119 DMA(instance)->CNTR = cntr; 118 cntr |= CNTR_DDIR;
120 mb(); /* make sure CNTR is updated before next IO */ 119
121 120 DMA(instance)->CNTR = cntr;
122 /* flush if we were reading */ 121 mb(); /* make sure CNTR is updated before next IO */
123 if (HDATA(instance)->dma_dir) { 122
124 DMA(instance)->FLUSH = 1; 123 /* flush if we were reading */
125 mb(); /* don't allow prefetch */ 124 if (hdata->dma_dir) {
126 while (!(DMA(instance)->ISTR & ISTR_FE_FLG)) 125 DMA(instance)->FLUSH = 1;
127 barrier(); 126 mb(); /* don't allow prefetch */
128 mb(); /* no IO until FLUSH is done */ 127 while (!(DMA(instance)->ISTR & ISTR_FE_FLG))
129 } 128 barrier();
130 129 mb(); /* no IO until FLUSH is done */
131 /* clear a possible interrupt */ 130 }
132 /* I think that this CINT is only necessary if you are 131
133 * using the terminal count features. HM 7 Mar 1994 132 /* clear a possible interrupt */
134 */ 133 /* I think that this CINT is only necessary if you are
135 DMA(instance)->CINT = 1; 134 * using the terminal count features. HM 7 Mar 1994
136 135 */
137 /* stop DMA */ 136 DMA(instance)->CINT = 1;
138 DMA(instance)->SP_DMA = 1; 137
139 mb(); /* make sure DMA is stopped before next IO */ 138 /* stop DMA */
140 139 DMA(instance)->SP_DMA = 1;
141 /* restore the CONTROL bits (minus the direction flag) */ 140 mb(); /* make sure DMA is stopped before next IO */
142 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; 141
143 mb(); /* make sure CNTR is updated before next IO */ 142 /* restore the CONTROL bits (minus the direction flag) */
144 143 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
145 /* copy from a bounce buffer, if necessary */ 144 mb(); /* make sure CNTR is updated before next IO */
146 if (status && HDATA(instance)->dma_bounce_buffer) { 145
147 if (SCpnt) { 146 /* copy from a bounce buffer, if necessary */
148 if (HDATA(instance)->dma_dir && SCpnt) 147 if (status && hdata->dma_bounce_buffer) {
149 memcpy (SCpnt->SCp.ptr, 148 if (SCpnt) {
150 HDATA(instance)->dma_bounce_buffer, 149 if (hdata->dma_dir && SCpnt)
151 SCpnt->SCp.this_residual); 150 memcpy(SCpnt->SCp.ptr,
152 kfree (HDATA(instance)->dma_bounce_buffer); 151 hdata->dma_bounce_buffer,
153 HDATA(instance)->dma_bounce_buffer = NULL; 152 SCpnt->SCp.this_residual);
154 HDATA(instance)->dma_bounce_len = 0; 153 kfree(hdata->dma_bounce_buffer);
155 } else { 154 hdata->dma_bounce_buffer = NULL;
156 kfree (HDATA(instance)->dma_bounce_buffer); 155 hdata->dma_bounce_len = 0;
157 HDATA(instance)->dma_bounce_buffer = NULL; 156 } else {
158 HDATA(instance)->dma_bounce_len = 0; 157 kfree(hdata->dma_bounce_buffer);
158 hdata->dma_bounce_buffer = NULL;
159 hdata->dma_bounce_len = 0;
160 }
159 } 161 }
160 }
161} 162}
162 163
163static int __init a3000_detect(struct scsi_host_template *tpnt) 164static int __init a3000_detect(struct scsi_host_template *tpnt)
164{ 165{
165 wd33c93_regs regs; 166 wd33c93_regs regs;
166 167 struct WD33C93_hostdata *hdata;
167 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI)) 168
168 return 0; 169 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI))
169 if (!request_mem_region(0xDD0000, 256, "wd33c93")) 170 return 0;
170 return 0; 171 if (!request_mem_region(0xDD0000, 256, "wd33c93"))
171 172 return 0;
172 tpnt->proc_name = "A3000"; 173
173 tpnt->proc_info = &wd33c93_proc_info; 174 tpnt->proc_name = "A3000";
174 175 tpnt->proc_info = &wd33c93_proc_info;
175 a3000_host = scsi_register (tpnt, sizeof(struct WD33C93_hostdata)); 176
176 if (a3000_host == NULL) 177 a3000_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
177 goto fail_register; 178 if (a3000_host == NULL)
178 179 goto fail_register;
179 a3000_host->base = ZTWO_VADDR(0xDD0000); 180
180 a3000_host->irq = IRQ_AMIGA_PORTS; 181 a3000_host->base = ZTWO_VADDR(0xDD0000);
181 DMA(a3000_host)->DAWR = DAWR_A3000; 182 a3000_host->irq = IRQ_AMIGA_PORTS;
182 regs.SASR = &(DMA(a3000_host)->SASR); 183 DMA(a3000_host)->DAWR = DAWR_A3000;
183 regs.SCMD = &(DMA(a3000_host)->SCMD); 184 regs.SASR = &(DMA(a3000_host)->SASR);
184 HDATA(a3000_host)->no_sync = 0xff; 185 regs.SCMD = &(DMA(a3000_host)->SCMD);
185 HDATA(a3000_host)->fast = 0; 186 hdata = shost_priv(a3000_host);
186 HDATA(a3000_host)->dma_mode = CTRL_DMA; 187 hdata->no_sync = 0xff;
187 wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15); 188 hdata->fast = 0;
188 if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI", 189 hdata->dma_mode = CTRL_DMA;
189 a3000_intr)) 190 wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
190 goto fail_irq; 191 if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
191 DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN; 192 a3000_intr))
192 193 goto fail_irq;
193 return 1; 194 DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
195
196 return 1;
194 197
195fail_irq: 198fail_irq:
196 wd33c93_release(); 199 scsi_unregister(a3000_host);
197 scsi_unregister(a3000_host);
198fail_register: 200fail_register:
199 release_mem_region(0xDD0000, 256); 201 release_mem_region(0xDD0000, 256);
200 return 0; 202 return 0;
201} 203}
202 204
203static int a3000_bus_reset(struct scsi_cmnd *cmd) 205static int a3000_bus_reset(struct scsi_cmnd *cmd)
204{ 206{
205 /* FIXME perform bus-specific reset */ 207 /* FIXME perform bus-specific reset */
206 208
207 /* FIXME 2: kill this entire function, which should 209 /* FIXME 2: kill this entire function, which should
208 cause mid-layer to call wd33c93_host_reset anyway? */ 210 cause mid-layer to call wd33c93_host_reset anyway? */
209 211
@@ -237,11 +239,10 @@ static struct scsi_host_template driver_template = {
237 239
238static int a3000_release(struct Scsi_Host *instance) 240static int a3000_release(struct Scsi_Host *instance)
239{ 241{
240 wd33c93_release(); 242 DMA(instance)->CNTR = 0;
241 DMA(instance)->CNTR = 0; 243 release_mem_region(0xDD0000, 256);
242 release_mem_region(0xDD0000, 256); 244 free_irq(IRQ_AMIGA_PORTS, a3000_intr);
243 free_irq(IRQ_AMIGA_PORTS, a3000_intr); 245 return 1;
244 return 1;
245} 246}
246 247
247MODULE_LICENSE("GPL"); 248MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index c7afe16fd6e4..684813ee378c 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -12,40 +12,40 @@
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14#ifndef CMD_PER_LUN 14#ifndef CMD_PER_LUN
15#define CMD_PER_LUN 2 15#define CMD_PER_LUN 2
16#endif 16#endif
17 17
18#ifndef CAN_QUEUE 18#ifndef CAN_QUEUE
19#define CAN_QUEUE 16 19#define CAN_QUEUE 16
20#endif 20#endif
21 21
22/* 22/*
23 * if the transfer address ANDed with this results in a non-zero 23 * if the transfer address ANDed with this results in a non-zero
24 * result, then we can't use DMA. 24 * result, then we can't use DMA.
25 */ 25 */
26#define A3000_XFER_MASK (0x00000003) 26#define A3000_XFER_MASK (0x00000003)
27 27
28typedef struct { 28typedef struct {
29 unsigned char pad1[2]; 29 unsigned char pad1[2];
30 volatile unsigned short DAWR; 30 volatile unsigned short DAWR;
31 volatile unsigned int WTC; 31 volatile unsigned int WTC;
32 unsigned char pad2[2]; 32 unsigned char pad2[2];
33 volatile unsigned short CNTR; 33 volatile unsigned short CNTR;
34 volatile unsigned long ACR; 34 volatile unsigned long ACR;
35 unsigned char pad3[2]; 35 unsigned char pad3[2];
36 volatile unsigned short ST_DMA; 36 volatile unsigned short ST_DMA;
37 unsigned char pad4[2]; 37 unsigned char pad4[2];
38 volatile unsigned short FLUSH; 38 volatile unsigned short FLUSH;
39 unsigned char pad5[2]; 39 unsigned char pad5[2];
40 volatile unsigned short CINT; 40 volatile unsigned short CINT;
41 unsigned char pad6[2]; 41 unsigned char pad6[2];
42 volatile unsigned short ISTR; 42 volatile unsigned short ISTR;
43 unsigned char pad7[30]; 43 unsigned char pad7[30];
44 volatile unsigned short SP_DMA; 44 volatile unsigned short SP_DMA;
45 unsigned char pad8; 45 unsigned char pad8;
46 volatile unsigned char SASR; 46 volatile unsigned char SASR;
47 unsigned char pad9; 47 unsigned char pad9;
48 volatile unsigned char SCMD; 48 volatile unsigned char SCMD;
49} a3000_scsiregs; 49} a3000_scsiregs;
50 50
51#define DAWR_A3000 (3) 51#define DAWR_A3000 (3)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7e26ebc26661..7df2dd1d2c6f 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -328,6 +328,16 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
328 return status; 328 return status;
329} 329}
330 330
331static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
332{
333 char inq_data;
334 scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
335 if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
336 inq_data &= 0xdf;
337 scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
338 }
339}
340
331/** 341/**
332 * aac_get_containers - list containers 342 * aac_get_containers - list containers
333 * @common: adapter to probe 343 * @common: adapter to probe
@@ -1598,6 +1608,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
1598 int status; 1608 int status;
1599 struct aac_dev *dev; 1609 struct aac_dev *dev;
1600 struct fib * cmd_fibcontext; 1610 struct fib * cmd_fibcontext;
1611 int cid;
1601 1612
1602 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 1613 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1603 /* 1614 /*
@@ -1647,6 +1658,22 @@ static int aac_read(struct scsi_cmnd * scsicmd)
1647 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1658 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1648 break; 1659 break;
1649 } 1660 }
1661
1662 if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
1663 cid = scmd_id(scsicmd);
1664 dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
1665 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1666 SAM_STAT_CHECK_CONDITION;
1667 set_sense(&dev->fsa_dev[cid].sense_data,
1668 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1669 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1670 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1671 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1672 SCSI_SENSE_BUFFERSIZE));
1673 scsicmd->scsi_done(scsicmd);
1674 return 1;
1675 }
1676
1650 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", 1677 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
1651 smp_processor_id(), (unsigned long long)lba, jiffies)); 1678 smp_processor_id(), (unsigned long long)lba, jiffies));
1652 if (aac_adapter_bounds(dev,scsicmd,lba)) 1679 if (aac_adapter_bounds(dev,scsicmd,lba))
@@ -1688,6 +1715,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1688 int status; 1715 int status;
1689 struct aac_dev *dev; 1716 struct aac_dev *dev;
1690 struct fib * cmd_fibcontext; 1717 struct fib * cmd_fibcontext;
1718 int cid;
1691 1719
1692 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 1720 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1693 /* 1721 /*
@@ -1727,6 +1755,22 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1727 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1755 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1728 fua = scsicmd->cmnd[1] & 0x8; 1756 fua = scsicmd->cmnd[1] & 0x8;
1729 } 1757 }
1758
1759 if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
1760 cid = scmd_id(scsicmd);
1761 dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
1762 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1763 SAM_STAT_CHECK_CONDITION;
1764 set_sense(&dev->fsa_dev[cid].sense_data,
1765 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1766 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1767 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1768 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1769 SCSI_SENSE_BUFFERSIZE));
1770 scsicmd->scsi_done(scsicmd);
1771 return 1;
1772 }
1773
1730 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", 1774 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
1731 smp_processor_id(), (unsigned long long)lba, jiffies)); 1775 smp_processor_id(), (unsigned long long)lba, jiffies));
1732 if (aac_adapter_bounds(dev,scsicmd,lba)) 1776 if (aac_adapter_bounds(dev,scsicmd,lba))
@@ -2573,6 +2617,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2573 2617
2574 scsi_dma_unmap(scsicmd); 2618 scsi_dma_unmap(scsicmd);
2575 2619
2620 /* expose physical device if expose_physicald flag is on */
2621 if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
2622 && expose_physicals > 0)
2623 aac_expose_phy_device(scsicmd);
2624
2576 /* 2625 /*
2577 * First check the fib status 2626 * First check the fib status
2578 */ 2627 */
@@ -2678,8 +2727,22 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2678 scsicmd->cmnd[0], 2727 scsicmd->cmnd[0],
2679 le32_to_cpu(srbreply->scsi_status)); 2728 le32_to_cpu(srbreply->scsi_status));
2680#endif 2729#endif
2681 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; 2730 if ((scsicmd->cmnd[0] == ATA_12)
2682 break; 2731 || (scsicmd->cmnd[0] == ATA_16)) {
2732 if (scsicmd->cmnd[2] & (0x01 << 5)) {
2733 scsicmd->result = DID_OK << 16
2734 | COMMAND_COMPLETE << 8;
2735 break;
2736 } else {
2737 scsicmd->result = DID_ERROR << 16
2738 | COMMAND_COMPLETE << 8;
2739 break;
2740 }
2741 } else {
2742 scsicmd->result = DID_ERROR << 16
2743 | COMMAND_COMPLETE << 8;
2744 break;
2745 }
2683 } 2746 }
2684 if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) { 2747 if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
2685 int len; 2748 int len;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 619c02d9c862..4dbcc055ac78 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
12 *----------------------------------------------------------------------------*/ 12 *----------------------------------------------------------------------------*/
13 13
14#ifndef AAC_DRIVER_BUILD 14#ifndef AAC_DRIVER_BUILD
15# define AAC_DRIVER_BUILD 24702 15# define AAC_DRIVER_BUILD 26400
16# define AAC_DRIVER_BRANCH "-ms" 16# define AAC_DRIVER_BRANCH "-ms"
17#endif 17#endif
18#define MAXIMUM_NUM_CONTAINERS 32 18#define MAXIMUM_NUM_CONTAINERS 32
@@ -26,6 +26,8 @@
26#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff) 26#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
27#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256) 27#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256)
28 28
29#define AAC_DEBUG_INSTRUMENT_AIF_DELETE
30
29/* 31/*
30 * These macros convert from physical channels to virtual channels 32 * These macros convert from physical channels to virtual channels
31 */ 33 */
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 94d2954d79ae..70079146e203 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -966,6 +966,16 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
966 device_config_needed = 966 device_config_needed =
967 (((__le32 *)aifcmd->data)[0] == 967 (((__le32 *)aifcmd->data)[0] ==
968 cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE; 968 cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
969 if (device_config_needed == ADD) {
970 device = scsi_device_lookup(dev->scsi_host_ptr,
971 channel,
972 id,
973 lun);
974 if (device) {
975 scsi_remove_device(device);
976 scsi_device_put(device);
977 }
978 }
969 break; 979 break;
970 980
971 case AifEnEnclosureManagement: 981 case AifEnEnclosureManagement:
@@ -1123,6 +1133,9 @@ retry_next:
1123 if (device) { 1133 if (device) {
1124 switch (device_config_needed) { 1134 switch (device_config_needed) {
1125 case DELETE: 1135 case DELETE:
1136#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1137 scsi_remove_device(device);
1138#else
1126 if (scsi_device_online(device)) { 1139 if (scsi_device_online(device)) {
1127 scsi_device_set_state(device, SDEV_OFFLINE); 1140 scsi_device_set_state(device, SDEV_OFFLINE);
1128 sdev_printk(KERN_INFO, device, 1141 sdev_printk(KERN_INFO, device,
@@ -1131,6 +1144,7 @@ retry_next:
1131 "array deleted" : 1144 "array deleted" :
1132 "enclosure services event"); 1145 "enclosure services event");
1133 } 1146 }
1147#endif
1134 break; 1148 break;
1135 case ADD: 1149 case ADD:
1136 if (!scsi_device_online(device)) { 1150 if (!scsi_device_online(device)) {
@@ -1145,12 +1159,16 @@ retry_next:
1145 case CHANGE: 1159 case CHANGE:
1146 if ((channel == CONTAINER_CHANNEL) 1160 if ((channel == CONTAINER_CHANNEL)
1147 && (!dev->fsa_dev[container].valid)) { 1161 && (!dev->fsa_dev[container].valid)) {
1162#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1163 scsi_remove_device(device);
1164#else
1148 if (!scsi_device_online(device)) 1165 if (!scsi_device_online(device))
1149 break; 1166 break;
1150 scsi_device_set_state(device, SDEV_OFFLINE); 1167 scsi_device_set_state(device, SDEV_OFFLINE);
1151 sdev_printk(KERN_INFO, device, 1168 sdev_printk(KERN_INFO, device,
1152 "Device offlined - %s\n", 1169 "Device offlined - %s\n",
1153 "array failed"); 1170 "array failed");
1171#endif
1154 break; 1172 break;
1155 } 1173 }
1156 scsi_rescan_device(&device->sdev_gendev); 1174 scsi_rescan_device(&device->sdev_gendev);
diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
index 961fe439daad..53a616f5f50d 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim_macros.h
@@ -117,35 +117,6 @@ bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
117} 117}
118 118
119/** 119/**
120 * Get SG element for the I/O request given the SG element index
121 */
122static inline union bfi_addr_u
123bfa_cb_ioim_get_sgaddr(struct bfad_ioim_s *dio, int sgeid)
124{
125 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
126 struct scatterlist *sge;
127 u64 addr;
128
129 sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
130 addr = (u64) sg_dma_address(sge);
131
132 return *((union bfi_addr_u *) &addr);
133}
134
135static inline u32
136bfa_cb_ioim_get_sglen(struct bfad_ioim_s *dio, int sgeid)
137{
138 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
139 struct scatterlist *sge;
140 u32 len;
141
142 sge = (struct scatterlist *)scsi_sglist(cmnd) + sgeid;
143 len = sg_dma_len(sge);
144
145 return len;
146}
147
148/**
149 * Get Command Reference Number for the I/O request. 0 if none. 120 * Get Command Reference Number for the I/O request. 0 if none.
150 */ 121 */
151static inline u8 122static inline u8
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index 5b107abe46e5..687f3d6e252b 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -731,6 +731,9 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
731 static struct fcp_cmnd_s cmnd_z0 = { 0 }; 731 static struct fcp_cmnd_s cmnd_z0 = { 0 };
732 struct bfi_sge_s *sge; 732 struct bfi_sge_s *sge;
733 u32 pgdlen = 0; 733 u32 pgdlen = 0;
734 u64 addr;
735 struct scatterlist *sg;
736 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
734 737
735 /** 738 /**
736 * check for room in queue to send request now 739 * check for room in queue to send request now
@@ -754,8 +757,10 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
754 */ 757 */
755 sge = &m->sges[0]; 758 sge = &m->sges[0];
756 if (ioim->nsges) { 759 if (ioim->nsges) {
757 sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, 0); 760 sg = (struct scatterlist *)scsi_sglist(cmnd);
758 pgdlen = bfa_cb_ioim_get_sglen(ioim->dio, 0); 761 addr = bfa_os_sgaddr(sg_dma_address(sg));
762 sge->sga = *(union bfi_addr_u *) &addr;
763 pgdlen = sg_dma_len(sg);
759 sge->sg_len = pgdlen; 764 sge->sg_len = pgdlen;
760 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ? 765 sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
761 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST; 766 BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
@@ -868,10 +873,16 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
868 struct bfi_sge_s *sge; 873 struct bfi_sge_s *sge;
869 struct bfa_sgpg_s *sgpg; 874 struct bfa_sgpg_s *sgpg;
870 u32 pgcumsz; 875 u32 pgcumsz;
876 u64 addr;
877 struct scatterlist *sg;
878 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
871 879
872 sgeid = BFI_SGE_INLINE; 880 sgeid = BFI_SGE_INLINE;
873 ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q); 881 ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
874 882
883 sg = scsi_sglist(cmnd);
884 sg = sg_next(sg);
885
875 do { 886 do {
876 sge = sgpg->sgpg->sges; 887 sge = sgpg->sgpg->sges;
877 nsges = ioim->nsges - sgeid; 888 nsges = ioim->nsges - sgeid;
@@ -879,9 +890,10 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
879 nsges = BFI_SGPG_DATA_SGES; 890 nsges = BFI_SGPG_DATA_SGES;
880 891
881 pgcumsz = 0; 892 pgcumsz = 0;
882 for (i = 0; i < nsges; i++, sge++, sgeid++) { 893 for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
883 sge->sga = bfa_cb_ioim_get_sgaddr(ioim->dio, sgeid); 894 addr = bfa_os_sgaddr(sg_dma_address(sg));
884 sge->sg_len = bfa_cb_ioim_get_sglen(ioim->dio, sgeid); 895 sge->sga = *(union bfi_addr_u *) &addr;
896 sge->sg_len = sg_dma_len(sg);
885 pgcumsz += sge->sg_len; 897 pgcumsz += sge->sg_len;
886 898
887 /** 899 /**
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
index 10a89f75fa94..bd1cd3ee3022 100644
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -50,6 +50,10 @@
50#include <scsi/scsi_transport_fc.h> 50#include <scsi/scsi_transport_fc.h>
51#include <scsi/scsi_transport.h> 51#include <scsi/scsi_transport.h>
52 52
53#ifdef __BIG_ENDIAN
54#define __BIGENDIAN
55#endif
56
53#define BFA_ERR KERN_ERR 57#define BFA_ERR KERN_ERR
54#define BFA_WARNING KERN_WARNING 58#define BFA_WARNING KERN_WARNING
55#define BFA_NOTICE KERN_NOTICE 59#define BFA_NOTICE KERN_NOTICE
@@ -123,6 +127,15 @@ int bfa_os_MWB(void *);
123 (((_x) & 0x00ff0000) >> 8) | \ 127 (((_x) & 0x00ff0000) >> 8) | \
124 (((_x) & 0xff000000) >> 24)) 128 (((_x) & 0xff000000) >> 24))
125 129
130#define bfa_os_swap_sgaddr(_x) ((u64)( \
131 (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \
132 (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \
133 (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \
134 (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \
135 (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \
136 (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \
137 (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \
138 (((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
126 139
127#ifndef __BIGENDIAN 140#ifndef __BIGENDIAN
128#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \ 141#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
@@ -133,6 +146,7 @@ int bfa_os_MWB(void *);
133#define bfa_os_hton3b(_x) bfa_swap_3b(_x) 146#define bfa_os_hton3b(_x) bfa_swap_3b(_x)
134 147
135#define bfa_os_wtole(_x) (_x) 148#define bfa_os_wtole(_x) (_x)
149#define bfa_os_sgaddr(_x) (_x)
136 150
137#else 151#else
138 152
@@ -141,6 +155,7 @@ int bfa_os_MWB(void *);
141#define bfa_os_hton3b(_x) (_x) 155#define bfa_os_hton3b(_x) (_x)
142#define bfa_os_htonll(_x) (_x) 156#define bfa_os_htonll(_x) (_x)
143#define bfa_os_wtole(_x) bfa_os_swap32(_x) 157#define bfa_os_wtole(_x) bfa_os_swap32(_x)
158#define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x)
144 159
145#endif 160#endif
146 161
@@ -161,12 +176,12 @@ int bfa_os_MWB(void *);
161#define bfa_os_addr_t char __iomem * 176#define bfa_os_addr_t char __iomem *
162#define bfa_os_panic() 177#define bfa_os_panic()
163 178
164#define bfa_os_reg_read(_raddr) bfa_os_wtole(readl(_raddr)) 179#define bfa_os_reg_read(_raddr) readl(_raddr)
165#define bfa_os_reg_write(_raddr, _val) writel(bfa_os_wtole((_val)), (_raddr)) 180#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
166#define bfa_os_mem_read(_raddr, _off) \ 181#define bfa_os_mem_read(_raddr, _off) \
167 bfa_os_ntohl(readl(((_raddr) + (_off)))) 182 bfa_os_swap32(readl(((_raddr) + (_off))))
168#define bfa_os_mem_write(_raddr, _off, _val) \ 183#define bfa_os_mem_write(_raddr, _off, _val) \
169 writel(bfa_os_htonl((_val)), ((_raddr) + (_off))) 184 writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
170 185
171#define BFA_TRC_TS(_trcm) \ 186#define BFA_TRC_TS(_trcm) \
172 ({ \ 187 ({ \
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 13f5feb308c2..d4fc4287ebd3 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -33,7 +33,7 @@
33#include <fcb/bfa_fcb.h> 33#include <fcb/bfa_fcb.h>
34 34
35BFA_TRC_FILE(LDRV, BFAD); 35BFA_TRC_FILE(LDRV, BFAD);
36static DEFINE_MUTEX(bfad_mutex); 36DEFINE_MUTEX(bfad_mutex);
37LIST_HEAD(bfad_list); 37LIST_HEAD(bfad_list);
38static int bfad_inst; 38static int bfad_inst;
39int bfad_supported_fc4s; 39int bfad_supported_fc4s;
@@ -299,8 +299,6 @@ bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv)
299 complete(vport_drv->comp_del); 299 complete(vport_drv->comp_del);
300 return; 300 return;
301 } 301 }
302
303 kfree(vport_drv);
304} 302}
305 303
306/** 304/**
@@ -483,7 +481,7 @@ ext:
483 */ 481 */
484bfa_status_t 482bfa_status_t
485bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 483bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
486 struct bfa_port_cfg_s *port_cfg) 484 struct bfa_port_cfg_s *port_cfg, struct device *dev)
487{ 485{
488 struct bfad_vport_s *vport; 486 struct bfad_vport_s *vport;
489 int rc = BFA_STATUS_OK; 487 int rc = BFA_STATUS_OK;
@@ -506,7 +504,8 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
506 goto ext_free_vport; 504 goto ext_free_vport;
507 505
508 if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) { 506 if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) {
509 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port); 507 rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
508 dev);
510 if (rc != BFA_STATUS_OK) 509 if (rc != BFA_STATUS_OK)
511 goto ext_free_fcs_vport; 510 goto ext_free_fcs_vport;
512 } 511 }
@@ -591,7 +590,6 @@ bfad_init_timer(struct bfad_s *bfad)
591int 590int
592bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) 591bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
593{ 592{
594 unsigned long bar0_len;
595 int rc = -ENODEV; 593 int rc = -ENODEV;
596 594
597 if (pci_enable_device(pdev)) { 595 if (pci_enable_device(pdev)) {
@@ -611,9 +609,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
611 goto out_release_region; 609 goto out_release_region;
612 } 610 }
613 611
614 bfad->pci_bar0_map = pci_resource_start(pdev, 0); 612 bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
615 bar0_len = pci_resource_len(pdev, 0);
616 bfad->pci_bar0_kva = ioremap(bfad->pci_bar0_map, bar0_len);
617 613
618 if (bfad->pci_bar0_kva == NULL) { 614 if (bfad->pci_bar0_kva == NULL) {
619 BFA_PRINTF(BFA_ERR, "Fail to map bar0\n"); 615 BFA_PRINTF(BFA_ERR, "Fail to map bar0\n");
@@ -646,11 +642,7 @@ out:
646void 642void
647bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) 643bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
648{ 644{
649#if defined(__ia64__)
650 pci_iounmap(pdev, bfad->pci_bar0_kva); 645 pci_iounmap(pdev, bfad->pci_bar0_kva);
651#else
652 iounmap(bfad->pci_bar0_kva);
653#endif
654 pci_release_regions(pdev); 646 pci_release_regions(pdev);
655 pci_disable_device(pdev); 647 pci_disable_device(pdev);
656 pci_set_drvdata(pdev, NULL); 648 pci_set_drvdata(pdev, NULL);
@@ -848,7 +840,8 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
848 goto out; 840 goto out;
849 } 841 }
850 842
851 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port); 843 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
844 &bfad->pcidev->dev);
852 if (rc != BFA_STATUS_OK) 845 if (rc != BFA_STATUS_OK)
853 goto out; 846 goto out;
854 847
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 6a2efdd5ef24..e477bfbfa7d8 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -364,6 +364,152 @@ bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
364 364
365} 365}
366 366
367static int
368bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
369{
370 char *vname = fc_vport->symbolic_name;
371 struct Scsi_Host *shost = fc_vport->shost;
372 struct bfad_im_port_s *im_port =
373 (struct bfad_im_port_s *) shost->hostdata[0];
374 struct bfad_s *bfad = im_port->bfad;
375 struct bfa_port_cfg_s port_cfg;
376 int status = 0, rc;
377 unsigned long flags;
378
379 memset(&port_cfg, 0, sizeof(port_cfg));
380
381 port_cfg.pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
382 port_cfg.nwwn = wwn_to_u64((u8 *) &fc_vport->node_name);
383
384 if (strlen(vname) > 0)
385 strcpy((char *)&port_cfg.sym_name, vname);
386
387 port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
388 rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
389
390 if (rc == BFA_STATUS_OK) {
391 struct bfad_vport_s *vport;
392 struct bfa_fcs_vport_s *fcs_vport;
393 struct Scsi_Host *vshost;
394
395 spin_lock_irqsave(&bfad->bfad_lock, flags);
396 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
397 port_cfg.pwwn);
398 if (fcs_vport == NULL) {
399 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
400 return VPCERR_BAD_WWN;
401 }
402
403 fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
404 if (disable) {
405 bfa_fcs_vport_stop(fcs_vport);
406 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
407 }
408 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
409
410 vport = fcs_vport->vport_drv;
411 vshost = vport->drv_port.im_port->shost;
412 fc_host_node_name(vshost) = wwn_to_u64((u8 *) &port_cfg.nwwn);
413 fc_host_port_name(vshost) = wwn_to_u64((u8 *) &port_cfg.pwwn);
414 fc_vport->dd_data = vport;
415 vport->drv_port.im_port->fc_vport = fc_vport;
416
417 } else if (rc == BFA_STATUS_INVALID_WWN)
418 return VPCERR_BAD_WWN;
419 else if (rc == BFA_STATUS_VPORT_EXISTS)
420 return VPCERR_BAD_WWN;
421 else if (rc == BFA_STATUS_VPORT_MAX)
422 return VPCERR_NO_FABRIC_SUPP;
423 else if (rc == BFA_STATUS_VPORT_WWN_BP)
424 return VPCERR_BAD_WWN;
425 else
426 return FC_VPORT_FAILED;
427
428 return status;
429}
430
431static int
432bfad_im_vport_delete(struct fc_vport *fc_vport)
433{
434 struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
435 struct bfad_im_port_s *im_port =
436 (struct bfad_im_port_s *) vport->drv_port.im_port;
437 struct bfad_s *bfad = im_port->bfad;
438 struct bfad_port_s *port;
439 struct bfa_fcs_vport_s *fcs_vport;
440 struct Scsi_Host *vshost;
441 wwn_t pwwn;
442 int rc;
443 unsigned long flags;
444 struct completion fcomp;
445
446 if (im_port->flags & BFAD_PORT_DELETE)
447 goto free_scsi_host;
448
449 port = im_port->port;
450
451 vshost = vport->drv_port.im_port->shost;
452 pwwn = wwn_to_u64((u8 *) &fc_host_port_name(vshost));
453
454 spin_lock_irqsave(&bfad->bfad_lock, flags);
455 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
456 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
457
458 if (fcs_vport == NULL)
459 return VPCERR_BAD_WWN;
460
461 vport->drv_port.flags |= BFAD_PORT_DELETE;
462
463 vport->comp_del = &fcomp;
464 init_completion(vport->comp_del);
465
466 spin_lock_irqsave(&bfad->bfad_lock, flags);
467 rc = bfa_fcs_vport_delete(&vport->fcs_vport);
468 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
469
470 wait_for_completion(vport->comp_del);
471
472free_scsi_host:
473 bfad_os_scsi_host_free(bfad, im_port);
474
475 kfree(vport);
476
477 return 0;
478}
479
480static int
481bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
482{
483 struct bfad_vport_s *vport;
484 struct bfad_s *bfad;
485 struct bfa_fcs_vport_s *fcs_vport;
486 struct Scsi_Host *vshost;
487 wwn_t pwwn;
488 unsigned long flags;
489
490 vport = (struct bfad_vport_s *)fc_vport->dd_data;
491 bfad = vport->drv_port.bfad;
492 vshost = vport->drv_port.im_port->shost;
493 pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
494
495 spin_lock_irqsave(&bfad->bfad_lock, flags);
496 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
497 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
498
499 if (fcs_vport == NULL)
500 return VPCERR_BAD_WWN;
501
502 if (disable) {
503 bfa_fcs_vport_stop(fcs_vport);
504 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
505 } else {
506 bfa_fcs_vport_start(fcs_vport);
507 fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
508 }
509
510 return 0;
511}
512
367struct fc_function_template bfad_im_fc_function_template = { 513struct fc_function_template bfad_im_fc_function_template = {
368 514
369 /* Target dynamic attributes */ 515 /* Target dynamic attributes */
@@ -413,6 +559,61 @@ struct fc_function_template bfad_im_fc_function_template = {
413 .show_rport_dev_loss_tmo = 1, 559 .show_rport_dev_loss_tmo = 1,
414 .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo, 560 .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
415 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, 561 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
562
563 .vport_create = bfad_im_vport_create,
564 .vport_delete = bfad_im_vport_delete,
565 .vport_disable = bfad_im_vport_disable,
566};
567
568struct fc_function_template bfad_im_vport_fc_function_template = {
569
570 /* Target dynamic attributes */
571 .get_starget_port_id = bfad_im_get_starget_port_id,
572 .show_starget_port_id = 1,
573 .get_starget_node_name = bfad_im_get_starget_node_name,
574 .show_starget_node_name = 1,
575 .get_starget_port_name = bfad_im_get_starget_port_name,
576 .show_starget_port_name = 1,
577
578 /* Host dynamic attribute */
579 .get_host_port_id = bfad_im_get_host_port_id,
580 .show_host_port_id = 1,
581
582 /* Host fixed attributes */
583 .show_host_node_name = 1,
584 .show_host_port_name = 1,
585 .show_host_supported_classes = 1,
586 .show_host_supported_fc4s = 1,
587 .show_host_supported_speeds = 1,
588 .show_host_maxframe_size = 1,
589
590 /* More host dynamic attributes */
591 .show_host_port_type = 1,
592 .get_host_port_type = bfad_im_get_host_port_type,
593 .show_host_port_state = 1,
594 .get_host_port_state = bfad_im_get_host_port_state,
595 .show_host_active_fc4s = 1,
596 .get_host_active_fc4s = bfad_im_get_host_active_fc4s,
597 .show_host_speed = 1,
598 .get_host_speed = bfad_im_get_host_speed,
599 .show_host_fabric_name = 1,
600 .get_host_fabric_name = bfad_im_get_host_fabric_name,
601
602 .show_host_symbolic_name = 1,
603
604 /* Statistics */
605 .get_fc_host_stats = bfad_im_get_stats,
606 .reset_fc_host_stats = bfad_im_reset_stats,
607
608 /* Allocation length for host specific data */
609 .dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
610
611 /* Remote port fixed attributes */
612 .show_rport_maxframe_size = 1,
613 .show_rport_supported_classes = 1,
614 .show_rport_dev_loss_tmo = 1,
615 .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
616 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
416}; 617};
417 618
418/** 619/**
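
For context on the new code above: NPIV is switched on by giving the physical port's fc_function_template the three vport callbacks, while the separate vport template added below deliberately omits them, so a vport cannot spawn further vports. A minimal sketch of the wiring (callback names are from this patch; the attribute lists are elided):

        #include <scsi/scsi_transport_fc.h>

        /* physical-port template: exports the NPIV entry points */
        struct fc_function_template bfad_im_fc_function_template = {
                /* ... target/host attributes ... */
                .vport_create  = bfad_im_vport_create,
                .vport_delete  = bfad_im_vport_delete,
                .vport_disable = bfad_im_vport_disable,
        };

The FC transport class invokes vport_create() and vport_delete() from its sysfs interface, and vport_disable() for both the enable and disable transitions, which is why that callback carries a bool argument.
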
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 107848cd3b6d..6c920c1b53a4 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -162,7 +162,6 @@ struct bfad_s {
162 const char *pci_name; 162 const char *pci_name;
163 struct bfa_pcidev_s hal_pcidev; 163 struct bfa_pcidev_s hal_pcidev;
164 struct bfa_ioc_pci_attr_s pci_attr; 164 struct bfa_ioc_pci_attr_s pci_attr;
165 unsigned long pci_bar0_map;
166 void __iomem *pci_bar0_kva; 165 void __iomem *pci_bar0_kva;
167 struct completion comp; 166 struct completion comp;
168 struct completion suspend; 167 struct completion suspend;
@@ -254,7 +253,7 @@ do { \
254 253
255 254
256bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, 255bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
257 struct bfa_port_cfg_s *port_cfg); 256 struct bfa_port_cfg_s *port_cfg, struct device *dev);
258bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id, 257bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
259 struct bfa_port_cfg_s *port_cfg); 258 struct bfa_port_cfg_s *port_cfg);
260bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role); 259bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
@@ -294,5 +293,6 @@ extern struct list_head bfad_list;
294extern int bfa_lun_queue_depth; 293extern int bfa_lun_queue_depth;
295extern int bfad_supported_fc4s; 294extern int bfad_supported_fc4s;
296extern int bfa_linkup_delay; 295extern int bfa_linkup_delay;
296extern struct mutex bfad_mutex;
297 297
298#endif /* __BFAD_DRV_H__ */ 298#endif /* __BFAD_DRV_H__ */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 78f42aa57369..5b7cf539e50b 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -30,6 +30,7 @@ BFA_TRC_FILE(LDRV, IM);
30 30
31DEFINE_IDR(bfad_im_port_index); 31DEFINE_IDR(bfad_im_port_index);
32struct scsi_transport_template *bfad_im_scsi_transport_template; 32struct scsi_transport_template *bfad_im_scsi_transport_template;
33struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
33static void bfad_im_itnim_work_handler(struct work_struct *work); 34static void bfad_im_itnim_work_handler(struct work_struct *work);
34static int bfad_im_queuecommand(struct scsi_cmnd *cmnd, 35static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
35 void (*done)(struct scsi_cmnd *)); 36 void (*done)(struct scsi_cmnd *));
@@ -252,7 +253,6 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
252 struct bfa_itnim_s *bfa_itnim; 253 struct bfa_itnim_s *bfa_itnim;
253 bfa_status_t rc = BFA_STATUS_OK; 254 bfa_status_t rc = BFA_STATUS_OK;
254 255
255 bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
256 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); 256 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
257 if (!tskim) { 257 if (!tskim) {
258 BFA_DEV_PRINTF(bfad, BFA_ERR, 258 BFA_DEV_PRINTF(bfad, BFA_ERR,
@@ -513,11 +513,14 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
513 * Allocate a Scsi_Host for a port. 513 * Allocate a Scsi_Host for a port.
514 */ 514 */
515int 515int
516bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port) 516bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
517 struct device *dev)
517{ 518{
518 int error = 1; 519 int error = 1;
519 520
521 mutex_lock(&bfad_mutex);
520 if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) { 522 if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
523 mutex_unlock(&bfad_mutex);
521 printk(KERN_WARNING "idr_pre_get failure\n"); 524 printk(KERN_WARNING "idr_pre_get failure\n");
522 goto out; 525 goto out;
523 } 526 }
@@ -525,10 +528,13 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
525 error = idr_get_new(&bfad_im_port_index, im_port, 528 error = idr_get_new(&bfad_im_port_index, im_port,
526 &im_port->idr_id); 529 &im_port->idr_id);
527 if (error) { 530 if (error) {
531 mutex_unlock(&bfad_mutex);
528 printk(KERN_WARNING "idr_get_new failure\n"); 532 printk(KERN_WARNING "idr_get_new failure\n");
529 goto out; 533 goto out;
530 } 534 }
531 535
536 mutex_unlock(&bfad_mutex);
537
532 im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad); 538 im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
533 if (!im_port->shost) { 539 if (!im_port->shost) {
534 error = 1; 540 error = 1;
@@ -542,12 +548,15 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
542 im_port->shost->max_lun = MAX_FCP_LUN; 548 im_port->shost->max_lun = MAX_FCP_LUN;
543 im_port->shost->max_cmd_len = 16; 549 im_port->shost->max_cmd_len = 16;
544 im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth; 550 im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
545 im_port->shost->transportt = bfad_im_scsi_transport_template; 551 if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
552 im_port->shost->transportt = bfad_im_scsi_transport_template;
553 else
554 im_port->shost->transportt =
555 bfad_im_scsi_vport_transport_template;
546 556
547 error = bfad_os_scsi_add_host(im_port->shost, im_port, bfad); 557 error = scsi_add_host(im_port->shost, dev);
548 if (error) { 558 if (error) {
549 printk(KERN_WARNING "bfad_os_scsi_add_host failure %d\n", 559 printk(KERN_WARNING "scsi_add_host failure %d\n", error);
550 error);
551 goto out_fc_rel; 560 goto out_fc_rel;
552 } 561 }
553 562
@@ -559,7 +568,9 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
559out_fc_rel: 568out_fc_rel:
560 scsi_host_put(im_port->shost); 569 scsi_host_put(im_port->shost);
561out_free_idr: 570out_free_idr:
571 mutex_lock(&bfad_mutex);
562 idr_remove(&bfad_im_port_index, im_port->idr_id); 572 idr_remove(&bfad_im_port_index, im_port->idr_id);
573 mutex_unlock(&bfad_mutex);
563out: 574out:
564 return error; 575 return error;
565} 576}
@@ -567,8 +578,6 @@ out:
567void 578void
568bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) 579bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
569{ 580{
570 unsigned long flags;
571
572 bfa_trc(bfad, bfad->inst_no); 581 bfa_trc(bfad, bfad->inst_no);
573 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE, 582 bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE,
574 im_port->shost->host_no); 583 im_port->shost->host_no);
@@ -578,9 +587,9 @@ bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
578 scsi_remove_host(im_port->shost); 587 scsi_remove_host(im_port->shost);
579 scsi_host_put(im_port->shost); 588 scsi_host_put(im_port->shost);
580 589
581 spin_lock_irqsave(&bfad->bfad_lock, flags); 590 mutex_lock(&bfad_mutex);
582 idr_remove(&bfad_im_port_index, im_port->idr_id); 591 idr_remove(&bfad_im_port_index, im_port->idr_id);
583 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 592 mutex_unlock(&bfad_mutex);
584} 593}
585 594
586static void 595static void
@@ -589,9 +598,11 @@ bfad_im_port_delete_handler(struct work_struct *work)
589 struct bfad_im_port_s *im_port = 598 struct bfad_im_port_s *im_port =
590 container_of(work, struct bfad_im_port_s, port_delete_work); 599 container_of(work, struct bfad_im_port_s, port_delete_work);
591 600
592 bfad_im_scsi_host_free(im_port->bfad, im_port); 601 if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
593 bfad_im_port_clean(im_port); 602 im_port->flags |= BFAD_PORT_DELETE;
594 kfree(im_port); 603 fc_vport_terminate(im_port->fc_vport);
604 }
605
595} 606}
596 607
597bfa_status_t 608bfa_status_t
@@ -690,23 +701,6 @@ bfad_im_probe_undo(struct bfad_s *bfad)
690 } 701 }
691} 702}
692 703
693
694
695
696int
697bfad_os_scsi_add_host(struct Scsi_Host *shost, struct bfad_im_port_s *im_port,
698 struct bfad_s *bfad)
699{
700 struct device *dev;
701
702 if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
703 dev = &bfad->pcidev->dev;
704 else
705 dev = &bfad->pport.im_port->shost->shost_gendev;
706
707 return scsi_add_host(shost, dev);
708}
709
710struct Scsi_Host * 704struct Scsi_Host *
711bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) 705bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
712{ 706{
@@ -725,7 +719,8 @@ bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
725void 719void
726bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) 720bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
727{ 721{
728 flush_workqueue(bfad->im->drv_workq); 722 if (!(im_port->flags & BFAD_PORT_DELETE))
723 flush_workqueue(bfad->im->drv_workq);
729 bfad_im_scsi_host_free(im_port->bfad, im_port); 724 bfad_im_scsi_host_free(im_port->bfad, im_port);
730 bfad_im_port_clean(im_port); 725 bfad_im_port_clean(im_port);
731 kfree(im_port); 726 kfree(im_port);
@@ -830,6 +825,13 @@ bfad_im_module_init(void)
830 if (!bfad_im_scsi_transport_template) 825 if (!bfad_im_scsi_transport_template)
831 return BFA_STATUS_ENOMEM; 826 return BFA_STATUS_ENOMEM;
832 827
828 bfad_im_scsi_vport_transport_template =
829 fc_attach_transport(&bfad_im_vport_fc_function_template);
830 if (!bfad_im_scsi_vport_transport_template) {
831 fc_release_transport(bfad_im_scsi_transport_template);
832 return BFA_STATUS_ENOMEM;
833 }
834
833 return BFA_STATUS_OK; 835 return BFA_STATUS_OK;
834} 836}
835 837
@@ -838,6 +840,8 @@ bfad_im_module_exit(void)
838{ 840{
839 if (bfad_im_scsi_transport_template) 841 if (bfad_im_scsi_transport_template)
840 fc_release_transport(bfad_im_scsi_transport_template); 842 fc_release_transport(bfad_im_scsi_transport_template);
843 if (bfad_im_scsi_vport_transport_template)
844 fc_release_transport(bfad_im_scsi_vport_transport_template);
841} 845}
842 846
843void 847void
@@ -938,6 +942,7 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
938 bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port))); 942 bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
939 fc_host_port_name(host) = 943 fc_host_port_name(host) =
940 bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port))); 944 bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port)));
945 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
941 946
942 fc_host_supported_classes(host) = FC_COS_CLASS3; 947 fc_host_supported_classes(host) = FC_COS_CLASS3;
943 948
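
The bfad_mutex changes above follow the locking rule of the old two-step IDR API: idr_pre_get() only preloads memory, idr_get_new() hands out the ID, and neither call (nor idr_remove()) is internally locked, so all three must run under one lock. A self-contained sketch of the idiom (the names here are illustrative, not from the driver):

        #include <linux/idr.h>
        #include <linux/mutex.h>

        static DEFINE_MUTEX(example_mutex);
        static DEFINE_IDR(example_idr);

        static int example_alloc_id(void *ptr, int *id)
        {
                int error;

                mutex_lock(&example_mutex);
                if (!idr_pre_get(&example_idr, GFP_KERNEL)) {
                        /* idr_pre_get() returns 0 when preallocation fails */
                        mutex_unlock(&example_mutex);
                        return -ENOMEM;
                }
                error = idr_get_new(&example_idr, ptr, id); /* 0 on success */
                mutex_unlock(&example_mutex);
                return error;
        }

        static void example_free_id(int id)
        {
                mutex_lock(&example_mutex);
                idr_remove(&example_idr, id);
                mutex_unlock(&example_mutex);
        }

The patch also swaps the spinlock previously held around idr_remove() for the same mutex, so allocation and removal now serialize on a single sleeping lock.
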
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 85ab2da21321..973cab4d09c7 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -34,7 +34,7 @@ void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port);
34void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port); 34void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port);
35void bfad_im_port_clean(struct bfad_im_port_s *im_port); 35void bfad_im_port_clean(struct bfad_im_port_s *im_port);
36int bfad_im_scsi_host_alloc(struct bfad_s *bfad, 36int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
37 struct bfad_im_port_s *im_port); 37 struct bfad_im_port_s *im_port, struct device *dev);
38void bfad_im_scsi_host_free(struct bfad_s *bfad, 38void bfad_im_scsi_host_free(struct bfad_s *bfad,
39 struct bfad_im_port_s *im_port); 39 struct bfad_im_port_s *im_port);
40 40
@@ -64,9 +64,11 @@ struct bfad_im_port_s {
64 struct work_struct port_delete_work; 64 struct work_struct port_delete_work;
65 int idr_id; 65 int idr_id;
66 u16 cur_scsi_id; 66 u16 cur_scsi_id;
67 u16 flags;
67 struct list_head binding_list; 68 struct list_head binding_list;
68 struct Scsi_Host *shost; 69 struct Scsi_Host *shost;
69 struct list_head itnim_mapped_list; 70 struct list_head itnim_mapped_list;
71 struct fc_vport *fc_vport;
70}; 72};
71 73
72enum bfad_itnim_state { 74enum bfad_itnim_state {
@@ -140,6 +142,8 @@ void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
140extern struct scsi_host_template bfad_im_scsi_host_template; 142extern struct scsi_host_template bfad_im_scsi_host_template;
141extern struct scsi_host_template bfad_im_vport_template; 143extern struct scsi_host_template bfad_im_vport_template;
142extern struct fc_function_template bfad_im_fc_function_template; 144extern struct fc_function_template bfad_im_fc_function_template;
145extern struct fc_function_template bfad_im_vport_fc_function_template;
143extern struct scsi_transport_template *bfad_im_scsi_transport_template; 146extern struct scsi_transport_template *bfad_im_scsi_transport_template;
147extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
144 148
145#endif 149#endif
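
Taken together, the bfa hunks register two transport templates at module init and select one per host by port type. Condensed from the bfad_im.c changes above (error unwinding shortened):

        /* module init: attach one transport per template */
        bfad_im_scsi_transport_template =
                fc_attach_transport(&bfad_im_fc_function_template);
        bfad_im_scsi_vport_transport_template =
                fc_attach_transport(&bfad_im_vport_fc_function_template);

        /* host allocation: base ports get the template with vport ops */
        if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
                im_port->shost->transportt = bfad_im_scsi_transport_template;
        else
                im_port->shost->transportt =
                        bfad_im_scsi_vport_transport_template;
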
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 18352ff82101..3a66ca24c7bd 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -347,6 +347,7 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
347 347
348 login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn); 348 login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
349 login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); 349 login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
350 login_wqe->flags = ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN;
350 351
351 login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; 352 login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
352 login_wqe->resp_bd_list_addr_hi = 353 login_wqe->resp_bd_list_addr_hi =
@@ -356,7 +357,6 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
356 (bnx2i_conn->gen_pdu.resp_buf_size << 357 (bnx2i_conn->gen_pdu.resp_buf_size <<
357 ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); 358 ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
358 login_wqe->resp_buffer = dword; 359 login_wqe->resp_buffer = dword;
359 login_wqe->flags = 0;
360 login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; 360 login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
361 login_wqe->bd_list_addr_hi = 361 login_wqe->bd_list_addr_hi =
362 (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); 362 (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 5d9296c599f6..af6a00a600fb 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 17static u32 adapter_count;
18 18
19#define DRV_MODULE_NAME "bnx2i" 19#define DRV_MODULE_NAME "bnx2i"
20#define DRV_MODULE_VERSION "2.1.0" 20#define DRV_MODULE_VERSION "2.1.1"
21#define DRV_MODULE_RELDATE "Dec 06, 2009" 21#define DRV_MODULE_RELDATE "Mar 24, 2010"
22 22
23static char version[] __devinitdata = 23static char version[] __devinitdata =
24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -26,7 +26,8 @@ static char version[] __devinitdata =
26 26
27 27
28MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>"); 28MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
29MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver"); 29MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
30 " iSCSI Driver");
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31MODULE_VERSION(DRV_MODULE_VERSION); 32MODULE_VERSION(DRV_MODULE_VERSION);
32 33
@@ -289,6 +290,7 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
289 int rc; 290 int rc;
290 291
291 mutex_lock(&bnx2i_dev_lock); 292 mutex_lock(&bnx2i_dev_lock);
293 hba->cnic = cnic;
292 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); 294 rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
293 if (!rc) { 295 if (!rc) {
294 hba->age++; 296 hba->age++;
@@ -335,8 +337,7 @@ void bnx2i_ulp_init(struct cnic_dev *dev)
335 if (bnx2i_init_one(hba, dev)) { 337 if (bnx2i_init_one(hba, dev)) {
336 printk(KERN_ERR "bnx2i - hba %p init failed\n", hba); 338 printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
337 bnx2i_free_hba(hba); 339 bnx2i_free_hba(hba);
338 } else 340 }
339 hba->cnic = dev;
340} 341}
341 342
342 343
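
The bnx2i change is an ordering fix: presumably cnic->register_device() can immediately trigger callbacks that expect hba->cnic to be valid, so the pointer is now published before registration, inside the same critical section, rather than only after bnx2i_init_one() returns successfully. As the patch leaves it:

        mutex_lock(&bnx2i_dev_lock);
        hba->cnic = cnic;       /* must be valid before callbacks can fire */
        rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
        if (!rc)
                hba->age++;
        /* ... */
        mutex_unlock(&bnx2i_dev_lock);
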
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index d0ab23a58355..685af3698518 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -104,8 +104,10 @@ static int __init cxgb3i_init_module(void)
104 return err; 104 return err;
105 105
106 err = cxgb3i_pdu_init(); 106 err = cxgb3i_pdu_init();
107 if (err < 0) 107 if (err < 0) {
108 cxgb3i_iscsi_cleanup();
108 return err; 109 return err;
110 }
109 111
110 cxgb3_register_client(&t3c_client); 112 cxgb3_register_client(&t3c_client);
111 113
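
The cxgb3i hunk plugs a leak in module init: when the second init step fails, the first must now be unwound before returning. A reconstructed shape of the function; the first step, cxgb3i_iscsi_init(), is not shown in this hunk and is assumed from context:

        static int __init cxgb3i_init_module(void)
        {
                int err;

                err = cxgb3i_iscsi_init();      /* assumed first step */
                if (err < 0)
                        return err;

                err = cxgb3i_pdu_init();
                if (err < 0) {
                        cxgb3i_iscsi_cleanup(); /* undo step one */
                        return err;
                }

                cxgb3_register_client(&t3c_client);
                return 0;
        }
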
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e8a0bc3efd49..6faf472f7537 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -285,13 +285,11 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
285 switch (cmd) { 285 switch (cmd) {
286 case MODE_SELECT: 286 case MODE_SELECT:
287 len = sizeof(short_trespass); 287 len = sizeof(short_trespass);
288 rq->cmd_flags |= REQ_RW;
289 rq->cmd[1] = 0x10; 288 rq->cmd[1] = 0x10;
290 rq->cmd[4] = len; 289 rq->cmd[4] = len;
291 break; 290 break;
292 case MODE_SELECT_10: 291 case MODE_SELECT_10:
293 len = sizeof(long_trespass); 292 len = sizeof(long_trespass);
294 rq->cmd_flags |= REQ_RW;
295 rq->cmd[1] = 0x10; 293 rq->cmd[1] = 0x10;
296 rq->cmd[8] = len; 294 rq->cmd[8] = len;
297 break; 295 break;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f01b9b44e8aa..ba75a98c960c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -74,6 +74,7 @@ static int fcoe_rcv(struct sk_buff *, struct net_device *,
74static int fcoe_percpu_receive_thread(void *); 74static int fcoe_percpu_receive_thread(void *);
75static void fcoe_clean_pending_queue(struct fc_lport *); 75static void fcoe_clean_pending_queue(struct fc_lport *);
76static void fcoe_percpu_clean(struct fc_lport *); 76static void fcoe_percpu_clean(struct fc_lport *);
77static int fcoe_link_speed_update(struct fc_lport *);
77static int fcoe_link_ok(struct fc_lport *); 78static int fcoe_link_ok(struct fc_lport *);
78 79
79static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); 80static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
@@ -146,6 +147,7 @@ static int fcoe_vport_destroy(struct fc_vport *);
146static int fcoe_vport_create(struct fc_vport *, bool disabled); 147static int fcoe_vport_create(struct fc_vport *, bool disabled);
147static int fcoe_vport_disable(struct fc_vport *, bool disable); 148static int fcoe_vport_disable(struct fc_vport *, bool disable);
148static void fcoe_set_vport_symbolic_name(struct fc_vport *); 149static void fcoe_set_vport_symbolic_name(struct fc_vport *);
150static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
149 151
150static struct libfc_function_template fcoe_libfc_fcn_templ = { 152static struct libfc_function_template fcoe_libfc_fcn_templ = {
151 .frame_send = fcoe_xmit, 153 .frame_send = fcoe_xmit,
@@ -153,6 +155,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
153 .ddp_done = fcoe_ddp_done, 155 .ddp_done = fcoe_ddp_done,
154 .elsct_send = fcoe_elsct_send, 156 .elsct_send = fcoe_elsct_send,
155 .get_lesb = fcoe_get_lesb, 157 .get_lesb = fcoe_get_lesb,
158 .lport_set_port_id = fcoe_set_port_id,
156}; 159};
157 160
158struct fc_function_template fcoe_transport_function = { 161struct fc_function_template fcoe_transport_function = {
@@ -629,6 +632,8 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
629 port->fcoe_pending_queue_active = 0; 632 port->fcoe_pending_queue_active = 0;
630 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); 633 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
631 634
635 fcoe_link_speed_update(lport);
636
632 if (!lport->vport) { 637 if (!lport->vport) {
633 /* 638 /*
634 * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN: 639 * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN:
@@ -653,15 +658,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
653/** 658/**
654 * fcoe_shost_config() - Set up the SCSI host associated with a local port 659 * fcoe_shost_config() - Set up the SCSI host associated with a local port
655 * @lport: The local port 660 * @lport: The local port
656 * @shost: The SCSI host to associate with the local port
657 * @dev: The device associated with the SCSI host 661 * @dev: The device associated with the SCSI host
658 * 662 *
659 * Must be called after fcoe_lport_config() and fcoe_netdev_config() 663 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
660 * 664 *
661 * Returns: 0 for success 665 * Returns: 0 for success
662 */ 666 */
663static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost, 667static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
664 struct device *dev)
665{ 668{
666 int rc = 0; 669 int rc = 0;
667 670
@@ -669,6 +672,8 @@ static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
669 lport->host->max_lun = FCOE_MAX_LUN; 672 lport->host->max_lun = FCOE_MAX_LUN;
670 lport->host->max_id = FCOE_MAX_FCP_TARGET; 673 lport->host->max_id = FCOE_MAX_FCP_TARGET;
671 lport->host->max_channel = 0; 674 lport->host->max_channel = 0;
675 lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
676
672 if (lport->vport) 677 if (lport->vport)
673 lport->host->transportt = fcoe_vport_transport_template; 678 lport->host->transportt = fcoe_vport_transport_template;
674 else 679 else
@@ -796,6 +801,12 @@ skip_oem:
796/** 801/**
797 * fcoe_if_destroy() - Tear down a SW FCoE instance 802 * fcoe_if_destroy() - Tear down a SW FCoE instance
798 * @lport: The local port to be destroyed 803 * @lport: The local port to be destroyed
804 *
805 * Locking: must be called with the RTNL mutex held. The RTNL mutex
806 * is then dropped by this function, since keeping it held across the
807 * synchronous FIP worker cancellation done via fcoe_interface_put()
808 * would trigger a circular-locking warning.
809 *
799 */ 810 */
800static void fcoe_if_destroy(struct fc_lport *lport) 811static void fcoe_if_destroy(struct fc_lport *lport)
801{ 812{
@@ -818,7 +829,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
818 /* Free existing transmit skbs */ 829 /* Free existing transmit skbs */
819 fcoe_clean_pending_queue(lport); 830 fcoe_clean_pending_queue(lport);
820 831
821 rtnl_lock();
822 if (!is_zero_ether_addr(port->data_src_addr)) 832 if (!is_zero_ether_addr(port->data_src_addr))
823 dev_unicast_delete(netdev, port->data_src_addr); 833 dev_unicast_delete(netdev, port->data_src_addr);
824 rtnl_unlock(); 834 rtnl_unlock();
@@ -841,6 +851,7 @@ static void fcoe_if_destroy(struct fc_lport *lport)
841 851
842 /* Release the Scsi_Host */ 852 /* Release the Scsi_Host */
843 scsi_host_put(lport->host); 853 scsi_host_put(lport->host);
854 module_put(THIS_MODULE);
844} 855}
845 856
846/** 857/**
@@ -897,7 +908,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
897 struct net_device *netdev = fcoe->netdev; 908 struct net_device *netdev = fcoe->netdev;
898 struct fc_lport *lport = NULL; 909 struct fc_lport *lport = NULL;
899 struct fcoe_port *port; 910 struct fcoe_port *port;
900 struct Scsi_Host *shost;
901 int rc; 911 int rc;
902 /* 912 /*
903 * parent is only a vport if npiv is 1, 913 * parent is only a vport if npiv is 1,
@@ -919,7 +929,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
919 rc = -ENOMEM; 929 rc = -ENOMEM;
920 goto out; 930 goto out;
921 } 931 }
922 shost = lport->host;
923 port = lport_priv(lport); 932 port = lport_priv(lport);
924 port->lport = lport; 933 port->lport = lport;
925 port->fcoe = fcoe; 934 port->fcoe = fcoe;
@@ -934,7 +943,8 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
934 } 943 }
935 944
936 if (npiv) { 945 if (npiv) {
937 FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n", 946 FCOE_NETDEV_DBG(netdev, "Setting vport names, "
947 "%16.16llx %16.16llx\n",
938 vport->node_name, vport->port_name); 948 vport->node_name, vport->port_name);
939 fc_set_wwnn(lport, vport->node_name); 949 fc_set_wwnn(lport, vport->node_name);
940 fc_set_wwpn(lport, vport->port_name); 950 fc_set_wwpn(lport, vport->port_name);
@@ -949,7 +959,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
949 } 959 }
950 960
951 /* configure lport scsi host properties */ 961 /* configure lport scsi host properties */
952 rc = fcoe_shost_config(lport, shost, parent); 962 rc = fcoe_shost_config(lport, parent);
953 if (rc) { 963 if (rc) {
954 FCOE_NETDEV_DBG(netdev, "Could not configure shost for the " 964 FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
955 "interface\n"); 965 "interface\n");
@@ -1073,7 +1083,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
1073 struct sk_buff *skb; 1083 struct sk_buff *skb;
1074#ifdef CONFIG_SMP 1084#ifdef CONFIG_SMP
1075 struct fcoe_percpu_s *p0; 1085 struct fcoe_percpu_s *p0;
1076 unsigned targ_cpu = smp_processor_id(); 1086 unsigned targ_cpu = get_cpu();
1077#endif /* CONFIG_SMP */ 1087#endif /* CONFIG_SMP */
1078 1088
1079 FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); 1089 FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
@@ -1129,6 +1139,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
1129 kfree_skb(skb); 1139 kfree_skb(skb);
1130 spin_unlock_bh(&p->fcoe_rx_list.lock); 1140 spin_unlock_bh(&p->fcoe_rx_list.lock);
1131 } 1141 }
1142 put_cpu();
1132#else 1143#else
1133 /* 1144 /*
1134 * This a non-SMP scenario where the singular Rx thread is 1145 * This a non-SMP scenario where the singular Rx thread is
@@ -1297,8 +1308,8 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1297 1308
1298 return 0; 1309 return 0;
1299err: 1310err:
1300 fc_lport_get_stats(lport)->ErrorFrames++; 1311 per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
1301 1312 put_cpu();
1302err2: 1313err2:
1303 kfree_skb(skb); 1314 kfree_skb(skb);
1304 return -1; 1315 return -1;
@@ -1444,7 +1455,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1444 return 0; 1455 return 0;
1445 } 1456 }
1446 1457
1447 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && 1458 if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
1448 fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb)) 1459 fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
1449 return 0; 1460 return 0;
1450 1461
@@ -1527,9 +1538,10 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1527 skb_shinfo(skb)->gso_size = 0; 1538 skb_shinfo(skb)->gso_size = 0;
1528 } 1539 }
1529 /* update tx stats: regardless if LLD fails */ 1540 /* update tx stats: regardless if LLD fails */
1530 stats = fc_lport_get_stats(lport); 1541 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1531 stats->TxFrames++; 1542 stats->TxFrames++;
1532 stats->TxWords += wlen; 1543 stats->TxWords += wlen;
1544 put_cpu();
1533 1545
1534 /* send down to lld */ 1546 /* send down to lld */
1535 fr_dev(fp) = lport; 1547 fr_dev(fp) = lport;
@@ -1563,7 +1575,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
1563 struct fc_frame_header *fh; 1575 struct fc_frame_header *fh;
1564 struct fcoe_crc_eof crc_eof; 1576 struct fcoe_crc_eof crc_eof;
1565 struct fc_frame *fp; 1577 struct fc_frame *fp;
1566 u8 *mac = NULL;
1567 struct fcoe_port *port; 1578 struct fcoe_port *port;
1568 struct fcoe_hdr *hp; 1579 struct fcoe_hdr *hp;
1569 1580
@@ -1583,13 +1594,9 @@ static void fcoe_recv_frame(struct sk_buff *skb)
1583 skb_end_pointer(skb), skb->csum, 1594 skb_end_pointer(skb), skb->csum,
1584 skb->dev ? skb->dev->name : "<NULL>"); 1595 skb->dev ? skb->dev->name : "<NULL>");
1585 1596
1586 /*
1587 * Save source MAC address before discarding header.
1588 */
1589 port = lport_priv(lport); 1597 port = lport_priv(lport);
1590 if (skb_is_nonlinear(skb)) 1598 if (skb_is_nonlinear(skb))
1591 skb_linearize(skb); /* not ideal */ 1599 skb_linearize(skb); /* not ideal */
1592 mac = eth_hdr(skb)->h_source;
1593 1600
1594 /* 1601 /*
1595 * Frame length checks and setting up the header pointers 1602 * Frame length checks and setting up the header pointers
@@ -1598,7 +1605,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
1598 hp = (struct fcoe_hdr *) skb_network_header(skb); 1605 hp = (struct fcoe_hdr *) skb_network_header(skb);
1599 fh = (struct fc_frame_header *) skb_transport_header(skb); 1606 fh = (struct fc_frame_header *) skb_transport_header(skb);
1600 1607
1601 stats = fc_lport_get_stats(lport); 1608 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1602 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { 1609 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1603 if (stats->ErrorFrames < 5) 1610 if (stats->ErrorFrames < 5)
1604 printk(KERN_WARNING "fcoe: FCoE version " 1611 printk(KERN_WARNING "fcoe: FCoE version "
@@ -1607,9 +1614,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
1607 "initiator supports version " 1614 "initiator supports version "
1608 "%x\n", FC_FCOE_DECAPS_VER(hp), 1615 "%x\n", FC_FCOE_DECAPS_VER(hp),
1609 FC_FCOE_VER); 1616 FC_FCOE_VER);
1610 stats->ErrorFrames++; 1617 goto drop;
1611 kfree_skb(skb);
1612 return;
1613 } 1618 }
1614 1619
1615 skb_pull(skb, sizeof(struct fcoe_hdr)); 1620 skb_pull(skb, sizeof(struct fcoe_hdr));
@@ -1624,16 +1629,12 @@ static void fcoe_recv_frame(struct sk_buff *skb)
1624 fr_sof(fp) = hp->fcoe_sof; 1629 fr_sof(fp) = hp->fcoe_sof;
1625 1630
1626 /* Copy out the CRC and EOF trailer for access */ 1631 /* Copy out the CRC and EOF trailer for access */
1627 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { 1632 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
1628 kfree_skb(skb); 1633 goto drop;
1629 return;
1630 }
1631 fr_eof(fp) = crc_eof.fcoe_eof; 1634 fr_eof(fp) = crc_eof.fcoe_eof;
1632 fr_crc(fp) = crc_eof.fcoe_crc32; 1635 fr_crc(fp) = crc_eof.fcoe_crc32;
1633 if (pskb_trim(skb, fr_len)) { 1636 if (pskb_trim(skb, fr_len))
1634 kfree_skb(skb); 1637 goto drop;
1635 return;
1636 }
1637 1638
1638 /* 1639 /*
1639 * We only check CRC if no offload is available and if it is 1640 * We only check CRC if no offload is available and if it is
@@ -1647,25 +1648,27 @@ static void fcoe_recv_frame(struct sk_buff *skb)
1647 fr_flags(fp) |= FCPHF_CRC_UNCHECKED; 1648 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1648 1649
1649 fh = fc_frame_header_get(fp); 1650 fh = fc_frame_header_get(fp);
1650 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && 1651 if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA ||
1651 fh->fh_type == FC_TYPE_FCP) { 1652 fh->fh_type != FC_TYPE_FCP) &&
1652 fc_exch_recv(lport, fp); 1653 (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
1653 return;
1654 }
1655 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1656 if (le32_to_cpu(fr_crc(fp)) != 1654 if (le32_to_cpu(fr_crc(fp)) !=
1657 ~crc32(~0, skb->data, fr_len)) { 1655 ~crc32(~0, skb->data, fr_len)) {
1658 if (stats->InvalidCRCCount < 5) 1656 if (stats->InvalidCRCCount < 5)
1659 printk(KERN_WARNING "fcoe: dropping " 1657 printk(KERN_WARNING "fcoe: dropping "
1660 "frame with CRC error\n"); 1658 "frame with CRC error\n");
1661 stats->InvalidCRCCount++; 1659 stats->InvalidCRCCount++;
1662 stats->ErrorFrames++; 1660 goto drop;
1663 fc_frame_free(fp);
1664 return;
1665 } 1661 }
1666 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; 1662 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1667 } 1663 }
1664 put_cpu();
1668 fc_exch_recv(lport, fp); 1665 fc_exch_recv(lport, fp);
1666 return;
1667
1668drop:
1669 stats->ErrorFrames++;
1670 put_cpu();
1671 kfree_skb(skb);
1669} 1672}
1670 1673
1671/** 1674/**
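
The recurring substitution through fcoe.c replaces the fc_lport_get_stats() wrapper with an explicit per-CPU access. get_cpu() disables preemption, which keeps the task on the CPU whose statistics block it is updating until put_cpu(), so lockless increments on the per-CPU counters are safe. The idiom in isolation (a minimal sketch; fcoe_dev_stats and the dev_stats field come from libfc):

        #include <linux/percpu.h>
        #include <scsi/libfc.h>

        static void count_error_frame(struct fc_lport *lport)
        {
                struct fcoe_dev_stats *stats;

                /* pin to the current CPU while touching its counters */
                stats = per_cpu_ptr(lport->dev_stats, get_cpu());
                stats->ErrorFrames++;
                put_cpu();
        }
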
@@ -1835,11 +1838,15 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1835 FCOE_NETDEV_DBG(netdev, "Unknown event %ld " 1838 FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1836 "from netdev netlink\n", event); 1839 "from netdev netlink\n", event);
1837 } 1840 }
1841
1842 fcoe_link_speed_update(lport);
1843
1838 if (link_possible && !fcoe_link_ok(lport)) 1844 if (link_possible && !fcoe_link_ok(lport))
1839 fcoe_ctlr_link_up(&fcoe->ctlr); 1845 fcoe_ctlr_link_up(&fcoe->ctlr);
1840 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { 1846 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1841 stats = fc_lport_get_stats(lport); 1847 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1842 stats->LinkFailureCount++; 1848 stats->LinkFailureCount++;
1849 put_cpu();
1843 fcoe_clean_pending_queue(lport); 1850 fcoe_clean_pending_queue(lport);
1844 } 1851 }
1845out: 1852out:
@@ -1901,13 +1908,19 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
1901 goto out_nodev; 1908 goto out_nodev;
1902 } 1909 }
1903 1910
1904 rtnl_lock(); 1911 if (!rtnl_trylock()) {
1912 dev_put(netdev);
1913 mutex_unlock(&fcoe_config_mutex);
1914 return restart_syscall();
1915 }
1916
1905 fcoe = fcoe_hostlist_lookup_port(netdev); 1917 fcoe = fcoe_hostlist_lookup_port(netdev);
1906 rtnl_unlock(); 1918 rtnl_unlock();
1907 1919
1908 if (fcoe) 1920 if (fcoe) {
1909 fc_fabric_logoff(fcoe->ctlr.lp); 1921 fc_fabric_logoff(fcoe->ctlr.lp);
1910 else 1922 fcoe_ctlr_link_down(&fcoe->ctlr);
1923 } else
1911 rc = -ENODEV; 1924 rc = -ENODEV;
1912 1925
1913 dev_put(netdev); 1926 dev_put(netdev);
@@ -1950,13 +1963,20 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
1950 goto out_nodev; 1963 goto out_nodev;
1951 } 1964 }
1952 1965
1953 rtnl_lock(); 1966 if (!rtnl_trylock()) {
1967 dev_put(netdev);
1968 mutex_unlock(&fcoe_config_mutex);
1969 return restart_syscall();
1970 }
1971
1954 fcoe = fcoe_hostlist_lookup_port(netdev); 1972 fcoe = fcoe_hostlist_lookup_port(netdev);
1955 rtnl_unlock(); 1973 rtnl_unlock();
1956 1974
1957 if (fcoe) 1975 if (fcoe) {
1976 if (!fcoe_link_ok(fcoe->ctlr.lp))
1977 fcoe_ctlr_link_up(&fcoe->ctlr);
1958 rc = fc_fabric_login(fcoe->ctlr.lp); 1978 rc = fc_fabric_login(fcoe->ctlr.lp);
1959 else 1979 } else
1960 rc = -ENODEV; 1980 rc = -ENODEV;
1961 1981
1962 dev_put(netdev); 1982 dev_put(netdev);
@@ -1999,7 +2019,12 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1999 goto out_nodev; 2019 goto out_nodev;
2000 } 2020 }
2001 2021
2002 rtnl_lock(); 2022 if (!rtnl_trylock()) {
2023 dev_put(netdev);
2024 mutex_unlock(&fcoe_config_mutex);
2025 return restart_syscall();
2026 }
2027
2003 fcoe = fcoe_hostlist_lookup_port(netdev); 2028 fcoe = fcoe_hostlist_lookup_port(netdev);
2004 if (!fcoe) { 2029 if (!fcoe) {
2005 rtnl_unlock(); 2030 rtnl_unlock();
@@ -2008,9 +2033,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
2008 } 2033 }
2009 list_del(&fcoe->list); 2034 list_del(&fcoe->list);
2010 fcoe_interface_cleanup(fcoe); 2035 fcoe_interface_cleanup(fcoe);
2011 rtnl_unlock(); 2036 /* RTNL mutex is dropped by fcoe_if_destroy */
2012 fcoe_if_destroy(fcoe->ctlr.lp); 2037 fcoe_if_destroy(fcoe->ctlr.lp);
2013 module_put(THIS_MODULE);
2014 2038
2015out_putdev: 2039out_putdev:
2016 dev_put(netdev); 2040 dev_put(netdev);
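
fcoe_create/destroy/enable/disable all adopt the same deadlock-avoidance idiom shown above: with fcoe_config_mutex already held, blocking on the RTNL mutex could deadlock against a path taking the two locks in the opposite order, so on contention every lock is dropped and the syscall is replayed. Schematically:

        mutex_lock(&fcoe_config_mutex);
        if (!rtnl_trylock()) {
                /* back out completely and let the kernel retry the syscall */
                mutex_unlock(&fcoe_config_mutex);
                return restart_syscall();
        }
        /* ... work under both locks ... */
        rtnl_unlock();
        mutex_unlock(&fcoe_config_mutex);
        return 0;

restart_syscall() flags the task so the call returns -ERESTARTNOINTR and is re-executed by the kernel, invisibly to userspace.
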
@@ -2029,6 +2053,8 @@ static void fcoe_destroy_work(struct work_struct *work)
2029 2053
2030 port = container_of(work, struct fcoe_port, destroy_work); 2054 port = container_of(work, struct fcoe_port, destroy_work);
2031 mutex_lock(&fcoe_config_mutex); 2055 mutex_lock(&fcoe_config_mutex);
2056 rtnl_lock();
2057 /* RTNL mutex is dropped by fcoe_if_destroy */
2032 fcoe_if_destroy(port->lport); 2058 fcoe_if_destroy(port->lport);
2033 mutex_unlock(&fcoe_config_mutex); 2059 mutex_unlock(&fcoe_config_mutex);
2034} 2060}
@@ -2050,6 +2076,12 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
2050 struct net_device *netdev; 2076 struct net_device *netdev;
2051 2077
2052 mutex_lock(&fcoe_config_mutex); 2078 mutex_lock(&fcoe_config_mutex);
2079
2080 if (!rtnl_trylock()) {
2081 mutex_unlock(&fcoe_config_mutex);
2082 return restart_syscall();
2083 }
2084
2053#ifdef CONFIG_FCOE_MODULE 2085#ifdef CONFIG_FCOE_MODULE
2054 /* 2086 /*
2055 * Make sure the module has been initialized, and is not about to be 2087 * Make sure the module has been initialized, and is not about to be
@@ -2058,7 +2090,7 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
2058 */ 2090 */
2059 if (THIS_MODULE->state != MODULE_STATE_LIVE) { 2091 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
2060 rc = -ENODEV; 2092 rc = -ENODEV;
2061 goto out_nodev; 2093 goto out_nomod;
2062 } 2094 }
2063#endif 2095#endif
2064 2096
@@ -2067,7 +2099,6 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
2067 goto out_nomod; 2099 goto out_nomod;
2068 } 2100 }
2069 2101
2070 rtnl_lock();
2071 netdev = fcoe_if_to_netdev(buffer); 2102 netdev = fcoe_if_to_netdev(buffer);
2072 if (!netdev) { 2103 if (!netdev) {
2073 rc = -ENODEV; 2104 rc = -ENODEV;
@@ -2122,35 +2153,27 @@ out_free:
2122out_putdev: 2153out_putdev:
2123 dev_put(netdev); 2154 dev_put(netdev);
2124out_nodev: 2155out_nodev:
2125 rtnl_unlock();
2126 module_put(THIS_MODULE); 2156 module_put(THIS_MODULE);
2127out_nomod: 2157out_nomod:
2158 rtnl_unlock();
2128 mutex_unlock(&fcoe_config_mutex); 2159 mutex_unlock(&fcoe_config_mutex);
2129 return rc; 2160 return rc;
2130} 2161}
2131 2162
2132/** 2163/**
2133 * fcoe_link_ok() - Check if the link is OK for a local port 2164 * fcoe_link_speed_update() - Update the supported and actual link speeds
2134 * @lport: The local port to check link on 2165 * @lport: The local port to update speeds for
2135 *
2136 * Any permanently-disqualifying conditions have been previously checked.
2137 * This also updates the speed setting, which may change with link for 100/1000.
2138 *
2139 * This function should probably be checking for PAUSE support at some point
2140 * in the future. Currently Per-priority-pause is not determinable using
2141 * ethtool, so we shouldn't be restrictive until that problem is resolved.
2142 *
2143 * Returns: 0 if link is OK for use by FCoE.
2144 * 2166 *
2167 * Returns: 0 if the ethtool query was successful
2168 * -1 if the ethtool query failed
2145 */ 2169 */
2146int fcoe_link_ok(struct fc_lport *lport) 2170int fcoe_link_speed_update(struct fc_lport *lport)
2147{ 2171{
2148 struct fcoe_port *port = lport_priv(lport); 2172 struct fcoe_port *port = lport_priv(lport);
2149 struct net_device *netdev = port->fcoe->netdev; 2173 struct net_device *netdev = port->fcoe->netdev;
2150 struct ethtool_cmd ecmd = { ETHTOOL_GSET }; 2174 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
2151 2175
2152 if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) && 2176 if (!dev_ethtool_get_settings(netdev, &ecmd)) {
2153 (!dev_ethtool_get_settings(netdev, &ecmd))) {
2154 lport->link_supported_speeds &= 2177 lport->link_supported_speeds &=
2155 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); 2178 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
2156 if (ecmd.supported & (SUPPORTED_1000baseT_Half | 2179 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@ -2170,6 +2193,23 @@ int fcoe_link_ok(struct fc_lport *lport)
2170} 2193}
2171 2194
2172/** 2195/**
2196 * fcoe_link_ok() - Check if the link is OK for a local port
2197 * @lport: The local port to check link on
2198 *
2199 * Returns: 0 if link is UP and OK, -1 if not
2200 *
2201 */
2202int fcoe_link_ok(struct fc_lport *lport)
2203{
2204 struct fcoe_port *port = lport_priv(lport);
2205 struct net_device *netdev = port->fcoe->netdev;
2206
2207 if (netif_oper_up(netdev))
2208 return 0;
2209 return -1;
2210}
2211
2212/**
2173 * fcoe_percpu_clean() - Clear all pending skbs for an local port 2213 * fcoe_percpu_clean() - Clear all pending skbs for an local port
2174 * @lport: The local port whose skbs are to be cleared 2214 * @lport: The local port whose skbs are to be cleared
2175 * 2215 *
@@ -2631,3 +2671,25 @@ static void fcoe_get_lesb(struct fc_lport *lport,
2631 lesb->lesb_miss_fka = htonl(mdac); 2671 lesb->lesb_miss_fka = htonl(mdac);
2632 lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors); 2672 lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
2633} 2673}
2674
2675/**
2676 * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
2677 * @lport: the local port
2678 * @port_id: the port ID
2679 * @fp: the received frame, if any, that caused the port_id to be set.
2680 *
2681 * This routine handles the case where we received a FLOGI and are
2682 * entering point-to-point mode. We need to call fcoe_ctlr_recv_flogi()
2683 * so it can set the non-mapped mode and gateway address.
2684 *
2685 * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
2686 */
2687static void fcoe_set_port_id(struct fc_lport *lport,
2688 u32 port_id, struct fc_frame *fp)
2689{
2690 struct fcoe_port *port = lport_priv(lport);
2691 struct fcoe_interface *fcoe = port->fcoe;
2692
2693 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2694 fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
2695}
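
fcoe_set_port_id() reaches libfc through the new lport_set_port_id member of the libfc_function_template, so the LLD is notified whenever a local port's FC_ID changes; the point-to-point FLOGI case described in the comment is the one fcoe actually acts on. The hookup, condensed from the hunks above:

        static struct libfc_function_template fcoe_libfc_fcn_templ = {
                .frame_send        = fcoe_xmit,
                /* ... ddp_setup, ddp_done, elsct_send, get_lesb ... */
                .lport_set_port_id = fcoe_set_port_id, /* FC_ID change hook */
        };
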
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 3440da48d169..50aaa4bcfc50 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -51,7 +51,7 @@ MODULE_LICENSE("GPL v2");
51#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */ 51#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
52 52
53static void fcoe_ctlr_timeout(unsigned long); 53static void fcoe_ctlr_timeout(unsigned long);
54static void fcoe_ctlr_link_work(struct work_struct *); 54static void fcoe_ctlr_timer_work(struct work_struct *);
55static void fcoe_ctlr_recv_work(struct work_struct *); 55static void fcoe_ctlr_recv_work(struct work_struct *);
56 56
57static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; 57static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
@@ -116,7 +116,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip)
116 spin_lock_init(&fip->lock); 116 spin_lock_init(&fip->lock);
117 fip->flogi_oxid = FC_XID_UNKNOWN; 117 fip->flogi_oxid = FC_XID_UNKNOWN;
118 setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip); 118 setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
119 INIT_WORK(&fip->link_work, fcoe_ctlr_link_work); 119 INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
120 INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work); 120 INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
121 skb_queue_head_init(&fip->fip_recv_list); 121 skb_queue_head_init(&fip->fip_recv_list);
122} 122}
@@ -164,7 +164,7 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
164 fcoe_ctlr_reset_fcfs(fip); 164 fcoe_ctlr_reset_fcfs(fip);
165 spin_unlock_bh(&fip->lock); 165 spin_unlock_bh(&fip->lock);
166 del_timer_sync(&fip->timer); 166 del_timer_sync(&fip->timer);
167 cancel_work_sync(&fip->link_work); 167 cancel_work_sync(&fip->timer_work);
168} 168}
169EXPORT_SYMBOL(fcoe_ctlr_destroy); 169EXPORT_SYMBOL(fcoe_ctlr_destroy);
170 170
@@ -257,14 +257,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
257{ 257{
258 spin_lock_bh(&fip->lock); 258 spin_lock_bh(&fip->lock);
259 if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) { 259 if (fip->state == FIP_ST_NON_FIP || fip->state == FIP_ST_AUTO) {
260 fip->last_link = 1;
261 fip->link = 1;
262 spin_unlock_bh(&fip->lock); 260 spin_unlock_bh(&fip->lock);
263 fc_linkup(fip->lp); 261 fc_linkup(fip->lp);
264 } else if (fip->state == FIP_ST_LINK_WAIT) { 262 } else if (fip->state == FIP_ST_LINK_WAIT) {
265 fip->state = fip->mode; 263 fip->state = fip->mode;
266 fip->last_link = 1;
267 fip->link = 1;
268 spin_unlock_bh(&fip->lock); 264 spin_unlock_bh(&fip->lock);
269 if (fip->state == FIP_ST_AUTO) 265 if (fip->state == FIP_ST_AUTO)
270 LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n"); 266 LIBFCOE_FIP_DBG(fip, "%s", "setting AUTO mode.\n");
@@ -306,9 +302,7 @@ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
306 LIBFCOE_FIP_DBG(fip, "link down.\n"); 302 LIBFCOE_FIP_DBG(fip, "link down.\n");
307 spin_lock_bh(&fip->lock); 303 spin_lock_bh(&fip->lock);
308 fcoe_ctlr_reset(fip); 304 fcoe_ctlr_reset(fip);
309 link_dropped = fip->link; 305 link_dropped = fip->state != FIP_ST_LINK_WAIT;
310 fip->link = 0;
311 fip->last_link = 0;
312 fip->state = FIP_ST_LINK_WAIT; 306 fip->state = FIP_ST_LINK_WAIT;
313 spin_unlock_bh(&fip->lock); 307 spin_unlock_bh(&fip->lock);
314 308
@@ -349,7 +343,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
349 343
350 fcf = fip->sel_fcf; 344 fcf = fip->sel_fcf;
351 lp = fip->lp; 345 lp = fip->lp;
352 if (!fcf || !fc_host_port_id(lp->host)) 346 if (!fcf || !lp->port_id)
353 return; 347 return;
354 348
355 len = sizeof(*kal) + ports * sizeof(*vn); 349 len = sizeof(*kal) + ports * sizeof(*vn);
@@ -380,8 +374,8 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
380 vn->fd_desc.fip_dtype = FIP_DT_VN_ID; 374 vn->fd_desc.fip_dtype = FIP_DT_VN_ID;
381 vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW; 375 vn->fd_desc.fip_dlen = sizeof(*vn) / FIP_BPW;
382 memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN); 376 memcpy(vn->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
383 hton24(vn->fd_fc_id, fc_host_port_id(lp->host)); 377 hton24(vn->fd_fc_id, lport->port_id);
384 put_unaligned_be64(lp->wwpn, &vn->fd_wwpn); 378 put_unaligned_be64(lport->wwpn, &vn->fd_wwpn);
385 } 379 }
386 skb_put(skb, len); 380 skb_put(skb, len);
387 skb->protocol = htons(ETH_P_FIP); 381 skb->protocol = htons(ETH_P_FIP);
@@ -445,13 +439,18 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
445 cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW; 439 cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
446 440
447 mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac)); 441 mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac));
448 memset(mac, 0, sizeof(mac)); 442 memset(mac, 0, sizeof(*mac));
449 mac->fd_desc.fip_dtype = FIP_DT_MAC; 443 mac->fd_desc.fip_dtype = FIP_DT_MAC;
450 mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW; 444 mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
451 if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) 445 if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) {
452 memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN); 446 memcpy(mac->fd_mac, fip->get_src_addr(lport), ETH_ALEN);
453 else if (fip->spma) 447 } else if (fip_flags & FIP_FL_SPMA) {
448 LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with SPMA\n");
454 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN); 449 memcpy(mac->fd_mac, fip->ctl_src_addr, ETH_ALEN);
450 } else {
451 LIBFCOE_FIP_DBG(fip, "FLOGI/FDISC sent with FPMA\n");
452 /* FPMA only FLOGI must leave the MAC desc set to all 0s */
453 }
455 454
456 skb->protocol = htons(ETH_P_FIP); 455 skb->protocol = htons(ETH_P_FIP);
457 skb_reset_mac_header(skb); 456 skb_reset_mac_header(skb);
@@ -556,7 +555,7 @@ EXPORT_SYMBOL(fcoe_ctlr_els_send);
556 * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller 555 * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller
557 * @fip: The FCoE controller to free FCFs on 556 * @fip: The FCoE controller to free FCFs on
558 * 557 *
559 * Called with lock held. 558 * Called with lock held and preemption disabled.
560 * 559 *
561 * An FCF is considered old if we have missed three advertisements. 560 * An FCF is considered old if we have missed three advertisements.
562 * That is, there have been no valid advertisement from it for three 561 * That is, there have been no valid advertisement from it for three
@@ -573,17 +572,20 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
573 struct fcoe_fcf *next; 572 struct fcoe_fcf *next;
574 unsigned long sel_time = 0; 573 unsigned long sel_time = 0;
575 unsigned long mda_time = 0; 574 unsigned long mda_time = 0;
575 struct fcoe_dev_stats *stats;
576 576
577 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { 577 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
578 mda_time = fcf->fka_period + (fcf->fka_period >> 1); 578 mda_time = fcf->fka_period + (fcf->fka_period >> 1);
579 if ((fip->sel_fcf == fcf) && 579 if ((fip->sel_fcf == fcf) &&
580 (time_after(jiffies, fcf->time + mda_time))) { 580 (time_after(jiffies, fcf->time + mda_time))) {
581 mod_timer(&fip->timer, jiffies + mda_time); 581 mod_timer(&fip->timer, jiffies + mda_time);
582 fc_lport_get_stats(fip->lp)->MissDiscAdvCount++; 582 stats = per_cpu_ptr(fip->lp->dev_stats,
583 smp_processor_id());
584 stats->MissDiscAdvCount++;
583 printk(KERN_INFO "libfcoe: host%d: Missing Discovery " 585 printk(KERN_INFO "libfcoe: host%d: Missing Discovery "
584 "Advertisement for fab %llx count %lld\n", 586 "Advertisement for fab %16.16llx count %lld\n",
585 fip->lp->host->host_no, fcf->fabric_name, 587 fip->lp->host->host_no, fcf->fabric_name,
586 fc_lport_get_stats(fip->lp)->MissDiscAdvCount); 588 stats->MissDiscAdvCount);
587 } 589 }
588 if (time_after(jiffies, fcf->time + fcf->fka_period * 3 + 590 if (time_after(jiffies, fcf->time + fcf->fka_period * 3 +
589 msecs_to_jiffies(FIP_FCF_FUZZ * 3))) { 591 msecs_to_jiffies(FIP_FCF_FUZZ * 3))) {
@@ -593,7 +595,9 @@ static void fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
593 WARN_ON(!fip->fcf_count); 595 WARN_ON(!fip->fcf_count);
594 fip->fcf_count--; 596 fip->fcf_count--;
595 kfree(fcf); 597 kfree(fcf);
596 fc_lport_get_stats(fip->lp)->VLinkFailureCount++; 598 stats = per_cpu_ptr(fip->lp->dev_stats,
599 smp_processor_id());
600 stats->VLinkFailureCount++;
597 } else if (fcoe_ctlr_mtu_valid(fcf) && 601 } else if (fcoe_ctlr_mtu_valid(fcf) &&
598 (!sel_time || time_before(sel_time, fcf->time))) { 602 (!sel_time || time_before(sel_time, fcf->time))) {
599 sel_time = fcf->time; 603 sel_time = fcf->time;
@@ -776,7 +780,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
776 mtu_valid = fcoe_ctlr_mtu_valid(fcf); 780 mtu_valid = fcoe_ctlr_mtu_valid(fcf);
777 fcf->time = jiffies; 781 fcf->time = jiffies;
778 if (!found) { 782 if (!found) {
779 LIBFCOE_FIP_DBG(fip, "New FCF for fab %llx map %x val %d\n", 783 LIBFCOE_FIP_DBG(fip, "New FCF for fab %16.16llx "
784 "map %x val %d\n",
780 fcf->fabric_name, fcf->fc_map, mtu_valid); 785 fcf->fabric_name, fcf->fc_map, mtu_valid);
781 } 786 }
782 787
@@ -906,9 +911,10 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
906 fr_eof(fp) = FC_EOF_T; 911 fr_eof(fp) = FC_EOF_T;
907 fr_dev(fp) = lport; 912 fr_dev(fp) = lport;
908 913
909 stats = fc_lport_get_stats(lport); 914 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
910 stats->RxFrames++; 915 stats->RxFrames++;
911 stats->RxWords += skb->len / FIP_BPW; 916 stats->RxWords += skb->len / FIP_BPW;
917 put_cpu();
912 918
913 fc_exch_recv(lport, fp); 919 fc_exch_recv(lport, fp);
914 return; 920 return;
@@ -942,9 +948,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
942 u32 desc_mask; 948 u32 desc_mask;
943 949
944 LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n"); 950 LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
945 if (!fcf) 951
946 return; 952 if (!fcf || !lport->port_id)
947 if (!fcf || !fc_host_port_id(lport->host))
948 return; 953 return;
949 954
950 /* 955 /*
@@ -982,8 +987,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
982 if (compare_ether_addr(vp->fd_mac, 987 if (compare_ether_addr(vp->fd_mac,
983 fip->get_src_addr(lport)) == 0 && 988 fip->get_src_addr(lport)) == 0 &&
984 get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn && 989 get_unaligned_be64(&vp->fd_wwpn) == lport->wwpn &&
985 ntoh24(vp->fd_fc_id) == 990 ntoh24(vp->fd_fc_id) == lport->port_id)
986 fc_host_port_id(lport->host))
987 desc_mask &= ~BIT(FIP_DT_VN_ID); 991 desc_mask &= ~BIT(FIP_DT_VN_ID);
988 break; 992 break;
989 default: 993 default:
@@ -1006,7 +1010,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1006 LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); 1010 LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n");
1007 1011
1008 spin_lock_bh(&fip->lock); 1012 spin_lock_bh(&fip->lock);
1009 fc_lport_get_stats(lport)->VLinkFailureCount++; 1013 per_cpu_ptr(lport->dev_stats,
1014 smp_processor_id())->VLinkFailureCount++;
1010 fcoe_ctlr_reset(fip); 1015 fcoe_ctlr_reset(fip);
1011 spin_unlock_bh(&fip->lock); 1016 spin_unlock_bh(&fip->lock);
1012 1017
@@ -1102,15 +1107,17 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
1102 struct fcoe_fcf *best = NULL; 1107 struct fcoe_fcf *best = NULL;
1103 1108
1104 list_for_each_entry(fcf, &fip->fcfs, list) { 1109 list_for_each_entry(fcf, &fip->fcfs, list) {
1105 LIBFCOE_FIP_DBG(fip, "consider FCF for fab %llx VFID %d map %x " 1110 LIBFCOE_FIP_DBG(fip, "consider FCF for fab %16.16llx "
1106 "val %d\n", fcf->fabric_name, fcf->vfid, 1111 "VFID %d map %x val %d\n",
1112 fcf->fabric_name, fcf->vfid,
1107 fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); 1113 fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
1108 if (!fcoe_ctlr_fcf_usable(fcf)) { 1114 if (!fcoe_ctlr_fcf_usable(fcf)) {
1109 LIBFCOE_FIP_DBG(fip, "FCF for fab %llx map %x %svalid " 1115 LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
1110 "%savailable\n", fcf->fabric_name, 1116 "map %x %svalid %savailable\n",
1111 fcf->fc_map, (fcf->flags & FIP_FL_SOL) 1117 fcf->fabric_name, fcf->fc_map,
1112 ? "" : "in", (fcf->flags & FIP_FL_AVAIL) 1118 (fcf->flags & FIP_FL_SOL) ? "" : "in",
1113 ? "" : "un"); 1119 (fcf->flags & FIP_FL_AVAIL) ?
1120 "" : "un");
1114 continue; 1121 continue;
1115 } 1122 }
1116 if (!best) { 1123 if (!best) {
@@ -1175,7 +1182,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1175 "Starting FCF discovery.\n", 1182 "Starting FCF discovery.\n",
1176 fip->lp->host->host_no); 1183 fip->lp->host->host_no);
1177 fip->reset_req = 1; 1184 fip->reset_req = 1;
1178 schedule_work(&fip->link_work); 1185 schedule_work(&fip->timer_work);
1179 } 1186 }
1180 } 1187 }
1181 1188
@@ -1201,43 +1208,31 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1201 mod_timer(&fip->timer, next_timer); 1208 mod_timer(&fip->timer, next_timer);
1202 } 1209 }
1203 if (fip->send_ctlr_ka || fip->send_port_ka) 1210 if (fip->send_ctlr_ka || fip->send_port_ka)
1204 schedule_work(&fip->link_work); 1211 schedule_work(&fip->timer_work);
1205 spin_unlock_bh(&fip->lock); 1212 spin_unlock_bh(&fip->lock);
1206} 1213}
1207 1214
1208/** 1215/**
1209 * fcoe_ctlr_link_work() - Worker thread function for link changes 1216 * fcoe_ctlr_timer_work() - Worker thread function for timer work
1210 * @work: Handle to a FCoE controller 1217 * @work: Handle to a FCoE controller
1211 * 1218 *
1212 * See if the link status has changed and if so, report it. 1219 * Sends keep-alives and resets which must not
1213 *
1214 * This is here because fc_linkup() and fc_linkdown() must not
1215 * be called from the timer directly, since they use a mutex. 1220 * be called from the timer directly, since they use a mutex.
1216 */ 1221 */
1217static void fcoe_ctlr_link_work(struct work_struct *work) 1222static void fcoe_ctlr_timer_work(struct work_struct *work)
1218{ 1223{
1219 struct fcoe_ctlr *fip; 1224 struct fcoe_ctlr *fip;
1220 struct fc_lport *vport; 1225 struct fc_lport *vport;
1221 u8 *mac; 1226 u8 *mac;
1222 int link;
1223 int last_link;
1224 int reset; 1227 int reset;
1225 1228
1226 fip = container_of(work, struct fcoe_ctlr, link_work); 1229 fip = container_of(work, struct fcoe_ctlr, timer_work);
1227 spin_lock_bh(&fip->lock); 1230 spin_lock_bh(&fip->lock);
1228 last_link = fip->last_link;
1229 link = fip->link;
1230 fip->last_link = link;
1231 reset = fip->reset_req; 1231 reset = fip->reset_req;
1232 fip->reset_req = 0; 1232 fip->reset_req = 0;
1233 spin_unlock_bh(&fip->lock); 1233 spin_unlock_bh(&fip->lock);
1234 1234
1235 if (last_link != link) { 1235 if (reset)
1236 if (link)
1237 fc_linkup(fip->lp);
1238 else
1239 fc_linkdown(fip->lp);
1240 } else if (reset && link)
1241 fc_lport_reset(fip->lp); 1236 fc_lport_reset(fip->lp);
1242 1237
1243 if (fip->send_ctlr_ka) { 1238 if (fip->send_ctlr_ka) {
@@ -1334,9 +1329,9 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
1334 if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) { 1329 if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
1335 memcpy(fip->dest_addr, sa, ETH_ALEN); 1330 memcpy(fip->dest_addr, sa, ETH_ALEN);
1336 fip->map_dest = 0; 1331 fip->map_dest = 0;
1337 if (fip->state == FIP_ST_NON_FIP) 1332 if (fip->state == FIP_ST_AUTO)
1338 LIBFCOE_FIP_DBG(fip, "received FLOGI REQ, " 1333 LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. "
1339 "using non-FIP mode\n"); 1334 "Setting non-FIP mode\n");
1340 fip->state = FIP_ST_NON_FIP; 1335 fip->state = FIP_ST_NON_FIP;
1341 } 1336 }
1342 spin_unlock_bh(&fip->lock); 1337 spin_unlock_bh(&fip->lock);
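
The link_work to timer_work rename in this file tracks a standard kernel split, which the updated kerneldoc spells out: a timer callback runs in softirq context and may not sleep or take mutexes, so it only records what needs doing and schedules a work item; the work function, running in process context, then makes the sleeping calls such as fc_lport_reset(). A self-contained sketch of the deferral pattern with illustrative names (struct ctlr is not a libfcoe type; the unsigned long timer argument matches kernels of this vintage):

    #include <linux/timer.h>
    #include <linux/workqueue.h>
    #include <linux/spinlock.h>

    struct ctlr {
            struct timer_list timer;
            struct work_struct timer_work;
            spinlock_t lock;
            int reset_req;
    };

    static void ctlr_timeout(unsigned long arg)     /* softirq: no sleeping */
    {
            struct ctlr *c = (struct ctlr *)arg;

            c->reset_req = 1;
            schedule_work(&c->timer_work);          /* defer to process context */
    }

    static void ctlr_timer_work(struct work_struct *work)
    {
            struct ctlr *c = container_of(work, struct ctlr, timer_work);
            int reset;

            spin_lock_bh(&c->lock);
            reset = c->reset_req;
            c->reset_req = 0;
            spin_unlock_bh(&c->lock);

            if (reset) {
                    /* process context: mutexes and sleeping are allowed here */
            }
    }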
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 3966c71d0095..19338e0ba2c5 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -36,7 +36,7 @@
36 36
37#define DRV_NAME "fnic" 37#define DRV_NAME "fnic"
38#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 38#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
39#define DRV_VERSION "1.4.0.98" 39#define DRV_VERSION "1.4.0.145"
40#define PFX DRV_NAME ": " 40#define PFX DRV_NAME ": "
41#define DFX DRV_NAME "%d: " 41#define DFX DRV_NAME "%d: "
42 42
@@ -45,7 +45,7 @@
45#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ 45#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
46#define FNIC_DFLT_QUEUE_DEPTH 32 46#define FNIC_DFLT_QUEUE_DEPTH 32
47#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ 47#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
48#define FNIC_MAX_CMD_LEN 16 /* Supported CDB length */ 48
49/* 49/*
50 * Tag bits used for special requests. 50 * Tag bits used for special requests.
51 */ 51 */
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 5259888fbfb1..2b48d79bad94 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -617,7 +617,7 @@ void fnic_flush_tx(struct fnic *fnic)
617 struct sk_buff *skb; 617 struct sk_buff *skb;
618 struct fc_frame *fp; 618 struct fc_frame *fp;
619 619
620 while ((skb = skb_dequeue(&fnic->frame_queue))) { 620 while ((skb = skb_dequeue(&fnic->tx_queue))) {
621 fp = (struct fc_frame *)skb; 621 fp = (struct fc_frame *)skb;
622 fnic_send_frame(fnic, fp); 622 fnic_send_frame(fnic, fp);
623 } 623 }
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 97b212570bcc..265e73d9cd6f 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -556,7 +556,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
556 } 556 }
557 host->max_lun = fnic->config.luns_per_tgt; 557 host->max_lun = fnic->config.luns_per_tgt;
558 host->max_id = FNIC_MAX_FCP_TARGET; 558 host->max_id = FNIC_MAX_FCP_TARGET;
559 host->max_cmd_len = FNIC_MAX_CMD_LEN; 559 host->max_cmd_len = FCOE_MAX_CMD_LEN;
560 560
561 fnic_get_res_counts(fnic); 561 fnic_get_res_counts(fnic);
562 562
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 35a4b3073ec3..a765fe7a55c3 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -3842,7 +3842,7 @@ int __init option_setup(char *str)
3842 3842
3843 TRACE2(("option_setup() str %s\n", str ? str:"NULL")); 3843 TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
3844 3844
3845 while (cur && isdigit(*cur) && i <= MAXHA) { 3845 while (cur && isdigit(*cur) && i < MAXHA) {
3846 ints[i++] = simple_strtoul(cur, NULL, 0); 3846 ints[i++] = simple_strtoul(cur, NULL, 0);
3847 if ((cur = strchr(cur, ',')) != NULL) cur++; 3847 if ((cur = strchr(cur, ',')) != NULL) cur++;
3848 } 3848 }
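
The gdth change is a classic off-by-one fix: option_setup() parses comma-separated integers into ints[], and with "i <= MAXHA" the loop body still runs when i == MAXHA, storing one element past the last valid slot. Illustratively (the array size here is an assumption for the example, not gdth's actual declaration):

    #define MAXHA 16
    static unsigned int ints[MAXHA];    /* valid indices: 0 .. MAXHA-1 */

    /* "i <= MAXHA" admits i == MAXHA and writes ints[MAXHA], one past
     * the end; "i < MAXHA" stops at the last valid index. */
    while (cur && isdigit(*cur) && i < MAXHA)
            ints[i++] = simple_strtoul(cur, NULL, 0);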
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 48f406850c65..18b7102bb80e 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -19,332 +19,334 @@
19#include "wd33c93.h" 19#include "wd33c93.h"
20#include "gvp11.h" 20#include "gvp11.h"
21 21
22#include<linux/stat.h> 22#include <linux/stat.h>
23 23
24#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base))
25#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
26 24
27static irqreturn_t gvp11_intr (int irq, void *_instance) 25#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base))
26
27static irqreturn_t gvp11_intr(int irq, void *_instance)
28{ 28{
29 unsigned long flags; 29 unsigned long flags;
30 unsigned int status; 30 unsigned int status;
31 struct Scsi_Host *instance = (struct Scsi_Host *)_instance; 31 struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
32 32
33 status = DMA(instance)->CNTR; 33 status = DMA(instance)->CNTR;
34 if (!(status & GVP11_DMAC_INT_PENDING)) 34 if (!(status & GVP11_DMAC_INT_PENDING))
35 return IRQ_NONE; 35 return IRQ_NONE;
36 36
37 spin_lock_irqsave(instance->host_lock, flags); 37 spin_lock_irqsave(instance->host_lock, flags);
38 wd33c93_intr(instance); 38 wd33c93_intr(instance);
39 spin_unlock_irqrestore(instance->host_lock, flags); 39 spin_unlock_irqrestore(instance->host_lock, flags);
40 return IRQ_HANDLED; 40 return IRQ_HANDLED;
41} 41}
42 42
43static int gvp11_xfer_mask = 0; 43static int gvp11_xfer_mask = 0;
44 44
45void gvp11_setup (char *str, int *ints) 45void gvp11_setup(char *str, int *ints)
46{ 46{
47 gvp11_xfer_mask = ints[1]; 47 gvp11_xfer_mask = ints[1];
48} 48}
49 49
50static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 50static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
51{ 51{
52 unsigned short cntr = GVP11_DMAC_INT_ENABLE; 52 struct Scsi_Host *instance = cmd->device->host;
53 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 53 struct WD33C93_hostdata *hdata = shost_priv(instance);
54 int bank_mask; 54 unsigned short cntr = GVP11_DMAC_INT_ENABLE;
55 static int scsi_alloc_out_of_range = 0; 55 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
56 56 int bank_mask;
57 /* use bounce buffer if the physical address is bad */ 57 static int scsi_alloc_out_of_range = 0;
58 if (addr & HDATA(cmd->device->host)->dma_xfer_mask) 58
59 { 59 /* use bounce buffer if the physical address is bad */
60 HDATA(cmd->device->host)->dma_bounce_len = (cmd->SCp.this_residual + 511) 60 if (addr & hdata->dma_xfer_mask) {
61 & ~0x1ff; 61 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
62 62
63 if( !scsi_alloc_out_of_range ) { 63 if (!scsi_alloc_out_of_range) {
64 HDATA(cmd->device->host)->dma_bounce_buffer = 64 hdata->dma_bounce_buffer =
65 kmalloc (HDATA(cmd->device->host)->dma_bounce_len, GFP_KERNEL); 65 kmalloc(hdata->dma_bounce_len, GFP_KERNEL);
66 HDATA(cmd->device->host)->dma_buffer_pool = BUF_SCSI_ALLOCED; 66 hdata->dma_buffer_pool = BUF_SCSI_ALLOCED;
67 } 67 }
68 68
69 if (scsi_alloc_out_of_range || 69 if (scsi_alloc_out_of_range ||
70 !HDATA(cmd->device->host)->dma_bounce_buffer) { 70 !hdata->dma_bounce_buffer) {
71 HDATA(cmd->device->host)->dma_bounce_buffer = 71 hdata->dma_bounce_buffer =
72 amiga_chip_alloc(HDATA(cmd->device->host)->dma_bounce_len, 72 amiga_chip_alloc(hdata->dma_bounce_len,
73 "GVP II SCSI Bounce Buffer"); 73 "GVP II SCSI Bounce Buffer");
74 74
75 if(!HDATA(cmd->device->host)->dma_bounce_buffer) 75 if (!hdata->dma_bounce_buffer) {
76 { 76 hdata->dma_bounce_len = 0;
77 HDATA(cmd->device->host)->dma_bounce_len = 0; 77 return 1;
78 return 1; 78 }
79 }
80 79
81 HDATA(cmd->device->host)->dma_buffer_pool = BUF_CHIP_ALLOCED; 80 hdata->dma_buffer_pool = BUF_CHIP_ALLOCED;
82 } 81 }
83 82
84 /* check if the address of the bounce buffer is OK */ 83 /* check if the address of the bounce buffer is OK */
85 addr = virt_to_bus(HDATA(cmd->device->host)->dma_bounce_buffer); 84 addr = virt_to_bus(hdata->dma_bounce_buffer);
86 85
87 if (addr & HDATA(cmd->device->host)->dma_xfer_mask) { 86 if (addr & hdata->dma_xfer_mask) {
88 /* fall back to Chip RAM if address out of range */ 87 /* fall back to Chip RAM if address out of range */
89 if( HDATA(cmd->device->host)->dma_buffer_pool == BUF_SCSI_ALLOCED) { 88 if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) {
90 kfree (HDATA(cmd->device->host)->dma_bounce_buffer); 89 kfree(hdata->dma_bounce_buffer);
91 scsi_alloc_out_of_range = 1; 90 scsi_alloc_out_of_range = 1;
92 } else { 91 } else {
93 amiga_chip_free (HDATA(cmd->device->host)->dma_bounce_buffer); 92 amiga_chip_free(hdata->dma_bounce_buffer);
94 } 93 }
95 94
96 HDATA(cmd->device->host)->dma_bounce_buffer = 95 hdata->dma_bounce_buffer =
97 amiga_chip_alloc(HDATA(cmd->device->host)->dma_bounce_len, 96 amiga_chip_alloc(hdata->dma_bounce_len,
98 "GVP II SCSI Bounce Buffer"); 97 "GVP II SCSI Bounce Buffer");
99 98
100 if(!HDATA(cmd->device->host)->dma_bounce_buffer) 99 if (!hdata->dma_bounce_buffer) {
101 { 100 hdata->dma_bounce_len = 0;
102 HDATA(cmd->device->host)->dma_bounce_len = 0; 101 return 1;
103 return 1; 102 }
104 } 103
105 104 addr = virt_to_bus(hdata->dma_bounce_buffer);
106 addr = virt_to_bus(HDATA(cmd->device->host)->dma_bounce_buffer); 105 hdata->dma_buffer_pool = BUF_CHIP_ALLOCED;
107 HDATA(cmd->device->host)->dma_buffer_pool = BUF_CHIP_ALLOCED; 106 }
108 } 107
109 108 if (!dir_in) {
110 if (!dir_in) { 109 /* copy to bounce buffer for a write */
111 /* copy to bounce buffer for a write */ 110 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr,
112 memcpy (HDATA(cmd->device->host)->dma_bounce_buffer, 111 cmd->SCp.this_residual);
113 cmd->SCp.ptr, cmd->SCp.this_residual); 112 }
114 } 113 }
115 }
116 114
117 /* setup dma direction */ 115 /* setup dma direction */
118 if (!dir_in) 116 if (!dir_in)
119 cntr |= GVP11_DMAC_DIR_WRITE; 117 cntr |= GVP11_DMAC_DIR_WRITE;
120 118
121 HDATA(cmd->device->host)->dma_dir = dir_in; 119 hdata->dma_dir = dir_in;
122 DMA(cmd->device->host)->CNTR = cntr; 120 DMA(cmd->device->host)->CNTR = cntr;
123 121
124 /* setup DMA *physical* address */ 122 /* setup DMA *physical* address */
125 DMA(cmd->device->host)->ACR = addr; 123 DMA(cmd->device->host)->ACR = addr;
126 124
127 if (dir_in) 125 if (dir_in) {
128 /* invalidate any cache */ 126 /* invalidate any cache */
129 cache_clear (addr, cmd->SCp.this_residual); 127 cache_clear(addr, cmd->SCp.this_residual);
130 else 128 } else {
131 /* push any dirty cache */ 129 /* push any dirty cache */
132 cache_push (addr, cmd->SCp.this_residual); 130 cache_push(addr, cmd->SCp.this_residual);
131 }
133 132
134 if ((bank_mask = (~HDATA(cmd->device->host)->dma_xfer_mask >> 18) & 0x01c0)) 133 bank_mask = (~hdata->dma_xfer_mask >> 18) & 0x01c0;
135 DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18); 134 if (bank_mask)
135 DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18);
136 136
137 /* start DMA */ 137 /* start DMA */
138 DMA(cmd->device->host)->ST_DMA = 1; 138 DMA(cmd->device->host)->ST_DMA = 1;
139 139
140 /* return success */ 140 /* return success */
141 return 0; 141 return 0;
142} 142}
143 143
144static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 144static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
145 int status) 145 int status)
146{ 146{
147 /* stop DMA */ 147 struct WD33C93_hostdata *hdata = shost_priv(instance);
148 DMA(instance)->SP_DMA = 1; 148
149 /* remove write bit from CONTROL bits */ 149 /* stop DMA */
150 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; 150 DMA(instance)->SP_DMA = 1;
151 151 /* remove write bit from CONTROL bits */
152 /* copy from a bounce buffer, if necessary */ 152 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
153 if (status && HDATA(instance)->dma_bounce_buffer) { 153
154 if (HDATA(instance)->dma_dir && SCpnt) 154 /* copy from a bounce buffer, if necessary */
155 memcpy (SCpnt->SCp.ptr, 155 if (status && hdata->dma_bounce_buffer) {
156 HDATA(instance)->dma_bounce_buffer, 156 if (hdata->dma_dir && SCpnt)
157 SCpnt->SCp.this_residual); 157 memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer,
158 158 SCpnt->SCp.this_residual);
159 if (HDATA(instance)->dma_buffer_pool == BUF_SCSI_ALLOCED) 159
160 kfree (HDATA(instance)->dma_bounce_buffer); 160 if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED)
161 else 161 kfree(hdata->dma_bounce_buffer);
162 amiga_chip_free(HDATA(instance)->dma_bounce_buffer); 162 else
163 163 amiga_chip_free(hdata->dma_bounce_buffer);
164 HDATA(instance)->dma_bounce_buffer = NULL; 164
165 HDATA(instance)->dma_bounce_len = 0; 165 hdata->dma_bounce_buffer = NULL;
166 } 166 hdata->dma_bounce_len = 0;
167 }
167} 168}
168 169
169#define CHECK_WD33C93 170#define CHECK_WD33C93
170 171
171int __init gvp11_detect(struct scsi_host_template *tpnt) 172int __init gvp11_detect(struct scsi_host_template *tpnt)
172{ 173{
173 static unsigned char called = 0; 174 static unsigned char called = 0;
174 struct Scsi_Host *instance; 175 struct Scsi_Host *instance;
175 unsigned long address; 176 unsigned long address;
176 unsigned int epc; 177 unsigned int epc;
177 struct zorro_dev *z = NULL; 178 struct zorro_dev *z = NULL;
178 unsigned int default_dma_xfer_mask; 179 unsigned int default_dma_xfer_mask;
179 wd33c93_regs regs; 180 struct WD33C93_hostdata *hdata;
180 int num_gvp11 = 0; 181 wd33c93_regs regs;
182 int num_gvp11 = 0;
181#ifdef CHECK_WD33C93 183#ifdef CHECK_WD33C93
182 volatile unsigned char *sasr_3393, *scmd_3393; 184 volatile unsigned char *sasr_3393, *scmd_3393;
183 unsigned char save_sasr; 185 unsigned char save_sasr;
184 unsigned char q, qq; 186 unsigned char q, qq;
185#endif 187#endif
186 188
187 if (!MACH_IS_AMIGA || called) 189 if (!MACH_IS_AMIGA || called)
188 return 0; 190 return 0;
189 called = 1; 191 called = 1;
190 192
191 tpnt->proc_name = "GVP11"; 193 tpnt->proc_name = "GVP11";
192 tpnt->proc_info = &wd33c93_proc_info; 194 tpnt->proc_info = &wd33c93_proc_info;
193 195
194 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { 196 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
195 /* 197 /*
196 * This should (hopefully) be the correct way to identify 198 * This should (hopefully) be the correct way to identify
197 * all the different GVP SCSI controllers (except for the 199 * all the different GVP SCSI controllers (except for the
198 * SERIES I though). 200 * SERIES I though).
199 */ 201 */
200 202
201 if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI || 203 if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI ||
202 z->id == ZORRO_PROD_GVP_SERIES_II) 204 z->id == ZORRO_PROD_GVP_SERIES_II)
203 default_dma_xfer_mask = ~0x00ffffff; 205 default_dma_xfer_mask = ~0x00ffffff;
204 else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI || 206 else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI ||
205 z->id == ZORRO_PROD_GVP_A530_SCSI || 207 z->id == ZORRO_PROD_GVP_A530_SCSI ||
206 z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI) 208 z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI)
207 default_dma_xfer_mask = ~0x01ffffff; 209 default_dma_xfer_mask = ~0x01ffffff;
208 else if (z->id == ZORRO_PROD_GVP_A1291 || 210 else if (z->id == ZORRO_PROD_GVP_A1291 ||
209 z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1) 211 z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1)
210 default_dma_xfer_mask = ~0x07ffffff; 212 default_dma_xfer_mask = ~0x07ffffff;
211 else 213 else
212 continue; 214 continue;
213 215
214 /* 216 /*
215 * Rumors state that some GVP ram boards use the same product 217 * Rumors state that some GVP ram boards use the same product
216 * code as the SCSI controllers. Therefore if the board-size 218 * code as the SCSI controllers. Therefore if the board-size
 217	 * is not 64KB we assume it is a ram board and bail out.	219	 * is not 64KB we assume it is a ram board and bail out.
218 */ 220 */
219 if (z->resource.end-z->resource.start != 0xffff) 221 if (z->resource.end - z->resource.start != 0xffff)
220 continue; 222 continue;
221 223
222 address = z->resource.start; 224 address = z->resource.start;
223 if (!request_mem_region(address, 256, "wd33c93")) 225 if (!request_mem_region(address, 256, "wd33c93"))
224 continue; 226 continue;
225 227
226#ifdef CHECK_WD33C93 228#ifdef CHECK_WD33C93
227 229
228 /* 230 /*
229 * These darn GVP boards are a problem - it can be tough to tell 231 * These darn GVP boards are a problem - it can be tough to tell
230 * whether or not they include a SCSI controller. This is the 232 * whether or not they include a SCSI controller. This is the
231 * ultimate Yet-Another-GVP-Detection-Hack in that it actually 233 * ultimate Yet-Another-GVP-Detection-Hack in that it actually
232 * probes for a WD33c93 chip: If we find one, it's extremely 234 * probes for a WD33c93 chip: If we find one, it's extremely
233 * likely that this card supports SCSI, regardless of Product_ 235 * likely that this card supports SCSI, regardless of Product_
234 * Code, Board_Size, etc. 236 * Code, Board_Size, etc.
235 */ 237 */
236 238
237 /* Get pointers to the presumed register locations and save contents */ 239 /* Get pointers to the presumed register locations and save contents */
238 240
239 sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR); 241 sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR);
240 scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD); 242 scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD);
241 save_sasr = *sasr_3393; 243 save_sasr = *sasr_3393;
242 244
243 /* First test the AuxStatus Reg */ 245 /* First test the AuxStatus Reg */
244 246
245 q = *sasr_3393; /* read it */ 247 q = *sasr_3393; /* read it */
246 if (q & 0x08) /* bit 3 should always be clear */ 248 if (q & 0x08) /* bit 3 should always be clear */
247 goto release; 249 goto release;
248 *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ 250 *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
249 if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ 251 if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
250 *sasr_3393 = save_sasr; /* Oops - restore this byte */ 252 *sasr_3393 = save_sasr; /* Oops - restore this byte */
251 goto release; 253 goto release;
252 } 254 }
253 if (*sasr_3393 != q) { /* should still read the same */ 255 if (*sasr_3393 != q) { /* should still read the same */
254 *sasr_3393 = save_sasr; /* Oops - restore this byte */ 256 *sasr_3393 = save_sasr; /* Oops - restore this byte */
255 goto release; 257 goto release;
256 } 258 }
257 if (*scmd_3393 != q) /* and so should the image at 0x1f */ 259 if (*scmd_3393 != q) /* and so should the image at 0x1f */
258 goto release; 260 goto release;
259 261
260 262 /*
261 /* Ok, we probably have a wd33c93, but let's check a few other places 263 * Ok, we probably have a wd33c93, but let's check a few other places
262 * for good measure. Make sure that this works for both 'A and 'B 264 * for good measure. Make sure that this works for both 'A and 'B
263 * chip versions. 265 * chip versions.
264 */ 266 */
265 267
266 *sasr_3393 = WD_SCSI_STATUS; 268 *sasr_3393 = WD_SCSI_STATUS;
267 q = *scmd_3393; 269 q = *scmd_3393;
268 *sasr_3393 = WD_SCSI_STATUS; 270 *sasr_3393 = WD_SCSI_STATUS;
269 *scmd_3393 = ~q; 271 *scmd_3393 = ~q;
270 *sasr_3393 = WD_SCSI_STATUS; 272 *sasr_3393 = WD_SCSI_STATUS;
271 qq = *scmd_3393; 273 qq = *scmd_3393;
272 *sasr_3393 = WD_SCSI_STATUS; 274 *sasr_3393 = WD_SCSI_STATUS;
273 *scmd_3393 = q; 275 *scmd_3393 = q;
274 if (qq != q) /* should be read only */ 276 if (qq != q) /* should be read only */
275 goto release; 277 goto release;
276 *sasr_3393 = 0x1e; /* this register is unimplemented */ 278 *sasr_3393 = 0x1e; /* this register is unimplemented */
277 q = *scmd_3393; 279 q = *scmd_3393;
278 *sasr_3393 = 0x1e; 280 *sasr_3393 = 0x1e;
279 *scmd_3393 = ~q; 281 *scmd_3393 = ~q;
280 *sasr_3393 = 0x1e; 282 *sasr_3393 = 0x1e;
281 qq = *scmd_3393; 283 qq = *scmd_3393;
282 *sasr_3393 = 0x1e; 284 *sasr_3393 = 0x1e;
283 *scmd_3393 = q; 285 *scmd_3393 = q;
284 if (qq != q || qq != 0xff) /* should be read only, all 1's */ 286 if (qq != q || qq != 0xff) /* should be read only, all 1's */
285 goto release; 287 goto release;
286 *sasr_3393 = WD_TIMEOUT_PERIOD; 288 *sasr_3393 = WD_TIMEOUT_PERIOD;
287 q = *scmd_3393; 289 q = *scmd_3393;
288 *sasr_3393 = WD_TIMEOUT_PERIOD; 290 *sasr_3393 = WD_TIMEOUT_PERIOD;
289 *scmd_3393 = ~q; 291 *scmd_3393 = ~q;
290 *sasr_3393 = WD_TIMEOUT_PERIOD; 292 *sasr_3393 = WD_TIMEOUT_PERIOD;
291 qq = *scmd_3393; 293 qq = *scmd_3393;
292 *sasr_3393 = WD_TIMEOUT_PERIOD; 294 *sasr_3393 = WD_TIMEOUT_PERIOD;
293 *scmd_3393 = q; 295 *scmd_3393 = q;
294 if (qq != (~q & 0xff)) /* should be read/write */ 296 if (qq != (~q & 0xff)) /* should be read/write */
295 goto release; 297 goto release;
296#endif 298#endif
297 299
298 instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata)); 300 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
299 if(instance == NULL) 301 if (instance == NULL)
300 goto release; 302 goto release;
301 instance->base = ZTWO_VADDR(address); 303 instance->base = ZTWO_VADDR(address);
302 instance->irq = IRQ_AMIGA_PORTS; 304 instance->irq = IRQ_AMIGA_PORTS;
303 instance->unique_id = z->slotaddr; 305 instance->unique_id = z->slotaddr;
304 306
305 if (gvp11_xfer_mask) 307 hdata = shost_priv(instance);
306 HDATA(instance)->dma_xfer_mask = gvp11_xfer_mask; 308 if (gvp11_xfer_mask)
307 else 309 hdata->dma_xfer_mask = gvp11_xfer_mask;
308 HDATA(instance)->dma_xfer_mask = default_dma_xfer_mask; 310 else
309 311 hdata->dma_xfer_mask = default_dma_xfer_mask;
310 312
311 DMA(instance)->secret2 = 1; 313 DMA(instance)->secret2 = 1;
312 DMA(instance)->secret1 = 0; 314 DMA(instance)->secret1 = 0;
313 DMA(instance)->secret3 = 15; 315 DMA(instance)->secret3 = 15;
314 while (DMA(instance)->CNTR & GVP11_DMAC_BUSY) ; 316 while (DMA(instance)->CNTR & GVP11_DMAC_BUSY)
315 DMA(instance)->CNTR = 0; 317 ;
316 318 DMA(instance)->CNTR = 0;
317 DMA(instance)->BANK = 0; 319
318 320 DMA(instance)->BANK = 0;
319 epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); 321
320 322 epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
321 /* 323
322 * Check for 14MHz SCSI clock 324 /*
323 */ 325 * Check for 14MHz SCSI clock
324 regs.SASR = &(DMA(instance)->SASR); 326 */
325 regs.SCMD = &(DMA(instance)->SCMD); 327 regs.SASR = &(DMA(instance)->SASR);
326 HDATA(instance)->no_sync = 0xff; 328 regs.SCMD = &(DMA(instance)->SCMD);
327 HDATA(instance)->fast = 0; 329 hdata->no_sync = 0xff;
328 HDATA(instance)->dma_mode = CTRL_DMA; 330 hdata->fast = 0;
329 wd33c93_init(instance, regs, dma_setup, dma_stop, 331 hdata->dma_mode = CTRL_DMA;
330 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 332 wd33c93_init(instance, regs, dma_setup, dma_stop,
331 : WD33C93_FS_12_15); 333 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
332 334 : WD33C93_FS_12_15);
333 if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI", 335
334 instance)) 336 if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
335 goto unregister; 337 "GVP11 SCSI", instance))
336 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; 338 goto unregister;
337 num_gvp11++; 339 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
338 continue; 340 num_gvp11++;
341 continue;
339 342
340unregister: 343unregister:
341 scsi_unregister(instance); 344 scsi_unregister(instance);
342 wd33c93_release();
343release: 345release:
344 release_mem_region(address, 256); 346 release_mem_region(address, 256);
345 } 347 }
346 348
347 return num_gvp11; 349 return num_gvp11;
348} 350}
349 351
350static int gvp11_bus_reset(struct scsi_cmnd *cmd) 352static int gvp11_bus_reset(struct scsi_cmnd *cmd)
@@ -389,12 +391,11 @@ static struct scsi_host_template driver_template = {
389int gvp11_release(struct Scsi_Host *instance) 391int gvp11_release(struct Scsi_Host *instance)
390{ 392{
391#ifdef MODULE 393#ifdef MODULE
392 DMA(instance)->CNTR = 0; 394 DMA(instance)->CNTR = 0;
393 release_mem_region(ZTWO_PADDR(instance->base), 256); 395 release_mem_region(ZTWO_PADDR(instance->base), 256);
394 free_irq(IRQ_AMIGA_PORTS, instance); 396 free_irq(IRQ_AMIGA_PORTS, instance);
395 wd33c93_release();
396#endif 397#endif
397 return 1; 398 return 1;
398} 399}
399 400
400MODULE_LICENSE("GPL"); 401MODULE_LICENSE("GPL");
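
Most of the churn in gvp11.c replaces the HDATA(host) cast macro with a local hdata pointer fetched once from shost_priv(). The helper comes from the SCSI midlayer and simply returns the driver-private area that scsi_register() reserves after struct Scsi_Host, so the two forms are equivalent; the named pointer just reads far better than repeating the cast on every access. Roughly:

    /* essentially what <scsi/scsi_host.h> provides */
    static inline void *shost_priv(struct Scsi_Host *shost)
    {
            return (void *)shost->hostdata;
    }

    /* old style, via macro on every access: */
    #define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
    HDATA(instance)->no_sync = 0xff;

    /* new style, fetched once per function: */
    struct WD33C93_hostdata *hdata = shost_priv(instance);
    hdata->no_sync = 0xff;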
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index bf22859a5035..e2efdf9601ef 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -15,11 +15,11 @@ int gvp11_detect(struct scsi_host_template *);
15int gvp11_release(struct Scsi_Host *); 15int gvp11_release(struct Scsi_Host *);
16 16
17#ifndef CMD_PER_LUN 17#ifndef CMD_PER_LUN
18#define CMD_PER_LUN 2 18#define CMD_PER_LUN 2
19#endif 19#endif
20 20
21#ifndef CAN_QUEUE 21#ifndef CAN_QUEUE
22#define CAN_QUEUE 16 22#define CAN_QUEUE 16
23#endif 23#endif
24 24
25#ifndef HOSTS_C 25#ifndef HOSTS_C
@@ -28,24 +28,24 @@ int gvp11_release(struct Scsi_Host *);
28 * if the transfer address ANDed with this results in a non-zero 28 * if the transfer address ANDed with this results in a non-zero
29 * result, then we can't use DMA. 29 * result, then we can't use DMA.
30 */ 30 */
31#define GVP11_XFER_MASK (0xff000001) 31#define GVP11_XFER_MASK (0xff000001)
32 32
33typedef struct { 33typedef struct {
34 unsigned char pad1[64]; 34 unsigned char pad1[64];
35 volatile unsigned short CNTR; 35 volatile unsigned short CNTR;
36 unsigned char pad2[31]; 36 unsigned char pad2[31];
37 volatile unsigned char SASR; 37 volatile unsigned char SASR;
38 unsigned char pad3; 38 unsigned char pad3;
39 volatile unsigned char SCMD; 39 volatile unsigned char SCMD;
40 unsigned char pad4[4]; 40 unsigned char pad4[4];
41 volatile unsigned short BANK; 41 volatile unsigned short BANK;
42 unsigned char pad5[6]; 42 unsigned char pad5[6];
43 volatile unsigned long ACR; 43 volatile unsigned long ACR;
44 volatile unsigned short secret1; /* store 0 here */ 44 volatile unsigned short secret1; /* store 0 here */
45 volatile unsigned short ST_DMA; 45 volatile unsigned short ST_DMA;
46 volatile unsigned short SP_DMA; 46 volatile unsigned short SP_DMA;
47 volatile unsigned short secret2; /* store 1 here */ 47 volatile unsigned short secret2; /* store 1 here */
48 volatile unsigned short secret3; /* store 15 here */ 48 volatile unsigned short secret3; /* store 15 here */
49} gvp11_scsiregs; 49} gvp11_scsiregs;
50 50
51/* bits in CNTR */ 51/* bits in CNTR */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 183d3a43c280..c016426b31b2 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2708,14 +2708,6 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2708 c->Request.CDB[8] = (size >> 8) & 0xFF; 2708 c->Request.CDB[8] = (size >> 8) & 0xFF;
2709 c->Request.CDB[9] = size & 0xFF; 2709 c->Request.CDB[9] = size & 0xFF;
2710 break; 2710 break;
2711
2712 case HPSA_READ_CAPACITY:
2713 c->Request.CDBLen = 10;
2714 c->Request.Type.Attribute = ATTR_SIMPLE;
2715 c->Request.Type.Direction = XFER_READ;
2716 c->Request.Timeout = 0;
2717 c->Request.CDB[0] = cmd;
2718 break;
2719 case HPSA_CACHE_FLUSH: 2711 case HPSA_CACHE_FLUSH:
2720 c->Request.CDBLen = 12; 2712 c->Request.CDBLen = 12;
2721 c->Request.Type.Attribute = ATTR_SIMPLE; 2713 c->Request.Type.Attribute = ATTR_SIMPLE;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 56fb9827681e..78de9b6d1e0b 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -152,21 +152,6 @@ struct SenseSubsystem_info {
152 u8 reserved1[1108]; 152 u8 reserved1[1108];
153}; 153};
154 154
155#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
156struct ReadCapdata {
157 u8 total_size[4]; /* Total size in blocks */
158 u8 block_size[4]; /* Size of blocks in bytes */
159};
160
161#if 0
162/* 12 byte commands not implemented in firmware yet. */
163#define HPSA_READ 0xa8
164#define HPSA_WRITE 0xaa
165#endif
166
167#define HPSA_READ 0x28 /* Read(10) */
168#define HPSA_WRITE 0x2a /* Write(10) */
169
170/* BMIC commands */ 155/* BMIC commands */
171#define BMIC_READ 0x26 156#define BMIC_READ 0x26
172#define BMIC_WRITE 0x27 157#define BMIC_WRITE 0x27
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index c2eea711a5ce..d18f45c95639 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2245,7 +2245,7 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2245 DECLARE_COMPLETION_ONSTACK(comp); 2245 DECLARE_COMPLETION_ONSTACK(comp);
2246 int wait; 2246 int wait;
2247 unsigned long flags; 2247 unsigned long flags;
2248 signed long timeout = init_timeout * HZ; 2248 signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2249 2249
2250 ENTER; 2250 ENTER;
2251 do { 2251 do {
@@ -3013,6 +3013,7 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3013 if (crq->valid & 0x80) { 3013 if (crq->valid & 0x80) {
3014 if (++async_crq->cur == async_crq->size) 3014 if (++async_crq->cur == async_crq->size)
3015 async_crq->cur = 0; 3015 async_crq->cur = 0;
3016 rmb();
3016 } else 3017 } else
3017 crq = NULL; 3018 crq = NULL;
3018 3019
@@ -3035,6 +3036,7 @@ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3035 if (crq->valid & 0x80) { 3036 if (crq->valid & 0x80) {
3036 if (++queue->cur == queue->size) 3037 if (++queue->cur == queue->size)
3037 queue->cur = 0; 3038 queue->cur = 0;
3039 rmb();
3038 } else 3040 } else
3039 crq = NULL; 3041 crq = NULL;
3040 3042
@@ -3083,12 +3085,14 @@ static void ibmvfc_tasklet(void *data)
3083 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { 3085 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3084 ibmvfc_handle_async(async, vhost); 3086 ibmvfc_handle_async(async, vhost);
3085 async->valid = 0; 3087 async->valid = 0;
3088 wmb();
3086 } 3089 }
3087 3090
3088 /* Pull all the valid messages off the CRQ */ 3091 /* Pull all the valid messages off the CRQ */
3089 while ((crq = ibmvfc_next_crq(vhost)) != NULL) { 3092 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3090 ibmvfc_handle_crq(crq, vhost); 3093 ibmvfc_handle_crq(crq, vhost);
3091 crq->valid = 0; 3094 crq->valid = 0;
3095 wmb();
3092 } 3096 }
3093 3097
3094 vio_enable_interrupts(vdev); 3098 vio_enable_interrupts(vdev);
@@ -3096,10 +3100,12 @@ static void ibmvfc_tasklet(void *data)
3096 vio_disable_interrupts(vdev); 3100 vio_disable_interrupts(vdev);
3097 ibmvfc_handle_async(async, vhost); 3101 ibmvfc_handle_async(async, vhost);
3098 async->valid = 0; 3102 async->valid = 0;
3103 wmb();
3099 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { 3104 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3100 vio_disable_interrupts(vdev); 3105 vio_disable_interrupts(vdev);
3101 ibmvfc_handle_crq(crq, vhost); 3106 ibmvfc_handle_crq(crq, vhost);
3102 crq->valid = 0; 3107 crq->valid = 0;
3108 wmb();
3103 } else 3109 } else
3104 done = 1; 3110 done = 1;
3105 } 3111 }
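
The rmb()/wmb() calls added to the ibmvfc CRQ paths look like the two halves of a lock-free ring shared with the hypervisor: the consumer must not let loads of an entry's payload be reordered before the check of its valid bit, and after handling an entry it must make the cleared valid bit visible before anything that could let the producer refill the slot. A generic sketch of the pairing, in kernel context (ring_entry is a hypothetical layout, not the ibmvfc CRQ format):

    struct ring_entry {
            volatile u8 valid;              /* producer sets 0x80 */
            u8 payload[127];
    };

    static struct ring_entry *ring_next(struct ring_entry *ring,
                                        int *cur, int size)
    {
            struct ring_entry *e = &ring[*cur];

            if (!(e->valid & 0x80))
                    return NULL;
            if (++*cur == size)
                    *cur = 0;
            rmb();          /* read payload only after seeing valid set */
            return e;
    }

    static void ring_done(struct ring_entry *e)
    {
            /* ... consume e->payload ... */
            e->valid = 0;
            wmb();          /* publish the clear before moving on */
    }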
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index d25106a958d7..7e9742764e4b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -38,6 +38,7 @@
38#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \ 38#define IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT \
39 (IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT) 39 (IBMVFC_ADISC_TIMEOUT + IBMVFC_ADISC_CANCEL_TIMEOUT)
40#define IBMVFC_INIT_TIMEOUT 120 40#define IBMVFC_INIT_TIMEOUT 120
41#define IBMVFC_ABORT_WAIT_TIMEOUT 40
41#define IBMVFC_MAX_REQUESTS_DEFAULT 100 42#define IBMVFC_MAX_REQUESTS_DEFAULT 100
42 43
43#define IBMVFC_DEBUG 0 44#define IBMVFC_DEBUG 0
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 02143af7c1af..cdb4fd8cf23b 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -206,8 +206,10 @@ static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
206} 206}
207 207
208static void 208static void
209iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_sw_tcp_conn *tcp_sw_conn) 209iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)
210{ 210{
211 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
212 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
211 struct sock *sk = tcp_sw_conn->sock->sk; 213 struct sock *sk = tcp_sw_conn->sock->sk;
212 214
213 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ 215 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -555,7 +557,7 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
555 return; 557 return;
556 558
557 sock_hold(sock->sk); 559 sock_hold(sock->sk);
558 iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn); 560 iscsi_sw_tcp_conn_restore_callbacks(conn);
559 sock_put(sock->sk); 561 sock_put(sock->sk);
560 562
561 spin_lock_bh(&session->lock); 563 spin_lock_bh(&session->lock);
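
The iscsi_tcp change drops a cached back-pointer from struct iscsi_sw_tcp_conn and instead derives the software-TCP private data from the generic connection at the call site, walking two dd_data links. Each layer of the iSCSI stack allocates the next layer's private area and publishes it through dd_data, so the chain is valid for as long as the connection exists. The derivation, as a hypothetical helper (the patch open-codes these two dereferences inside iscsi_sw_tcp_conn_restore_callbacks()):

    static struct iscsi_sw_tcp_conn *sw_conn_of(struct iscsi_conn *conn)
    {
            struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

            return tcp_conn->dd_data;   /* struct iscsi_sw_tcp_conn */
    }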
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index ca6b7bc64de0..94644bad0ed7 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -36,7 +36,6 @@ struct iscsi_sw_tcp_send {
36}; 36};
37 37
38struct iscsi_sw_tcp_conn { 38struct iscsi_sw_tcp_conn {
39 struct iscsi_conn *iscsi_conn;
40 struct socket *sock; 39 struct socket *sock;
41 40
42 struct iscsi_sw_tcp_send out; 41 struct iscsi_sw_tcp_send out;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 1087a7f18e84..c7985da88099 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -132,7 +132,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
132 switch (fmt) { 132 switch (fmt) {
133 case ELS_ADDR_FMT_PORT: 133 case ELS_ADDR_FMT_PORT:
134 FC_DISC_DBG(disc, "Port address format for port " 134 FC_DISC_DBG(disc, "Port address format for port "
135 "(%6x)\n", ntoh24(pp->rscn_fid)); 135 "(%6.6x)\n", ntoh24(pp->rscn_fid));
136 dp = kzalloc(sizeof(*dp), GFP_KERNEL); 136 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
137 if (!dp) { 137 if (!dp) {
138 redisc = 1; 138 redisc = 1;
@@ -440,7 +440,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
440 ids.port_id = ntoh24(np->fp_fid); 440 ids.port_id = ntoh24(np->fp_fid);
441 ids.port_name = ntohll(np->fp_wwpn); 441 ids.port_name = ntohll(np->fp_wwpn);
442 442
443 if (ids.port_id != fc_host_port_id(lport->host) && 443 if (ids.port_id != lport->port_id &&
444 ids.port_name != lport->wwpn) { 444 ids.port_name != lport->wwpn) {
445 rdata = lport->tt.rport_create(lport, ids.port_id); 445 rdata = lport->tt.rport_create(lport, ids.port_id);
446 if (rdata) { 446 if (rdata) {
@@ -449,7 +449,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
449 } else { 449 } else {
450 printk(KERN_WARNING "libfc: Failed to allocate " 450 printk(KERN_WARNING "libfc: Failed to allocate "
451 "memory for the newly discovered port " 451 "memory for the newly discovered port "
452 "(%6x)\n", ids.port_id); 452 "(%6.6x)\n", ids.port_id);
453 error = -ENOMEM; 453 error = -ENOMEM;
454 } 454 }
455 } 455 }
@@ -607,7 +607,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
607 rdata->ids.port_name = port_name; 607 rdata->ids.port_name = port_name;
608 else if (rdata->ids.port_name != port_name) { 608 else if (rdata->ids.port_name != port_name) {
609 FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. " 609 FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
610 "Port-id %x wwpn %llx\n", 610 "Port-id %6.6x wwpn %16.16llx\n",
611 rdata->ids.port_id, port_name); 611 rdata->ids.port_id, port_name);
612 lport->tt.rport_logoff(rdata); 612 lport->tt.rport_logoff(rdata);
613 613
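
The recurring format-string changes in this file (and in the files below) switch FC IDs from %x or %6x to %6.6x and WWNs from %llx to %16.16llx. Giving both a field width and a precision makes printk emit exactly six and exactly sixteen zero-padded hex digits, so IDs line up in the log and short values cannot be misread. For example:

    printk("port (%6.6x) wwpn %16.16llx\n", port_id, wwpn);
    /* e.g.: port (0000ef) wwpn 20000000c9abcdef */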
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 53748724f2c5..e9412b710fab 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -63,7 +63,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
63 return NULL; 63 return NULL;
64 } 64 }
65 65
66 fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type, 66 fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
67 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 67 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
68 68
69 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec); 69 return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
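
This hunk is one instance of a substitution repeated throughout the series: fc_host_port_id(lport->host), which digs the assigned FC_ID out of the Scsi_Host transport attributes, gives way to lport->port_id, a copy the series keeps on the local port structure itself. Same value, one less indirection, and usable in paths where no Scsi_Host is conveniently at hand:

    /* before */ u32 fid = fc_host_port_id(lport->host);
    /* after  */ u32 fid = lport->port_id;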
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index e5df0d4db67e..104e0fba7c43 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -488,7 +488,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
488 */ 488 */
489 spin_lock_bh(&ep->ex_lock); 489 spin_lock_bh(&ep->ex_lock);
490 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ 490 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
491 if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT)) 491 if (f_ctl & FC_FC_SEQ_INIT)
492 ep->esb_stat &= ~ESB_ST_SEQ_INIT; 492 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
493 spin_unlock_bh(&ep->ex_lock); 493 spin_unlock_bh(&ep->ex_lock);
494 return error; 494 return error;
@@ -676,9 +676,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
676 } 676 }
677 memset(ep, 0, sizeof(*ep)); 677 memset(ep, 0, sizeof(*ep));
678 678
679 cpu = smp_processor_id(); 679 cpu = get_cpu();
680 pool = per_cpu_ptr(mp->pool, cpu); 680 pool = per_cpu_ptr(mp->pool, cpu);
681 spin_lock_bh(&pool->lock); 681 spin_lock_bh(&pool->lock);
682 put_cpu();
682 index = pool->next_index; 683 index = pool->next_index;
683 /* allocate new exch from pool */ 684 /* allocate new exch from pool */
684 while (fc_exch_ptr_get(pool, index)) { 685 while (fc_exch_ptr_get(pool, index)) {
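
In fc_exch_em_alloc() above, smp_processor_id() becomes get_cpu() so the task cannot migrate between selecting its per-CPU exchange pool and taking that pool's lock; put_cpu() can then follow immediately after spin_lock_bh(), because the held BH lock keeps preemption disabled for the rest of the critical section. The hand-off, in miniature:

    cpu = get_cpu();                    /* pin to this CPU */
    pool = per_cpu_ptr(mp->pool, cpu);
    spin_lock_bh(&pool->lock);          /* also non-preemptible */
    put_cpu();                          /* safe: the lock now pins us */
    /* ... allocate from pool ... */
    spin_unlock_bh(&pool->lock);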
@@ -734,19 +735,14 @@ err:
734 * EM is selected when a NULL match function pointer is encountered 735 * EM is selected when a NULL match function pointer is encountered
735 * or when a call to a match function returns true. 736 * or when a call to a match function returns true.
736 */ 737 */
737static struct fc_exch *fc_exch_alloc(struct fc_lport *lport, 738static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
738 struct fc_frame *fp) 739 struct fc_frame *fp)
739{ 740{
740 struct fc_exch_mgr_anchor *ema; 741 struct fc_exch_mgr_anchor *ema;
741 struct fc_exch *ep;
742 742
743 list_for_each_entry(ema, &lport->ema_list, ema_list) { 743 list_for_each_entry(ema, &lport->ema_list, ema_list)
744 if (!ema->match || ema->match(fp)) { 744 if (!ema->match || ema->match(fp))
745 ep = fc_exch_em_alloc(lport, ema->mp); 745 return fc_exch_em_alloc(lport, ema->mp);
746 if (ep)
747 return ep;
748 }
749 }
750 return NULL; 746 return NULL;
751} 747}
752 748
@@ -920,13 +916,9 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
920 * Find or create the sequence. 916 * Find or create the sequence.
921 */ 917 */
922 if (fc_sof_is_init(fr_sof(fp))) { 918 if (fc_sof_is_init(fr_sof(fp))) {
923 sp = fc_seq_start_next(&ep->seq); 919 sp = &ep->seq;
924 if (!sp) {
925 reject = FC_RJT_SEQ_XS; /* exchange shortage */
926 goto rel;
927 }
928 sp->id = fh->fh_seq_id;
929 sp->ssb_stat |= SSB_ST_RESP; 920 sp->ssb_stat |= SSB_ST_RESP;
921 sp->id = fh->fh_seq_id;
930 } else { 922 } else {
931 sp = &ep->seq; 923 sp = &ep->seq;
932 if (sp->id != fh->fh_seq_id) { 924 if (sp->id != fh->fh_seq_id) {
@@ -1250,9 +1242,6 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1250 struct fc_frame_header *fh = fc_frame_header_get(fp); 1242 struct fc_frame_header *fh = fc_frame_header_get(fp);
1251 struct fc_seq *sp = NULL; 1243 struct fc_seq *sp = NULL;
1252 struct fc_exch *ep = NULL; 1244 struct fc_exch *ep = NULL;
1253 enum fc_sof sof;
1254 enum fc_eof eof;
1255 u32 f_ctl;
1256 enum fc_pf_rjt_reason reject; 1245 enum fc_pf_rjt_reason reject;
1257 1246
1258 /* We can have the wrong fc_lport at this point with NPIV, which is a 1247 /* We can have the wrong fc_lport at this point with NPIV, which is a
@@ -1269,9 +1258,6 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1269 if (reject == FC_RJT_NONE) { 1258 if (reject == FC_RJT_NONE) {
1270 sp = fr_seq(fp); /* sequence will be held */ 1259 sp = fr_seq(fp); /* sequence will be held */
1271 ep = fc_seq_exch(sp); 1260 ep = fc_seq_exch(sp);
1272 sof = fr_sof(fp);
1273 eof = fr_eof(fp);
1274 f_ctl = ntoh24(fh->fh_f_ctl);
1275 fc_seq_send_ack(sp, fp); 1261 fc_seq_send_ack(sp, fp);
1276 1262
1277 /* 1263 /*
@@ -1336,17 +1322,15 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1336 goto rel; 1322 goto rel;
1337 } 1323 }
1338 sof = fr_sof(fp); 1324 sof = fr_sof(fp);
1325 sp = &ep->seq;
1339 if (fc_sof_is_init(sof)) { 1326 if (fc_sof_is_init(sof)) {
1340 sp = fc_seq_start_next(&ep->seq);
1341 sp->id = fh->fh_seq_id;
1342 sp->ssb_stat |= SSB_ST_RESP; 1327 sp->ssb_stat |= SSB_ST_RESP;
1343 } else { 1328 sp->id = fh->fh_seq_id;
1344 sp = &ep->seq; 1329 } else if (sp->id != fh->fh_seq_id) {
1345 if (sp->id != fh->fh_seq_id) { 1330 atomic_inc(&mp->stats.seq_not_found);
1346 atomic_inc(&mp->stats.seq_not_found); 1331 goto rel;
1347 goto rel;
1348 }
1349 } 1332 }
1333
1350 f_ctl = ntoh24(fh->fh_f_ctl); 1334 f_ctl = ntoh24(fh->fh_f_ctl);
1351 fr_seq(fp) = sp; 1335 fr_seq(fp) = sp;
1352 if (f_ctl & FC_FC_SEQ_INIT) 1336 if (f_ctl & FC_FC_SEQ_INIT)
@@ -1763,7 +1747,6 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1763 fc_exch_done(sp); 1747 fc_exch_done(sp);
1764 goto out; 1748 goto out;
1765 } 1749 }
1766 sp = fc_seq_start_next(sp);
1767 acc = fc_frame_payload_get(fp, sizeof(*acc)); 1750 acc = fc_frame_payload_get(fp, sizeof(*acc));
1768 memset(acc, 0, sizeof(*acc)); 1751 memset(acc, 0, sizeof(*acc));
1769 acc->reca_cmd = ELS_LS_ACC; 1752 acc->reca_cmd = ELS_LS_ACC;
@@ -1944,7 +1927,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
1944 did = ep->sid; 1927 did = ep->sid;
1945 1928
1946 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, 1929 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
1947 fc_host_port_id(lport->host), FC_TYPE_ELS, 1930 lport->port_id, FC_TYPE_ELS,
1948 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1931 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1949 1932
1950 if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep, 1933 if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 17396c708b08..ec1f66c4a9d4 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -97,7 +97,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
97static void fc_fcp_complete_locked(struct fc_fcp_pkt *); 97static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
98static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); 98static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
99static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); 99static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
100static void fc_timeout_error(struct fc_fcp_pkt *); 100static void fc_fcp_recovery(struct fc_fcp_pkt *);
101static void fc_fcp_timeout(unsigned long); 101static void fc_fcp_timeout(unsigned long);
102static void fc_fcp_rec(struct fc_fcp_pkt *); 102static void fc_fcp_rec(struct fc_fcp_pkt *);
103static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); 103static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
@@ -121,7 +121,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
121#define FC_DATA_UNDRUN 7 121#define FC_DATA_UNDRUN 7
122#define FC_ERROR 8 122#define FC_ERROR 8
123#define FC_HRD_ERROR 9 123#define FC_HRD_ERROR 9
124#define FC_CMD_TIME_OUT 10 124#define FC_CMD_RECOVERY 10
125 125
126/* 126/*
127 * Error recovery timeout values. 127 * Error recovery timeout values.
@@ -446,9 +446,16 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
446 len = fr_len(fp) - sizeof(*fh); 446 len = fr_len(fp) - sizeof(*fh);
447 buf = fc_frame_payload_get(fp, 0); 447 buf = fc_frame_payload_get(fp, 0);
448 448
449 /* if this I/O is ddped, update xfer len */ 449 /*
450 fc_fcp_ddp_done(fsp); 450 * if this I/O is ddped then clear it
451 451 * and initiate recovery since data
452 * frames are expected to be placed
453 * directly in that case.
454 */
455 if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
456 fc_fcp_ddp_done(fsp);
457 goto err;
458 }
452 if (offset + len > fsp->data_len) { 459 if (offset + len > fsp->data_len) {
453 /* this should never happen */ 460 /* this should never happen */
454 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && 461 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
@@ -456,8 +463,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
456 goto crc_err; 463 goto crc_err;
457 FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " 464 FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
458 "data_len %x\n", len, offset, fsp->data_len); 465 "data_len %x\n", len, offset, fsp->data_len);
459 fc_fcp_retry_cmd(fsp); 466 goto err;
460 return;
461 } 467 }
462 if (offset != fsp->xfer_len) 468 if (offset != fsp->xfer_len)
463 fsp->state |= FC_SRB_DISCONTIG; 469 fsp->state |= FC_SRB_DISCONTIG;
@@ -478,13 +484,14 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
478 484
479 if (~crc != le32_to_cpu(fr_crc(fp))) { 485 if (~crc != le32_to_cpu(fr_crc(fp))) {
480crc_err: 486crc_err:
481 stats = fc_lport_get_stats(lport); 487 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
482 stats->ErrorFrames++; 488 stats->ErrorFrames++;
483 /* FIXME - per cpu count, not total count! */ 489 /* per cpu count, not total count, but OK for limit */
484 if (stats->InvalidCRCCount++ < 5) 490 if (stats->InvalidCRCCount++ < 5)
485 printk(KERN_WARNING "libfc: CRC error on data " 491 printk(KERN_WARNING "libfc: CRC error on data "
486 "frame for port (%6x)\n", 492 "frame for port (%6.6x)\n",
487 fc_host_port_id(lport->host)); 493 lport->port_id);
494 put_cpu();
488 /* 495 /*
489 * Assume the frame is total garbage. 496 * Assume the frame is total garbage.
490 * We may have copied it over the good part 497 * We may have copied it over the good part
@@ -493,7 +500,7 @@ crc_err:
493 * Otherwise, ignore it. 500 * Otherwise, ignore it.
494 */ 501 */
495 if (fsp->state & FC_SRB_DISCONTIG) 502 if (fsp->state & FC_SRB_DISCONTIG)
496 fc_fcp_retry_cmd(fsp); 503 goto err;
497 return; 504 return;
498 } 505 }
499 } 506 }
@@ -509,6 +516,9 @@ crc_err:
509 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) && 516 if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
510 fsp->xfer_len == fsp->data_len - fsp->scsi_resid) 517 fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
511 fc_fcp_complete_locked(fsp); 518 fc_fcp_complete_locked(fsp);
519 return;
520err:
521 fc_fcp_recovery(fsp);
512} 522}
513 523
514/** 524/**
@@ -834,8 +844,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
834 * exit here 844 * exit here
835 */ 845 */
836 return; 846 return;
837 } else 847 }
838 goto err;
839 } 848 }
840 if (flags & FCP_SNS_LEN_VAL) { 849 if (flags & FCP_SNS_LEN_VAL) {
841 snsl = ntohl(rp_ex->fr_sns_len); 850 snsl = ntohl(rp_ex->fr_sns_len);
@@ -885,7 +894,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
885 return; 894 return;
886 } 895 }
887 fsp->status_code = FC_DATA_OVRRUN; 896 fsp->status_code = FC_DATA_OVRRUN;
888 FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, " 897 FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, "
889 "len %x, data len %x\n", 898 "len %x, data len %x\n",
890 fsp->rport->port_id, 899 fsp->rport->port_id,
891 fsp->xfer_len, expected_len, fsp->data_len); 900 fsp->xfer_len, expected_len, fsp->data_len);
@@ -1100,7 +1109,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1100 rpriv = rport->dd_data; 1109 rpriv = rport->dd_data;
1101 1110
1102 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id, 1111 fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
1103 fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, 1112 rpriv->local_port->port_id, FC_TYPE_FCP,
1104 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1113 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1105 1114
1106 seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, 1115 seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
@@ -1341,7 +1350,7 @@ static void fc_fcp_timeout(unsigned long data)
1341 else if (fsp->state & FC_SRB_RCV_STATUS) 1350 else if (fsp->state & FC_SRB_RCV_STATUS)
1342 fc_fcp_complete_locked(fsp); 1351 fc_fcp_complete_locked(fsp);
1343 else 1352 else
1344 fc_timeout_error(fsp); 1353 fc_fcp_recovery(fsp);
1345 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; 1354 fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
1346unlock: 1355unlock:
1347 fc_fcp_unlock_pkt(fsp); 1356 fc_fcp_unlock_pkt(fsp);
@@ -1373,7 +1382,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
1373 1382
1374 fr_seq(fp) = fsp->seq_ptr; 1383 fr_seq(fp) = fsp->seq_ptr;
1375 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id, 1384 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1376 fc_host_port_id(rpriv->local_port->host), FC_TYPE_ELS, 1385 rpriv->local_port->port_id, FC_TYPE_ELS,
1377 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1386 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1378 if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, 1387 if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
1379 fc_fcp_rec_resp, fsp, 1388 fc_fcp_rec_resp, fsp,
@@ -1385,7 +1394,7 @@ retry:
1385 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1394 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1386 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); 1395 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
1387 else 1396 else
1388 fc_timeout_error(fsp); 1397 fc_fcp_recovery(fsp);
1389} 1398}
1390 1399
1391/** 1400/**
@@ -1454,7 +1463,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1454 fc_fcp_retry_cmd(fsp); 1463 fc_fcp_retry_cmd(fsp);
1455 break; 1464 break;
1456 } 1465 }
1457 fc_timeout_error(fsp); 1466 fc_fcp_recovery(fsp);
1458 break; 1467 break;
1459 } 1468 }
1460 } else if (opcode == ELS_LS_ACC) { 1469 } else if (opcode == ELS_LS_ACC) {
@@ -1553,7 +1562,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1553 break; 1562 break;
1554 1563
1555 default: 1564 default:
1556 FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n", 1565 FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
1557 fsp, fsp->rport->port_id, error); 1566 fsp, fsp->rport->port_id, error);
1558 fsp->status_code = FC_CMD_PLOGO; 1567 fsp->status_code = FC_CMD_PLOGO;
1559 /* fall through */ 1568 /* fall through */
@@ -1563,13 +1572,13 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1563 * Assume REC or LS_ACC was lost. 1572 * Assume REC or LS_ACC was lost.
1564 * The exchange manager will have aborted REC, so retry. 1573 * The exchange manager will have aborted REC, so retry.
1565 */ 1574 */
1566 FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n", 1575 FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
1567 fsp->rport->port_id, error, fsp->recov_retry, 1576 fsp->rport->port_id, error, fsp->recov_retry,
1568 FC_MAX_RECOV_RETRY); 1577 FC_MAX_RECOV_RETRY);
1569 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1578 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1570 fc_fcp_rec(fsp); 1579 fc_fcp_rec(fsp);
1571 else 1580 else
1572 fc_timeout_error(fsp); 1581 fc_fcp_recovery(fsp);
1573 break; 1582 break;
1574 } 1583 }
1575 fc_fcp_unlock_pkt(fsp); 1584 fc_fcp_unlock_pkt(fsp);
@@ -1578,12 +1587,12 @@ out:
1578} 1587}
1579 1588
1580/** 1589/**
1581 * fc_timeout_error() - Handler for fcp_pkt timeouts 1590 * fc_fcp_recovery() - Handler for fcp_pkt recovery
1582 * @fsp: The FCP packt that has timed out 1591 * @fsp: The FCP pkt that needs to be aborted
1583 */ 1592 */
1584static void fc_timeout_error(struct fc_fcp_pkt *fsp) 1593static void fc_fcp_recovery(struct fc_fcp_pkt *fsp)
1585{ 1594{
1586 fsp->status_code = FC_CMD_TIME_OUT; 1595 fsp->status_code = FC_CMD_RECOVERY;
1587 fsp->cdb_status = 0; 1596 fsp->cdb_status = 0;
1588 fsp->io_status = 0; 1597 fsp->io_status = 0;
1589 /* 1598 /*
@@ -1631,7 +1640,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
1631 srr->srr_rel_off = htonl(offset); 1640 srr->srr_rel_off = htonl(offset);
1632 1641
1633 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id, 1642 fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
1634 fc_host_port_id(rpriv->local_port->host), FC_TYPE_FCP, 1643 rpriv->local_port->port_id, FC_TYPE_FCP,
1635 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 1644 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1636 1645
1637 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, 1646 seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
@@ -1689,7 +1698,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
1689 break; 1698 break;
1690 case ELS_LS_RJT: 1699 case ELS_LS_RJT:
1691 default: 1700 default:
1692 fc_timeout_error(fsp); 1701 fc_fcp_recovery(fsp);
1693 break; 1702 break;
1694 } 1703 }
1695 fc_fcp_unlock_pkt(fsp); 1704 fc_fcp_unlock_pkt(fsp);
@@ -1715,7 +1724,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
1715 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) 1724 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
1716 fc_fcp_rec(fsp); 1725 fc_fcp_rec(fsp);
1717 else 1726 else
1718 fc_timeout_error(fsp); 1727 fc_fcp_recovery(fsp);
1719 break; 1728 break;
1720 case -FC_EX_CLOSED: /* e.g., link failure */ 1729 case -FC_EX_CLOSED: /* e.g., link failure */
1721 /* fall through */ 1730 /* fall through */
@@ -1810,7 +1819,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1810 /* 1819 /*
1811 * setup the data direction 1820 * setup the data direction
1812 */ 1821 */
1813 stats = fc_lport_get_stats(lport); 1822 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1814 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 1823 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
1815 fsp->req_flags = FC_SRB_READ; 1824 fsp->req_flags = FC_SRB_READ;
1816 stats->InputRequests++; 1825 stats->InputRequests++;
@@ -1823,6 +1832,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
1823 fsp->req_flags = 0; 1832 fsp->req_flags = 0;
1824 stats->ControlRequests++; 1833 stats->ControlRequests++;
1825 } 1834 }
1835 put_cpu();
1826 1836
1827 fsp->tgt_flags = rpriv->flags; 1837 fsp->tgt_flags = rpriv->flags;
1828 1838
@@ -1907,6 +1917,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1907 } 1917 }
1908 break; 1918 break;
1909 case FC_ERROR: 1919 case FC_ERROR:
1920 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
1921 "due to FC_ERROR\n");
1910 sc_cmd->result = DID_ERROR << 16; 1922 sc_cmd->result = DID_ERROR << 16;
1911 break; 1923 break;
1912 case FC_DATA_UNDRUN: 1924 case FC_DATA_UNDRUN:
@@ -1915,12 +1927,19 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1915 * scsi status is good but transport level 1927 * scsi status is good but transport level
1916 * underrun. 1928 * underrun.
1917 */ 1929 */
1918 sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ? 1930 if (fsp->state & FC_SRB_RCV_STATUS) {
1919 DID_OK : DID_ERROR) << 16; 1931 sc_cmd->result = DID_OK << 16;
1932 } else {
1933 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml"
1934 " due to FC_DATA_UNDRUN (trans)\n");
1935 sc_cmd->result = DID_ERROR << 16;
1936 }
1920 } else { 1937 } else {
1921 /* 1938 /*
1922 * scsi got underrun, this is an error 1939 * scsi got underrun, this is an error
1923 */ 1940 */
1941 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
1942 "due to FC_DATA_UNDRUN (scsi)\n");
1924 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid; 1943 CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
1925 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1944 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1926 } 1945 }
@@ -1929,12 +1948,16 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1929 /* 1948 /*
1930 * overrun is an error 1949 * overrun is an error
1931 */ 1950 */
1951 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
1952 "due to FC_DATA_OVRRUN\n");
1932 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; 1953 sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
1933 break; 1954 break;
1934 case FC_CMD_ABORTED: 1955 case FC_CMD_ABORTED:
1956 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
1957 "due to FC_CMD_ABORTED\n");
1935 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; 1958 sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
1936 break; 1959 break;
1937 case FC_CMD_TIME_OUT: 1960 case FC_CMD_RECOVERY:
1938 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; 1961 sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
1939 break; 1962 break;
1940 case FC_CMD_RESET: 1963 case FC_CMD_RESET:
@@ -1944,6 +1967,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1944 sc_cmd->result = (DID_NO_CONNECT << 16); 1967 sc_cmd->result = (DID_NO_CONNECT << 16);
1945 break; 1968 break;
1946 default: 1969 default:
1970 FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
1971 "due to unknown error\n");
1947 sc_cmd->result = (DID_ERROR << 16); 1972 sc_cmd->result = (DID_ERROR << 16);
1948 break; 1973 break;
1949 } 1974 }
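Every branch in the hunks above funnels into sc_cmd->result, where the host byte (DID_*) occupies bits 16-23 and the low byte carries the target's fsp->cdb_status. A small standalone sketch of that packing; the DID_* values below are copied from scsi/scsi.h of this era:

    #include <stdio.h>

    #define DID_OK       0x00
    #define DID_ERROR    0x07
    #define DID_BUS_BUSY 0x02

    /* Host byte lives in bits 16-23 of the result word. */
    #define host_byte(result) (((result) >> 16) & 0xff)

    int main(void)
    {
            unsigned int cdb_status = 0x02;  /* e.g. SAM CHECK CONDITION */
            unsigned int result = (DID_ERROR << 16) | cdb_status;

            printf("host=0x%02x low-byte=0x%02x\n",
                   host_byte(result), result & 0xff);
            return 0;
    }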
@@ -2028,7 +2053,7 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
2028 if (lport->state != LPORT_ST_READY) 2053 if (lport->state != LPORT_ST_READY)
2029 return rc; 2054 return rc;
2030 2055
2031 FC_SCSI_DBG(lport, "Resetting rport (%6x)\n", rport->port_id); 2056 FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id);
2032 2057
2033 fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO); 2058 fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
2034 if (fsp == NULL) { 2059 if (fsp == NULL) {
@@ -2076,12 +2101,12 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
2076 2101
2077 if (fc_fcp_lport_queue_ready(lport)) { 2102 if (fc_fcp_lport_queue_ready(lport)) {
2078 shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded " 2103 shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
2079 "on port (%6x)\n", fc_host_port_id(lport->host)); 2104 "on port (%6.6x)\n", lport->port_id);
2080 return SUCCESS; 2105 return SUCCESS;
2081 } else { 2106 } else {
2082 shost_printk(KERN_INFO, shost, "libfc: Host reset failed, " 2107 shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
2083 "port (%6x) is not ready.\n", 2108 "port (%6.6x) is not ready.\n",
2084 fc_host_port_id(lport->host)); 2109 lport->port_id);
2085 return FAILED; 2110 return FAILED;
2086 } 2111 }
2087} 2112}
@@ -2166,7 +2191,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
2166 2191
2167 if (!list_empty(&si->scsi_pkt_queue)) 2192 if (!list_empty(&si->scsi_pkt_queue))
2168 printk(KERN_ERR "libfc: Leaked SCSI packets when destroying " 2193 printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
2169 "port (%6x)\n", fc_host_port_id(lport->host)); 2194 "port (%6.6x)\n", lport->port_id);
2170 2195
2171 mempool_destroy(si->scsi_pkt_pool); 2196 mempool_destroy(si->scsi_pkt_pool);
2172 kfree(si); 2197 kfree(si);
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index 741fd5c72e13..f5c0ca4b6ef8 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -45,9 +45,9 @@ extern unsigned int fc_debug_logging;
45 45
46#define FC_LPORT_DBG(lport, fmt, args...) \ 46#define FC_LPORT_DBG(lport, fmt, args...) \
47 FC_CHECK_LOGGING(FC_LPORT_LOGGING, \ 47 FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
48 printk(KERN_INFO "host%u: lport %6x: " fmt, \ 48 printk(KERN_INFO "host%u: lport %6.6x: " fmt, \
49 (lport)->host->host_no, \ 49 (lport)->host->host_no, \
50 fc_host_port_id((lport)->host), ##args)) 50 (lport)->port_id, ##args))
51 51
52#define FC_DISC_DBG(disc, fmt, args...) \ 52#define FC_DISC_DBG(disc, fmt, args...) \
53 FC_CHECK_LOGGING(FC_DISC_LOGGING, \ 53 FC_CHECK_LOGGING(FC_DISC_LOGGING, \
@@ -57,7 +57,7 @@ extern unsigned int fc_debug_logging;
57 57
58#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \ 58#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
59 FC_CHECK_LOGGING(FC_RPORT_LOGGING, \ 59 FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
60 printk(KERN_INFO "host%u: rport %6x: " fmt, \ 60 printk(KERN_INFO "host%u: rport %6.6x: " fmt, \
61 (lport)->host->host_no, \ 61 (lport)->host->host_no, \
62 (port_id), ##args)) 62 (port_id), ##args))
63 63
@@ -66,7 +66,7 @@ extern unsigned int fc_debug_logging;
66 66
67#define FC_FCP_DBG(pkt, fmt, args...) \ 67#define FC_FCP_DBG(pkt, fmt, args...) \
68 FC_CHECK_LOGGING(FC_FCP_LOGGING, \ 68 FC_CHECK_LOGGING(FC_FCP_LOGGING, \
69 printk(KERN_INFO "host%u: fcp: %6x: " fmt, \ 69 printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \
70 (pkt)->lp->host->host_no, \ 70 (pkt)->lp->host->host_no, \
71 pkt->rport->port_id, ##args)) 71 pkt->rport->port_id, ##args))
72 72
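The %6x to %6.6x conversions in these macros (and in the printk hunks earlier) change padding, not width: FC port IDs are 24-bit, and the added precision forces exactly six zero-padded hex digits. Plain printf shows the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned int port_id = 0xef;   /* a small, loop-style FC_ID */

            printf("[%6x]\n", port_id);    /* "[    ef]" - space padded */
            printf("[%6.6x]\n", port_id);  /* "[0000ef]" - fixed six digits */
            return 0;
    }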
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index d126ecfff704..79c9e3ccd341 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -172,7 +172,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
172 struct fc_rport_priv *rdata, 172 struct fc_rport_priv *rdata,
173 enum fc_rport_event event) 173 enum fc_rport_event event)
174{ 174{
175 FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event, 175 FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
176 rdata->ids.port_id); 176 rdata->ids.port_id);
177 177
178 mutex_lock(&lport->lp_mutex); 178 mutex_lock(&lport->lp_mutex);
@@ -183,7 +183,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
183 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID); 183 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
184 } else { 184 } else {
185 FC_LPORT_DBG(lport, "Received an READY event " 185 FC_LPORT_DBG(lport, "Received an READY event "
186 "on port (%6x) for the directory " 186 "on port (%6.6x) for the directory "
187 "server, but the lport is not " 187 "server, but the lport is not "
188 "in the DNS state, it's in the " 188 "in the DNS state, it's in the "
189 "%d state", rdata->ids.port_id, 189 "%d state", rdata->ids.port_id,
@@ -228,9 +228,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
228 u64 remote_wwnn) 228 u64 remote_wwnn)
229{ 229{
230 mutex_lock(&lport->disc.disc_mutex); 230 mutex_lock(&lport->disc.disc_mutex);
231 if (lport->ptp_rdata) 231 if (lport->ptp_rdata) {
232 lport->tt.rport_logoff(lport->ptp_rdata); 232 lport->tt.rport_logoff(lport->ptp_rdata);
233 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
234 }
233 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid); 235 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
236 kref_get(&lport->ptp_rdata->kref);
234 lport->ptp_rdata->ids.port_name = remote_wwpn; 237 lport->ptp_rdata->ids.port_name = remote_wwpn;
235 lport->ptp_rdata->ids.node_name = remote_wwnn; 238 lport->ptp_rdata->ids.node_name = remote_wwnn;
236 mutex_unlock(&lport->disc.disc_mutex); 239 mutex_unlock(&lport->disc.disc_mutex);
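The ptp_rdata hunk above pairs a kref_get() when the lport caches the rport pointer with a kref_put() (plus rport_logoff) when the cache is torn down, so the cached pointer cannot outlive the object. A userspace sketch of that get/put discipline, assuming a plain int stands in for struct kref and ignoring atomicity:

    #include <stdio.h>
    #include <stdlib.h>

    struct rport {
            int refcount;            /* the kernel uses struct kref */
            unsigned int port_id;
    };

    static void rport_destroy(struct rport *r)
    {
            printf("destroying rport %6.6x\n", r->port_id);
            free(r);
    }

    static void rport_get(struct rport *r)
    {
            r->refcount++;           /* kref_get() */
    }

    static void rport_put(struct rport *r)
    {
            if (--r->refcount == 0)  /* kref_put() runs release at zero */
                    rport_destroy(r);
    }

    int main(void)
    {
            struct rport *ptp = malloc(sizeof(*ptp));

            if (!ptp)
                    return 1;
            ptp->refcount = 1;       /* creation reference */
            ptp->port_id = 0x010203;
            rport_get(ptp);          /* lport->ptp_rdata caches the pointer */
            rport_put(ptp);          /* logoff drops the creation reference */
            rport_put(ptp);          /* cache torn down: last ref, destroy */
            return 0;
    }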
@@ -241,17 +244,6 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
241} 244}
242 245
243/** 246/**
244 * fc_get_host_port_type() - Return the port type of the given Scsi_Host
245 * @shost: The SCSI host whose port type is to be determined
246 */
247void fc_get_host_port_type(struct Scsi_Host *shost)
248{
249 /* TODO - currently just NPORT */
250 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
251}
252EXPORT_SYMBOL(fc_get_host_port_type);
253
254/**
255 * fc_get_host_port_state() - Return the port state of the given Scsi_Host 247 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
256 * @shost: The SCSI host whose port state is to be determined 248 * @shost: The SCSI host whose port state is to be determined
257 */ 249 */
@@ -572,8 +564,8 @@ void __fc_linkup(struct fc_lport *lport)
572 */ 564 */
573void fc_linkup(struct fc_lport *lport) 565void fc_linkup(struct fc_lport *lport)
574{ 566{
575 printk(KERN_INFO "host%d: libfc: Link up on port (%6x)\n", 567 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
576 lport->host->host_no, fc_host_port_id(lport->host)); 568 lport->host->host_no, lport->port_id);
577 569
578 mutex_lock(&lport->lp_mutex); 570 mutex_lock(&lport->lp_mutex);
579 __fc_linkup(lport); 571 __fc_linkup(lport);
@@ -602,8 +594,8 @@ void __fc_linkdown(struct fc_lport *lport)
602 */ 594 */
603void fc_linkdown(struct fc_lport *lport) 595void fc_linkdown(struct fc_lport *lport)
604{ 596{
605 printk(KERN_INFO "host%d: libfc: Link down on port (%6x)\n", 597 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
606 lport->host->host_no, fc_host_port_id(lport->host)); 598 lport->host->host_no, lport->port_id);
607 599
608 mutex_lock(&lport->lp_mutex); 600 mutex_lock(&lport->lp_mutex);
609 __fc_linkdown(lport); 601 __fc_linkdown(lport);
@@ -704,8 +696,8 @@ void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
704 break; 696 break;
705 case DISC_EV_FAILED: 697 case DISC_EV_FAILED:
706 printk(KERN_ERR "host%d: libfc: " 698 printk(KERN_ERR "host%d: libfc: "
707 "Discovery failed for port (%6x)\n", 699 "Discovery failed for port (%6.6x)\n",
708 lport->host->host_no, fc_host_port_id(lport->host)); 700 lport->host->host_no, lport->port_id);
709 mutex_lock(&lport->lp_mutex); 701 mutex_lock(&lport->lp_mutex);
710 fc_lport_enter_reset(lport); 702 fc_lport_enter_reset(lport);
711 mutex_unlock(&lport->lp_mutex); 703 mutex_unlock(&lport->lp_mutex);
@@ -750,10 +742,14 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
750 struct fc_frame *fp) 742 struct fc_frame *fp)
751{ 743{
752 if (port_id) 744 if (port_id)
753 printk(KERN_INFO "host%d: Assigned Port ID %6x\n", 745 printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
754 lport->host->host_no, port_id); 746 lport->host->host_no, port_id);
755 747
748 lport->port_id = port_id;
749
750 /* Update the fc_host */
756 fc_host_port_id(lport->host) = port_id; 751 fc_host_port_id(lport->host) = port_id;
752
757 if (lport->tt.lport_set_port_id) 753 if (lport->tt.lport_set_port_id)
758 lport->tt.lport_set_port_id(lport, port_id, fp); 754 lport->tt.lport_set_port_id(lport, port_id, fp);
759} 755}
@@ -797,11 +793,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
797 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); 793 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
798 if (remote_wwpn == lport->wwpn) { 794 if (remote_wwpn == lport->wwpn) {
799 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port " 795 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
800 "with same WWPN %llx\n", 796 "with same WWPN %16.16llx\n",
801 lport->host->host_no, remote_wwpn); 797 lport->host->host_no, remote_wwpn);
802 goto out; 798 goto out;
803 } 799 }
804 FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn); 800 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
805 801
806 /* 802 /*
807 * XXX what is the right thing to do for FIDs? 803 * XXX what is the right thing to do for FIDs?
@@ -832,7 +828,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
832 */ 828 */
833 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; 829 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
834 ep = fc_seq_exch(sp); 830 ep = fc_seq_exch(sp);
835 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, 831 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, remote_fid, local_fid,
836 FC_TYPE_ELS, f_ctl, 0); 832 FC_TYPE_ELS, f_ctl, 0);
837 lport->tt.seq_send(lport, sp, fp); 833 lport->tt.seq_send(lport, sp, fp);
838 834
@@ -947,14 +943,18 @@ static void fc_lport_reset_locked(struct fc_lport *lport)
947 if (lport->dns_rdata) 943 if (lport->dns_rdata)
948 lport->tt.rport_logoff(lport->dns_rdata); 944 lport->tt.rport_logoff(lport->dns_rdata);
949 945
950 lport->ptp_rdata = NULL; 946 if (lport->ptp_rdata) {
947 lport->tt.rport_logoff(lport->ptp_rdata);
948 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
949 lport->ptp_rdata = NULL;
950 }
951 951
952 lport->tt.disc_stop(lport); 952 lport->tt.disc_stop(lport);
953 953
954 lport->tt.exch_mgr_reset(lport, 0, 0); 954 lport->tt.exch_mgr_reset(lport, 0, 0);
955 fc_host_fabric_name(lport->host) = 0; 955 fc_host_fabric_name(lport->host) = 0;
956 956
957 if (fc_host_port_id(lport->host)) 957 if (lport->port_id)
958 fc_lport_set_port_id(lport, 0, NULL); 958 fc_lport_set_port_id(lport, 0, NULL);
959} 959}
960 960
@@ -1492,7 +1492,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1492 lport->r_a_tov = 2 * e_d_tov; 1492 lport->r_a_tov = 2 * e_d_tov;
1493 fc_lport_set_port_id(lport, did, fp); 1493 fc_lport_set_port_id(lport, did, fp);
1494 printk(KERN_INFO "host%d: libfc: " 1494 printk(KERN_INFO "host%d: libfc: "
1495 "Port (%6x) entered " 1495 "Port (%6.6x) entered "
1496 "point-to-point mode\n", 1496 "point-to-point mode\n",
1497 lport->host->host_no, did); 1497 lport->host->host_no, did);
1498 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id), 1498 fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
@@ -1699,7 +1699,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
1699 fh = fc_frame_header_get(fp); 1699 fh = fc_frame_header_get(fp);
1700 fh->fh_r_ctl = FC_RCTL_ELS_REQ; 1700 fh->fh_r_ctl = FC_RCTL_ELS_REQ;
1701 hton24(fh->fh_d_id, did); 1701 hton24(fh->fh_d_id, did);
1702 hton24(fh->fh_s_id, fc_host_port_id(lport->host)); 1702 hton24(fh->fh_s_id, lport->port_id);
1703 fh->fh_type = FC_TYPE_ELS; 1703 fh->fh_type = FC_TYPE_ELS;
1704 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | 1704 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1705 FC_FC_END_SEQ | FC_FC_SEQ_INIT); 1705 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
@@ -1759,7 +1759,7 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
1759 fh = fc_frame_header_get(fp); 1759 fh = fc_frame_header_get(fp);
1760 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL; 1760 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
1761 hton24(fh->fh_d_id, did); 1761 hton24(fh->fh_d_id, did);
1762 hton24(fh->fh_s_id, fc_host_port_id(lport->host)); 1762 hton24(fh->fh_s_id, lport->port_id);
1763 fh->fh_type = FC_TYPE_CT; 1763 fh->fh_type = FC_TYPE_CT;
1764 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | 1764 hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ |
1765 FC_FC_END_SEQ | FC_FC_SEQ_INIT); 1765 FC_FC_END_SEQ | FC_FC_SEQ_INIT);
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index c68f6c7341c2..dd2b43bb1c70 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -69,12 +69,15 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
69 struct fc_lport *lport = NULL; 69 struct fc_lport *lport = NULL;
70 struct fc_lport *vn_port; 70 struct fc_lport *vn_port;
71 71
72 if (fc_host_port_id(n_port->host) == port_id) 72 if (n_port->port_id == port_id)
73 return n_port; 73 return n_port;
74 74
75 if (port_id == FC_FID_FLOGI)
76 return n_port; /* for point-to-point */
77
75 mutex_lock(&n_port->lp_mutex); 78 mutex_lock(&n_port->lp_mutex);
76 list_for_each_entry(vn_port, &n_port->vports, list) { 79 list_for_each_entry(vn_port, &n_port->vports, list) {
77 if (fc_host_port_id(vn_port->host) == port_id) { 80 if (vn_port->port_id == port_id) {
78 lport = vn_port; 81 lport = vn_port;
79 break; 82 break;
80 } 83 }
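fc_vport_id_lookup() above gains a point-to-point special case: the well-known FLOGI address (0xFFFFFE) resolves to the N_Port itself before any vport is consulted. A minimal shape of that lookup, with hypothetical types and a linear scan standing in for the kernel list walk:

    #include <stdio.h>
    #include <stddef.h>

    #define FC_FID_FLOGI 0xfffffe    /* well-known fabric login address */

    struct lport { unsigned int port_id; };

    static struct lport *vport_id_lookup(struct lport *n_port,
                                         struct lport *vports, size_t n,
                                         unsigned int port_id)
    {
            size_t i;

            if (n_port->port_id == port_id)
                    return n_port;
            if (port_id == FC_FID_FLOGI)
                    return n_port;   /* point-to-point: no vports exist */
            for (i = 0; i < n; i++)
                    if (vports[i].port_id == port_id)
                            return &vports[i];
            return NULL;
    }

    int main(void)
    {
            struct lport n_port = { 0x010000 };
            struct lport vports[] = { { 0x010001 }, { 0x010002 } };

            printf("flogi -> n_port? %d\n",
                   vport_id_lookup(&n_port, vports, 2, FC_FID_FLOGI) == &n_port);
            return 0;
    }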
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b37d0ff28b35..39e440f0f54a 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1442,136 +1442,115 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1442 struct fc_els_spp *spp; /* response spp */ 1442 struct fc_els_spp *spp; /* response spp */
1443 unsigned int len; 1443 unsigned int len;
1444 unsigned int plen; 1444 unsigned int plen;
1445 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1446 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1447 enum fc_els_spp_resp resp; 1445 enum fc_els_spp_resp resp;
1448 struct fc_seq_els_data rjt_data; 1446 struct fc_seq_els_data rjt_data;
1449 u32 f_ctl; 1447 u32 f_ctl;
1450 u32 fcp_parm; 1448 u32 fcp_parm;
1451 u32 roles = FC_RPORT_ROLE_UNKNOWN; 1449 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1452 rjt_data.fp = NULL;
1453 1450
1451 rjt_data.fp = NULL;
1454 fh = fc_frame_header_get(rx_fp); 1452 fh = fc_frame_header_get(rx_fp);
1455 1453
1456 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", 1454 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1457 fc_rport_state(rdata)); 1455 fc_rport_state(rdata));
1458 1456
1459 switch (rdata->rp_state) {
1460 case RPORT_ST_PRLI:
1461 case RPORT_ST_RTV:
1462 case RPORT_ST_READY:
1463 case RPORT_ST_ADISC:
1464 reason = ELS_RJT_NONE;
1465 break;
1466 default:
1467 fc_frame_free(rx_fp);
1468 return;
1469 break;
1470 }
1471 len = fr_len(rx_fp) - sizeof(*fh); 1457 len = fr_len(rx_fp) - sizeof(*fh);
1472 pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); 1458 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1473 if (pp == NULL) { 1459 if (!pp)
1474 reason = ELS_RJT_PROT; 1460 goto reject_len;
1475 explan = ELS_EXPL_INV_LEN; 1461 plen = ntohs(pp->prli.prli_len);
1476 } else { 1462 if ((plen % 4) != 0 || plen > len || plen < 16)
1477 plen = ntohs(pp->prli.prli_len); 1463 goto reject_len;
1478 if ((plen % 4) != 0 || plen > len) { 1464 if (plen < len)
1479 reason = ELS_RJT_PROT; 1465 len = plen;
1480 explan = ELS_EXPL_INV_LEN; 1466 plen = pp->prli.prli_spp_len;
1481 } else if (plen < len) { 1467 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1482 len = plen; 1468 plen > len || len < sizeof(*pp) || plen < 12)
1483 } 1469 goto reject_len;
1484 plen = pp->prli.prli_spp_len; 1470 rspp = &pp->spp;
1485 if ((plen % 4) != 0 || plen < sizeof(*spp) || 1471
1486 plen > len || len < sizeof(*pp)) { 1472 fp = fc_frame_alloc(lport, len);
1487 reason = ELS_RJT_PROT; 1473 if (!fp) {
1488 explan = ELS_EXPL_INV_LEN; 1474 rjt_data.reason = ELS_RJT_UNAB;
1489 } 1475 rjt_data.explan = ELS_EXPL_INSUF_RES;
1490 rspp = &pp->spp; 1476 goto reject;
1491 } 1477 }
1492 if (reason != ELS_RJT_NONE || 1478 sp = lport->tt.seq_start_next(sp);
1493 (fp = fc_frame_alloc(lport, len)) == NULL) { 1479 WARN_ON(!sp);
1494 rjt_data.reason = reason; 1480 pp = fc_frame_payload_get(fp, len);
1495 rjt_data.explan = explan; 1481 WARN_ON(!pp);
1496 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); 1482 memset(pp, 0, len);
1497 } else { 1483 pp->prli.prli_cmd = ELS_LS_ACC;
1498 sp = lport->tt.seq_start_next(sp); 1484 pp->prli.prli_spp_len = plen;
1499 WARN_ON(!sp); 1485 pp->prli.prli_len = htons(len);
1500 pp = fc_frame_payload_get(fp, len); 1486 len -= sizeof(struct fc_els_prli);
1501 WARN_ON(!pp);
1502 memset(pp, 0, len);
1503 pp->prli.prli_cmd = ELS_LS_ACC;
1504 pp->prli.prli_spp_len = plen;
1505 pp->prli.prli_len = htons(len);
1506 len -= sizeof(struct fc_els_prli);
1507
1508 /* reinitialize remote port roles */
1509 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
1510
1511 /*
1512 * Go through all the service parameter pages and build
1513 * response. If plen indicates longer SPP than standard,
1514 * use that. The entire response has been pre-cleared above.
1515 */
1516 spp = &pp->spp;
1517 while (len >= plen) {
1518 spp->spp_type = rspp->spp_type;
1519 spp->spp_type_ext = rspp->spp_type_ext;
1520 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1521 resp = FC_SPP_RESP_ACK;
1522 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1523 resp = FC_SPP_RESP_NO_PA;
1524 switch (rspp->spp_type) {
1525 case 0: /* common to all FC-4 types */
1526 break;
1527 case FC_TYPE_FCP:
1528 fcp_parm = ntohl(rspp->spp_params);
1529 if (fcp_parm & FCP_SPPF_RETRY)
1530 rdata->flags |= FC_RP_FLAGS_RETRY;
1531 rdata->supported_classes = FC_COS_CLASS3;
1532 if (fcp_parm & FCP_SPPF_INIT_FCN)
1533 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1534 if (fcp_parm & FCP_SPPF_TARG_FCN)
1535 roles |= FC_RPORT_ROLE_FCP_TARGET;
1536 rdata->ids.roles = roles;
1537
1538 spp->spp_params =
1539 htonl(lport->service_params);
1540 break;
1541 default:
1542 resp = FC_SPP_RESP_INVL;
1543 break;
1544 }
1545 spp->spp_flags |= resp;
1546 len -= plen;
1547 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1548 spp = (struct fc_els_spp *)((char *)spp + plen);
1549 }
1550 1487
1551 /* 1488 /* reinitialize remote port roles */
1552 * Send LS_ACC. If this fails, the originator should retry. 1489 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
1553 */
1554 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1555 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1556 ep = fc_seq_exch(sp);
1557 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1558 FC_TYPE_ELS, f_ctl, 0);
1559 lport->tt.seq_send(lport, sp, fp);
1560 1490
1561 /* 1491 /*
1562 * Get lock and re-check state. 1492 * Go through all the service parameter pages and build
1563 */ 1493 * response. If plen indicates longer SPP than standard,
1564 switch (rdata->rp_state) { 1494 * use that. The entire response has been pre-cleared above.
1565 case RPORT_ST_PRLI: 1495 */
1566 fc_rport_enter_ready(rdata); 1496 spp = &pp->spp;
1497 while (len >= plen) {
1498 spp->spp_type = rspp->spp_type;
1499 spp->spp_type_ext = rspp->spp_type_ext;
1500 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1501 resp = FC_SPP_RESP_ACK;
1502
1503 switch (rspp->spp_type) {
1504 case 0: /* common to all FC-4 types */
1567 break; 1505 break;
1568 case RPORT_ST_READY: 1506 case FC_TYPE_FCP:
1569 case RPORT_ST_ADISC: 1507 fcp_parm = ntohl(rspp->spp_params);
1508 if (fcp_parm & FCP_SPPF_RETRY)
1509 rdata->flags |= FC_RP_FLAGS_RETRY;
1510 rdata->supported_classes = FC_COS_CLASS3;
1511 if (fcp_parm & FCP_SPPF_INIT_FCN)
1512 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1513 if (fcp_parm & FCP_SPPF_TARG_FCN)
1514 roles |= FC_RPORT_ROLE_FCP_TARGET;
1515 rdata->ids.roles = roles;
1516
1517 spp->spp_params = htonl(lport->service_params);
1570 break; 1518 break;
1571 default: 1519 default:
1520 resp = FC_SPP_RESP_INVL;
1572 break; 1521 break;
1573 } 1522 }
1523 spp->spp_flags |= resp;
1524 len -= plen;
1525 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1526 spp = (struct fc_els_spp *)((char *)spp + plen);
1527 }
1528
1529 /*
1530 * Send LS_ACC. If this fails, the originator should retry.
1531 */
1532 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1533 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1534 ep = fc_seq_exch(sp);
1535 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1536 FC_TYPE_ELS, f_ctl, 0);
1537 lport->tt.seq_send(lport, sp, fp);
1538
1539 switch (rdata->rp_state) {
1540 case RPORT_ST_PRLI:
1541 fc_rport_enter_ready(rdata);
1542 break;
1543 default:
1544 break;
1574 } 1545 }
1546 goto drop;
1547
1548reject_len:
1549 rjt_data.reason = ELS_RJT_PROT;
1550 rjt_data.explan = ELS_EXPL_INV_LEN;
1551reject:
1552 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1553drop:
1575 fc_frame_free(rx_fp); 1554 fc_frame_free(rx_fp);
1576} 1555}
1577 1556
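The PRLI rework above trades the nested reason/explan bookkeeping for the kernel's labelled-exit idiom: every length check jumps to one reject_len label, allocation failure jumps to reject, and success and failure both drain through a single drop point that frees the frame. The flow in isolation, with hypothetical names standing in for the ELS structures:

    #include <stdio.h>
    #include <stdlib.h>

    static int handle_request(size_t len, size_t plen)
    {
            char *frame = malloc(64);        /* stands in for the rx frame */
            int rc = 0;

            if (!frame)
                    return -1;
            if (plen % 4 != 0 || plen > len)
                    goto reject_len;         /* shared label for length checks */

            printf("accepted: plen=%zu\n", plen);
            goto drop;                       /* success still drops the frame */

    reject_len:
            printf("rejected: bad length %zu\n", plen);
            rc = -1;
    drop:
            free(frame);                     /* single cleanup point */
            return rc;
    }

    int main(void)
    {
            handle_request(32, 16);
            handle_request(32, 7);
            return 0;
    }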
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 5c92620292fb..8eeb39ffa37f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -421,7 +421,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
421 struct iscsi_conn *conn = tcp_conn->iscsi_conn; 421 struct iscsi_conn *conn = tcp_conn->iscsi_conn;
422 struct hash_desc *rx_hash = NULL; 422 struct hash_desc *rx_hash = NULL;
423 423
424 if (conn->datadgst_en & 424 if (conn->datadgst_en &&
425 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) 425 !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
426 rx_hash = tcp_conn->rx_hash; 426 rx_hash = tcp_conn->rx_hash;
427 427
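The one-character libiscsi_tcp fix above is worth spelling out: '!' always yields 0 or 1, so a bitwise '&' against a flag that happens to hold an even nonzero value evaluates to 0 even when both conditions are logically true. A runnable demonstration, assuming the hypothetical flag value 2:

    #include <stdio.h>

    int main(void)
    {
            unsigned int datadgst_en = 2;       /* hypothetical even flag value */
            unsigned int caps = 0;              /* digest offload not supported */
            unsigned int offload = caps & 0x4;  /* stands in for CAP_DIGEST_OFFLOAD */

            printf("bitwise: %d\n", (int)(datadgst_en & !offload));   /* 2 & 1 == 0: bug */
            printf("logical: %d\n", (int)(datadgst_en && !offload));  /* 1: intended */
            return 0;
    }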
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 565e16dd74fc..e35a4c71eb9a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -310,7 +310,9 @@ struct lpfc_vport {
310#define FC_NLP_MORE 0x40 /* More node to process in node tbl */ 310#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
311#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ 311#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
312#define FC_FABRIC 0x100 /* We are fabric attached */ 312#define FC_FABRIC 0x100 /* We are fabric attached */
313#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
313#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ 314#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
315#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
314#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ 316#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
315#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ 317#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
316#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ 318#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -554,6 +556,7 @@ struct lpfc_hba {
554 struct lpfc_dmabuf slim2p; 556 struct lpfc_dmabuf slim2p;
555 557
556 MAILBOX_t *mbox; 558 MAILBOX_t *mbox;
559 uint32_t *mbox_ext;
557 uint32_t *inb_ha_copy; 560 uint32_t *inb_ha_copy;
558 uint32_t *inb_counter; 561 uint32_t *inb_counter;
559 uint32_t inb_last_counter; 562 uint32_t inb_last_counter;
@@ -622,6 +625,7 @@ struct lpfc_hba {
622 uint32_t cfg_enable_hba_reset; 625 uint32_t cfg_enable_hba_reset;
623 uint32_t cfg_enable_hba_heartbeat; 626 uint32_t cfg_enable_hba_heartbeat;
624 uint32_t cfg_enable_bg; 627 uint32_t cfg_enable_bg;
628 uint32_t cfg_hostmem_hgp;
625 uint32_t cfg_log_verbose; 629 uint32_t cfg_log_verbose;
626 uint32_t cfg_aer_support; 630 uint32_t cfg_aer_support;
627 uint32_t cfg_suppress_link_up; 631 uint32_t cfg_suppress_link_up;
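FC_VPORT_LOGO_RCVD and FC_LOGO_RCVD_DID_CHNG above slot into the existing fc_flag bit mask; later hunks in this series set and clear them under the host lock. A trivial check, with values copied from the hunk, that the new bits stay disjoint from their neighbours:

    #include <stdio.h>

    #define FC_FABRIC             0x100
    #define FC_VPORT_LOGO_RCVD    0x200   /* LOGO received on vport */
    #define FC_RSCN_DISCOVERY     0x400
    #define FC_LOGO_RCVD_DID_CHNG 0x800   /* FDISC detected a DID change */

    int main(void)
    {
            unsigned int fc_flag = 0;

            fc_flag |= FC_VPORT_LOGO_RCVD;   /* set in the LOGO handler */
            fc_flag &= ~FC_VPORT_LOGO_RCVD;  /* cleared on FLOGI/FDISC done */
            printf("new bits disjoint: %d\n",
                   !(FC_VPORT_LOGO_RCVD &
                     (FC_FABRIC | FC_RSCN_DISCOVERY | FC_LOGO_RCVD_DID_CHNG)));
            return 0;
    }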
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1849e33e68f9..2e5f376d9ccc 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -869,6 +869,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
869 LPFC_MBOXQ_t *pmboxq; 869 LPFC_MBOXQ_t *pmboxq;
870 MAILBOX_t *pmb; 870 MAILBOX_t *pmb;
871 int rc = 0; 871 int rc = 0;
872 uint32_t max_vpi;
872 873
873 /* 874 /*
874 * prevent udev from issuing mailbox commands until the port is 875 * prevent udev from issuing mailbox commands until the port is
@@ -916,11 +917,17 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
916 if (axri) 917 if (axri)
917 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - 918 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
918 phba->sli4_hba.max_cfg_param.xri_used; 919 phba->sli4_hba.max_cfg_param.xri_used;
920
921 /* Account for differences with SLI-3. Get vpi count from
922 * mailbox data and subtract one for max vpi value.
923 */
924 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
925 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
926
919 if (mvpi) 927 if (mvpi)
920 *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 928 *mvpi = max_vpi;
921 if (avpi) 929 if (avpi)
922 *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 930 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
923 phba->sli4_hba.max_cfg_param.vpi_used;
924 } else { 931 } else {
925 if (mrpi) 932 if (mrpi)
926 *mrpi = pmb->un.varRdConfig.max_rpi; 933 *mrpi = pmb->un.varRdConfig.max_rpi;
@@ -1925,13 +1932,12 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
1925 " 2 - select SLI-2 even on SLI-3 capable HBAs," 1932 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
1926 " 3 - select SLI-3"); 1933 " 3 - select SLI-3");
1927 1934
1928int lpfc_enable_npiv = 0; 1935int lpfc_enable_npiv = 1;
1929module_param(lpfc_enable_npiv, int, 0); 1936module_param(lpfc_enable_npiv, int, 0);
1930MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); 1937MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
1931lpfc_param_show(enable_npiv); 1938lpfc_param_show(enable_npiv);
1932lpfc_param_init(enable_npiv, 0, 0, 1); 1939lpfc_param_init(enable_npiv, 1, 0, 1);
1933static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, 1940static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
1934 lpfc_enable_npiv_show, NULL);
1935 1941
1936/* 1942/*
1937# lpfc_suppress_link_up: Bring link up at initialization 1943# lpfc_suppress_link_up: Bring link up at initialization
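The lpfc_get_hba_info() hunk earlier in this file converts the raw SLI-4 VPI count into a maximum index, guarding the subtraction so a zero count cannot wrap an unsigned value. The guard in isolation:

    #include <stdio.h>

    static unsigned int max_vpi_from_count(unsigned int vpi_count)
    {
            return vpi_count > 0 ? vpi_count - 1 : 0;
    }

    int main(void)
    {
            printf("%u -> %u\n", 8u, max_vpi_from_count(8));  /* 7 */
            printf("%u -> %u\n", 0u, max_vpi_from_count(0));  /* 0, no wrap */
            return 0;
    }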
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index d62b3e467926..dcf088262b20 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -79,6 +79,12 @@ struct lpfc_bsg_iocb {
79struct lpfc_bsg_mbox { 79struct lpfc_bsg_mbox {
80 LPFC_MBOXQ_t *pmboxq; 80 LPFC_MBOXQ_t *pmboxq;
81 MAILBOX_t *mb; 81 MAILBOX_t *mb;
82 struct lpfc_dmabuf *rxbmp; /* for BIU diags */
83 struct lpfc_dmabufext *dmp; /* for BIU diags */
84 uint8_t *ext; /* extended mailbox data */
85 uint32_t mbOffset; /* from app */
86 uint32_t inExtWLen; /* from app */
87 uint32_t outExtWLen; /* from app */
82 88
83 /* job waiting for this mbox command to finish */ 89 /* job waiting for this mbox command to finish */
84 struct fc_bsg_job *set_job; 90 struct fc_bsg_job *set_job;
@@ -1708,21 +1714,26 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1708 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1714 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1709 if (dmabuf) { 1715 if (dmabuf) {
1710 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); 1716 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
1711 INIT_LIST_HEAD(&dmabuf->list); 1717 if (dmabuf->virt) {
1712 bpl = (struct ulp_bde64 *) dmabuf->virt; 1718 INIT_LIST_HEAD(&dmabuf->list);
1713 memset(bpl, 0, sizeof(*bpl)); 1719 bpl = (struct ulp_bde64 *) dmabuf->virt;
1714 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); 1720 memset(bpl, 0, sizeof(*bpl));
1715 bpl->addrHigh = 1721 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
1716 le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl))); 1722 bpl->addrHigh =
1717 bpl->addrLow = 1723 le32_to_cpu(putPaddrHigh(dmabuf->phys +
1718 le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl))); 1724 sizeof(*bpl)));
1719 bpl->tus.f.bdeFlags = 0; 1725 bpl->addrLow =
1720 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; 1726 le32_to_cpu(putPaddrLow(dmabuf->phys +
1721 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1727 sizeof(*bpl)));
1728 bpl->tus.f.bdeFlags = 0;
1729 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
1730 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1731 }
1722 } 1732 }
1723 1733
1724 if (cmdiocbq == NULL || rspiocbq == NULL || 1734 if (cmdiocbq == NULL || rspiocbq == NULL ||
1725 dmabuf == NULL || bpl == NULL || ctreq == NULL) { 1735 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
1736 dmabuf->virt == NULL) {
1726 ret_val = ENOMEM; 1737 ret_val = ENOMEM;
1727 goto err_get_xri_exit; 1738 goto err_get_xri_exit;
1728 } 1739 }
@@ -1918,9 +1929,11 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
1918 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1929 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1919 if (rxbmp != NULL) { 1930 if (rxbmp != NULL) {
1920 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 1931 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
1921 INIT_LIST_HEAD(&rxbmp->list); 1932 if (rxbmp->virt) {
1922 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 1933 INIT_LIST_HEAD(&rxbmp->list);
1923 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); 1934 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
1935 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
1936 }
1924 } 1937 }
1925 1938
1926 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { 1939 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
@@ -2174,14 +2187,16 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job)
2174 2187
2175 if (txbmp) { 2188 if (txbmp) {
2176 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); 2189 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2177 INIT_LIST_HEAD(&txbmp->list); 2190 if (txbmp->virt) {
2178 txbpl = (struct ulp_bde64 *) txbmp->virt; 2191 INIT_LIST_HEAD(&txbmp->list);
2179 if (txbpl) 2192 txbpl = (struct ulp_bde64 *) txbmp->virt;
2180 txbuffer = diag_cmd_data_alloc(phba, 2193 txbuffer = diag_cmd_data_alloc(phba,
2181 txbpl, full_size, 0); 2194 txbpl, full_size, 0);
2195 }
2182 } 2196 }
2183 2197
2184 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) { 2198 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
2199 !txbmp->virt) {
2185 rc = -ENOMEM; 2200 rc = -ENOMEM;
2186 goto err_loopback_test_exit; 2201 goto err_loopback_test_exit;
2187 } 2202 }
@@ -2377,35 +2392,90 @@ void
2377lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2392lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2378{ 2393{
2379 struct bsg_job_data *dd_data; 2394 struct bsg_job_data *dd_data;
2380 MAILBOX_t *pmb;
2381 MAILBOX_t *mb;
2382 struct fc_bsg_job *job; 2395 struct fc_bsg_job *job;
2383 uint32_t size; 2396 uint32_t size;
2384 unsigned long flags; 2397 unsigned long flags;
2398 uint8_t *to;
2399 uint8_t *from;
2385 2400
2386 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2401 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2387 dd_data = pmboxq->context1; 2402 dd_data = pmboxq->context1;
2403 /* job already timed out? */
2388 if (!dd_data) { 2404 if (!dd_data) {
2389 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2405 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2390 return; 2406 return;
2391 } 2407 }
2392 2408
2393 pmb = &dd_data->context_un.mbox.pmboxq->u.mb; 2409 /* build the outgoing buffer to do an sg copy
2394 mb = dd_data->context_un.mbox.mb; 2410 * the format is the response mailbox followed by any extended
2411 * mailbox data
2412 */
2413 from = (uint8_t *)&pmboxq->u.mb;
2414 to = (uint8_t *)dd_data->context_un.mbox.mb;
2415 memcpy(to, from, sizeof(MAILBOX_t));
2416 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
2417 /* copy the extended data if any, count is in words */
2418 if (dd_data->context_un.mbox.outExtWLen) {
2419 from = (uint8_t *)dd_data->context_un.mbox.ext;
2420 to += sizeof(MAILBOX_t);
2421 size = dd_data->context_un.mbox.outExtWLen *
2422 sizeof(uint32_t);
2423 memcpy(to, from, size);
2424 } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
2425 from = (uint8_t *)dd_data->context_un.mbox.
2426 dmp->dma.virt;
2427 to += sizeof(MAILBOX_t);
2428 size = dd_data->context_un.mbox.dmp->size;
2429 memcpy(to, from, size);
2430 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
2431 (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
2432 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
2433 virt;
2434 to += sizeof(MAILBOX_t);
2435 size = pmboxq->u.mb.un.varWords[5];
2436 memcpy(to, from, size);
2437 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
2438 from = (uint8_t *)dd_data->context_un.
2439 mbox.dmp->dma.virt;
2440 to += sizeof(MAILBOX_t);
2441 size = dd_data->context_un.mbox.dmp->size;
2442 memcpy(to, from, size);
2443 }
2444 }
2445
2446 from = (uint8_t *)dd_data->context_un.mbox.mb;
2395 job = dd_data->context_un.mbox.set_job; 2447 job = dd_data->context_un.mbox.set_job;
2396 memcpy(mb, pmb, sizeof(*pmb)); 2448 size = job->reply_payload.payload_len;
2397 size = job->request_payload.payload_len;
2398 job->reply->reply_payload_rcv_len = 2449 job->reply->reply_payload_rcv_len =
2399 sg_copy_from_buffer(job->reply_payload.sg_list, 2450 sg_copy_from_buffer(job->reply_payload.sg_list,
2400 job->reply_payload.sg_cnt, 2451 job->reply_payload.sg_cnt,
2401 mb, size); 2452 from, size);
2402 job->reply->result = 0; 2453 job->reply->result = 0;
2454
2403 dd_data->context_un.mbox.set_job = NULL; 2455 dd_data->context_un.mbox.set_job = NULL;
2404 job->dd_data = NULL; 2456 job->dd_data = NULL;
2405 job->job_done(job); 2457 job->job_done(job);
2458 /* need to hold the lock until we call job done to hold off
2459 * the timeout handler returning to the midlayer while
2460	 * we are still processing the job
2461 */
2406 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2462 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2463
2464 kfree(dd_data->context_un.mbox.mb);
2407 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 2465 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2408 kfree(mb); 2466 kfree(dd_data->context_un.mbox.ext);
2467 if (dd_data->context_un.mbox.dmp) {
2468 dma_free_coherent(&phba->pcidev->dev,
2469 dd_data->context_un.mbox.dmp->size,
2470 dd_data->context_un.mbox.dmp->dma.virt,
2471 dd_data->context_un.mbox.dmp->dma.phys);
2472 kfree(dd_data->context_un.mbox.dmp);
2473 }
2474 if (dd_data->context_un.mbox.rxbmp) {
2475 lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
2476 dd_data->context_un.mbox.rxbmp->phys);
2477 kfree(dd_data->context_un.mbox.rxbmp);
2478 }
2409 kfree(dd_data); 2479 kfree(dd_data);
2410 return; 2480 return;
2411} 2481}
@@ -2464,10 +2534,12 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2464 case MBX_SET_DEBUG: 2534 case MBX_SET_DEBUG:
2465 case MBX_WRITE_WWN: 2535 case MBX_WRITE_WWN:
2466 case MBX_SLI4_CONFIG: 2536 case MBX_SLI4_CONFIG:
2537 case MBX_READ_EVENT_LOG:
2467 case MBX_READ_EVENT_LOG_STATUS: 2538 case MBX_READ_EVENT_LOG_STATUS:
2468 case MBX_WRITE_EVENT_LOG: 2539 case MBX_WRITE_EVENT_LOG:
2469 case MBX_PORT_CAPABILITIES: 2540 case MBX_PORT_CAPABILITIES:
2470 case MBX_PORT_IOV_CONTROL: 2541 case MBX_PORT_IOV_CONTROL:
2542 case MBX_RUN_BIU_DIAG64:
2471 break; 2543 break;
2472 case MBX_SET_VARIABLE: 2544 case MBX_SET_VARIABLE:
2473 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2545 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -2482,8 +2554,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2482 phba->fc_topology = TOPOLOGY_PT_PT; 2554 phba->fc_topology = TOPOLOGY_PT_PT;
2483 } 2555 }
2484 break; 2556 break;
2485 case MBX_RUN_BIU_DIAG64:
2486 case MBX_READ_EVENT_LOG:
2487 case MBX_READ_SPARM64: 2557 case MBX_READ_SPARM64:
2488 case MBX_READ_LA: 2558 case MBX_READ_LA:
2489 case MBX_READ_LA64: 2559 case MBX_READ_LA64:
@@ -2518,97 +2588,365 @@ static uint32_t
2518lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, 2588lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
2519 struct lpfc_vport *vport) 2589 struct lpfc_vport *vport)
2520{ 2590{
2521 LPFC_MBOXQ_t *pmboxq; 2591 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
2522 MAILBOX_t *pmb; 2592 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
2523 MAILBOX_t *mb; 2593 /* a 4k buffer to hold the mb and extended data from/to the bsg */
2524 struct bsg_job_data *dd_data; 2594 MAILBOX_t *mb = NULL;
2595 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
2525 uint32_t size; 2596 uint32_t size;
2597 struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
2598 struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
2599 struct ulp_bde64 *rxbpl = NULL;
2600 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
2601 job->request->rqst_data.h_vendor.vendor_cmd;
2602 uint8_t *ext = NULL;
2526 int rc = 0; 2603 int rc = 0;
2604 uint8_t *from;
2605
2606 /* in case no data is transferred */
2607 job->reply->reply_payload_rcv_len = 0;
2608
2609 /* check if requested extended data lengths are valid */
2610 if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
2611 (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
2612 rc = -ERANGE;
2613 goto job_done;
2614 }
2527 2615
2528 /* allocate our bsg tracking structure */ 2616 /* allocate our bsg tracking structure */
2529 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 2617 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2530 if (!dd_data) { 2618 if (!dd_data) {
2531 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2619 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2532 "2727 Failed allocation of dd_data\n"); 2620 "2727 Failed allocation of dd_data\n");
2533 return -ENOMEM; 2621 rc = -ENOMEM;
2622 goto job_done;
2534 } 2623 }
2535 2624
2536 mb = kzalloc(PAGE_SIZE, GFP_KERNEL); 2625 mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
2537 if (!mb) { 2626 if (!mb) {
2538 kfree(dd_data); 2627 rc = -ENOMEM;
2539 return -ENOMEM; 2628 goto job_done;
2540 } 2629 }
2541 2630
2542 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2631 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2543 if (!pmboxq) { 2632 if (!pmboxq) {
2544 kfree(dd_data); 2633 rc = -ENOMEM;
2545 kfree(mb); 2634 goto job_done;
2546 return -ENOMEM;
2547 } 2635 }
2636 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2548 2637
2549 size = job->request_payload.payload_len; 2638 size = job->request_payload.payload_len;
2550 job->reply->reply_payload_rcv_len = 2639 sg_copy_to_buffer(job->request_payload.sg_list,
2551 sg_copy_to_buffer(job->request_payload.sg_list, 2640 job->request_payload.sg_cnt,
2552 job->request_payload.sg_cnt, 2641 mb, size);
2553 mb, size);
2554 2642
2555 rc = lpfc_bsg_check_cmd_access(phba, mb, vport); 2643 rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
2556 if (rc != 0) { 2644 if (rc != 0)
2557 kfree(dd_data); 2645 goto job_done; /* must be negative */
2558 kfree(mb);
2559 mempool_free(pmboxq, phba->mbox_mem_pool);
2560 return rc; /* must be negative */
2561 }
2562 2646
2563 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
2564 pmb = &pmboxq->u.mb; 2647 pmb = &pmboxq->u.mb;
2565 memcpy(pmb, mb, sizeof(*pmb)); 2648 memcpy(pmb, mb, sizeof(*pmb));
2566 pmb->mbxOwner = OWN_HOST; 2649 pmb->mbxOwner = OWN_HOST;
2567 pmboxq->context1 = NULL;
2568 pmboxq->vport = vport; 2650 pmboxq->vport = vport;
2569 2651
2570 if ((vport->fc_flag & FC_OFFLINE_MODE) || 2652 /* If HBA encountered an error attention, allow only DUMP
2571 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { 2653 * or RESTART mailbox commands until the HBA is restarted.
2572 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2654 */
2573 if (rc != MBX_SUCCESS) { 2655 if (phba->pport->stopped &&
2574 if (rc != MBX_TIMEOUT) { 2656 pmb->mbxCommand != MBX_DUMP_MEMORY &&
2575 kfree(dd_data); 2657 pmb->mbxCommand != MBX_RESTART &&
2576 kfree(mb); 2658 pmb->mbxCommand != MBX_WRITE_VPARMS &&
2577 mempool_free(pmboxq, phba->mbox_mem_pool); 2659 pmb->mbxCommand != MBX_WRITE_WWN)
2660 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2661 "2797 mbox: Issued mailbox cmd "
2662 "0x%x while in stopped state.\n",
2663 pmb->mbxCommand);
2664
2665 /* Don't allow mailbox commands to be sent when blocked
2666 * or when in the middle of discovery
2667 */
2668 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
2669 rc = -EAGAIN;
2670 goto job_done;
2671 }
2672
2673 /* extended mailbox commands will need an extended buffer */
2674 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
2675 ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
2676 if (!ext) {
2677 rc = -ENOMEM;
2678 goto job_done;
2679 }
2680
2681 /* any data for the device? */
2682 if (mbox_req->inExtWLen) {
2683 from = (uint8_t *)mb;
2684 from += sizeof(MAILBOX_t);
2685 memcpy((uint8_t *)ext, from,
2686 mbox_req->inExtWLen * sizeof(uint32_t));
2687 }
2688
2698 pmboxq->context2 = ext;
2699 pmboxq->in_ext_byte_len =
2700 mbox_req->inExtWLen * sizeof(uint32_t);
2701 pmboxq->out_ext_byte_len =
2702 mbox_req->outExtWLen * sizeof(uint32_t);
2703 pmboxq->mbox_offset_word = mbox_req->mbOffset;
2704 }
2705
2706 /* biu diag will need a kernel buffer to transfer the data
2707 * allocate our own buffer and setup the mailbox command to
2708 * use ours
2709 */
2710 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
2711 uint32_t transmit_length = pmb->un.varWords[1];
2712 uint32_t receive_length = pmb->un.varWords[4];
2713 /* transmit length cannot be greater than receive length or
2714 * mailbox extension size
2715 */
2716 if ((transmit_length > receive_length) ||
2717 (transmit_length > MAILBOX_EXT_SIZE)) {
2718 rc = -ERANGE;
2719 goto job_done;
2720 }
2721
2722 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2723 if (!rxbmp) {
2724 rc = -ENOMEM;
2725 goto job_done;
2726 }
2727
2728 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2729 if (!rxbmp->virt) {
2730 rc = -ENOMEM;
2731 goto job_done;
2732 }
2733
2734 INIT_LIST_HEAD(&rxbmp->list);
2735 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2736 dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
2737 if (!dmp) {
2738 rc = -ENOMEM;
2739 goto job_done;
2740 }
2741
2742 INIT_LIST_HEAD(&dmp->dma.list);
2743 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
2744 putPaddrHigh(dmp->dma.phys);
2745 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
2746 putPaddrLow(dmp->dma.phys);
2747
2748 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
2749 putPaddrHigh(dmp->dma.phys +
2750 pmb->un.varBIUdiag.un.s2.
2751 xmit_bde64.tus.f.bdeSize);
2752 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
2753 putPaddrLow(dmp->dma.phys +
2754 pmb->un.varBIUdiag.un.s2.
2755 xmit_bde64.tus.f.bdeSize);
2756
2757 /* copy the transmit data found in the mailbox extension area */
2758 from = (uint8_t *)mb;
2759 from += sizeof(MAILBOX_t);
2760 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
2761 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
2762 struct READ_EVENT_LOG_VAR *rdEventLog =
2763 &pmb->un.varRdEventLog ;
2764 uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
2765 uint32_t mode = bf_get(lpfc_event_log, rdEventLog);
2766
2767 /* receive length cannot be greater than mailbox
2768 * extension size
2769 */
2770 if (receive_length > MAILBOX_EXT_SIZE) {
2771 rc = -ERANGE;
2772 goto job_done;
2773 }
2774
2775 /* mode zero uses a bde like biu diags command */
2776 if (mode == 0) {
2777
2778 /* rebuild the command for sli4 using our own buffers
2779 * like we do for biu diags
2780 */
2781
2782 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2783 if (!rxbmp) {
2784 rc = -ENOMEM;
2785 goto job_done;
2578 } 2786 }
2579 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; 2787
2788 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2789 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2790 if (rxbpl) {
2791 INIT_LIST_HEAD(&rxbmp->list);
2792 dmp = diag_cmd_data_alloc(phba, rxbpl,
2793 receive_length, 0);
2794 }
2795
2796 if (!dmp) {
2797 rc = -ENOMEM;
2798 goto job_done;
2799 }
2800
2801 INIT_LIST_HEAD(&dmp->dma.list);
2802 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2803 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2580 } 2804 }
2805 } else if (phba->sli_rev == LPFC_SLI_REV4) {
2806 if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
2807 /* rebuild the command for sli4 using our own buffers
2808 * like we do for biu diags
2809 */
2810 uint32_t receive_length = pmb->un.varWords[2];
2811 /* receive length cannot be greater than mailbox
2812 * extension size
2813 */
2814 if ((receive_length == 0) ||
2815 (receive_length > MAILBOX_EXT_SIZE)) {
2816 rc = -ERANGE;
2817 goto job_done;
2818 }
2581 2819
2582 memcpy(mb, pmb, sizeof(*pmb)); 2820 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2583 job->reply->reply_payload_rcv_len = 2821 if (!rxbmp) {
2584 sg_copy_from_buffer(job->reply_payload.sg_list, 2822 rc = -ENOMEM;
2585 job->reply_payload.sg_cnt, 2823 goto job_done;
2586 mb, size); 2824 }
2587 kfree(dd_data); 2825
2588 kfree(mb); 2826 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2589 mempool_free(pmboxq, phba->mbox_mem_pool); 2827 if (!rxbmp->virt) {
2590 /* not waiting mbox already done */ 2828 rc = -ENOMEM;
2591 return 0; 2829 goto job_done;
2830 }
2831
2832 INIT_LIST_HEAD(&rxbmp->list);
2833 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2834 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
2835 0);
2836 if (!dmp) {
2837 rc = -ENOMEM;
2838 goto job_done;
2839 }
2840
2841 INIT_LIST_HEAD(&dmp->dma.list);
2842 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
2843 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
2844 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
2845 pmb->un.varUpdateCfg.co) {
2846 struct ulp_bde64 *bde =
2847 (struct ulp_bde64 *)&pmb->un.varWords[4];
2848
2849 /* bde size cannot be greater than mailbox ext size */
2850 if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
2851 rc = -ERANGE;
2852 goto job_done;
2853 }
2854
2855 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2856 if (!rxbmp) {
2857 rc = -ENOMEM;
2858 goto job_done;
2859 }
2860
2861 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2862 if (!rxbmp->virt) {
2863 rc = -ENOMEM;
2864 goto job_done;
2865 }
2866
2867 INIT_LIST_HEAD(&rxbmp->list);
2868 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2869 dmp = diag_cmd_data_alloc(phba, rxbpl,
2870 bde->tus.f.bdeSize, 0);
2871 if (!dmp) {
2872 rc = -ENOMEM;
2873 goto job_done;
2874 }
2875
2876 INIT_LIST_HEAD(&dmp->dma.list);
2877 bde->addrHigh = putPaddrHigh(dmp->dma.phys);
2878 bde->addrLow = putPaddrLow(dmp->dma.phys);
2879
2880 /* copy the transmit data found in the mailbox
2881 * extension area
2882 */
2883 from = (uint8_t *)mb;
2884 from += sizeof(MAILBOX_t);
2885 memcpy((uint8_t *)dmp->dma.virt, from,
2886 bde->tus.f.bdeSize);
2887 }
2592 } 2888 }
2593 2889
2890 dd_data->context_un.mbox.rxbmp = rxbmp;
2891 dd_data->context_un.mbox.dmp = dmp;
2892
2594 /* setup wake call as IOCB callback */ 2893 /* setup wake call as IOCB callback */
2595 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait; 2894 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
2895
2596 /* setup context field to pass wait_queue pointer to wake function */ 2896 /* setup context field to pass wait_queue pointer to wake function */
2597 pmboxq->context1 = dd_data; 2897 pmboxq->context1 = dd_data;
2598 dd_data->type = TYPE_MBOX; 2898 dd_data->type = TYPE_MBOX;
2599 dd_data->context_un.mbox.pmboxq = pmboxq; 2899 dd_data->context_un.mbox.pmboxq = pmboxq;
2600 dd_data->context_un.mbox.mb = mb; 2900 dd_data->context_un.mbox.mb = mb;
2601 dd_data->context_un.mbox.set_job = job; 2901 dd_data->context_un.mbox.set_job = job;
2902 dd_data->context_un.mbox.ext = ext;
2903 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
2904 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
2905 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
2602 job->dd_data = dd_data; 2906 job->dd_data = dd_data;
2907
2908 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
2909 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
2910 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2911 if (rc != MBX_SUCCESS) {
2912 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
2913 goto job_done;
2914 }
2915
2916 /* job finished, copy the data */
2917 memcpy(mb, pmb, sizeof(*pmb));
2918 job->reply->reply_payload_rcv_len =
2919 sg_copy_from_buffer(job->reply_payload.sg_list,
2920 job->reply_payload.sg_cnt,
2921 mb, size);
2922 /* not waiting mbox already done */
2923 rc = 0;
2924 goto job_done;
2925 }
2926
2603 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 2927 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
2604 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 2928 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
2605 kfree(dd_data); 2929 return 1; /* job started */
2606 kfree(mb); 2930
2931job_done:
2932 /* common exit for error or job completed inline */
2933 kfree(mb);
2934 if (pmboxq)
2607 mempool_free(pmboxq, phba->mbox_mem_pool); 2935 mempool_free(pmboxq, phba->mbox_mem_pool);
2608 return -EIO; 2936 kfree(ext);
2937 if (dmp) {
2938 dma_free_coherent(&phba->pcidev->dev,
2939 dmp->size, dmp->dma.virt,
2940 dmp->dma.phys);
2941 kfree(dmp);
2609 } 2942 }
2943 if (rxbmp) {
2944 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2945 kfree(rxbmp);
2946 }
2947 kfree(dd_data);
2610 2948
2611 return 1; 2949 return rc;
2612} 2950}
2613 2951
2614/** 2952/**
@@ -2633,7 +2971,12 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
2633 goto job_error; 2971 goto job_error;
2634 } 2972 }
2635 2973
2636 if (job->request_payload.payload_len != PAGE_SIZE) { 2974 if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
2975 rc = -EINVAL;
2976 goto job_error;
2977 }
2978
2979 if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
2637 rc = -EINVAL; 2980 rc = -EINVAL;
2638 goto job_error; 2981 goto job_error;
2639 } 2982 }
@@ -3094,6 +3437,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
3094 job->dd_data = NULL; 3437 job->dd_data = NULL;
3095 job->reply->reply_payload_rcv_len = 0; 3438 job->reply->reply_payload_rcv_len = 0;
3096 job->reply->result = -EAGAIN; 3439 job->reply->result = -EAGAIN;
3440 /* the mbox completion handler can now be run */
3097 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3441 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3098 job->job_done(job); 3442 job->job_done(job);
3099 break; 3443 break;
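lpfc_bsg_wake_mbox_wait() above now assembles one flat reply: the MAILBOX_t image first, any extended words or DMA'd diagnostic data immediately behind it, then a single sg_copy_from_buffer() back to the bsg job. A userspace sketch of that buffer layout, with a stand-in mailbox type and made-up sizes:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct mailbox { uint32_t words[16]; };   /* stands in for MAILBOX_t */

    int main(void)
    {
            uint8_t reply[sizeof(struct mailbox) + 8 * sizeof(uint32_t)];
            struct mailbox mb = { .words = { 0x5a } };
            uint32_t ext[8] = { 0xdeadbeef };
            uint8_t *to = reply;

            memcpy(to, &mb, sizeof(mb));      /* mailbox image first */
            to += sizeof(struct mailbox);
            memcpy(to, ext, sizeof(ext));     /* extended words follow */

            printf("flat reply: %zu bytes (mb + ext)\n", sizeof(reply));
            return 0;
    }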
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 5bc630819b9e..a2c33e7c9152 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -91,11 +91,12 @@ struct get_mgmt_rev_reply {
91 struct MgmtRevInfo info; 91 struct MgmtRevInfo info;
92}; 92};
93 93
94#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
94struct dfc_mbox_req { 95struct dfc_mbox_req {
95 uint32_t command; 96 uint32_t command;
97 uint32_t mbOffset;
96 uint32_t inExtWLen; 98 uint32_t inExtWLen;
97 uint32_t outExtWLen; 99 uint32_t outExtWLen;
98 uint8_t mbOffset;
99}; 100};
100 101
101/* Used for menlo command or menlo data. The xri is only used for menlo data */ 102/* Used for menlo command or menlo data. The xri is only used for menlo data */
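Widening mbOffset to uint32_t and moving it up matters because struct dfc_mbox_req crosses the user/kernel boundary through bsg, so its layout is ABI. offsetof/sizeof make the before/after concrete; sizes shown assume common LP64 ABIs:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct dfc_mbox_req_old {                 /* layout before this patch */
            uint32_t command;
            uint32_t inExtWLen;
            uint32_t outExtWLen;
            uint8_t  mbOffset;                /* tail byte, padded out */
    };

    struct dfc_mbox_req_new {                 /* layout after this patch */
            uint32_t command;
            uint32_t mbOffset;                /* widened and moved up */
            uint32_t inExtWLen;
            uint32_t outExtWLen;
    };

    int main(void)
    {
            printf("old: size=%zu mbOffset@%zu\n",
                   sizeof(struct dfc_mbox_req_old),
                   offsetof(struct dfc_mbox_req_old, mbOffset));
            printf("new: size=%zu mbOffset@%zu\n",
                   sizeof(struct dfc_mbox_req_new),
                   offsetof(struct dfc_mbox_req_new, mbOffset));
            return 0;
    }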
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 5087c4211b43..fbc9baeb6048 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -65,6 +65,7 @@ void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
65void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 65void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
66void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); 66void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
67void lpfc_retry_pport_discovery(struct lpfc_hba *); 67void lpfc_retry_pport_discovery(struct lpfc_hba *);
68void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
68 69
69void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 70void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
70void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); 71void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 2851d75ffc6f..36257a685509 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -38,6 +38,7 @@ enum lpfc_work_type {
38 LPFC_EVT_ELS_RETRY, 38 LPFC_EVT_ELS_RETRY,
39 LPFC_EVT_DEV_LOSS, 39 LPFC_EVT_DEV_LOSS,
40 LPFC_EVT_FASTPATH_MGMT_EVT, 40 LPFC_EVT_FASTPATH_MGMT_EVT,
41 LPFC_EVT_RESET_HBA,
41}; 42};
42 43
43/* structure used to queue event to the discovery tasklet */ 44/* structure used to queue event to the discovery tasklet */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 5fbdb22c1899..c4c7f0ad7468 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -584,6 +584,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
584 spin_unlock_irq(shost->host_lock); 584 spin_unlock_irq(shost->host_lock);
585 lpfc_unreg_rpi(vport, np); 585 lpfc_unreg_rpi(vport, np);
586 } 586 }
587 lpfc_cleanup_pending_mbox(vport);
587 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 588 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
588 lpfc_mbx_unreg_vpi(vport); 589 lpfc_mbx_unreg_vpi(vport);
589 spin_lock_irq(shost->host_lock); 590 spin_lock_irq(shost->host_lock);
@@ -864,6 +865,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
864 } 865 }
865 spin_lock_irq(shost->host_lock); 866 spin_lock_irq(shost->host_lock);
866 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 867 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
868 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
867 spin_unlock_irq(shost->host_lock); 869 spin_unlock_irq(shost->host_lock);
868 870
869 /* 871 /*
@@ -893,11 +895,14 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
893 895
894 if (!rc) { 896 if (!rc) {
895 /* Mark the FCF discovery process done */ 897 /* Mark the FCF discovery process done */
896 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS, 898 if (phba->hba_flag & HBA_FIP_SUPPORT)
897 "2769 FLOGI successful on FCF record: " 899 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
898 "current_fcf_index:x%x, terminate FCF " 900 LOG_ELS,
899 "round robin failover process\n", 901 "2769 FLOGI successful on FCF "
900 phba->fcf.current_rec.fcf_indx); 902 "record: current_fcf_index:"
903 "x%x, terminate FCF round "
904 "robin failover process\n",
905 phba->fcf.current_rec.fcf_indx);
901 spin_lock_irq(&phba->hbalock); 906 spin_lock_irq(&phba->hbalock);
902 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 907 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
903 spin_unlock_irq(&phba->hbalock); 908 spin_unlock_irq(&phba->hbalock);
@@ -5366,7 +5371,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
5366 sizeof(struct lpfc_name)); 5371 sizeof(struct lpfc_name));
5367 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 5372 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
5368 cmdiocbp->context2)->virt); 5373 cmdiocbp->context2)->virt);
5369 lsrjt_event.command = *pcmd; 5374 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
5370 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 5375 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
5371 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 5376 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
5372 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 5377 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
@@ -6050,7 +6055,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6050 spin_lock_irq(shost->host_lock); 6055 spin_lock_irq(shost->host_lock);
6051 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6056 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6052 spin_unlock_irq(shost->host_lock); 6057 spin_unlock_irq(shost->host_lock);
6053 if (vport->port_type == LPFC_PHYSICAL_PORT) 6058 if (vport->port_type == LPFC_PHYSICAL_PORT
6059 && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
6054 lpfc_initial_flogi(vport); 6060 lpfc_initial_flogi(vport);
6055 else 6061 else
6056 lpfc_initial_fdisc(vport); 6062 lpfc_initial_fdisc(vport);
@@ -6286,6 +6292,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6286 } 6292 }
6287 spin_lock_irq(shost->host_lock); 6293 spin_lock_irq(shost->host_lock);
6288 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 6294 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
6295 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
6289 vport->fc_flag |= FC_FABRIC; 6296 vport->fc_flag |= FC_FABRIC;
6290 if (vport->phba->fc_topology == TOPOLOGY_LOOP) 6297 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
6291 vport->fc_flag |= FC_PUBLIC_LOOP; 6298 vport->fc_flag |= FC_PUBLIC_LOOP;
@@ -6310,11 +6317,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6310 spin_unlock_irq(shost->host_lock); 6317 spin_unlock_irq(shost->host_lock);
6311 lpfc_unreg_rpi(vport, np); 6318 lpfc_unreg_rpi(vport, np);
6312 } 6319 }
6320 lpfc_cleanup_pending_mbox(vport);
6313 lpfc_mbx_unreg_vpi(vport); 6321 lpfc_mbx_unreg_vpi(vport);
6314 spin_lock_irq(shost->host_lock); 6322 spin_lock_irq(shost->host_lock);
6315 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6323 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6316 if (phba->sli_rev == LPFC_SLI_REV4) 6324 if (phba->sli_rev == LPFC_SLI_REV4)
6317 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 6325 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6326 else
6327 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
6318 spin_unlock_irq(shost->host_lock); 6328 spin_unlock_irq(shost->host_lock);
6319 } 6329 }
6320 6330
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index e1466eec56b7..1f87b4fb8b50 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -475,6 +475,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
475 lpfc_send_fastpath_evt(phba, evtp); 475 lpfc_send_fastpath_evt(phba, evtp);
476 free_evt = 0; 476 free_evt = 0;
477 break; 477 break;
478 case LPFC_EVT_RESET_HBA:
479 if (!(phba->pport->load_flag & FC_UNLOADING))
480 lpfc_reset_hba(phba);
481 break;
478 } 482 }
479 if (free_evt) 483 if (free_evt)
480 kfree(evtp); 484 kfree(evtp);
@@ -1531,7 +1535,37 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1531} 1535}
1532 1536
1533/** 1537/**
1534 * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command 1538 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
1539 * @phba: pointer to lpfc hba data structure.
1540 * @fcf_cnt: number of eligible fcf record seen so far.
1541 *
 1542 * This function makes a running random selection decision on the FCF record to
 1543 * use through a sequence of @fcf_cnt eligible FCF records with equal
 1544 * probability. To perform integer manipulation of random numbers with
 1545 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 1546 * from random32() are taken as the generated random number.
1547 *
 1548 * Returns true when the outcome is that the newly read FCF record should be
 1549 * chosen; returns false when the outcome is to keep the previously
 1550 * chosen FCF record.
1551 **/
1552static bool
1553lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
1554{
1555 uint32_t rand_num;
1556
1557 /* Get 16-bit uniform random number */
1558 rand_num = (0xFFFF & random32());
1559
1560 /* Decision with probability 1/fcf_cnt */
1561 if ((fcf_cnt * rand_num) < 0xFFFF)
1562 return true;
1563 else
1564 return false;
1565}
1566
1567/**
1568 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1535 * @phba: pointer to lpfc hba data structure. 1569 * @phba: pointer to lpfc hba data structure.
1536 * @mboxq: pointer to mailbox object. 1570 * @mboxq: pointer to mailbox object.
1537 * @next_fcf_index: pointer to holder of next fcf index. 1571 * @next_fcf_index: pointer to holder of next fcf index.
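The decision rule added above is the classic online (reservoir) sampling step: accepting the i-th eligible record with probability 1/i leaves every record in a run of n equally likely to be the final choice. A user-space sketch of the same rule (assumptions: rand()/srand() stand in for the kernel's random32()/srandom32(); none of this is driver code):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>

/* Same decision rule as lpfc_sli4_new_fcf_random_select(): accept the
 * newly seen record with probability ~1/fcf_cnt, using a 16-bit
 * uniform random number. */
static int select_new(uint32_t fcf_cnt)
{
	/* Compose 16 random bits from two rand() calls so the sketch
	 * does not depend on RAND_MAX being larger than 0xFFFF. */
	uint32_t rand_num = ((uint32_t)rand() & 0xFF) |
			    (((uint32_t)rand() & 0xFF) << 8);

	return (fcf_cnt * rand_num) < 0xFFFF;
}

int main(void)
{
	int trials = 1000000, last_won = 0, i;
	uint32_t cnt;

	srand(time(NULL));	/* mirrors the srandom32(jiffies) seeding below */
	for (i = 0; i < trials; i++) {
		uint32_t winner = 1;	/* first eligible record is always taken */

		for (cnt = 2; cnt <= 10; cnt++)
			if (select_new(cnt))
				winner = cnt;
		last_won += (winner == 10);
	}
	printf("P(10th of 10 records chosen) ~ %.3f (expect ~0.100)\n",
	       (double)last_won / trials);
	return 0;
}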
@@ -1592,7 +1626,9 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1592 new_fcf_record = (struct fcf_record *)(virt_addr + 1626 new_fcf_record = (struct fcf_record *)(virt_addr +
1593 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1627 sizeof(struct lpfc_mbx_read_fcf_tbl));
1594 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 1628 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1595 sizeof(struct fcf_record)); 1629 offsetof(struct fcf_record, vlan_bitmap));
1630 new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
1631 new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
1596 1632
1597 return new_fcf_record; 1633 return new_fcf_record;
1598} 1634}
@@ -1679,6 +1715,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1679 uint16_t fcf_index, next_fcf_index; 1715 uint16_t fcf_index, next_fcf_index;
1680 struct lpfc_fcf_rec *fcf_rec = NULL; 1716 struct lpfc_fcf_rec *fcf_rec = NULL;
1681 uint16_t vlan_id; 1717 uint16_t vlan_id;
1718 uint32_t seed;
1719 bool select_new_fcf;
1682 int rc; 1720 int rc;
1683 1721
1684 /* If there is pending FCoE event restart FCF table scan */ 1722 /* If there is pending FCoE event restart FCF table scan */
@@ -1809,9 +1847,21 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1809 * than the driver FCF record, use the new record. 1847 * than the driver FCF record, use the new record.
1810 */ 1848 */
1811 if (new_fcf_record->fip_priority < fcf_rec->priority) { 1849 if (new_fcf_record->fip_priority < fcf_rec->priority) {
1812 /* Choose this FCF record */ 1850 /* Choose the new FCF record with lower priority */
1813 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1851 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1814 addr_mode, vlan_id, 0); 1852 addr_mode, vlan_id, 0);
1853 /* Reset running random FCF selection count */
1854 phba->fcf.eligible_fcf_cnt = 1;
1855 } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
1856 /* Update running random FCF selection count */
1857 phba->fcf.eligible_fcf_cnt++;
1858 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
1859 phba->fcf.eligible_fcf_cnt);
1860 if (select_new_fcf)
1861 /* Choose the new FCF by random selection */
1862 __lpfc_update_fcf_record(phba, fcf_rec,
1863 new_fcf_record,
1864 addr_mode, vlan_id, 0);
1815 } 1865 }
1816 spin_unlock_irq(&phba->hbalock); 1866 spin_unlock_irq(&phba->hbalock);
1817 goto read_next_fcf; 1867 goto read_next_fcf;
@@ -1825,6 +1875,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1825 addr_mode, vlan_id, (boot_flag ? 1875 addr_mode, vlan_id, (boot_flag ?
1826 BOOT_ENABLE : 0)); 1876 BOOT_ENABLE : 0));
1827 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1877 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1878 /* Setup initial running random FCF selection count */
1879 phba->fcf.eligible_fcf_cnt = 1;
1880 /* Seeding the random number generator for random selection */
1881 seed = (uint32_t)(0xFFFFFFFF & jiffies);
1882 srandom32(seed);
1828 } 1883 }
1829 spin_unlock_irq(&phba->hbalock); 1884 spin_unlock_irq(&phba->hbalock);
1830 goto read_next_fcf; 1885 goto read_next_fcf;
@@ -2686,11 +2741,18 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2686 switch (mb->mbxStatus) { 2741 switch (mb->mbxStatus) {
2687 case 0x0011: 2742 case 0x0011:
2688 case 0x0020: 2743 case 0x0020:
2689 case 0x9700:
2690 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 2744 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2691 "0911 cmpl_unreg_vpi, mb status = 0x%x\n", 2745 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
2692 mb->mbxStatus); 2746 mb->mbxStatus);
2693 break; 2747 break;
2748 /* If VPI is busy, reset the HBA */
2749 case 0x9700:
2750 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
2751 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
2752 vport->vpi, mb->mbxStatus);
2753 if (!(phba->pport->load_flag & FC_UNLOADING))
2754 lpfc_workq_post_event(phba, NULL, NULL,
2755 LPFC_EVT_RESET_HBA);
2694 } 2756 }
2695 spin_lock_irq(shost->host_lock); 2757 spin_lock_irq(shost->host_lock);
2696 vport->vpi_state &= ~LPFC_VPI_REGISTERED; 2758 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
@@ -2965,7 +3027,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2965 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3027 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2966 3028
2967 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 3029 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
 2968 lpfc_start_fdiscs(phba); 3030 /* when the physical port receives a LOGO, do
 3031 * not start vport discovery */
3032 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3033 lpfc_start_fdiscs(phba);
3034 else
 3035 vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
2969 lpfc_do_scr_ns_plogi(phba, vport); 3036 lpfc_do_scr_ns_plogi(phba, vport);
2970 } 3037 }
2971 3038
@@ -3177,7 +3244,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3177 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3244 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3178 3245
3179 if (new_state == NLP_STE_UNMAPPED_NODE) { 3246 if (new_state == NLP_STE_UNMAPPED_NODE) {
3180 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
3181 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 3247 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
3182 ndlp->nlp_type |= NLP_FC_NODE; 3248 ndlp->nlp_type |= NLP_FC_NODE;
3183 } 3249 }
@@ -4935,6 +5001,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
4935 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 5001 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
4936 if (ndlp) 5002 if (ndlp)
4937 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 5003 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
5004 lpfc_cleanup_pending_mbox(vports[i]);
4938 lpfc_mbx_unreg_vpi(vports[i]); 5005 lpfc_mbx_unreg_vpi(vports[i]);
4939 shost = lpfc_shost_from_vport(vports[i]); 5006 shost = lpfc_shost_from_vport(vports[i]);
4940 spin_lock_irq(shost->host_lock); 5007 spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 89ff7c09e298..e654d01dad24 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1565,95 +1565,83 @@ enum lpfc_protgrp_type {
1565}; 1565};
1566 1566
1567/* PDE Descriptors */ 1567/* PDE Descriptors */
1568#define LPFC_PDE1_DESCRIPTOR 0x81 1568#define LPFC_PDE5_DESCRIPTOR 0x85
1569#define LPFC_PDE2_DESCRIPTOR 0x82 1569#define LPFC_PDE6_DESCRIPTOR 0x86
1570#define LPFC_PDE3_DESCRIPTOR 0x83 1570#define LPFC_PDE7_DESCRIPTOR 0x87
1571 1571
1572/* BlockGuard Profiles */ 1572/* BlockGuard Opcodes */
1573enum lpfc_bg_prof_codes { 1573#define BG_OP_IN_NODIF_OUT_CRC 0x0
1574 LPFC_PROF_INVALID, 1574#define BG_OP_IN_CRC_OUT_NODIF 0x1
1575 LPFC_PROF_A1 = 128, /* Full Protection */ 1575#define BG_OP_IN_NODIF_OUT_CSUM 0x2
1576 LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */ 1576#define BG_OP_IN_CSUM_OUT_NODIF 0x3
1577 LPFC_PROF_A3, 1577#define BG_OP_IN_CRC_OUT_CRC 0x4
1578 LPFC_PROF_A4, 1578#define BG_OP_IN_CSUM_OUT_CSUM 0x5
1579 LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */ 1579#define BG_OP_IN_CRC_OUT_CSUM 0x6
1580 LPFC_PROF_B2, 1580#define BG_OP_IN_CSUM_OUT_CRC 0x7
1581 LPFC_PROF_B3, 1581
1582 LPFC_PROF_C1, /* Separate DIFs: C1~C3 */ 1582struct lpfc_pde5 {
1583 LPFC_PROF_C2, 1583 uint32_t word0;
1584 LPFC_PROF_C3, 1584#define pde5_type_SHIFT 24
1585 LPFC_PROF_D1, /* Full Protection */ 1585#define pde5_type_MASK 0x000000ff
1586 LPFC_PROF_D2, /* Partial Protection & Check Disabling */ 1586#define pde5_type_WORD word0
1587 LPFC_PROF_D3, 1587#define pde5_rsvd0_SHIFT 0
1588 LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */ 1588#define pde5_rsvd0_MASK 0x00ffffff
1589 LPFC_PROF_E2, 1589#define pde5_rsvd0_WORD word0
1590 LPFC_PROF_E3, 1590 uint32_t reftag; /* Reference Tag Value */
1591 LPFC_PROF_E4, 1591 uint32_t reftagtr; /* Reference Tag Translation Value */
1592 LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */
1593 /* F1 Translation BDE */
1594 LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */
1595 LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */
1596 LPFC_PROF_ANT2,
1597 LPFC_PROF_AST2
1598}; 1592};
1599 1593
1600/* BlockGuard error-control defines */ 1594struct lpfc_pde6 {
1601#define BG_EC_STOP_ERR 0x00 1595 uint32_t word0;
1602#define BG_EC_CONT_ERR 0x01 1596#define pde6_type_SHIFT 24
1603#define BG_EC_IGN_UNINIT_STOP_ERR 0x10 1597#define pde6_type_MASK 0x000000ff
1604#define BG_EC_IGN_UNINIT_CONT_ERR 0x11 1598#define pde6_type_WORD word0
1605 1599#define pde6_rsvd0_SHIFT 0
1606/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */ 1600#define pde6_rsvd0_MASK 0x00ffffff
1607#define PDE_DESC_TYPE_MASK 0xff000000 1601#define pde6_rsvd0_WORD word0
1608#define PDE_DESC_TYPE_SHIFT 24 1602 uint32_t word1;
1609#define PDE_BG_PROFILE_MASK 0x00ff0000 1603#define pde6_rsvd1_SHIFT 26
1610#define PDE_BG_PROFILE_SHIFT 16 1604#define pde6_rsvd1_MASK 0x0000003f
1611#define PDE_BLOCK_LEN_MASK 0x0000fffc 1605#define pde6_rsvd1_WORD word1
1612#define PDE_BLOCK_LEN_SHIFT 2 1606#define pde6_na_SHIFT 25
1613#define PDE_ERR_CTRL_MASK 0x00000003 1607#define pde6_na_MASK 0x00000001
1614#define PDE_ERR_CTRL_SHIFT 0 1608#define pde6_na_WORD word1
1615/* PDE word 1 bit masks and shifts */ 1609#define pde6_rsvd2_SHIFT 16
1616#define PDE_APPTAG_MASK_MASK 0xffff0000 1610#define pde6_rsvd2_MASK 0x000001FF
1617#define PDE_APPTAG_MASK_SHIFT 16 1611#define pde6_rsvd2_WORD word1
1618#define PDE_APPTAG_VAL_MASK 0x0000ffff 1612#define pde6_apptagtr_SHIFT 0
1619#define PDE_APPTAG_VAL_SHIFT 0 1613#define pde6_apptagtr_MASK 0x0000ffff
1620struct lpfc_pde { 1614#define pde6_apptagtr_WORD word1
1621 uint32_t parms; /* bitfields of descriptor, prof, len, and ec */ 1615 uint32_t word2;
1622 uint32_t apptag; /* bitfields of app tag maskand app tag value */ 1616#define pde6_optx_SHIFT 28
1623 uint32_t reftag; /* reference tag occupying all 32 bits */ 1617#define pde6_optx_MASK 0x0000000f
1618#define pde6_optx_WORD word2
1619#define pde6_oprx_SHIFT 24
1620#define pde6_oprx_MASK 0x0000000f
1621#define pde6_oprx_WORD word2
1622#define pde6_nr_SHIFT 23
1623#define pde6_nr_MASK 0x00000001
1624#define pde6_nr_WORD word2
1625#define pde6_ce_SHIFT 22
1626#define pde6_ce_MASK 0x00000001
1627#define pde6_ce_WORD word2
1628#define pde6_re_SHIFT 21
1629#define pde6_re_MASK 0x00000001
1630#define pde6_re_WORD word2
1631#define pde6_ae_SHIFT 20
1632#define pde6_ae_MASK 0x00000001
1633#define pde6_ae_WORD word2
1634#define pde6_ai_SHIFT 19
1635#define pde6_ai_MASK 0x00000001
1636#define pde6_ai_WORD word2
1637#define pde6_bs_SHIFT 16
1638#define pde6_bs_MASK 0x00000007
1639#define pde6_bs_WORD word2
1640#define pde6_apptagval_SHIFT 0
1641#define pde6_apptagval_MASK 0x0000ffff
1642#define pde6_apptagval_WORD word2
1624}; 1643};
1625 1644
1626/* inline function to set fields in parms of PDE */
1627static inline void
1628lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec)
1629{
1630 uint32_t *wp = &p->parms;
1631
1632 /* spec indicates that adapter appends two 0's to length field */
1633 len = len >> 2;
1634
1635 *wp &= 0;
1636 *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK);
1637 *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK);
1638 *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK);
1639 *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK);
1640 *wp = le32_to_cpu(*wp);
1641}
1642
1643/* inline function to set apptag and reftag fields of PDE */
1644static inline void
1645lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval,
1646 u32 reftag)
1647{
1648 uint32_t *wp = &p->apptag;
1649 *wp &= 0;
1650 *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK);
1651 *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK);
1652 *wp = le32_to_cpu(*wp);
1653 wp = &p->reftag;
1654 *wp = le32_to_cpu(reftag);
1655}
1656
1657 1645
1658/* Structure for MB Command LOAD_SM and DOWN_LOAD */ 1646/* Structure for MB Command LOAD_SM and DOWN_LOAD */
1659 1647
@@ -1744,6 +1732,17 @@ typedef struct {
1744 } un; 1732 } un;
1745} BIU_DIAG_VAR; 1733} BIU_DIAG_VAR;
1746 1734
1735/* Structure for MB command READ_EVENT_LOG (0x38) */
1736struct READ_EVENT_LOG_VAR {
1737 uint32_t word1;
1738#define lpfc_event_log_SHIFT 29
1739#define lpfc_event_log_MASK 0x00000001
1740#define lpfc_event_log_WORD word1
1741#define USE_MAILBOX_RESPONSE 1
1742 uint32_t offset;
1743 struct ulp_bde64 rcv_bde64;
1744};
1745
1747/* Structure for MB Command INIT_LINK (05) */ 1746/* Structure for MB Command INIT_LINK (05) */
1748 1747
1749typedef struct { 1748typedef struct {
@@ -2487,8 +2486,8 @@ typedef struct {
2487#define DMP_VPORT_REGION_SIZE 0x200 2486#define DMP_VPORT_REGION_SIZE 0x200
2488#define DMP_MBOX_OFFSET_WORD 0x5 2487#define DMP_MBOX_OFFSET_WORD 0x5
2489 2488
2490#define DMP_REGION_23 0x17 /* fcoe param and port state region */ 2489#define DMP_REGION_23 0x17 /* fcoe param and port state region */
2491#define DMP_RGN23_SIZE 0x400 2490#define DMP_RGN23_SIZE 0x400
2492 2491
2493#define WAKE_UP_PARMS_REGION_ID 4 2492#define WAKE_UP_PARMS_REGION_ID 4
2494#define WAKE_UP_PARMS_WORD_SIZE 15 2493#define WAKE_UP_PARMS_WORD_SIZE 15
@@ -2503,9 +2502,9 @@ struct vport_rec {
2503#define VPORT_INFO_REV 0x1 2502#define VPORT_INFO_REV 0x1
2504#define MAX_STATIC_VPORT_COUNT 16 2503#define MAX_STATIC_VPORT_COUNT 16
2505struct static_vport_info { 2504struct static_vport_info {
2506 uint32_t signature; 2505 uint32_t signature;
2507 uint32_t rev; 2506 uint32_t rev;
2508 struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT]; 2507 struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT];
2509 uint32_t resvd[66]; 2508 uint32_t resvd[66];
2510}; 2509};
2511 2510
@@ -2934,6 +2933,12 @@ typedef struct {
2934/* Union of all Mailbox Command types */ 2933/* Union of all Mailbox Command types */
2935#define MAILBOX_CMD_WSIZE 32 2934#define MAILBOX_CMD_WSIZE 32
2936#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t)) 2935#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
2936/* ext_wsize times 4 bytes should not be greater than max xmit size */
2937#define MAILBOX_EXT_WSIZE 512
2938#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
2939#define MAILBOX_HBA_EXT_OFFSET 0x100
2940/* max mbox xmit size is a page size for sysfs IO operations */
2941#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE
2937 2942
2938typedef union { 2943typedef union {
2939 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/ 2944 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
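The comment's constraint is easy to make concrete: MAILBOX_EXT_WSIZE * sizeof(uint32_t) = 512 * 4 = 2048 bytes, comfortably under the one-page (4096-byte) sysfs transmit ceiling. A compile-time check in that spirit (a sketch; PAGE_SIZE stubbed to 4096, not part of the patch) could read:

#include <stdio.h>
#include <stdint.h>

#define MAILBOX_EXT_WSIZE	512
#define MAILBOX_EXT_SIZE	(MAILBOX_EXT_WSIZE * sizeof(uint32_t))
#define MAILBOX_MAX_XMIT_SIZE	4096	/* PAGE_SIZE stand-in */

/* ext_wsize times 4 bytes must not be greater than max xmit size */
typedef char mbox_ext_fits_one_page
	[(MAILBOX_EXT_SIZE <= MAILBOX_MAX_XMIT_SIZE) ? 1 : -1];

int main(void)
{
	printf("mailbox extension: %zu bytes (limit %d)\n",
	       MAILBOX_EXT_SIZE, MAILBOX_MAX_XMIT_SIZE);
	return 0;
}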
@@ -2972,6 +2977,9 @@ typedef union {
2972 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ 2977 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
2973 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ 2978 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
2974 ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ 2979 ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
2980 struct READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38
2981 * (READ_EVENT_LOG)
2982 */
2975 struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */ 2983 struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
2976} MAILVARIANTS; 2984} MAILVARIANTS;
2977 2985
@@ -3652,7 +3660,8 @@ typedef struct _IOCB { /* IOCB structure */
3652/* Maximum IOCBs that will fit in SLI2 slim */ 3660/* Maximum IOCBs that will fit in SLI2 slim */
3653#define MAX_SLI2_IOCB 498 3661#define MAX_SLI2_IOCB 498
3654#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \ 3662#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
3655 (sizeof(MAILBOX_t) + sizeof(PCB_t))) 3663 (sizeof(MAILBOX_t) + sizeof(PCB_t) + \
3664 sizeof(uint32_t) * MAILBOX_EXT_WSIZE))
3656 3665
3657/* HBQ entries are 4 words each = 4k */ 3666/* HBQ entries are 4 words each = 4k */
3658#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \ 3667#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
@@ -3660,6 +3669,7 @@ typedef struct _IOCB { /* IOCB structure */
3660 3669
3661struct lpfc_sli2_slim { 3670struct lpfc_sli2_slim {
3662 MAILBOX_t mbx; 3671 MAILBOX_t mbx;
3672 uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE];
3663 PCB_t pcb; 3673 PCB_t pcb;
3664 IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE]; 3674 IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
3665}; 3675};
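Inserting mbx_ext_words between mbx and pcb shifts every later member of the SLIM layout by 2 KB, which is why the lpfc_init.c hunk further down derives phba->mbox_ext with offsetof() rather than a hard-coded offset. A compressed illustration (a sketch; MAILBOX_t and PCB_t stubbed as opaque 32-word blocks, which happens to match MAILBOX_CMD_WSIZE for the mailbox):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define MAILBOX_EXT_WSIZE 512

/* Stubs standing in for the real MAILBOX_t/PCB_t layouts. */
typedef struct { uint32_t words[32]; } MAILBOX_t;
typedef struct { uint32_t words[32]; } PCB_t;

struct lpfc_sli2_slim {
	MAILBOX_t mbx;
	uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE];
	PCB_t pcb;
};

int main(void)
{
	/* With these stubs: mbx_ext at 128, pcb pushed out to 128 + 2048. */
	printf("mbx_ext at offset %zu, pcb at offset %zu\n",
	       offsetof(struct lpfc_sli2_slim, mbx_ext_words),
	       offsetof(struct lpfc_sli2_slim, pcb));
	return 0;
}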
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 820015fbc4d6..bbdcf96800f6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -41,8 +41,14 @@
41 * Or clear that bit field: 41 * Or clear that bit field:
42 * bf_set(example_bit_field, &t1, 0); 42 * bf_set(example_bit_field, &t1, 0);
43 */ 43 */
44#define bf_get_le32(name, ptr) \
45 ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
44#define bf_get(name, ptr) \ 46#define bf_get(name, ptr) \
45 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) 47 (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
48#define bf_set_le32(name, ptr, value) \
49 ((ptr)->name##_WORD = cpu_to_le32(((((value) & \
50 name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
51 ~(name##_MASK << name##_SHIFT)))))
46#define bf_set(name, ptr, value) \ 52#define bf_set(name, ptr, value) \
47 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ 53 ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
48 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) 54 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
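The new *_le32 variants wrap the same shift-and-mask pattern in le32_to_cpu()/cpu_to_le32() so a field can be read or updated in a little-endian hardware word regardless of host byte order. A user-space sketch exercising them (assumptions: the le32 conversions are identity stand-ins for a little-endian host, and the "speed" field layout is invented for the demo):

#include <stdio.h>
#include <stdint.h>

#define le32_to_cpu(x) (x)	/* little-endian host assumed */
#define cpu_to_le32(x) (x)

#define bf_get_le32(name, ptr) \
	((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
#define bf_set_le32(name, ptr, value) \
	((ptr)->name##_WORD = cpu_to_le32(((((value) & \
	name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
	~(name##_MASK << name##_SHIFT)))))

/* A field occupying bits 16..31 of word1, shaped like
 * lpfc_acqe_grp5_llink_spd in the lpfc_acqe_grp5 entry below. */
struct demo {
	uint32_t word1;
};
#define speed_SHIFT	16
#define speed_MASK	0x0000FFFF
#define speed_WORD	word1

int main(void)
{
	struct demo d = { .word1 = 0x0000BEEF };

	bf_set_le32(speed, &d, 0x1234);	/* low 16 bits left untouched */
	printf("word1=0x%08X speed=0x%04X\n",
	       (unsigned)d.word1, (unsigned)bf_get_le32(speed, &d));
	return 0;
}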
@@ -781,6 +787,7 @@ struct mbox_header {
781#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 787#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
782#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A 788#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
783#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 789#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
790#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
784 791
785/* FCoE Opcodes */ 792/* FCoE Opcodes */
786#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 793#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -1102,6 +1109,39 @@ struct lpfc_mbx_mq_create {
1102 } u; 1109 } u;
1103}; 1110};
1104 1111
1112struct lpfc_mbx_mq_create_ext {
1113 struct mbox_header header;
1114 union {
1115 struct {
1116 uint32_t word0;
1117#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0
1118#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF
1119#define lpfc_mbx_mq_create_ext_num_pages_WORD word0
1120 uint32_t async_evt_bmap;
1121#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK
1122#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001
1123#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap
1124#define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT LPFC_TRAILER_CODE_FCOE
1125#define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK 0x00000001
1126#define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD async_evt_bmap
1127#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5
1128#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001
1129#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap
1130 struct mq_context context;
1131 struct dma_address page[LPFC_MAX_MQ_PAGE];
1132 } request;
1133 struct {
1134 uint32_t word0;
1135#define lpfc_mbx_mq_create_q_id_SHIFT 0
1136#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF
1137#define lpfc_mbx_mq_create_q_id_WORD word0
1138 } response;
1139 } u;
1140#define LPFC_ASYNC_EVENT_LINK_STATE 0x2
1141#define LPFC_ASYNC_EVENT_FCF_STATE 0x4
1142#define LPFC_ASYNC_EVENT_GROUP5 0x20
1143};
1144
1105struct lpfc_mbx_mq_destroy { 1145struct lpfc_mbx_mq_destroy {
1106 struct mbox_header header; 1146 struct mbox_header header;
1107 union { 1147 union {
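Note how the LPFC_ASYNC_EVENT_* values line up with the trailer codes: 0x2 = 1 << LPFC_TRAILER_CODE_LINK, 0x4 = 1 << LPFC_TRAILER_CODE_FCOE, and 0x20 = 1 << LPFC_TRAILER_CODE_GRP5, so each bit in async_evt_bmap sits at its event's trailer-code position. A quick stand-alone sanity check using only constants from this patch:

#include <stdio.h>

#define LPFC_TRAILER_CODE_LINK	0x1
#define LPFC_TRAILER_CODE_FCOE	0x2
#define LPFC_TRAILER_CODE_GRP5	0x5

#define LPFC_ASYNC_EVENT_LINK_STATE	0x2
#define LPFC_ASYNC_EVENT_FCF_STATE	0x4
#define LPFC_ASYNC_EVENT_GROUP5		0x20

int main(void)
{
	/* Each async-event bit sits at its trailer code's position. */
	printf("%d %d %d\n",
	       LPFC_ASYNC_EVENT_LINK_STATE == (1 << LPFC_TRAILER_CODE_LINK),
	       LPFC_ASYNC_EVENT_FCF_STATE == (1 << LPFC_TRAILER_CODE_FCOE),
	       LPFC_ASYNC_EVENT_GROUP5 == (1 << LPFC_TRAILER_CODE_GRP5));
	return 0;
}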
@@ -1428,8 +1468,8 @@ struct lpfc_mbx_reg_vfi {
1428#define lpfc_reg_vfi_fcfi_WORD word2 1468#define lpfc_reg_vfi_fcfi_WORD word2
1429 uint32_t wwn[2]; 1469 uint32_t wwn[2];
1430 struct ulp_bde64 bde; 1470 struct ulp_bde64 bde;
1431 uint32_t word8_rsvd; 1471 uint32_t e_d_tov;
1432 uint32_t word9_rsvd; 1472 uint32_t r_a_tov;
1433 uint32_t word10; 1473 uint32_t word10;
1434#define lpfc_reg_vfi_nport_id_SHIFT 0 1474#define lpfc_reg_vfi_nport_id_SHIFT 0
1435#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF 1475#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF
@@ -1940,6 +1980,7 @@ struct lpfc_mbx_sli4_params {
1940#define rdma_MASK 0x00000001 1980#define rdma_MASK 0x00000001
1941#define rdma_WORD word3 1981#define rdma_WORD word3
1942 uint32_t sge_supp_len; 1982 uint32_t sge_supp_len;
1983#define SLI4_PAGE_SIZE 4096
1943 uint32_t word5; 1984 uint32_t word5;
1944#define if_page_sz_SHIFT 0 1985#define if_page_sz_SHIFT 0
1945#define if_page_sz_MASK 0x0000ffff 1986#define if_page_sz_MASK 0x0000ffff
@@ -2041,6 +2082,7 @@ struct lpfc_mqe {
2041 struct lpfc_mbx_reg_fcfi reg_fcfi; 2082 struct lpfc_mbx_reg_fcfi reg_fcfi;
2042 struct lpfc_mbx_unreg_fcfi unreg_fcfi; 2083 struct lpfc_mbx_unreg_fcfi unreg_fcfi;
2043 struct lpfc_mbx_mq_create mq_create; 2084 struct lpfc_mbx_mq_create mq_create;
2085 struct lpfc_mbx_mq_create_ext mq_create_ext;
2044 struct lpfc_mbx_eq_create eq_create; 2086 struct lpfc_mbx_eq_create eq_create;
2045 struct lpfc_mbx_cq_create cq_create; 2087 struct lpfc_mbx_cq_create cq_create;
2046 struct lpfc_mbx_wq_create wq_create; 2088 struct lpfc_mbx_wq_create wq_create;
@@ -2099,6 +2141,7 @@ struct lpfc_mcqe {
2099#define LPFC_TRAILER_CODE_LINK 0x1 2141#define LPFC_TRAILER_CODE_LINK 0x1
2100#define LPFC_TRAILER_CODE_FCOE 0x2 2142#define LPFC_TRAILER_CODE_FCOE 0x2
2101#define LPFC_TRAILER_CODE_DCBX 0x3 2143#define LPFC_TRAILER_CODE_DCBX 0x3
2144#define LPFC_TRAILER_CODE_GRP5 0x5
2102}; 2145};
2103 2146
2104struct lpfc_acqe_link { 2147struct lpfc_acqe_link {
@@ -2168,6 +2211,19 @@ struct lpfc_acqe_dcbx {
2168 uint32_t trailer; 2211 uint32_t trailer;
2169}; 2212};
2170 2213
2214struct lpfc_acqe_grp5 {
2215 uint32_t word0;
2216#define lpfc_acqe_grp5_pport_SHIFT 0
2217#define lpfc_acqe_grp5_pport_MASK 0x000000FF
2218#define lpfc_acqe_grp5_pport_WORD word0
2219 uint32_t word1;
2220#define lpfc_acqe_grp5_llink_spd_SHIFT 16
2221#define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF
2222#define lpfc_acqe_grp5_llink_spd_WORD word1
2223 uint32_t event_tag;
2224 uint32_t trailer;
2225};
2226
2171/* 2227/*
2172 * Define the bootstrap mailbox (bmbx) region used to communicate 2228 * Define the bootstrap mailbox (bmbx) region used to communicate
2173 * mailbox command between the host and port. The mailbox consists 2229 * mailbox command between the host and port. The mailbox consists
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 774663e8e1fe..cd9697edf860 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2566,7 +2566,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2566 shost->max_cmd_len = 16; 2566 shost->max_cmd_len = 16;
2567 if (phba->sli_rev == LPFC_SLI_REV4) { 2567 if (phba->sli_rev == LPFC_SLI_REV4) {
2568 shost->dma_boundary = 2568 shost->dma_boundary =
 2569 phba->sli4_hba.pc_sli4_params.sge_supp_len; 2569 phba->sli4_hba.pc_sli4_params.sge_supp_len - 1;
2570 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2570 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2571 } 2571 }
2572 2572
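The -1 matters because a SCSI host's dma_boundary is a mask-style, inclusive limit: a DMA segment may not cross an address that is a multiple of (mask + 1). A supported SGE length therefore has to be converted to length - 1 before being handed to the midlayer, as sketched here (the 0x10000 value is only an illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical supported SGE length reported by the port */
	uint32_t sge_supp_len = 0x10000;

	/* dma_boundary is an inclusive mask: segments may not cross an
	 * address that is a multiple of (mask + 1). */
	uint32_t dma_boundary = sge_supp_len - 1;

	printf("sge_supp_len=0x%X -> dma_boundary mask=0x%X\n",
	       (unsigned)sge_supp_len, (unsigned)dma_boundary);
	return 0;
}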
@@ -2600,15 +2600,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2600 init_timer(&vport->els_tmofunc); 2600 init_timer(&vport->els_tmofunc);
2601 vport->els_tmofunc.function = lpfc_els_timeout; 2601 vport->els_tmofunc.function = lpfc_els_timeout;
2602 vport->els_tmofunc.data = (unsigned long)vport; 2602 vport->els_tmofunc.data = (unsigned long)vport;
2603 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
2604 phba->menlo_flag |= HBA_MENLO_SUPPORT;
2605 /* check for menlo minimum sg count */
2606 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
2607 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
2608 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2609 }
2610 }
2611
2612 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2603 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2613 if (error) 2604 if (error)
2614 goto out_put_shost; 2605 goto out_put_shost;
@@ -3236,12 +3227,26 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3236 3227
3237 if (!vport) 3228 if (!vport)
3238 return NULL; 3229 return NULL;
3239 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3240 if (!ndlp)
3241 return NULL;
3242 phba = vport->phba; 3230 phba = vport->phba;
3243 if (!phba) 3231 if (!phba)
3244 return NULL; 3232 return NULL;
3233 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3234 if (!ndlp) {
3235 /* Cannot find existing Fabric ndlp, so allocate a new one */
3236 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3237 if (!ndlp)
 3238 return NULL;
3239 lpfc_nlp_init(vport, ndlp, Fabric_DID);
3240 /* Set the node type */
3241 ndlp->nlp_type |= NLP_FABRIC;
3242 /* Put ndlp onto node list */
3243 lpfc_enqueue_node(vport, ndlp);
3244 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3245 /* re-setup ndlp without removing from node list */
3246 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3247 if (!ndlp)
 3248 return NULL;
3249 }
3245 if (phba->pport->port_state <= LPFC_FLOGI) 3250 if (phba->pport->port_state <= LPFC_FLOGI)
3246 return NULL; 3251 return NULL;
3247 /* If virtual link is not yet instantiated ignore CVL */ 3252 /* If virtual link is not yet instantiated ignore CVL */
@@ -3304,11 +3309,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3304 switch (event_type) { 3309 switch (event_type) {
3305 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3310 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3306 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3311 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3307 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3312 if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
3308 "2546 New FCF found/FCF parameter modified event: " 3313 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3309 "evt_tag:x%x, fcf_index:x%x\n", 3314 LOG_DISCOVERY,
3310 acqe_fcoe->event_tag, acqe_fcoe->index); 3315 "2546 New FCF found event: "
3311 3316 "evt_tag:x%x, fcf_index:x%x\n",
3317 acqe_fcoe->event_tag,
3318 acqe_fcoe->index);
3319 else
3320 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3321 LOG_DISCOVERY,
3322 "2788 FCF parameter modified event: "
3323 "evt_tag:x%x, fcf_index:x%x\n",
3324 acqe_fcoe->event_tag,
3325 acqe_fcoe->index);
3312 spin_lock_irq(&phba->hbalock); 3326 spin_lock_irq(&phba->hbalock);
3313 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || 3327 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3314 (phba->hba_flag & FCF_DISC_INPROGRESS)) { 3328 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3517,6 +3531,32 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3517} 3531}
3518 3532
3519/** 3533/**
3534 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3535 * @phba: pointer to lpfc hba data structure.
 3536 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3537 *
3538 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 3539 * is an asynchronous notification of a logical link speed change. The Port
3540 * reports the logical link speed in units of 10Mbps.
3541 **/
3542static void
3543lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3544 struct lpfc_acqe_grp5 *acqe_grp5)
3545{
3546 uint16_t prev_ll_spd;
3547
3548 phba->fc_eventTag = acqe_grp5->event_tag;
3549 phba->fcoe_eventtag = acqe_grp5->event_tag;
3550 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3551 phba->sli4_hba.link_state.logical_speed =
3552 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3553 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3554 "2789 GRP5 Async Event: Updating logical link speed "
3555 "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
 3556 (phba->sli4_hba.link_state.logical_speed * 10));
3557}
3558
3559/**
3520 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 3560 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
3521 * @phba: pointer to lpfc hba data structure. 3561 * @phba: pointer to lpfc hba data structure.
3522 * 3562 *
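Since the GRP5 field carries the logical link speed in units of 10 Mbps, the handler above multiplies by ten for the log message; a reported value of 1000, for instance, denotes 10000 Mbps. A trivial stand-alone check (sketch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* GRP5 logical link speed field, in units of 10 Mbps */
	uint16_t reported = 1000;

	printf("logical link speed: %u Mbps (%u Gbps)\n",
	       reported * 10, (reported * 10) / 1000);
	return 0;
}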
@@ -3552,6 +3592,10 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3552 lpfc_sli4_async_dcbx_evt(phba, 3592 lpfc_sli4_async_dcbx_evt(phba,
3553 &cq_event->cqe.acqe_dcbx); 3593 &cq_event->cqe.acqe_dcbx);
3554 break; 3594 break;
3595 case LPFC_TRAILER_CODE_GRP5:
3596 lpfc_sli4_async_grp5_evt(phba,
3597 &cq_event->cqe.acqe_grp5);
3598 break;
3555 default: 3599 default:
3556 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3600 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3557 "1804 Invalid asynchrous event code: " 3601 "1804 Invalid asynchrous event code: "
@@ -3813,6 +3857,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3813 3857
3814 /* Get all the module params for configuring this host */ 3858 /* Get all the module params for configuring this host */
3815 lpfc_get_cfgparam(phba); 3859 lpfc_get_cfgparam(phba);
3860 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
3861 phba->menlo_flag |= HBA_MENLO_SUPPORT;
3862 /* check for menlo minimum sg count */
3863 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
3864 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
3865 }
3866
3816 /* 3867 /*
3817 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 3868 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
3818 * used to create the sg_dma_buf_pool must be dynamically calculated. 3869 * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4030,6 +4081,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4030 if (unlikely(rc)) 4081 if (unlikely(rc))
4031 goto out_free_bsmbx; 4082 goto out_free_bsmbx;
4032 4083
4084 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4085 GFP_KERNEL);
4086 if (!mboxq) {
4087 rc = -ENOMEM;
4088 goto out_free_bsmbx;
4089 }
4090
4091 /* Get the Supported Pages. It is always available. */
4092 lpfc_supported_pages(mboxq);
4093 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4094 if (unlikely(rc)) {
4095 rc = -EIO;
4096 mempool_free(mboxq, phba->mbox_mem_pool);
4097 goto out_free_bsmbx;
4098 }
4099
4100 mqe = &mboxq->u.mqe;
4101 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4102 LPFC_MAX_SUPPORTED_PAGES);
4103 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4104 switch (pn_page[i]) {
4105 case LPFC_SLI4_PARAMETERS:
4106 phba->sli4_hba.pc_sli4_params.supported = 1;
4107 break;
4108 default:
4109 break;
4110 }
4111 }
4112
4113 /* Read the port's SLI4 Parameters capabilities if supported. */
4114 if (phba->sli4_hba.pc_sli4_params.supported)
4115 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4116 mempool_free(mboxq, phba->mbox_mem_pool);
4117 if (rc) {
4118 rc = -EIO;
4119 goto out_free_bsmbx;
4120 }
4033 /* Create all the SLI4 queues */ 4121 /* Create all the SLI4 queues */
4034 rc = lpfc_sli4_queue_create(phba); 4122 rc = lpfc_sli4_queue_create(phba);
4035 if (rc) 4123 if (rc)
@@ -4090,43 +4178,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4090 goto out_free_fcp_eq_hdl; 4178 goto out_free_fcp_eq_hdl;
4091 } 4179 }
4092 4180
4093 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4094 GFP_KERNEL);
4095 if (!mboxq) {
4096 rc = -ENOMEM;
4097 goto out_free_fcp_eq_hdl;
4098 }
4099
4100 /* Get the Supported Pages. It is always available. */
4101 lpfc_supported_pages(mboxq);
4102 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4103 if (unlikely(rc)) {
4104 rc = -EIO;
4105 mempool_free(mboxq, phba->mbox_mem_pool);
4106 goto out_free_fcp_eq_hdl;
4107 }
4108
4109 mqe = &mboxq->u.mqe;
4110 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4111 LPFC_MAX_SUPPORTED_PAGES);
4112 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4113 switch (pn_page[i]) {
4114 case LPFC_SLI4_PARAMETERS:
4115 phba->sli4_hba.pc_sli4_params.supported = 1;
4116 break;
4117 default:
4118 break;
4119 }
4120 }
4121
4122 /* Read the port's SLI4 Parameters capabilities if supported. */
4123 if (phba->sli4_hba.pc_sli4_params.supported)
4124 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4125 mempool_free(mboxq, phba->mbox_mem_pool);
4126 if (rc) {
4127 rc = -EIO;
4128 goto out_free_fcp_eq_hdl;
4129 }
4130 return rc; 4181 return rc;
4131 4182
4132out_free_fcp_eq_hdl: 4183out_free_fcp_eq_hdl:
@@ -5050,6 +5101,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5050 5101
5051 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5102 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5052 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5103 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5104 phba->mbox_ext = (phba->slim2p.virt +
5105 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5053 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5106 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5054 phba->IOCBs = (phba->slim2p.virt + 5107 phba->IOCBs = (phba->slim2p.virt +
5055 offsetof(struct lpfc_sli2_slim, IOCBs)); 5108 offsetof(struct lpfc_sli2_slim, IOCBs));
@@ -7753,21 +7806,23 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
7753 * @phba: pointer to lpfc hba data structure. 7806 * @phba: pointer to lpfc hba data structure.
7754 * 7807 *
 7755 * This routine is called to prepare the SLI3 device for PCI slot recovery. It 7808 * This routine is called to prepare the SLI3 device for PCI slot recovery. It
7756 * aborts and stops all the on-going I/Os on the pci device. 7809 * aborts all the outstanding SCSI I/Os to the pci device.
7757 **/ 7810 **/
7758static void 7811static void
7759lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 7812lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7760{ 7813{
7814 struct lpfc_sli *psli = &phba->sli;
7815 struct lpfc_sli_ring *pring;
7816
7761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7762 "2723 PCI channel I/O abort preparing for recovery\n"); 7818 "2723 PCI channel I/O abort preparing for recovery\n");
7763 /* Prepare for bringing HBA offline */ 7819
7764 lpfc_offline_prep(phba); 7820 /*
 7765 /* Clear sli active flag to prevent sysfs access to HBA */ 7821 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
 7766 spin_lock_irq(&phba->hbalock); 7822 * and let the SCSI mid-layer retry them to recover.
7767 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 7823 */
7768 spin_unlock_irq(&phba->hbalock); 7824 pring = &psli->ring[psli->fcp_ring];
7769 /* Stop and flush all I/Os and bring HBA offline */ 7825 lpfc_sli_abort_iocb_ring(phba, pring);
7770 lpfc_offline(phba);
7771} 7826}
7772 7827
7773/** 7828/**
@@ -7781,21 +7836,20 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
7781static void 7836static void
7782lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 7837lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
7783{ 7838{
7784 struct lpfc_sli *psli = &phba->sli;
7785 struct lpfc_sli_ring *pring;
7786
7787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7788 "2710 PCI channel disable preparing for reset\n"); 7840 "2710 PCI channel disable preparing for reset\n");
7841
7842 /* Block all SCSI devices' I/Os on the host */
7843 lpfc_scsi_dev_block(phba);
7844
7845 /* stop all timers */
7846 lpfc_stop_hba_timers(phba);
7847
7789 /* Disable interrupt and pci device */ 7848 /* Disable interrupt and pci device */
7790 lpfc_sli_disable_intr(phba); 7849 lpfc_sli_disable_intr(phba);
7791 pci_disable_device(phba->pcidev); 7850 pci_disable_device(phba->pcidev);
7792 /* 7851 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
7793 * There may be I/Os dropped by the firmware. 7852 lpfc_sli_flush_fcp_rings(phba);
7794 * Error iocb (I/O) on txcmplq and let the SCSI layer
7795 * retry it after re-establishing link.
7796 */
7797 pring = &psli->ring[psli->fcp_ring];
7798 lpfc_sli_abort_iocb_ring(phba, pring);
7799} 7853}
7800 7854
7801/** 7855/**
@@ -7811,6 +7865,12 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
7811{ 7865{
7812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7813 "2711 PCI channel permanent disable for failure\n"); 7867 "2711 PCI channel permanent disable for failure\n");
7868 /* Block all SCSI devices' I/Os on the host */
7869 lpfc_scsi_dev_block(phba);
7870
7871 /* stop all timers */
7872 lpfc_stop_hba_timers(phba);
7873
7814 /* Clean up all driver's outstanding SCSI I/Os */ 7874 /* Clean up all driver's outstanding SCSI I/Os */
7815 lpfc_sli_flush_fcp_rings(phba); 7875 lpfc_sli_flush_fcp_rings(phba);
7816} 7876}
@@ -7839,9 +7899,6 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
7839 struct Scsi_Host *shost = pci_get_drvdata(pdev); 7899 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7840 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 7900 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7841 7901
7842 /* Block all SCSI devices' I/Os on the host */
7843 lpfc_scsi_dev_block(phba);
7844
7845 switch (state) { 7902 switch (state) {
7846 case pci_channel_io_normal: 7903 case pci_channel_io_normal:
7847 /* Non-fatal error, prepare for recovery */ 7904 /* Non-fatal error, prepare for recovery */
@@ -7948,7 +8005,7 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
7948 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8005 struct Scsi_Host *shost = pci_get_drvdata(pdev);
7949 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8006 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
7950 8007
 7951 /* Bring the device online */ 8008 /* Bring device online; it will be a no-op for non-fatal error resume */
7952 lpfc_online(phba); 8009 lpfc_online(phba);
7953 8010
7954 /* Clean up Advanced Error Reporting (AER) if needed */ 8011 /* Clean up Advanced Error Reporting (AER) if needed */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 72e6adb0643e..e84dc33ca201 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1216,7 +1216,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1216 phba->pcb->feature = FEATURE_INITIAL_SLI2; 1216 phba->pcb->feature = FEATURE_INITIAL_SLI2;
1217 1217
1218 /* Setup Mailbox pointers */ 1218 /* Setup Mailbox pointers */
1219 phba->pcb->mailBoxSize = sizeof(MAILBOX_t); 1219 phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
1220 offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt; 1220 offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
1221 pdma_addr = phba->slim2p.phys + offset; 1221 pdma_addr = phba->slim2p.phys + offset;
1222 phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr); 1222 phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
@@ -1272,28 +1272,41 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1272 * 1272 *
1273 */ 1273 */
1274 1274
1275 if (phba->sli_rev == 3) { 1275 if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
1276 phba->host_gp = &mb_slim->us.s3.host[0]; 1276 phba->host_gp = &phba->mbox->us.s2.host[0];
1277 phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
1278 } else {
1279 phba->host_gp = &mb_slim->us.s2.host[0];
1280 phba->hbq_put = NULL; 1277 phba->hbq_put = NULL;
1281 } 1278 offset = (uint8_t *)&phba->mbox->us.s2.host -
1279 (uint8_t *)phba->slim2p.virt;
1280 pdma_addr = phba->slim2p.phys + offset;
1281 phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
1282 phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
1283 } else {
 1284 /* Host Group Pointer is always in SLIM */
1285 mb->un.varCfgPort.hps = 1;
1282 1286
1283 /* mask off BAR0's flag bits 0 - 3 */ 1287 if (phba->sli_rev == 3) {
1284 phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + 1288 phba->host_gp = &mb_slim->us.s3.host[0];
1285 (void __iomem *)phba->host_gp - 1289 phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
1286 (void __iomem *)phba->MBslimaddr; 1290 } else {
1287 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) 1291 phba->host_gp = &mb_slim->us.s2.host[0];
1288 phba->pcb->hgpAddrHigh = bar_high; 1292 phba->hbq_put = NULL;
1289 else 1293 }
1290 phba->pcb->hgpAddrHigh = 0;
1291 /* write HGP data to SLIM at the required longword offset */
1292 memset(&hgp, 0, sizeof(struct lpfc_hgp));
1293 1294
1294 for (i=0; i < phba->sli.num_rings; i++) { 1295 /* mask off BAR0's flag bits 0 - 3 */
1295 lpfc_memcpy_to_slim(phba->host_gp + i, &hgp, 1296 phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
1297 (void __iomem *)phba->host_gp -
1298 (void __iomem *)phba->MBslimaddr;
1299 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
1300 phba->pcb->hgpAddrHigh = bar_high;
1301 else
1302 phba->pcb->hgpAddrHigh = 0;
1303 /* write HGP data to SLIM at the required longword offset */
1304 memset(&hgp, 0, sizeof(struct lpfc_hgp));
1305
1306 for (i = 0; i < phba->sli.num_rings; i++) {
1307 lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
1296 sizeof(*phba->host_gp)); 1308 sizeof(*phba->host_gp));
1309 }
1297 } 1310 }
1298 1311
1299 /* Setup Port Group offset */ 1312 /* Setup Port Group offset */
@@ -1598,7 +1611,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1598 for (sgentry = 0; sgentry < sgecount; sgentry++) { 1611 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1599 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge); 1612 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1600 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo); 1613 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1601 dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE, 1614 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
1602 mbox->sge_array->addr[sgentry], phyaddr); 1615 mbox->sge_array->addr[sgentry], phyaddr);
1603 } 1616 }
1604 /* Free the sge address array memory */ 1617 /* Free the sge address array memory */
@@ -1656,7 +1669,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1656 } 1669 }
1657 1670
 1658 /* Setup for the non-embedded mbox command */ 1671 /* Setup for the non-embedded mbox command */
1659 pcount = (PAGE_ALIGN(length))/PAGE_SIZE; 1672 pcount = (PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1660 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? 1673 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1661 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; 1674 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1662 /* Allocate record for keeping SGE virtual addresses */ 1675 /* Allocate record for keeping SGE virtual addresses */
@@ -1671,24 +1684,24 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1671 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { 1684 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1672 /* The DMA memory is always allocated in the length of a 1685 /* The DMA memory is always allocated in the length of a
1673 * page even though the last SGE might not fill up to a 1686 * page even though the last SGE might not fill up to a
1674 * page, this is used as a priori size of PAGE_SIZE for 1687 * page, this is used as a priori size of SLI4_PAGE_SIZE for
1675 * the later DMA memory free. 1688 * the later DMA memory free.
1676 */ 1689 */
1677 viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE, 1690 viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
1678 &phyaddr, GFP_KERNEL); 1691 &phyaddr, GFP_KERNEL);
 1679 /* If the allocation fails, proceed with whatever we have */ 1692 /* If the allocation fails, proceed with whatever we have */
1680 if (!viraddr) 1693 if (!viraddr)
1681 break; 1694 break;
1682 memset(viraddr, 0, PAGE_SIZE); 1695 memset(viraddr, 0, SLI4_PAGE_SIZE);
1683 mbox->sge_array->addr[pagen] = viraddr; 1696 mbox->sge_array->addr[pagen] = viraddr;
1684 /* Keep the first page for later sub-header construction */ 1697 /* Keep the first page for later sub-header construction */
1685 if (pagen == 0) 1698 if (pagen == 0)
1686 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr; 1699 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1687 resid_len = length - alloc_len; 1700 resid_len = length - alloc_len;
1688 if (resid_len > PAGE_SIZE) { 1701 if (resid_len > SLI4_PAGE_SIZE) {
1689 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, 1702 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1690 PAGE_SIZE); 1703 SLI4_PAGE_SIZE);
1691 alloc_len += PAGE_SIZE; 1704 alloc_len += SLI4_PAGE_SIZE;
1692 } else { 1705 } else {
1693 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, 1706 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1694 resid_len); 1707 resid_len);
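The loop being retargeted from PAGE_SIZE to SLI4_PAGE_SIZE is a standard fixed-page split: full pages are emitted until the residue fits in a final, shorter SGE. Decoupled from the driver (a sketch, with SLI4_PAGE_SIZE hard-coded to the 4096 this patch defines):

#include <stdio.h>

#define SLI4_PAGE_SIZE 4096

/* Split a config-region length into SGE-sized chunks the way
 * lpfc_sli4_config() does: full pages first, residue last. */
static void split_into_sges(unsigned int length)
{
	unsigned int alloc_len = 0, resid_len, pagen = 0;

	while (alloc_len < length) {
		resid_len = length - alloc_len;
		if (resid_len > SLI4_PAGE_SIZE) {
			printf("SGE %u: %u bytes\n", pagen, SLI4_PAGE_SIZE);
			alloc_len += SLI4_PAGE_SIZE;
		} else {
			printf("SGE %u: %u bytes\n", pagen, resid_len);
			alloc_len += resid_len;
		}
		pagen++;
	}
}

int main(void)
{
	split_into_sges(10000);	/* -> 4096, 4096, 1808 */
	return 0;
}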
@@ -1886,6 +1899,8 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1886 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); 1899 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
1887 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); 1900 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
1888 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); 1901 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
1902 reg_vfi->e_d_tov = vport->phba->fc_edtov;
1903 reg_vfi->r_a_tov = vport->phba->fc_ratov;
1889 reg_vfi->bde.addrHigh = putPaddrHigh(phys); 1904 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
1890 reg_vfi->bde.addrLow = putPaddrLow(phys); 1905 reg_vfi->bde.addrLow = putPaddrLow(phys);
1891 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); 1906 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e331204a4d56..b90820a699fd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -493,6 +493,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
493 struct lpfc_iocbq *cmdiocb, uint32_t els_cmd) 493 struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
494{ 494{
495 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 495 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
496 struct lpfc_hba *phba = vport->phba;
497 struct lpfc_vport **vports;
 498 int i, active_vlink_present = 0;
496 499
497 /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */ 500 /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
498 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary 501 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
@@ -505,15 +508,44 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
505 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 508 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
506 else 509 else
507 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 510 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
508 if ((ndlp->nlp_DID == Fabric_DID) && 511 if (ndlp->nlp_DID == Fabric_DID) {
509 vport->port_type == LPFC_NPIV_PORT) { 512 if (vport->port_state <= LPFC_FDISC)
513 goto out;
510 lpfc_linkdown_port(vport); 514 lpfc_linkdown_port(vport);
511 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
512 spin_lock_irq(shost->host_lock); 515 spin_lock_irq(shost->host_lock);
513 ndlp->nlp_flag |= NLP_DELAY_TMO; 516 vport->fc_flag |= FC_VPORT_LOGO_RCVD;
514 spin_unlock_irq(shost->host_lock); 517 spin_unlock_irq(shost->host_lock);
518 vports = lpfc_create_vport_work_array(phba);
519 if (vports) {
520 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
521 i++) {
522 if ((!(vports[i]->fc_flag &
523 FC_VPORT_LOGO_RCVD)) &&
524 (vports[i]->port_state > LPFC_FDISC)) {
525 active_vlink_present = 1;
526 break;
527 }
528 }
529 lpfc_destroy_vport_work_array(phba, vports);
530 }
515 531
516 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 532 if (active_vlink_present) {
533 /*
534 * If there are other active VLinks present,
535 * re-instantiate the Vlink using FDISC.
536 */
537 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
538 spin_lock_irq(shost->host_lock);
539 ndlp->nlp_flag |= NLP_DELAY_TMO;
540 spin_unlock_irq(shost->host_lock);
541 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
542 vport->port_state = LPFC_FDISC;
543 } else {
544 spin_lock_irq(shost->host_lock);
545 phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
546 spin_unlock_irq(shost->host_lock);
547 lpfc_retry_pport_discovery(phba);
548 }
517 } else if ((!(ndlp->nlp_type & NLP_FABRIC) && 549 } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
518 ((ndlp->nlp_type & NLP_FCP_TARGET) || 550 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
519 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || 551 !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
@@ -526,6 +558,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
526 558
527 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 559 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
528 } 560 }
561out:
529 ndlp->nlp_prev_state = ndlp->nlp_state; 562 ndlp->nlp_prev_state = ndlp->nlp_state;
530 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 563 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
531 564
@@ -604,11 +637,55 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
604 lpfc_unreg_rpi(vport, ndlp); 637 lpfc_unreg_rpi(vport, ndlp);
605 return 0; 638 return 0;
606} 639}
640/**
 641 * lpfc_release_rpi - Release an RPI by issuing an unreg_login mailbox cmd.
 642 * @phba : Pointer to lpfc_hba structure.
 643 * @vport: Pointer to lpfc_vport structure.
 644 * @rpi : rpi to be released.
 645 *
 646 * This function sends an unreg_login mailbox command to the firmware
 647 * to release an rpi.
648 **/
649void
650lpfc_release_rpi(struct lpfc_hba *phba,
651 struct lpfc_vport *vport,
652 uint16_t rpi)
653{
654 LPFC_MBOXQ_t *pmb;
655 int rc;
656
657 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
658 GFP_KERNEL);
659 if (!pmb)
660 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
661 "2796 mailbox memory allocation failed \n");
662 else {
663 lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
664 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
665 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
666 if (rc == MBX_NOT_FINISHED)
667 mempool_free(pmb, phba->mbox_mem_pool);
668 }
669}
607 670
608static uint32_t 671static uint32_t
609lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 672lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
610 void *arg, uint32_t evt) 673 void *arg, uint32_t evt)
611{ 674{
675 struct lpfc_hba *phba;
676 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
677 MAILBOX_t *mb;
678 uint16_t rpi;
679
680 phba = vport->phba;
681 /* Release the RPI if reglogin completing */
682 if (!(phba->pport->load_flag & FC_UNLOADING) &&
683 (evt == NLP_EVT_CMPL_REG_LOGIN) &&
684 (!pmb->u.mb.mbxStatus)) {
685 mb = &pmb->u.mb;
 686 rpi = mb->un.varWords[0];
687 lpfc_release_rpi(phba, vport, rpi);
688 }
612 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 689 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
613 "0271 Illegal State Transition: node x%x " 690 "0271 Illegal State Transition: node x%x "
614 "event x%x, state x%x Data: x%x x%x\n", 691 "event x%x, state x%x Data: x%x x%x\n",
@@ -944,6 +1021,18 @@ static uint32_t
944lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport, 1021lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
945 struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) 1022 struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
946{ 1023{
1024 struct lpfc_hba *phba;
1025 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1026 MAILBOX_t *mb = &pmb->u.mb;
1027 uint16_t rpi;
1028
1029 phba = vport->phba;
1030 /* Release the RPI */
1031 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1032 !mb->mbxStatus) {
1033 rpi = pmb->u.mb.un.varWords[0];
1034 lpfc_release_rpi(phba, vport, rpi);
1035 }
947 return ndlp->nlp_state; 1036 return ndlp->nlp_state;
948} 1037}
949 1038
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index dccdb822328c..f4a3b2e79eea 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1141,37 +1141,47 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1141} 1141}
1142 1142
1143/* 1143/*
1144 * Given a scsi cmnd, determine the BlockGuard profile to be used 1144 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
1145 * with the cmd 1145 * @sc: The SCSI command to examine
1146 * @txop: (out) BlockGuard operation for transmitted data
1147 * @rxop: (out) BlockGuard operation for received data
1148 *
1149 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1150 *
1146 */ 1151 */
1147static int 1152static int
1148lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc) 1153lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1154 uint8_t *txop, uint8_t *rxop)
1149{ 1155{
1150 uint8_t guard_type = scsi_host_get_guard(sc->device->host); 1156 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1151 uint8_t ret_prof = LPFC_PROF_INVALID; 1157 uint8_t ret = 0;
1152 1158
1153 if (guard_type == SHOST_DIX_GUARD_IP) { 1159 if (guard_type == SHOST_DIX_GUARD_IP) {
1154 switch (scsi_get_prot_op(sc)) { 1160 switch (scsi_get_prot_op(sc)) {
1155 case SCSI_PROT_READ_INSERT: 1161 case SCSI_PROT_READ_INSERT:
1156 case SCSI_PROT_WRITE_STRIP: 1162 case SCSI_PROT_WRITE_STRIP:
1157 ret_prof = LPFC_PROF_AST2; 1163 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1164 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1158 break; 1165 break;
1159 1166
1160 case SCSI_PROT_READ_STRIP: 1167 case SCSI_PROT_READ_STRIP:
1161 case SCSI_PROT_WRITE_INSERT: 1168 case SCSI_PROT_WRITE_INSERT:
1162 ret_prof = LPFC_PROF_A1; 1169 *txop = BG_OP_IN_NODIF_OUT_CRC;
1170 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1163 break; 1171 break;
1164 1172
1165 case SCSI_PROT_READ_PASS: 1173 case SCSI_PROT_READ_PASS:
1166 case SCSI_PROT_WRITE_PASS: 1174 case SCSI_PROT_WRITE_PASS:
1167 ret_prof = LPFC_PROF_AST1; 1175 *txop = BG_OP_IN_CSUM_OUT_CRC;
1176 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1168 break; 1177 break;
1169 1178
1170 case SCSI_PROT_NORMAL: 1179 case SCSI_PROT_NORMAL:
1171 default: 1180 default:
1172 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1181 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1173 "9063 BLKGRD:Bad op/guard:%d/%d combination\n", 1182 "9063 BLKGRD: Bad op/guard:%d/%d combination\n",
1174 scsi_get_prot_op(sc), guard_type); 1183 scsi_get_prot_op(sc), guard_type);
1184 ret = 1;
1175 break; 1185 break;
1176 1186
1177 } 1187 }
@@ -1179,12 +1189,14 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1179 switch (scsi_get_prot_op(sc)) { 1189 switch (scsi_get_prot_op(sc)) {
1180 case SCSI_PROT_READ_STRIP: 1190 case SCSI_PROT_READ_STRIP:
1181 case SCSI_PROT_WRITE_INSERT: 1191 case SCSI_PROT_WRITE_INSERT:
1182 ret_prof = LPFC_PROF_A1; 1192 *txop = BG_OP_IN_NODIF_OUT_CRC;
1193 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1183 break; 1194 break;
1184 1195
1185 case SCSI_PROT_READ_PASS: 1196 case SCSI_PROT_READ_PASS:
1186 case SCSI_PROT_WRITE_PASS: 1197 case SCSI_PROT_WRITE_PASS:
1187 ret_prof = LPFC_PROF_C1; 1198 *txop = BG_OP_IN_CRC_OUT_CRC;
1199 *rxop = BG_OP_IN_CRC_OUT_CRC;
1188 break; 1200 break;
1189 1201
1190 case SCSI_PROT_READ_INSERT: 1202 case SCSI_PROT_READ_INSERT:
@@ -1194,6 +1206,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1194 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1206 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1195 "9075 BLKGRD: Bad op/guard:%d/%d combination\n", 1207 "9075 BLKGRD: Bad op/guard:%d/%d combination\n",
1196 scsi_get_prot_op(sc), guard_type); 1208 scsi_get_prot_op(sc), guard_type);
1209 ret = 1;
1197 break; 1210 break;
1198 } 1211 }
1199 } else { 1212 } else {
@@ -1201,7 +1214,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1201 BUG(); 1214 BUG();
1202 } 1215 }
1203 1216
1204 return ret_prof; 1217 return ret;
1205} 1218}
1206 1219
1207struct scsi_dif_tuple { 1220struct scsi_dif_tuple {
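
The rewritten helper above is a pure table lookup from (guard type, protection op) to a per-direction opcode pair, returning nonzero for combinations the hardware cannot do. A compact standalone sketch of the same mapping; the enum names are hypothetical, loosely mirroring the BG_OP_* values in the patch:

    #include <stdio.h>

    /* Hypothetical opcodes, loosely mirroring the BG_OP_* pairs above. */
    enum bg_op {
        IN_CSUM_OUT_NODIF, IN_NODIF_OUT_CSUM,
        IN_NODIF_OUT_CRC,  IN_CRC_OUT_NODIF,
        IN_CSUM_OUT_CRC,   IN_CRC_OUT_CSUM,
        IN_CRC_OUT_CRC
    };
    enum prot_op { READ_INSERT, WRITE_STRIP, READ_STRIP, WRITE_INSERT,
                   READ_PASS, WRITE_PASS };
    enum guard_type { GUARD_IP, GUARD_CRC };

    /* Returns 0 and fills *txop/*rxop, or 1 for an unsupported combination. */
    static int prot_to_bg_opcodes(enum guard_type g, enum prot_op op,
                                  enum bg_op *txop, enum bg_op *rxop)
    {
        if (g == GUARD_IP) {
            switch (op) {
            case READ_INSERT: case WRITE_STRIP:
                *txop = IN_CSUM_OUT_NODIF; *rxop = IN_NODIF_OUT_CSUM; return 0;
            case READ_STRIP: case WRITE_INSERT:
                *txop = IN_NODIF_OUT_CRC;  *rxop = IN_CRC_OUT_NODIF;  return 0;
            case READ_PASS: case WRITE_PASS:
                *txop = IN_CSUM_OUT_CRC;   *rxop = IN_CRC_OUT_CSUM;   return 0;
            }
        } else {                /* DIF CRC guard */
            switch (op) {
            case READ_STRIP: case WRITE_INSERT:
                *txop = IN_NODIF_OUT_CRC;  *rxop = IN_CRC_OUT_NODIF;  return 0;
            case READ_PASS: case WRITE_PASS:
                *txop = IN_CRC_OUT_CRC;    *rxop = IN_CRC_OUT_CRC;    return 0;
            default:            /* insert/strip require the IP guard */
                break;
            }
        }
        return 1;               /* bad op/guard combination */
    }

    int main(void)
    {
        enum bg_op tx, rx;
        int bad = prot_to_bg_opcodes(GUARD_IP, READ_PASS, &tx, &rx);
        printf("bad=%d txop=%d rxop=%d\n", bad, tx, rx);
        return 0;
    }
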
@@ -1266,7 +1279,9 @@ lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
1266 * The buffer list consists of just one protection group described 1279 * The buffer list consists of just one protection group described
1267 * below: 1280 * below:
1268 * +-------------------------+ 1281 * +-------------------------+
1269 * start of prot group --> | PDE_1 | 1282 * start of prot group --> | PDE_5 |
1283 * +-------------------------+
1284 * | PDE_6 |
1270 * +-------------------------+ 1285 * +-------------------------+
1271 * | Data BDE | 1286 * | Data BDE |
1272 * +-------------------------+ 1287 * +-------------------------+
@@ -1284,30 +1299,49 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1284 struct ulp_bde64 *bpl, int datasegcnt) 1299 struct ulp_bde64 *bpl, int datasegcnt)
1285{ 1300{
1286 struct scatterlist *sgde = NULL; /* s/g data entry */ 1301 struct scatterlist *sgde = NULL; /* s/g data entry */
1287 struct lpfc_pde *pde1 = NULL; 1302 struct lpfc_pde5 *pde5 = NULL;
1303 struct lpfc_pde6 *pde6 = NULL;
1288 dma_addr_t physaddr; 1304 dma_addr_t physaddr;
1289 int i = 0, num_bde = 0; 1305 int i = 0, num_bde = 0, status;
1290 int datadir = sc->sc_data_direction; 1306 int datadir = sc->sc_data_direction;
1291 int prof = LPFC_PROF_INVALID;
1292 unsigned blksize; 1307 unsigned blksize;
1293 uint32_t reftag; 1308 uint32_t reftag;
1294 uint16_t apptagmask, apptagval; 1309 uint16_t apptagmask, apptagval;
1310 uint8_t txop, rxop;
1295 1311
1296 pde1 = (struct lpfc_pde *) bpl; 1312 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1297 prof = lpfc_sc_to_sli_prof(phba, sc); 1313 if (status)
1298
1299 if (prof == LPFC_PROF_INVALID)
1300 goto out; 1314 goto out;
1301 1315
1302 /* extract some info from the scsi command for PDE1*/ 1316 /* extract some info from the scsi command for pde*/
1303 blksize = lpfc_cmd_blksize(sc); 1317 blksize = lpfc_cmd_blksize(sc);
1304 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1318 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1305 1319
1306 /* setup PDE1 with what we have */ 1320 /* setup PDE5 with what we have */
1307 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize, 1321 pde5 = (struct lpfc_pde5 *) bpl;
1308 BG_EC_STOP_ERR); 1322 memset(pde5, 0, sizeof(struct lpfc_pde5));
1309 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag); 1323 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1324 pde5->reftag = reftag;
1310 1325
1326 /* advance bpl and increment bde count */
1327 num_bde++;
1328 bpl++;
1329 pde6 = (struct lpfc_pde6 *) bpl;
1330
1331 /* setup PDE6 with the rest of the info */
1332 memset(pde6, 0, sizeof(struct lpfc_pde6));
1333 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1334 bf_set(pde6_optx, pde6, txop);
1335 bf_set(pde6_oprx, pde6, rxop);
1336 if (datadir == DMA_FROM_DEVICE) {
1337 bf_set(pde6_ce, pde6, 1);
1338 bf_set(pde6_re, pde6, 1);
1339 bf_set(pde6_ae, pde6, 1);
1340 }
1341 bf_set(pde6_ai, pde6, 1);
1342 bf_set(pde6_apptagval, pde6, apptagval);
1343
1344 /* advance bpl and increment bde count */
1311 num_bde++; 1345 num_bde++;
1312 bpl++; 1346 bpl++;
1313 1347
@@ -1342,15 +1376,17 @@ out:
1342 * The buffer list for this type consists of one or more of the 1376 * The buffer list for this type consists of one or more of the
1343 * protection groups described below: 1377 * protection groups described below:
1344 * +-------------------------+ 1378 * +-------------------------+
1345 * start of first prot group --> | PDE_1 | 1379 * start of first prot group --> | PDE_5 |
1380 * +-------------------------+
1381 * | PDE_6 |
1346 * +-------------------------+ 1382 * +-------------------------+
1347 * | PDE_3 (Prot BDE) | 1383 * | PDE_7 (Prot BDE) |
1348 * +-------------------------+ 1384 * +-------------------------+
1349 * | Data BDE | 1385 * | Data BDE |
1350 * +-------------------------+ 1386 * +-------------------------+
1351 * |more Data BDE's ... (opt)| 1387 * |more Data BDE's ... (opt)|
1352 * +-------------------------+ 1388 * +-------------------------+
1353 * start of new prot group --> | PDE_1 | 1389 * start of new prot group --> | PDE_5 |
1354 * +-------------------------+ 1390 * +-------------------------+
1355 * | ... | 1391 * | ... |
1356 * +-------------------------+ 1392 * +-------------------------+
@@ -1369,19 +1405,21 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1369{ 1405{
1370 struct scatterlist *sgde = NULL; /* s/g data entry */ 1406 struct scatterlist *sgde = NULL; /* s/g data entry */
1371 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 1407 struct scatterlist *sgpe = NULL; /* s/g prot entry */
1372 struct lpfc_pde *pde1 = NULL; 1408 struct lpfc_pde5 *pde5 = NULL;
1409 struct lpfc_pde6 *pde6 = NULL;
1373 struct ulp_bde64 *prot_bde = NULL; 1410 struct ulp_bde64 *prot_bde = NULL;
1374 dma_addr_t dataphysaddr, protphysaddr; 1411 dma_addr_t dataphysaddr, protphysaddr;
1375 unsigned short curr_data = 0, curr_prot = 0; 1412 unsigned short curr_data = 0, curr_prot = 0;
1376 unsigned int split_offset, protgroup_len; 1413 unsigned int split_offset, protgroup_len;
1377 unsigned int protgrp_blks, protgrp_bytes; 1414 unsigned int protgrp_blks, protgrp_bytes;
1378 unsigned int remainder, subtotal; 1415 unsigned int remainder, subtotal;
1379 int prof = LPFC_PROF_INVALID; 1416 int status;
1380 int datadir = sc->sc_data_direction; 1417 int datadir = sc->sc_data_direction;
1381 unsigned char pgdone = 0, alldone = 0; 1418 unsigned char pgdone = 0, alldone = 0;
1382 unsigned blksize; 1419 unsigned blksize;
1383 uint32_t reftag; 1420 uint32_t reftag;
1384 uint16_t apptagmask, apptagval; 1421 uint16_t apptagmask, apptagval;
1422 uint8_t txop, rxop;
1385 int num_bde = 0; 1423 int num_bde = 0;
1386 1424
1387 sgpe = scsi_prot_sglist(sc); 1425 sgpe = scsi_prot_sglist(sc);
@@ -1394,31 +1432,47 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1394 return 0; 1432 return 0;
1395 } 1433 }
1396 1434
1397 prof = lpfc_sc_to_sli_prof(phba, sc); 1435 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1398 if (prof == LPFC_PROF_INVALID) 1436 if (status)
1399 goto out; 1437 goto out;
1400 1438
1401 /* extract some info from the scsi command for PDE1*/ 1439 /* extract some info from the scsi command */
1402 blksize = lpfc_cmd_blksize(sc); 1440 blksize = lpfc_cmd_blksize(sc);
1403 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1441 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1404 1442
1405 split_offset = 0; 1443 split_offset = 0;
1406 do { 1444 do {
1407 /* setup the first PDE_1 */ 1445 /* setup PDE5 with what we have */
1408 pde1 = (struct lpfc_pde *) bpl; 1446 pde5 = (struct lpfc_pde5 *) bpl;
1409 1447 memset(pde5, 0, sizeof(struct lpfc_pde5));
1410 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize, 1448 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1411 BG_EC_STOP_ERR); 1449 pde5->reftag = reftag;
1412 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1413 1450
1451 /* advance bpl and increment bde count */
1452 num_bde++;
1453 bpl++;
1454 pde6 = (struct lpfc_pde6 *) bpl;
1455
1456 /* setup PDE6 with the rest of the info */
1457 memset(pde6, 0, sizeof(struct lpfc_pde6));
1458 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1459 bf_set(pde6_optx, pde6, txop);
1460 bf_set(pde6_oprx, pde6, rxop);
1461 bf_set(pde6_ce, pde6, 1);
1462 bf_set(pde6_re, pde6, 1);
1463 bf_set(pde6_ae, pde6, 1);
1464 bf_set(pde6_ai, pde6, 1);
1465 bf_set(pde6_apptagval, pde6, apptagval);
1466
1467 /* advance bpl and increment bde count */
1414 num_bde++; 1468 num_bde++;
1415 bpl++; 1469 bpl++;
1416 1470
1417 /* setup the first BDE that points to protection buffer */ 1471 /* setup the first BDE that points to protection buffer */
1418 prot_bde = (struct ulp_bde64 *) bpl; 1472 prot_bde = (struct ulp_bde64 *) bpl;
1419 protphysaddr = sg_dma_address(sgpe); 1473 protphysaddr = sg_dma_address(sgpe);
1420 prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); 1474 prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
1421 prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); 1475 prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
1422 protgroup_len = sg_dma_len(sgpe); 1476 protgroup_len = sg_dma_len(sgpe);
1423 1477
1424 1478
@@ -1429,10 +1483,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1429 protgrp_bytes = protgrp_blks * blksize; 1483 protgrp_bytes = protgrp_blks * blksize;
1430 1484
1431 prot_bde->tus.f.bdeSize = protgroup_len; 1485 prot_bde->tus.f.bdeSize = protgroup_len;
1432 if (datadir == DMA_TO_DEVICE) 1486 prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
1433 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1434 else
1435 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1436 prot_bde->tus.w = le32_to_cpu(bpl->tus.w); 1487 prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
1437 1488
1438 curr_prot++; 1489 curr_prot++;
@@ -1484,6 +1535,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1484 1535
1485 /* Move to the next s/g segment if possible */ 1536 /* Move to the next s/g segment if possible */
1486 sgde = sg_next(sgde); 1537 sgde = sg_next(sgde);
1538
1487 } 1539 }
1488 1540
1489 /* are we done ? */ 1541 /* are we done ? */
@@ -1506,7 +1558,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1506 1558
1507out: 1559out:
1508 1560
1509
1510 return num_bde; 1561 return num_bde;
1511} 1562}
1512/* 1563/*
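
The loop in lpfc_bg_setup_bpl_prot sizes each protection group from the protection scatter/gather entry: one 8-byte DIF tuple guards one logical block, so a prot segment of N bytes covers N/8 blocks, and data BDEs are consumed until that many bytes are mapped. A small worked sketch of the arithmetic; the values are illustrative only:

    #include <stdio.h>

    /* One 8-byte DIF tuple guards one logical block, so a protection
     * segment of N bytes covers N/8 blocks of data. */
    #define DIF_TUPLE_SIZE 8

    int main(void)
    {
        unsigned blksize      = 512;    /* from the SCSI command */
        unsigned prot_seg_len = 64;     /* bytes in one protection s/g entry */
        unsigned data_len     = 5000;   /* total data payload to map */
        unsigned grp = 0;

        while (data_len) {
            unsigned blks  = prot_seg_len / DIF_TUPLE_SIZE;
            unsigned bytes = blks * blksize;     /* data this group covers */
            if (bytes > data_len)
                bytes = data_len;                /* last, partial group */
            printf("prot group %u covers %u data bytes\n", grp++, bytes);
            data_len -= bytes;
        }
        return 0;
    }
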
@@ -1828,8 +1879,8 @@ out:
1828 * field of @lpfc_cmd for device with SLI-4 interface spec. 1879 * field of @lpfc_cmd for device with SLI-4 interface spec.
1829 * 1880 *
1830 * Return codes: 1881 * Return codes:
1831 * 1 - Error 1882 * 1 - Error
1832 * 0 - Success 1883 * 0 - Success
1833 **/ 1884 **/
1834static int 1885static int
1835lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 1886lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
@@ -1937,8 +1988,8 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1937 * lpfc_hba struct. 1988 * lpfc_hba struct.
1938 * 1989 *
1939 * Return codes: 1990 * Return codes:
1940 * 1 - Error 1991 * 1 - Error
1941 * 0 - Success 1992 * 0 - Success
1942 **/ 1993 **/
1943static inline int 1994static inline int
1944lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 1995lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 049fb9a17b3f..7a61455140b6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -212,7 +212,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
212 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe; 212 struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
213 213
214 /* If the next EQE is not valid then we are done */ 214 /* If the next EQE is not valid then we are done */
215 if (!bf_get(lpfc_eqe_valid, eqe)) 215 if (!bf_get_le32(lpfc_eqe_valid, eqe))
216 return NULL; 216 return NULL;
217 /* If the host has not yet processed the next entry then we are done */ 217 /* If the host has not yet processed the next entry then we are done */
218 if (((q->hba_index + 1) % q->entry_count) == q->host_index) 218 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -247,7 +247,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
247 /* while there are valid entries */ 247 /* while there are valid entries */
248 while (q->hba_index != q->host_index) { 248 while (q->hba_index != q->host_index) {
249 temp_eqe = q->qe[q->host_index].eqe; 249 temp_eqe = q->qe[q->host_index].eqe;
250 bf_set(lpfc_eqe_valid, temp_eqe, 0); 250 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
251 released++; 251 released++;
252 q->host_index = ((q->host_index + 1) % q->entry_count); 252 q->host_index = ((q->host_index + 1) % q->entry_count);
253 } 253 }
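
The bf_get/bf_set to bf_get_le32/bf_set_le32 conversions here matter because EQ and CQ entries are little-endian in DMA memory: the word must be brought to host byte order before the field is extracted. A sketch of an le32-aware accessor, assuming the same SHIFT/MASK convention as the bf_set sketch above; the struct and field are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical EQE: one 32-bit word, little-endian in DMA memory,
     * with a valid bit at bit 31. */
    struct eqe { uint32_t word0; };

    #define eqe_valid_SHIFT 31
    #define eqe_valid_MASK  0x1

    static inline uint32_t le32_to_host(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
        return __builtin_bswap32(v);    /* swap on big-endian hosts */
    #else
        return v;                       /* already host order on little-endian */
    #endif
    }

    /* bf_get_le32: convert the whole word to host order, then extract. */
    #define bf_get_le32(name, ptr) \
        ((le32_to_host((ptr)->word0) >> name##_SHIFT) & name##_MASK)

    int main(void)
    {
        /* Word value as read on a little-endian host: valid bit set. */
        struct eqe e = { .word0 = 0x80000000u };
        printf("valid=%u\n", bf_get_le32(eqe_valid, &e));
        return 0;
    }
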
@@ -285,7 +285,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
285 struct lpfc_cqe *cqe; 285 struct lpfc_cqe *cqe;
286 286
287 /* If the next CQE is not valid then we are done */ 287 /* If the next CQE is not valid then we are done */
288 if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 288 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
289 return NULL; 289 return NULL;
290 /* If the host has not yet processed the next entry then we are done */ 290 /* If the host has not yet processed the next entry then we are done */
291 if (((q->hba_index + 1) % q->entry_count) == q->host_index) 291 if (((q->hba_index + 1) % q->entry_count) == q->host_index)
@@ -321,7 +321,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
321 /* while there are valid entries */ 321 /* while there are valid entries */
322 while (q->hba_index != q->host_index) { 322 while (q->hba_index != q->host_index) {
323 temp_qe = q->qe[q->host_index].cqe; 323 temp_qe = q->qe[q->host_index].cqe;
324 bf_set(lpfc_cqe_valid, temp_qe, 0); 324 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
325 released++; 325 released++;
326 q->host_index = ((q->host_index + 1) % q->entry_count); 326 q->host_index = ((q->host_index + 1) % q->entry_count);
327 } 327 }
@@ -1659,6 +1659,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1659 case MBX_INIT_VPI: 1659 case MBX_INIT_VPI:
1660 case MBX_INIT_VFI: 1660 case MBX_INIT_VFI:
1661 case MBX_RESUME_RPI: 1661 case MBX_RESUME_RPI:
1662 case MBX_READ_EVENT_LOG_STATUS:
1663 case MBX_READ_EVENT_LOG:
1662 ret = mbxCommand; 1664 ret = mbxCommand;
1663 break; 1665 break;
1664 default: 1666 default:
@@ -4296,7 +4298,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4296 "2570 Failed to read FCoE parameters\n"); 4298 "2570 Failed to read FCoE parameters\n");
4297 4299
4298 /* Issue READ_REV to collect vpd and FW information. */ 4300 /* Issue READ_REV to collect vpd and FW information. */
4299 vpd_size = PAGE_SIZE; 4301 vpd_size = SLI4_PAGE_SIZE;
4300 vpd = kzalloc(vpd_size, GFP_KERNEL); 4302 vpd = kzalloc(vpd_size, GFP_KERNEL);
4301 if (!vpd) { 4303 if (!vpd) {
4302 rc = -ENOMEM; 4304 rc = -ENOMEM;
@@ -4891,9 +4893,34 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4891 mb->mbxOwner = OWN_CHIP; 4893 mb->mbxOwner = OWN_CHIP;
4892 4894
4893 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 4895 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
4894 /* First copy command data to host SLIM area */ 4896 /* Populate mbox extension offset word. */
4897 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
4898 *(((uint32_t *)mb) + pmbox->mbox_offset_word)
4899 = (uint8_t *)phba->mbox_ext
4900 - (uint8_t *)phba->mbox;
4901 }
4902
4903 /* Copy the mailbox extension data */
4904 if (pmbox->in_ext_byte_len && pmbox->context2) {
4905 lpfc_sli_pcimem_bcopy(pmbox->context2,
4906 (uint8_t *)phba->mbox_ext,
4907 pmbox->in_ext_byte_len);
4908 }
4909 /* Copy command data to host SLIM area */
4895 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4910 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
4896 } else { 4911 } else {
4912 /* Populate mbox extension offset word. */
4913 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
4914 *(((uint32_t *)mb) + pmbox->mbox_offset_word)
4915 = MAILBOX_HBA_EXT_OFFSET;
4916
4917 /* Copy the mailbox extension data */
4918 if (pmbox->in_ext_byte_len && pmbox->context2) {
4919 lpfc_memcpy_to_slim(phba->MBslimaddr +
4920 MAILBOX_HBA_EXT_OFFSET,
4921 pmbox->context2, pmbox->in_ext_byte_len);
4922
4923 }
4897 if (mb->mbxCommand == MBX_CONFIG_PORT) { 4924 if (mb->mbxCommand == MBX_CONFIG_PORT) {
4898 /* copy command data into host mbox for cmpl */ 4925 /* copy command data into host mbox for cmpl */
4899 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 4926 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
@@ -5003,15 +5030,22 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
5003 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5030 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
5004 /* copy results back to user */ 5031 /* copy results back to user */
5005 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5032 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
5033 /* Copy the mailbox extension data */
5034 if (pmbox->out_ext_byte_len && pmbox->context2) {
5035 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
5036 pmbox->context2,
5037 pmbox->out_ext_byte_len);
5038 }
5006 } else { 5039 } else {
5007 /* First copy command data */ 5040 /* First copy command data */
5008 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 5041 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
5009 MAILBOX_CMD_SIZE); 5042 MAILBOX_CMD_SIZE);
5010 if ((mb->mbxCommand == MBX_DUMP_MEMORY) && 5043 /* Copy the mailbox extension data */
5011 pmbox->context2) { 5044 if (pmbox->out_ext_byte_len && pmbox->context2) {
5012 lpfc_memcpy_from_slim((void *)pmbox->context2, 5045 lpfc_memcpy_from_slim(pmbox->context2,
5013 phba->MBslimaddr + DMP_RSP_OFFSET, 5046 phba->MBslimaddr +
5014 mb->un.varDmp.word_cnt); 5047 MAILBOX_HBA_EXT_OFFSET,
5048 pmbox->out_ext_byte_len);
5015 } 5049 }
5016 } 5050 }
5017 5051
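
The new extension handling brackets the command: optional input bytes are copied into the extension area (host mailbox or SLIM) before issue, and output bytes are copied back to the caller's buffer after completion. A self-contained sketch of that round trip; the sizes and names are hypothetical:

    #include <stdio.h>
    #include <string.h>

    #define MBOX_CORE_SIZE 256                 /* hypothetical sizes */
    #define MBOX_EXT_SIZE  64

    /* Shared command area: the fixed mailbox plus an extension region. */
    struct mbox_area {
        unsigned char core[MBOX_CORE_SIZE];
        unsigned char ext[MBOX_EXT_SIZE];
    };

    static void firmware_run(struct mbox_area *hw)
    {
        memset(hw->ext, 0xAB, MBOX_EXT_SIZE);  /* pretend FW writes a reply */
    }

    int main(void)
    {
        struct mbox_area hw;
        unsigned char in_ext[16]  = "request-extra";
        unsigned char out_ext[16];

        memset(&hw, 0, sizeof(hw));
        memcpy(hw.ext, in_ext, sizeof(in_ext));   /* copy extension data in */
        firmware_run(&hw);                        /* issue and poll */
        memcpy(out_ext, hw.ext, sizeof(out_ext)); /* copy extension data out */

        printf("out_ext[0] = 0x%02x\n", out_ext[0]);
        return 0;
    }
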
@@ -7104,13 +7138,11 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7104 */ 7138 */
7105 list_del_init(&abort_iocb->list); 7139 list_del_init(&abort_iocb->list);
7106 pring->txcmplq_cnt--; 7140 pring->txcmplq_cnt--;
7107 spin_unlock_irq(&phba->hbalock);
7108 7141
7109 /* Firmware could still be in progress of DMAing 7142 /* Firmware could still be in progress of DMAing
7110 * payload, so don't free data buffer till after 7143 * payload, so don't free data buffer till after
7111 * a hbeat. 7144 * a hbeat.
7112 */ 7145 */
7113 spin_lock_irq(&phba->hbalock);
7114 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 7146 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
7115 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 7147 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
7116 spin_unlock_irq(&phba->hbalock); 7148 spin_unlock_irq(&phba->hbalock);
@@ -7118,7 +7150,8 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7118 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 7150 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
7119 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED; 7151 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
7120 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 7152 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
7121 } 7153 } else
7154 spin_unlock_irq(&phba->hbalock);
7122 } 7155 }
7123 7156
7124 lpfc_sli_release_iocbq(phba, cmdiocb); 7157 lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -8133,6 +8166,12 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
8133 if (pmb->mbox_cmpl) { 8166 if (pmb->mbox_cmpl) {
8134 lpfc_sli_pcimem_bcopy(mbox, pmbox, 8167 lpfc_sli_pcimem_bcopy(mbox, pmbox,
8135 MAILBOX_CMD_SIZE); 8168 MAILBOX_CMD_SIZE);
8169 if (pmb->out_ext_byte_len &&
8170 pmb->context2)
8171 lpfc_sli_pcimem_bcopy(
8172 phba->mbox_ext,
8173 pmb->context2,
8174 pmb->out_ext_byte_len);
8136 } 8175 }
8137 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 8176 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8138 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 8177 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
@@ -8983,17 +9022,17 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
8983 int ecount = 0; 9022 int ecount = 0;
8984 uint16_t cqid; 9023 uint16_t cqid;
8985 9024
8986 if (bf_get(lpfc_eqe_major_code, eqe) != 0) { 9025 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
8987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9026 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8988 "0359 Not a valid slow-path completion " 9027 "0359 Not a valid slow-path completion "
8989 "event: majorcode=x%x, minorcode=x%x\n", 9028 "event: majorcode=x%x, minorcode=x%x\n",
8990 bf_get(lpfc_eqe_major_code, eqe), 9029 bf_get_le32(lpfc_eqe_major_code, eqe),
8991 bf_get(lpfc_eqe_minor_code, eqe)); 9030 bf_get_le32(lpfc_eqe_minor_code, eqe));
8992 return; 9031 return;
8993 } 9032 }
8994 9033
8995 /* Get the reference to the corresponding CQ */ 9034 /* Get the reference to the corresponding CQ */
8996 cqid = bf_get(lpfc_eqe_resource_id, eqe); 9035 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
8997 9036
8998 /* Search for completion queue pointer matching this cqid */ 9037 /* Search for completion queue pointer matching this cqid */
8999 speq = phba->sli4_hba.sp_eq; 9038 speq = phba->sli4_hba.sp_eq;
@@ -9221,12 +9260,12 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9221 uint16_t cqid; 9260 uint16_t cqid;
9222 int ecount = 0; 9261 int ecount = 0;
9223 9262
9224 if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) { 9263 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
9225 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9264 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9226 "0366 Not a valid fast-path completion " 9265 "0366 Not a valid fast-path completion "
9227 "event: majorcode=x%x, minorcode=x%x\n", 9266 "event: majorcode=x%x, minorcode=x%x\n",
9228 bf_get(lpfc_eqe_major_code, eqe), 9267 bf_get_le32(lpfc_eqe_major_code, eqe),
9229 bf_get(lpfc_eqe_minor_code, eqe)); 9268 bf_get_le32(lpfc_eqe_minor_code, eqe));
9230 return; 9269 return;
9231 } 9270 }
9232 9271
@@ -9239,7 +9278,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
9239 } 9278 }
9240 9279
9241 /* Get the reference to the corresponding CQ */ 9280 /* Get the reference to the corresponding CQ */
9242 cqid = bf_get(lpfc_eqe_resource_id, eqe); 9281 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
9243 if (unlikely(cqid != cq->queue_id)) { 9282 if (unlikely(cqid != cq->queue_id)) {
9244 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9283 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9245 "0368 Mismatched fast-path completion " 9284 "0368 Mismatched fast-path completion "
@@ -9506,7 +9545,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
9506 while (!list_empty(&queue->page_list)) { 9545 while (!list_empty(&queue->page_list)) {
9507 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 9546 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9508 list); 9547 list);
9509 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE, 9548 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
9510 dmabuf->virt, dmabuf->phys); 9549 dmabuf->virt, dmabuf->phys);
9511 kfree(dmabuf); 9550 kfree(dmabuf);
9512 } 9551 }
@@ -9532,13 +9571,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9532 struct lpfc_dmabuf *dmabuf; 9571 struct lpfc_dmabuf *dmabuf;
9533 int x, total_qe_count; 9572 int x, total_qe_count;
9534 void *dma_pointer; 9573 void *dma_pointer;
9574 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
9535 9575
9576 if (!phba->sli4_hba.pc_sli4_params.supported)
9577 hw_page_size = SLI4_PAGE_SIZE;
9536 9578
9537 queue = kzalloc(sizeof(struct lpfc_queue) + 9579 queue = kzalloc(sizeof(struct lpfc_queue) +
9538 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 9580 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9539 if (!queue) 9581 if (!queue)
9540 return NULL; 9582 return NULL;
9541 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE; 9583 queue->page_count = (ALIGN(entry_size * entry_count,
9584 hw_page_size))/hw_page_size;
9542 INIT_LIST_HEAD(&queue->list); 9585 INIT_LIST_HEAD(&queue->list);
9543 INIT_LIST_HEAD(&queue->page_list); 9586 INIT_LIST_HEAD(&queue->page_list);
9544 INIT_LIST_HEAD(&queue->child_list); 9587 INIT_LIST_HEAD(&queue->child_list);
@@ -9547,19 +9590,19 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9547 if (!dmabuf) 9590 if (!dmabuf)
9548 goto out_fail; 9591 goto out_fail;
9549 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 9592 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9550 PAGE_SIZE, &dmabuf->phys, 9593 hw_page_size, &dmabuf->phys,
9551 GFP_KERNEL); 9594 GFP_KERNEL);
9552 if (!dmabuf->virt) { 9595 if (!dmabuf->virt) {
9553 kfree(dmabuf); 9596 kfree(dmabuf);
9554 goto out_fail; 9597 goto out_fail;
9555 } 9598 }
9556 memset(dmabuf->virt, 0, PAGE_SIZE); 9599 memset(dmabuf->virt, 0, hw_page_size);
9557 dmabuf->buffer_tag = x; 9600 dmabuf->buffer_tag = x;
9558 list_add_tail(&dmabuf->list, &queue->page_list); 9601 list_add_tail(&dmabuf->list, &queue->page_list);
9559 /* initialize queue's entry array */ 9602 /* initialize queue's entry array */
9560 dma_pointer = dmabuf->virt; 9603 dma_pointer = dmabuf->virt;
9561 for (; total_qe_count < entry_count && 9604 for (; total_qe_count < entry_count &&
9562 dma_pointer < (PAGE_SIZE + dmabuf->virt); 9605 dma_pointer < (hw_page_size + dmabuf->virt);
9563 total_qe_count++, dma_pointer += entry_size) { 9606 total_qe_count++, dma_pointer += entry_size) {
9564 queue->qe[total_qe_count].address = dma_pointer; 9607 queue->qe[total_qe_count].address = dma_pointer;
9565 } 9608 }
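
Queue memory is now carved in the port's advertised page size (pc_sli4_params.if_page_sz) instead of the CPU's PAGE_SIZE, falling back to SLI4_PAGE_SIZE when the parameters are not supported; the page count is the entry footprint rounded up to that granularity. The arithmetic, as a standalone sketch with illustrative values:

    #include <stdio.h>

    #define SLI4_PAGE_SIZE 4096                /* driver default page size */
    #define ALIGN_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
        unsigned entry_size  = 64;             /* sizeof one queue entry */
        unsigned entry_count = 256;
        unsigned hw_page     = 0;              /* pc_sli4_params.if_page_sz */
        unsigned pages;

        if (!hw_page)                          /* params not advertised */
            hw_page = SLI4_PAGE_SIZE;

        pages = ALIGN_UP(entry_size * entry_count, hw_page) / hw_page;
        printf("%u page(s) of %u bytes each\n", pages, hw_page);
        return 0;
    }
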
@@ -9604,6 +9647,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9604 uint32_t shdr_status, shdr_add_status; 9647 uint32_t shdr_status, shdr_add_status;
9605 union lpfc_sli4_cfg_shdr *shdr; 9648 union lpfc_sli4_cfg_shdr *shdr;
9606 uint16_t dmult; 9649 uint16_t dmult;
9650 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
9651
9652 if (!phba->sli4_hba.pc_sli4_params.supported)
9653 hw_page_size = SLI4_PAGE_SIZE;
9607 9654
9608 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9655 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9609 if (!mbox) 9656 if (!mbox)
@@ -9653,6 +9700,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9653 break; 9700 break;
9654 } 9701 }
9655 list_for_each_entry(dmabuf, &eq->page_list, list) { 9702 list_for_each_entry(dmabuf, &eq->page_list, list) {
9703 memset(dmabuf->virt, 0, hw_page_size);
9656 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 9704 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9657 putPaddrLow(dmabuf->phys); 9705 putPaddrLow(dmabuf->phys);
9658 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 9706 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9715,6 +9763,11 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9715 int rc, length, status = 0; 9763 int rc, length, status = 0;
9716 uint32_t shdr_status, shdr_add_status; 9764 uint32_t shdr_status, shdr_add_status;
9717 union lpfc_sli4_cfg_shdr *shdr; 9765 union lpfc_sli4_cfg_shdr *shdr;
9766 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
9767
9768 if (!phba->sli4_hba.pc_sli4_params.supported)
9769 hw_page_size = SLI4_PAGE_SIZE;
9770
9718 9771
9719 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9772 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9720 if (!mbox) 9773 if (!mbox)
@@ -9752,6 +9805,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9752 break; 9805 break;
9753 } 9806 }
9754 list_for_each_entry(dmabuf, &cq->page_list, list) { 9807 list_for_each_entry(dmabuf, &cq->page_list, list) {
9808 memset(dmabuf->virt, 0, hw_page_size);
9755 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 9809 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9756 putPaddrLow(dmabuf->phys); 9810 putPaddrLow(dmabuf->phys);
9757 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 9811 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -9791,9 +9845,70 @@ out:
9791} 9845}
9792 9846
9793/** 9847/**
9848 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
9849 * @phba: HBA structure that indicates port to create a queue on.
9850 * @mq: The queue structure to use to create the mailbox queue.
9851 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
9852 * @cq: The completion queue to associate with this mq.
9853 *
9854 * This function provides fallback (fb) functionality when the
9855 * mq_create_ext fails on older FW generations. Its purpose is otherwise
9856 * identical to mq_create_ext.
9857 *
9858 * This routine cannot fail as all attributes were previously accessed and
9859 * initialized in mq_create_ext.
9860 **/
9861static void
9862lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
9863 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
9864{
9865 struct lpfc_mbx_mq_create *mq_create;
9866 struct lpfc_dmabuf *dmabuf;
9867 int length;
9868
9869 length = (sizeof(struct lpfc_mbx_mq_create) -
9870 sizeof(struct lpfc_sli4_cfg_mhdr));
9871 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9872 LPFC_MBOX_OPCODE_MQ_CREATE,
9873 length, LPFC_SLI4_MBX_EMBED);
9874 mq_create = &mbox->u.mqe.un.mq_create;
9875 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9876 mq->page_count);
9877 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9878 cq->queue_id);
9879 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9880 switch (mq->entry_count) {
9881 case 16:
9882 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9883 LPFC_MQ_CNT_16);
9884 break;
9885 case 32:
9886 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9887 LPFC_MQ_CNT_32);
9888 break;
9889 case 64:
9890 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9891 LPFC_MQ_CNT_64);
9892 break;
9893 case 128:
9894 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9895 LPFC_MQ_CNT_128);
9896 break;
9897 }
9898 list_for_each_entry(dmabuf, &mq->page_list, list) {
9899 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9900 putPaddrLow(dmabuf->phys);
9901 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9902 putPaddrHigh(dmabuf->phys);
9903 }
9904}
9905
9906/**
9794 * lpfc_mq_create - Create a mailbox Queue on the HBA 9907 * lpfc_mq_create - Create a mailbox Queue on the HBA
9795 * @phba: HBA structure that indicates port to create a queue on. 9908 * @phba: HBA structure that indicates port to create a queue on.
9796 * @mq: The queue structure to use to create the mailbox queue. 9909 * @mq: The queue structure to use to create the mailbox queue.
9910 * @cq: The completion queue to associate with this mq.
9911 * @subtype: The queue's subtype.
9797 * 9912 *
9798 * This function creates a mailbox queue, as detailed in @mq, on a port, 9913 * This function creates a mailbox queue, as detailed in @mq, on a port,
9799 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 9914 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
@@ -9809,31 +9924,43 @@ out:
9809 * memory this function will return ENOMEM. If the queue create mailbox command 9924 * memory this function will return ENOMEM. If the queue create mailbox command
9810 * fails this function will return ENXIO. 9925 * fails this function will return ENXIO.
9811 **/ 9926 **/
9812uint32_t 9927int32_t
9813lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 9928lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9814 struct lpfc_queue *cq, uint32_t subtype) 9929 struct lpfc_queue *cq, uint32_t subtype)
9815{ 9930{
9816 struct lpfc_mbx_mq_create *mq_create; 9931 struct lpfc_mbx_mq_create *mq_create;
9932 struct lpfc_mbx_mq_create_ext *mq_create_ext;
9817 struct lpfc_dmabuf *dmabuf; 9933 struct lpfc_dmabuf *dmabuf;
9818 LPFC_MBOXQ_t *mbox; 9934 LPFC_MBOXQ_t *mbox;
9819 int rc, length, status = 0; 9935 int rc, length, status = 0;
9820 uint32_t shdr_status, shdr_add_status; 9936 uint32_t shdr_status, shdr_add_status;
9821 union lpfc_sli4_cfg_shdr *shdr; 9937 union lpfc_sli4_cfg_shdr *shdr;
9938 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
9939
9940 if (!phba->sli4_hba.pc_sli4_params.supported)
9941 hw_page_size = SLI4_PAGE_SIZE;
9822 9942
9823 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9943 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9824 if (!mbox) 9944 if (!mbox)
9825 return -ENOMEM; 9945 return -ENOMEM;
9826 length = (sizeof(struct lpfc_mbx_mq_create) - 9946 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
9827 sizeof(struct lpfc_sli4_cfg_mhdr)); 9947 sizeof(struct lpfc_sli4_cfg_mhdr));
9828 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 9948 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9829 LPFC_MBOX_OPCODE_MQ_CREATE, 9949 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
9830 length, LPFC_SLI4_MBX_EMBED); 9950 length, LPFC_SLI4_MBX_EMBED);
9831 mq_create = &mbox->u.mqe.un.mq_create; 9951
9832 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 9952 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
9953 bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
9833 mq->page_count); 9954 mq->page_count);
9834 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 9955 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
9835 cq->queue_id); 9956 1);
9836 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 9957 bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
9958 &mq_create_ext->u.request, 1);
9959 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
9960 &mq_create_ext->u.request, 1);
9961 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
9962 cq->queue_id);
9963 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
9837 switch (mq->entry_count) { 9964 switch (mq->entry_count) {
9838 default: 9965 default:
9839 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9966 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -9843,31 +9970,47 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9843 return -EINVAL; 9970 return -EINVAL;
9844 /* otherwise default to smallest count (drop through) */ 9971 /* otherwise default to smallest count (drop through) */
9845 case 16: 9972 case 16:
9846 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 9973 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
9847 LPFC_MQ_CNT_16); 9974 LPFC_MQ_CNT_16);
9848 break; 9975 break;
9849 case 32: 9976 case 32:
9850 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 9977 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
9851 LPFC_MQ_CNT_32); 9978 LPFC_MQ_CNT_32);
9852 break; 9979 break;
9853 case 64: 9980 case 64:
9854 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 9981 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
9855 LPFC_MQ_CNT_64); 9982 LPFC_MQ_CNT_64);
9856 break; 9983 break;
9857 case 128: 9984 case 128:
9858 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 9985 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
9859 LPFC_MQ_CNT_128); 9986 LPFC_MQ_CNT_128);
9860 break; 9987 break;
9861 } 9988 }
9862 list_for_each_entry(dmabuf, &mq->page_list, list) { 9989 list_for_each_entry(dmabuf, &mq->page_list, list) {
9863 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 9990 memset(dmabuf->virt, 0, hw_page_size);
9991 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
9864 putPaddrLow(dmabuf->phys); 9992 putPaddrLow(dmabuf->phys);
9865 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 9993 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
9866 putPaddrHigh(dmabuf->phys); 9994 putPaddrHigh(dmabuf->phys);
9867 } 9995 }
9868 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 9996 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9997 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
9998 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
9999 &mq_create_ext->u.response);
10000 if (rc != MBX_SUCCESS) {
10001 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10002 "2795 MQ_CREATE_EXT failed with "
10003 "status x%x. Falling back to MQ_CREATE.\n",
10004 rc);
10005 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
10006 mq_create = &mbox->u.mqe.un.mq_create;
10007 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10008 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
10009 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
10010 &mq_create->u.response);
10011 }
10012
9869 /* The IOCTL status is embedded in the mailbox subheader. */ 10013 /* The IOCTL status is embedded in the mailbox subheader. */
9870 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9871 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10014 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9872 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10015 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9873 if (shdr_status || shdr_add_status || rc) { 10016 if (shdr_status || shdr_add_status || rc) {
@@ -9878,7 +10021,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9878 status = -ENXIO; 10021 status = -ENXIO;
9879 goto out; 10022 goto out;
9880 } 10023 }
9881 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9882 if (mq->queue_id == 0xFFFF) { 10024 if (mq->queue_id == 0xFFFF) {
9883 status = -ENXIO; 10025 status = -ENXIO;
9884 goto out; 10026 goto out;
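
The create path now tries the extended command first and, on a mailbox-level failure, rebuilds the same request as the legacy command and reissues it with the same allocated mailbox. The control flow, reduced to a sketch with hypothetical issue helpers:

    #include <stdio.h>

    enum { MBX_SUCCESS = 0, MBX_FAILURE = 1 };

    /* Hypothetical issue helpers: the extended create fails on old firmware. */
    static int issue_mq_create_ext(int *qid) { (void)qid; return MBX_FAILURE; }
    static int issue_mq_create(int *qid)     { *qid = 7;  return MBX_SUCCESS; }

    int main(void)
    {
        int qid = -1;
        int rc = issue_mq_create_ext(&qid);      /* try MQ_CREATE_EXT first */
        if (rc != MBX_SUCCESS) {
            fprintf(stderr,
                    "MQ_CREATE_EXT failed with status 0x%x, "
                    "falling back to MQ_CREATE\n", rc);
            /* rebuild the same mailbox as the legacy command and reissue */
            rc = issue_mq_create(&qid);
        }
        printf("rc=%d queue_id=%d\n", rc, qid);
        return rc == MBX_SUCCESS ? 0 : 1;
    }
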
@@ -9927,6 +10069,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9927 int rc, length, status = 0; 10069 int rc, length, status = 0;
9928 uint32_t shdr_status, shdr_add_status; 10070 uint32_t shdr_status, shdr_add_status;
9929 union lpfc_sli4_cfg_shdr *shdr; 10071 union lpfc_sli4_cfg_shdr *shdr;
10072 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
10073
10074 if (!phba->sli4_hba.pc_sli4_params.supported)
10075 hw_page_size = SLI4_PAGE_SIZE;
9930 10076
9931 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10077 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9932 if (!mbox) 10078 if (!mbox)
@@ -9942,6 +10088,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9942 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 10088 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
9943 cq->queue_id); 10089 cq->queue_id);
9944 list_for_each_entry(dmabuf, &wq->page_list, list) { 10090 list_for_each_entry(dmabuf, &wq->page_list, list) {
10091 memset(dmabuf->virt, 0, hw_page_size);
9945 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10092 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9946 putPaddrLow(dmabuf->phys); 10093 putPaddrLow(dmabuf->phys);
9947 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10094 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10010,6 +10157,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10010 int rc, length, status = 0; 10157 int rc, length, status = 0;
10011 uint32_t shdr_status, shdr_add_status; 10158 uint32_t shdr_status, shdr_add_status;
10012 union lpfc_sli4_cfg_shdr *shdr; 10159 union lpfc_sli4_cfg_shdr *shdr;
10160 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
10161
10162 if (!phba->sli4_hba.pc_sli4_params.supported)
10163 hw_page_size = SLI4_PAGE_SIZE;
10013 10164
10014 if (hrq->entry_count != drq->entry_count) 10165 if (hrq->entry_count != drq->entry_count)
10015 return -EINVAL; 10166 return -EINVAL;
@@ -10054,6 +10205,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10054 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10205 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
10055 LPFC_HDR_BUF_SIZE); 10206 LPFC_HDR_BUF_SIZE);
10056 list_for_each_entry(dmabuf, &hrq->page_list, list) { 10207 list_for_each_entry(dmabuf, &hrq->page_list, list) {
10208 memset(dmabuf->virt, 0, hw_page_size);
10057 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10209 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
10058 putPaddrLow(dmabuf->phys); 10210 putPaddrLow(dmabuf->phys);
10059 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10211 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -10626,7 +10778,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
10626 10778
10627 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 10779 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
10628 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 10780 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10629 if (reqlen > PAGE_SIZE) { 10781 if (reqlen > SLI4_PAGE_SIZE) {
10630 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10782 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10631 "2559 Block sgl registration required DMA " 10783 "2559 Block sgl registration required DMA "
10632 "size (%d) greater than a page\n", reqlen); 10784 "size (%d) greater than a page\n", reqlen);
@@ -10732,7 +10884,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
10732 /* Calculate the requested length of the dma memory */ 10884 /* Calculate the requested length of the dma memory */
10733 reqlen = cnt * sizeof(struct sgl_page_pairs) + 10885 reqlen = cnt * sizeof(struct sgl_page_pairs) +
10734 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 10886 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
10735 if (reqlen > PAGE_SIZE) { 10887 if (reqlen > SLI4_PAGE_SIZE) {
10736 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 10888 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10737 "0217 Block sgl registration required DMA " 10889 "0217 Block sgl registration required DMA "
10738 "size (%d) greater than a page\n", reqlen); 10890 "size (%d) greater than a page\n", reqlen);
@@ -11568,8 +11720,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
11568 * 11720 *
11569 * This routine is invoked to post rpi header templates to the 11721 * This routine is invoked to post rpi header templates to the
11570 * HBA consistent with the SLI-4 interface spec. This routine 11722 * HBA consistent with the SLI-4 interface spec. This routine
11571 * posts a PAGE_SIZE memory region to the port to hold up to 11723 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
11572 * PAGE_SIZE modulo 64 rpi context headers. 11724 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
11573 * 11725 *
11574 * This routine does not require any locks. It's usage is expected 11726 * This routine does not require any locks. It's usage is expected
11575 * to be driver load or reset recovery when the driver is 11727 * to be driver load or reset recovery when the driver is
@@ -11672,8 +11824,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
11672 * 11824 *
11673 * This routine is invoked to post rpi header templates to the 11825 * This routine is invoked to post rpi header templates to the
11674 * HBA consistent with the SLI-4 interface spec. This routine 11826 * HBA consistent with the SLI-4 interface spec. This routine
11675 * posts a PAGE_SIZE memory region to the port to hold up to 11827 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
11676 * PAGE_SIZE / 64 rpi context headers. 11828 * SLI4_PAGE_SIZE / 64 rpi context headers.
11677 * 11829 *
11678 * Returns 11830 * Returns
11679 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 11831 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
@@ -12040,9 +12192,11 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12040 phba->hba_flag |= FCF_DISC_INPROGRESS; 12192 phba->hba_flag |= FCF_DISC_INPROGRESS;
12041 spin_unlock_irq(&phba->hbalock); 12193 spin_unlock_irq(&phba->hbalock);
12042 /* Reset FCF round robin index bmask for new scan */ 12194 /* Reset FCF round robin index bmask for new scan */
12043 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 12195 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) {
12044 memset(phba->fcf.fcf_rr_bmask, 0, 12196 memset(phba->fcf.fcf_rr_bmask, 0,
12045 sizeof(*phba->fcf.fcf_rr_bmask)); 12197 sizeof(*phba->fcf.fcf_rr_bmask));
12198 phba->fcf.eligible_fcf_cnt = 0;
12199 }
12046 error = 0; 12200 error = 0;
12047 } 12201 }
12048fail_fcf_scan: 12202fail_fcf_scan:
@@ -12507,6 +12661,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12507 struct lpfc_hba *phba = vport->phba; 12661 struct lpfc_hba *phba = vport->phba;
12508 LPFC_MBOXQ_t *mb, *nextmb; 12662 LPFC_MBOXQ_t *mb, *nextmb;
12509 struct lpfc_dmabuf *mp; 12663 struct lpfc_dmabuf *mp;
12664 struct lpfc_nodelist *ndlp;
12510 12665
12511 spin_lock_irq(&phba->hbalock); 12666 spin_lock_irq(&phba->hbalock);
12512 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 12667 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12523,6 +12678,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12523 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 12678 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
12524 kfree(mp); 12679 kfree(mp);
12525 } 12680 }
12681 ndlp = (struct lpfc_nodelist *) mb->context2;
12682 if (ndlp) {
12683 lpfc_nlp_put(ndlp);
12684 mb->context2 = NULL;
12685 }
12526 } 12686 }
12527 list_del(&mb->list); 12687 list_del(&mb->list);
12528 mempool_free(mb, phba->mbox_mem_pool); 12688 mempool_free(mb, phba->mbox_mem_pool);
@@ -12532,6 +12692,15 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
12532 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 12692 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
12533 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 12693 (mb->u.mb.mbxCommand == MBX_REG_VPI))
12534 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12694 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12695 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
12696 ndlp = (struct lpfc_nodelist *) mb->context2;
12697 if (ndlp) {
12698 lpfc_nlp_put(ndlp);
12699 mb->context2 = NULL;
12700 }
12701 /* Unregister the RPI when mailbox complete */
12702 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
12703 }
12535 } 12704 }
12536 spin_unlock_irq(&phba->hbalock); 12705 spin_unlock_irq(&phba->hbalock);
12537} 12706}
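
The cleanup additions drop the node reference that a REG_LOGIN mailbox stashes in context2 and then NULL the pointer so the deferred completion cannot drop it a second time. A minimal reference-counting sketch of that idiom; the types are hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical node with a manual reference count. */
    struct node { int refcnt; };

    static void node_put(struct node *n)
    {
        if (--n->refcnt == 0) {
            printf("node freed\n");
            free(n);
        }
    }

    /* Mailbox that may hold a node reference in its context pointer. */
    struct mbox { struct node *ctx; };

    static void drop_stashed_ref(struct mbox *mb)
    {
        if (mb->ctx) {
            node_put(mb->ctx);   /* lpfc_nlp_put() in the driver */
            mb->ctx = NULL;      /* so a later completion can't drop it twice */
        }
    }

    int main(void)
    {
        struct node *n = malloc(sizeof(*n));
        if (!n)
            return 1;
        n->refcnt = 1;
        struct mbox mb = { .ctx = n };
        drop_stashed_ref(&mb);
        drop_stashed_ref(&mb);   /* safe: pointer already cleared */
        return 0;
    }
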
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index b4a639c47616..e3792151ca06 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -36,6 +36,7 @@ struct lpfc_cq_event {
36 struct lpfc_acqe_link acqe_link; 36 struct lpfc_acqe_link acqe_link;
37 struct lpfc_acqe_fcoe acqe_fcoe; 37 struct lpfc_acqe_fcoe acqe_fcoe;
38 struct lpfc_acqe_dcbx acqe_dcbx; 38 struct lpfc_acqe_dcbx acqe_dcbx;
39 struct lpfc_acqe_grp5 acqe_grp5;
39 struct lpfc_rcqe rcqe_cmpl; 40 struct lpfc_rcqe rcqe_cmpl;
40 struct sli4_wcqe_xri_aborted wcqe_axri; 41 struct sli4_wcqe_xri_aborted wcqe_axri;
41 struct lpfc_wcqe_complete wcqe_cmpl; 42 struct lpfc_wcqe_complete wcqe_cmpl;
@@ -110,6 +111,9 @@ typedef struct lpfcMboxq {
110 111
111 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 112 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
112 uint8_t mbox_flag; 113 uint8_t mbox_flag;
114 uint16_t in_ext_byte_len;
115 uint16_t out_ext_byte_len;
116 uint8_t mbox_offset_word;
113 struct lpfc_mcqe mcqe; 117 struct lpfc_mcqe mcqe;
114 struct lpfc_mbx_nembed_sge_virt *sge_array; 118 struct lpfc_mbx_nembed_sge_virt *sge_array;
115} LPFC_MBOXQ_t; 119} LPFC_MBOXQ_t;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 4a35e7b9bc5b..58bb4c81b54e 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -162,6 +162,7 @@ struct lpfc_fcf {
162#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ 162#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
163 uint32_t addr_mode; 163 uint32_t addr_mode;
164 uint16_t fcf_rr_init_indx; 164 uint16_t fcf_rr_init_indx;
165 uint32_t eligible_fcf_cnt;
165 struct lpfc_fcf_rec current_rec; 166 struct lpfc_fcf_rec current_rec;
166 struct lpfc_fcf_rec failover_rec; 167 struct lpfc_fcf_rec failover_rec;
167 struct timer_list redisc_wait; 168 struct timer_list redisc_wait;
@@ -492,8 +493,8 @@ void lpfc_sli4_queue_free(struct lpfc_queue *);
492uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); 493uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
493uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, 494uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
494 struct lpfc_queue *, uint32_t, uint32_t); 495 struct lpfc_queue *, uint32_t, uint32_t);
495uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, 496int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
496 struct lpfc_queue *, uint32_t); 497 struct lpfc_queue *, uint32_t);
497uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, 498uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
498 struct lpfc_queue *, uint32_t); 499 struct lpfc_queue *, uint32_t);
499uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, 500uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 013deec5dae8..5294c3a515a1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.10" 21#define LPFC_DRIVER_VERSION "8.3.12"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index ffd575c379f3..ab91359bde20 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -763,7 +763,9 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
763 spin_lock_irq(&phba->hbalock); 763 spin_lock_irq(&phba->hbalock);
764 list_for_each_entry(port_iterator, &phba->port_list, listentry) { 764 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
765 if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) { 765 if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
766 lpfc_printf_vlog(port_iterator, KERN_WARNING, LOG_VPORT, 766 if (!(port_iterator->load_flag & FC_UNLOADING))
767 lpfc_printf_vlog(port_iterator, KERN_ERR,
768 LOG_VPORT,
767 "1801 Create vport work array FAILED: " 769 "1801 Create vport work array FAILED: "
768 "cannot do scsi_host_get\n"); 770 "cannot do scsi_host_get\n");
769 continue; 771 continue;
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index ba8e128de238..bbb7e4bf30a3 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
2# Kernel configuration file for the MPT2SAS 2# Kernel configuration file for the MPT2SAS
3# 3#
4# This code is based on drivers/scsi/mpt2sas/Kconfig 4# This code is based on drivers/scsi/mpt2sas/Kconfig
5# Copyright (C) 2007-2009 LSI Corporation 5# Copyright (C) 2007-2010 LSI Corporation
6# (mailto:DL-MPTFusionLinux@lsi.com) 6# (mailto:DL-MPTFusionLinux@lsi.com)
7 7
8# This program is free software; you can redistribute it and/or 8# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 9958d847a88d..dada0a13223f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2009 LSI Corporation. 2 * Copyright (c) 2000-2010 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2.h 5 * Name: mpi2.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index cf0ac9f40c97..d4e9d6f8452e 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2009 LSI Corporation. 2 * Copyright (c) 2000-2010 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_cnfg.h 5 * Name: mpi2_cnfg.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index c4adf76b49d9..bd6c92b5fae5 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -2,7 +2,7 @@
2 Fusion-MPT MPI 2.0 Header File Change History 2 Fusion-MPT MPI 2.0 Header File Change History
3 ============================== 3 ==============================
4 4
5 Copyright (c) 2000-2009 LSI Corporation. 5 Copyright (c) 2000-2010 LSI Corporation.
6 6
7 --------------------------------------- 7 ---------------------------------------
8 Header Set Release Version: 02.00.14 8 Header Set Release Version: 02.00.14
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 6541945e97c3..220bf65a9216 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2009 LSI Corporation. 2 * Copyright (c) 2000-2010 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_init.h 5 * Name: mpi2_init.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 754938422f6a..f18f114922ba 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2009 LSI Corporation.
+ *  Copyright (c) 2000-2010 LSI Corporation.
  *
  *
  *           Name:  mpi2_ioc.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 73fcdbf92632..686b09b81219 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2009 LSI Corporation.
+ *  Copyright (c) 2000-2010 LSI Corporation.
  *
  *
  *           Name:  mpi2_tool.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 88e6eebc3159..b830d61684dd 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2009  LSI Corporation
+ * Copyright (C) 2007-2010  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -58,6 +58,7 @@
 #include <linux/sort.h>
 #include <linux/io.h>
 #include <linux/time.h>
+#include <linux/aer.h>
 
 #include "mpt2sas_base.h"
 
@@ -285,6 +286,9 @@ _base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
 	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
 		return;
 
+	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+		return;
+
 	switch (ioc_status) {
 
 /****************************************************************************
@@ -517,8 +521,18 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
 		desc = "IR Operation Status";
 		break;
 	case MPI2_EVENT_SAS_DISCOVERY:
-		desc = "Discovery";
-		break;
+	{
+		Mpi2EventDataSasDiscovery_t *event_data =
+		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
+		printk(MPT2SAS_INFO_FMT "Discovery: (%s)", ioc->name,
+		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+		    "start" : "stop");
+		if (event_data->DiscoveryStatus)
+			printk("discovery_status(0x%08x)",
+			    le32_to_cpu(event_data->DiscoveryStatus));
+		printk("\n");
+		return;
+	}
 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
 		desc = "SAS Broadcast Primitive";
 		break;
@@ -1243,6 +1257,9 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 		goto out_fail;
 	}
 
+	/* AER (Advanced Error Reporting) hooks */
+	pci_enable_pcie_error_reporting(pdev);
+
 	pci_set_master(pdev);
 
 	if (_base_config_dma_addressing(ioc, pdev) != 0) {
@@ -1253,7 +1270,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 	}
 
 	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
-		if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
+		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 			if (pio_sz)
 				continue;
 			pio_chip = (u64)pci_resource_start(pdev, i);
@@ -1261,15 +1278,18 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 		} else {
 			if (memap_sz)
 				continue;
-			ioc->chip_phys = pci_resource_start(pdev, i);
-			chip_phys = (u64)ioc->chip_phys;
-			memap_sz = pci_resource_len(pdev, i);
-			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
-			if (ioc->chip == NULL) {
-				printk(MPT2SAS_ERR_FMT "unable to map adapter "
-				    "memory!\n", ioc->name);
-				r = -EINVAL;
-				goto out_fail;
+			/* verify memory resource is valid before using */
+			if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+				ioc->chip_phys = pci_resource_start(pdev, i);
+				chip_phys = (u64)ioc->chip_phys;
+				memap_sz = pci_resource_len(pdev, i);
+				ioc->chip = ioremap(ioc->chip_phys, memap_sz);
+				if (ioc->chip == NULL) {
+					printk(MPT2SAS_ERR_FMT "unable to map "
+					    "adapter memory!\n", ioc->name);
+					r = -EINVAL;
+					goto out_fail;
+				}
 			}
 		}
 	}
@@ -1295,6 +1315,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
 	ioc->chip_phys = 0;
 	ioc->pci_irq = -1;
 	pci_release_selected_regions(ioc->pdev, ioc->bars);
+	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
 	return r;
 }
@@ -1898,7 +1919,10 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
 		    ioc->config_page, ioc->config_page_dma);
 	}
 
-	kfree(ioc->scsi_lookup);
+	if (ioc->scsi_lookup) {
+		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+		ioc->scsi_lookup = NULL;
+	}
 	kfree(ioc->hpr_lookup);
 	kfree(ioc->internal_lookup);
 }
@@ -2110,11 +2134,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	    ioc->name, (unsigned long long) ioc->request_dma));
 	total_sz += sz;
 
-	ioc->scsi_lookup = kcalloc(ioc->scsiio_depth,
-	    sizeof(struct request_tracker), GFP_KERNEL);
+	sz = ioc->scsiio_depth * sizeof(struct request_tracker);
+	ioc->scsi_lookup_pages = get_order(sz);
+	ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
+	    GFP_KERNEL, ioc->scsi_lookup_pages);
 	if (!ioc->scsi_lookup) {
-		printk(MPT2SAS_ERR_FMT "scsi_lookup: kcalloc failed\n",
-		    ioc->name);
+		printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
+		    "sz(%d)\n", ioc->name, (int)sz);
 		goto out;
 	}
 
@@ -3006,8 +3032,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	 * since epoch ~ midnight January 1, 1970.
 	 */
 	do_gettimeofday(&current_time);
-	mpi_request.TimeStamp = (current_time.tv_sec * 1000) +
-	    (current_time.tv_usec >> 3);
+	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+	    (current_time.tv_usec / 1000));
 
 	if (ioc->logging_level & MPT_DEBUG_INIT) {
 		u32 *mfp;
@@ -3179,7 +3205,7 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 	mpi_request->VP_ID = 0;
 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 		mpi_request->EventMasks[i] =
-		    le32_to_cpu(ioc->event_masks[i]);
+		    cpu_to_le32(ioc->event_masks[i]);
 	mpt2sas_base_put_smid_default(ioc, smid);
 	init_completion(&ioc->base_cmds.done);
 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
@@ -3516,7 +3542,9 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
 	    __func__));
 
 	_base_mask_interrupts(ioc);
+	ioc->shost_recovery = 1;
 	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+	ioc->shost_recovery = 0;
 	if (ioc->pci_irq) {
 		synchronize_irq(pdev->irq);
 		free_irq(ioc->pci_irq, ioc);
@@ -3527,6 +3555,7 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
 	ioc->pci_irq = -1;
 	ioc->chip_phys = 0;
 	pci_release_selected_regions(ioc->pdev, ioc->bars);
+	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
 	return;
 }
@@ -3560,8 +3589,10 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 
 	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
 	    sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
-	if (!ioc->pfacts)
+	if (!ioc->pfacts) {
+		r = -ENOMEM;
 		goto out_free_resources;
+	}
 
 	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
 		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
@@ -3607,6 +3638,15 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
 	mutex_init(&ioc->ctl_cmds.mutex);
 
+	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply) {
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
+
+	init_completion(&ioc->shost_recovery_done);
+
 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
 		ioc->event_masks[i] = -1;
 
@@ -3639,6 +3679,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 	pci_set_drvdata(ioc->pdev, NULL);
 	kfree(ioc->tm_cmds.reply);
 	kfree(ioc->transport_cmds.reply);
+	kfree(ioc->scsih_cmds.reply);
 	kfree(ioc->config_cmds.reply);
 	kfree(ioc->base_cmds.reply);
 	kfree(ioc->ctl_cmds.reply);
@@ -3646,6 +3687,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 	ioc->ctl_cmds.reply = NULL;
 	ioc->base_cmds.reply = NULL;
 	ioc->tm_cmds.reply = NULL;
+	ioc->scsih_cmds.reply = NULL;
 	ioc->transport_cmds.reply = NULL;
 	ioc->config_cmds.reply = NULL;
 	ioc->pfacts = NULL;
@@ -3675,6 +3717,7 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
 	kfree(ioc->base_cmds.reply);
 	kfree(ioc->tm_cmds.reply);
 	kfree(ioc->transport_cmds.reply);
+	kfree(ioc->scsih_cmds.reply);
 	kfree(ioc->config_cmds.reply);
 }
 
@@ -3811,9 +3854,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 	ioc->shost_recovery = 0;
+	complete(&ioc->shost_recovery_done);
 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
-	if (!r)
-		_base_reset_handler(ioc, MPT2_IOC_RUNNING);
 	return r;
 }
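
Two hunks above are endianness corrections: the event-mask loop now converts CPU-order masks to wire order with cpu_to_le32() (le32_to_cpu() was the inverse conversion), and the IOC-init TimeStamp is built as real milliseconds before being swapped with cpu_to_le64() (tv_usec >> 3 divides by 8, not by 1000). A standalone userspace sketch of the corrected computation, with glibc's htole64() standing in for the kernel's cpu_to_le64():

/* timestamp.c - milliseconds since the epoch, in little-endian wire order */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>
#include <sys/time.h>

int main(void)
{
	struct timeval now;
	uint64_t msec, wire;

	gettimeofday(&now, NULL);
	/* (tv_usec >> 3) would be microseconds/8; /1000 gives milliseconds */
	msec = (uint64_t)now.tv_sec * 1000 + now.tv_usec / 1000;
	/* the IOC expects little-endian regardless of host byte order */
	wire = htole64(msec);
	printf("host=%llu wire=0x%016llx\n",
	    (unsigned long long)msec, (unsigned long long)wire);
	return 0;
}
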
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index e18b0544c38f..b4afe431ac1e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2009  LSI Corporation
+ * Copyright (C) 2007-2010  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -69,11 +69,11 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"04.100.01.00"
-#define MPT2SAS_MAJOR_VERSION		04
+#define MPT2SAS_DRIVER_VERSION		"05.100.00.02"
+#define MPT2SAS_MAJOR_VERSION		05
 #define MPT2SAS_MINOR_VERSION		100
-#define MPT2SAS_BUILD_VERSION		01
-#define MPT2SAS_RELEASE_VERSION		00
+#define MPT2SAS_BUILD_VERSION		00
+#define MPT2SAS_RELEASE_VERSION		02
 
 /*
  * Set MPT2SAS_SG_DEPTH value based on user input.
@@ -119,7 +119,6 @@
 #define MPT2_IOC_PRE_RESET		1 /* prior to host reset */
 #define MPT2_IOC_AFTER_RESET		2 /* just after host reset */
 #define MPT2_IOC_DONE_RESET		3 /* links re-initialized */
-#define MPT2_IOC_RUNNING		4 /* shost running */
 
 /*
  * logging format
@@ -260,16 +259,6 @@ struct _internal_cmd {
 	u16	smid;
 };
 
-/*
- * SAS Topology Structures
- */
-
-#define MPTSAS_STATE_TR_SEND		0x0001
-#define MPTSAS_STATE_TR_COMPLETE	0x0002
-#define MPTSAS_STATE_CNTRL_SEND		0x0004
-#define MPTSAS_STATE_CNTRL_COMPLETE	0x0008
-
-#define MPT2SAS_REQ_SAS_CNTRL		0x0010
 
 /**
  * struct _sas_device - attached device information
@@ -307,7 +296,6 @@ struct _sas_device {
 	u16	slot;
 	u8	hidden_raid_component;
 	u8	responding;
-	u16	state;
 };
 
 /**
@@ -378,6 +366,7 @@ struct _sas_port {
  * @phy_id: unique phy id
  * @handle: device handle for this phy
  * @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
  */
 struct _sas_phy {
 	struct list_head port_siblings;
@@ -387,6 +376,7 @@ struct _sas_phy {
 	u8	phy_id;
 	u16	handle;
 	u16	attached_handle;
+	u8	phy_belongs_to_port;
 };
 
 /**
@@ -603,7 +593,6 @@ struct MPT2SAS_ADAPTER {
 	/* fw event handler */
 	char		firmware_event_name[20];
 	struct workqueue_struct	*firmware_event_thread;
-	u8		fw_events_off;
 	spinlock_t	fw_event_lock;
 	struct list_head fw_event_list;
 
@@ -611,6 +600,7 @@ struct MPT2SAS_ADAPTER {
 	int		aen_event_read_flag;
 	u8		broadcast_aen_busy;
 	u8		shost_recovery;
+	struct completion	shost_recovery_done;
 	spinlock_t	ioc_reset_in_progress_lock;
 	u8		ioc_link_reset_in_progress;
 	u8		ignore_loginfos;
@@ -688,7 +678,8 @@ struct MPT2SAS_ADAPTER {
 	dma_addr_t	request_dma;
 	u32		request_dma_sz;
 	struct request_tracker *scsi_lookup;
-	spinlock_t	scsi_lookup_lock;
+	ulong		scsi_lookup_pages;
+	spinlock_t	scsi_lookup_lock;
 	struct list_head free_list;
 	int		pending_io_count;
 	wait_queue_head_t reset_wq;
@@ -700,7 +691,7 @@ struct MPT2SAS_ADAPTER {
 	u16		max_sges_in_chain_message;
 	u16		chains_needed_per_io;
 	u16		chain_offset_value_for_main_message;
-	u16		chain_depth;
+	u32		chain_depth;
 
 	/* hi-priority queue */
 	u16		hi_priority_smid;
@@ -814,8 +805,9 @@ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
 /* scsih shared API */
 u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
     u32 reply);
-void mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
-    u8 type, u16 smid_task, ulong timeout);
+int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+    uint channel, uint id, uint lun, u8 type, u16 smid_task,
+    ulong timeout, struct scsi_cmnd *scmd);
 void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
 void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
 struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
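
The new scsi_lookup_pages member exists so the allocation and the free stay symmetric: _base_allocate_memory_pools() stores the order it passed to __get_free_pages(), and _base_release_memory_pools() hands the same order back to free_pages(). A userspace model of what get_order() computes, assuming 4 KiB pages; the table size below is invented for illustration:

/* order.c - smallest power-of-two page count covering sz bytes */
#include <stdio.h>

static int order_for(unsigned long sz, unsigned long page_size)
{
	unsigned long pages = (sz + page_size - 1) / page_size;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long sz = 10000UL * 24;	/* e.g. 10000 small trackers */
	int order = order_for(sz, 4096);

	printf("sz=%lu bytes -> order %d (%lu pages)\n",
	    sz, order, 1UL << order);
	return 0;
}
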
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index cf44b355bc97..e762dd3e2fcb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
  * This module provides common API for accessing firmware configuration pages
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2009  LSI Corporation
+ * Copyright (C) 2007-2010  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -1390,12 +1390,12 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 		goto out;
 	for (i = 0; i < config_page->NumElements; i++) {
-		if ((config_page->ConfigElement[i].ElementFlags &
+		if ((le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
 		    MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
 		    MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT)
 			continue;
-		if (config_page->ConfigElement[i].PhysDiskDevHandle ==
-		    pd_handle) {
+		if (le16_to_cpu(config_page->ConfigElement[i].
+		    PhysDiskDevHandle) == pd_handle) {
 			*volume_handle = le16_to_cpu(config_page->
 			    ConfigElement[i].VolDevHandle);
 			r = 0;
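
The mpt2sas_config_get_volume_handle() hunk byte-swaps ElementFlags and PhysDiskDevHandle with le16_to_cpu() before comparing them to host-order values; the old raw comparison only happened to work on little-endian machines. A standalone sketch of the pitfall, with glibc's htole16()/le16toh() in place of the kernel helpers:

/* swap.c - comparing a little-endian wire field against a host value */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	uint16_t wire = htole16(0x1234);	/* field as the IOC stores it */
	uint16_t pd_handle = 0x1234;		/* host-order handle to match */

	printf("raw compare:   %s\n",
	    wire == pd_handle ? "match (true only on LE hosts)" : "no match");
	printf("after le16toh: %s\n",
	    le16toh(wire) == pd_handle ? "match on any host" : "no match");
	return 0;
}
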
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index fa9bf83819d5..d88e9756d8f5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2009  LSI Corporation
+ * Copyright (C) 2007-2010  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -533,7 +533,7 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
 	if (!found) {
 		dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
 		    "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
-		    desc, tm_request->DevHandle, lun));
+		    desc, le16_to_cpu(tm_request->DevHandle), lun));
 		tm_reply = ioc->ctl_cmds.reply;
 		tm_reply->DevHandle = tm_request->DevHandle;
 		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -551,7 +551,8 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
 
 	dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
 	    "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
-	    desc, tm_request->DevHandle, lun, tm_request->TaskMID));
+	    desc, le16_to_cpu(tm_request->DevHandle), lun,
+	    le16_to_cpu(tm_request->TaskMID)));
 	return 0;
 }
 
@@ -647,9 +648,9 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 
 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
-		if (!mpi_request->FunctionDependent1 ||
-		    mpi_request->FunctionDependent1 >
-		    cpu_to_le16(ioc->facts.MaxDevHandle)) {
+		if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
+		    le16_to_cpu(mpi_request->FunctionDependent1) >
+		    ioc->facts.MaxDevHandle) {
 			ret = -EINVAL;
 			mpt2sas_base_free_smid(ioc, smid);
 			goto out;
@@ -743,8 +744,11 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 		    mpt2sas_base_get_sense_buffer_dma(ioc, smid);
 		priv_sense = mpt2sas_base_get_sense_buffer(ioc, smid);
 		memset(priv_sense, 0, SCSI_SENSE_BUFFERSIZE);
-		mpt2sas_base_put_smid_scsi_io(ioc, smid,
-		    le16_to_cpu(mpi_request->FunctionDependent1));
+		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+			mpt2sas_base_put_smid_scsi_io(ioc, smid,
+			    le16_to_cpu(mpi_request->FunctionDependent1));
+		else
+			mpt2sas_base_put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -752,6 +756,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 		Mpi2SCSITaskManagementRequest_t *tm_request =
 		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
 
+		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "TASK_MGMT: "
+		    "handle(0x%04x), task_type(0x%02x)\n", ioc->name,
+		    le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+
 		if (tm_request->TaskType ==
 		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
 		    tm_request->TaskType ==
@@ -762,7 +770,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 		}
 	}
 
-	mutex_lock(&ioc->tm_cmds.mutex);
 	mpt2sas_scsih_set_tm_flag(ioc, le16_to_cpu(
 	    tm_request->DevHandle));
 	mpt2sas_base_put_smid_hi_priority(ioc, smid);
@@ -818,7 +825,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
 		Mpi2SCSITaskManagementRequest_t *tm_request =
 		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
-		mutex_unlock(&ioc->tm_cmds.mutex);
 		mpt2sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
 		    tm_request->DevHandle));
 	} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
@@ -897,14 +903,13 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
 	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
 		printk(MPT2SAS_INFO_FMT "issue target reset: handle "
 		    "= (0x%04x)\n", ioc->name,
-		    mpi_request->FunctionDependent1);
+		    le16_to_cpu(mpi_request->FunctionDependent1));
 		mpt2sas_halt_firmware(ioc);
-		mutex_lock(&ioc->tm_cmds.mutex);
 		mpt2sas_scsih_issue_tm(ioc,
-		    mpi_request->FunctionDependent1, 0,
-		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
+		    le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
+		    0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
+		    NULL);
 		ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
-		mutex_unlock(&ioc->tm_cmds.mutex);
 	} else
 		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
 		    FORCE_BIG_HAMMER);
@@ -1373,7 +1378,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
 
 	dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: diag_buffer(0x%p), "
 	    "dma(0x%llx), sz(%d)\n", ioc->name, __func__, request_data,
-	    (unsigned long long)request_data_dma, mpi_request->BufferLength));
+	    (unsigned long long)request_data_dma,
+	    le32_to_cpu(mpi_request->BufferLength)));
 
 	for (i = 0; i < MPT2_PRODUCT_SPECIFIC_DWORDS; i++)
 		mpi_request->ProductSpecific[i] =
@@ -2334,8 +2340,8 @@ _ctl_version_nvdata_persistent_show(struct device *cdev,
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
 
-	return snprintf(buf, PAGE_SIZE, "%02xh\n",
-	    le16_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+	return snprintf(buf, PAGE_SIZE, "%08xh\n",
+	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
 }
 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
     _ctl_version_nvdata_persistent_show, NULL);
@@ -2354,8 +2360,8 @@ _ctl_version_nvdata_default_show(struct device *cdev,
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
 
-	return snprintf(buf, PAGE_SIZE, "%02xh\n",
-	    le16_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+	return snprintf(buf, PAGE_SIZE, "%08xh\n",
+	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
 }
 static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
     _ctl_version_nvdata_default_show, NULL);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 8a5eeb1a5c84..69916e46e04f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2009  LSI Corporation
+ * Copyright (C) 2007-2010  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 5308a25cb307..3dcddfeb6f4c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
  * Logging Support for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2009  LSI Corporation
+ * Copyright (C) 2007-2010  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index be171ed682e0..c5ff26a2a51d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2009  LSI Corporation
+ * Copyright (C) 2007-2010  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -52,6 +52,7 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/aer.h>
 #include <linux/raid_class.h>
 #include <linux/slab.h>
 
@@ -109,14 +110,16 @@ struct sense_info {
 };
 
 
+#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
+
 /**
  * struct fw_event_work - firmware event struct
  * @list: link list framework
  * @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
  * @ioc: per adapter object
  * @VF_ID: virtual function id
  * @VP_ID: virtual port id
- * @host_reset_handling: handling events during host reset
  * @ignore: flag meaning this event has been marked to ignore
  * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
  * @event_data: reply event data payload follows
@@ -125,11 +128,11 @@ struct sense_info {
  */
 struct fw_event_work {
 	struct list_head	list;
-	struct work_struct	work;
+	u8			cancel_pending_work;
+	struct delayed_work	delayed_work;
 	struct MPT2SAS_ADAPTER *ioc;
 	u8			VF_ID;
 	u8			VP_ID;
-	u8			host_reset_handling;
 	u8			ignore;
 	u16			event;
 	void			*event_data;
@@ -482,27 +485,17 @@ struct _sas_device *
 mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
     u64 sas_address)
 {
-	struct _sas_device *sas_device, *r;
-
-	r = NULL;
-	/* check the sas_device_init_list */
-	list_for_each_entry(sas_device, &ioc->sas_device_init_list,
-	    list) {
-		if (sas_device->sas_address != sas_address)
-			continue;
-		r = sas_device;
-		goto out;
-	}
-
-	/* then check the sas_device_list */
-	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
-		if (sas_device->sas_address != sas_address)
-			continue;
-		r = sas_device;
-		goto out;
-	}
- out:
-	return r;
+	struct _sas_device *sas_device;
+
+	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+		if (sas_device->sas_address == sas_address)
+			return sas_device;
+
+	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+		if (sas_device->sas_address == sas_address)
+			return sas_device;
+
+	return NULL;
 }
 
 /**
@@ -517,28 +510,17 @@ mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
 static struct _sas_device *
 _scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 {
-	struct _sas_device *sas_device, *r;
+	struct _sas_device *sas_device;
 
-	r = NULL;
-	if (ioc->wait_for_port_enable_to_complete) {
-		list_for_each_entry(sas_device, &ioc->sas_device_init_list,
-		    list) {
-			if (sas_device->handle != handle)
-				continue;
-			r = sas_device;
-			goto out;
-		}
-	} else {
-		list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
-			if (sas_device->handle != handle)
-				continue;
-			r = sas_device;
-			goto out;
-		}
-	}
+	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+		if (sas_device->handle == handle)
+			return sas_device;
 
- out:
-	return r;
+	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+		if (sas_device->handle == handle)
+			return sas_device;
+
+	return NULL;
 }
 
 /**
@@ -555,10 +537,15 @@ _scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
 {
 	unsigned long flags;
 
+	if (!sas_device)
+		return;
+
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	list_del(&sas_device->list);
-	memset(sas_device, 0, sizeof(struct _sas_device));
-	kfree(sas_device);
+	if (mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	    sas_device->sas_address)) {
+		list_del(&sas_device->list);
+		kfree(sas_device);
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 }
 
@@ -988,7 +975,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
 	u32 chain_offset;
 	u32 chain_length;
 	u32 chain_flags;
-	u32 sges_left;
+	int sges_left;
 	u32 sges_in_segment;
 	u32 sgl_flags;
 	u32 sgl_flags_last_element;
@@ -1009,7 +996,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
 
 	sg_scmd = scsi_sglist(scmd);
 	sges_left = scsi_dma_map(scmd);
-	if (!sges_left) {
+	if (sges_left < 0) {
 		sdev_printk(KERN_ERR, scmd->device, "pci_map_sg"
 		" failed: request for %d bytes!\n", scsi_bufflen(scmd));
 		return -ENOMEM;
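
These two hunks belong together: scsi_dma_map() returns a negative value on failure, and storing that in a u32 turns -1 into a huge positive count, so the old (!sges_left) test could never fire. A userspace sketch of the signed-versus-unsigned pitfall; fake_dma_map() is a stand-in for scsi_dma_map():

/* signcheck.c - why a negative error return needs a signed variable */
#include <stdio.h>

static int fake_dma_map(int fail)
{
	return fail ? -1 : 4;	/* negative on error, else segment count */
}

int main(void)
{
	unsigned int u = fake_dma_map(1);	/* -1 wraps to 0xFFFFFFFF */
	int s = fake_dma_map(1);

	printf("unsigned (!u):   %s\n", !u ? "error caught" : "error missed");
	printf("signed (s < 0):  %s\n", s < 0 ? "error caught" : "error missed");
	return 0;
}
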
@@ -1395,7 +1382,7 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
 	}
 
 	flags = le16_to_cpu(sas_device_pg0.Flags);
-	device_info = le16_to_cpu(sas_device_pg0.DeviceInfo);
+	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
 
 	sdev_printk(KERN_INFO, sdev,
 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
@@ -1963,65 +1950,78 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 	}
 }
 
+
 /**
  * mpt2sas_scsih_issue_tm - main routine for sending tm requests
  * @ioc: per adapter struct
  * @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
  * @lun: lun number
  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  * @smid_task: smid assigned to the task
  * @timeout: timeout in seconds
- * Context: The calling function needs to acquire the tm_cmds.mutex
+ * Context: user
  *
  * A generic API for sending task management requests to firmware.
  *
- * The ioc->tm_cmds.status flag should be MPT2_CMD_NOT_USED before calling
- * this API.
- *
  * The callback index is set inside `ioc->tm_cb_idx`.
  *
- * Return nothing.
+ * Return SUCCESS or FAILED.
  */
-void
-mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
-    u8 type, u16 smid_task, ulong timeout)
+int
+mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
+    uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
+    struct scsi_cmnd *scmd)
 {
 	Mpi2SCSITaskManagementRequest_t *mpi_request;
 	Mpi2SCSITaskManagementReply_t *mpi_reply;
 	u16 smid = 0;
 	u32 ioc_state;
 	unsigned long timeleft;
+	struct scsi_cmnd *scmd_lookup;
+	int rc;
 
+	mutex_lock(&ioc->tm_cmds.mutex);
 	if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
 		printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
 		    __func__, ioc->name);
-		return;
+		rc = FAILED;
+		goto err_out;
 	}
 
-	if (ioc->shost_recovery) {
+	if (ioc->shost_recovery || ioc->remove_host) {
 		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
 		    __func__, ioc->name);
-		return;
+		rc = FAILED;
+		goto err_out;
 	}
 
 	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
 	if (ioc_state & MPI2_DOORBELL_USED) {
 		dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "unexpected doorbell "
 		    "active!\n", ioc->name));
-		goto issue_host_reset;
+		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+		    FORCE_BIG_HAMMER);
+		rc = SUCCESS;
+		goto err_out;
 	}
 
 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
 		mpt2sas_base_fault_info(ioc, ioc_state &
 		    MPI2_DOORBELL_DATA_MASK);
-		goto issue_host_reset;
+		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+		    FORCE_BIG_HAMMER);
+		rc = SUCCESS;
+		goto err_out;
 	}
 
 	smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
 	if (!smid) {
 		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
 		    ioc->name, __func__);
-		return;
+		rc = FAILED;
+		goto err_out;
 	}
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
@@ -2035,21 +2035,24 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
 	mpi_request->DevHandle = cpu_to_le16(handle);
 	mpi_request->TaskType = type;
 	mpi_request->TaskMID = cpu_to_le16(smid_task);
-	mpi_request->VP_ID = 0;  /* TODO */
-	mpi_request->VF_ID = 0;
 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
 	mpt2sas_scsih_set_tm_flag(ioc, handle);
 	init_completion(&ioc->tm_cmds.done);
 	mpt2sas_base_put_smid_hi_priority(ioc, smid);
 	timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
-	mpt2sas_scsih_clear_tm_flag(ioc, handle);
 	if (!(ioc->tm_cmds.status & MPT2_CMD_COMPLETE)) {
 		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
 		    ioc->name, __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
-		if (!(ioc->tm_cmds.status & MPT2_CMD_RESET))
-			goto issue_host_reset;
+		if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) {
+			mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+			    FORCE_BIG_HAMMER);
+			rc = SUCCESS;
+			ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+			mpt2sas_scsih_clear_tm_flag(ioc, handle);
+			goto err_out;
+		}
 	}
 
 	if (ioc->tm_cmds.status & MPT2_CMD_REPLY_VALID) {
@@ -2059,12 +2062,57 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
 		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
 		    le32_to_cpu(mpi_reply->IOCLogInfo),
 		    le32_to_cpu(mpi_reply->TerminationCount)));
-		if (ioc->logging_level & MPT_DEBUG_TM)
+		if (ioc->logging_level & MPT_DEBUG_TM) {
 			_scsih_response_code(ioc, mpi_reply->ResponseCode);
+			if (mpi_reply->IOCStatus)
+				_debug_dump_mf(mpi_request,
+				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+		}
 	}
-	return;
- issue_host_reset:
-	mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER);
+
+	/* sanity check:
+	 * Check to see the commands were terminated.
+	 * This is only needed for eh callbacks, hence the scmd check.
+	 */
+	rc = FAILED;
+	if (scmd == NULL)
+		goto bypass_sanity_checks;
+	switch (type) {
+	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+		scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task);
+		if (scmd_lookup && (scmd_lookup->serial_number ==
+		    scmd->serial_number))
+			rc = FAILED;
+		else
+			rc = SUCCESS;
+		break;
+
+	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+		if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
+			rc = FAILED;
+		else
+			rc = SUCCESS;
+		break;
+
+	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+		if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
+			rc = FAILED;
+		else
+			rc = SUCCESS;
+		break;
+	}
+
+ bypass_sanity_checks:
+
+	mpt2sas_scsih_clear_tm_flag(ioc, handle);
+	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+	mutex_unlock(&ioc->tm_cmds.mutex);
+
+	return rc;
+
+ err_out:
+	mutex_unlock(&ioc->tm_cmds.mutex);
+	return rc;
 }
 
 /**
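
After this rework, mpt2sas_scsih_issue_tm() takes and releases the tm_cmds mutex itself and reports SUCCESS or FAILED to the error-handling callbacks by looking the command (or target, or LUN) back up after the task-management request: if it is still outstanding, the TM did not do its job. A plain C model of that decision; the names below are hypothetical, and only SUCCESS/FAILED mirror the SCSI EH codes:

/* tm_check.c - report success only if the tag is gone from the table */
#include <stdio.h>

#define SUCCESS	0
#define FAILED	1

static int still_outstanding(const int *table, int n, int tag)
{
	int i;

	for (i = 0; i < n; i++)
		if (table[i] == tag)
			return 1;	/* still pending: abort failed */
	return 0;
}

int main(void)
{
	int table[] = { 3, 7, 9 };	/* smids still in flight */
	int rc = still_outstanding(table, 3, 7) ? FAILED : SUCCESS;

	printf("abort of smid 7: %s\n", rc == SUCCESS ? "SUCCESS" : "FAILED");
	return 0;
}
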
@@ -2081,7 +2129,6 @@ _scsih_abort(struct scsi_cmnd *scmd)
 	u16 smid;
 	u16 handle;
 	int r;
-	struct scsi_cmnd *scmd_lookup;
 
 	printk(MPT2SAS_INFO_FMT "attempting task abort! scmd(%p)\n",
 	    ioc->name, scmd);
@@ -2116,19 +2163,10 @@ _scsih_abort(struct scsi_cmnd *scmd)
 
 	mpt2sas_halt_firmware(ioc);
 
-	mutex_lock(&ioc->tm_cmds.mutex);
 	handle = sas_device_priv_data->sas_target->handle;
-	mpt2sas_scsih_issue_tm(ioc, handle, sas_device_priv_data->lun,
-	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
-
-	/* sanity check - see whether command actually completed */
-	scmd_lookup = _scsih_scsi_lookup_get(ioc, smid);
-	if (scmd_lookup && (scmd_lookup->serial_number == scmd->serial_number))
-		r = FAILED;
-	else
-		r = SUCCESS;
-	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
-	mutex_unlock(&ioc->tm_cmds.mutex);
+	r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+	    scmd->device->id, scmd->device->lun,
+	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd);
 
  out:
 	printk(MPT2SAS_INFO_FMT "task abort: %s scmd(%p)\n",
@@ -2185,22 +2223,9 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
 		goto out;
 	}
 
-	mutex_lock(&ioc->tm_cmds.mutex);
-	mpt2sas_scsih_issue_tm(ioc, handle, 0,
-	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
-	    30);
-
-	/*
-	 * sanity check see whether all commands to this device been
-	 * completed
-	 */
-	if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
-	    scmd->device->lun, scmd->device->channel))
-		r = FAILED;
-	else
-		r = SUCCESS;
-	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
-	mutex_unlock(&ioc->tm_cmds.mutex);
+	r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+	    scmd->device->id, scmd->device->lun,
+	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd);
 
  out:
 	printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
@@ -2257,21 +2282,9 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
 		goto out;
 	}
 
-	mutex_lock(&ioc->tm_cmds.mutex);
-	mpt2sas_scsih_issue_tm(ioc, handle, 0,
-	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
-
-	/*
-	 * sanity check see whether all commands to this target been
-	 * completed
-	 */
-	if (_scsih_scsi_lookup_find_by_target(ioc, scmd->device->id,
-	    scmd->device->channel))
-		r = FAILED;
-	else
-		r = SUCCESS;
-	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
-	mutex_unlock(&ioc->tm_cmds.mutex);
+	r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+	    scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+	    30, scmd);
 
  out:
 	printk(MPT2SAS_INFO_FMT "target reset: %s scmd(%p)\n",
@@ -2325,8 +2338,9 @@ _scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
 
 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
-	INIT_WORK(&fw_event->work, _firmware_event_work);
-	queue_work(ioc->firmware_event_thread, &fw_event->work);
+	INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
+	queue_delayed_work(ioc->firmware_event_thread,
+	    &fw_event->delayed_work, 0);
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
 
@@ -2353,61 +2367,53 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
 
+
 /**
- * _scsih_fw_event_add - requeue an event
+ * _scsih_queue_rescan - queue a topology rescan from user context
  * @ioc: per adapter object
- * @fw_event: object describing the event
- * Context: This function will acquire ioc->fw_event_lock.
  *
  * Return nothing.
  */
 static void
-_scsih_fw_event_requeue(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
-    *fw_event, unsigned long delay)
+_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
 {
-	unsigned long flags;
-	if (ioc->firmware_event_thread == NULL)
-		return;
+	struct fw_event_work *fw_event;
 
-	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-	queue_work(ioc->firmware_event_thread, &fw_event->work);
-	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+	if (ioc->wait_for_port_enable_to_complete)
+		return;
+	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+	if (!fw_event)
+		return;
+	fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
+	fw_event->ioc = ioc;
+	_scsih_fw_event_add(ioc, fw_event);
 }
 
 /**
- * _scsih_fw_event_off - turn flag off preventing event handling
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
  * @ioc: per adapter object
  *
- * Used to prevent handling of firmware events during adapter reset
- * driver unload.
+ * Walk the firmware event queue, either killing timers, or waiting
+ * for outstanding events to complete
  *
  * Return nothing.
  */
 static void
-_scsih_fw_event_off(struct MPT2SAS_ADAPTER *ioc)
+_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
 {
-	unsigned long flags;
+	struct fw_event_work *fw_event, *next;
 
-	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-	ioc->fw_events_off = 1;
-	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-
-}
-
-/**
- * _scsih_fw_event_on - turn flag on allowing firmware event handling
- * @ioc: per adapter object
- *
- * Returns nothing.
- */
-static void
-_scsih_fw_event_on(struct MPT2SAS_ADAPTER *ioc)
-{
-	unsigned long flags;
+	if (list_empty(&ioc->fw_event_list) ||
+	     !ioc->firmware_event_thread || in_interrupt())
+		return;
 
-	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-	ioc->fw_events_off = 0;
-	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+	list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+		if (cancel_delayed_work(&fw_event->delayed_work)) {
+			_scsih_fw_event_free(ioc, fw_event);
+			continue;
+		}
+		fw_event->cancel_pending_work = 1;
+	}
 }
 
 /**
@@ -2571,25 +2577,24 @@ static void
 _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 {
 	Mpi2SCSITaskManagementRequest_t *mpi_request;
-	struct MPT2SAS_TARGET *sas_target_priv_data;
 	u16 smid;
 	struct _sas_device *sas_device;
 	unsigned long flags;
 	struct _tr_list *delayed_tr;
 
-	if (ioc->shost_recovery) {
-		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
-		    __func__, ioc->name);
+	if (ioc->shost_recovery || ioc->remove_host) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+		    "progress!\n", __func__, ioc->name));
 		return;
 	}
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
-	/* skip is hidden raid component */
-	if (sas_device && sas_device->hidden_raid_component)
+	if (sas_device && sas_device->hidden_raid_component) {
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		return;
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
 	smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
 	if (!smid) {
@@ -2598,36 +2603,16 @@ _scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
2598 return; 2603 return;
2599 INIT_LIST_HEAD(&delayed_tr->list); 2604 INIT_LIST_HEAD(&delayed_tr->list);
2600 delayed_tr->handle = handle; 2605 delayed_tr->handle = handle;
2601 delayed_tr->state = MPT2SAS_REQ_SAS_CNTRL; 2606 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
2602 list_add_tail(&delayed_tr->list,
2603 &ioc->delayed_tr_list);
2604 if (sas_device && sas_device->starget) {
2605 dewtprintk(ioc, starget_printk(KERN_INFO,
2606 sas_device->starget, "DELAYED:tr:handle(0x%04x), "
2607 "(open)\n", handle));
2608 } else {
2609 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2610 "DELAYED:tr:handle(0x%04x), (open)\n",
2611 ioc->name, handle));
2612 }
2613 return;
2614 }
2615
2616 if (sas_device) {
2617 sas_device->state |= MPTSAS_STATE_TR_SEND;
2618 sas_device->state |= MPT2SAS_REQ_SAS_CNTRL;
2619 if (sas_device->starget && sas_device->starget->hostdata) {
2620 sas_target_priv_data = sas_device->starget->hostdata;
2621 sas_target_priv_data->tm_busy = 1;
2622 dewtprintk(ioc, starget_printk(KERN_INFO,
2623 sas_device->starget, "tr:handle(0x%04x), (open)\n",
2624 handle));
2625 }
2626 } else {
-2627 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
-2628 		    "tr:handle(0x%04x), (open)\n", ioc->name, handle));
+2607 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+2608 		    "DELAYED:tr:handle(0x%04x), (open)\n",
+2609 		    ioc->name, handle));
+2610 		return;
-2629 	} +2611 	}
2630 2612
2613 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
2614 "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
2615 ioc->tm_tr_cb_idx));
2631 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); 2616 mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
2632 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); 2617 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2633 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 2618 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
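When no high-priority smid is free, the rewritten _scsih_tm_tr_send() above now parks only the device handle on ioc->delayed_tr_list (the old MPT2SAS_REQ_SAS_CNTRL state tracking is gone) and the completion path replays the head of the list. A rough stand-alone model of that bookkeeping; alloc_smid() and send_tr() are hypothetical stand-ins for mpt2sas_base_get_smid_hpr() and the actual TM send:

#include <stdlib.h>

struct tr_entry {
	struct tr_entry *next;
	unsigned short handle;
};

static struct tr_entry *tr_head, **tr_tail = &tr_head;

extern int alloc_smid(void);                       /* stand-in */
extern void send_tr(int smid, unsigned short handle); /* stand-in */

static void tm_tr_send(unsigned short handle)
{
	int smid = alloc_smid();

	if (!smid) {                       /* no slot free: park for later */
		struct tr_entry *e = malloc(sizeof(*e));
		if (!e)
			return;
		e->next = NULL;
		e->handle = handle;
		*tr_tail = e;
		tr_tail = &e->next;
		return;
	}
	send_tr(smid, handle);
}

/* called from the TM completion path to replay one parked handle */
static void tm_tr_replay_one(void)
{
	struct tr_entry *e = tr_head;

	if (!e)
		return;
	tr_head = e->next;
	if (!tr_head)
		tr_tail = &tr_head;
	tm_tr_send(e->handle);
	free(e);
}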
@@ -2657,35 +2642,15 @@ static u8
2657_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, 2642_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
2658 u8 msix_index, u32 reply) 2643 u8 msix_index, u32 reply)
2659{ 2644{
2660 unsigned long flags;
2661 u16 handle;
2662 struct _sas_device *sas_device;
2663 Mpi2SasIoUnitControlReply_t *mpi_reply = 2645 Mpi2SasIoUnitControlReply_t *mpi_reply =
2664 mpt2sas_base_get_reply_virt_addr(ioc, reply); 2646 mpt2sas_base_get_reply_virt_addr(ioc, reply);
2665 2647
-2666 	handle = le16_to_cpu(mpi_reply->DevHandle);
-2667
-2668 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-2669 	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-2670 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-2671
-2672 	if (sas_device) {
-2673 		sas_device->state |= MPTSAS_STATE_CNTRL_COMPLETE;
-2674 		if (sas_device->starget)
-2675 			dewtprintk(ioc, starget_printk(KERN_INFO,
-2676 			    sas_device->starget,
-2677 			    "sc_complete:handle(0x%04x), "
-2678 			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
-2679 			    handle, le16_to_cpu(mpi_reply->IOCStatus),
-2680 			    le32_to_cpu(mpi_reply->IOCLogInfo)));
-2681 	} else {
-2682 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
-2683 		    "sc_complete:handle(0x%04x), "
-2684 		    "ioc_status(0x%04x), loginfo(0x%08x)\n",
-2685 		    ioc->name, handle, le16_to_cpu(mpi_reply->IOCStatus),
-2686 		    le32_to_cpu(mpi_reply->IOCLogInfo)));
-2687 	}
-2688
+2648 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+2649 	    "sc_complete:handle(0x%04x), (open) "
+2650 	    "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+2651 	    ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+2652 	    le16_to_cpu(mpi_reply->IOCStatus),
+2653 	    le32_to_cpu(mpi_reply->IOCLogInfo)));
2689 return 1; 2654 return 1;
2690} 2655}
2691 2656
@@ -2709,87 +2674,63 @@ static u8
2709_scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 2674_scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
2710 u32 reply) 2675 u32 reply)
2711{ 2676{
2712 unsigned long flags;
2713 u16 handle; 2677 u16 handle;
-2714 	struct _sas_device *sas_device;
+2678 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
2715 Mpi2SCSITaskManagementReply_t *mpi_reply = 2679 Mpi2SCSITaskManagementReply_t *mpi_reply =
2716 mpt2sas_base_get_reply_virt_addr(ioc, reply); 2680 mpt2sas_base_get_reply_virt_addr(ioc, reply);
2717 Mpi2SasIoUnitControlRequest_t *mpi_request; 2681 Mpi2SasIoUnitControlRequest_t *mpi_request;
2718 u16 smid_sas_ctrl; 2682 u16 smid_sas_ctrl;
2719 struct MPT2SAS_TARGET *sas_target_priv_data;
2720 struct _tr_list *delayed_tr; 2683 struct _tr_list *delayed_tr;
2721 u8 rc;
2722 2684
-2723 	handle = le16_to_cpu(mpi_reply->DevHandle);
-2724 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-2725 	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-2726 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-2727
-2728 	if (sas_device) {
-2729 		sas_device->state |= MPTSAS_STATE_TR_COMPLETE;
-2730 		if (sas_device->starget) {
-2731 			dewtprintk(ioc, starget_printk(KERN_INFO,
-2732 			    sas_device->starget, "tr_complete:handle(0x%04x), "
-2733 			    "(%s) ioc_status(0x%04x), loginfo(0x%08x), "
-2734 			    "completed(%d)\n", sas_device->handle,
-2735 			    (sas_device->state & MPT2SAS_REQ_SAS_CNTRL) ?
-2736 			    "open" : "active",
-2737 			    le16_to_cpu(mpi_reply->IOCStatus),
-2738 			    le32_to_cpu(mpi_reply->IOCLogInfo),
-2739 			    le32_to_cpu(mpi_reply->TerminationCount)));
-2740 			if (sas_device->starget->hostdata) {
-2741 				sas_target_priv_data =
-2742 				    sas_device->starget->hostdata;
-2743 				sas_target_priv_data->tm_busy = 0;
-2744 			}
-2745 		}
-2746 	} else {
-2747 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
-2748 		    "tr_complete:handle(0x%04x), (open) ioc_status(0x%04x), "
-2749 		    "loginfo(0x%08x), completed(%d)\n", ioc->name,
-2750 		    handle, le16_to_cpu(mpi_reply->IOCStatus),
-2751 		    le32_to_cpu(mpi_reply->IOCLogInfo),
-2752 		    le32_to_cpu(mpi_reply->TerminationCount)));
-2753 	}
+2685 	if (ioc->shost_recovery || ioc->remove_host) {
+2686 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
+2687 		    "progress!\n", __func__, ioc->name));
+2688 		return 1;
+2689 	}
2754 2690
-2755 	if (!list_empty(&ioc->delayed_tr_list)) {
-2756 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
-2757 		    struct _tr_list, list);
-2758 		mpt2sas_base_free_smid(ioc, smid);
-2759 		if (delayed_tr->state & MPT2SAS_REQ_SAS_CNTRL)
-2760 			_scsih_tm_tr_send(ioc, delayed_tr->handle);
-2761 		list_del(&delayed_tr->list);
-2762 		kfree(delayed_tr);
-2763 		rc = 0; /* tells base_interrupt not to free mf */
-2764 	} else
-2765 		rc = 1;
-2766
-2767 	if (sas_device && !(sas_device->state & MPT2SAS_REQ_SAS_CNTRL))
-2768 		return rc;
-2769
-2770 	if (ioc->shost_recovery) {
-2771 		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
-2772 		    __func__, ioc->name);
-2773 		return rc;
-2774 	}
+2691 	mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
+2692 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
+2693 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+2694 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "spurious interrupt: "
+2695 		    "handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle,
+2696 		    le16_to_cpu(mpi_reply->DevHandle), smid));
+2697 		return 0;
+2698 	}
2775 2699
2700 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
2701 "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
2702 "loginfo(0x%08x), completed(%d)\n", ioc->name,
2703 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
2704 le32_to_cpu(mpi_reply->IOCLogInfo),
2705 le32_to_cpu(mpi_reply->TerminationCount)));
2706
2776 smid_sas_ctrl = mpt2sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); 2707 smid_sas_ctrl = mpt2sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
2777 if (!smid_sas_ctrl) { 2708 if (!smid_sas_ctrl) {
2778 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 2709 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2779 ioc->name, __func__); 2710 ioc->name, __func__);
-2780 		return rc;
+2711 		return 1;
 2781 	}
 2782
-2783 	if (sas_device)
-2784 		sas_device->state |= MPTSAS_STATE_CNTRL_SEND;
-2785
+2714 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sc_send:handle(0x%04x), "
+2715 	    "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid_sas_ctrl,
+2716 	    ioc->tm_sas_control_cb_idx));
 2786 	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl);
 2787 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
 2788 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 2789 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
-2790 	mpi_request->DevHandle = mpi_reply->DevHandle;
+2721 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
 2791 	mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);
-2792 	return rc;
+2723
2724 if (!list_empty(&ioc->delayed_tr_list)) {
2725 delayed_tr = list_entry(ioc->delayed_tr_list.next,
2726 struct _tr_list, list);
2727 mpt2sas_base_free_smid(ioc, smid);
2728 _scsih_tm_tr_send(ioc, delayed_tr->handle);
2729 list_del(&delayed_tr->list);
2730 kfree(delayed_tr);
2731 return 0; /* tells base_interrupt not to free mf */
2732 }
2733 return 1;
2793} 2734}
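The reworked completion handler above no longer trusts driver-side device state: it re-reads the original request frame for the smid and treats a DevHandle mismatch between request and reply as a spurious interrupt. A small stand-alone model of that check; request_frame() and reply_frame() are hypothetical stand-ins for mpt2sas_base_get_msg_frame() and mpt2sas_base_get_reply_virt_addr():

#include <stdint.h>

struct tm_request { uint16_t dev_handle; };
struct tm_reply   { uint16_t dev_handle; };

extern struct tm_request *request_frame(int smid);   /* stand-in */
extern struct tm_reply *reply_frame(uint32_t reply); /* stand-in */

/* returns 0 for a spurious completion, 1 when request and reply agree */
static int tm_complete_is_valid(int smid, uint32_t reply)
{
	struct tm_request *req = request_frame(smid);
	struct tm_reply *rep = reply_frame(reply);

	return req->dev_handle == rep->dev_handle;
}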
2794 2735
2795/** 2736/**
@@ -3021,25 +2962,32 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3021 2962
3022 scmd->scsi_done = done; 2963 scmd->scsi_done = done;
3023 sas_device_priv_data = scmd->device->hostdata; 2964 sas_device_priv_data = scmd->device->hostdata;
-3024 	if (!sas_device_priv_data) {
+2965 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
3025 scmd->result = DID_NO_CONNECT << 16; 2966 scmd->result = DID_NO_CONNECT << 16;
3026 scmd->scsi_done(scmd); 2967 scmd->scsi_done(scmd);
3027 return 0; 2968 return 0;
3028 } 2969 }
3029 2970
3030 sas_target_priv_data = sas_device_priv_data->sas_target; 2971 sas_target_priv_data = sas_device_priv_data->sas_target;
-3031 	if (!sas_target_priv_data || sas_target_priv_data->handle ==
-3032 	    MPT2SAS_INVALID_DEVICE_HANDLE || sas_target_priv_data->deleted) {
+2972 	/* invalid device handle */
+2973 	if (sas_target_priv_data->handle == MPT2SAS_INVALID_DEVICE_HANDLE) {
3033 scmd->result = DID_NO_CONNECT << 16; 2974 scmd->result = DID_NO_CONNECT << 16;
3034 scmd->scsi_done(scmd); 2975 scmd->scsi_done(scmd);
3035 return 0; 2976 return 0;
3036 } 2977 }
3037 2978
-3038 	/* see if we are busy with task managment stuff */
-3039 	if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
-3040 		return SCSI_MLQUEUE_DEVICE_BUSY;
-3041 	else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+2979 	/* host recovery or link resets sent via IOCTLs */
+2980 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
3042 return SCSI_MLQUEUE_HOST_BUSY; 2981 return SCSI_MLQUEUE_HOST_BUSY;
2982 /* device busy with task managment */
2983 else if (sas_device_priv_data->block || sas_target_priv_data->tm_busy)
2984 return SCSI_MLQUEUE_DEVICE_BUSY;
2985 /* device has been deleted */
2986 else if (sas_target_priv_data->deleted) {
2987 scmd->result = DID_NO_CONNECT << 16;
2988 scmd->scsi_done(scmd);
2989 return 0;
2990 }
3043 2991
3044 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 2992 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
3045 mpi_control = MPI2_SCSIIO_CONTROL_READ; 2993 mpi_control = MPI2_SCSIIO_CONTROL_READ;
@@ -3110,8 +3058,11 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
3110 } 3058 }
3111 } 3059 }
3112 3060
-3113 	mpt2sas_base_put_smid_scsi_io(ioc, smid,
-3114 	    sas_device_priv_data->sas_target->handle);
+3061 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
+3062 		mpt2sas_base_put_smid_scsi_io(ioc, smid,
3063 sas_device_priv_data->sas_target->handle);
3064 else
3065 mpt2sas_base_put_smid_default(ioc, smid);
3115 return 0; 3066 return 0;
3116 3067
3117 out: 3068 out:
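The reordered checks in _scsih_qcmd() matter: a missing target or an invalid or deleted handle fails the command outright with DID_NO_CONNECT, host-wide conditions push back on the whole host, and per-device conditions push back on just that device. A compact stand-alone model of the new decision ladder; the return values are illustrative stand-ins for the SCSI midlayer codes:

enum verdict { ACCEPT, FAIL_NO_CONNECT, BUSY_HOST, BUSY_DEVICE };

struct dev_state {
	int has_target, handle_valid, deleted, blocked, tm_busy;
};
struct host_state {
	int in_recovery, link_reset_in_progress;
};

static enum verdict qcmd_gate(const struct host_state *h,
			      const struct dev_state *d)
{
	if (!d->has_target)
		return FAIL_NO_CONNECT;        /* no private data: fail fast */
	if (!d->handle_valid)
		return FAIL_NO_CONNECT;        /* invalid device handle */
	if (h->in_recovery || h->link_reset_in_progress)
		return BUSY_HOST;              /* retry against the host */
	if (d->blocked || d->tm_busy)
		return BUSY_DEVICE;            /* retry against the device */
	if (d->deleted)
		return FAIL_NO_CONNECT;        /* device already removed */
	return ACCEPT;
}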
@@ -3301,8 +3252,8 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3301 struct sense_info data; 3252 struct sense_info data;
3302 _scsih_normalize_sense(scmd->sense_buffer, &data); 3253 _scsih_normalize_sense(scmd->sense_buffer, &data);
3303 printk(MPT2SAS_WARN_FMT "\t[sense_key,asc,ascq]: " 3254 printk(MPT2SAS_WARN_FMT "\t[sense_key,asc,ascq]: "
3304 "[0x%02x,0x%02x,0x%02x]\n", ioc->name, data.skey, 3255 "[0x%02x,0x%02x,0x%02x], count(%d)\n", ioc->name, data.skey,
3305 data.asc, data.ascq); 3256 data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
3306 } 3257 }
3307 3258
3308 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { 3259 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
@@ -3356,7 +3307,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3356 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; 3307 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
3357 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; 3308 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
3358 mpi_request.SlotStatus = 3309 mpi_request.SlotStatus =
-3359 	    MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
+3310 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
3360 mpi_request.DevHandle = cpu_to_le16(handle); 3311 mpi_request.DevHandle = cpu_to_le16(handle);
3361 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; 3312 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
3362 if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply, 3313 if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
@@ -4008,6 +3959,134 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
4008} 3959}
4009 3960
4010/** 3961/**
3962 * _scsih_check_access_status - check access flags
3963 * @ioc: per adapter object
3964 * @sas_address: sas address
3965 * @handle: sas device handle
3966 * @access_status: errors returned during discovery of the device
3967 *
3968 * Return 0 for success, else failure
3969 */
3970static u8
3971_scsih_check_access_status(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
3972 u16 handle, u8 access_status)
3973{
3974 u8 rc = 1;
3975 char *desc = NULL;
3976
3977 switch (access_status) {
3978 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
3979 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
3980 rc = 0;
3981 break;
3982 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
3983 desc = "sata capability failed";
3984 break;
3985 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
3986 desc = "sata affiliation conflict";
3987 break;
3988 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
3989 desc = "route not addressable";
3990 break;
3991 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
3992 desc = "smp error not addressable";
3993 break;
3994 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
3995 desc = "device blocked";
3996 break;
3997 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
3998 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
3999 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
4000 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
4001 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
4002 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
4003 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
4004 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
4005 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
4006 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
4007 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
4008 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
4009 desc = "sata initialization failed";
4010 break;
4011 default:
4012 desc = "unknown";
4013 break;
4014 }
4015
4016 if (!rc)
4017 return 0;
4018
4019 printk(MPT2SAS_ERR_FMT "discovery errors(%s): sas_address(0x%016llx), "
4020 "handle(0x%04x)\n", ioc->name, desc,
4021 (unsigned long long)sas_address, handle);
4022 return rc;
4023}
4024
4025static void
4026_scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4027{
4028 Mpi2ConfigReply_t mpi_reply;
4029 Mpi2SasDevicePage0_t sas_device_pg0;
4030 struct _sas_device *sas_device;
4031 u32 ioc_status;
4032 unsigned long flags;
4033 u64 sas_address;
4034 struct scsi_target *starget;
4035 struct MPT2SAS_TARGET *sas_target_priv_data;
4036 u32 device_info;
4037
4038 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
4039 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
4040 return;
4041
4042 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
4043 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
4044 return;
4045
4046 /* check if this is end device */
4047 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
4048 if (!(_scsih_is_end_device(device_info)))
4049 return;
4050
4051 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4052 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
4053 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
4054 sas_address);
4055
4056 if (!sas_device) {
4057 printk(MPT2SAS_ERR_FMT "device is not present "
4058 "handle(0x%04x), no sas_device!!!\n", ioc->name, handle);
4059 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4060 return;
4061 }
4062
4063 if (unlikely(sas_device->handle != handle)) {
4064 starget = sas_device->starget;
4065 sas_target_priv_data = starget->hostdata;
4066 starget_printk(KERN_INFO, starget, "handle changed from(0x%04x)"
4067 " to (0x%04x)!!!\n", sas_device->handle, handle);
4068 sas_target_priv_data->handle = handle;
4069 sas_device->handle = handle;
4070 }
4071 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4072
4073 /* check if device is present */
4074 if (!(le16_to_cpu(sas_device_pg0.Flags) &
4075 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
4076 printk(MPT2SAS_ERR_FMT "device is not present "
4077 "handle(0x%04x), flags!!!\n", ioc->name, handle);
4078 return;
4079 }
4080
4081 /* check if there were any issues with discovery */
4082 if (_scsih_check_access_status(ioc, sas_address, handle,
4083 sas_device_pg0.AccessStatus))
4084 return;
4085 _scsih_ublock_io_device(ioc, handle);
4086
4087}
4088
4089/**
4011 * _scsih_add_device - creating sas device object 4090 * _scsih_add_device - creating sas device object
4012 * @ioc: per adapter object 4091 * @ioc: per adapter object
4013 * @handle: sas device handle 4092 * @handle: sas device handle
@@ -4045,6 +4124,8 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
4045 return -1; 4124 return -1;
4046 } 4125 }
4047 4126
4127 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
4128
4048 /* check if device is present */ 4129 /* check if device is present */
4049 if (!(le16_to_cpu(sas_device_pg0.Flags) & 4130 if (!(le16_to_cpu(sas_device_pg0.Flags) &
4050 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 4131 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
@@ -4055,15 +4136,10 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
4055 return -1; 4136 return -1;
4056 } 4137 }
4057 4138
-4058 	/* check if there were any issus with discovery */
-4059 	if (sas_device_pg0.AccessStatus ==
-4060 	    MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED) {
+4139 	/* check if there were any issues with discovery */
+4140 	if (_scsih_check_access_status(ioc, sas_address, handle,
+4141 	    sas_device_pg0.AccessStatus))
4061 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4062 ioc->name, __FILE__, __LINE__, __func__);
4063 printk(MPT2SAS_ERR_FMT "AccessStatus = 0x%02x\n",
4064 ioc->name, sas_device_pg0.AccessStatus);
4065 return -1; 4142 return -1;
4066 }
4067 4143
4068 /* check if this is end device */ 4144 /* check if this is end device */
4069 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 4145 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
@@ -4073,17 +4149,14 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
4073 return -1; 4149 return -1;
4074 } 4150 }
4075 4151
4076 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
4077 4152
4078 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4153 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4079 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, 4154 sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
4080 sas_address); 4155 sas_address);
4081 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4156 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4082 4157
-4083 	if (sas_device) {
-4084 		_scsih_ublock_io_device(ioc, handle);
-4085 		return 0;
-4086 	}
+4158 	if (sas_device)
+4159 		return 0;
4087 4160
4088 sas_device = kzalloc(sizeof(struct _sas_device), 4161 sas_device = kzalloc(sizeof(struct _sas_device),
4089 GFP_KERNEL); 4162 GFP_KERNEL);
@@ -4126,67 +4199,38 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
4126} 4199}
4127 4200
4128/** 4201/**
-4129 * _scsih_remove_device - removing sas device object
+4202 * _scsih_remove_pd_device - removing sas device pd object
 4130 * @ioc: per adapter object
-4131 * @sas_device: the sas_device object
+4204 * @sas_device_delete: the sas_device object
 4132 *
+4206 * For hidden raid components, we do driver-fw handshake from
+4207 * hotplug work threads.
 4133 * Return nothing.
 4134 */
 4135static void
-4136_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
-4137 *sas_device)
+4211_scsih_remove_pd_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
+4212 sas_device)
4138{ 4213{
4139 struct MPT2SAS_TARGET *sas_target_priv_data;
4140 Mpi2SasIoUnitControlReply_t mpi_reply; 4214 Mpi2SasIoUnitControlReply_t mpi_reply;
4141 Mpi2SasIoUnitControlRequest_t mpi_request; 4215 Mpi2SasIoUnitControlRequest_t mpi_request;
-4142 	u16 device_handle, handle;
+4216 	u16 vol_handle, handle;
4143
4144 if (!sas_device)
4145 return;
4146 4217
-4147 	handle = sas_device->handle;
+4218 	handle = sas_device.handle;
 4148 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: handle(0x%04x),"
 4149 	    " sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
-4150 	    (unsigned long long) sas_device->sas_address));
+4221 	    (unsigned long long) sas_device.sas_address));
4151
4152 if (sas_device->starget && sas_device->starget->hostdata) {
4153 sas_target_priv_data = sas_device->starget->hostdata;
4154 sas_target_priv_data->deleted = 1;
4155 }
4156
4157 if (ioc->remove_host || ioc->shost_recovery || !handle)
4158 goto out;
4159 4222
-4160 	if ((sas_device->state & MPTSAS_STATE_TR_COMPLETE)) {
-4161 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
-4162 		    "target_reset handle(0x%04x)\n", ioc->name,
-4163 		    handle));
-4164 		goto skip_tr;
-4165 	}
-4166
-4167 	/* Target Reset to flush out all the outstanding IO */
-4168 	device_handle = (sas_device->hidden_raid_component) ?
-4169 	    sas_device->volume_handle : handle;
-4170 	if (device_handle) {
-4171 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
-4172 		    "handle(0x%04x)\n", ioc->name, device_handle));
-4173 		mutex_lock(&ioc->tm_cmds.mutex);
-4174 		mpt2sas_scsih_issue_tm(ioc, device_handle, 0,
-4175 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10);
-4176 		ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
-4177 		mutex_unlock(&ioc->tm_cmds.mutex);
-4178 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
-4179 		    "done: handle(0x%04x)\n", ioc->name, device_handle));
-4180 		if (ioc->shost_recovery)
-4181 			goto out;
-4182 	}
-4183 skip_tr:
-4184
-4185 	if ((sas_device->state & MPTSAS_STATE_CNTRL_COMPLETE)) {
-4186 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "\tskip "
-4187 		    "sas_cntrl handle(0x%04x)\n", ioc->name, handle));
-4188 		goto out;
-4189 	}
+4223 	vol_handle = sas_device.volume_handle;
+4224 	if (!vol_handle)
+4225 		return;
+4226 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset: "
+4227 	    "handle(0x%04x)\n", ioc->name, vol_handle));
+4228 	mpt2sas_scsih_issue_tm(ioc, vol_handle, 0, 0, 0,
+4229 	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, NULL);
+4230 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
+4231 	    "done: handle(0x%04x)\n", ioc->name, vol_handle));
+4232 	if (ioc->shost_recovery)
+4233 		return;
4190 4234
4191 /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */ 4235 /* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
4192 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: handle" 4236 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: handle"
@@ -4194,34 +4238,68 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, struct _sas_device
4194 memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); 4238 memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4195 mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 4239 mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4196 mpi_request.Operation = MPI2_SAS_OP_REMOVE_DEVICE; 4240 mpi_request.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
-4197 	mpi_request.DevHandle = handle;
-4198 	mpi_request.VF_ID = 0; /* TODO */
-4199 	mpi_request.VP_ID = 0;
+4241 	mpi_request.DevHandle = cpu_to_le16(handle);
 4200 	if ((mpt2sas_base_sas_iounit_control(ioc, &mpi_reply,
-4201 	    &mpi_request)) != 0) {
+4243 	    &mpi_request)) != 0)
 4202 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 4203 		    ioc->name, __FILE__, __LINE__, __func__);
-4204 	}
 4205
 4206 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sas_iounit: ioc_status"
 4207 	    "(0x%04x), loginfo(0x%08x)\n", ioc->name,
 4208 	    le16_to_cpu(mpi_reply.IOCStatus),
 4209 	    le32_to_cpu(mpi_reply.IOCLogInfo)));
 4210
-4211  out:
+4252 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle(0x%04x),"
+4253 	    " sas_addr(0x%016llx)\n", ioc->name, __func__, handle,
+4254 	    (unsigned long long) sas_device.sas_address));
+4255}
 4212
-4213 	_scsih_ublock_io_device(ioc, handle);
+4257/**
4258 * _scsih_remove_device - removing sas device object
4259 * @ioc: per adapter object
4260 * @sas_device_delete: the sas_device object
4261 *
4262 * Return nothing.
4263 */
4264static void
4265_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
4266 struct _sas_device *sas_device)
4267{
4268 struct _sas_device sas_device_backup;
4269 struct MPT2SAS_TARGET *sas_target_priv_data;
4214 4270
-4215 	mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
-4216 	    sas_device->sas_address_parent);
+4271 	if (!sas_device)
+4272 		return;
 4217
-4218 	printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
-4219 	    "(0x%016llx)\n", ioc->name, handle,
-4220 	    (unsigned long long) sas_device->sas_address);
+4274 	memcpy(&sas_device_backup, sas_device, sizeof(struct _sas_device));
 4221 	_scsih_sas_device_remove(ioc, sas_device);
 4222
-4223 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: handle"
-4224 	    "(0x%04x)\n", ioc->name, __func__, handle));
+4277 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
+4278 	    "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
4279 sas_device_backup.handle, (unsigned long long)
4280 sas_device_backup.sas_address));
4281
4282 if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
4283 sas_target_priv_data = sas_device_backup.starget->hostdata;
4284 sas_target_priv_data->deleted = 1;
4285 }
4286
4287 if (sas_device_backup.hidden_raid_component)
4288 _scsih_remove_pd_device(ioc, sas_device_backup);
4289
4290 _scsih_ublock_io_device(ioc, sas_device_backup.handle);
4291
4292 mpt2sas_transport_port_remove(ioc, sas_device_backup.sas_address,
4293 sas_device_backup.sas_address_parent);
4294
4295 printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
4296 "(0x%016llx)\n", ioc->name, sas_device_backup.handle,
4297 (unsigned long long) sas_device_backup.sas_address);
4298
4299 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: "
4300 "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
4301 sas_device_backup.handle, (unsigned long long)
4302 sas_device_backup.sas_address));
4225} 4303}
4226 4304
4227#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4305#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4331,7 +4409,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4331 _scsih_sas_topology_change_event_debug(ioc, event_data); 4409 _scsih_sas_topology_change_event_debug(ioc, event_data);
4332#endif 4410#endif
4333 4411
-4334 	if (ioc->shost_recovery)
+4412 	if (ioc->shost_recovery || ioc->remove_host)
4335 return; 4413 return;
4336 4414
4337 if (!ioc->sas_hba.num_phys) 4415 if (!ioc->sas_hba.num_phys)
@@ -4370,7 +4448,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4370 "expander event\n", ioc->name)); 4448 "expander event\n", ioc->name));
4371 return; 4449 return;
4372 } 4450 }
-4373 		if (ioc->shost_recovery)
+4451 		if (ioc->shost_recovery || ioc->remove_host)
4374 return; 4452 return;
4375 phy_number = event_data->StartPhyNum + i; 4453 phy_number = event_data->StartPhyNum + i;
4376 reason_code = event_data->PHY[i].PhyStatus & 4454 reason_code = event_data->PHY[i].PhyStatus &
@@ -4393,8 +4471,10 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
4393 mpt2sas_transport_update_links(ioc, sas_address, 4471 mpt2sas_transport_update_links(ioc, sas_address,
4394 handle, phy_number, link_rate); 4472 handle, phy_number, link_rate);
4395 4473
-4396 			if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
-4397 				_scsih_ublock_io_device(ioc, handle);
+4474 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+4475 				break;
4476
4477 _scsih_check_device(ioc, handle);
4398 break; 4478 break;
4399 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: 4479 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
4400 4480
@@ -4520,10 +4600,10 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
4520 event_data); 4600 event_data);
4521#endif 4601#endif
4522 4602
-4523 	if (!(event_data->ReasonCode ==
-4524 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
-4525 	    event_data->ReasonCode ==
-4526 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET))
+4603 	if (event_data->ReasonCode !=
+4604 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+4605 	    event_data->ReasonCode !=
+4606 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
4527 return; 4607 return;
4528 4608
4529 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4609 spin_lock_irqsave(&ioc->sas_device_lock, flags);
@@ -4630,7 +4710,6 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4630 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, 4710 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
4631 __func__)); 4711 __func__));
4632 4712
4633 mutex_lock(&ioc->tm_cmds.mutex);
4634 termination_count = 0; 4713 termination_count = 0;
4635 query_count = 0; 4714 query_count = 0;
4636 mpi_reply = ioc->tm_cmds.reply; 4715 mpi_reply = ioc->tm_cmds.reply;
@@ -4654,8 +4733,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4654 lun = sas_device_priv_data->lun; 4733 lun = sas_device_priv_data->lun;
4655 query_count++; 4734 query_count++;
4656 4735
-4657 		mpt2sas_scsih_issue_tm(ioc, handle, lun,
-4658 		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
+4736 		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+4737 		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
4659 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 4738 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
4660 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) 4739 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
4661 & MPI2_IOCSTATUS_MASK; 4740 & MPI2_IOCSTATUS_MASK;
@@ -4666,13 +4745,11 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
4666 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) 4745 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
4667 continue; 4746 continue;
4668 4747
-4669 		mpt2sas_scsih_issue_tm(ioc, handle, lun,
-4670 		    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30);
-4671 		ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+4748 		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+4749 		    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
4672 termination_count += le32_to_cpu(mpi_reply->TerminationCount); 4750 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
4673 } 4751 }
4674 ioc->broadcast_aen_busy = 0; 4752 ioc->broadcast_aen_busy = 0;
4675 mutex_unlock(&ioc->tm_cmds.mutex);
4676 4753
4677 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT 4754 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT
4678 "%s - exit, query_count = %d termination_count = %d\n", 4755 "%s - exit, query_count = %d termination_count = %d\n",
@@ -5442,6 +5519,26 @@ _scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
5442} 5519}
5443 5520
5444/** 5521/**
5522 * _scsih_prep_device_scan - initialize parameters prior to device scan
5523 * @ioc: per adapter object
5524 *
5525 * Set the deleted flag prior to device scan. If the device is found during
5526 * the scan, then we clear the deleted flag.
5527 */
5528static void
5529_scsih_prep_device_scan(struct MPT2SAS_ADAPTER *ioc)
5530{
5531 struct MPT2SAS_DEVICE *sas_device_priv_data;
5532 struct scsi_device *sdev;
5533
5534 shost_for_each_device(sdev, ioc->shost) {
5535 sas_device_priv_data = sdev->hostdata;
5536 if (sas_device_priv_data && sas_device_priv_data->sas_target)
5537 sas_device_priv_data->sas_target->deleted = 1;
5538 }
5539}
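_scsih_prep_device_scan() and the _scsih_mark_responding_*() helpers that follow form a mark-and-sweep pair: before the post-reset scan every target is marked deleted, the scan clears the mark on everything that still answers, and _scsih_remove_unresponding_sas_devices() sweeps whatever stayed marked. A toy stand-alone model of that lifecycle (plain C, illustrative only):

struct target { int deleted; int responding; };

static void prep_scan(struct target *t, int n)
{
	for (int i = 0; i < n; i++) {
		t[i].deleted = 1;          /* assume gone until proven alive */
		t[i].responding = 0;
	}
}

static void mark_responding(struct target *t)
{
	t->responding = 1;                 /* device answered the rescan */
	t->deleted = 0;
}

static int sweep(struct target *t, int n)
{
	int removed = 0;

	for (int i = 0; i < n; i++)
		if (!t[i].responding)
			removed++;         /* would be torn down here */
	return removed;
}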
5540
5541/**
5445 * _scsih_mark_responding_sas_device - mark a sas_devices as responding 5542 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
5446 * @ioc: per adapter object 5543 * @ioc: per adapter object
5447 * @sas_address: sas address 5544 * @sas_address: sas address
@@ -5467,10 +5564,13 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
5467 if (sas_device->sas_address == sas_address && 5564 if (sas_device->sas_address == sas_address &&
5468 sas_device->slot == slot && sas_device->starget) { 5565 sas_device->slot == slot && sas_device->starget) {
5469 sas_device->responding = 1; 5566 sas_device->responding = 1;
5470 sas_device->state = 0;
5471 starget = sas_device->starget; 5567 starget = sas_device->starget;
-5472 			sas_target_priv_data = starget->hostdata;
-5473 			sas_target_priv_data->tm_busy = 0;
+5568 			if (starget && starget->hostdata) {
+5569 				sas_target_priv_data = starget->hostdata;
+5570 				sas_target_priv_data->tm_busy = 0;
+5571 				sas_target_priv_data->deleted = 0;
+5572 			} else
+5573 				sas_target_priv_data = NULL;
5474 starget_printk(KERN_INFO, sas_device->starget, 5574 starget_printk(KERN_INFO, sas_device->starget,
5475 "handle(0x%04x), sas_addr(0x%016llx), enclosure " 5575 "handle(0x%04x), sas_addr(0x%016llx), enclosure "
5476 "logical id(0x%016llx), slot(%d)\n", handle, 5576 "logical id(0x%016llx), slot(%d)\n", handle,
@@ -5483,7 +5583,8 @@ _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
5483 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", 5583 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
5484 sas_device->handle); 5584 sas_device->handle);
5485 sas_device->handle = handle; 5585 sas_device->handle = handle;
-5486 			sas_target_priv_data->handle = handle;
+5586 			if (sas_target_priv_data)
+5587 				sas_target_priv_data->handle = handle;
5487 goto out; 5588 goto out;
5488 } 5589 }
5489 } 5590 }
@@ -5558,6 +5659,12 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
5558 spin_lock_irqsave(&ioc->raid_device_lock, flags); 5659 spin_lock_irqsave(&ioc->raid_device_lock, flags);
5559 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 5660 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
5560 if (raid_device->wwid == wwid && raid_device->starget) { 5661 if (raid_device->wwid == wwid && raid_device->starget) {
5662 starget = raid_device->starget;
5663 if (starget && starget->hostdata) {
5664 sas_target_priv_data = starget->hostdata;
5665 sas_target_priv_data->deleted = 0;
5666 } else
5667 sas_target_priv_data = NULL;
5561 raid_device->responding = 1; 5668 raid_device->responding = 1;
5562 starget_printk(KERN_INFO, raid_device->starget, 5669 starget_printk(KERN_INFO, raid_device->starget,
5563 "handle(0x%04x), wwid(0x%016llx)\n", handle, 5670 "handle(0x%04x), wwid(0x%016llx)\n", handle,
@@ -5567,9 +5674,8 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
5567 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", 5674 printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
5568 raid_device->handle); 5675 raid_device->handle);
5569 raid_device->handle = handle; 5676 raid_device->handle = handle;
-5570 			starget = raid_device->starget;
-5571 			sas_target_priv_data = starget->hostdata;
-5572 			sas_target_priv_data->handle = handle;
+5677 			if (sas_target_priv_data)
+5678 				sas_target_priv_data->handle = handle;
5573 goto out; 5679 goto out;
5574 } 5680 }
5575 } 5681 }
@@ -5694,13 +5800,13 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
5694} 5800}
5695 5801
5696/** 5802/**
-5697 * _scsih_remove_unresponding_devices - removing unresponding devices
+5803 * _scsih_remove_unresponding_sas_devices - removing unresponding devices
5698 * @ioc: per adapter object 5804 * @ioc: per adapter object
5699 * 5805 *
5700 * Return nothing. 5806 * Return nothing.
5701 */ 5807 */
5702static void 5808static void
-5703_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
+5809_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
5704{ 5810{
5705 struct _sas_device *sas_device, *sas_device_next; 5811 struct _sas_device *sas_device, *sas_device_next;
5706 struct _sas_node *sas_expander; 5812 struct _sas_node *sas_expander;
@@ -5722,8 +5828,6 @@ _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
5722 (unsigned long long) 5828 (unsigned long long)
5723 sas_device->enclosure_logical_id, 5829 sas_device->enclosure_logical_id,
5724 sas_device->slot); 5830 sas_device->slot);
5725 /* invalidate the device handle */
5726 sas_device->handle = 0;
5727 _scsih_remove_device(ioc, sas_device); 5831 _scsih_remove_device(ioc, sas_device);
5728 } 5832 }
5729 5833
@@ -5774,32 +5878,33 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
5774 case MPT2_IOC_PRE_RESET: 5878 case MPT2_IOC_PRE_RESET:
5775 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 5879 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
5776 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__)); 5880 "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
5777 _scsih_fw_event_off(ioc);
5778 break; 5881 break;
5779 case MPT2_IOC_AFTER_RESET: 5882 case MPT2_IOC_AFTER_RESET:
5780 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 5883 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
5781 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__)); 5884 "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
5885 if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) {
5886 ioc->scsih_cmds.status |= MPT2_CMD_RESET;
5887 mpt2sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
5888 complete(&ioc->scsih_cmds.done);
5889 }
5782 if (ioc->tm_cmds.status & MPT2_CMD_PENDING) { 5890 if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
5783 ioc->tm_cmds.status |= MPT2_CMD_RESET; 5891 ioc->tm_cmds.status |= MPT2_CMD_RESET;
5784 mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid); 5892 mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
5785 complete(&ioc->tm_cmds.done); 5893 complete(&ioc->tm_cmds.done);
5786 } 5894 }
-5787 		_scsih_fw_event_on(ioc);
+5895 		_scsih_fw_event_cleanup_queue(ioc);
5788 _scsih_flush_running_cmds(ioc); 5896 _scsih_flush_running_cmds(ioc);
5897 _scsih_queue_rescan(ioc);
5789 break; 5898 break;
5790 case MPT2_IOC_DONE_RESET: 5899 case MPT2_IOC_DONE_RESET:
5791 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " 5900 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
5792 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); 5901 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
5793 _scsih_sas_host_refresh(ioc); 5902 _scsih_sas_host_refresh(ioc);
5903 _scsih_prep_device_scan(ioc);
5794 _scsih_search_responding_sas_devices(ioc); 5904 _scsih_search_responding_sas_devices(ioc);
5795 _scsih_search_responding_raid_devices(ioc); 5905 _scsih_search_responding_raid_devices(ioc);
5796 _scsih_search_responding_expanders(ioc); 5906 _scsih_search_responding_expanders(ioc);
5797 break; 5907 break;
5798 case MPT2_IOC_RUNNING:
5799 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
5800 "MPT2_IOC_RUNNING\n", ioc->name, __func__));
5801 _scsih_remove_unresponding_devices(ioc);
5802 break;
5803 } 5908 }
5804} 5909}
5805 5910
@@ -5815,21 +5920,28 @@ static void
5815_firmware_event_work(struct work_struct *work) 5920_firmware_event_work(struct work_struct *work)
5816{ 5921{
5817 struct fw_event_work *fw_event = container_of(work, 5922 struct fw_event_work *fw_event = container_of(work,
-5818 	    struct fw_event_work, work);
+5923 	    struct fw_event_work, delayed_work.work);
5819 unsigned long flags; 5924 unsigned long flags;
5820 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; 5925 struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
5821 5926
5822 /* the queue is being flushed so ignore this event */ 5927 /* the queue is being flushed so ignore this event */
-5823 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-5824 	if (ioc->fw_events_off || ioc->remove_host) {
-5825 		spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+5928 	if (ioc->remove_host || fw_event->cancel_pending_work) {
5826 _scsih_fw_event_free(ioc, fw_event); 5929 _scsih_fw_event_free(ioc, fw_event);
5827 return; 5930 return;
5828 } 5931 }
5829 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
5830 5932
-5831 	if (ioc->shost_recovery) {
-5832 		_scsih_fw_event_requeue(ioc, fw_event, 1000);
+5933 	if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
+5934 		_scsih_fw_event_free(ioc, fw_event);
5935 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
5936 if (ioc->shost_recovery) {
5937 init_completion(&ioc->shost_recovery_done);
5938 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
5939 flags);
5940 wait_for_completion(&ioc->shost_recovery_done);
5941 } else
5942 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
5943 flags);
5944 _scsih_remove_unresponding_sas_devices(ioc);
5833 return; 5945 return;
5834 } 5946 }
5835 5947
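The MPT2SAS_RESCAN_AFTER_HOST_RESET branch above blocks the event thread until recovery finishes: it checks shost_recovery under the reset lock, arms a completion while still holding the lock, drops the lock, and only then sleeps, so a reset that completes in between cannot be missed. The same check-then-wait shape in stand-alone C, with a mutex and condition variable standing in for the spinlock and completion:

#include <pthread.h>

static pthread_mutex_t reset_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_done = PTHREAD_COND_INITIALIZER;
static int in_recovery;

/* event-thread side: sleep only if recovery is really in progress */
static void wait_for_recovery(void)
{
	pthread_mutex_lock(&reset_lock);
	while (in_recovery)
		pthread_cond_wait(&reset_done, &reset_lock);
	pthread_mutex_unlock(&reset_lock);
}

/* reset side: clear the flag and wake any waiter */
static void recovery_finished(void)
{
	pthread_mutex_lock(&reset_lock);
	in_recovery = 0;
	pthread_cond_broadcast(&reset_done);
	pthread_mutex_unlock(&reset_lock);
}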
@@ -5891,16 +6003,12 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
5891{ 6003{
5892 struct fw_event_work *fw_event; 6004 struct fw_event_work *fw_event;
5893 Mpi2EventNotificationReply_t *mpi_reply; 6005 Mpi2EventNotificationReply_t *mpi_reply;
5894 unsigned long flags;
5895 u16 event; 6006 u16 event;
6007 u16 sz;
5896 6008
5897 /* events turned off due to host reset or driver unloading */ 6009 /* events turned off due to host reset or driver unloading */
-5898 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-5899 	if (ioc->fw_events_off || ioc->remove_host) {
-5900 		spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-5901 		return 1;
-5902 	}
-5903 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+6010 	if (ioc->remove_host)
+6011 		return 1;
5904 6012
5905 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 6013 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
5906 event = le16_to_cpu(mpi_reply->Event); 6014 event = le16_to_cpu(mpi_reply->Event);
@@ -5947,8 +6055,8 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
5947 ioc->name, __FILE__, __LINE__, __func__); 6055 ioc->name, __FILE__, __LINE__, __func__);
5948 return 1; 6056 return 1;
5949 } 6057 }
-5950 		fw_event->event_data =
-5951 		    kzalloc(mpi_reply->EventDataLength*4, GFP_ATOMIC);
+6058 		sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+6059 		fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
5952 if (!fw_event->event_data) { 6060 if (!fw_event->event_data) {
5953 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 6061 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
5954 ioc->name, __FILE__, __LINE__, __func__); 6062 ioc->name, __FILE__, __LINE__, __func__);
@@ -5957,7 +6065,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
5957 } 6065 }
5958 6066
5959 memcpy(fw_event->event_data, mpi_reply->EventData, 6067 memcpy(fw_event->event_data, mpi_reply->EventData,
-5960 	    mpi_reply->EventDataLength*4);
+6068 	    sz);
5961 fw_event->ioc = ioc; 6069 fw_event->ioc = ioc;
5962 fw_event->VF_ID = mpi_reply->VF_ID; 6070 fw_event->VF_ID = mpi_reply->VF_ID;
5963 fw_event->VP_ID = mpi_reply->VP_ID; 6071 fw_event->VP_ID = mpi_reply->VP_ID;
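The event-copy change above is an endianness fix: EventDataLength is a little-endian 16-bit count of 4-byte words, so it must pass through le16_to_cpu() before the multiply, or big-endian hosts allocate and copy a garbage size. A stand-alone illustration of the conversion; real kernel code uses the asm/byteorder.h macros rather than this hand-rolled helper:

#include <stddef.h>
#include <stdint.h>

/* byte-swap helper in the spirit of le16_to_cpu() */
static uint16_t le16_to_host(const uint8_t raw[2])
{
	return (uint16_t)(raw[0] | (raw[1] << 8));
}

/* EventDataLength counts 32-bit words, so the byte size is length * 4 */
static size_t event_data_bytes(const uint8_t raw_len[2])
{
	return (size_t)le16_to_host(raw_len) * 4;
}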
@@ -6158,6 +6266,18 @@ _scsih_shutdown(struct pci_dev *pdev)
6158{ 6266{
6159 struct Scsi_Host *shost = pci_get_drvdata(pdev); 6267 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6160 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 6268 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
6269 struct workqueue_struct *wq;
6270 unsigned long flags;
6271
6272 ioc->remove_host = 1;
6273 _scsih_fw_event_cleanup_queue(ioc);
6274
6275 spin_lock_irqsave(&ioc->fw_event_lock, flags);
6276 wq = ioc->firmware_event_thread;
6277 ioc->firmware_event_thread = NULL;
6278 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
6279 if (wq)
6280 destroy_workqueue(wq);
6161 6281
6162 _scsih_ir_shutdown(ioc); 6282 _scsih_ir_shutdown(ioc);
6163 mpt2sas_base_detach(ioc); 6283 mpt2sas_base_detach(ioc);
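The shutdown path now mirrors _scsih_remove(): mark the host as going away, flush the event queue, then detach the workqueue pointer under fw_event_lock and destroy it only after dropping the lock, so no producer can queue work against a dying queue. The detach-then-destroy shape in miniature, with a mutex standing in for the spinlock and a hypothetical stop_queue() for destroy_workqueue():

#include <pthread.h>
#include <stddef.h>

struct workqueue;
extern void stop_queue(struct workqueue *wq);   /* stand-in, may block */

static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct workqueue *event_wq;

static void teardown_event_thread(void)
{
	struct workqueue *wq;

	pthread_mutex_lock(&ev_lock);
	wq = event_wq;          /* steal the pointer so producers see NULL */
	event_wq = NULL;
	pthread_mutex_unlock(&ev_lock);

	if (wq)
		stop_queue(wq); /* may sleep, so never under the lock */
}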
@@ -6184,7 +6304,7 @@ _scsih_remove(struct pci_dev *pdev)
6184 unsigned long flags; 6304 unsigned long flags;
6185 6305
6186 ioc->remove_host = 1; 6306 ioc->remove_host = 1;
-6187 	_scsih_fw_event_off(ioc);
+6307 	_scsih_fw_event_cleanup_queue(ioc);
6188 6308
6189 spin_lock_irqsave(&ioc->fw_event_lock, flags); 6309 spin_lock_irqsave(&ioc->fw_event_lock, flags);
6190 wq = ioc->firmware_event_thread; 6310 wq = ioc->firmware_event_thread;
@@ -6557,6 +6677,122 @@ _scsih_resume(struct pci_dev *pdev)
6557} 6677}
6558#endif /* CONFIG_PM */ 6678#endif /* CONFIG_PM */
6559 6679
6680/**
6681 * _scsih_pci_error_detected - Called when a PCI error is detected.
6682 * @pdev: PCI device struct
6683 * @state: PCI channel state
6684 *
6685 * Description: Called when a PCI error is detected.
6686 *
6687 * Return value:
6688 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
6689 */
6690static pci_ers_result_t
6691_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6692{
6693 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6694 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
6695
6696 printk(MPT2SAS_INFO_FMT "PCI error: detected callback, state(%d)!!\n",
6697 ioc->name, state);
6698
6699 switch (state) {
6700 case pci_channel_io_normal:
6701 return PCI_ERS_RESULT_CAN_RECOVER;
6702 case pci_channel_io_frozen:
6703 scsi_block_requests(ioc->shost);
6704 mpt2sas_base_stop_watchdog(ioc);
6705 mpt2sas_base_free_resources(ioc);
6706 return PCI_ERS_RESULT_NEED_RESET;
6707 case pci_channel_io_perm_failure:
6708 _scsih_remove(pdev);
6709 return PCI_ERS_RESULT_DISCONNECT;
6710 }
6711 return PCI_ERS_RESULT_NEED_RESET;
6712}
6713
6714/**
6715 * _scsih_pci_slot_reset - Called when PCI slot has been reset.
6716 * @pdev: PCI device struct
6717 *
6718 * Description: This routine is called by the pci error recovery
6719 * code after the PCI slot has been reset, just before we
6720 * should resume normal operations.
6721 */
6722static pci_ers_result_t
6723_scsih_pci_slot_reset(struct pci_dev *pdev)
6724{
6725 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6726 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
6727 int rc;
6728
6729 printk(MPT2SAS_INFO_FMT "PCI error: slot reset callback!!\n",
6730 ioc->name);
6731
6732 ioc->pdev = pdev;
6733 rc = mpt2sas_base_map_resources(ioc);
6734 if (rc)
6735 return PCI_ERS_RESULT_DISCONNECT;
6736
6737
6738 rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
6739 FORCE_BIG_HAMMER);
6740
6741 printk(MPT2SAS_WARN_FMT "hard reset: %s\n", ioc->name,
6742 (rc == 0) ? "success" : "failed");
6743
6744 if (!rc)
6745 return PCI_ERS_RESULT_RECOVERED;
6746 else
6747 return PCI_ERS_RESULT_DISCONNECT;
6748}
6749
6750/**
6751 * _scsih_pci_resume() - resume normal ops after PCI reset
6752 * @pdev: pointer to PCI device
6753 *
6754 * Called when the error recovery driver tells us that its
6755 * OK to resume normal operation. Use completion to allow
6756 * halted scsi ops to resume.
6757 */
6758static void
6759_scsih_pci_resume(struct pci_dev *pdev)
6760{
6761 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6762 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
6763
6764 printk(MPT2SAS_INFO_FMT "PCI error: resume callback!!\n", ioc->name);
6765
6766 pci_cleanup_aer_uncorrect_error_status(pdev);
6767 mpt2sas_base_start_watchdog(ioc);
6768 scsi_unblock_requests(ioc->shost);
6769}
6770
6771/**
6772 * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
6773 * @pdev: pointer to PCI device
6774 */
6775static pci_ers_result_t
6776_scsih_pci_mmio_enabled(struct pci_dev *pdev)
6777{
6778 struct Scsi_Host *shost = pci_get_drvdata(pdev);
6779 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
6780
6781 printk(MPT2SAS_INFO_FMT "PCI error: mmio enabled callback!!\n",
6782 ioc->name);
6783
6784 /* TODO - dump whatever for debugging purposes */
6785
6786 /* Request a slot reset. */
6787 return PCI_ERS_RESULT_NEED_RESET;
6788}
6789
6790static struct pci_error_handlers _scsih_err_handler = {
6791 .error_detected = _scsih_pci_error_detected,
6792 .mmio_enabled = _scsih_pci_mmio_enabled,
6793 .slot_reset = _scsih_pci_slot_reset,
6794 .resume = _scsih_pci_resume,
6795};
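For orientation, the AER core drives these hooks as a fixed sequence: error_detected() first (a frozen channel means block I/O and free resources, a permanent failure means disconnect), optionally mmio_enabled(), then slot_reset() to re-map and hard-reset the adapter, and finally resume() to restart the watchdog and unblock I/O. A schematic dispatcher showing that ordering, not the PCI core's actual implementation:

enum pci_ers { ERS_CAN_RECOVER, ERS_NEED_RESET, ERS_RECOVERED, ERS_DISCONNECT };

struct ers_ops {
	enum pci_ers (*error_detected)(int channel_frozen);
	enum pci_ers (*mmio_enabled)(void);
	enum pci_ers (*slot_reset)(void);
	void (*resume)(void);
};

static int recover(const struct ers_ops *ops, int channel_frozen)
{
	enum pci_ers rc = ops->error_detected(channel_frozen);

	if (rc == ERS_DISCONNECT)
		return -1;                   /* driver gave up the device */
	if (rc == ERS_CAN_RECOVER && ops->mmio_enabled)
		rc = ops->mmio_enabled();    /* may still demand a reset */
	if (rc == ERS_NEED_RESET)
		rc = ops->slot_reset();      /* re-map + hard reset */
	if (rc != ERS_RECOVERED)
		return -1;
	ops->resume();                       /* restart normal operation */
	return 0;
}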
6560 6796
6561static struct pci_driver scsih_driver = { 6797static struct pci_driver scsih_driver = {
6562 .name = MPT2SAS_DRIVER_NAME, 6798 .name = MPT2SAS_DRIVER_NAME,
@@ -6564,6 +6800,7 @@ static struct pci_driver scsih_driver = {
6564 .probe = _scsih_probe, 6800 .probe = _scsih_probe,
6565 .remove = __devexit_p(_scsih_remove), 6801 .remove = __devexit_p(_scsih_remove),
6566 .shutdown = _scsih_shutdown, 6802 .shutdown = _scsih_shutdown,
6803 .err_handler = &_scsih_err_handler,
6567#ifdef CONFIG_PM 6804#ifdef CONFIG_PM
6568 .suspend = _scsih_suspend, 6805 .suspend = _scsih_suspend,
6569 .resume = _scsih_resume, 6806 .resume = _scsih_resume,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index bd7ca2b49f81..2727c3b65104 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers 2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
5 * Copyright (C) 2007-2009 LSI Corporation 5 * Copyright (C) 2007-2010 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -465,6 +465,85 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
465 return rc; 465 return rc;
466} 466}
467 467
468
469/**
470 * _transport_delete_duplicate_port - (see below description)
471 * @ioc: per adapter object
472 * @sas_node: sas node object (either expander or sas host)
473 * @sas_address: sas address of device being added
474 * @phy_num: phy number
475 *
476 * This function is called when attempting to add a new port that is claiming
477 * the same phy resources already in use by another port. If we don't release
478 * the claimed phy resources, the sas transport layer will hang from the BUG
479 * in sas_port_add_phy.
480 *
481 * The reason we would hit this issue is because someone is changing the
482 * sas address of a device on the fly, meanwhile controller firmware sends
483 * EVENTs out of order when removing the previous instance of the device.
484 */
485static void
486_transport_delete_duplicate_port(struct MPT2SAS_ADAPTER *ioc,
487 struct _sas_node *sas_node, u64 sas_address, int phy_num)
488{
489 struct _sas_port *mpt2sas_port, *mpt2sas_port_duplicate;
490 struct _sas_phy *mpt2sas_phy;
491
492 printk(MPT2SAS_ERR_FMT "new device located at sas_addr(0x%016llx), "
493 "phy_id(%d)\n", ioc->name, (unsigned long long)sas_address,
494 phy_num);
495
496 mpt2sas_port_duplicate = NULL;
497 list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list, port_list) {
498 dev_printk(KERN_ERR, &mpt2sas_port->port->dev,
499 "existing device at sas_addr(0x%016llx), num_phys(%d)\n",
500 (unsigned long long)
501 mpt2sas_port->remote_identify.sas_address,
502 mpt2sas_port->num_phys);
503 list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list,
504 port_siblings) {
505 dev_printk(KERN_ERR, &mpt2sas_phy->phy->dev,
506 "phy_number(%d)\n", mpt2sas_phy->phy_id);
507 if (mpt2sas_phy->phy_id == phy_num)
508 mpt2sas_port_duplicate = mpt2sas_port;
509 }
510 }
511
512 if (!mpt2sas_port_duplicate)
513 return;
514
515 dev_printk(KERN_ERR, &mpt2sas_port_duplicate->port->dev,
516 "deleting duplicate device at sas_addr(0x%016llx), phy(%d)!!!!\n",
517 (unsigned long long)
518 mpt2sas_port_duplicate->remote_identify.sas_address, phy_num);
519 ioc->logging_level |= MPT_DEBUG_TRANSPORT;
520 mpt2sas_transport_port_remove(ioc,
521 mpt2sas_port_duplicate->remote_identify.sas_address,
522 sas_node->sas_address);
523 ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
524}
525
526/**
527 * _transport_sanity_check - sanity check when adding a new port
528 * @ioc: per adapter object
529 * @sas_node: sas node object (either expander or sas host)
530 * @sas_address: sas address of device being added
531 *
532 * See the explanation above from _transport_delete_duplicate_port
533 */
534static void
535_transport_sanity_check(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_node,
536 u64 sas_address)
537{
538 int i;
539
540 for (i = 0; i < sas_node->num_phys; i++)
541 if (sas_node->phy[i].remote_identify.sas_address == sas_address)
542 if (sas_node->phy[i].phy_belongs_to_port)
543 _transport_delete_duplicate_port(ioc, sas_node,
544 sas_address, i);
545}
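The two transport helpers above hinge on the new phy_belongs_to_port flag: port_add() sets it on every phy it claims, port_remove() clears it, and the sanity check evicts a stale port whose phy is about to be re-claimed so sas_port_add_phy() cannot BUG on a doubly-owned phy. The ownership protocol in miniature; evict_port_claiming() is a hypothetical stand-in for mpt2sas_transport_port_remove() on the stale port:

#define MAX_PHYS 8

struct phy { unsigned long long remote_addr; int belongs_to_port; };

extern void evict_port_claiming(struct phy *p);   /* stand-in */

static void sanity_check(struct phy *phys, int nphys,
			 unsigned long long new_addr)
{
	for (int i = 0; i < nphys; i++)
		if (phys[i].remote_addr == new_addr &&
		    phys[i].belongs_to_port)
			evict_port_claiming(&phys[i]); /* free the claim */
}

static void port_add_phy(struct phy *p) { p->belongs_to_port = 1; }
static void port_del_phy(struct phy *p) { p->belongs_to_port = 0; }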
546
468/** 547/**
469 * mpt2sas_transport_port_add - insert port to the list 548 * mpt2sas_transport_port_add - insert port to the list
470 * @ioc: per adapter object 549 * @ioc: per adapter object
@@ -522,6 +601,9 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
522 goto out_fail; 601 goto out_fail;
523 } 602 }
524 603
604 _transport_sanity_check(ioc, sas_node,
605 mpt2sas_port->remote_identify.sas_address);
606
525 for (i = 0; i < sas_node->num_phys; i++) { 607 for (i = 0; i < sas_node->num_phys; i++) {
526 if (sas_node->phy[i].remote_identify.sas_address != 608 if (sas_node->phy[i].remote_identify.sas_address !=
527 mpt2sas_port->remote_identify.sas_address) 609 mpt2sas_port->remote_identify.sas_address)
@@ -553,6 +635,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
553 mpt2sas_port->remote_identify.sas_address, 635 mpt2sas_port->remote_identify.sas_address,
554 mpt2sas_phy->phy_id); 636 mpt2sas_phy->phy_id);
555 sas_port_add_phy(port, mpt2sas_phy->phy); 637 sas_port_add_phy(port, mpt2sas_phy->phy);
638 mpt2sas_phy->phy_belongs_to_port = 1;
556 } 639 }
557 640
558 mpt2sas_port->port = port; 641 mpt2sas_port->port = port;
@@ -651,6 +734,7 @@ mpt2sas_transport_port_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
651 (unsigned long long) 734 (unsigned long long)
652 mpt2sas_port->remote_identify.sas_address, 735 mpt2sas_port->remote_identify.sas_address,
653 mpt2sas_phy->phy_id); 736 mpt2sas_phy->phy_id);
737 mpt2sas_phy->phy_belongs_to_port = 0;
654 sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy); 738 sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
655 list_del(&mpt2sas_phy->port_siblings); 739 list_del(&mpt2sas_phy->port_siblings);
656 } 740 }
@@ -1341,7 +1425,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1341 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 1425 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
1342 req->sense_len = sizeof(*mpi_reply); 1426 req->sense_len = sizeof(*mpi_reply);
1343 req->resid_len = 0; 1427 req->resid_len = 0;
-1344 		rsp->resid_len -= mpi_reply->ResponseDataLength;
+1428 		rsp->resid_len -=
+1429 		    le16_to_cpu(mpi_reply->ResponseDataLength);
1345 } else { 1430 } else {
1346 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1431 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
1347 "%s - no reply\n", ioc->name, __func__)); 1432 "%s - no reply\n", ioc->name, __func__));
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index d722235111a8..716d1785cda7 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -13,112 +13,116 @@
13#include "wd33c93.h" 13#include "wd33c93.h"
14#include "mvme147.h" 14#include "mvme147.h"
15 15
-16#include<linux/stat.h>
+16#include <linux/stat.h>
17 17
18#define HDATA(ptr) ((struct WD33C93_hostdata *)((ptr)->hostdata))
19 18
20static struct Scsi_Host *mvme147_host = NULL; 19static struct Scsi_Host *mvme147_host = NULL;
21 20
-22static irqreturn_t mvme147_intr (int irq, void *dummy)
-23{
-24	if (irq == MVME147_IRQ_SCSI_PORT)
-25		wd33c93_intr (mvme147_host);
+21static irqreturn_t mvme147_intr(int irq, void *dummy)
+22{
+23	if (irq == MVME147_IRQ_SCSI_PORT)
+24		wd33c93_intr(mvme147_host);
26 else 25 else
27 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ 26 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
28 return IRQ_HANDLED; 27 return IRQ_HANDLED;
29} 28}
30 29
-31static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
-32{
-33	unsigned char flags = 0x01;
-34	unsigned long addr = virt_to_bus(cmd->SCp.ptr);
-35
-36	/* setup dma direction */
-37	if (!dir_in)
-38		flags |= 0x04;
-39
-40	/* remember direction */
-41	HDATA(mvme147_host)->dma_dir = dir_in;
-42
-43	if (dir_in)
-44		/* invalidate any cache */
-45		cache_clear (addr, cmd->SCp.this_residual);
-46	else
-47		/* push any dirty cache */
-48		cache_push (addr, cmd->SCp.this_residual);
-49
-50	/* start DMA */
-51	m147_pcc->dma_bcr = cmd->SCp.this_residual | (1<<24);
-52	m147_pcc->dma_dadr = addr;
-53	m147_pcc->dma_cntrl = flags;
-54
-55	/* return success */
-56	return 0;
-57}
+30static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
+31{
+32	struct WD33C93_hostdata *hdata = shost_priv(mvme147_host);
+33	unsigned char flags = 0x01;
+34	unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+35
+36	/* setup dma direction */
+37	if (!dir_in)
+38		flags |= 0x04;
+39
+40	/* remember direction */
+41	hdata->dma_dir = dir_in;
+42
+43	if (dir_in) {
+44		/* invalidate any cache */
+45		cache_clear(addr, cmd->SCp.this_residual);
+46	} else {
+47		/* push any dirty cache */
+48		cache_push(addr, cmd->SCp.this_residual);
+49	}
+50
+51	/* start DMA */
+52	m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
+53	m147_pcc->dma_dadr = addr;
+54	m147_pcc->dma_cntrl = flags;
+55
+56	/* return success */
+57	return 0;
+58}
58 59
59static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 60static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
60 int status) 61 int status)
61{ 62{
62 m147_pcc->dma_cntrl = 0; 63 m147_pcc->dma_cntrl = 0;
63} 64}
64 65
65int mvme147_detect(struct scsi_host_template *tpnt) 66int mvme147_detect(struct scsi_host_template *tpnt)
66{ 67{
67 static unsigned char called = 0; 68 static unsigned char called = 0;
68 wd33c93_regs regs; 69 wd33c93_regs regs;
69 70 struct WD33C93_hostdata *hdata;
70 if (!MACH_IS_MVME147 || called) 71
71 return 0; 72 if (!MACH_IS_MVME147 || called)
72 called++; 73 return 0;
73 74 called++;
74 tpnt->proc_name = "MVME147"; 75
75 tpnt->proc_info = &wd33c93_proc_info; 76 tpnt->proc_name = "MVME147";
76 77 tpnt->proc_info = &wd33c93_proc_info;
77 mvme147_host = scsi_register (tpnt, sizeof(struct WD33C93_hostdata)); 78
78 if (!mvme147_host) 79 mvme147_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
79 goto err_out; 80 if (!mvme147_host)
80 81 goto err_out;
81 mvme147_host->base = 0xfffe4000; 82
82 mvme147_host->irq = MVME147_IRQ_SCSI_PORT; 83 mvme147_host->base = 0xfffe4000;
83 regs.SASR = (volatile unsigned char *)0xfffe4000; 84 mvme147_host->irq = MVME147_IRQ_SCSI_PORT;
84 regs.SCMD = (volatile unsigned char *)0xfffe4001; 85 regs.SASR = (volatile unsigned char *)0xfffe4000;
85 HDATA(mvme147_host)->no_sync = 0xff; 86 regs.SCMD = (volatile unsigned char *)0xfffe4001;
86 HDATA(mvme147_host)->fast = 0; 87 hdata = shost_priv(mvme147_host);
87 HDATA(mvme147_host)->dma_mode = CTRL_DMA; 88 hdata->no_sync = 0xff;
88 wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10); 89 hdata->fast = 0;
89 90 hdata->dma_mode = CTRL_DMA;
90 if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, "MVME147 SCSI PORT", mvme147_intr)) 91 wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
91 goto err_unregister; 92
92 if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, "MVME147 SCSI DMA", mvme147_intr)) 93 if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
93 goto err_free_irq; 94 "MVME147 SCSI PORT", mvme147_intr))
95 goto err_unregister;
96 if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
97 "MVME147 SCSI DMA", mvme147_intr))
98 goto err_free_irq;
94#if 0 /* Disabled; causes problems booting */ 99#if 0 /* Disabled; causes problems booting */
95 m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ 100 m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
96 udelay(100); 101 udelay(100);
97 m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */ 102 m147_pcc->scsi_interrupt = 0x00; /* Negate SCSI bus reset */
98 udelay(2000); 103 udelay(2000);
99 m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */ 104 m147_pcc->scsi_interrupt = 0x40; /* Clear bus reset interrupt */
100#endif 105#endif
101 m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */ 106 m147_pcc->scsi_interrupt = 0x09; /* Enable interrupt */
102 107
103 m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */ 108 m147_pcc->dma_cntrl = 0x00; /* ensure DMA is stopped */
104 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ 109 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
105 110
106 return 1; 111 return 1;
107 112
108 err_free_irq: 113err_free_irq:
109 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr); 114 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
110 err_unregister: 115err_unregister:
111 wd33c93_release(); 116 scsi_unregister(mvme147_host);
112 scsi_unregister(mvme147_host); 117err_out:
113 err_out: 118 return 0;
114 return 0;
115} 119}
116 120
117static int mvme147_bus_reset(struct scsi_cmnd *cmd) 121static int mvme147_bus_reset(struct scsi_cmnd *cmd)
118{ 122{
119 /* FIXME perform bus-specific reset */ 123 /* FIXME perform bus-specific reset */
120 124
121 /* FIXME 2: kill this function, and let midlayer fallback to 125 /* FIXME 2: kill this function, and let midlayer fallback to
122 the same result, calling wd33c93_host_reset() */ 126 the same result, calling wd33c93_host_reset() */
123 127
124 spin_lock_irq(cmd->device->host->host_lock); 128 spin_lock_irq(cmd->device->host->host_lock);
@@ -154,10 +158,9 @@ static struct scsi_host_template driver_template = {
154int mvme147_release(struct Scsi_Host *instance) 158int mvme147_release(struct Scsi_Host *instance)
155{ 159{
156#ifdef MODULE 160#ifdef MODULE
157 /* XXX Make sure DMA is stopped! */ 161 /* XXX Make sure DMA is stopped! */
158 wd33c93_release(); 162 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
159 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr); 163 free_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr);
160 free_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr);
161#endif 164#endif
162 return 1; 165 return 1;
163} 166}
diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h
index 32aee85434d8..bfd4566ef050 100644
--- a/drivers/scsi/mvme147.h
+++ b/drivers/scsi/mvme147.h
@@ -14,11 +14,11 @@ int mvme147_detect(struct scsi_host_template *);
14int mvme147_release(struct Scsi_Host *); 14int mvme147_release(struct Scsi_Host *);
15 15
16#ifndef CMD_PER_LUN 16#ifndef CMD_PER_LUN
17#define CMD_PER_LUN 2 17#define CMD_PER_LUN 2
18#endif 18#endif
19 19
20#ifndef CAN_QUEUE 20#ifndef CAN_QUEUE
21#define CAN_QUEUE 16 21#define CAN_QUEUE 16
22#endif 22#endif
23 23
24#endif /* MVME147_H */ 24#endif /* MVME147_H */
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 10a5077b6aed..afc7f6f3a13e 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -132,9 +132,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
132 tmp &= ~PHYEV_RDY_CH; 132 tmp &= ~PHYEV_RDY_CH;
133 mvs_write_port_irq_stat(mvi, phy_id, tmp); 133 mvs_write_port_irq_stat(mvi, phy_id, tmp);
134 tmp = mvs_read_phy_ctl(mvi, phy_id); 134 tmp = mvs_read_phy_ctl(mvi, phy_id);
135 if (hard) 135 if (hard == 1)
136 tmp |= PHY_RST_HARD; 136 tmp |= PHY_RST_HARD;
137 else 137 else if (hard == 0)
138 tmp |= PHY_RST; 138 tmp |= PHY_RST;
139 mvs_write_phy_ctl(mvi, phy_id, tmp); 139 mvs_write_phy_ctl(mvi, phy_id, tmp);
140 if (hard) { 140 if (hard) {
@@ -144,6 +144,26 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
144 } 144 }
145} 145}
146 146
147void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
148{
149 void __iomem *regs = mvi->regs;
150 u32 tmp;
151 if (clear_all) {
152 tmp = mr32(MVS_INT_STAT_SRS_0);
153 if (tmp) {
154 printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp);
155 mw32(MVS_INT_STAT_SRS_0, tmp);
156 }
157 } else {
158 tmp = mr32(MVS_INT_STAT_SRS_0);
159 if (tmp & (1 << (reg_set % 32))) {
160 printk(KERN_DEBUG "register set 0x%x was stopped.\n",
161 reg_set);
162 mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
163 }
164 }
165}
166
147static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi) 167static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
148{ 168{
149 void __iomem *regs = mvi->regs; 169 void __iomem *regs = mvi->regs;
@@ -761,6 +781,7 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
761 mvs_write_port_irq_mask, 781 mvs_write_port_irq_mask,
762 mvs_get_sas_addr, 782 mvs_get_sas_addr,
763 mvs_64xx_command_active, 783 mvs_64xx_command_active,
784 mvs_64xx_clear_srs_irq,
764 mvs_64xx_issue_stop, 785 mvs_64xx_issue_stop,
765 mvs_start_delivery, 786 mvs_start_delivery,
766 mvs_rx_update, 787 mvs_rx_update,
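
mvs_64xx_clear_srs_irq is wired into the per-chip mvs_dispatch table, so common code can clear stopped SATA register sets without knowing which ASIC it is driving; the mv_94xx diff below fills the same slot with a temporary no-op. A small sketch of that dispatch-table pattern, with invented types:

    #include <stdio.h>

    struct chip;    /* stand-in for struct mvs_info */

    /* Per-chip operations, analogous to struct mvs_dispatch. */
    struct dispatch {
            const char *name;
            void (*clear_srs_irq)(struct chip *c, unsigned char reg_set,
                                  unsigned char clear_all);
    };

    struct chip {
            const struct dispatch *ops;
    };

    static void gen1_clear_srs_irq(struct chip *c, unsigned char reg_set,
                                   unsigned char clear_all)
    {
            (void)c;
            printf("gen1: clear SRS irq, reg_set=%u clear_all=%u\n",
                   reg_set, clear_all);
    }

    /* Newer chip doesn't need it yet: keep the hook, make it a no-op. */
    static void gen2_clear_srs_irq(struct chip *c, unsigned char reg_set,
                                   unsigned char clear_all)
    {
            (void)c; (void)reg_set; (void)clear_all;
    }

    static const struct dispatch gen1_ops = { "gen1", gen1_clear_srs_irq };
    static const struct dispatch gen2_ops = { "gen2", gen2_clear_srs_irq };

    int main(void)
    {
            struct chip a = { &gen1_ops }, b = { &gen2_ops };

            a.ops->clear_srs_irq(&a, 0, 1); /* common code, chip-specific effect */
            b.ops->clear_srs_irq(&b, 0, 1); /* silently ignored on gen2 */
            return 0;
    }
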
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 0940fae19d20..eed4c5c72013 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -616,6 +616,15 @@ void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
616} 616}
617#endif 617#endif
618 618
619/*
620 * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
621 * with 64xx fixes
622 */
623static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
624 u8 clear_all)
625{
626}
627
619const struct mvs_dispatch mvs_94xx_dispatch = { 628const struct mvs_dispatch mvs_94xx_dispatch = {
620 "mv94xx", 629 "mv94xx",
621 mvs_94xx_init, 630 mvs_94xx_init,
@@ -640,6 +649,7 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
640 mvs_write_port_irq_mask, 649 mvs_write_port_irq_mask,
641 mvs_get_sas_addr, 650 mvs_get_sas_addr,
642 mvs_94xx_command_active, 651 mvs_94xx_command_active,
652 mvs_94xx_clear_srs_irq,
643 mvs_94xx_issue_stop, 653 mvs_94xx_issue_stop,
644 mvs_start_delivery, 654 mvs_start_delivery,
645 mvs_rx_update, 655 mvs_rx_update,
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index cae6b2cf492f..19ad34f381a5 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -37,6 +37,7 @@ static const struct mvs_chip_info mvs_chips[] = {
37}; 37};
38 38
39#define SOC_SAS_NUM 2 39#define SOC_SAS_NUM 2
40#define SG_MX 64
40 41
41static struct scsi_host_template mvs_sht = { 42static struct scsi_host_template mvs_sht = {
42 .module = THIS_MODULE, 43 .module = THIS_MODULE,
@@ -53,10 +54,10 @@ static struct scsi_host_template mvs_sht = {
53 .can_queue = 1, 54 .can_queue = 1,
54 .cmd_per_lun = 1, 55 .cmd_per_lun = 1,
55 .this_id = -1, 56 .this_id = -1,
56 .sg_tablesize = SG_ALL, 57 .sg_tablesize = SG_MX,
57 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 58 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
58 .use_clustering = ENABLE_CLUSTERING, 59 .use_clustering = ENABLE_CLUSTERING,
59 .eh_device_reset_handler = sas_eh_device_reset_handler, 60 .eh_device_reset_handler = sas_eh_device_reset_handler,
60 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 61 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
61 .slave_alloc = mvs_slave_alloc, 62 .slave_alloc = mvs_slave_alloc,
62 .target_destroy = sas_target_destroy, 63 .target_destroy = sas_target_destroy,
@@ -65,19 +66,17 @@ static struct scsi_host_template mvs_sht = {
65 66
66static struct sas_domain_function_template mvs_transport_ops = { 67static struct sas_domain_function_template mvs_transport_ops = {
67 .lldd_dev_found = mvs_dev_found, 68 .lldd_dev_found = mvs_dev_found,
68 .lldd_dev_gone = mvs_dev_gone, 69 .lldd_dev_gone = mvs_dev_gone,
69
70 .lldd_execute_task = mvs_queue_command, 70 .lldd_execute_task = mvs_queue_command,
71 .lldd_control_phy = mvs_phy_control, 71 .lldd_control_phy = mvs_phy_control,
72 72
73 .lldd_abort_task = mvs_abort_task, 73 .lldd_abort_task = mvs_abort_task,
74 .lldd_abort_task_set = mvs_abort_task_set, 74 .lldd_abort_task_set = mvs_abort_task_set,
75 .lldd_clear_aca = mvs_clear_aca, 75 .lldd_clear_aca = mvs_clear_aca,
76 .lldd_clear_task_set = mvs_clear_task_set, 76 .lldd_clear_task_set = mvs_clear_task_set,
77 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, 77 .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
78 .lldd_lu_reset = mvs_lu_reset, 78 .lldd_lu_reset = mvs_lu_reset,
79 .lldd_query_task = mvs_query_task, 79 .lldd_query_task = mvs_query_task,
80
81 .lldd_port_formed = mvs_port_formed, 80 .lldd_port_formed = mvs_port_formed,
82 .lldd_port_deformed = mvs_port_deformed, 81 .lldd_port_deformed = mvs_port_deformed,
83 82
@@ -213,7 +212,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
213 212
214static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) 213static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
215{ 214{
216 int i, slot_nr; 215 int i = 0, slot_nr;
217 216
218 if (mvi->flags & MVF_FLAG_SOC) 217 if (mvi->flags & MVF_FLAG_SOC)
219 slot_nr = MVS_SOC_SLOTS; 218 slot_nr = MVS_SOC_SLOTS;
@@ -232,6 +231,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
232 mvi->devices[i].dev_type = NO_DEVICE; 231 mvi->devices[i].dev_type = NO_DEVICE;
233 mvi->devices[i].device_id = i; 232 mvi->devices[i].device_id = i;
234 mvi->devices[i].dev_status = MVS_DEV_NORMAL; 233 mvi->devices[i].dev_status = MVS_DEV_NORMAL;
234 init_timer(&mvi->devices[i].timer);
235 } 235 }
236 236
237 /* 237 /*
@@ -437,6 +437,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
437 437
438 sha->sas_phy = arr_phy; 438 sha->sas_phy = arr_phy;
439 sha->sas_port = arr_port; 439 sha->sas_port = arr_port;
440 sha->core.shost = shost;
440 441
441 sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL); 442 sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
442 if (!sha->lldd_ha) 443 if (!sha->lldd_ha)
@@ -574,6 +575,10 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
574 } 575 }
575 nhost++; 576 nhost++;
576 } while (nhost < chip->n_host); 577 } while (nhost < chip->n_host);
578#ifdef MVS_USE_TASKLET
579 tasklet_init(&mv_tasklet, mvs_tasklet,
580 (unsigned long)SHOST_TO_SAS_HA(shost));
581#endif
577 582
578 mvs_post_sas_ha_init(shost, chip); 583 mvs_post_sas_ha_init(shost, chip);
579 584
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0d2138641214..f5e321791903 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -259,8 +259,6 @@ static inline void mvs_free_reg_set(struct mvs_info *mvi,
259 mv_printk("device has been free.\n"); 259 mv_printk("device has been free.\n");
260 return; 260 return;
261 } 261 }
262 if (dev->runing_req != 0)
263 return;
264 if (dev->taskfileset == MVS_ID_NOT_MAPPED) 262 if (dev->taskfileset == MVS_ID_NOT_MAPPED)
265 return; 263 return;
266 MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); 264 MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
@@ -762,8 +760,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
762 } 760 }
763 if (is_tmf) 761 if (is_tmf)
764 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); 762 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
765 else
766 flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
767 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); 763 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
768 hdr->tags = cpu_to_le32(tag); 764 hdr->tags = cpu_to_le32(tag);
769 hdr->data_len = cpu_to_le32(task->total_xfer_len); 765 hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -878,14 +874,15 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
878 struct mvs_slot_info *slot; 874 struct mvs_slot_info *slot;
879 u32 tag = 0xdeadbeef, rc, n_elem = 0; 875 u32 tag = 0xdeadbeef, rc, n_elem = 0;
880 u32 n = num, pass = 0; 876 u32 n = num, pass = 0;
881 unsigned long flags = 0; 877 unsigned long flags = 0, flags_libsas = 0;
882 878
883 if (!dev->port) { 879 if (!dev->port) {
884 struct task_status_struct *tsm = &t->task_status; 880 struct task_status_struct *tsm = &t->task_status;
885 881
886 tsm->resp = SAS_TASK_UNDELIVERED; 882 tsm->resp = SAS_TASK_UNDELIVERED;
887 tsm->stat = SAS_PHY_DOWN; 883 tsm->stat = SAS_PHY_DOWN;
888 t->task_done(t); 884 if (dev->dev_type != SATA_DEV)
885 t->task_done(t);
889 return 0; 886 return 0;
890 } 887 }
891 888
@@ -910,12 +907,25 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
910 else 907 else
911 tei.port = &mvi->port[dev->port->id]; 908 tei.port = &mvi->port[dev->port->id];
912 909
913 if (!tei.port->port_attached) { 910 if (tei.port && !tei.port->port_attached) {
914 if (sas_protocol_ata(t->task_proto)) { 911 if (sas_protocol_ata(t->task_proto)) {
912 struct task_status_struct *ts = &t->task_status;
913
915 mv_dprintk("port %d does not" 914 mv_dprintk("port %d does not"
916 "attached device.\n", dev->port->id); 915 "attached device.\n", dev->port->id);
 917 rc = SAS_PHY_DOWN; 916 ts->resp = SAS_TASK_COMPLETE;
918 goto out_done; 917 ts->stat = SAS_PHY_DOWN;
918 spin_unlock_irqrestore(dev->sata_dev.ap->lock,
919 flags_libsas);
920 spin_unlock_irqrestore(&mvi->lock, flags);
921 t->task_done(t);
922 spin_lock_irqsave(&mvi->lock, flags);
923 spin_lock_irqsave(dev->sata_dev.ap->lock,
924 flags_libsas);
925 if (n > 1)
926 t = list_entry(t->list.next,
927 struct sas_task, list);
928 continue;
919 } else { 929 } else {
920 struct task_status_struct *ts = &t->task_status; 930 struct task_status_struct *ts = &t->task_status;
921 ts->resp = SAS_TASK_UNDELIVERED; 931 ts->resp = SAS_TASK_UNDELIVERED;
@@ -973,8 +983,8 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
973 break; 983 break;
974 default: 984 default:
975 dev_printk(KERN_ERR, mvi->dev, 985 dev_printk(KERN_ERR, mvi->dev,
976 "unknown sas_task proto: 0x%x\n", 986 "unknown sas_task proto: 0x%x\n",
977 t->task_proto); 987 t->task_proto);
978 rc = -EINVAL; 988 rc = -EINVAL;
979 break; 989 break;
980 } 990 }
@@ -993,11 +1003,15 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
993 spin_unlock(&t->task_state_lock); 1003 spin_unlock(&t->task_state_lock);
994 1004
995 mvs_hba_memory_dump(mvi, tag, t->task_proto); 1005 mvs_hba_memory_dump(mvi, tag, t->task_proto);
996 mvi_dev->runing_req++; 1006 mvi_dev->running_req++;
997 ++pass; 1007 ++pass;
998 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 1008 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
999 if (n > 1) 1009 if (n > 1)
1000 t = list_entry(t->list.next, struct sas_task, list); 1010 t = list_entry(t->list.next, struct sas_task, list);
1011 if (likely(pass))
1012 MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
1013 (MVS_CHIP_SLOT_SZ - 1));
1014
1001 } while (--n); 1015 } while (--n);
1002 rc = 0; 1016 rc = 0;
1003 goto out_done; 1017 goto out_done;
@@ -1012,10 +1026,6 @@ err_out:
1012 dma_unmap_sg(mvi->dev, t->scatter, n_elem, 1026 dma_unmap_sg(mvi->dev, t->scatter, n_elem,
1013 t->data_dir); 1027 t->data_dir);
1014out_done: 1028out_done:
1015 if (likely(pass)) {
1016 MVS_CHIP_DISP->start_delivery(mvi,
1017 (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1018 }
1019 spin_unlock_irqrestore(&mvi->lock, flags); 1029 spin_unlock_irqrestore(&mvi->lock, flags);
1020 return rc; 1030 return rc;
1021} 1031}
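
The mvs_task_exec hunks above move the MVS_CHIP_DISP->start_delivery() kick from the shared out_done label into the submission loop itself, so each slot is handed to the hardware as soon as it is queued; previously a failure on a later task in the batch could exit through the error path with earlier, already-prepared slots never delivered. A toy producer loop showing the same publish-as-you-go shape (names invented):

    #include <stdio.h>

    #define RING_SZ 8

    static int ring[RING_SZ];
    static unsigned tx_prod;

    /* Stand-in for MVS_CHIP_DISP->start_delivery(): tell hw a slot is ready. */
    static void start_delivery(unsigned slot)
    {
            printf("hw notified: slot %u ready\n", slot);
    }

    static int prep(int item, unsigned slot)
    {
            if (item < 0)
                    return -1;      /* simulate a mid-batch failure */
            ring[slot] = item;
            return 0;
    }

    int main(void)
    {
            int batch[] = { 10, 11, -1, 13 };       /* third entry will fail */
            unsigned i;

            for (i = 0; i < sizeof(batch) / sizeof(batch[0]); i++) {
                    if (prep(batch[i], tx_prod & (RING_SZ - 1)))
                            break;  /* earlier slots were already delivered */
                    tx_prod++;
                    /* deliver immediately, not once at loop exit */
                    start_delivery((tx_prod - 1) & (RING_SZ - 1));
            }
            return 0;
    }
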
@@ -1187,7 +1197,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1187 MVS_CHIP_DISP->phy_reset(mvi, i, 0); 1197 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1188 goto out_done; 1198 goto out_done;
1189 } 1199 }
1190 } else if (phy->phy_type & PORT_TYPE_SAS 1200 } else if (phy->phy_type & PORT_TYPE_SAS
1191 || phy->att_dev_info & PORT_SSP_INIT_MASK) { 1201 || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1192 phy->phy_attached = 1; 1202 phy->phy_attached = 1;
1193 phy->identify.device_type = 1203 phy->identify.device_type =
@@ -1256,7 +1266,20 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1256 1266
1257static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) 1267static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1258{ 1268{
1259 /*Nothing*/ 1269 struct domain_device *dev;
1270 struct mvs_phy *phy = sas_phy->lldd_phy;
1271 struct mvs_info *mvi = phy->mvi;
1272 struct asd_sas_port *port = sas_phy->port;
1273 int phy_no = 0;
1274
1275 while (phy != &mvi->phy[phy_no]) {
1276 phy_no++;
1277 if (phy_no >= MVS_MAX_PHYS)
1278 return;
1279 }
1280 list_for_each_entry(dev, &port->dev_list, dev_list_node)
1281 mvs_do_release_task(phy->mvi, phy_no, NULL);
1282
1260} 1283}
1261 1284
1262 1285
@@ -1316,6 +1339,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
1316 goto found_out; 1339 goto found_out;
1317 } 1340 }
1318 dev->lldd_dev = mvi_device; 1341 dev->lldd_dev = mvi_device;
1342 mvi_device->dev_status = MVS_DEV_NORMAL;
1319 mvi_device->dev_type = dev->dev_type; 1343 mvi_device->dev_type = dev->dev_type;
1320 mvi_device->mvi_info = mvi; 1344 mvi_device->mvi_info = mvi;
1321 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 1345 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
@@ -1351,18 +1375,18 @@ int mvs_dev_found(struct domain_device *dev)
1351 return mvs_dev_found_notify(dev, 1); 1375 return mvs_dev_found_notify(dev, 1);
1352} 1376}
1353 1377
1354void mvs_dev_gone_notify(struct domain_device *dev, int lock) 1378void mvs_dev_gone_notify(struct domain_device *dev)
1355{ 1379{
1356 unsigned long flags = 0; 1380 unsigned long flags = 0;
1357 struct mvs_device *mvi_dev = dev->lldd_dev; 1381 struct mvs_device *mvi_dev = dev->lldd_dev;
1358 struct mvs_info *mvi = mvi_dev->mvi_info; 1382 struct mvs_info *mvi = mvi_dev->mvi_info;
1359 1383
1360 if (lock) 1384 spin_lock_irqsave(&mvi->lock, flags);
1361 spin_lock_irqsave(&mvi->lock, flags);
1362 1385
1363 if (mvi_dev) { 1386 if (mvi_dev) {
1364 mv_dprintk("found dev[%d:%x] is gone.\n", 1387 mv_dprintk("found dev[%d:%x] is gone.\n",
1365 mvi_dev->device_id, mvi_dev->dev_type); 1388 mvi_dev->device_id, mvi_dev->dev_type);
1389 mvs_release_task(mvi, dev);
1366 mvs_free_reg_set(mvi, mvi_dev); 1390 mvs_free_reg_set(mvi, mvi_dev);
1367 mvs_free_dev(mvi_dev); 1391 mvs_free_dev(mvi_dev);
1368 } else { 1392 } else {
@@ -1370,14 +1394,13 @@ void mvs_dev_gone_notify(struct domain_device *dev, int lock)
1370 } 1394 }
1371 dev->lldd_dev = NULL; 1395 dev->lldd_dev = NULL;
1372 1396
1373 if (lock) 1397 spin_unlock_irqrestore(&mvi->lock, flags);
1374 spin_unlock_irqrestore(&mvi->lock, flags);
1375} 1398}
1376 1399
1377 1400
1378void mvs_dev_gone(struct domain_device *dev) 1401void mvs_dev_gone(struct domain_device *dev)
1379{ 1402{
1380 mvs_dev_gone_notify(dev, 1); 1403 mvs_dev_gone_notify(dev);
1381} 1404}
1382 1405
1383static struct sas_task *mvs_alloc_task(void) 1406static struct sas_task *mvs_alloc_task(void)
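
mvs_dev_gone_notify loses its `lock` flag and now always takes mvi->lock itself. Conditional-locking parameters are easy to get wrong because every caller has to know the current lock state; giving the function a single calling convention removes that hazard. A compact sketch of the after-state contract, a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int ndevices = 1;

    /*
     * After the change: the function owns its locking, so it has exactly
     * one calling convention ("call me unlocked").
     */
    static void dev_gone_notify(void)
    {
            pthread_mutex_lock(&lock);
            ndevices--;
            printf("device released, %d left\n", ndevices);
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            dev_gone_notify();      /* no lock flag for callers to get wrong */
            return 0;
    }
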
@@ -1540,7 +1563,7 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1540 num = mvs_find_dev_phyno(dev, phyno); 1563 num = mvs_find_dev_phyno(dev, phyno);
1541 spin_lock_irqsave(&mvi->lock, flags); 1564 spin_lock_irqsave(&mvi->lock, flags);
1542 for (i = 0; i < num; i++) 1565 for (i = 0; i < num; i++)
1543 mvs_release_task(mvi, phyno[i], dev); 1566 mvs_release_task(mvi, dev);
1544 spin_unlock_irqrestore(&mvi->lock, flags); 1567 spin_unlock_irqrestore(&mvi->lock, flags);
1545 } 1568 }
1546 /* If failed, fall-through I_T_Nexus reset */ 1569 /* If failed, fall-through I_T_Nexus reset */
@@ -1552,8 +1575,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1552int mvs_I_T_nexus_reset(struct domain_device *dev) 1575int mvs_I_T_nexus_reset(struct domain_device *dev)
1553{ 1576{
1554 unsigned long flags; 1577 unsigned long flags;
1555 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; 1578 int rc = TMF_RESP_FUNC_FAILED;
1556 struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; 1579 struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
1557 struct mvs_info *mvi = mvi_dev->mvi_info; 1580 struct mvs_info *mvi = mvi_dev->mvi_info;
1558 1581
1559 if (mvi_dev->dev_status != MVS_DEV_EH) 1582 if (mvi_dev->dev_status != MVS_DEV_EH)
@@ -1563,10 +1586,8 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
1563 __func__, mvi_dev->device_id, rc); 1586 __func__, mvi_dev->device_id, rc);
1564 1587
1565 /* housekeeper */ 1588 /* housekeeper */
1566 num = mvs_find_dev_phyno(dev, phyno);
1567 spin_lock_irqsave(&mvi->lock, flags); 1589 spin_lock_irqsave(&mvi->lock, flags);
1568 for (i = 0; i < num; i++) 1590 mvs_release_task(mvi, dev);
1569 mvs_release_task(mvi, phyno[i], dev);
1570 spin_unlock_irqrestore(&mvi->lock, flags); 1591 spin_unlock_irqrestore(&mvi->lock, flags);
1571 1592
1572 return rc; 1593 return rc;
@@ -1603,6 +1624,9 @@ int mvs_query_task(struct sas_task *task)
1603 case TMF_RESP_FUNC_FAILED: 1624 case TMF_RESP_FUNC_FAILED:
1604 case TMF_RESP_FUNC_COMPLETE: 1625 case TMF_RESP_FUNC_COMPLETE:
1605 break; 1626 break;
1627 default:
1628 rc = TMF_RESP_FUNC_COMPLETE;
1629 break;
1606 } 1630 }
1607 } 1631 }
1608 mv_printk("%s:rc= %d\n", __func__, rc); 1632 mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1621,8 +1645,11 @@ int mvs_abort_task(struct sas_task *task)
1621 unsigned long flags; 1645 unsigned long flags;
1622 u32 tag; 1646 u32 tag;
1623 1647
1624 if (mvi->exp_req) 1648 if (!mvi_dev) {
1625 mvi->exp_req--; 1649 mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
1650 rc = TMF_RESP_FUNC_FAILED;
1651 }
1652
1626 spin_lock_irqsave(&task->task_state_lock, flags); 1653 spin_lock_irqsave(&task->task_state_lock, flags);
1627 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1654 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1628 spin_unlock_irqrestore(&task->task_state_lock, flags); 1655 spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1630,6 +1657,7 @@ int mvs_abort_task(struct sas_task *task)
1630 goto out; 1657 goto out;
1631 } 1658 }
1632 spin_unlock_irqrestore(&task->task_state_lock, flags); 1659 spin_unlock_irqrestore(&task->task_state_lock, flags);
1660 mvi_dev->dev_status = MVS_DEV_EH;
1633 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1661 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1634 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; 1662 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1635 1663
@@ -1654,12 +1682,31 @@ int mvs_abort_task(struct sas_task *task)
1654 if (task->lldd_task) { 1682 if (task->lldd_task) {
1655 slot = task->lldd_task; 1683 slot = task->lldd_task;
1656 slot_no = (u32) (slot - mvi->slot_info); 1684 slot_no = (u32) (slot - mvi->slot_info);
1685 spin_lock_irqsave(&mvi->lock, flags);
1657 mvs_slot_complete(mvi, slot_no, 1); 1686 mvs_slot_complete(mvi, slot_no, 1);
1687 spin_unlock_irqrestore(&mvi->lock, flags);
1658 } 1688 }
1659 } 1689 }
1690
1660 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1691 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1661 task->task_proto & SAS_PROTOCOL_STP) { 1692 task->task_proto & SAS_PROTOCOL_STP) {
1662 /* to do free register_set */ 1693 /* to do free register_set */
1694 if (SATA_DEV == dev->dev_type) {
1695 struct mvs_slot_info *slot = task->lldd_task;
1696 struct task_status_struct *tstat;
1697 u32 slot_idx = (u32)(slot - mvi->slot_info);
1698 tstat = &task->task_status;
1699 mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
1700 "slot=%p slot_idx=x%x\n",
1701 mvi, task, slot, slot_idx);
1702 tstat->stat = SAS_ABORTED_TASK;
1703 if (mvi_dev && mvi_dev->running_req)
1704 mvi_dev->running_req--;
1705 if (sas_protocol_ata(task->task_proto))
1706 mvs_free_reg_set(mvi, mvi_dev);
1707 mvs_slot_task_free(mvi, task, slot, slot_idx);
1708 return -1;
1709 }
1663 } else { 1710 } else {
1664 /* SMP */ 1711 /* SMP */
1665 1712
@@ -1717,8 +1764,13 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1717 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), 1764 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1718 sizeof(struct dev_to_host_fis)); 1765 sizeof(struct dev_to_host_fis));
1719 tstat->buf_valid_size = sizeof(*resp); 1766 tstat->buf_valid_size = sizeof(*resp);
1720 if (unlikely(err)) 1767 if (unlikely(err)) {
1721 stat = SAS_PROTO_RESPONSE; 1768 if (unlikely(err & CMD_ISS_STPD))
1769 stat = SAS_OPEN_REJECT;
1770 else
1771 stat = SAS_PROTO_RESPONSE;
1772 }
1773
1722 return stat; 1774 return stat;
1723} 1775}
1724 1776
@@ -1753,9 +1805,7 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1753 mv_printk("find reserved error, why?\n"); 1805 mv_printk("find reserved error, why?\n");
1754 1806
1755 task->ata_task.use_ncq = 0; 1807 task->ata_task.use_ncq = 0;
1756 stat = SAS_PROTO_RESPONSE; 1808 mvs_sata_done(mvi, task, slot_idx, err_dw0);
1757 mvs_sata_done(mvi, task, slot_idx, 1);
1758
1759 } 1809 }
1760 break; 1810 break;
1761 default: 1811 default:
@@ -1772,18 +1822,20 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1772 struct sas_task *task = slot->task; 1822 struct sas_task *task = slot->task;
1773 struct mvs_device *mvi_dev = NULL; 1823 struct mvs_device *mvi_dev = NULL;
1774 struct task_status_struct *tstat; 1824 struct task_status_struct *tstat;
1825 struct domain_device *dev;
1826 u32 aborted;
1775 1827
1776 bool aborted;
1777 void *to; 1828 void *to;
1778 enum exec_status sts; 1829 enum exec_status sts;
1779 1830
1780 if (mvi->exp_req) 1831 if (mvi->exp_req)
1781 mvi->exp_req--; 1832 mvi->exp_req--;
1782 if (unlikely(!task || !task->lldd_task)) 1833 if (unlikely(!task || !task->lldd_task || !task->dev))
1783 return -1; 1834 return -1;
1784 1835
1785 tstat = &task->task_status; 1836 tstat = &task->task_status;
1786 mvi_dev = task->dev->lldd_dev; 1837 dev = task->dev;
1838 mvi_dev = dev->lldd_dev;
1787 1839
1788 mvs_hba_cq_dump(mvi); 1840 mvs_hba_cq_dump(mvi);
1789 1841
@@ -1800,8 +1852,8 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1800 1852
1801 if (unlikely(aborted)) { 1853 if (unlikely(aborted)) {
1802 tstat->stat = SAS_ABORTED_TASK; 1854 tstat->stat = SAS_ABORTED_TASK;
1803 if (mvi_dev) 1855 if (mvi_dev && mvi_dev->running_req)
1804 mvi_dev->runing_req--; 1856 mvi_dev->running_req--;
1805 if (sas_protocol_ata(task->task_proto)) 1857 if (sas_protocol_ata(task->task_proto))
1806 mvs_free_reg_set(mvi, mvi_dev); 1858 mvs_free_reg_set(mvi, mvi_dev);
1807 1859
@@ -1809,24 +1861,17 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1809 return -1; 1861 return -1;
1810 } 1862 }
1811 1863
1812 if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) { 1864 if (unlikely(!mvi_dev || flags)) {
1813 mv_dprintk("port has not device.\n"); 1865 if (!mvi_dev)
1866 mv_dprintk("port has not device.\n");
1814 tstat->stat = SAS_PHY_DOWN; 1867 tstat->stat = SAS_PHY_DOWN;
1815 goto out; 1868 goto out;
1816 } 1869 }
1817 1870
1818 /*
1819 if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
1820 mv_dprintk("Find device[%016llx] RXQ_ERR %X,
1821 err info:%016llx\n",
1822 SAS_ADDR(task->dev->sas_addr),
1823 rx_desc, (u64)(*(u64 *) slot->response));
1824 }
1825 */
1826
1827 /* error info record present */ 1871 /* error info record present */
1828 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 1872 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1829 tstat->stat = mvs_slot_err(mvi, task, slot_idx); 1873 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1874 tstat->resp = SAS_TASK_COMPLETE;
1830 goto out; 1875 goto out;
1831 } 1876 }
1832 1877
@@ -1868,11 +1913,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1868 tstat->stat = SAM_CHECK_COND; 1913 tstat->stat = SAM_CHECK_COND;
1869 break; 1914 break;
1870 } 1915 }
1916 if (!slot->port->port_attached) {
1917 mv_dprintk("port %d has removed.\n", slot->port->sas_port.id);
1918 tstat->stat = SAS_PHY_DOWN;
1919 }
1920
1871 1921
1872out: 1922out:
1873 if (mvi_dev) { 1923 if (mvi_dev && mvi_dev->running_req) {
1874 mvi_dev->runing_req--; 1924 mvi_dev->running_req--;
1875 if (sas_protocol_ata(task->task_proto)) 1925 if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
1876 mvs_free_reg_set(mvi, mvi_dev); 1926 mvs_free_reg_set(mvi, mvi_dev);
1877 } 1927 }
1878 mvs_slot_task_free(mvi, task, slot, slot_idx); 1928 mvs_slot_task_free(mvi, task, slot, slot_idx);
@@ -1888,10 +1938,10 @@ out:
1888 return sts; 1938 return sts;
1889} 1939}
1890 1940
1891void mvs_release_task(struct mvs_info *mvi, 1941void mvs_do_release_task(struct mvs_info *mvi,
1892 int phy_no, struct domain_device *dev) 1942 int phy_no, struct domain_device *dev)
1893{ 1943{
1894 int i = 0; u32 slot_idx; 1944 u32 slot_idx;
1895 struct mvs_phy *phy; 1945 struct mvs_phy *phy;
1896 struct mvs_port *port; 1946 struct mvs_port *port;
1897 struct mvs_slot_info *slot, *slot2; 1947 struct mvs_slot_info *slot, *slot2;
@@ -1900,6 +1950,10 @@ void mvs_release_task(struct mvs_info *mvi,
1900 port = phy->port; 1950 port = phy->port;
1901 if (!port) 1951 if (!port)
1902 return; 1952 return;
1953 /* clean cmpl queue in case request is already finished */
1954 mvs_int_rx(mvi, false);
1955
1956
1903 1957
1904 list_for_each_entry_safe(slot, slot2, &port->list, entry) { 1958 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
1905 struct sas_task *task; 1959 struct sas_task *task;
@@ -1911,18 +1965,22 @@ void mvs_release_task(struct mvs_info *mvi,
1911 1965
1912 mv_printk("Release slot [%x] tag[%x], task [%p]:\n", 1966 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
1913 slot_idx, slot->slot_tag, task); 1967 slot_idx, slot->slot_tag, task);
1914 1968 MVS_CHIP_DISP->command_active(mvi, slot_idx);
1915 if (task->task_proto & SAS_PROTOCOL_SSP) {
1916 mv_printk("attached with SSP task CDB[");
1917 for (i = 0; i < 16; i++)
1918 mv_printk(" %02x", task->ssp_task.cdb[i]);
1919 mv_printk(" ]\n");
1920 }
1921 1969
1922 mvs_slot_complete(mvi, slot_idx, 1); 1970 mvs_slot_complete(mvi, slot_idx, 1);
1923 } 1971 }
1924} 1972}
1925 1973
1974void mvs_release_task(struct mvs_info *mvi,
1975 struct domain_device *dev)
1976{
1977 int i, phyno[WIDE_PORT_MAX_PHY], num;
1978 /* housekeeper */
1979 num = mvs_find_dev_phyno(dev, phyno);
1980 for (i = 0; i < num; i++)
1981 mvs_do_release_task(mvi, phyno[i], dev);
1982}
1983
1926static void mvs_phy_disconnected(struct mvs_phy *phy) 1984static void mvs_phy_disconnected(struct mvs_phy *phy)
1927{ 1985{
1928 phy->phy_attached = 0; 1986 phy->phy_attached = 0;
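
mvs_release_task is split in two: mvs_do_release_task keeps the per-phy slot walk, while the new mvs_release_task wrapper resolves a device to its phys and loops over them, so call sites no longer duplicate the mvs_find_dev_phyno() housekeeping. The shape of that worker/wrapper split, with invented names:

    #include <stdio.h>

    #define MAX_PHYS 4

    /* worker: releases everything attached to a single phy */
    static void do_release_task(int phy_no)
    {
            printf("releasing slots on phy %d\n", phy_no);
    }

    /* invented lookup: which phys does this device sit behind? */
    static int find_dev_phyno(int dev_id, int phyno[MAX_PHYS])
    {
            phyno[0] = dev_id * 2;          /* pretend wide port: two phys */
            phyno[1] = dev_id * 2 + 1;
            return 2;
    }

    /* wrapper: callers pass a device, never a phy list */
    static void release_task(int dev_id)
    {
            int i, num, phyno[MAX_PHYS];

            num = find_dev_phyno(dev_id, phyno);
            for (i = 0; i < num; i++)
                    do_release_task(phyno[i]);
    }

    int main(void)
    {
            release_task(1);
            return 0;
    }
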
@@ -2029,16 +2087,18 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2029 * we need check the interrupt status which belongs to per port. 2087 * we need check the interrupt status which belongs to per port.
2030 */ 2088 */
2031 2089
2032 if (phy->irq_status & PHYEV_DCDR_ERR) 2090 if (phy->irq_status & PHYEV_DCDR_ERR) {
2033 mv_dprintk("port %d STP decoding error.\n", 2091 mv_dprintk("port %d STP decoding error.\n",
2034 phy_no+mvi->id*mvi->chip->n_phy); 2092 phy_no + mvi->id*mvi->chip->n_phy);
2093 }
2035 2094
2036 if (phy->irq_status & PHYEV_POOF) { 2095 if (phy->irq_status & PHYEV_POOF) {
2037 if (!(phy->phy_event & PHY_PLUG_OUT)) { 2096 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2038 int dev_sata = phy->phy_type & PORT_TYPE_SATA; 2097 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2039 int ready; 2098 int ready;
2040 mvs_release_task(mvi, phy_no, NULL); 2099 mvs_do_release_task(mvi, phy_no, NULL);
2041 phy->phy_event |= PHY_PLUG_OUT; 2100 phy->phy_event |= PHY_PLUG_OUT;
2101 MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
2042 mvs_handle_event(mvi, 2102 mvs_handle_event(mvi,
2043 (void *)(unsigned long)phy_no, 2103 (void *)(unsigned long)phy_no,
2044 PHY_PLUG_EVENT); 2104 PHY_PLUG_EVENT);
@@ -2085,6 +2145,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2085 phy_no, tmp); 2145 phy_no, tmp);
2086 } 2146 }
2087 mvs_update_phyinfo(mvi, phy_no, 0); 2147 mvs_update_phyinfo(mvi, phy_no, 0);
2148 if (phy->phy_type & PORT_TYPE_SAS) {
2149 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
2150 mdelay(10);
2151 }
2152
2088 mvs_bytes_dmaed(mvi, phy_no); 2153 mvs_bytes_dmaed(mvi, phy_no);
2089 /* whether driver is going to handle hot plug */ 2154 /* whether driver is going to handle hot plug */
2090 if (phy->phy_event & PHY_PLUG_OUT) { 2155 if (phy->phy_event & PHY_PLUG_OUT) {
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 885858bcc403..77ddc7c1e5f2 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -39,6 +39,7 @@
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
41#include <scsi/libsas.h> 41#include <scsi/libsas.h>
42#include <scsi/scsi.h>
42#include <scsi/scsi_tcq.h> 43#include <scsi/scsi_tcq.h>
43#include <scsi/sas_ata.h> 44#include <scsi/sas_ata.h>
44#include <linux/version.h> 45#include <linux/version.h>
@@ -49,7 +50,7 @@
49#define _MV_DUMP 0 50#define _MV_DUMP 0
50#define MVS_ID_NOT_MAPPED 0x7f 51#define MVS_ID_NOT_MAPPED 0x7f
51/* #define DISABLE_HOTPLUG_DMA_FIX */ 52/* #define DISABLE_HOTPLUG_DMA_FIX */
52#define MAX_EXP_RUNNING_REQ 2 53// #define MAX_EXP_RUNNING_REQ 2
53#define WIDE_PORT_MAX_PHY 4 54#define WIDE_PORT_MAX_PHY 4
54#define MV_DISABLE_NCQ 0 55#define MV_DISABLE_NCQ 0
55#define mv_printk(fmt, arg ...) \ 56#define mv_printk(fmt, arg ...) \
@@ -129,6 +130,7 @@ struct mvs_dispatch {
129 130
130 void (*get_sas_addr)(void *buf, u32 buflen); 131 void (*get_sas_addr)(void *buf, u32 buflen);
131 void (*command_active)(struct mvs_info *mvi, u32 slot_idx); 132 void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
133 void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
132 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, 134 void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
133 u32 tfs); 135 u32 tfs);
134 void (*start_delivery)(struct mvs_info *mvi, u32 tx); 136 void (*start_delivery)(struct mvs_info *mvi, u32 tx);
@@ -236,9 +238,10 @@ struct mvs_device {
236 enum sas_dev_type dev_type; 238 enum sas_dev_type dev_type;
237 struct mvs_info *mvi_info; 239 struct mvs_info *mvi_info;
238 struct domain_device *sas_device; 240 struct domain_device *sas_device;
241 struct timer_list timer;
239 u32 attached_phy; 242 u32 attached_phy;
240 u32 device_id; 243 u32 device_id;
241 u32 runing_req; 244 u32 running_req;
242 u8 taskfileset; 245 u8 taskfileset;
243 u8 dev_status; 246 u8 dev_status;
244 u16 reserved; 247 u16 reserved;
@@ -397,7 +400,9 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun);
397int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags); 400int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
398int mvs_I_T_nexus_reset(struct domain_device *dev); 401int mvs_I_T_nexus_reset(struct domain_device *dev);
399int mvs_query_task(struct sas_task *task); 402int mvs_query_task(struct sas_task *task);
400void mvs_release_task(struct mvs_info *mvi, int phy_no, 403void mvs_release_task(struct mvs_info *mvi,
404 struct domain_device *dev);
405void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
401 struct domain_device *dev); 406 struct domain_device *dev);
402void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); 407void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
403void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); 408void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 909c00ec044f..5ff8261c5d67 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4390,7 +4390,6 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
4390 return -ENOMEM; 4390 return -ENOMEM;
4391 } 4391 }
4392 } 4392 }
4393 memset(buffer, 0, fw_control->len);
4394 memcpy(buffer, fw_control->buffer, fw_control->len); 4393 memcpy(buffer, fw_control->buffer, fw_control->len);
4395 flash_update_info.sgl.addr = cpu_to_le64(phys_addr); 4394 flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
4396 flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); 4395 flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
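
The pm8001 hunk deletes a memset() whose every byte was immediately overwritten by a memcpy() of the same length, pure wasted work. The redundancy in isolation:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            const char src[] = "firmware image bytes";
            size_t len = sizeof(src);
            char *buffer = malloc(len);

            if (!buffer)
                    return 1;

            /* memset(buffer, 0, len);  <- dead store: every byte is about
             * to be rewritten by the full-length copy below. */
            memcpy(buffer, src, len);

            printf("%s\n", buffer);
            free(buffer);
            return 0;
    }
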
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index bff4f5139b9c..cd02ceaf67ff 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -885,11 +885,13 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
885 u32 tag; 885 u32 tag;
886 struct pm8001_hba_info *pm8001_ha; 886 struct pm8001_hba_info *pm8001_ha;
887 struct pm8001_device *pm8001_dev = dev->lldd_dev; 887 struct pm8001_device *pm8001_dev = dev->lldd_dev;
888 u32 device_id = pm8001_dev->device_id; 888
889 pm8001_ha = pm8001_find_ha_by_dev(dev); 889 pm8001_ha = pm8001_find_ha_by_dev(dev);
890 spin_lock_irqsave(&pm8001_ha->lock, flags); 890 spin_lock_irqsave(&pm8001_ha->lock, flags);
891 pm8001_tag_alloc(pm8001_ha, &tag); 891 pm8001_tag_alloc(pm8001_ha, &tag);
892 if (pm8001_dev) { 892 if (pm8001_dev) {
893 u32 device_id = pm8001_dev->device_id;
894
893 PM8001_DISC_DBG(pm8001_ha, 895 PM8001_DISC_DBG(pm8001_ha,
894 pm8001_printk("found dev[%d:%x] is gone.\n", 896 pm8001_printk("found dev[%d:%x] is gone.\n",
895 pm8001_dev->device_id, pm8001_dev->dev_type)); 897 pm8001_dev->device_id, pm8001_dev->dev_type));
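
The pm8001_dev_gone_notify fix is the classic dereference-before-NULL-check bug: device_id was read from pm8001_dev before the `if (pm8001_dev)` guard, so the guard could never help. Moving the read inside the branch, in miniature:

    #include <stdio.h>

    struct dev {
            unsigned int device_id;
    };

    static void dev_gone_notify(struct dev *d)
    {
            /* BAD: unsigned int id = d->device_id;  crashes when d == NULL,
             * making the check below useless. */
            if (d) {
                    unsigned int id = d->device_id; /* read only once d is valid */
                    printf("dev[%u] is gone\n", id);
            } else {
                    printf("device was never found\n");
            }
    }

    int main(void)
    {
            struct dev d = { .device_id = 7 };

            dev_gone_notify(&d);
            dev_gone_notify(NULL);  /* safe now */
            return 0;
    }
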
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 53aefffbaead..c44e4ab4e938 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3751,12 +3751,6 @@ static int pmcraid_check_ioctl_buffer(
3751 return -EINVAL; 3751 return -EINVAL;
3752 } 3752 }
3753 3753
3754 /* buffer length can't be negetive */
3755 if (hdr->buffer_length < 0) {
3756 pmcraid_err("ioctl: invalid buffer length specified\n");
3757 return -EINVAL;
3758 }
3759
3760 /* check for appropriate buffer access */ 3754 /* check for appropriate buffer access */
3761 if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ) 3755 if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
3762 access = VERIFY_WRITE; 3756 access = VERIFY_WRITE;
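
The pmcraid hunk drops a `hdr->buffer_length < 0` test: buffer_length is unsigned, so the comparison is always false and the "validation" can never fire (compilers flag it with -Wtype-limits). Demonstrated:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t buffer_length = (uint32_t)-1; /* a "negative" value wraps */

            /* always false for an unsigned type: this check rejects nothing */
            if (buffer_length < 0)
                    printf("invalid buffer length\n");
            else
                    printf("check passed, length = %u\n", buffer_length);

            return 0;
    }
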
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index c51fd1f86639..5df782f4a097 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,5 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
3 qla_nx.o
3 4
4obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1c7ef55966fb..3b708606b932 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -12,9 +12,7 @@
12#include <linux/delay.h> 12#include <linux/delay.h>
13 13
14static int qla24xx_vport_disable(struct fc_vport *, bool); 14static int qla24xx_vport_disable(struct fc_vport *, bool);
15static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *); 15
16int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
17static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
18/* SYSFS attributes --------------------------------------------------------- */ 16/* SYSFS attributes --------------------------------------------------------- */
19 17
20static ssize_t 18static ssize_t
@@ -43,6 +41,12 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
43 struct qla_hw_data *ha = vha->hw; 41 struct qla_hw_data *ha = vha->hw;
44 int reading; 42 int reading;
45 43
44 if (IS_QLA82XX(ha)) {
45 DEBUG2(qla_printk(KERN_INFO, ha,
46 "Firmware dump not supported for ISP82xx\n"));
47 return count;
48 }
49
46 if (off != 0) 50 if (off != 0)
47 return (0); 51 return (0);
48 52
@@ -277,6 +281,12 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
277 return count; 281 return count;
278 } 282 }
279 283
284 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
285 qla_printk(KERN_WARNING, ha,
286 "HBA not online, failing NVRAM update.\n");
287 return -EAGAIN;
288 }
289
280 DEBUG2(qla_printk(KERN_INFO, ha, 290 DEBUG2(qla_printk(KERN_INFO, ha,
281 "Reading flash region -- 0x%x/0x%x.\n", 291 "Reading flash region -- 0x%x/0x%x.\n",
282 ha->optrom_region_start, ha->optrom_region_size)); 292 ha->optrom_region_start, ha->optrom_region_size));
@@ -315,8 +325,8 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
315 else if (start == (ha->flt_region_boot * 4) || 325 else if (start == (ha->flt_region_boot * 4) ||
316 start == (ha->flt_region_fw * 4)) 326 start == (ha->flt_region_fw * 4))
317 valid = 1; 327 valid = 1;
318 else if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) 328 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
319 valid = 1; 329 valid = 1;
320 if (!valid) { 330 if (!valid) {
321 qla_printk(KERN_WARNING, ha, 331 qla_printk(KERN_WARNING, ha,
322 "Invalid start region 0x%x/0x%x.\n", start, size); 332 "Invalid start region 0x%x/0x%x.\n", start, size);
@@ -519,6 +529,7 @@ qla2x00_sysfs_write_reset(struct kobject *kobj,
519 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 529 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
520 struct device, kobj))); 530 struct device, kobj)));
521 struct qla_hw_data *ha = vha->hw; 531 struct qla_hw_data *ha = vha->hw;
532 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
522 int type; 533 int type;
523 534
524 if (off != 0) 535 if (off != 0)
@@ -553,6 +564,20 @@ qla2x00_sysfs_write_reset(struct kobject *kobj,
553 "MPI reset failed on (%ld).\n", vha->host_no); 564 "MPI reset failed on (%ld).\n", vha->host_no);
554 scsi_unblock_requests(vha->host); 565 scsi_unblock_requests(vha->host);
555 break; 566 break;
567 case 0x2025e:
568 if (!IS_QLA82XX(ha) || vha != base_vha) {
569 qla_printk(KERN_INFO, ha,
570 "FCoE ctx reset not supported for host%ld.\n",
571 vha->host_no);
572 return count;
573 }
574
575 qla_printk(KERN_INFO, ha,
576 "Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
577 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
578 qla2xxx_wake_dpc(vha);
579 qla2x00_wait_for_fcoe_ctx_reset(vha);
580 break;
556 } 581 }
557 return count; 582 return count;
558} 583}
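
The new 0x2025e reset option follows the driver's usual DPC handshake: set a flag bit, wake the DPC kthread, then block until the worker reports the FCoE context reset finished. A condensed pthread model of that set-flag / wake / wait shape (all names invented; the real driver uses set_bit(), a kthread, and wait loops rather than condition variables):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int reset_needed, reset_done;

    /* stand-in for the DPC kthread */
    static void *dpc_thread(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&m);
            while (!reset_needed)
                    pthread_cond_wait(&cv, &m);
            reset_needed = 0;
            printf("dpc: performing context reset\n");
            reset_done = 1;
            pthread_cond_broadcast(&cv);
            pthread_mutex_unlock(&m);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, dpc_thread, NULL);

            pthread_mutex_lock(&m);
            reset_needed = 1;               /* set_bit(FCOE_CTX_RESET_NEEDED, ...) */
            pthread_cond_broadcast(&cv);    /* qla2xxx_wake_dpc() */
            while (!reset_done)             /* qla2x00_wait_for_fcoe_ctx_reset() */
                    pthread_cond_wait(&cv, &m);
            pthread_mutex_unlock(&m);

            printf("reset complete\n");
            pthread_join(t, NULL);
            return 0;
    }
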
@@ -838,7 +863,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
838 continue; 863 continue;
839 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) 864 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
840 continue; 865 continue;
841 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw)) 866 if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
842 continue; 867 continue;
843 868
844 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 869 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -862,7 +887,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
862 continue; 887 continue;
863 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) 888 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
864 continue; 889 continue;
 865 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha)) 890 if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
866 continue; 891 continue;
867 892
868 sysfs_remove_bin_file(&host->shost_gendev.kobj, 893 sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -968,7 +993,8 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
968 int len = 0; 993 int len = 0;
969 994
970 if (atomic_read(&vha->loop_state) == LOOP_DOWN || 995 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
971 atomic_read(&vha->loop_state) == LOOP_DEAD) 996 atomic_read(&vha->loop_state) == LOOP_DEAD ||
997 vha->device_flags & DFLG_NO_CABLE)
972 len = snprintf(buf, PAGE_SIZE, "Link Down\n"); 998 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
973 else if (atomic_read(&vha->loop_state) != LOOP_READY || 999 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
974 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1000 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
@@ -1179,15 +1205,15 @@ qla24xx_84xx_fw_version_show(struct device *dev,
1179 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1205 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1180 struct qla_hw_data *ha = vha->hw; 1206 struct qla_hw_data *ha = vha->hw;
1181 1207
1182 if (IS_QLA84XX(ha) && ha->cs84xx) { 1208 if (!IS_QLA84XX(ha))
1183 if (ha->cs84xx->op_fw_version == 0) { 1209 return snprintf(buf, PAGE_SIZE, "\n");
1184 rval = qla84xx_verify_chip(vha, status); 1210
1185 } 1211 if (ha->cs84xx && ha->cs84xx->op_fw_version == 0)
1212 rval = qla84xx_verify_chip(vha, status);
1186 1213
1187 if ((rval == QLA_SUCCESS) && (status[0] == 0)) 1214 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1188 return snprintf(buf, PAGE_SIZE, "%u\n", 1215 return snprintf(buf, PAGE_SIZE, "%u\n",
1189 (uint32_t)ha->cs84xx->op_fw_version); 1216 (uint32_t)ha->cs84xx->op_fw_version);
1190 }
1191 1217
1192 return snprintf(buf, PAGE_SIZE, "\n"); 1218 return snprintf(buf, PAGE_SIZE, "\n");
1193} 1219}
@@ -1237,7 +1263,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1237{ 1263{
1238 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1264 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1239 1265
1240 if (!IS_QLA81XX(vha->hw)) 1266 if (!IS_QLA8XXX_TYPE(vha->hw))
1241 return snprintf(buf, PAGE_SIZE, "\n"); 1267 return snprintf(buf, PAGE_SIZE, "\n");
1242 1268
1243 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); 1269 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@@ -1249,7 +1275,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
1249{ 1275{
1250 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1276 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1251 1277
1252 if (!IS_QLA81XX(vha->hw)) 1278 if (!IS_QLA8XXX_TYPE(vha->hw))
1253 return snprintf(buf, PAGE_SIZE, "\n"); 1279 return snprintf(buf, PAGE_SIZE, "\n");
1254 1280
1255 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", 1281 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1706,6 +1732,22 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1706 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1732 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1707 } 1733 }
1708 1734
1735 if (IS_QLA25XX(ha) && ql2xenabledif) {
1736 if (ha->fw_attributes & BIT_4) {
1737 vha->flags.difdix_supported = 1;
1738 DEBUG18(qla_printk(KERN_INFO, ha,
1739 "Registering for DIF/DIX type 1 and 3"
1740 " protection.\n"));
1741 scsi_host_set_prot(vha->host,
1742 SHOST_DIF_TYPE1_PROTECTION
1743 | SHOST_DIF_TYPE3_PROTECTION
1744 | SHOST_DIX_TYPE1_PROTECTION
1745 | SHOST_DIX_TYPE3_PROTECTION);
1746 scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
1747 } else
1748 vha->flags.difdix_supported = 0;
1749 }
1750
1709 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, 1751 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1710 &ha->pdev->dev)) { 1752 &ha->pdev->dev)) {
1711 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", 1753 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
@@ -1825,582 +1867,6 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1825 return 0; 1867 return 0;
1826} 1868}
1827 1869
1828/* BSG support for ELS/CT pass through */
1829inline srb_t *
1830qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1831{
1832 srb_t *sp;
1833 struct qla_hw_data *ha = vha->hw;
1834 struct srb_bsg_ctx *ctx;
1835
1836 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1837 if (!sp)
1838 goto done;
1839 ctx = kzalloc(size, GFP_KERNEL);
1840 if (!ctx) {
1841 mempool_free(sp, ha->srb_mempool);
1842 goto done;
1843 }
1844
1845 memset(sp, 0, sizeof(*sp));
1846 sp->fcport = fcport;
1847 sp->ctx = ctx;
1848done:
1849 return sp;
1850}
1851
1852static int
1853qla2x00_process_els(struct fc_bsg_job *bsg_job)
1854{
1855 struct fc_rport *rport;
1856 fc_port_t *fcport;
1857 struct Scsi_Host *host;
1858 scsi_qla_host_t *vha;
1859 struct qla_hw_data *ha;
1860 srb_t *sp;
1861 const char *type;
1862 int req_sg_cnt, rsp_sg_cnt;
1863 int rval = (DRIVER_ERROR << 16);
1864 uint16_t nextlid = 0;
1865 struct srb_bsg *els;
1866
1867 /* Multiple SG's are not supported for ELS requests */
1868 if (bsg_job->request_payload.sg_cnt > 1 ||
1869 bsg_job->reply_payload.sg_cnt > 1) {
1870 DEBUG2(printk(KERN_INFO
1871 "multiple SG's are not supported for ELS requests"
1872 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1873 bsg_job->request_payload.sg_cnt,
1874 bsg_job->reply_payload.sg_cnt));
1875 rval = -EPERM;
1876 goto done;
1877 }
1878
1879 /* ELS request for rport */
1880 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1881 rport = bsg_job->rport;
1882 fcport = *(fc_port_t **) rport->dd_data;
1883 host = rport_to_shost(rport);
1884 vha = shost_priv(host);
1885 ha = vha->hw;
1886 type = "FC_BSG_RPT_ELS";
1887
1888 /* make sure the rport is logged in,
1889 * if not perform fabric login
1890 */
1891 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1892 DEBUG2(qla_printk(KERN_WARNING, ha,
1893 "failed to login port %06X for ELS passthru\n",
1894 fcport->d_id.b24));
1895 rval = -EIO;
1896 goto done;
1897 }
1898 } else {
1899 host = bsg_job->shost;
1900 vha = shost_priv(host);
1901 ha = vha->hw;
1902 type = "FC_BSG_HST_ELS_NOLOGIN";
1903
1904 /* Allocate a dummy fcport structure, since functions
1905 * preparing the IOCB and mailbox command retrieves port
1906 * specific information from fcport structure. For Host based
1907 * ELS commands there will be no fcport structure allocated
1908 */
1909 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1910 if (!fcport) {
1911 rval = -ENOMEM;
1912 goto done;
1913 }
1914
1915 /* Initialize all required fields of fcport */
1916 fcport->vha = vha;
1917 fcport->vp_idx = vha->vp_idx;
1918 fcport->d_id.b.al_pa =
1919 bsg_job->request->rqst_data.h_els.port_id[0];
1920 fcport->d_id.b.area =
1921 bsg_job->request->rqst_data.h_els.port_id[1];
1922 fcport->d_id.b.domain =
1923 bsg_job->request->rqst_data.h_els.port_id[2];
1924 fcport->loop_id =
1925 (fcport->d_id.b.al_pa == 0xFD) ?
1926 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1927 }
1928
1929 if (!vha->flags.online) {
1930 DEBUG2(qla_printk(KERN_WARNING, ha,
1931 "host not online\n"));
1932 rval = -EIO;
1933 goto done;
1934 }
1935
1936 req_sg_cnt =
1937 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1938 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1939 if (!req_sg_cnt) {
1940 rval = -ENOMEM;
1941 goto done_free_fcport;
1942 }
1943 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1944 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1945 if (!rsp_sg_cnt) {
1946 rval = -ENOMEM;
1947 goto done_free_fcport;
1948 }
1949
1950 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1951 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
1952 {
1953 DEBUG2(printk(KERN_INFO
1954 "dma mapping resulted in different sg counts \
1955 [request_sg_cnt: %x dma_request_sg_cnt: %x\
1956 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1957 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1958 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1959 rval = -EAGAIN;
1960 goto done_unmap_sg;
1961 }
1962
1963 /* Alloc SRB structure */
1964 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1965 if (!sp) {
1966 rval = -ENOMEM;
1967 goto done_unmap_sg;
1968 }
1969
1970 els = sp->ctx;
1971 els->ctx.type =
1972 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1973 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1974 els->bsg_job = bsg_job;
1975
1976 DEBUG2(qla_printk(KERN_INFO, ha,
1977 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1978 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1979 bsg_job->request->rqst_data.h_els.command_code,
1980 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1981 fcport->d_id.b.al_pa));
1982
1983 rval = qla2x00_start_sp(sp);
1984 if (rval != QLA_SUCCESS) {
1985 kfree(sp->ctx);
1986 mempool_free(sp, ha->srb_mempool);
1987 rval = -EIO;
1988 goto done_unmap_sg;
1989 }
1990 return rval;
1991
1992done_unmap_sg:
1993 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1994 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1995 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1996 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1997 goto done_free_fcport;
1998
1999done_free_fcport:
2000 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
2001 kfree(fcport);
2002done:
2003 return rval;
2004}
2005
2006static int
2007qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2008{
2009 srb_t *sp;
2010 struct Scsi_Host *host = bsg_job->shost;
2011 scsi_qla_host_t *vha = shost_priv(host);
2012 struct qla_hw_data *ha = vha->hw;
2013 int rval = (DRIVER_ERROR << 16);
2014 int req_sg_cnt, rsp_sg_cnt;
2015 uint16_t loop_id;
2016 struct fc_port *fcport;
2017 char *type = "FC_BSG_HST_CT";
2018 struct srb_bsg *ct;
2019
2020 /* pass through is supported only for ISP 4Gb or higher */
2021 if (!IS_FWI2_CAPABLE(ha)) {
2022 DEBUG2(qla_printk(KERN_INFO, ha,
2023 "scsi(%ld):Firmware is not capable to support FC "
2024 "CT pass thru\n", vha->host_no));
2025 rval = -EPERM;
2026 goto done;
2027 }
2028
2029 req_sg_cnt =
2030 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2031 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2032 if (!req_sg_cnt) {
2033 rval = -ENOMEM;
2034 goto done;
2035 }
2036
2037 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2038 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2039 if (!rsp_sg_cnt) {
2040 rval = -ENOMEM;
2041 goto done;
2042 }
2043
2044 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2045 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2046 {
2047 DEBUG2(qla_printk(KERN_WARNING, ha,
2048 "dma mapping resulted in different sg counts \
2049 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2050 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2051 bsg_job->request_payload.sg_cnt, req_sg_cnt,
2052 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2053 rval = -EAGAIN;
2054 goto done_unmap_sg;
2055 }
2056
2057 if (!vha->flags.online) {
2058 DEBUG2(qla_printk(KERN_WARNING, ha,
2059 "host not online\n"));
2060 rval = -EIO;
2061 goto done_unmap_sg;
2062 }
2063
2064 loop_id =
2065 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2066 >> 24;
2067 switch (loop_id) {
2068 case 0xFC:
2069 loop_id = cpu_to_le16(NPH_SNS);
2070 break;
2071 case 0xFA:
2072 loop_id = vha->mgmt_svr_loop_id;
2073 break;
2074 default:
2075 DEBUG2(qla_printk(KERN_INFO, ha,
2076 "Unknown loop id: %x\n", loop_id));
2077 rval = -EINVAL;
2078 goto done_unmap_sg;
2079 }
2080
2081	/* Allocate a dummy fcport structure, since the functions preparing
2082	 * the IOCB and mailbox command retrieve port specific information
2083	 * from the fcport structure. For host based CT commands there is
2084	 * no fcport structure already allocated.
2085	 */
2086 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2087 if (!fcport)
2088 {
2089 rval = -ENOMEM;
2090 goto done_unmap_sg;
2091 }
2092
2093 /* Initialize all required fields of fcport */
2094 fcport->vha = vha;
2095 fcport->vp_idx = vha->vp_idx;
2096 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2097 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2098 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2099 fcport->loop_id = loop_id;
2100
2101 /* Alloc SRB structure */
2102 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2103 if (!sp) {
2104 rval = -ENOMEM;
2105 goto done_free_fcport;
2106 }
2107
2108 ct = sp->ctx;
2109 ct->ctx.type = SRB_CT_CMD;
2110 ct->bsg_job = bsg_job;
2111
2112 DEBUG2(qla_printk(KERN_INFO, ha,
2113 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2114 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2115 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2116 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2117 fcport->d_id.b.al_pa));
2118
2119 rval = qla2x00_start_sp(sp);
2120 if (rval != QLA_SUCCESS) {
2121 kfree(sp->ctx);
2122 mempool_free(sp, ha->srb_mempool);
2123 rval = -EIO;
2124 goto done_free_fcport;
2125 }
2126 return rval;
2127
2128done_free_fcport:
2129 kfree(fcport);
2130done_unmap_sg:
2131 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2132 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2133 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2134 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2135done:
2136 return rval;
2137}
2138
2139static int
2140qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2141{
2142 struct Scsi_Host *host = bsg_job->shost;
2143 scsi_qla_host_t *vha = shost_priv(host);
2144 struct qla_hw_data *ha = vha->hw;
2145 int rval;
2146 uint8_t command_sent;
2147 uint32_t vendor_cmd;
2148 char *type;
2149 struct msg_echo_lb elreq;
2150 uint16_t response[MAILBOX_REGISTER_COUNT];
2151 uint8_t* fw_sts_ptr;
2152 uint8_t *req_data;
2153 dma_addr_t req_data_dma;
2154 uint32_t req_data_len;
2155 uint8_t *rsp_data;
2156 dma_addr_t rsp_data_dma;
2157 uint32_t rsp_data_len;
2158
2159 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2160 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2161 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2162 rval = -EBUSY;
2163 goto done;
2164 }
2165
2166 if (!vha->flags.online) {
2167 DEBUG2(qla_printk(KERN_WARNING, ha,
2168 "host not online\n"));
2169 rval = -EIO;
2170 goto done;
2171 }
2172
2173 elreq.req_sg_cnt =
2174 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2175 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2176 if (!elreq.req_sg_cnt) {
2177 rval = -ENOMEM;
2178 goto done;
2179 }
2180 elreq.rsp_sg_cnt =
2181 dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2182 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2183 if (!elreq.rsp_sg_cnt) {
2184 rval = -ENOMEM;
2185 goto done;
2186 }
2187
2188 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2189 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2190 {
2191 DEBUG2(printk(KERN_INFO
2192 "dma mapping resulted in different sg counts \
2193 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2194 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2195 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2196 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2197 rval = -EAGAIN;
2198 goto done_unmap_sg;
2199 }
2200 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2201 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2202 &req_data_dma, GFP_KERNEL);
2203
2204 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2205 &rsp_data_dma, GFP_KERNEL);
2206
2207 /* Copy the request buffer in req_data now */
2208 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2209 bsg_job->request_payload.sg_cnt, req_data,
2210 req_data_len);
2211
2212 elreq.send_dma = req_data_dma;
2213 elreq.rcv_dma = rsp_data_dma;
2214 elreq.transfer_size = req_data_len;
2215
2216 /* Vendor cmd : loopback or ECHO diagnostic
2217 * Options:
2218 * Loopback : Either internal or external loopback
2219 * ECHO: ECHO ELS or Vendor specific FC4 link data
2220 */
2221 vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2222 elreq.options =
2223 *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2224 + 1);
2225
2226 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2227 case QL_VND_LOOPBACK:
2228 if (ha->current_topology != ISP_CFG_F) {
2229 type = "FC_BSG_HST_VENDOR_LOOPBACK";
2230
2231 DEBUG2(qla_printk(KERN_INFO, ha,
2232 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2233 vha->host_no, type, vendor_cmd, elreq.options));
2234
2235 command_sent = INT_DEF_LB_LOOPBACK_CMD;
2236 rval = qla2x00_loopback_test(vha, &elreq, response);
2237 if (IS_QLA81XX(ha)) {
2238 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2239 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2240 "ISP\n", __func__, vha->host_no));
2241 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2242 qla2xxx_wake_dpc(vha);
2243 }
2244 }
2245 } else {
2246 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2247 DEBUG2(qla_printk(KERN_INFO, ha,
2248 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2249 vha->host_no, type, vendor_cmd, elreq.options));
2250
2251 command_sent = INT_DEF_LB_ECHO_CMD;
2252 rval = qla2x00_echo_test(vha, &elreq, response);
2253 }
2254 break;
2255 case QLA84_RESET:
2256 if (!IS_QLA84XX(vha->hw)) {
2257 rval = -EINVAL;
2258 DEBUG16(printk(
2259 "%s(%ld): 8xxx exiting.\n",
2260 __func__, vha->host_no));
2261 return rval;
2262 }
2263 rval = qla84xx_reset(vha, &elreq, bsg_job);
2264 break;
2265 case QLA84_MGMT_CMD:
2266 if (!IS_QLA84XX(vha->hw)) {
2267 rval = -EINVAL;
2268 DEBUG16(printk(
2269 "%s(%ld): 8xxx exiting.\n",
2270 __func__, vha->host_no));
2271 return rval;
2272 }
2273 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2274 break;
2275 default:
2276 rval = -ENOSYS;
2277 }
2278
2279 if (rval != QLA_SUCCESS) {
2280 DEBUG2(qla_printk(KERN_WARNING, ha,
2281 "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2282 rval = 0;
2283 bsg_job->reply->result = (DID_ERROR << 16);
2284 bsg_job->reply->reply_payload_rcv_len = 0;
2285 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2286 memcpy( fw_sts_ptr, response, sizeof(response));
2287 fw_sts_ptr += sizeof(response);
2288 *fw_sts_ptr = command_sent;
2289 } else {
2290 DEBUG2(qla_printk(KERN_WARNING, ha,
2291 "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2292 rval = bsg_job->reply->result = 0;
2293 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2294 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2295 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2296 memcpy(fw_sts_ptr, response, sizeof(response));
2297 fw_sts_ptr += sizeof(response);
2298 *fw_sts_ptr = command_sent;
2299 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2300 bsg_job->reply_payload.sg_cnt, rsp_data,
2301 rsp_data_len);
2302 }
2303 bsg_job->job_done(bsg_job);
2304
2305done_unmap_sg:
2306
2307 if(req_data)
2308 dma_free_coherent(&ha->pdev->dev, req_data_len,
2309 req_data, req_data_dma);
2310 dma_unmap_sg(&ha->pdev->dev,
2311 bsg_job->request_payload.sg_list,
2312 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2313 dma_unmap_sg(&ha->pdev->dev,
2314 bsg_job->reply_payload.sg_list,
2315 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2316
2317done:
2318 return rval;
2319}
2320
2321static int
2322qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2323{
2324 int ret = -EINVAL;
2325
2326 switch (bsg_job->request->msgcode) {
2327 case FC_BSG_RPT_ELS:
2328 case FC_BSG_HST_ELS_NOLOGIN:
2329 ret = qla2x00_process_els(bsg_job);
2330 break;
2331 case FC_BSG_HST_CT:
2332 ret = qla2x00_process_ct(bsg_job);
2333 break;
2334 case FC_BSG_HST_VENDOR:
2335 ret = qla2x00_process_vendor_specific(bsg_job);
2336 break;
2337 case FC_BSG_HST_ADD_RPORT:
2338 case FC_BSG_HST_DEL_RPORT:
2339 case FC_BSG_RPT_CT:
2340 default:
2341 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2342 break;
2343 }
2344 return ret;
2345}
2346
2347static int
2348qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2349{
2350 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2351 struct qla_hw_data *ha = vha->hw;
2352 srb_t *sp;
2353 int cnt, que;
2354 unsigned long flags;
2355 struct req_que *req;
2356 struct srb_bsg *sp_bsg;
2357
2358 /* find the bsg job from the active list of commands */
2359 spin_lock_irqsave(&ha->hardware_lock, flags);
2360 for (que = 0; que < ha->max_req_queues; que++) {
2361 req = ha->req_q_map[que];
2362 if (!req)
2363 continue;
2364
2365 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
2366 sp = req->outstanding_cmds[cnt];
2367
2368 if (sp) {
2369 sp_bsg = (struct srb_bsg*)sp->ctx;
2370
2371 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2372 (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
2373 || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2374 (sp_bsg->bsg_job == bsg_job)) {
2375 if (ha->isp_ops->abort_command(sp)) {
2376 DEBUG2(qla_printk(KERN_INFO, ha,
2377 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2378 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2379 } else {
2380 DEBUG2(qla_printk(KERN_INFO, ha,
2381 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2382 bsg_job->req->errors = bsg_job->reply->result = 0;
2383 }
2384 goto done;
2385 }
2386 }
2387 }
2388 }
2389 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2390 DEBUG2(qla_printk(KERN_INFO, ha,
2391 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2392 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2393 return 0;
2394
2395done:
2396 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2397 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2398 kfree(sp->fcport);
2399 kfree(sp->ctx);
2400 mempool_free(sp, ha->srb_mempool);
2401 return 0;
2402}
2403
1870struct fc_function_template qla2xxx_transport_functions = {
1871
1872	.show_host_node_name = 1,
@@ -2502,7 +1968,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1968	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1969	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1970
2505	if (IS_QLA81XX(ha))
1971	if (IS_QLA8XXX_TYPE(ha))
1972		speed = FC_PORTSPEED_10GBIT;
1973	else if (IS_QLA25XX(ha))
1974		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -2516,125 +1982,3 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1982		speed = FC_PORTSPEED_1GBIT;
1983	fc_host_supported_speeds(vha->host) = speed;
1984}
2519static int
2520qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2521{
2522 int ret = 0;
2523 int cmd;
2524 uint16_t cmd_status;
2525
2526 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2527
2528 cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2529 == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2530 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2531 ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2532 &cmd_status);
2533 return ret;
2534}
2535
2536static int
2537qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2538{
2539 struct access_chip_84xx *mn;
2540 dma_addr_t mn_dma, mgmt_dma;
2541 void *mgmt_b = NULL;
2542 int ret = 0;
2543 int rsp_hdr_len, len = 0;
2544 struct qla84_msg_mgmt *ql84_mgmt;
2545
2546 ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
2547 ql84_mgmt->cmd =
2548 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2549 ql84_mgmt->mgmtp.u.mem.start_addr =
2550 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2551 ql84_mgmt->len =
2552 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2553 ql84_mgmt->mgmtp.u.config.id =
2554 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2555 ql84_mgmt->mgmtp.u.config.param0 =
2556 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2557 ql84_mgmt->mgmtp.u.config.param1 =
2558 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2559 ql84_mgmt->mgmtp.u.info.type =
2560 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2561 ql84_mgmt->mgmtp.u.info.context =
2562 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2563
2564 rsp_hdr_len = bsg_job->request_payload.payload_len;
2565
2566 mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2567 if (mn == NULL) {
2568 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
2569 "failed%lu\n", __func__, ha->host_no));
2570 return -ENOMEM;
2571 }
2572
2573 memset(mn, 0, sizeof (struct access_chip_84xx));
2574
2575 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2576 mn->entry_count = 1;
2577
2578 switch (ql84_mgmt->cmd) {
2579 case QLA84_MGMT_READ_MEM:
2580 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2581 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2582 break;
2583 case QLA84_MGMT_WRITE_MEM:
2584 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2585 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2586 break;
2587 case QLA84_MGMT_CHNG_CONFIG:
2588 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2589 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2590 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2591 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2592 break;
2593 case QLA84_MGMT_GET_INFO:
2594 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2595 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2596 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2597 break;
2598 default:
2599 ret = -EIO;
2600 goto exit_mgmt0;
2601 }
2602
2603 if ((len == ql84_mgmt->len) &&
2604 ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2605 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2606 &mgmt_dma, GFP_KERNEL);
2607 if (mgmt_b == NULL) {
2608 DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
2609 "failed%lu\n", __func__, ha->host_no));
2610 ret = -ENOMEM;
2611 goto exit_mgmt0;
2612 }
2613 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2614 mn->dseg_count = cpu_to_le16(1);
2615 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2616 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2617 mn->dseg_length = cpu_to_le32(len);
2618
2619 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2620 memcpy(mgmt_b, ql84_mgmt->payload, len);
2621 }
2622 }
2623
2624 ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2625 if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
2626 || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
2627 if (ret != QLA_SUCCESS)
2628 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2629 __func__, ha->host_no));
2630 } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
2631 (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
2632 }
2633
2634 if (mgmt_b)
2635 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2636
2637exit_mgmt0:
2638 dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
2639 return ret;
2640}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
new file mode 100644
index 000000000000..b905dfe5ea61
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -0,0 +1,1212 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/delay.h>
12
13/* BSG support for ELS/CT pass through */
14inline srb_t *
15qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
16{
17 srb_t *sp;
18 struct qla_hw_data *ha = vha->hw;
19 struct srb_ctx *ctx;
20
21 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22 if (!sp)
23 goto done;
24 ctx = kzalloc(size, GFP_KERNEL);
25 if (!ctx) {
26 mempool_free(sp, ha->srb_mempool);
27 sp = NULL;
28 goto done;
29 }
30
31 memset(sp, 0, sizeof(*sp));
32 sp->fcport = fcport;
33 sp->ctx = ctx;
34done:
35 return sp;
36}
37
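The helper above pairs a mempool_alloc() for the srb_t with a kzalloc() for its context, so every failure path later in this file has to free both pieces. A hypothetical helper, not part of this patch, shown only to make the alloc/free pairing explicit:

/* Hypothetical helper; the driver open-codes this in each error path. */
static inline void
qla2x00_free_ctx_bsg_sp(struct qla_hw_data *ha, srb_t *sp)
{
	kfree(sp->ctx);				/* undoes the kzalloc() */
	mempool_free(sp, ha->srb_mempool);	/* undoes the mempool_alloc() */
}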
38int
39qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
40{
41 int i, ret, num_valid;
42 uint8_t *bcode;
43 struct qla_fcp_prio_entry *pri_entry;
44
45 ret = 1;
46 num_valid = 0;
47 bcode = (uint8_t *)pri_cfg;
48
49 if (bcode[0x0] != 'H' || bcode[0x1] != 'Q' || bcode[0x2] != 'O' ||
50 bcode[0x3] != 'S') {
51 return 0;
52 }
53 if (flag != 1)
54 return ret;
55
56 pri_entry = &pri_cfg->entry[0];
57 for (i = 0; i < pri_cfg->num_entries; i++) {
58 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
59 num_valid++;
60 pri_entry++;
61 }
62
63 if (num_valid == 0)
64 ret = 0;
65
66 return ret;
67}
68
69static int
70qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
71{
72 struct Scsi_Host *host = bsg_job->shost;
73 scsi_qla_host_t *vha = shost_priv(host);
74 struct qla_hw_data *ha = vha->hw;
75 int ret = 0;
76 uint32_t len;
77 uint32_t oper;
78
79 bsg_job->reply->reply_payload_rcv_len = 0;
80
81 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
82 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
83 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
84 ret = -EBUSY;
85 goto exit_fcp_prio_cfg;
86 }
87
88 /* Get the sub command */
89 oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
90
91 /* Only set config is allowed if config memory is not allocated */
92 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
93 ret = -EINVAL;
94 goto exit_fcp_prio_cfg;
95 }
96 switch (oper) {
97 case QLFC_FCP_PRIO_DISABLE:
98 if (ha->flags.fcp_prio_enabled) {
99 ha->flags.fcp_prio_enabled = 0;
100 ha->fcp_prio_cfg->attributes &=
101 ~FCP_PRIO_ATTR_ENABLE;
102 qla24xx_update_all_fcp_prio(vha);
103 bsg_job->reply->result = DID_OK;
104 } else {
105 ret = -EINVAL;
106 bsg_job->reply->result = (DID_ERROR << 16);
107 goto exit_fcp_prio_cfg;
108 }
109 break;
110
111 case QLFC_FCP_PRIO_ENABLE:
112 if (!ha->flags.fcp_prio_enabled) {
113 if (ha->fcp_prio_cfg) {
114 ha->flags.fcp_prio_enabled = 1;
115 ha->fcp_prio_cfg->attributes |=
116 FCP_PRIO_ATTR_ENABLE;
117 qla24xx_update_all_fcp_prio(vha);
118 bsg_job->reply->result = DID_OK;
119 } else {
120 ret = -EINVAL;
121 bsg_job->reply->result = (DID_ERROR << 16);
122 goto exit_fcp_prio_cfg;
123 }
124 }
125 break;
126
127 case QLFC_FCP_PRIO_GET_CONFIG:
128 len = bsg_job->reply_payload.payload_len;
129 if (!len || len > FCP_PRIO_CFG_SIZE) {
130 ret = -EINVAL;
131 bsg_job->reply->result = (DID_ERROR << 16);
132 goto exit_fcp_prio_cfg;
133 }
134
135 bsg_job->reply->result = DID_OK;
136 bsg_job->reply->reply_payload_rcv_len =
137 sg_copy_from_buffer(
138 bsg_job->reply_payload.sg_list,
139 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
140 len);
141
142 break;
143
144 case QLFC_FCP_PRIO_SET_CONFIG:
145 len = bsg_job->request_payload.payload_len;
146 if (!len || len > FCP_PRIO_CFG_SIZE) {
147 bsg_job->reply->result = (DID_ERROR << 16);
148 ret = -EINVAL;
149 goto exit_fcp_prio_cfg;
150 }
151
152 if (!ha->fcp_prio_cfg) {
153 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
154 if (!ha->fcp_prio_cfg) {
155 qla_printk(KERN_WARNING, ha,
156 "Unable to allocate memory "
157 "for fcp prio config data (%x).\n",
158 FCP_PRIO_CFG_SIZE);
159 bsg_job->reply->result = (DID_ERROR << 16);
160 ret = -ENOMEM;
161 goto exit_fcp_prio_cfg;
162 }
163 }
164
165 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
166 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
167 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
168 FCP_PRIO_CFG_SIZE);
169
170 /* validate fcp priority data */
171 if (!qla24xx_fcp_prio_cfg_valid(
172 (struct qla_fcp_prio_cfg *)
173 ha->fcp_prio_cfg, 1)) {
174 bsg_job->reply->result = (DID_ERROR << 16);
175 ret = -EINVAL;
176			/* If the buffer was invalid, the
177			 * fcp_prio_cfg data is of no use
178			 */
179 vfree(ha->fcp_prio_cfg);
180 ha->fcp_prio_cfg = NULL;
181 goto exit_fcp_prio_cfg;
182 }
183
184 ha->flags.fcp_prio_enabled = 0;
185 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
186 ha->flags.fcp_prio_enabled = 1;
187 qla24xx_update_all_fcp_prio(vha);
188 bsg_job->reply->result = DID_OK;
189 break;
190 default:
191 ret = -EINVAL;
192 break;
193 }
194exit_fcp_prio_cfg:
195 bsg_job->job_done(bsg_job);
196 return ret;
197}
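For context, a request reaches qla24xx_proc_fcp_prio_cfg_cmd() through the host's FC BSG node. A user-space sketch follows; the /dev/bsg node name, the two-word vendor_cmd sizing, and the visibility of the QL_VND_* and scsi_bsg_fc.h definitions to the application (they are driver/kernel headers, assumed mirrored) are all assumptions:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_fc.h>
#include "qla_bsg.h"	/* QL_VND_* values (driver-private, assumed copied) */

int qlfc_prio_vendor_op(const char *bsg_dev, uint32_t sub_op)
{
	/* vendor_cmd[] is a flexible array; the union reserves room for
	 * the two words this driver reads and keeps the struct aligned. */
	union {
		struct fc_bsg_request req;
		uint8_t pad[sizeof(struct fc_bsg_request) + 2 * sizeof(uint32_t)];
	} u;
	struct fc_bsg_reply rep;
	struct sg_io_v4 io;
	int fd, ret;

	fd = open(bsg_dev, O_RDWR);	/* e.g. "/dev/bsg/fc_host0" */
	if (fd < 0)
		return -1;

	memset(&u, 0, sizeof(u));
	u.req.msgcode = FC_BSG_HST_VENDOR;
	u.req.rqst_data.h_vendor.vendor_cmd[0] = QL_VND_FCP_PRIO_CFG_CMD;
	u.req.rqst_data.h_vendor.vendor_cmd[1] = sub_op;	/* get/set/enable/disable */

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)&u.req;
	io.request_len = sizeof(u);
	io.response = (uintptr_t)&rep;
	io.max_response_len = sizeof(rep);

	ret = ioctl(fd, SG_IO, &io);
	close(fd);
	return ret;
}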
198static int
199qla2x00_process_els(struct fc_bsg_job *bsg_job)
200{
201 struct fc_rport *rport;
202 fc_port_t *fcport;
203 struct Scsi_Host *host;
204 scsi_qla_host_t *vha;
205 struct qla_hw_data *ha;
206 srb_t *sp;
207 const char *type;
208 int req_sg_cnt, rsp_sg_cnt;
209 int rval = (DRIVER_ERROR << 16);
210 uint16_t nextlid = 0;
211 struct srb_ctx *els;
212
213 /* Multiple SG's are not supported for ELS requests */
214 if (bsg_job->request_payload.sg_cnt > 1 ||
215 bsg_job->reply_payload.sg_cnt > 1) {
216 DEBUG2(printk(KERN_INFO
217 "multiple SG's are not supported for ELS requests"
218 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
219 bsg_job->request_payload.sg_cnt,
220 bsg_job->reply_payload.sg_cnt));
221 rval = -EPERM;
222 goto done;
223 }
224
225 /* ELS request for rport */
226 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
227 rport = bsg_job->rport;
228 fcport = *(fc_port_t **) rport->dd_data;
229 host = rport_to_shost(rport);
230 vha = shost_priv(host);
231 ha = vha->hw;
232 type = "FC_BSG_RPT_ELS";
233
234 /* make sure the rport is logged in,
235 * if not perform fabric login
236 */
237 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
238 DEBUG2(qla_printk(KERN_WARNING, ha,
239 "failed to login port %06X for ELS passthru\n",
240 fcport->d_id.b24));
241 rval = -EIO;
242 goto done;
243 }
244 } else {
245 host = bsg_job->shost;
246 vha = shost_priv(host);
247 ha = vha->hw;
248 type = "FC_BSG_HST_ELS_NOLOGIN";
249
250		/* Allocate a dummy fcport structure, since the functions
251		 * preparing the IOCB and mailbox command retrieve port
252		 * specific information from the fcport structure. For host
253		 * based ELS commands no fcport structure is pre-allocated.
254		 */
255 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
256 if (!fcport) {
257 rval = -ENOMEM;
258 goto done;
259 }
260
261 /* Initialize all required fields of fcport */
262 fcport->vha = vha;
263 fcport->vp_idx = vha->vp_idx;
264 fcport->d_id.b.al_pa =
265 bsg_job->request->rqst_data.h_els.port_id[0];
266 fcport->d_id.b.area =
267 bsg_job->request->rqst_data.h_els.port_id[1];
268 fcport->d_id.b.domain =
269 bsg_job->request->rqst_data.h_els.port_id[2];
270 fcport->loop_id =
271 (fcport->d_id.b.al_pa == 0xFD) ?
272 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
273 }
274
275 if (!vha->flags.online) {
276 DEBUG2(qla_printk(KERN_WARNING, ha,
277 "host not online\n"));
278 rval = -EIO;
279 goto done;
280 }
281
282 req_sg_cnt =
283 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
284 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
285 if (!req_sg_cnt) {
286 rval = -ENOMEM;
287 goto done_free_fcport;
288 }
289
290 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
291 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
292 if (!rsp_sg_cnt) {
293 rval = -ENOMEM;
294 goto done_free_fcport;
295 }
296
297 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
298 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
299 DEBUG2(printk(KERN_INFO
300 "dma mapping resulted in different sg counts \
301 [request_sg_cnt: %x dma_request_sg_cnt: %x\
302 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
303 bsg_job->request_payload.sg_cnt, req_sg_cnt,
304 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
305 rval = -EAGAIN;
306 goto done_unmap_sg;
307 }
308
309 /* Alloc SRB structure */
310 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
311 if (!sp) {
312 rval = -ENOMEM;
313 goto done_unmap_sg;
314 }
315
316 els = sp->ctx;
317 els->type =
318 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
319 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
320 els->name =
321 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
322 "bsg_els_rpt" : "bsg_els_hst");
323 els->u.bsg_job = bsg_job;
324
325 DEBUG2(qla_printk(KERN_INFO, ha,
326 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
327 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
328 bsg_job->request->rqst_data.h_els.command_code,
329 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
330 fcport->d_id.b.al_pa));
331
332 rval = qla2x00_start_sp(sp);
333 if (rval != QLA_SUCCESS) {
334 kfree(sp->ctx);
335 mempool_free(sp, ha->srb_mempool);
336 rval = -EIO;
337 goto done_unmap_sg;
338 }
339 return rval;
340
341done_unmap_sg:
342 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
343 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
344 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
345 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
346 goto done_free_fcport;
347
348done_free_fcport:
349 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
350 kfree(fcport);
351done:
352 return rval;
353}
354
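One detail worth calling out from the function above: dma_map_sg() may legitimately return fewer entries than it was handed when an IOMMU merges adjacent segments. Because this driver sizes its firmware descriptors from the original sg_cnt, any mismatch forces the -EAGAIN unwind. A minimal sketch of the pattern (the function name is invented for illustration):

/* Sketch of the mapping check used by the BSG paths in this file. */
static int qla_bsg_map_payload(struct device *dev, struct scatterlist *sgl,
			       int sg_cnt, enum dma_data_direction dir)
{
	int nents = dma_map_sg(dev, sgl, sg_cnt, dir);

	if (!nents)
		return -ENOMEM;			/* nothing mapped */
	if (nents != sg_cnt) {			/* IOMMU merged entries */
		dma_unmap_sg(dev, sgl, sg_cnt, dir);
		return -EAGAIN;			/* caller may retry */
	}
	return nents;
}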
355static int
356qla2x00_process_ct(struct fc_bsg_job *bsg_job)
357{
358 srb_t *sp;
359 struct Scsi_Host *host = bsg_job->shost;
360 scsi_qla_host_t *vha = shost_priv(host);
361 struct qla_hw_data *ha = vha->hw;
362 int rval = (DRIVER_ERROR << 16);
363 int req_sg_cnt, rsp_sg_cnt;
364 uint16_t loop_id;
365 struct fc_port *fcport;
366 char *type = "FC_BSG_HST_CT";
367 struct srb_ctx *ct;
368
369 /* pass through is supported only for ISP 4Gb or higher */
370 if (!IS_FWI2_CAPABLE(ha)) {
371 DEBUG2(qla_printk(KERN_INFO, ha,
372 "scsi(%ld):Firmware is not capable to support FC "
373 "CT pass thru\n", vha->host_no));
374 rval = -EPERM;
375 goto done;
376 }
377
378 req_sg_cnt =
379 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
380 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
381 if (!req_sg_cnt) {
382 rval = -ENOMEM;
383 goto done;
384 }
385
386 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
387 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
388 if (!rsp_sg_cnt) {
389 rval = -ENOMEM;
390 goto done;
391 }
392
393 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
394 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
395 DEBUG2(qla_printk(KERN_WARNING, ha,
396 "[request_sg_cnt: %x dma_request_sg_cnt: %x\
397 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
398 bsg_job->request_payload.sg_cnt, req_sg_cnt,
399 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
400 rval = -EAGAIN;
401 goto done_unmap_sg;
402 }
403
404 if (!vha->flags.online) {
405 DEBUG2(qla_printk(KERN_WARNING, ha,
406 "host not online\n"));
407 rval = -EIO;
408 goto done_unmap_sg;
409 }
410
411 loop_id =
412 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
413 >> 24;
414 switch (loop_id) {
415 case 0xFC:
416 loop_id = cpu_to_le16(NPH_SNS);
417 break;
418 case 0xFA:
419 loop_id = vha->mgmt_svr_loop_id;
420 break;
421 default:
422 DEBUG2(qla_printk(KERN_INFO, ha,
423 "Unknown loop id: %x\n", loop_id));
424 rval = -EINVAL;
425 goto done_unmap_sg;
426 }
427
428	/* Allocate a dummy fcport structure, since the functions preparing
429	 * the IOCB and mailbox command retrieve port specific information
430	 * from the fcport structure. For host based CT commands there is
431	 * no fcport structure already allocated.
432	 */
433 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
434 if (!fcport) {
435 rval = -ENOMEM;
436 goto done_unmap_sg;
437 }
438
439 /* Initialize all required fields of fcport */
440 fcport->vha = vha;
441 fcport->vp_idx = vha->vp_idx;
442 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
443 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
444 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
445 fcport->loop_id = loop_id;
446
447 /* Alloc SRB structure */
448 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
449 if (!sp) {
450 rval = -ENOMEM;
451 goto done_free_fcport;
452 }
453
454 ct = sp->ctx;
455 ct->type = SRB_CT_CMD;
456 ct->name = "bsg_ct";
457 ct->u.bsg_job = bsg_job;
458
459 DEBUG2(qla_printk(KERN_INFO, ha,
460 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
461 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
462 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
463 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
464 fcport->d_id.b.al_pa));
465
466 rval = qla2x00_start_sp(sp);
467 if (rval != QLA_SUCCESS) {
468 kfree(sp->ctx);
469 mempool_free(sp, ha->srb_mempool);
470 rval = -EIO;
471 goto done_free_fcport;
472 }
473 return rval;
474
475done_free_fcport:
476 kfree(fcport);
477done_unmap_sg:
478 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
479 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
480 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
481 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
482done:
483 return rval;
484}
485
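The loop-id selection in qla2x00_process_ct() keys off the top byte of the CT preamble, which carries the low byte of the well-known fabric address the frame targets (0xFFFFFC is the directory/name server, 0xFFFFFA the management server). Restated as a standalone sketch, with 0xFFFF standing in for the driver's -EINVAL path:

/* Sketch of the routing rule above; helper name is illustrative. */
static uint16_t qla_bsg_ct_loop_id(uint32_t preamble_word1,
				   uint16_t mgmt_svr_loop_id)
{
	switch ((preamble_word1 & 0xFF000000) >> 24) {
	case 0xFC:				/* directory (name) server */
		return NPH_SNS;
	case 0xFA:				/* management server */
		return mgmt_svr_loop_id;
	default:
		return 0xFFFF;			/* unknown destination */
	}
}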
486static int
487qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
488{
489 struct Scsi_Host *host = bsg_job->shost;
490 scsi_qla_host_t *vha = shost_priv(host);
491 struct qla_hw_data *ha = vha->hw;
492 int rval;
493 uint8_t command_sent;
494 char *type;
495 struct msg_echo_lb elreq;
496 uint16_t response[MAILBOX_REGISTER_COUNT];
497 uint8_t *fw_sts_ptr;
498 uint8_t *req_data = NULL;
499 dma_addr_t req_data_dma;
500 uint32_t req_data_len;
501 uint8_t *rsp_data = NULL;
502 dma_addr_t rsp_data_dma;
503 uint32_t rsp_data_len;
504
505 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
506 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
507 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
508 return -EBUSY;
509
510 if (!vha->flags.online) {
511 DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
512 return -EIO;
513 }
514
515 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
516 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
517 DMA_TO_DEVICE);
518
519 if (!elreq.req_sg_cnt)
520 return -ENOMEM;
521
522 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
523 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
524 DMA_FROM_DEVICE);
525
526 if (!elreq.rsp_sg_cnt) {
527 rval = -ENOMEM;
528 goto done_unmap_req_sg;
529 }
530
531 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
532 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
533 DEBUG2(printk(KERN_INFO
534 "dma mapping resulted in different sg counts "
535 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
536 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
537 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
538 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
539 rval = -EAGAIN;
540 goto done_unmap_sg;
541 }
542 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
543 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
544 &req_data_dma, GFP_KERNEL);
545 if (!req_data) {
546 DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
547 "failed for host=%lu\n", __func__, vha->host_no));
548 rval = -ENOMEM;
549 goto done_unmap_sg;
550 }
551
552 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
553 &rsp_data_dma, GFP_KERNEL);
554 if (!rsp_data) {
555 DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
556 "failed for host=%lu\n", __func__, vha->host_no));
557 rval = -ENOMEM;
558 goto done_free_dma_req;
559 }
560
561 /* Copy the request buffer in req_data now */
562 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
563 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
564
565 elreq.send_dma = req_data_dma;
566 elreq.rcv_dma = rsp_data_dma;
567 elreq.transfer_size = req_data_len;
568
569 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
570
571 if (ha->current_topology != ISP_CFG_F) {
572 type = "FC_BSG_HST_VENDOR_LOOPBACK";
573 DEBUG2(qla_printk(KERN_INFO, ha,
574 "scsi(%ld) bsg rqst type: %s\n",
575 vha->host_no, type));
576
577 command_sent = INT_DEF_LB_LOOPBACK_CMD;
578 rval = qla2x00_loopback_test(vha, &elreq, response);
579 if (IS_QLA81XX(ha)) {
580 if (response[0] == MBS_COMMAND_ERROR &&
581 response[1] == MBS_LB_RESET) {
582 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
583 "ISP\n", __func__, vha->host_no));
584 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
585 qla2xxx_wake_dpc(vha);
586 }
587 }
588 } else {
589 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
590 DEBUG2(qla_printk(KERN_INFO, ha,
591 "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
592 command_sent = INT_DEF_LB_ECHO_CMD;
593 rval = qla2x00_echo_test(vha, &elreq, response);
594 }
595
596 if (rval) {
597 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
598 "request %s failed\n", vha->host_no, type));
599
600 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
601 sizeof(struct fc_bsg_reply);
602
603 memcpy(fw_sts_ptr, response, sizeof(response));
604 fw_sts_ptr += sizeof(response);
605 *fw_sts_ptr = command_sent;
606 rval = 0;
607 bsg_job->reply->reply_payload_rcv_len = 0;
608 bsg_job->reply->result = (DID_ERROR << 16);
609 } else {
610 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
611 "request %s completed\n", vha->host_no, type));
612
613 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
614 sizeof(response) + sizeof(uint8_t);
615 bsg_job->reply->reply_payload_rcv_len =
616 bsg_job->reply_payload.payload_len;
617 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
618 sizeof(struct fc_bsg_reply);
619 memcpy(fw_sts_ptr, response, sizeof(response));
620 fw_sts_ptr += sizeof(response);
621 *fw_sts_ptr = command_sent;
622 bsg_job->reply->result = DID_OK;
623 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
624 bsg_job->reply_payload.sg_cnt, rsp_data,
625 rsp_data_len);
626 }
627 bsg_job->job_done(bsg_job);
628
629 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
630 rsp_data, rsp_data_dma);
631done_free_dma_req:
632 dma_free_coherent(&ha->pdev->dev, req_data_len,
633 req_data, req_data_dma);
634done_unmap_sg:
635 dma_unmap_sg(&ha->pdev->dev,
636 bsg_job->reply_payload.sg_list,
637 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
638done_unmap_req_sg:
639 dma_unmap_sg(&ha->pdev->dev,
640 bsg_job->request_payload.sg_list,
641 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
642 return rval;
643}
644
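On both the success and failure paths above, the raw mailbox registers plus a one-byte command identifier are stashed immediately after struct fc_bsg_reply in the job's sense buffer. A consumer-side sketch of that layout (the struct and function names are invented for illustration):

struct qla_lb_fw_status {			/* hypothetical view of the layout */
	uint16_t mbx[MAILBOX_REGISTER_COUNT];	/* raw mailbox registers */
	uint8_t  command_sent;			/* INT_DEF_LB_* value */
};

static void qla_lb_unpack_status(const uint8_t *reply_buf,
				 struct qla_lb_fw_status *out)
{
	const uint8_t *p = reply_buf + sizeof(struct fc_bsg_reply);

	memcpy(out->mbx, p, sizeof(out->mbx));
	out->command_sent = p[sizeof(out->mbx)];
}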
645static int
646qla84xx_reset(struct fc_bsg_job *bsg_job)
647{
648 struct Scsi_Host *host = bsg_job->shost;
649 scsi_qla_host_t *vha = shost_priv(host);
650 struct qla_hw_data *ha = vha->hw;
651 int rval = 0;
652 uint32_t flag;
653
654 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
655 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
656 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
657 return -EBUSY;
658
659 if (!IS_QLA84XX(ha)) {
660 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
661 "exiting.\n", vha->host_no));
662 return -EINVAL;
663 }
664
665 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
666
667 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
668
669 if (rval) {
670 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
671 "request 84xx reset failed\n", vha->host_no));
672 rval = bsg_job->reply->reply_payload_rcv_len = 0;
673 bsg_job->reply->result = (DID_ERROR << 16);
674
675 } else {
676 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
677 "request 84xx reset completed\n", vha->host_no));
678 bsg_job->reply->result = DID_OK;
679 }
680
681 bsg_job->job_done(bsg_job);
682 return rval;
683}
684
685static int
686qla84xx_updatefw(struct fc_bsg_job *bsg_job)
687{
688 struct Scsi_Host *host = bsg_job->shost;
689 scsi_qla_host_t *vha = shost_priv(host);
690 struct qla_hw_data *ha = vha->hw;
691 struct verify_chip_entry_84xx *mn = NULL;
692 dma_addr_t mn_dma, fw_dma;
693 void *fw_buf = NULL;
694 int rval = 0;
695 uint32_t sg_cnt;
696 uint32_t data_len;
697 uint16_t options;
698 uint32_t flag;
699 uint32_t fw_ver;
700
701 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
702 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
703 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
704 return -EBUSY;
705
706 if (!IS_QLA84XX(ha)) {
707 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
708 "exiting.\n", vha->host_no));
709 return -EINVAL;
710 }
711
712 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
713 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
714 if (!sg_cnt)
715 return -ENOMEM;
716
717 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
718 DEBUG2(printk(KERN_INFO
719 "dma mapping resulted in different sg counts "
720 "request_sg_cnt: %x dma_request_sg_cnt: %x ",
721 bsg_job->request_payload.sg_cnt, sg_cnt));
722 rval = -EAGAIN;
723 goto done_unmap_sg;
724 }
725
726 data_len = bsg_job->request_payload.payload_len;
727 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
728 &fw_dma, GFP_KERNEL);
729 if (!fw_buf) {
730 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
731 "failed for host=%lu\n", __func__, vha->host_no));
732 rval = -ENOMEM;
733 goto done_unmap_sg;
734 }
735
736 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
737 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
738
739 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
740 if (!mn) {
741 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
742 "failed for host=%lu\n", __func__, vha->host_no));
743 rval = -ENOMEM;
744 goto done_free_fw_buf;
745 }
746
747 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
748 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
749
750 memset(mn, 0, sizeof(struct access_chip_84xx));
751 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
752 mn->entry_count = 1;
753
754 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
755 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
756 options |= VCO_DIAG_FW;
757
758 mn->options = cpu_to_le16(options);
759 mn->fw_ver = cpu_to_le32(fw_ver);
760 mn->fw_size = cpu_to_le32(data_len);
761 mn->fw_seq_size = cpu_to_le32(data_len);
762 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
763 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
764 mn->dseg_length = cpu_to_le32(data_len);
765 mn->data_seg_cnt = cpu_to_le16(1);
766
767 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
768
769 if (rval) {
770 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
771 "request 84xx updatefw failed\n", vha->host_no));
772
773 rval = bsg_job->reply->reply_payload_rcv_len = 0;
774 bsg_job->reply->result = (DID_ERROR << 16);
775
776 } else {
777 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
778 "request 84xx updatefw completed\n", vha->host_no));
779
780 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
781 bsg_job->reply->result = DID_OK;
782 }
783
784 bsg_job->job_done(bsg_job);
785 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
786
787done_free_fw_buf:
788 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
789
790done_unmap_sg:
791 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
792 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
793
794 return rval;
795}
796
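The fw_ver computation above dereferences the third 32-bit word of the uploaded image; written out plainly, under the same assumption about the image layout:

/* Equivalent of the fw_ver extraction above: the version lives in the
 * 3rd little-endian dword (byte offset 8) of the A84xx firmware image. */
static uint32_t qla84xx_image_version(const void *fw_buf)
{
	const __le32 *words = fw_buf;

	return le32_to_cpu(words[2]);
}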
797static int
798qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
799{
800 struct Scsi_Host *host = bsg_job->shost;
801 scsi_qla_host_t *vha = shost_priv(host);
802 struct qla_hw_data *ha = vha->hw;
803 struct access_chip_84xx *mn = NULL;
804 dma_addr_t mn_dma, mgmt_dma;
805 void *mgmt_b = NULL;
806 int rval = 0;
807 struct qla_bsg_a84_mgmt *ql84_mgmt;
808 uint32_t sg_cnt;
809 uint32_t data_len = 0;
810 uint32_t dma_direction = DMA_NONE;
811
812 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
813 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
814 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
815 return -EBUSY;
816
817 if (!IS_QLA84XX(ha)) {
818 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
819 "exiting.\n", vha->host_no));
820 return -EINVAL;
821 }
822
823 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
824 sizeof(struct fc_bsg_request));
825 if (!ql84_mgmt) {
826 DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
827 __func__, vha->host_no));
828 return -EINVAL;
829 }
830
831 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
832 if (!mn) {
833 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
834 "failed for host=%lu\n", __func__, vha->host_no));
835 return -ENOMEM;
836 }
837
838 memset(mn, 0, sizeof(struct access_chip_84xx));
839 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
840 mn->entry_count = 1;
841
842 switch (ql84_mgmt->mgmt.cmd) {
843 case QLA84_MGMT_READ_MEM:
844 case QLA84_MGMT_GET_INFO:
845 sg_cnt = dma_map_sg(&ha->pdev->dev,
846 bsg_job->reply_payload.sg_list,
847 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
848 if (!sg_cnt) {
849 rval = -ENOMEM;
850 goto exit_mgmt;
851 }
852
853 dma_direction = DMA_FROM_DEVICE;
854
855 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
856 DEBUG2(printk(KERN_INFO
857 "dma mapping resulted in different sg counts "
858 "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
859 bsg_job->reply_payload.sg_cnt, sg_cnt));
860 rval = -EAGAIN;
861 goto done_unmap_sg;
862 }
863
864 data_len = bsg_job->reply_payload.payload_len;
865
866 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
867 &mgmt_dma, GFP_KERNEL);
868 if (!mgmt_b) {
869 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
870 "failed for host=%lu\n",
871 __func__, vha->host_no));
872 rval = -ENOMEM;
873 goto done_unmap_sg;
874 }
875
876 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
877 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
878 mn->parameter1 =
879 cpu_to_le32(
880 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
881
882 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
883 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
884 mn->parameter1 =
885 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
886
887 mn->parameter2 =
888 cpu_to_le32(
889 ql84_mgmt->mgmt.mgmtp.u.info.context);
890 }
891 break;
892
893 case QLA84_MGMT_WRITE_MEM:
894 sg_cnt = dma_map_sg(&ha->pdev->dev,
895 bsg_job->request_payload.sg_list,
896 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
897
898 if (!sg_cnt) {
899 rval = -ENOMEM;
900 goto exit_mgmt;
901 }
902
903 dma_direction = DMA_TO_DEVICE;
904
905 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
906 DEBUG2(printk(KERN_INFO
907 "dma mapping resulted in different sg counts "
908 "request_sg_cnt: %x dma_request_sg_cnt: %x ",
909 bsg_job->request_payload.sg_cnt, sg_cnt));
910 rval = -EAGAIN;
911 goto done_unmap_sg;
912 }
913
914 data_len = bsg_job->request_payload.payload_len;
915 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
916 &mgmt_dma, GFP_KERNEL);
917 if (!mgmt_b) {
918 DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
919 "failed for host=%lu\n",
920 __func__, vha->host_no));
921 rval = -ENOMEM;
922 goto done_unmap_sg;
923 }
924
925 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
926 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
927
928 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
929 mn->parameter1 =
930 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
931 break;
932
933 case QLA84_MGMT_CHNG_CONFIG:
934 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
935 mn->parameter1 =
936 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
937
938 mn->parameter2 =
939 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
940
941 mn->parameter3 =
942 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
943 break;
944
945 default:
946 rval = -EIO;
947 goto exit_mgmt;
948 }
949
950 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
951 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
952 mn->dseg_count = cpu_to_le16(1);
953 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
954 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
955 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
956 }
957
958 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
959
960 if (rval) {
961 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
962 "request 84xx mgmt failed\n", vha->host_no));
963
964 rval = bsg_job->reply->reply_payload_rcv_len = 0;
965 bsg_job->reply->result = (DID_ERROR << 16);
966
967 } else {
968 DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
969 "request 84xx mgmt completed\n", vha->host_no));
970
971 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
972 bsg_job->reply->result = DID_OK;
973
974 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
975 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
976 bsg_job->reply->reply_payload_rcv_len =
977 bsg_job->reply_payload.payload_len;
978
979 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
980 bsg_job->reply_payload.sg_cnt, mgmt_b,
981 data_len);
982 }
983 }
984
985 bsg_job->job_done(bsg_job);
986
987done_unmap_sg:
988 if (mgmt_b)
989 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
990
991 if (dma_direction == DMA_TO_DEVICE)
992 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
993 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
994 else if (dma_direction == DMA_FROM_DEVICE)
995 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
996 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
997
998exit_mgmt:
999 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1000
1001 return rval;
1002}
1003
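qla84xx_mgmt_cmd() maps only one side of the payload depending on the sub-command, and records which side in dma_direction so a single done_unmap_sg exit can undo it. The same idea as a tiny sketch (helper name invented):

/* Sketch: one unwind routine for a conditionally mapped payload;
 * DMA_NONE means nothing was mapped and nothing is undone. */
static void qla_bsg_unmap_one_side(struct device *dev,
				   struct scatterlist *sgl, int cnt,
				   enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_FROM_DEVICE)
		dma_unmap_sg(dev, sgl, cnt, dir);
}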
1004static int
1005qla24xx_iidma(struct fc_bsg_job *bsg_job)
1006{
1007 struct Scsi_Host *host = bsg_job->shost;
1008 scsi_qla_host_t *vha = shost_priv(host);
1009 struct qla_hw_data *ha = vha->hw;
1010 int rval = 0;
1011 struct qla_port_param *port_param = NULL;
1012 fc_port_t *fcport = NULL;
1013 uint16_t mb[MAILBOX_REGISTER_COUNT];
1014 uint8_t *rsp_ptr = NULL;
1015
1016 bsg_job->reply->reply_payload_rcv_len = 0;
1017
1018 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1019 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1020 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
1021 return -EBUSY;
1022
1023 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1024 DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
1025 "supported\n", __func__, vha->host_no));
1026 return -EINVAL;
1027 }
1028
1029 port_param = (struct qla_port_param *)((char *)bsg_job->request +
1030 sizeof(struct fc_bsg_request));
1031 if (!port_param) {
1032 DEBUG2(printk("%s(%ld): port_param header not provided, "
1033 "exiting.\n", __func__, vha->host_no));
1034 return -EINVAL;
1035 }
1036
1037 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1038 DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
1039 __func__, vha->host_no));
1040 return -EINVAL;
1041 }
1042
1043 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1044 if (fcport->port_type != FCT_TARGET)
1045 continue;
1046
1047 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1048 fcport->port_name, sizeof(fcport->port_name)))
1049 continue;
1050 break;
1051 }
1052
1053 if (!fcport) {
1054 DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
1055 __func__, vha->host_no));
1056 return -EINVAL;
1057 }
1058
1059 if (port_param->mode)
1060 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1061 port_param->speed, mb);
1062 else
1063 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1064 &port_param->speed, mb);
1065
1066 if (rval) {
1067 DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
1068 "%02x%02x%02x%02x%02x%02x%02x%02x -- "
1069 "%04x %x %04x %04x.\n",
1070 vha->host_no, fcport->port_name[0],
1071 fcport->port_name[1],
1072 fcport->port_name[2], fcport->port_name[3],
1073 fcport->port_name[4], fcport->port_name[5],
1074 fcport->port_name[6], fcport->port_name[7], rval,
1075 fcport->fp_speed, mb[0], mb[1]));
1076 rval = 0;
1077 bsg_job->reply->result = (DID_ERROR << 16);
1078
1079 } else {
1080 if (!port_param->mode) {
1081 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1082 sizeof(struct qla_port_param);
1083
1084 rsp_ptr = ((uint8_t *)bsg_job->reply) +
1085 sizeof(struct fc_bsg_reply);
1086
1087 memcpy(rsp_ptr, port_param,
1088 sizeof(struct qla_port_param));
1089 }
1090
1091 bsg_job->reply->result = DID_OK;
1092 }
1093
1094 bsg_job->job_done(bsg_job);
1095 return rval;
1096}
1097
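One caution about the port lookup in qla24xx_iidma(): after list_for_each_entry() runs to completion without a break, the cursor points at the list head container and is never NULL, so the `if (!fcport)` test above cannot catch a missing port. A found-pointer sketch of the safer pattern:

/* Sketch: NULL-safe lookup via an explicit match pointer, since a
 * list_for_each_entry() cursor is never NULL after a full traversal. */
static fc_port_t *qla_bsg_find_tgt_by_wwpn(scsi_qla_host_t *vha,
					   const uint8_t *wwpn)
{
	fc_port_t *fcport, *found = NULL;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;
		if (!memcmp(wwpn, fcport->port_name,
		    sizeof(fcport->port_name))) {
			found = fcport;
			break;
		}
	}
	return found;
}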
1098static int
1099qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1100{
1101 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1102 case QL_VND_LOOPBACK:
1103 return qla2x00_process_loopback(bsg_job);
1104
1105 case QL_VND_A84_RESET:
1106 return qla84xx_reset(bsg_job);
1107
1108 case QL_VND_A84_UPDATE_FW:
1109 return qla84xx_updatefw(bsg_job);
1110
1111 case QL_VND_A84_MGMT_CMD:
1112 return qla84xx_mgmt_cmd(bsg_job);
1113
1114 case QL_VND_IIDMA:
1115 return qla24xx_iidma(bsg_job);
1116
1117 case QL_VND_FCP_PRIO_CFG_CMD:
1118 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1119
1120 default:
1121 bsg_job->reply->result = (DID_ERROR << 16);
1122 bsg_job->job_done(bsg_job);
1123 return -ENOSYS;
1124 }
1125}
1126
1127int
1128qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1129{
1130 int ret = -EINVAL;
1131
1132 switch (bsg_job->request->msgcode) {
1133 case FC_BSG_RPT_ELS:
1134 case FC_BSG_HST_ELS_NOLOGIN:
1135 ret = qla2x00_process_els(bsg_job);
1136 break;
1137 case FC_BSG_HST_CT:
1138 ret = qla2x00_process_ct(bsg_job);
1139 break;
1140 case FC_BSG_HST_VENDOR:
1141 ret = qla2x00_process_vendor_specific(bsg_job);
1142 break;
1143 case FC_BSG_HST_ADD_RPORT:
1144 case FC_BSG_HST_DEL_RPORT:
1145 case FC_BSG_RPT_CT:
1146 default:
1147 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
1148 break;
1149 }
1150 return ret;
1151}
1152
1153int
1154qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1155{
1156 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
1157 struct qla_hw_data *ha = vha->hw;
1158 srb_t *sp;
1159 int cnt, que;
1160 unsigned long flags;
1161 struct req_que *req;
1162 struct srb_ctx *sp_bsg;
1163
1164 /* find the bsg job from the active list of commands */
1165 spin_lock_irqsave(&ha->hardware_lock, flags);
1166 for (que = 0; que < ha->max_req_queues; que++) {
1167 req = ha->req_q_map[que];
1168 if (!req)
1169 continue;
1170
1171 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1172 sp = req->outstanding_cmds[cnt];
1173 if (sp) {
1174 sp_bsg = sp->ctx;
1175
1176 if (((sp_bsg->type == SRB_CT_CMD) ||
1177 (sp_bsg->type == SRB_ELS_CMD_HST))
1178 && (sp_bsg->u.bsg_job == bsg_job)) {
1179 if (ha->isp_ops->abort_command(sp)) {
1180 DEBUG2(qla_printk(KERN_INFO, ha,
1181 "scsi(%ld): mbx "
1182 "abort_command failed\n",
1183 vha->host_no));
1184 bsg_job->req->errors =
1185 bsg_job->reply->result = -EIO;
1186 } else {
1187 DEBUG2(qla_printk(KERN_INFO, ha,
1188 "scsi(%ld): mbx "
1189 "abort_command success\n",
1190 vha->host_no));
1191 bsg_job->req->errors =
1192 bsg_job->reply->result = 0;
1193 }
1194 goto done;
1195 }
1196 }
1197 }
1198 }
1199 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1200 DEBUG2(qla_printk(KERN_INFO, ha,
1201 "scsi(%ld) SRB not found to abort\n", vha->host_no));
1202 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1203 return 0;
1204
1205done:
1206 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1207 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
1208 kfree(sp->fcport);
1209 kfree(sp->ctx);
1210 mempool_free(sp, ha->srb_mempool);
1211 return 0;
1212}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
new file mode 100644
index 000000000000..76ed92dd2ef2
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -0,0 +1,135 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#ifndef __QLA_BSG_H
8#define __QLA_BSG_H
9
10/* BSG Vendor specific commands */
11#define QL_VND_LOOPBACK 0x01
12#define QL_VND_A84_RESET 0x02
13#define QL_VND_A84_UPDATE_FW 0x03
14#define QL_VND_A84_MGMT_CMD 0x04
15#define QL_VND_IIDMA 0x05
16#define QL_VND_FCP_PRIO_CFG_CMD 0x06
17
18/* BSG definitions for interpreting CommandSent field */
19#define INT_DEF_LB_LOOPBACK_CMD 0
20#define INT_DEF_LB_ECHO_CMD 1
21
22/* BSG Vendor specific definitions */
23#define A84_ISSUE_WRITE_TYPE_CMD 0
24#define A84_ISSUE_READ_TYPE_CMD 1
25#define A84_CLEANUP_CMD 2
26#define A84_ISSUE_RESET_OP_FW 3
27#define A84_ISSUE_RESET_DIAG_FW 4
28#define A84_ISSUE_UPDATE_OPFW_CMD 5
29#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
30
31struct qla84_mgmt_param {
32 union {
33 struct {
34 uint32_t start_addr;
35 } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
36 struct {
37 uint32_t id;
38#define QLA84_MGMT_CONFIG_ID_UIF 1
39#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
40#define QLA84_MGMT_CONFIG_ID_PAUSE 3
41#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
42
43 uint32_t param0;
44 uint32_t param1;
45 } config; /* for QLA84_MGMT_CHNG_CONFIG */
46
47 struct {
48 uint32_t type;
49#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
50#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
51#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
52#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
53#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
54#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
55#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
56
57 uint32_t context;
58/*
59* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
60*/
61#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
62#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
63#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
64#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
65#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
66#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
67#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
68#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
69#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
70#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
71
72/*
73* context definitions for QLA84_MGMT_INFO_PORT_STAT
74*/
75#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
76#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
77#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
78#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
79#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
80#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
81
82
83/*
84* context definitions for QLA84_MGMT_INFO_LIF_STAT
85*/
86#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
87#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
88#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
89#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
90#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
91
92 } info; /* for QLA84_MGMT_GET_INFO */
93 } u;
94};
95
96struct qla84_msg_mgmt {
97 uint16_t cmd;
98#define QLA84_MGMT_READ_MEM 0x00
99#define QLA84_MGMT_WRITE_MEM 0x01
100#define QLA84_MGMT_CHNG_CONFIG 0x02
101#define QLA84_MGMT_GET_INFO 0x03
102 uint16_t rsrvd;
103 struct qla84_mgmt_param mgmtp;/* parameters for cmd */
104 uint32_t len; /* bytes in payload following this struct */
105 uint8_t payload[0]; /* payload for cmd */
106};
107
108struct qla_bsg_a84_mgmt {
109 struct qla84_msg_mgmt mgmt;
110} __attribute__ ((packed));
111
112struct qla_scsi_addr {
113 uint16_t bus;
114 uint16_t target;
115} __attribute__ ((packed));
116
117struct qla_ext_dest_addr {
118 union {
119 uint8_t wwnn[8];
120 uint8_t wwpn[8];
121 uint8_t id[4];
122 struct qla_scsi_addr scsi_addr;
123 } dest_addr;
124 uint16_t dest_type;
125#define EXT_DEF_TYPE_WWPN 2
126 uint16_t lun;
127 uint16_t padding[2];
128} __attribute__ ((packed));
129
130struct qla_port_param {
131 struct qla_ext_dest_addr fc_scsi_addr;
132 uint16_t mode;
133 uint16_t speed;
134} __attribute__ ((packed));
135#endif
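As qla24xx_iidma() in qla_bsg.c shows, mode selects between reading the current speed (0) and setting a new one (nonzero), with the target port chosen by WWPN. A hypothetical caller-side fill of this struct:

/* Hypothetical helper: prepare a QL_VND_IIDMA "get speed" request.
 * The wwpn argument is the remote port's worldwide port name. */
static void qla_fill_iidma_get(struct qla_port_param *p,
			       const uint8_t wwpn[8])
{
	memset(p, 0, sizeof(*p));
	memcpy(p->fc_scsi_addr.dest_addr.wwpn, wwpn, 8);
	p->fc_scsi_addr.dest_type = EXT_DEF_TYPE_WWPN;
	p->mode = 0;		/* 0 = query; nonzero = set p->speed */
}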
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cb2eca4c26d8..2afc8a362f2c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -769,6 +769,9 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
769	void *nxt;
770	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
771
772 if (IS_QLA82XX(ha))
773 return;
774
775	risc_address = ext_mem_cnt = 0;
776	flags = 0;
777
@@ -1660,4 +1663,62 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
1663	printk("\n");
1664}
1665
1666void
1667qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size)
1668{
1669 uint32_t cnt;
1670 uint8_t c;
1671 uint8_t last16[16], cur16[16];
1672 uint32_t lc = 0, num_same16 = 0, j;
1673
1674 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 "
1675 "Ah Bh Ch Dh Eh Fh\n");
1676 printk(KERN_DEBUG "----------------------------------------"
1677 "----------------------\n");
1678
1679 for (cnt = 0; cnt < size;) {
1680 c = *b++;
1681
1682 cur16[lc++] = c;
1683
1684 cnt++;
1685 if (cnt % 16)
1686 continue;
1687
1688 /* We have 16 now */
1689 lc = 0;
1690 if (num_same16 == 0) {
1691 memcpy(last16, cur16, 16);
1692 num_same16++;
1693 continue;
1694 }
1695 if (memcmp(cur16, last16, 16) == 0) {
1696 num_same16++;
1697 continue;
1698 }
1699 for (j = 0; j < 16; j++)
1700 printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
1701 printk(KERN_DEBUG "\n");
1702
1703 if (num_same16 > 1)
1704 printk(KERN_DEBUG "> prev pattern repeats (%u)"
1705 "more times\n", num_same16-1);
1706 memcpy(last16, cur16, 16);
1707 num_same16 = 1;
1708 }
1709
1710 if (num_same16) {
1711 for (j = 0; j < 16; j++)
1712 printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
1713 printk(KERN_DEBUG "\n");
1714
1715 if (num_same16 > 1)
1716 printk(KERN_DEBUG "> prev pattern repeats (%u)"
1717 "more times\n", num_same16-1);
1718 }
1719 if (lc) {
1720 for (j = 0; j < lc; j++)
1721 printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]);
1722 printk(KERN_DEBUG "\n");
1723 }
1724}
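
The new qla2x00_dump_buffer_zipped() above run-length-compresses its hex output: a 16-byte row is printed only when it differs from the previous row, and identical rows are collapsed into a single repeat note. A standalone user-space sketch of the same idea (dump_zipped() is a hypothetical name, not driver code):

/* User-space sketch of the "zipped" dump idea: emit a 16-byte row only
 * when it changes, otherwise count repeats of the previous pattern. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void dump_zipped(const uint8_t *b, uint32_t size)
{
	uint8_t last[16];
	uint32_t off, repeats = 0;

	for (off = 0; off + 16 <= size; off += 16) {
		if (off && memcmp(b + off, last, 16) == 0) {
			repeats++;
			continue;
		}
		if (repeats)
			printf("> prev pattern repeats (%u) more times\n",
			    repeats);
		repeats = 0;
		memcpy(last, b + off, 16);
		for (int j = 0; j < 16; j++)
			printf("%02x ", last[j]);
		printf("\n");
	}
	if (repeats)
		printf("> prev pattern repeats (%u) more times\n", repeats);
}

int main(void)
{
	uint8_t buf[64] = { 0 };	/* three identical zero rows, one row of data */

	buf[48] = 0xde;
	buf[49] = 0xad;
	dump_zipped(buf, sizeof(buf));
	return 0;
}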
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index d6d9c86cb058..916c81f3f55d 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -27,6 +27,9 @@
 /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
 /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
 /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
+/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
+
+/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
 
 /*
 * Macros use for debugging the driver.
@@ -139,6 +142,13 @@
 #define DEBUG17(x)	do {} while (0)
 #endif
 
+#if defined(QL_DEBUG_LEVEL_18)
+#define DEBUG18(x)	do {if (ql2xextended_error_logging) x; } while (0)
+#else
+#define DEBUG18(x)	do {} while (0)
+#endif
+
+
 /*
  * Firmware Dump structure definition
  */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index afa95614aaf8..839610909018 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -33,7 +33,10 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_bsg_fc.h>
 
-#define QLA2XXX_DRIVER_NAME	"qla2xxx"
+#include "qla_bsg.h"
+#include "qla_nx.h"
+#define QLA2XXX_DRIVER_NAME	"qla2xxx"
+#define QLA2XXX_APIDEV		"ql2xapidev"
 
 /*
  * We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -186,6 +189,16 @@
 struct req_que;
 
 /*
+ * (sd.h is not exported, hence local inclusion)
+ * Data Integrity Field tuple.
+ */
+struct sd_dif_tuple {
+	__be16 guard_tag;	/* Checksum */
+	__be16 app_tag;		/* Opaque storage */
+	__be32 ref_tag;		/* Target LBA or indirect LBA */
+};
+
+/*
  * SCSI Request Block
  */
 typedef struct srb {
@@ -205,40 +218,73 @@ typedef struct srb {
 /*
  * SRB flag definitions
  */
 #define SRB_DMA_VALID		BIT_0	/* Command sent to ISP */
+#define SRB_FCP_CMND_DMA_VALID	BIT_12	/* DIF: DSD List valid */
+#define SRB_CRC_CTX_DMA_VALID	BIT_2	/* DIF: context DMA valid */
+#define SRB_CRC_PROT_DMA_VALID	BIT_4	/* DIF: prot DMA valid */
+#define SRB_CRC_CTX_DSD_VALID	BIT_5	/* DIF: dsd_list valid */
+
+/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
+#define IS_PROT_IO(sp)	(sp->flags & SRB_CRC_CTX_DSD_VALID)
 
 /*
  * SRB extensions.
  */
-struct srb_ctx {
-#define SRB_LOGIN_CMD	1
-#define SRB_LOGOUT_CMD	2
-	uint16_t type;
-	struct timer_list timer;
-
-	void (*free)(srb_t *sp);
-	void (*timeout)(srb_t *sp);
-};
-
-struct srb_logio {
-	struct srb_ctx ctx;
-
+struct srb_iocb {
+	union {
+		struct {
+			uint16_t flags;
 #define SRB_LOGIN_RETRIED	BIT_0
 #define SRB_LOGIN_COND_PLOGI	BIT_1
 #define SRB_LOGIN_SKIP_PRLI	BIT_2
-	uint16_t flags;
+			uint16_t data[2];
+		} logio;
+		struct {
+			/*
+			 * Values for flags field below are as
+			 * defined in tsk_mgmt_entry struct
+			 * for control_flags field in qla_fw.h.
+			 */
+			uint32_t flags;
+			uint32_t lun;
+			uint32_t data;
+		} tmf;
+		struct {
+			/*
+			 * values for modif field below are as
+			 * defined in mrk_entry_24xx struct
+			 * for the modifier field in qla_fw.h.
+			 */
+			uint8_t modif;
+			uint16_t lun;
+			uint32_t data;
+		} marker;
+	} u;
+
+	struct timer_list timer;
+
+	void (*done)(srb_t *);
+	void (*free)(srb_t *);
+	void (*timeout)(srb_t *);
 };
 
-struct srb_bsg_ctx {
+/* Values for srb_ctx type */
+#define SRB_LOGIN_CMD	1
+#define SRB_LOGOUT_CMD	2
 #define SRB_ELS_CMD_RPT	3
 #define SRB_ELS_CMD_HST	4
 #define SRB_CT_CMD	5
-	uint16_t type;
-};
+#define SRB_ADISC_CMD	6
+#define SRB_TM_CMD	7
+#define SRB_MARKER_CMD	8
 
-struct srb_bsg {
-	struct srb_bsg_ctx ctx;
-	struct fc_bsg_job *bsg_job;
+struct srb_ctx {
+	uint16_t type;
+	char *name;
+	union {
+		struct srb_iocb *iocb_cmd;
+		struct fc_bsg_job *bsg_job;
+	} u;
 };
 
 struct msg_echo_lb {
@@ -416,6 +462,7 @@ typedef union {
 	struct device_reg_2xxx isp;
 	struct device_reg_24xx isp24;
 	struct device_reg_25xxmq isp25mq;
+	struct device_reg_82xx isp82;
 } device_reg_t;
 
 #define ISP_REQ_Q_IN(ha, reg) \
@@ -1299,6 +1346,66 @@ typedef struct {
 	uint32_t dseg_4_length;		/* Data segment 4 length. */
 } cont_a64_entry_t;
 
+#define PO_MODE_DIF_INSERT	0
+#define PO_MODE_DIF_REMOVE	BIT_0
+#define PO_MODE_DIF_PASS	BIT_1
+#define PO_MODE_DIF_REPLACE	(BIT_0 + BIT_1)
+#define PO_ENABLE_DIF_BUNDLING	BIT_8
+#define PO_ENABLE_INCR_GUARD_SEED	BIT_3
+#define PO_DISABLE_INCR_REF_TAG	BIT_5
+#define PO_DISABLE_GUARD_CHECK	BIT_4
+/*
+ * ISP queue - 64-Bit addressing, continuation crc entry structure definition.
+ */
+struct crc_context {
+	uint32_t handle;		/* System handle. */
+	uint32_t ref_tag;
+	uint16_t app_tag;
+	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
+	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
+	uint16_t guard_seed;		/* Initial Guard Seed */
+	uint16_t prot_opts;		/* Requested Data Protection Mode */
+	uint16_t blk_size;		/* Data size in bytes */
+	uint16_t runt_blk_guard;	/* Guard value for runt block (tape
+					 * only) */
+	uint32_t byte_count;		/* Total byte count/ total data
+					 * transfer count */
+	union {
+		struct {
+			uint32_t	reserved_1;
+			uint16_t	reserved_2;
+			uint16_t	reserved_3;
+			uint32_t	reserved_4;
+			uint32_t	data_address[2];
+			uint32_t	data_length;
+			uint32_t	reserved_5[2];
+			uint32_t	reserved_6;
+		} nobundling;
+		struct {
+			uint32_t	dif_byte_count;	/* Total DIF byte
+							 * count */
+			uint16_t	reserved_1;
+			uint16_t	dseg_count;	/* Data segment count */
+			uint32_t	reserved_2;
+			uint32_t	data_address[2];
+			uint32_t	data_length;
+			uint32_t	dif_address[2];
+			uint32_t	dif_length;	/* Data segment 0
+							 * length */
+		} bundling;
+	} u;
+
+	struct fcp_cmnd	fcp_cmnd;
+	dma_addr_t	crc_ctx_dma;
+	/* List of DMA context transfers */
+	struct list_head dsd_list;
+
+	/* This structure should not exceed 512 bytes */
+};
+
+#define CRC_CONTEXT_LEN_FW	(offsetof(struct crc_context, fcp_cmnd.lun))
+#define CRC_CONTEXT_FCPCMND_OFF	(offsetof(struct crc_context, fcp_cmnd.lun))
+
 /*
  * ISP queue - status entry structure definition.
 */
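
struct crc_context above packages the T10 DIF parameters (ref tag, app tag, guard seed, protection-operation mode) that the firmware needs to verify or insert protection information, and sd_dif_tuple earlier in this file gives the 8-byte tuple layout. For reference, the guard tag is a CRC16 over the data block with polynomial 0x8BB7. The sketch below computes one tuple in plain user-space C; fill_dif() is illustrative, and plain integer types with htons/htonl stand in for the kernel's __be16/__be32:

/* Sketch: software computation of one T10 DIF tuple for a 512-byte block.
 * Guard tag = CRC16 with polynomial 0x8BB7 (T10-DIF), MSB-first, init 0. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons/htonl for the big-endian fields */

struct sd_dif_tuple {
	uint16_t guard_tag;	/* Checksum (BE) */
	uint16_t app_tag;	/* Opaque storage (BE) */
	uint32_t ref_tag;	/* Target LBA (BE) */
};

static uint16_t crc_t10dif(const uint8_t *data, size_t len)
{
	uint16_t crc = 0;

	while (len--) {
		crc ^= (uint16_t)*data++ << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7 : crc << 1;
	}
	return crc;
}

static void fill_dif(struct sd_dif_tuple *t, const uint8_t *block, uint32_t lba)
{
	t->guard_tag = htons(crc_t10dif(block, 512));
	t->app_tag = 0;
	t->ref_tag = htonl(lba);	/* low 32 bits of the LBA */
}

int main(void)
{
	uint8_t block[512] = { 0 };
	struct sd_dif_tuple t;

	block[0] = 0x37;
	fill_dif(&t, block, 1234);
	printf("guard=0x%04x\n", ntohs(t.guard_tag));
	return 0;
}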
@@ -1359,6 +1466,7 @@ typedef struct {
 #define CS_ABORTED		0x5	/* System aborted command. */
 #define CS_TIMEOUT		0x6	/* Timeout error. */
 #define CS_DATA_OVERRUN		0x7	/* Data overrun. */
+#define CS_DIF_ERROR		0xC	/* DIF error detected */
 
 #define CS_DATA_UNDERRUN	0x15	/* Data Underrun. */
 #define CS_QUEUE_FULL		0x1C	/* Queue Full. */
@@ -1579,6 +1687,8 @@ typedef struct fc_port {
 	uint16_t loop_id;
 	uint16_t old_loop_id;
 
+	uint8_t fcp_prio;
+
 	uint8_t fabric_port_name[WWN_SIZE];
 	uint16_t fp_speed;
 
@@ -1611,6 +1721,7 @@ typedef struct fc_port {
 #define FCF_FABRIC_DEVICE	BIT_0
 #define FCF_LOGIN_NEEDED	BIT_1
 #define FCF_FCP2_DEVICE		BIT_2
+#define FCF_ASYNC_SENT		BIT_3
 
 /* No loop ID flag. */
 #define FC_NO_LOOP_ID		0x1000
@@ -2109,6 +2220,7 @@ struct isp_operations {
 
 	int (*get_flash_version) (struct scsi_qla_host *, void *);
 	int (*start_scsi) (srb_t *);
+	int (*abort_isp) (struct scsi_qla_host *);
 };
 
 /* MSI-X Support *************************************************************/
@@ -2143,6 +2255,8 @@ enum qla_work_type {
 	QLA_EVT_ASYNC_LOGIN_DONE,
 	QLA_EVT_ASYNC_LOGOUT,
 	QLA_EVT_ASYNC_LOGOUT_DONE,
+	QLA_EVT_ASYNC_ADISC,
+	QLA_EVT_ASYNC_ADISC_DONE,
 	QLA_EVT_UEVENT,
 };
 
@@ -2295,6 +2409,7 @@ struct qla_hw_data {
 		uint32_t	eeh_busy		:1;
 		uint32_t	cpu_affinity_enabled	:1;
 		uint32_t	disable_msix_handshake	:1;
+		uint32_t	fcp_prio_enabled	:1;
 	} flags;
 
 	/* This spinlock is used to protect "io transactions", you must
@@ -2382,7 +2497,8 @@ struct qla_hw_data {
 #define DT_ISP2532			BIT_11
 #define DT_ISP8432			BIT_12
 #define DT_ISP8001			BIT_13
-#define DT_ISP_LAST			(DT_ISP8001 << 1)
+#define DT_ISP8021			BIT_14
+#define DT_ISP_LAST			(DT_ISP8021 << 1)
 
 #define DT_IIDMA			BIT_26
 #define DT_FWI2				BIT_27
@@ -2405,6 +2521,7 @@ struct qla_hw_data {
 #define IS_QLA2532(ha)	(DT_MASK(ha) & DT_ISP2532)
 #define IS_QLA8432(ha)	(DT_MASK(ha) & DT_ISP8432)
 #define IS_QLA8001(ha)	(DT_MASK(ha) & DT_ISP8001)
+#define IS_QLA82XX(ha)	(DT_MASK(ha) & DT_ISP8021)
 
 #define IS_QLA23XX(ha)	(IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2415,8 +2532,10 @@ struct qla_hw_data {
 #define IS_QLA24XX_TYPE(ha)	(IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 				IS_QLA84XX(ha))
 #define IS_QLA81XX(ha)		(IS_QLA8001(ha))
+#define IS_QLA8XXX_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha))
 #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
-				IS_QLA25XX(ha) || IS_QLA81XX(ha))
+				IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+				IS_QLA82XX(ha))
 #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
 #define IS_NOPOLLING_TYPE(ha)	((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
 				(ha)->flags.msix_enabled)
@@ -2496,6 +2615,9 @@ struct qla_hw_data {
 	dma_addr_t	ex_init_cb_dma;
 	struct ex_init_cb_81xx *ex_init_cb;
 
+	void		*async_pd;
+	dma_addr_t	async_pd_dma;
+
 	/* These are used by mailbox operations. */
 	volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
 
@@ -2598,6 +2720,8 @@ struct qla_hw_data {
 	uint32_t	flt_region_nvram;
 	uint32_t	flt_region_npiv_conf;
 	uint32_t	flt_region_gold_fw;
+	uint32_t	flt_region_fcp_prio;
+	uint32_t	flt_region_bootload;
 
 	/* Needed for BEACON */
 	uint16_t	beacon_blink_led;
@@ -2626,6 +2750,39 @@ struct qla_hw_data {
 	struct isp_operations *isp_ops;
 	struct workqueue_struct *wq;
 	struct qlfc_fw fw_buf;
+
+	/* FCP_CMND priority support */
+	struct qla_fcp_prio_cfg *fcp_prio_cfg;
+
+	struct dma_pool *dl_dma_pool;
+#define DSD_LIST_DMA_POOL_SIZE	512
+
+	struct dma_pool *fcp_cmnd_dma_pool;
+	mempool_t	*ctx_mempool;
+#define FCP_CMND_DMA_POOL_SIZE	512
+
+	unsigned long	nx_pcibase;		/* Base I/O address */
+	uint8_t		*nxdb_rd_ptr;		/* Doorbell read pointer */
+	unsigned long	nxdb_wr_ptr;		/* Door bell write pointer */
+
+	uint32_t	crb_win;
+	uint32_t	curr_window;
+	uint32_t	ddr_mn_window;
+	unsigned long	mn_win_crb;
+	unsigned long	ms_win_crb;
+	int		qdr_sn_window;
+	uint32_t	nx_dev_init_timeout;
+	uint32_t	nx_reset_timeout;
+	rwlock_t	hw_lock;
+	uint16_t	portnum;		/* port number */
+	int		link_width;
+	struct fw_blob	*hablob;
+	struct qla82xx_legacy_intr_set nx_legacy_intr;
+
+	uint16_t	gbl_dsd_inuse;
+	uint16_t	gbl_dsd_avail;
+	struct list_head gbl_dsd_list;
+#define NUM_DSD_CHAIN 4096
 };
 
 /*
@@ -2650,6 +2807,7 @@ typedef struct scsi_qla_host {
 
 		uint32_t	management_server_logged_in	:1;
 		uint32_t	process_response_queue		:1;
+		uint32_t	difdix_supported:1;
 	} flags;
 
 	atomic_t	loop_state;
@@ -2678,10 +2836,13 @@ typedef struct scsi_qla_host {
 #define VP_DPC_NEEDED		14	/* wake up for VP dpc handling */
 #define UNLOADING		15
 #define NPIV_CONFIG_NEEDED	16
+#define ISP_UNRECOVERABLE	17
+#define FCOE_CTX_RESET_NEEDED	18	/* Initiate FCoE context reset */
 
 	uint32_t	device_flags;
 #define SWITCH_FOUND		BIT_0
 #define DFLG_NO_CABLE		BIT_1
+#define DFLG_DEV_FAILED		BIT_5
 
 	/* ISP configuration data. */
 	uint16_t	loop_id;		/* Host adapter loop id */
@@ -2739,6 +2900,8 @@ typedef struct scsi_qla_host {
 #define VP_ERR_ADAP_NORESOURCES	5
 	struct qla_hw_data *hw;
 	struct req_que *req;
+	int	fw_heartbeat_counter;
+	int	seconds_since_last_heartbeat;
 } scsi_qla_host_t;
 
 /*
2744/* 2907/*
@@ -2791,134 +2954,16 @@ typedef struct scsi_qla_host {
2791#define OPTROM_SIZE_24XX 0x100000 2954#define OPTROM_SIZE_24XX 0x100000
2792#define OPTROM_SIZE_25XX 0x200000 2955#define OPTROM_SIZE_25XX 0x200000
2793#define OPTROM_SIZE_81XX 0x400000 2956#define OPTROM_SIZE_81XX 0x400000
2957#define OPTROM_SIZE_82XX 0x800000
2958
2959#define OPTROM_BURST_SIZE 0x1000
2960#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
2961
2962#define QLA_DSDS_PER_IOCB 37
2794 2963
2795#include "qla_gbl.h" 2964#include "qla_gbl.h"
2796#include "qla_dbg.h" 2965#include "qla_dbg.h"
2797#include "qla_inline.h" 2966#include "qla_inline.h"
2798 2967
2799#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) 2968#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
2800
2801/*
2802 * BSG Vendor specific commands
2803 */
2804
2805#define QL_VND_LOOPBACK 0x01
2806#define QLA84_RESET 0x02
2807#define QLA84_UPDATE_FW 0x03
2808#define QLA84_MGMT_CMD 0x04
2809
2810/* BSG definations for interpreting CommandSent field */
2811#define INT_DEF_LB_LOOPBACK_CMD 0
2812#define INT_DEF_LB_ECHO_CMD 1
2813
2814/* BSG Vendor specific definations */
2815typedef struct _A84_RESET {
2816 uint16_t Flags;
2817 uint16_t Reserved;
2818#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
2819} __attribute__((packed)) A84_RESET, *PA84_RESET;
2820
2821#define A84_ISSUE_WRITE_TYPE_CMD 0
2822#define A84_ISSUE_READ_TYPE_CMD 1
2823#define A84_CLEANUP_CMD 2
2824#define A84_ISSUE_RESET_OP_FW 3
2825#define A84_ISSUE_RESET_DIAG_FW 4
2826#define A84_ISSUE_UPDATE_OPFW_CMD 5
2827#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
2828
2829struct qla84_mgmt_param {
2830 union {
2831 struct {
2832 uint32_t start_addr;
2833 } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
2834 struct {
2835 uint32_t id;
2836#define QLA84_MGMT_CONFIG_ID_UIF 1
2837#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
2838#define QLA84_MGMT_CONFIG_ID_PAUSE 3
2839#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
2840
2841 uint32_t param0;
2842 uint32_t param1;
2843 } config; /* for QLA84_MGMT_CHNG_CONFIG */
2844
2845 struct {
2846 uint32_t type;
2847#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
2848#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
2849#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
2850#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
2851#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
2852#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
2853#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
2854
2855 uint32_t context;
2856/*
2857* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
2858*/
2859#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
2860#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
2861#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
2862#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
2863#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
2864#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
2865#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
2866#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
2867#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
2868#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
2869
2870/*
2871* context definitions for QLA84_MGMT_INFO_PORT_STAT
2872*/
2873#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
2874#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
2875#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
2876#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
2877#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
2878#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
2879
2880
2881/*
2882* context definitions for QLA84_MGMT_INFO_LIF_STAT
2883*/
2884#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
2885#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
2886#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
2887#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
2888#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
2889
2890 } info; /* for QLA84_MGMT_GET_INFO */
2891 } u;
2892};
2893
2894struct qla84_msg_mgmt {
2895 uint16_t cmd;
2896#define QLA84_MGMT_READ_MEM 0x00
2897#define QLA84_MGMT_WRITE_MEM 0x01
2898#define QLA84_MGMT_CHNG_CONFIG 0x02
2899#define QLA84_MGMT_GET_INFO 0x03
2900 uint16_t rsrvd;
2901 struct qla84_mgmt_param mgmtp;/* parameters for cmd */
2902 uint32_t len; /* bytes in payload following this struct */
2903 uint8_t payload[0]; /* payload for cmd */
2904};
2905
2906struct msg_update_fw {
2907 /*
2908 * diag_fw = 0 operational fw
2909 * otherwise diagnostic fw
2910 * offset, len, fw_len are present to overcome the current limitation
2911 * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
2912 * specifies the byte "offset" where it fits in the fw buffer. The
2913 * number of bytes in each chunk is specified in "len". "fw_len"
2914 * is the total size of fw. The first chunk should start at offset = 0.
2915 * When offset+len == fw_len, the fw is written to the HBA.
2916 */
2917 uint32_t diag_fw;
2918 uint32_t offset;/* start offset */
2919 uint32_t len; /* num bytes in cur xfer */
2920 uint32_t fw_len; /* size of fw in bytes */
2921 uint8_t fw_bytes[0];
2922};
2923
2924#endif 2969#endif
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 42c5587cc50c..93f833960147 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -400,6 +400,7 @@ struct cmd_type_6 {
 	struct scsi_lun lun;		/* FCP LUN (BE). */
 
 	uint16_t control_flags;		/* Control flags. */
+#define CF_DIF_SEG_DESCR_ENABLE		BIT_3
 #define CF_DATA_SEG_DESCR_ENABLE	BIT_2
 #define CF_READ_DATA			BIT_1
 #define CF_WRITE_DATA			BIT_0
@@ -466,6 +467,43 @@ struct cmd_type_7 {
 	uint32_t dseg_0_len;		/* Data segment 0 length. */
 };
 
+#define COMMAND_TYPE_CRC_2	0x6A	/* Command Type CRC_2 (Type 6)
+					 * (T10-DIF) */
+struct cmd_type_crc_2 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+	uint16_t timeout;		/* Command timeout. */
+
+	uint16_t dseg_count;		/* Data segment count. */
+
+	uint16_t fcp_rsp_dseg_len;	/* FCP_RSP DSD length. */
+
+	struct scsi_lun lun;		/* FCP LUN (BE). */
+
+	uint16_t control_flags;		/* Control flags. */
+
+	uint16_t fcp_cmnd_dseg_len;		/* Data segment length. */
+	uint32_t fcp_cmnd_dseg_address[2];	/* Data segment address. */
+
+	uint32_t fcp_rsp_dseg_address[2];	/* Data segment address. */
+
+	uint32_t byte_count;		/* Total byte count. */
+
+	uint8_t port_id[3];		/* PortID of destination port. */
+	uint8_t vp_index;
+
+	uint32_t crc_context_address[2];	/* Data segment address. */
+	uint16_t crc_context_len;		/* Data segment length. */
+	uint16_t reserved_1;			/* MUST be set to 0. */
+};
+
+
 /*
  * ISP queue - status entry structure definition.
@@ -496,10 +534,17 @@ struct sts_entry_24xx {
 
 	uint32_t sense_len;		/* FCP SENSE length. */
 	uint32_t rsp_data_len;		/* FCP response data length. */
-
 	uint8_t data[28];		/* FCP response/sense information. */
+	/*
+	 * If DIF Error is set in comp_status, these additional fields are
+	 * defined:
+	 * &data[10] : uint8_t report_runt_bg[2];	- computed guard
+	 * &data[12] : uint8_t actual_dif[8];		- DIF Data received
+	 * &data[20] : uint8_t expected_dif[8];		- DIF Data computed
+	 */
 };
 
+
 /*
  * Status entry completion status
 */
@@ -841,6 +886,8 @@ struct device_reg_24xx {
 #define FA_HW_EVENT_ENTRY_SIZE	4
 #define FA_NPIV_CONF0_ADDR	0x5C000
 #define FA_NPIV_CONF1_ADDR	0x5D000
+#define FA_FCP_PRIO0_ADDR	0x10000
+#define FA_FCP_PRIO1_ADDR	0x12000
 
 /*
  * Flash Error Log Event Codes.
@@ -1274,6 +1321,8 @@ struct qla_flt_header {
 #define FLT_REG_NPIV_CONF_0	0x29
 #define FLT_REG_NPIV_CONF_1	0x2a
 #define FLT_REG_GOLD_FW		0x2f
+#define FLT_REG_FCP_PRIO_0	0x87
+#define FLT_REG_FCP_PRIO_1	0x88
 
 struct qla_flt_region {
 	uint32_t code;
@@ -1750,6 +1799,61 @@ struct ex_init_cb_81xx {
 #define FARX_ACCESS_FLASH_CONF_81XX	0x7FFD0000
 #define FARX_ACCESS_FLASH_DATA_81XX	0x7F800000
 
+/* FCP priority config defines *************************************/
+/* operations */
+#define QLFC_FCP_PRIO_DISABLE		0x0
+#define QLFC_FCP_PRIO_ENABLE		0x1
+#define QLFC_FCP_PRIO_GET_CONFIG	0x2
+#define QLFC_FCP_PRIO_SET_CONFIG	0x3
+
+struct qla_fcp_prio_entry {
+	uint16_t flags;		/* Describes parameter(s) in FCP	*/
+				/* priority entry that are valid	*/
+#define FCP_PRIO_ENTRY_VALID		0x1
+#define FCP_PRIO_ENTRY_TAG_VALID	0x2
+#define FCP_PRIO_ENTRY_SPID_VALID	0x4
+#define FCP_PRIO_ENTRY_DPID_VALID	0x8
+#define FCP_PRIO_ENTRY_LUNB_VALID	0x10
+#define FCP_PRIO_ENTRY_LUNE_VALID	0x20
+#define FCP_PRIO_ENTRY_SWWN_VALID	0x40
+#define FCP_PRIO_ENTRY_DWWN_VALID	0x80
+	uint8_t  tag;		/* Priority value			*/
+	uint8_t  reserved;	/* Reserved for future use		*/
+	uint32_t src_pid;	/* Src port id. high order byte		*/
+				/* unused; -1 (wild card)		*/
+	uint32_t dst_pid;	/* Dst port id. high order byte		*/
+				/* unused; -1 (wild card)		*/
+	uint16_t lun_beg;	/* 1st lun num of lun range.		*/
+				/* -1 (wild card)			*/
+	uint16_t lun_end;	/* 2nd lun num of lun range.		*/
+				/* -1 (wild card)			*/
+	uint8_t  src_wwpn[8];	/* Source WWPN: -1 (wild card)		*/
+	uint8_t  dst_wwpn[8];	/* Destination WWPN: -1 (wild card)	*/
+};
+
+struct qla_fcp_prio_cfg {
+	uint8_t  signature[4];	/* "HQOS" signature of config data	*/
+	uint16_t version;	/* 1: Initial version			*/
+	uint16_t length;	/* config data size in num bytes	*/
+	uint16_t checksum;	/* config data bytes checksum		*/
+	uint16_t num_entries;	/* Number of entries			*/
+	uint16_t size_of_entry;	/* Size of each entry in num bytes	*/
+	uint8_t  attributes;	/* enable/disable, persistence		*/
+#define FCP_PRIO_ATTR_DISABLE	0x0
+#define FCP_PRIO_ATTR_ENABLE	0x1
+#define FCP_PRIO_ATTR_PERSIST	0x2
+	uint8_t  reserved;	/* Reserved for future use		*/
+#define FCP_PRIO_CFG_HDR_SIZE	0x10
+	struct qla_fcp_prio_entry entry[1];	/* fcp priority entries	*/
+#define FCP_PRIO_CFG_ENTRY_SIZE	0x20
+};
+
+#define FCP_PRIO_CFG_SIZE	(32*1024) /* fcp prio data per port*/
+
+/* 25XX Support ****************************************************/
+#define FA_FCP_PRIO0_ADDR_25	0x3C000
+#define FA_FCP_PRIO1_ADDR_25	0x3E000
+
 /* 81XX Flash locations -- occupies second 2MB region. */
 #define FA_BOOT_CODE_ADDR_81	0x80000
 #define FA_RISC_CODE_ADDR_81	0xA0000
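
The qla_fcp_prio_cfg block above is read out of flash, so it has to be sanity-checked before the driver trusts it — that is what the qla24xx_fcp_prio_cfg_valid() prototype added to qla_gbl.h below is for. A hedged user-space sketch of the obvious checks follows; the signature and header fields come from the definitions above, but the checksum convention (16-bit words of the block summing to zero) is an assumption for illustration, not taken from the driver source:

/* Sketch: validate a FCP priority config block read from flash.
 * The checksum rule used here is an assumption, not driver code. */
#include <stdint.h>
#include <string.h>

#define FCP_PRIO_CFG_SIZE	(32 * 1024)

struct qla_fcp_prio_cfg_hdr {
	uint8_t  signature[4];	/* "HQOS" */
	uint16_t version;
	uint16_t length;
	uint16_t checksum;
	uint16_t num_entries;
	uint16_t size_of_entry;
	uint8_t  attributes;
	uint8_t  reserved;
};

static int fcp_prio_cfg_valid(const void *buf, uint32_t len)
{
	const struct qla_fcp_prio_cfg_hdr *hdr = buf;
	const uint16_t *w = buf;
	uint16_t sum = 0;
	uint32_t i;

	if (len < sizeof(*hdr) || len > FCP_PRIO_CFG_SIZE)
		return 0;
	if (memcmp(hdr->signature, "HQOS", 4) != 0)
		return 0;
	if (hdr->num_entries == 0 || hdr->size_of_entry == 0)
		return 0;
	/* Assumed convention: all 16-bit words of the block sum to zero. */
	for (i = 0; i < len / 2; i++)
		sum += w[i];
	return sum == 0;
}

int main(void)
{
	uint16_t buf[32] = { 0 };

	memcpy(buf, "HQOS", 4);	/* header otherwise zeroed: rejected */
	return fcp_prio_cfg_valid(buf, sizeof(buf)) ? 0 : 1;
}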
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3a89bc514e2b..8217c3bcbc2e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -44,6 +44,7 @@ extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
 extern void qla2x00_update_fcports(scsi_qla_host_t *);
 
 extern int qla2x00_abort_isp(scsi_qla_host_t *);
+extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
 
 extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
 
@@ -55,10 +56,20 @@ extern void qla84xx_put_chip(struct scsi_qla_host *);
 extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
-extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
+extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
-extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
+extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
+extern int qla2x00_async_marker(fc_port_t *, uint16_t, uint8_t);
+extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
+extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
+    struct srb_iocb *);
+extern void qla2x00_async_marker_done(struct scsi_qla_host *, fc_port_t *,
+    struct srb_iocb *);
 
 extern fc_port_t *
 qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
@@ -79,6 +90,13 @@ extern int ql2xmaxqueues;
 extern int ql2xmultique_tag;
 extern int ql2xfwloadbin;
 extern int ql2xetsenable;
+extern int ql2xshiftctondsd;
+extern int ql2xdbwr;
+extern int ql2xdontresethba;
+extern int ql2xasynctmfenable;
+extern int ql2xenabledif;
+extern int ql2xenablehba_err_chk;
+extern int ql2xtargetreset;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -93,6 +111,10 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
     fc_port_t *, uint16_t *);
+extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
+    fc_port_t *, uint16_t *);
 extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
@@ -135,6 +157,7 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
 
 extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
 extern int qla2x00_wait_for_chip_reset(scsi_qla_host_t *);
+extern int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *);
 
 extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
 extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
@@ -157,6 +180,10 @@ int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
     uint16_t, uint16_t, uint8_t);
 extern int qla2x00_start_sp(srb_t *);
 extern void qla2x00_ctx_sp_free(srb_t *);
+extern uint16_t qla24xx_calc_iocbs(uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
+extern int qla24xx_dif_start_scsi(srb_t *);
+
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -328,6 +355,9 @@ extern int
 qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
 
 extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t,
+    uint16_t *);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
 */
@@ -340,6 +370,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
 extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
 extern void qla2x00_free_irqs(scsi_qla_host_t *);
 
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
 /*
  * Global Function Prototypes in qla_sup.c source file.
 */
@@ -384,6 +415,7 @@ extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
 extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
 
 extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
+extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
 
 /*
  * Global Function Prototypes in qla_dbg.c source file.
@@ -395,6 +427,7 @@ extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
 extern void qla2x00_dump_regs(scsi_qla_host_t *);
 extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
+extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
 
 /*
  * Global Function Prototypes in qla_gs.c source file.
@@ -430,7 +463,10 @@ extern void qla2x00_init_host_attr(scsi_qla_host_t *);
 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
 extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
-extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *,
+    struct msg_echo_lb *, uint16_t *);
+extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
+extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t);
 
 /*
  * Global Function Prototypes in qla_dfs.c source file.
@@ -459,4 +495,88 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 
+/* qla82xx related functions */
+
+/* PCI related functions */
+extern int qla82xx_pci_config(struct scsi_qla_host *);
+extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_pci_mem_write_2M(struct qla_hw_data *, u64, void *, int);
+extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
+extern int qla82xx_pci_region_offset(struct pci_dev *, int);
+extern int qla82xx_pci_region_len(struct pci_dev *, int);
+extern int qla82xx_iospace_config(struct qla_hw_data *);
+
+/* Initialization related functions */
+extern void qla82xx_reset_chip(struct scsi_qla_host *);
+extern void qla82xx_config_rings(struct scsi_qla_host *);
+extern int qla82xx_nvram_config(struct scsi_qla_host *);
+extern int qla82xx_pinit_from_rom(scsi_qla_host_t *);
+extern int qla82xx_load_firmware(scsi_qla_host_t *);
+extern int qla82xx_reset_hw(scsi_qla_host_t *);
+extern int qla82xx_load_risc_blob(scsi_qla_host_t *, uint32_t *);
+extern void qla82xx_watchdog(scsi_qla_host_t *);
+
+/* Firmware and flash related functions */
+extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
+extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+    uint32_t, uint32_t);
+extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+    uint32_t, uint32_t);
+
+/* Mailbox related functions */
+extern int qla82xx_abort_isp(scsi_qla_host_t *);
+extern int qla82xx_restart_isp(scsi_qla_host_t *);
+
+/* IOCB related functions */
+extern int qla82xx_start_scsi(srb_t *);
+
+/* Interrupt related */
+extern irqreturn_t qla82xx_intr_handler(int, void *);
+extern irqreturn_t qla82xx_msi_handler(int, void *);
+extern irqreturn_t qla82xx_msix_default(int, void *);
+extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
+extern void qla82xx_enable_intrs(struct qla_hw_data *);
+extern void qla82xx_disable_intrs(struct qla_hw_data *);
+extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
+extern void qla82xx_poll(int, void *);
+extern void qla82xx_init_flags(struct qla_hw_data *);
+
+/* ISP 8021 hardware related */
+extern int qla82xx_crb_win_lock(struct qla_hw_data *);
+extern void qla82xx_crb_win_unlock(struct qla_hw_data *);
+extern int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *, ulong *);
+extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
+extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
+extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_check_for_bad_spd(struct qla_hw_data *);
+extern int qla82xx_load_fw(scsi_qla_host_t *);
+extern int qla82xx_rom_lock(struct qla_hw_data *);
+extern void qla82xx_rom_unlock(struct qla_hw_data *);
+extern int qla82xx_rom_fast_read(struct qla_hw_data *, int , int *);
+extern int qla82xx_do_rom_fast_read(struct qla_hw_data *, int, int *);
+extern unsigned long qla82xx_decode_crb_addr(unsigned long);
+
+/* ISP 8021 IDC */
+extern void qla82xx_clear_drv_active(struct qla_hw_data *);
+extern int qla82xx_idc_lock(struct qla_hw_data *);
+extern void qla82xx_idc_unlock(struct qla_hw_data *);
+extern int qla82xx_device_state_handler(scsi_qla_host_t *);
+
+extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
+    size_t, char *);
+extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
+extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
+extern void qla82xx_start_iocbs(srb_t *);
+extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern void qla82xx_wait_for_pending_commands(scsi_qla_host_t *);
+
+/* BSG related functions */
+extern int qla24xx_bsg_request(struct fc_bsg_job *);
+extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
+extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
+    dma_addr_t, size_t, uint32_t);
+extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
+    uint16_t *, uint16_t *);
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4647015eba63..872c55f049a5 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1535,7 +1535,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
 	eiter = (struct ct_fdmi_port_attr *) (entries + size);
 	eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
 	eiter->len = __constant_cpu_to_be16(4 + 4);
-	if (IS_QLA81XX(ha))
+	if (IS_QLA8XXX_TYPE(ha))
 		eiter->a.sup_speed = __constant_cpu_to_be32(
 		    FDMI_PORT_SPEED_10GB);
 	else if (IS_QLA25XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4229bb483c5e..ab2cc71994c2 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -48,6 +48,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
 {
 	srb_t *sp = (srb_t *)__data;
 	struct srb_ctx *ctx;
+	struct srb_iocb *iocb;
 	fc_port_t *fcport = sp->fcport;
 	struct qla_hw_data *ha = fcport->vha->hw;
 	struct req_que *req;
@@ -57,17 +58,21 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
 	req = ha->req_q_map[0];
 	req->outstanding_cmds[sp->handle] = NULL;
 	ctx = sp->ctx;
-	ctx->timeout(sp);
+	iocb = ctx->u.iocb_cmd;
+	iocb->timeout(sp);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	ctx->free(sp);
+	iocb->free(sp);
 }
 
 void
 qla2x00_ctx_sp_free(srb_t *sp)
 {
 	struct srb_ctx *ctx = sp->ctx;
+	struct srb_iocb *iocb = ctx->u.iocb_cmd;
 
+	del_timer_sync(&iocb->timer);
+	kfree(iocb);
 	kfree(ctx);
 	mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
 }
@@ -79,6 +84,7 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
 	srb_t *sp;
 	struct qla_hw_data *ha = vha->hw;
 	struct srb_ctx *ctx;
+	struct srb_iocb *iocb;
 
 	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
 	if (!sp)
@@ -86,21 +92,30 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
 	ctx = kzalloc(size, GFP_KERNEL);
 	if (!ctx) {
 		mempool_free(sp, ha->srb_mempool);
+		sp = NULL;
+		goto done;
+	}
+	iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
+	if (!iocb) {
+		mempool_free(sp, ha->srb_mempool);
+		sp = NULL;
+		kfree(ctx);
 		goto done;
 	}
 
 	memset(sp, 0, sizeof(*sp));
 	sp->fcport = fcport;
 	sp->ctx = ctx;
-	ctx->free = qla2x00_ctx_sp_free;
+	ctx->u.iocb_cmd = iocb;
+	iocb->free = qla2x00_ctx_sp_free;
 
-	init_timer(&ctx->timer);
+	init_timer(&iocb->timer);
 	if (!tmo)
 		goto done;
-	ctx->timer.expires = jiffies + tmo * HZ;
-	ctx->timer.data = (unsigned long)sp;
-	ctx->timer.function = qla2x00_ctx_sp_timeout;
-	add_timer(&ctx->timer);
+	iocb->timer.expires = jiffies + tmo * HZ;
+	iocb->timer.data = (unsigned long)sp;
+	iocb->timer.function = qla2x00_ctx_sp_timeout;
+	add_timer(&iocb->timer);
 done:
 	return sp;
 }
@@ -110,41 +125,56 @@ done:
 #define ELS_TMO_2_RATOV(ha) ((ha)->r_a_tov / 10 * 2)
 
 static void
-qla2x00_async_logio_timeout(srb_t *sp)
+qla2x00_async_iocb_timeout(srb_t *sp)
 {
 	fc_port_t *fcport = sp->fcport;
-	struct srb_logio *lio = sp->ctx;
+	struct srb_ctx *ctx = sp->ctx;
 
 	DEBUG2(printk(KERN_WARNING
 	    "scsi(%ld:%x): Async-%s timeout.\n",
-	    fcport->vha->host_no, sp->handle,
-	    lio->ctx.type == SRB_LOGIN_CMD ? "login": "logout"));
+	    fcport->vha->host_no, sp->handle, ctx->name));
 
-	if (lio->ctx.type == SRB_LOGIN_CMD)
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	if (ctx->type == SRB_LOGIN_CMD)
 		qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
 }
 
+static void
+qla2x00_async_login_ctx_done(srb_t *sp)
+{
+	struct srb_ctx *ctx = sp->ctx;
+	struct srb_iocb *lio = ctx->u.iocb_cmd;
+
+	qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
+	    lio->u.logio.data);
+	lio->free(sp);
+}
+
 int
 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
 	struct qla_hw_data *ha = vha->hw;
 	srb_t *sp;
-	struct srb_logio *lio;
+	struct srb_ctx *ctx;
+	struct srb_iocb *lio;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
+	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
 	    ELS_TMO_2_RATOV(ha) + 2);
 	if (!sp)
 		goto done;
 
-	lio = sp->ctx;
-	lio->ctx.type = SRB_LOGIN_CMD;
-	lio->ctx.timeout = qla2x00_async_logio_timeout;
-	lio->flags |= SRB_LOGIN_COND_PLOGI;
+	ctx = sp->ctx;
+	ctx->type = SRB_LOGIN_CMD;
+	ctx->name = "login";
+	lio = ctx->u.iocb_cmd;
+	lio->timeout = qla2x00_async_iocb_timeout;
+	lio->done = qla2x00_async_login_ctx_done;
+	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
-		lio->flags |= SRB_LOGIN_RETRIED;
+		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
 		goto done_free_sp;
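
The refactor above threads the timeout, done, and free callbacks through struct srb_iocb instead of struct srb_ctx, so every async command gets the same lifecycle: start arms a timer, completion invokes done, and done releases the SRB through free. A standalone sketch of that wiring — all names here are hypothetical stand-ins, not driver code:

/* User-space sketch of the ctx/iocb callback wiring used above. */
#include <stdio.h>
#include <stdlib.h>

struct sp;

struct iocb {
	void (*done)(struct sp *);
	void (*free)(struct sp *);
};

struct ctx {
	const char *name;
	struct iocb *iocb_cmd;
};

struct sp {
	struct ctx *ctx;
};

static void sp_free(struct sp *sp)
{
	free(sp->ctx->iocb_cmd);
	free(sp->ctx);
	free(sp);
}

static void login_done(struct sp *sp)
{
	printf("Async-%s complete\n", sp->ctx->name);
	sp->ctx->iocb_cmd->free(sp);	/* done handler releases the sp */
}

int main(void)
{
	struct sp *sp = calloc(1, sizeof(*sp));
	struct ctx *ctx = calloc(1, sizeof(*ctx));
	struct iocb *iocb = calloc(1, sizeof(*iocb));

	if (!sp || !ctx || !iocb)
		return 1;	/* demo: no unwinding on OOM */
	sp->ctx = ctx;
	ctx->name = "login";
	ctx->iocb_cmd = iocb;
	iocb->done = login_done;
	iocb->free = sp_free;

	iocb->done(sp);		/* simulate IOCB completion */
	return 0;
}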
@@ -157,29 +187,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 	return rval;
 
 done_free_sp:
-	del_timer_sync(&lio->ctx.timer);
-	lio->ctx.free(sp);
+	lio->free(sp);
 done:
 	return rval;
 }
 
+static void
+qla2x00_async_logout_ctx_done(srb_t *sp)
+{
+	struct srb_ctx *ctx = sp->ctx;
+	struct srb_iocb *lio = ctx->u.iocb_cmd;
+
+	qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+	    lio->u.logio.data);
+	lio->free(sp);
+}
+
 int
 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	struct qla_hw_data *ha = vha->hw;
 	srb_t *sp;
-	struct srb_logio *lio;
+	struct srb_ctx *ctx;
+	struct srb_iocb *lio;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
+	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
 	    ELS_TMO_2_RATOV(ha) + 2);
 	if (!sp)
 		goto done;
 
-	lio = sp->ctx;
-	lio->ctx.type = SRB_LOGOUT_CMD;
-	lio->ctx.timeout = qla2x00_async_logio_timeout;
+	ctx = sp->ctx;
+	ctx->type = SRB_LOGOUT_CMD;
+	ctx->name = "logout";
+	lio = ctx->u.iocb_cmd;
+	lio->timeout = qla2x00_async_iocb_timeout;
+	lio->done = qla2x00_async_logout_ctx_done;
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
 		goto done_free_sp;
@@ -191,30 +235,186 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 	return rval;
 
 done_free_sp:
-	del_timer_sync(&lio->ctx.timer);
-	lio->ctx.free(sp);
+	lio->free(sp);
 done:
 	return rval;
 }
 
+static void
+qla2x00_async_adisc_ctx_done(srb_t *sp)
+{
+	struct srb_ctx *ctx = sp->ctx;
+	struct srb_iocb *lio = ctx->u.iocb_cmd;
+
+	qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+	    lio->u.logio.data);
+	lio->free(sp);
+}
+
 int
+qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+	struct srb_ctx *ctx;
+	struct srb_iocb *lio;
+	int rval;
+
+	rval = QLA_FUNCTION_FAILED;
+	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
+	    ELS_TMO_2_RATOV(ha) + 2);
+	if (!sp)
+		goto done;
+
+	ctx = sp->ctx;
+	ctx->type = SRB_ADISC_CMD;
+	ctx->name = "adisc";
+	lio = ctx->u.iocb_cmd;
+	lio->timeout = qla2x00_async_iocb_timeout;
+	lio->done = qla2x00_async_adisc_ctx_done;
+	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	DEBUG2(printk(KERN_DEBUG
+	    "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
+	    fcport->vha->host_no, sp->handle, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
+
+	return rval;
+
+done_free_sp:
+	lio->free(sp);
+done:
+	return rval;
+}
+
+static void
+qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
+{
+	struct srb_ctx *ctx = sp->ctx;
+	struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+
+	qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
+	iocb->free(sp);
+}
+
+int
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+	uint32_t tag)
+{
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+	struct srb_ctx *ctx;
+	struct srb_iocb *tcf;
+	int rval;
+
+	rval = QLA_FUNCTION_FAILED;
+	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
+	    ELS_TMO_2_RATOV(ha) + 2);
+	if (!sp)
+		goto done;
+
+	ctx = sp->ctx;
+	ctx->type = SRB_TM_CMD;
+	ctx->name = "tmf";
+	tcf = ctx->u.iocb_cmd;
+	tcf->u.tmf.flags = flags;
+	tcf->u.tmf.lun = lun;
+	tcf->u.tmf.data = tag;
+	tcf->timeout = qla2x00_async_iocb_timeout;
+	tcf->done = qla2x00_async_tm_cmd_ctx_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	DEBUG2(printk(KERN_DEBUG
+	    "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
+	    fcport->vha->host_no, sp->handle, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
+
+	return rval;
+
+done_free_sp:
+	tcf->free(sp);
+done:
+	return rval;
+}
+
+static void
+qla2x00_async_marker_ctx_done(srb_t *sp)
+{
+	struct srb_ctx *ctx = sp->ctx;
+	struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+
+	qla2x00_async_marker_done(sp->fcport->vha, sp->fcport, iocb);
+	iocb->free(sp);
+}
+
+int
+qla2x00_async_marker(fc_port_t *fcport, uint16_t lun, uint8_t modif)
+{
+	struct scsi_qla_host *vha = fcport->vha;
+	srb_t *sp;
+	struct srb_ctx *ctx;
+	struct srb_iocb *mrk;
+	int rval;
+
+	rval = QLA_FUNCTION_FAILED;
+	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 0);
+	if (!sp)
+		goto done;
+
+	ctx = sp->ctx;
+	ctx->type = SRB_MARKER_CMD;
+	ctx->name = "marker";
+	mrk = ctx->u.iocb_cmd;
+	mrk->u.marker.lun = lun;
+	mrk->u.marker.modif = modif;
+	mrk->timeout = qla2x00_async_iocb_timeout;
+	mrk->done = qla2x00_async_marker_ctx_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	DEBUG2(printk(KERN_DEBUG
+	    "scsi(%ld:%x): Async-marker - loop-id=%x "
+	    "portid=%02x%02x%02x.\n",
+	    fcport->vha->host_no, sp->handle, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area,
+	    fcport->d_id.b.al_pa));
+
+	return rval;
+
+done_free_sp:
+	mrk->free(sp);
+done:
+	return rval;
+}
+
+void
 qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
 	int rval;
-	uint8_t opts = 0;
 
 	switch (data[0]) {
 	case MBS_COMMAND_COMPLETE:
-		if (fcport->flags & FCF_FCP2_DEVICE)
-			opts |= BIT_1;
-		rval = qla2x00_get_port_database(vha, fcport, opts);
-		if (rval != QLA_SUCCESS)
-			qla2x00_mark_device_lost(vha, fcport, 1, 0);
-		else
-			qla2x00_update_fcport(vha, fcport);
+		if (fcport->flags & FCF_FCP2_DEVICE) {
+			fcport->flags |= FCF_ASYNC_SENT;
+			qla2x00_post_async_adisc_work(vha, fcport, data);
+			break;
+		}
+		qla2x00_update_fcport(vha, fcport);
 		break;
 	case MBS_COMMAND_ERROR:
+		fcport->flags &= ~FCF_ASYNC_SENT;
 		if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 		else
@@ -228,21 +428,84 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 		fcport->loop_id++;
 		rval = qla2x00_find_new_loop_id(vha, fcport);
 		if (rval != QLA_SUCCESS) {
+			fcport->flags &= ~FCF_ASYNC_SENT;
 			qla2x00_mark_device_lost(vha, fcport, 1, 0);
 			break;
 		}
 		qla2x00_post_async_login_work(vha, fcport, NULL);
 		break;
 	}
-	return QLA_SUCCESS;
+	return;
 }
 
-int
+void
 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
 	qla2x00_mark_device_lost(vha, fcport, 1, 0);
-	return QLA_SUCCESS;
+	return;
+}
+
+void
+qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	if (data[0] == MBS_COMMAND_COMPLETE) {
+		qla2x00_update_fcport(vha, fcport);
+
+		return;
+	}
+
+	/* Retry login. */
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+	else
+		qla2x00_mark_device_lost(vha, fcport, 1, 0);
+
+	return;
+}
+
+void
+qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+    struct srb_iocb *iocb)
+{
+	int rval;
+	uint32_t flags;
+	uint16_t lun;
+
+	flags = iocb->u.tmf.flags;
+	lun = (uint16_t)iocb->u.tmf.lun;
+
+	/* Issue Marker IOCB */
+	rval = qla2x00_async_marker(fcport, lun,
+	    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+
+	if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
+		DEBUG2_3_11(printk(KERN_WARNING
+		    "%s(%ld): TM IOCB failed (%x).\n",
+		    __func__, vha->host_no, rval));
+	}
+
+	return;
+}
+
+void
+qla2x00_async_marker_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+    struct srb_iocb *iocb)
+{
+	/*
+	 * Currently we don't have any specific post response processing
+	 * for this IOCB. We'll just return success or failed
+	 * depending on whether the IOCB command succeeded or failed.
+	 */
+	if (iocb->u.tmf.data) {
+		DEBUG2_3_11(printk(KERN_WARNING
+		    "%s(%ld): Marker IOCB failed (%x).\n",
+		    __func__, vha->host_no, iocb->u.tmf.data));
+	}
+
+	return;
 }
 
 /****************************************************************************/
@@ -328,6 +591,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 		if (rval)
 			return (rval);
 	}
+
 	if (IS_QLA84XX(ha)) {
 		ha->cs84xx = qla84xx_get_chip(vha);
 		if (!ha->cs84xx) {
@@ -340,7 +604,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 	ha->flags.chip_reset_done = 1;
 
 	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
 		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
 		rval = qla84xx_init_chip(vha);
 		if (rval != QLA_SUCCESS) {
 			qla_printk(KERN_ERR, ha,
@@ -349,6 +613,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
 		}
 	}
 
+	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) {
+		if (qla24xx_read_fcp_prio_cfg(vha))
+			qla_printk(KERN_ERR, ha,
+			    "Unable to read FCP priority data.\n");
+	}
+
 	return (rval);
 }
 
@@ -955,6 +1225,9 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
 
+	if (IS_QLA82XX(ha))
+		return QLA_SUCCESS;
+
 	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
 
 	rval = qla2x00_mbx_reg_test(vha);
@@ -1177,6 +1450,12 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 	unsigned long flags;
 	uint16_t fw_major_version;
 
+	if (IS_QLA82XX(ha)) {
+		rval = ha->isp_ops->load_risc(vha, &srisc_address);
+		if (rval == QLA_SUCCESS)
+			goto enable_82xx_npiv;
+	}
+
 	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
 		/* Disable SRAM, Instruction RAM and GP RAM parity. */
 		spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1202,6 +1481,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 	rval = qla2x00_execute_fw(vha, srisc_address);
 	/* Retrieve firmware information. */
 	if (rval == QLA_SUCCESS) {
+enable_82xx_npiv:
 		fw_major_version = ha->fw_major_version;
 		rval = qla2x00_get_fw_version(vha,
 		    &ha->fw_major_version,
@@ -1226,8 +1506,10 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 		    &ha->fw_xcb_count, NULL, NULL,
 		    &ha->max_npiv_vports, NULL);
 
-		if (!fw_major_version && ql2xallocfwdump)
-			qla2x00_alloc_fw_dump(vha);
+		if (!fw_major_version && ql2xallocfwdump) {
+			if (!IS_QLA82XX(ha))
+				qla2x00_alloc_fw_dump(vha);
+		}
 	}
 	} else {
 		DEBUG2(printk(KERN_INFO
@@ -1384,6 +1666,9 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
 	int rval;
 	struct qla_hw_data *ha = vha->hw;
 
+	if (IS_QLA82XX(ha))
+		return;
+
 	/* Update Serial Link options. */
 	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
 		return;
@@ -1818,7 +2103,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 	return(rval);
 }
 
-static inline void
+inline void
 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
     char *def)
 {
@@ -1826,7 +2111,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
 	uint16_t index;
 	struct qla_hw_data *ha = vha->hw;
 	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
-	    !IS_QLA81XX(ha);
+	    !IS_QLA8XXX_TYPE(ha);
 
 	if (memcmp(model, BINZERO, len) != 0) {
 		strncpy(ha->model_number, model, len);
@@ -2017,6 +2302,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
 	if (IS_QLA23XX(ha)) {
 		nv->firmware_options[0] |= BIT_2;
 		nv->firmware_options[0] &= ~BIT_3;
+		nv->firmware_options[0] &= ~BIT_6;
 		nv->add_firmware_options[1] |= BIT_5 | BIT_4;
 
 		if (IS_QLA2300(ha)) {
@@ -2635,7 +2921,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 	    PORT_RETRY_TIME;
 	atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
 	    PORT_RETRY_TIME);
-	fcport->flags &= ~FCF_LOGIN_NEEDED;
+	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
 
 	qla2x00_iidma_fcport(vha, fcport);
 
@@ -2864,7 +3150,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 	sw_info_t *swl;
 	int swl_idx;
 	int first_dev, last_dev;
-	port_id_t wrap, nxt_d_id;
+	port_id_t wrap = {}, nxt_d_id;
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
 	struct scsi_qla_host *tvp;
@@ -3167,7 +3453,7 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
 	uint32_t rscn_entry;
 	uint8_t rscn_out_iter;
 	uint8_t format;
-	port_id_t d_id;
+	port_id_t d_id = {};
 
 	rval = QLA_RSCNS_HANDLED;
 
@@ -3281,11 +3567,15 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
 	retry = 0;
 
 	if (IS_ALOGIO_CAPABLE(ha)) {
+		if (fcport->flags & FCF_ASYNC_SENT)
+			return rval;
+		fcport->flags |= FCF_ASYNC_SENT;
 		rval = qla2x00_post_async_login_work(vha, fcport, NULL);
 		if (!rval)
 			return rval;
 	}
 
+	fcport->flags &= ~FCF_ASYNC_SENT;
 	rval = qla2x00_fabric_login(vha, fcport, next_loopid);
 	if (rval == QLA_SUCCESS) {
 		/* Send an ADISC to FCP2 devices.*/
@@ -3546,6 +3836,45 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 		qla2x00_rport_del(fcport);
 }
 
3839void
3840qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3841{
3842 struct qla_hw_data *ha = vha->hw;
3843 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3844 struct scsi_qla_host *tvp;
3845
3846 vha->flags.online = 0;
3847 ha->flags.chip_reset_done = 0;
3848 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3849 ha->qla_stats.total_isp_aborts++;
3850
3851 qla_printk(KERN_INFO, ha,
3852 "Performing ISP error recovery - ha= %p.\n", ha);
3853
3854 /* Chip reset does not apply to 82XX */
3855 if (!IS_QLA82XX(ha))
3856 ha->isp_ops->reset_chip(vha);
3857
3858 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3859 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3860 atomic_set(&vha->loop_state, LOOP_DOWN);
3861 qla2x00_mark_all_devices_lost(vha, 0);
3862 list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list)
3863 qla2x00_mark_all_devices_lost(vp, 0);
3864 } else {
3865 if (!atomic_read(&vha->loop_down_timer))
3866 atomic_set(&vha->loop_down_timer,
3867 LOOP_DOWN_TIME);
3868 }
3869
3870 /* Make sure for ISP 82XX IO DMA is complete */
3871 if (IS_QLA82XX(ha))
3872 qla82xx_wait_for_pending_commands(vha);
3873
3874 /* Requeue all commands in outstanding command list. */
3875 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3876}
3877
 /*
 * qla2x00_abort_isp
 *	Resets ISP and aborts all outstanding commands.
@@ -3567,27 +3896,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 	struct req_que *req = ha->req_q_map[0];
 
 	if (vha->flags.online) {
-		vha->flags.online = 0;
-		ha->flags.chip_reset_done = 0;
-		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-		ha->qla_stats.total_isp_aborts++;
-
-		qla_printk(KERN_INFO, ha,
-		    "Performing ISP error recovery - ha= %p.\n", ha);
-		ha->isp_ops->reset_chip(vha);
-
-		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
-		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
-			atomic_set(&vha->loop_state, LOOP_DOWN);
-			qla2x00_mark_all_devices_lost(vha, 0);
-		} else {
-			if (!atomic_read(&vha->loop_down_timer))
-				atomic_set(&vha->loop_down_timer,
-				    LOOP_DOWN_TIME);
-		}
-
-		/* Requeue all commands in outstanding command list. */
-		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+		qla2x00_abort_isp_cleanup(vha);
 
 		if (unlikely(pci_channel_offline(ha->pdev) &&
 		    ha->flags.pci_channel_io_perm_failure)) {
@@ -3843,6 +4152,9 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
+	if (IS_QLA82XX(ha))
+		return;
+
 	vha->flags.online = 0;
 	ha->isp_ops->disable_intrs(ha);
 
@@ -3906,6 +4218,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
 	}
 	ha->nvram_size = sizeof(struct nvram_24xx);
 	ha->vpd_size = FA_NVRAM_VPD_SIZE;
+	if (IS_QLA82XX(ha))
+		ha->vpd_size = FA_VPD_SIZE_82XX;
 
 	/* Get VPD data into cache */
 	ha->vpd = ha->nvram + VPD_OFFSET;
@@ -4769,7 +5083,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 	 * Setup driver NVRAM options.
 	 */
 	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
-	    "QLE81XX");
+	    "QLE8XXX");
 
 	/* Use alternate WWN? */
 	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
@@ -4892,6 +5206,114 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 	return (rval);
 }
 
5209int
5210qla82xx_restart_isp(scsi_qla_host_t *vha)
5211{
5212 int status, rval;
5213 uint32_t wait_time;
5214 struct qla_hw_data *ha = vha->hw;
5215 struct req_que *req = ha->req_q_map[0];
5216 struct rsp_que *rsp = ha->rsp_q_map[0];
5217 struct scsi_qla_host *vp;
5218 struct scsi_qla_host *tvp;
5219
5220 status = qla2x00_init_rings(vha);
5221 if (!status) {
5222 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5223 ha->flags.chip_reset_done = 1;
5224
5225 status = qla2x00_fw_ready(vha);
5226 if (!status) {
5227 qla_printk(KERN_INFO, ha,
5228 "%s(): Start configure loop, "
5229 "status = %d\n", __func__, status);
5230
5231 /* Issue a marker after FW becomes ready. */
5232 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
5233
5234 vha->flags.online = 1;
5235 /* Wait at most MAX_TARGET RSCNs for a stable link. */
5236 wait_time = 256;
5237 do {
5238 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5239 qla2x00_configure_loop(vha);
5240 wait_time--;
5241 } while (!atomic_read(&vha->loop_down_timer) &&
5242 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
5243 wait_time &&
5244 (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
5245 }
5246
5247 /* if no cable then assume it's good */
5248 if ((vha->device_flags & DFLG_NO_CABLE))
5249 status = 0;
5250
5251 qla_printk(KERN_INFO, ha,
5252 "%s(): Configure loop done, status = 0x%x\n",
5253 __func__, status);
5254 }
5255
5256 if (!status) {
5257 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
5258
5259 if (!atomic_read(&vha->loop_down_timer)) {
5260 /*
5261 * Issue marker command only when we are going
5262 * to start the I/O .
5263 */
5264 vha->marker_needed = 1;
5265 }
5266
5267 vha->flags.online = 1;
5268
5269 ha->isp_ops->enable_intrs(ha);
5270
5271 ha->isp_abort_cnt = 0;
5272 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5273
5274 if (ha->fce) {
5275 ha->flags.fce_enabled = 1;
5276 memset(ha->fce, 0,
5277 fce_calc_size(ha->fce_bufs));
5278 rval = qla2x00_enable_fce_trace(vha,
5279 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
5280 &ha->fce_bufs);
5281 if (rval) {
5282 qla_printk(KERN_WARNING, ha,
5283 "Unable to reinitialize FCE "
5284 "(%d).\n", rval);
5285 ha->flags.fce_enabled = 0;
5286 }
5287 }
5288
5289 if (ha->eft) {
5290 memset(ha->eft, 0, EFT_SIZE);
5291 rval = qla2x00_enable_eft_trace(vha,
5292 ha->eft_dma, EFT_NUM_BUFFERS);
5293 if (rval) {
5294 qla_printk(KERN_WARNING, ha,
5295 "Unable to reinitialize EFT "
5296 "(%d).\n", rval);
5297 }
5298 }
5299 }
5300
5301 if (!status) {
5302 DEBUG(printk(KERN_INFO
5303 "qla82xx_restart_isp(%ld): succeeded.\n",
5304 vha->host_no));
5305 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
5306 if (vp->vp_idx)
5307 qla2x00_vp_abort_isp(vp);
5308 }
5309 } else {
5310 qla_printk(KERN_INFO, ha,
5311 "qla82xx_restart_isp: **** FAILED ****\n");
5312 }
5313
5314 return status;
5315}
5316
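The post-reset loop in qla82xx_restart_isp() bounds link stabilization: it re-runs configure-loop while a resync is still being requested, for at most 256 passes, bailing early on loop-down or a fresh abort request. A small stand-alone model of that bounded retry; the flag variables are hypothetical stand-ins for the dpc_flags bits:

#include <stdio.h>

int main(void)
{
	int loop_down = 0, abort_needed = 0, resync_needed = 1;
	unsigned wait_time = 256;

	do {
		resync_needed = 0;	/* clear_bit(LOOP_RESYNC_NEEDED) */
		/* qla2x00_configure_loop() would run here and may set
		 * resync_needed again while the fabric is still changing. */
		wait_time--;
	} while (!loop_down && !abort_needed && wait_time && resync_needed);

	printf("passes used: %u\n", 256 - wait_time);
	return 0;
}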
 void
 qla81xx_update_fw_options(scsi_qla_host_t *vha)
 {
@@ -4905,3 +5327,165 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
 	ha->fw_options[2] |= BIT_9;
 	qla2x00_set_fw_options(vha, ha->fw_options);
 }
5330
5331/*
5332 * qla24xx_get_fcp_prio
5333 * Gets the fcp cmd priority value for the logged in port.
5334 * Looks for a match of the port descriptors within
5335 * each of the fcp prio config entries. If a match is found,
5336 * the tag (priority) value is returned.
5337 *
5338 * Input:
5339 *	ha = adapter block pointer.
5340 * fcport = port structure pointer.
5341 *
5342 * Return:
5343 * non-zero (if found)
5344 * 0 (if not found)
5345 *
5346 * Context:
5347 * Kernel context
5348 */
5349uint8_t
5350qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
5351{
5352 int i, entries;
5353 uint8_t pid_match, wwn_match;
5354 uint8_t priority;
5355 uint32_t pid1, pid2;
5356 uint64_t wwn1, wwn2;
5357 struct qla_fcp_prio_entry *pri_entry;
5358 struct qla_hw_data *ha = vha->hw;
5359
5360 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
5361 return 0;
5362
5363 priority = 0;
5364 entries = ha->fcp_prio_cfg->num_entries;
5365 pri_entry = &ha->fcp_prio_cfg->entry[0];
5366
5367 for (i = 0; i < entries; i++) {
5368 pid_match = wwn_match = 0;
5369
5370 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
5371 pri_entry++;
5372 continue;
5373 }
5374
5375 /* check source pid for a match */
5376 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
5377 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
5378 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
5379 if (pid1 == INVALID_PORT_ID)
5380 pid_match++;
5381 else if (pid1 == pid2)
5382 pid_match++;
5383 }
5384
5385 /* check destination pid for a match */
5386 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
5387 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
5388 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
5389 if (pid1 == INVALID_PORT_ID)
5390 pid_match++;
5391 else if (pid1 == pid2)
5392 pid_match++;
5393 }
5394
5395 /* check source WWN for a match */
5396 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
5397 wwn1 = wwn_to_u64(vha->port_name);
5398 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
5399 if (wwn2 == (uint64_t)-1)
5400 wwn_match++;
5401 else if (wwn1 == wwn2)
5402 wwn_match++;
5403 }
5404
5405 /* check destination WWN for a match */
5406 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
5407 wwn1 = wwn_to_u64(fcport->port_name);
5408 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
5409 if (wwn2 == (uint64_t)-1)
5410 wwn_match++;
5411 else if (wwn1 == wwn2)
5412 wwn_match++;
5413 }
5414
5415 if (pid_match == 2 || wwn_match == 2) {
5416 /* Found a matching entry */
5417 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
5418 priority = pri_entry->tag;
5419 break;
5420 }
5421
5422 pri_entry++;
5423 }
5424
5425 return priority;
5426}
5427
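The matching loop above requires both halves of a criterion to hold: an entry selects a priority only when source and destination port IDs both match (pid_match == 2) or source and destination WWPNs both match (wwn_match == 2), with an all-ones field acting as a wildcard. A compact user-space restatement of that rule; the constants are illustrative stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define WILD_PID 0xFFFFFFu	/* models the INVALID_PORT_ID mask/wildcard */
#define WILD_WWN UINT64_MAX	/* all-ones WWPN wildcard */

static int pid_ok(uint32_t rule, uint32_t pid)
{
	rule &= WILD_PID;
	pid &= WILD_PID;
	return rule == WILD_PID || rule == pid;
}

static int wwn_ok(uint64_t rule, uint64_t wwn)
{
	return rule == WILD_WWN || rule == wwn;
}

int main(void)
{
	uint32_t s_pid = 0x010203, d_pid = 0x040506;
	uint64_t s_wwn = 0x2100001122334455ULL, d_wwn = 0x2100005566778899ULL;

	/* Entry with a wildcard source PID and an exact destination PID. */
	int match = pid_ok(WILD_PID, s_pid) && pid_ok(0x040506, d_pid);
	/* Or an entry naming both WWPNs exactly. */
	match |= wwn_ok(s_wwn, s_wwn) && wwn_ok(d_wwn, d_wwn);

	printf("entry matches: %d\n", match);
	return 0;
}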
5428/*
5429 * qla24xx_update_fcport_fcp_prio
5430 * Activates fcp priority for the logged in fc port
5431 *
5432 * Input:
5433 * ha = adapter block pointer.
5434 *	fcport = port structure pointer.
5435 *
5436 * Return:
5437 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5438 *
5439 * Context:
5440 * Kernel context.
5441 */
5442int
5443qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport)
5444{
5445 int ret;
5446 uint8_t priority;
5447 uint16_t mb[5];
5448
5449 if (atomic_read(&fcport->state) == FCS_UNCONFIGURED ||
5450 fcport->port_type != FCT_TARGET ||
5451 fcport->loop_id == FC_NO_LOOP_ID)
5452 return QLA_FUNCTION_FAILED;
5453
5454 priority = qla24xx_get_fcp_prio(ha, fcport);
5455 ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb);
5456 if (ret == QLA_SUCCESS)
5457 fcport->fcp_prio = priority;
5458 else
5459 DEBUG2(printk(KERN_WARNING
5460 "scsi(%ld): Unable to activate fcp priority, "
5461 " ret=0x%x\n", ha->host_no, ret));
5462
5463 return ret;
5464}
5465
5466/*
5467 * qla24xx_update_all_fcp_prio
5468 * Activates fcp priority for all the logged in ports
5469 *
5470 * Input:
5471 * ha = adapter block pointer.
5472 *
5473 * Return:
5474 * QLA_SUCCESS or QLA_FUNCTION_FAILED
5475 *
5476 * Context:
5477 * Kernel context.
5478 */
5479int
5480qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
5481{
5482 int ret;
5483 fc_port_t *fcport;
5484
5485 ret = QLA_FUNCTION_FAILED;
5486 /* We need to set priority for all logged in ports */
5487 list_for_each_entry(fcport, &vha->vp_fcports, list)
5488 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
5489
5490 return ret;
5491}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 5e0a7095c9f2..84c2fea154d2 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -37,7 +37,10 @@ qla2x00_poll(struct rsp_que *rsp)
 	unsigned long flags;
 	struct qla_hw_data *ha = rsp->hw;
 	local_irq_save(flags);
-	ha->isp_ops->intr_handler(0, rsp);
+	if (IS_QLA82XX(ha))
+		qla82xx_poll(0, rsp);
+	else
+		ha->isp_ops->intr_handler(0, rsp);
 	local_irq_restore(flags);
 }
 
@@ -64,3 +67,19 @@ qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
 	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
 	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
 }
70
71static inline void
72qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
73{
74 struct dsd_dma *dsd_ptr, *tdsd_ptr;
75
76 /* clean up allocated prev pool */
77 list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
78 &((struct crc_context *)sp->ctx)->dsd_list, list) {
79 dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
80 dsd_ptr->dsd_list_dma);
81 list_del(&dsd_ptr->list);
82 kfree(dsd_ptr);
83 }
84 INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list);
85}
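qla2x00_clean_dsd_pool() above is the standard safe-iteration teardown: walk the list with the _safe variant so each node can be unlinked and freed mid-walk, then reinitialize the head for reuse. The same shape in plain user-space C with a hand-rolled singly linked list (no kernel list or DMA-pool API here):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static void clean_pool(struct node **head)
{
	struct node *n = *head, *tmp;

	while (n) {		/* "safe" walk: grab next before freeing */
		tmp = n->next;
		free(n);	/* stands in for dma_pool_free() */
		n = tmp;
	}
	*head = NULL;		/* stands in for INIT_LIST_HEAD() */
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->next = head;
		head = n;
	}
	clean_pool(&head);
	printf("pool empty: %d\n", head == NULL);
	return 0;
}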
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 8299a9891bfe..8ef945365412 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -145,7 +145,49 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
 	return (cont_pkt);
 }
 
-/**
+static inline int
149qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
150{
151 uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
152
153 /* We only support T10 DIF right now */
154 if (guard != SHOST_DIX_GUARD_CRC) {
155 DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
156 return 0;
157 }
158
159	/* We always use DIF bundling for best performance */
160 *fw_prot_opts = 0;
161
162 /* Translate SCSI opcode to a protection opcode */
163 switch (scsi_get_prot_op(sp->cmd)) {
164 case SCSI_PROT_READ_STRIP:
165 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
166 break;
167 case SCSI_PROT_WRITE_INSERT:
168 *fw_prot_opts |= PO_MODE_DIF_INSERT;
169 break;
170 case SCSI_PROT_READ_INSERT:
171 *fw_prot_opts |= PO_MODE_DIF_INSERT;
172 break;
173 case SCSI_PROT_WRITE_STRIP:
174 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
175 break;
176 case SCSI_PROT_READ_PASS:
177 *fw_prot_opts |= PO_MODE_DIF_PASS;
178 break;
179 case SCSI_PROT_WRITE_PASS:
180 *fw_prot_opts |= PO_MODE_DIF_PASS;
181 break;
182 default: /* Normal Request */
183 *fw_prot_opts |= PO_MODE_DIF_PASS;
184 break;
185 }
186
187 return scsi_prot_sg_count(sp->cmd);
188}
189
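The switch above collapses six SCSI protection operations into three firmware DIF modes: strip operations remove the protection tuple (REMOVE), insert operations generate it (INSERT), and pass operations carry it through unchanged (PASS). A table-style restatement in stand-alone C; the enum values are illustrative stand-ins for the PO_MODE_DIF_* constants:

#include <stdio.h>

enum prot_op { RD_STRIP, WR_INSERT, RD_INSERT, WR_STRIP, RD_PASS, WR_PASS };
enum dif_mode { DIF_PASS, DIF_INSERT, DIF_REMOVE };

static enum dif_mode fw_mode(enum prot_op op)
{
	switch (op) {
	case RD_STRIP:
	case WR_STRIP:
		return DIF_REMOVE;	/* tuple stripped before the far end */
	case WR_INSERT:
	case RD_INSERT:
		return DIF_INSERT;	/* firmware generates the tuple */
	default:
		return DIF_PASS;	/* READ/WRITE_PASS and normal I/O */
	}
}

int main(void)
{
	printf("%d %d %d\n", fw_mode(RD_STRIP), fw_mode(WR_INSERT),
	    fw_mode(RD_PASS));
	return 0;
}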
+/*
  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
  * capable IOCB types.
  *
@@ -506,7 +548,10 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
 			cnt = (uint16_t)
 			    RD_REG_DWORD(&reg->isp25mq.req_q_out);
 		else {
-			if (IS_FWI2_CAPABLE(ha))
+			if (IS_QLA82XX(ha))
+				cnt = (uint16_t)RD_REG_DWORD(
+				    &reg->isp82.req_q_out);
+			else if (IS_FWI2_CAPABLE(ha))
 				cnt = (uint16_t)RD_REG_DWORD(
 				    &reg->isp24.req_q_out);
 			else
@@ -579,11 +624,29 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
 		req->ring_ptr++;
 
 	/* Set chip new ring index. */
-	if (ha->mqenable) {
+	if (IS_QLA82XX(ha)) {
+		uint32_t dbval = 0x04 | (ha->portnum << 5);
+
+		/* write, read and verify logic */
+		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+		if (ql2xdbwr)
+			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+		else {
+			WRT_REG_DWORD(
+			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
+			    dbval);
+			wmb();
+			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+				WRT_REG_DWORD((unsigned long __iomem *)
+				    ha->nxdb_wr_ptr, dbval);
+				wmb();
+			}
+		}
+	} else if (ha->mqenable) {
+		/* Set chip new ring index. */
 		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
 		RD_REG_DWORD(&ioreg->hccr);
-	}
-	else {
+	} else {
 		if (IS_FWI2_CAPABLE(ha)) {
 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
 			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
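The 82XX branch above packs queue id, ring index, and port number into a single doorbell word and then uses a write-and-verify handshake: write the value, read it back, and rewrite until the readback matches, so a dropped doorbell cannot strand a queued request. A user-space model of that handshake against plain memory; a real device register needs the I/O accessors shown above, not ordinary pointers:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t db_wr, db_rd;	/* stand-ins for nxdb_wr/rd_ptr */

static void hw_consume(void)		/* models the adapter latching it */
{
	db_rd = db_wr;
}

int main(void)
{
	/* 0x04 | (port << 5) | (queue id << 8) | (ring index << 16) */
	uint32_t dbval = 0x04 | (2u << 5) | (1u << 8) | (17u << 16);

	db_wr = dbval;
	hw_consume();
	while (db_rd != dbval)		/* retry until the doorbell sticks */
		db_wr = dbval;

	printf("doorbell 0x%x acknowledged\n", dbval);
	return 0;
}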
@@ -604,7 +667,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
  *
  * Returns the number of IOCB entries needed to store @dsds.
  */
-static inline uint16_t
+inline uint16_t
 qla24xx_calc_iocbs(uint16_t dsds)
 {
 	uint16_t iocbs;
@@ -615,6 +678,8 @@ qla24xx_calc_iocbs(uint16_t dsds)
 		if ((dsds - 1) % 5)
 			iocbs++;
 	}
+	DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
+	    __func__, iocbs));
 	return iocbs;
 }
 
@@ -626,7 +691,7 @@ qla24xx_calc_iocbs(uint16_t dsds)
  * @cmd_pkt: Command type 3 IOCB
  * @tot_dsds: Total number of segments to transfer
  */
-static inline void
+inline void
 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
     uint16_t tot_dsds)
 {
@@ -695,6 +760,453 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	}
 }
 
763struct fw_dif_context {
764 uint32_t ref_tag;
765 uint16_t app_tag;
766 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
767 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
768};
769
770/*
771 * qla24xx_set_t10dif_tags - Extract Ref and App tags from the SCSI command
772 *
773 */
774static inline void
775qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
776 unsigned int protcnt)
777{
778 struct sd_dif_tuple *spt;
779 unsigned char op = scsi_get_prot_op(cmd);
780
781 switch (scsi_get_prot_type(cmd)) {
782 /* For TYPE 0 protection: no checking */
783 case SCSI_PROT_DIF_TYPE0:
784 pkt->ref_tag_mask[0] = 0x00;
785 pkt->ref_tag_mask[1] = 0x00;
786 pkt->ref_tag_mask[2] = 0x00;
787 pkt->ref_tag_mask[3] = 0x00;
788 break;
789
790 /*
791 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
792 * match LBA in CDB + N
793 */
794 case SCSI_PROT_DIF_TYPE2:
795 break;
796
797 /* For Type 3 protection: 16 bit GUARD only */
798 case SCSI_PROT_DIF_TYPE3:
799 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
800 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
801 0x00;
802 break;
803
804 /*
805	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
806 * 16 bit app tag.
807 */
808 case SCSI_PROT_DIF_TYPE1:
809 if (!ql2xenablehba_err_chk)
810 break;
811
812 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
813 op == SCSI_PROT_WRITE_PASS)) {
814 spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
815 scsi_prot_sglist(cmd)[0].offset;
816 DEBUG18(printk(KERN_DEBUG
817 "%s(): LBA from user %p, lba = 0x%x\n",
818 __func__, spt, (int)spt->ref_tag));
819 pkt->ref_tag = swab32(spt->ref_tag);
820 pkt->app_tag_mask[0] = 0x0;
821 pkt->app_tag_mask[1] = 0x0;
822 } else {
823 pkt->ref_tag = cpu_to_le32((uint32_t)
824 (0xffffffff & scsi_get_lba(cmd)));
825 pkt->app_tag = __constant_cpu_to_le16(0);
826 pkt->app_tag_mask[0] = 0x0;
827 pkt->app_tag_mask[1] = 0x0;
828 }
829 /* enable ALL bytes of the ref tag */
830 pkt->ref_tag_mask[0] = 0xff;
831 pkt->ref_tag_mask[1] = 0xff;
832 pkt->ref_tag_mask[2] = 0xff;
833 pkt->ref_tag_mask[3] = 0xff;
834 break;
835 }
836
837 DEBUG18(printk(KERN_DEBUG
838 "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
839 " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
840 " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
841 (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
842}
843
844
845static int
846qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
847 uint16_t tot_dsds)
848{
849 void *next_dsd;
850 uint8_t avail_dsds = 0;
851 uint32_t dsd_list_len;
852 struct dsd_dma *dsd_ptr;
853 struct scatterlist *sg;
854 uint32_t *cur_dsd = dsd;
855 int i;
856 uint16_t used_dsds = tot_dsds;
857
858 uint8_t *cp;
859
860 scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
861 dma_addr_t sle_dma;
862
863 /* Allocate additional continuation packets? */
864 if (avail_dsds == 0) {
865 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
866 QLA_DSDS_PER_IOCB : used_dsds;
867 dsd_list_len = (avail_dsds + 1) * 12;
868 used_dsds -= avail_dsds;
869
870 /* allocate tracking DS */
871 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
872 if (!dsd_ptr)
873 return 1;
874
875 /* allocate new list */
876 dsd_ptr->dsd_addr = next_dsd =
877 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
878 &dsd_ptr->dsd_list_dma);
879
880 if (!next_dsd) {
881 /*
882 * Need to cleanup only this dsd_ptr, rest
883 * will be done by sp_free_dma()
884 */
885 kfree(dsd_ptr);
886 return 1;
887 }
888
889 list_add_tail(&dsd_ptr->list,
890 &((struct crc_context *)sp->ctx)->dsd_list);
891
892 sp->flags |= SRB_CRC_CTX_DSD_VALID;
893
894 /* add new list to cmd iocb or last list */
895 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
896 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
897 *cur_dsd++ = dsd_list_len;
898 cur_dsd = (uint32_t *)next_dsd;
899 }
900 sle_dma = sg_dma_address(sg);
901 DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
902 " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
903 MSD(sle_dma), sg_dma_len(sg)));
904 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
905 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
906 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
907 avail_dsds--;
908
909 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
910 cp = page_address(sg_page(sg)) + sg->offset;
911 DEBUG18(printk("%s(): User Data buffer= %p:\n",
912 __func__ , cp));
913 }
914 }
915 /* Null termination */
916 *cur_dsd++ = 0;
917 *cur_dsd++ = 0;
918 *cur_dsd++ = 0;
919 return 0;
920}
921
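Each DSD written above is three little-endian dwords (low address, high address, length), so a continuation list holding avail_dsds entries plus its own chain descriptor occupies dsd_list_len = (avail_dsds + 1) * 12 bytes, and the walk ends with a 12-byte null DSD. A quick stand-alone check of how many lists a transfer needs; the per-list capacity is an assumed example value, not taken from the driver headers:

#include <stdio.h>

#define DSDS_PER_LIST 37	/* assumed stand-in for QLA_DSDS_PER_IOCB */

int main(void)
{
	unsigned tot = 100, used = tot, lists = 0, bytes = 0;

	while (used) {
		unsigned avail = used > DSDS_PER_LIST ? DSDS_PER_LIST : used;

		bytes += (avail + 1) * 12;	/* entries + chain descriptor */
		used -= avail;
		lists++;
	}
	bytes += 12;				/* trailing null DSD */
	printf("%u segments -> %u lists, %u descriptor bytes\n",
	    tot, lists, bytes);
	return 0;
}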
922static int
923qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
924 uint32_t *dsd,
925 uint16_t tot_dsds)
926{
927 void *next_dsd;
928 uint8_t avail_dsds = 0;
929 uint32_t dsd_list_len;
930 struct dsd_dma *dsd_ptr;
931 struct scatterlist *sg;
932 int i;
933 struct scsi_cmnd *cmd;
934 uint32_t *cur_dsd = dsd;
935 uint16_t used_dsds = tot_dsds;
936
937 uint8_t *cp;
938
939
940 cmd = sp->cmd;
941 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
942 dma_addr_t sle_dma;
943
944 /* Allocate additional continuation packets? */
945 if (avail_dsds == 0) {
946 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
947 QLA_DSDS_PER_IOCB : used_dsds;
948 dsd_list_len = (avail_dsds + 1) * 12;
949 used_dsds -= avail_dsds;
950
951 /* allocate tracking DS */
952 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
953 if (!dsd_ptr)
954 return 1;
955
956 /* allocate new list */
957 dsd_ptr->dsd_addr = next_dsd =
958 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
959 &dsd_ptr->dsd_list_dma);
960
961 if (!next_dsd) {
962 /*
963 * Need to cleanup only this dsd_ptr, rest
964 * will be done by sp_free_dma()
965 */
966 kfree(dsd_ptr);
967 return 1;
968 }
969
970 list_add_tail(&dsd_ptr->list,
971 &((struct crc_context *)sp->ctx)->dsd_list);
972
973 sp->flags |= SRB_CRC_CTX_DSD_VALID;
974
975 /* add new list to cmd iocb or last list */
976 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
977 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
978 *cur_dsd++ = dsd_list_len;
979 cur_dsd = (uint32_t *)next_dsd;
980 }
981 sle_dma = sg_dma_address(sg);
982 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
983 DEBUG18(printk(KERN_DEBUG
984 "%s(): %p, sg entry %d - addr =0x%x"
985 "0x%x, len =%d\n", __func__ , cur_dsd, i,
986 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
987 }
988 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
989 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
990 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
991
992 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
993 cp = page_address(sg_page(sg)) + sg->offset;
994 DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
995 __func__ , cp));
996 }
997 avail_dsds--;
998 }
999 /* Null termination */
1000 *cur_dsd++ = 0;
1001 *cur_dsd++ = 0;
1002 *cur_dsd++ = 0;
1003 return 0;
1004}
1005
1006/**
1007 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1008 * Type CRC_2 IOCB types.
1009 *
1010 * @sp: SRB command to process
1011 * @cmd_pkt: Command type CRC_2 IOCB
1012 * @tot_dsds: Total number of segments to transfer
1013 */
1014static inline int
1015qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1016 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1017{
1018 uint32_t *cur_dsd, *fcp_dl;
1019 scsi_qla_host_t *vha;
1020 struct scsi_cmnd *cmd;
1021 struct scatterlist *cur_seg;
1022 int sgc;
1023 uint32_t total_bytes;
1024 uint32_t data_bytes;
1025 uint32_t dif_bytes;
1026 uint8_t bundling = 1;
1027 uint16_t blk_size;
1028 uint8_t *clr_ptr;
1029 struct crc_context *crc_ctx_pkt = NULL;
1030 struct qla_hw_data *ha;
1031 uint8_t additional_fcpcdb_len;
1032 uint16_t fcp_cmnd_len;
1033 struct fcp_cmnd *fcp_cmnd;
1034 dma_addr_t crc_ctx_dma;
1035
1036 cmd = sp->cmd;
1037
1038 sgc = 0;
1039 /* Update entry type to indicate Command Type CRC_2 IOCB */
1040 *((uint32_t *)(&cmd_pkt->entry_type)) =
1041 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1042
1043 /* No data transfer */
1044 data_bytes = scsi_bufflen(cmd);
1045 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1046 DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
1047 __func__, data_bytes));
1048 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1049 return QLA_SUCCESS;
1050 }
1051
1052 vha = sp->fcport->vha;
1053 ha = vha->hw;
1054
1055 DEBUG18(printk(KERN_DEBUG
1056 "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
1057 vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));
1058
1059 cmd_pkt->vp_index = sp->fcport->vp_idx;
1060
1061 /* Set transfer direction */
1062 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1063 cmd_pkt->control_flags =
1064 __constant_cpu_to_le16(CF_WRITE_DATA);
1065 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1066 cmd_pkt->control_flags =
1067 __constant_cpu_to_le16(CF_READ_DATA);
1068 }
1069
1070 tot_prot_dsds = scsi_prot_sg_count(cmd);
1071 if (!tot_prot_dsds)
1072 bundling = 0;
1073
1074 /* Allocate CRC context from global pool */
1075 crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
1076 GFP_ATOMIC, &crc_ctx_dma);
1077
1078 if (!crc_ctx_pkt)
1079 goto crc_queuing_error;
1080
1081 /* Zero out CTX area. */
1082 clr_ptr = (uint8_t *)crc_ctx_pkt;
1083 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1084
1085 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1086
1087 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1088
1089 /* Set handle */
1090 crc_ctx_pkt->handle = cmd_pkt->handle;
1091
1092 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1093
1094 qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
1095 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1096
1097 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1098 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1099 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1100
1101 /* Determine SCSI command length -- align to 4 byte boundary */
1102 if (cmd->cmd_len > 16) {
1103 DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
1104 __func__));
1105 additional_fcpcdb_len = cmd->cmd_len - 16;
1106 if ((cmd->cmd_len % 4) != 0) {
1107 /* SCSI cmd > 16 bytes must be multiple of 4 */
1108 goto crc_queuing_error;
1109 }
1110 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1111 } else {
1112 additional_fcpcdb_len = 0;
1113 fcp_cmnd_len = 12 + 16 + 4;
1114 }
1115
1116 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1117
1118 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1119 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1120 fcp_cmnd->additional_cdb_len |= 1;
1121 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1122 fcp_cmnd->additional_cdb_len |= 2;
1123
1124 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
1125 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1126 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1127 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1128 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1129 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1130 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1131 fcp_cmnd->task_attribute = 0;
1132 fcp_cmnd->task_managment = 0;
1133
1134 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1135
1136 DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
1137 "entries %d, data bytes %d, Protection entries %d\n",
1138 __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
1139 data_bytes, tot_prot_dsds));
1140
1141	/* Compute dif len and adjust data len to include protection */
1142 total_bytes = data_bytes;
1143 dif_bytes = 0;
1144 blk_size = cmd->device->sector_size;
1145 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE1) {
1146 dif_bytes = (data_bytes / blk_size) * 8;
1147 total_bytes += dif_bytes;
1148 }
1149
1150 if (!ql2xenablehba_err_chk)
1151 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1152
1153 if (!bundling) {
1154 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1155 } else {
1156 /*
1157		 * Configure bundling if we need to fetch interleaving
1158		 * protection PCI accesses
1159 */
1160 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1161 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1162 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1163 tot_prot_dsds);
1164 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1165 }
1166
1167 /* Finish the common fields of CRC pkt */
1168 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1169 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1170 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1171 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1172 /* Fibre channel byte count */
1173 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1174 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1175 additional_fcpcdb_len);
1176 *fcp_dl = htonl(total_bytes);
1177
1178 DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
1179 " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
1180 vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
1181 crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
1182
1183 /* Walks data segments */
1184
1185 cmd_pkt->control_flags |=
1186 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1187 if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1188 (tot_dsds - tot_prot_dsds)))
1189 goto crc_queuing_error;
1190
1191 if (bundling && tot_prot_dsds) {
1192 /* Walks dif segments */
1193 cur_seg = scsi_prot_sglist(cmd);
1194 cmd_pkt->control_flags |=
1195 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1196 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1197 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1198 tot_prot_dsds))
1199 goto crc_queuing_error;
1200 }
1201 return QLA_SUCCESS;
1202
1203crc_queuing_error:
1204 DEBUG18(qla_printk(KERN_INFO, ha,
1205 "CMD sent FAILED crc_q error:sp = %p\n", sp));
1206 /* Cleanup will be performed by the caller */
1207
1208 return QLA_FUNCTION_FAILED;
1209}
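For Type 1 protection the wire length grows by one 8-byte DIF tuple per logical block, so the fcp_dl written above becomes data_bytes + (data_bytes / blk_size) * 8 while byte_count stays at the unprotected size. A worked stand-alone example of that accounting:

#include <stdio.h>

int main(void)
{
	unsigned data_bytes = 32768;	/* 64 blocks of 512 bytes */
	unsigned blk_size = 512;
	unsigned dif_bytes = (data_bytes / blk_size) * 8;
	unsigned total = data_bytes + dif_bytes;

	/* 64 * 8 = 512 protection bytes; fcp_dl = 33280 */
	printf("dif=%u total=%u\n", dif_bytes, total);
	return 0;
}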
 
 /**
  * qla24xx_start_scsi() - Send a SCSI command to the ISP
@@ -848,6 +1360,191 @@ queuing_error:
 	return QLA_FUNCTION_FAILED;
 }
 
1363
1364/**
1365 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1366 * @sp: command to send to the ISP
1367 *
1368 * Returns non-zero if a failure occurred, else zero.
1369 */
1370int
1371qla24xx_dif_start_scsi(srb_t *sp)
1372{
1373 int nseg;
1374 unsigned long flags;
1375 uint32_t *clr_ptr;
1376 uint32_t index;
1377 uint32_t handle;
1378 uint16_t cnt;
1379 uint16_t req_cnt = 0;
1380 uint16_t tot_dsds;
1381 uint16_t tot_prot_dsds;
1382 uint16_t fw_prot_opts = 0;
1383 struct req_que *req = NULL;
1384 struct rsp_que *rsp = NULL;
1385 struct scsi_cmnd *cmd = sp->cmd;
1386 struct scsi_qla_host *vha = sp->fcport->vha;
1387 struct qla_hw_data *ha = vha->hw;
1388 struct cmd_type_crc_2 *cmd_pkt;
1389 uint32_t status = 0;
1390
1391#define QDSS_GOT_Q_SPACE BIT_0
1392
1393 /* Only process protection in this routine */
1394 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL)
1395 return qla24xx_start_scsi(sp);
1396
1397 /* Setup device pointers. */
1398
1399 qla25xx_set_que(sp, &rsp);
1400 req = vha->req;
1401
1402 /* So we know we haven't pci_map'ed anything yet */
1403 tot_dsds = 0;
1404
1405 /* Send marker if required */
1406 if (vha->marker_needed != 0) {
1407 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1408 QLA_SUCCESS)
1409 return QLA_FUNCTION_FAILED;
1410 vha->marker_needed = 0;
1411 }
1412
1413 /* Acquire ring specific lock */
1414 spin_lock_irqsave(&ha->hardware_lock, flags);
1415
1416 /* Check for room in outstanding command list. */
1417 handle = req->current_outstanding_cmd;
1418 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1419 handle++;
1420 if (handle == MAX_OUTSTANDING_COMMANDS)
1421 handle = 1;
1422 if (!req->outstanding_cmds[handle])
1423 break;
1424 }
1425
1426 if (index == MAX_OUTSTANDING_COMMANDS)
1427 goto queuing_error;
1428
1429 /* Compute number of required data segments */
1430 /* Map the sg table so we have an accurate count of sg entries needed */
1431 if (scsi_sg_count(cmd)) {
1432 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1433 scsi_sg_count(cmd), cmd->sc_data_direction);
1434 if (unlikely(!nseg))
1435 goto queuing_error;
1436 else
1437 sp->flags |= SRB_DMA_VALID;
1438 } else
1439 nseg = 0;
1440
1441 /* number of required data segments */
1442 tot_dsds = nseg;
1443
1444 /* Compute number of required protection segments */
1445 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1446 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1447 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1448 if (unlikely(!nseg))
1449 goto queuing_error;
1450 else
1451 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1452 } else {
1453 nseg = 0;
1454 }
1455
1456 req_cnt = 1;
1457 /* Total Data and protection sg segment(s) */
1458 tot_prot_dsds = nseg;
1459 tot_dsds += nseg;
1460 if (req->cnt < (req_cnt + 2)) {
1461 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1462
1463 if (req->ring_index < cnt)
1464 req->cnt = cnt - req->ring_index;
1465 else
1466 req->cnt = req->length -
1467 (req->ring_index - cnt);
1468 }
1469
1470 if (req->cnt < (req_cnt + 2))
1471 goto queuing_error;
1472
1473 status |= QDSS_GOT_Q_SPACE;
1474
1475 /* Build header part of command packet (excluding the OPCODE). */
1476 req->current_outstanding_cmd = handle;
1477 req->outstanding_cmds[handle] = sp;
1478 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1479 req->cnt -= req_cnt;
1480
1481 /* Fill-in common area */
1482 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1483 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1484
1485 clr_ptr = (uint32_t *)cmd_pkt + 2;
1486 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1487
1488 /* Set NPORT-ID and LUN number*/
1489 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1490 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1491 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1492 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1493
1494 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1495 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1496
1497 /* Total Data and protection segment(s) */
1498 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1499
1500 /* Build IOCB segments and adjust for data protection segments */
1501 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1502 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1503 QLA_SUCCESS)
1504 goto queuing_error;
1505
1506 cmd_pkt->entry_count = (uint8_t)req_cnt;
1507 /* Specify response queue number where completion should happen */
1508 cmd_pkt->entry_status = (uint8_t) rsp->id;
1509 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1510 wmb();
1511
1512 /* Adjust ring index. */
1513 req->ring_index++;
1514 if (req->ring_index == req->length) {
1515 req->ring_index = 0;
1516 req->ring_ptr = req->ring;
1517 } else
1518 req->ring_ptr++;
1519
1520 /* Set chip new ring index. */
1521 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1522 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1523
1524 /* Manage unprocessed RIO/ZIO commands in response queue. */
1525 if (vha->flags.process_response_queue &&
1526 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1527 qla24xx_process_response_queue(vha, rsp);
1528
1529 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1530
1531 return QLA_SUCCESS;
1532
1533queuing_error:
1534 if (status & QDSS_GOT_Q_SPACE) {
1535 req->outstanding_cmds[handle] = NULL;
1536 req->cnt += req_cnt;
1537 }
1538 /* Cleanup will be performed by the caller (queuecommand) */
1539
1540 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1541
1542 DEBUG18(qla_printk(KERN_INFO, ha,
1543 "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
1544 return QLA_FUNCTION_FAILED;
1545}
1546
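The free-slot recalculation in qla24xx_dif_start_scsi() is the usual circular-ring arithmetic: when the consumer index (cnt) is ahead of ring_index the gap is cnt - ring_index, otherwise the count wraps and is length - (ring_index - cnt). A stand-alone restatement:

#include <stdio.h>

static unsigned ring_space(unsigned len, unsigned in, unsigned out)
{
	/* in = producer (ring_index), out = consumer (req_q_out) */
	return (in < out) ? out - in : len - (in - out);
}

int main(void)
{
	/* 128-entry ring: producer at 10, consumer at 50 -> 40 free;
	 * producer at 100, consumer at 20 -> wraps to 48 free. */
	printf("%u %u\n", ring_space(128, 10, 50), ring_space(128, 100, 20));
	return 0;
}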
1547
 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
 {
 	struct scsi_cmnd *cmd = sp->cmd;
@@ -931,37 +1628,45 @@ qla2x00_start_iocbs(srb_t *sp)
 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
 	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
 
-	/* Adjust ring index. */
-	req->ring_index++;
-	if (req->ring_index == req->length) {
-		req->ring_index = 0;
-		req->ring_ptr = req->ring;
-	} else
-		req->ring_ptr++;
-
-	/* Set chip new ring index. */
-	if (ha->mqenable) {
-		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
-		RD_REG_DWORD(&ioreg->hccr);
-	} else if (IS_FWI2_CAPABLE(ha)) {
-		WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
-		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+	if (IS_QLA82XX(ha)) {
+		qla82xx_start_iocbs(sp);
 	} else {
-		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
-		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+		/* Adjust ring index. */
+		req->ring_index++;
+		if (req->ring_index == req->length) {
+			req->ring_index = 0;
+			req->ring_ptr = req->ring;
+		} else
+			req->ring_ptr++;
+
+		/* Set chip new ring index. */
+		if (ha->mqenable) {
+			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+			RD_REG_DWORD(&ioreg->hccr);
+		} else if (IS_QLA82XX(ha)) {
+			qla82xx_start_iocbs(sp);
+		} else if (IS_FWI2_CAPABLE(ha)) {
+			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+		} else {
+			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+			    req->ring_index);
+			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+		}
 	}
 }
 
 static void
 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
 {
-	struct srb_logio *lio = sp->ctx;
+	struct srb_ctx *ctx = sp->ctx;
+	struct srb_iocb *lio = ctx->u.iocb_cmd;
 
 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
-	if (lio->flags & SRB_LOGIN_COND_PLOGI)
+	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
 		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
-	if (lio->flags & SRB_LOGIN_SKIP_PRLI)
+	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
 		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -974,14 +1679,15 @@ static void
 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
 {
 	struct qla_hw_data *ha = sp->fcport->vha->hw;
-	struct srb_logio *lio = sp->ctx;
+	struct srb_ctx *ctx = sp->ctx;
+	struct srb_iocb *lio = ctx->u.iocb_cmd;
 	uint16_t opts;
 
 	mbx->entry_type = MBX_IOCB_TYPE;;
 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
 	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
-	opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0: 0;
-	opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1: 0;
+	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
+	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
 	if (HAS_EXTENDED_IDS(ha)) {
 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
 		mbx->mb10 = cpu_to_le16(opts);
@@ -1026,9 +1732,97 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
 }
 
 static void
1735qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1736{
1737 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1738 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1739 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1740 logio->vp_index = sp->fcport->vp_idx;
1741}
1742
1743static void
1744qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1745{
1746 struct qla_hw_data *ha = sp->fcport->vha->hw;
1747
1748 mbx->entry_type = MBX_IOCB_TYPE;
1749 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1750 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1751 if (HAS_EXTENDED_IDS(ha)) {
1752 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1753 mbx->mb10 = cpu_to_le16(BIT_0);
1754 } else {
1755 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1756 }
1757 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1758 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1759 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1760 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1761 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1762}
1763
1764static void
1765qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1766{
1767 uint32_t flags;
1768 unsigned int lun;
1769 struct fc_port *fcport = sp->fcport;
1770 scsi_qla_host_t *vha = fcport->vha;
1771 struct qla_hw_data *ha = vha->hw;
1772 struct srb_ctx *ctx = sp->ctx;
1773 struct srb_iocb *iocb = ctx->u.iocb_cmd;
1774 struct req_que *req = vha->req;
1775
1776 flags = iocb->u.tmf.flags;
1777 lun = iocb->u.tmf.lun;
1778
1779 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1780 tsk->entry_count = 1;
1781 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1782 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1783 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1784 tsk->control_flags = cpu_to_le32(flags);
1785 tsk->port_id[0] = fcport->d_id.b.al_pa;
1786 tsk->port_id[1] = fcport->d_id.b.area;
1787 tsk->port_id[2] = fcport->d_id.b.domain;
1788 tsk->vp_index = fcport->vp_idx;
1789
1790 if (flags == TCF_LUN_RESET) {
1791 int_to_scsilun(lun, &tsk->lun);
1792 host_to_fcp_swap((uint8_t *)&tsk->lun,
1793 sizeof(tsk->lun));
1794 }
1795}
1796
1797static void
1798qla24xx_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
1799{
1800 uint16_t lun;
1801 uint8_t modif;
1802 struct fc_port *fcport = sp->fcport;
1803 scsi_qla_host_t *vha = fcport->vha;
1804 struct srb_ctx *ctx = sp->ctx;
1805 struct srb_iocb *iocb = ctx->u.iocb_cmd;
1806 struct req_que *req = vha->req;
1807
1808 lun = iocb->u.marker.lun;
1809 modif = iocb->u.marker.modif;
1810 mrk->entry_type = MARKER_TYPE;
1811 mrk->modifier = modif;
1812 if (modif != MK_SYNC_ALL) {
1813 mrk->nport_handle = cpu_to_le16(fcport->loop_id);
1814 mrk->lun[1] = LSB(lun);
1815 mrk->lun[2] = MSB(lun);
1816 host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
1817 mrk->vp_index = vha->vp_idx;
1818 mrk->handle = MAKE_HANDLE(req->id, mrk->handle);
1819 }
1820}
1821
+static void
 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 {
-	struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
 
 	els_iocb->entry_type = ELS_IOCB_TYPE;
 	els_iocb->entry_count = 1;
@@ -1041,8 +1835,10 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 	els_iocb->sof_type = EST_SOFI3;
 	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
 
-	els_iocb->opcode =(((struct srb_bsg*)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
-	    bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code;
+	els_iocb->opcode =
+	    (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
+	    bsg_job->request->rqst_data.r_els.els_code :
+	    bsg_job->request->rqst_data.h_els.command_code;
 	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
 	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
 	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
@@ -1076,7 +1872,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 	int index;
 	uint16_t tot_dsds;
 	scsi_qla_host_t *vha = sp->fcport->vha;
-	struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
 	int loop_iterartion = 0;
 	int cont_iocb_prsnt = 0;
 	int entry_count = 1;
@@ -1157,12 +1953,12 @@ qla2x00_start_sp(srb_t *sp)
 	switch (ctx->type) {
 	case SRB_LOGIN_CMD:
 		IS_FWI2_CAPABLE(ha) ?
-		    qla24xx_login_iocb(sp, pkt):
+		    qla24xx_login_iocb(sp, pkt) :
 		    qla2x00_login_iocb(sp, pkt);
 		break;
 	case SRB_LOGOUT_CMD:
 		IS_FWI2_CAPABLE(ha) ?
-		    qla24xx_logout_iocb(sp, pkt):
+		    qla24xx_logout_iocb(sp, pkt) :
 		    qla2x00_logout_iocb(sp, pkt);
 		break;
 	case SRB_ELS_CMD_RPT:
@@ -1172,6 +1968,17 @@ qla2x00_start_sp(srb_t *sp)
1172 case SRB_CT_CMD: 1968 case SRB_CT_CMD:
1173 qla24xx_ct_iocb(sp, pkt); 1969 qla24xx_ct_iocb(sp, pkt);
1174 break; 1970 break;
1971 case SRB_ADISC_CMD:
1972 IS_FWI2_CAPABLE(ha) ?
1973 qla24xx_adisc_iocb(sp, pkt) :
1974 qla2x00_adisc_iocb(sp, pkt);
1975 break;
1976 case SRB_TM_CMD:
1977 qla24xx_tm_iocb(sp, pkt);
1978 break;
1979 case SRB_MARKER_CMD:
1980 qla24xx_marker_iocb(sp, pkt);
1981 break;
1175 default: 1982 default:
1176 break; 1983 break;
1177 } 1984 }
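
The qla2x00_start_sp() hunk above extends the SRB-type switch so that login, logout, ADISC, TMF and marker requests all flow through one entry point, with IS_FWI2_CAPABLE() choosing between the 24xx and legacy IOCB builders. Below is a minimal user-space sketch of that dispatch pattern; the types and builder functions are stubs standing in for the driver's, not the real qla2xxx API.

#include <stdio.h>

enum srb_type { SRB_LOGIN_CMD, SRB_LOGOUT_CMD, SRB_ADISC_CMD };

struct srb { enum srb_type type; };
typedef void (*iocb_builder)(struct srb *sp, void *pkt);

/* Stubs standing in for qla24xx_*_iocb() / qla2x00_*_iocb(). */
static void login_24xx(struct srb *sp, void *pkt)  { puts("24xx login IOCB"); }
static void login_2x00(struct srb *sp, void *pkt)  { puts("legacy login IOCB"); }
static void logout_24xx(struct srb *sp, void *pkt) { puts("24xx logout IOCB"); }
static void logout_2x00(struct srb *sp, void *pkt) { puts("legacy logout IOCB"); }
static void adisc_24xx(struct srb *sp, void *pkt)  { puts("24xx ADISC IOCB"); }
static void adisc_2x00(struct srb *sp, void *pkt)  { puts("legacy ADISC IOCB"); }

/* One builder pair per SRB type; index 1 models IS_FWI2_CAPABLE(ha). */
static const iocb_builder builders[][2] = {
	[SRB_LOGIN_CMD]  = { login_2x00,  login_24xx  },
	[SRB_LOGOUT_CMD] = { logout_2x00, logout_24xx },
	[SRB_ADISC_CMD]  = { adisc_2x00,  adisc_24xx  },
};

static void start_sp(struct srb *sp, void *pkt, int fwi2)
{
	builders[sp->type][!!fwi2](sp, pkt);
}

int main(void)
{
	struct srb sp = { .type = SRB_ADISC_CMD };
	start_sp(&sp, NULL, 1);	/* prints "24xx ADISC IOCB" */
	return 0;
}

A table-driven form like this would avoid the repeated capability ternaries, at the cost of requiring every type to have both builders defined.
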
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index db539b0c3dae..be3d8bed2ecf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,6 +10,7 @@
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <scsi/scsi_tcq.h> 11#include <scsi/scsi_tcq.h>
12#include <scsi/scsi_bsg_fc.h> 12#include <scsi/scsi_bsg_fc.h>
13#include <scsi/scsi_eh.h>
13 14
14static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 15static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
15static void qla2x00_process_completed_request(struct scsi_qla_host *, 16static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -326,7 +327,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
326 327
327 /* Setup to process RIO completion. */ 328 /* Setup to process RIO completion. */
328 handle_cnt = 0; 329 handle_cnt = 0;
329 if (IS_QLA81XX(ha)) 330 if (IS_QLA8XXX_TYPE(ha))
330 goto skip_rio; 331 goto skip_rio;
331 switch (mb[0]) { 332 switch (mb[0]) {
332 case MBA_SCSI_COMPLETION: 333 case MBA_SCSI_COMPLETION:
@@ -544,7 +545,7 @@ skip_rio:
544 if (IS_QLA2100(ha)) 545 if (IS_QLA2100(ha))
545 break; 546 break;
546 547
547 if (IS_QLA81XX(ha)) 548 if (IS_QLA8XXX_TYPE(ha))
548 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x " 549 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
549 "%04x\n", vha->host_no, mb[1], mb[2], mb[3])); 550 "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
550 else 551 else
@@ -845,7 +846,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
845 qla2x00_sp_compl(ha, sp); 846 qla2x00_sp_compl(ha, sp);
846 } else { 847 } else {
847 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" 848 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
848 " handle(%d)\n", vha->host_no, req->id, index)); 849 " handle(0x%x)\n", vha->host_no, req->id, index));
849 qla_printk(KERN_WARNING, ha, 850 qla_printk(KERN_WARNING, ha,
850 "Invalid ISP SCSI completion handle\n"); 851 "Invalid ISP SCSI completion handle\n");
851 852
@@ -895,36 +896,26 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
895{ 896{
896 const char func[] = "MBX-IOCB"; 897 const char func[] = "MBX-IOCB";
897 const char *type; 898 const char *type;
898 struct qla_hw_data *ha = vha->hw;
899 fc_port_t *fcport; 899 fc_port_t *fcport;
900 srb_t *sp; 900 srb_t *sp;
901 struct srb_logio *lio; 901 struct srb_iocb *lio;
902 uint16_t data[2]; 902 struct srb_ctx *ctx;
903 uint16_t *data;
904 uint16_t status;
903 905
904 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 906 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
905 if (!sp) 907 if (!sp)
906 return; 908 return;
907 909
908 type = NULL; 910 ctx = sp->ctx;
909 lio = sp->ctx; 911 lio = ctx->u.iocb_cmd;
910 switch (lio->ctx.type) { 912 type = ctx->name;
911 case SRB_LOGIN_CMD:
912 type = "login";
913 break;
914 case SRB_LOGOUT_CMD:
915 type = "logout";
916 break;
917 default:
918 qla_printk(KERN_WARNING, ha,
919 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
920 lio->ctx.type);
921 return;
922 }
923
924 del_timer(&lio->ctx.timer);
925 fcport = sp->fcport; 913 fcport = sp->fcport;
914 data = lio->u.logio.data;
926 915
927 data[0] = data[1] = 0; 916 data[0] = MBS_COMMAND_ERROR;
917 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
918 QLA_LOGIO_LOGIN_RETRIED : 0;
928 if (mbx->entry_status) { 919 if (mbx->entry_status) {
929 DEBUG2(printk(KERN_WARNING 920 DEBUG2(printk(KERN_WARNING
930 "scsi(%ld:%x): Async-%s error entry - entry-status=%x " 921 "scsi(%ld:%x): Async-%s error entry - entry-status=%x "
@@ -935,23 +926,28 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
935 le16_to_cpu(mbx->status_flags))); 926 le16_to_cpu(mbx->status_flags)));
936 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx))); 927 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
937 928
938 data[0] = MBS_COMMAND_ERROR; 929 goto logio_done;
939 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
940 QLA_LOGIO_LOGIN_RETRIED: 0;
941 goto done_post_logio_done_work;
942 } 930 }
943 931
944 if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 932 status = le16_to_cpu(mbx->status);
933 if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
934 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
935 status = 0;
936 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
945 DEBUG2(printk(KERN_DEBUG 937 DEBUG2(printk(KERN_DEBUG
946 "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n", 938 "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
947 fcport->vha->host_no, sp->handle, type, 939 fcport->vha->host_no, sp->handle, type,
948 le16_to_cpu(mbx->mb1))); 940 le16_to_cpu(mbx->mb1)));
949 941
950 data[0] = MBS_COMMAND_COMPLETE; 942 data[0] = MBS_COMMAND_COMPLETE;
951 if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1) 943 if (ctx->type == SRB_LOGIN_CMD) {
952 fcport->flags |= FCF_FCP2_DEVICE; 944 fcport->port_type = FCT_TARGET;
953 945 if (le16_to_cpu(mbx->mb1) & BIT_0)
954 goto done_post_logio_done_work; 946 fcport->port_type = FCT_INITIATOR;
947 if (le16_to_cpu(mbx->mb1) & BIT_1)
948 fcport->flags |= FCF_FCP2_DEVICE;
949 }
950 goto logio_done;
955 } 951 }
956 952
957 data[0] = le16_to_cpu(mbx->mb0); 953 data[0] = le16_to_cpu(mbx->mb0);
@@ -963,25 +959,19 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
963 break; 959 break;
964 default: 960 default:
965 data[0] = MBS_COMMAND_ERROR; 961 data[0] = MBS_COMMAND_ERROR;
966 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
967 QLA_LOGIO_LOGIN_RETRIED: 0;
968 break; 962 break;
969 } 963 }
970 964
971 DEBUG2(printk(KERN_WARNING 965 DEBUG2(printk(KERN_WARNING
972 "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x " 966 "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
973 "mb6=%x mb7=%x.\n", 967 "mb6=%x mb7=%x.\n",
974 fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status), 968 fcport->vha->host_no, sp->handle, type, status,
975 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 969 le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
976 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 970 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
977 le16_to_cpu(mbx->mb7))); 971 le16_to_cpu(mbx->mb7)));
978 972
979done_post_logio_done_work: 973logio_done:
980 lio->ctx.type == SRB_LOGIN_CMD ? 974 lio->done(sp);
981 qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
982 qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
983
984 lio->ctx.free(sp);
985} 975}
986 976
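
Both logio completion paths in this hunk drop their hand-rolled switch on lio->ctx.type: the context now carries a human-readable name and a done() callback, so the ISR can complete any async SRB without knowing its type. A compilable sketch of that shape follows; the field and function names are illustrative, not the driver's.

#include <stdio.h>

struct srb;

struct srb_ctx {
	const char *name;		/* replaces the type->string switch */
	void (*done)(struct srb *sp);	/* replaces the type->handler switch */
};

struct srb {
	struct srb_ctx *ctx;
	int handle;
};

static void login_done(struct srb *sp)
{
	printf("completing %s sp=%d\n", sp->ctx->name, sp->handle);
}

/* Generic completion: no per-type knowledge needed in the ISR path. */
static void iocb_entry(struct srb *sp)
{
	sp->ctx->done(sp);
}

int main(void)
{
	struct srb_ctx ctx = { .name = "login", .done = login_done };
	struct srb sp = { .ctx = &ctx, .handle = 7 };
	iocb_entry(&sp);
	return 0;
}
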
987static void 977static void
@@ -992,7 +982,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
992 const char *type; 982 const char *type;
993 struct qla_hw_data *ha = vha->hw; 983 struct qla_hw_data *ha = vha->hw;
994 srb_t *sp; 984 srb_t *sp;
995 struct srb_bsg *sp_bsg; 985 struct srb_ctx *sp_bsg;
996 struct fc_bsg_job *bsg_job; 986 struct fc_bsg_job *bsg_job;
997 uint16_t comp_status; 987 uint16_t comp_status;
998 uint32_t fw_status[3]; 988 uint32_t fw_status[3];
@@ -1001,11 +991,11 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1001 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 991 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1002 if (!sp) 992 if (!sp)
1003 return; 993 return;
1004 sp_bsg = (struct srb_bsg*)sp->ctx; 994 sp_bsg = sp->ctx;
1005 bsg_job = sp_bsg->bsg_job; 995 bsg_job = sp_bsg->u.bsg_job;
1006 996
1007 type = NULL; 997 type = NULL;
1008 switch (sp_bsg->ctx.type) { 998 switch (sp_bsg->type) {
1009 case SRB_ELS_CMD_RPT: 999 case SRB_ELS_CMD_RPT:
1010 case SRB_ELS_CMD_HST: 1000 case SRB_ELS_CMD_HST:
1011 type = "els"; 1001 type = "els";
@@ -1016,7 +1006,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1016 default: 1006 default:
1017 qla_printk(KERN_WARNING, ha, 1007 qla_printk(KERN_WARNING, ha,
1018 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, 1008 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1019 sp_bsg->ctx.type); 1009 sp_bsg->type);
1020 return; 1010 return;
1021 } 1011 }
1022 1012
@@ -1070,8 +1060,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1070 dma_unmap_sg(&ha->pdev->dev, 1060 dma_unmap_sg(&ha->pdev->dev,
1071 bsg_job->reply_payload.sg_list, 1061 bsg_job->reply_payload.sg_list,
1072 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1062 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1073 if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) || 1063 if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
1074 (sp_bsg->ctx.type == SRB_CT_CMD)) 1064 (sp_bsg->type == SRB_CT_CMD))
1075 kfree(sp->fcport); 1065 kfree(sp->fcport);
1076 kfree(sp->ctx); 1066 kfree(sp->ctx);
1077 mempool_free(sp, ha->srb_mempool); 1067 mempool_free(sp, ha->srb_mempool);
@@ -1084,37 +1074,26 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1084{ 1074{
1085 const char func[] = "LOGIO-IOCB"; 1075 const char func[] = "LOGIO-IOCB";
1086 const char *type; 1076 const char *type;
1087 struct qla_hw_data *ha = vha->hw;
1088 fc_port_t *fcport; 1077 fc_port_t *fcport;
1089 srb_t *sp; 1078 srb_t *sp;
1090 struct srb_logio *lio; 1079 struct srb_iocb *lio;
1091 uint16_t data[2]; 1080 struct srb_ctx *ctx;
1081 uint16_t *data;
1092 uint32_t iop[2]; 1082 uint32_t iop[2];
1093 1083
1094 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1084 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1095 if (!sp) 1085 if (!sp)
1096 return; 1086 return;
1097 1087
1098 type = NULL; 1088 ctx = sp->ctx;
1099 lio = sp->ctx; 1089 lio = ctx->u.iocb_cmd;
1100 switch (lio->ctx.type) { 1090 type = ctx->name;
1101 case SRB_LOGIN_CMD:
1102 type = "login";
1103 break;
1104 case SRB_LOGOUT_CMD:
1105 type = "logout";
1106 break;
1107 default:
1108 qla_printk(KERN_WARNING, ha,
1109 "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1110 lio->ctx.type);
1111 return;
1112 }
1113
1114 del_timer(&lio->ctx.timer);
1115 fcport = sp->fcport; 1091 fcport = sp->fcport;
1092 data = lio->u.logio.data;
1116 1093
1117 data[0] = data[1] = 0; 1094 data[0] = MBS_COMMAND_ERROR;
1095 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1096 QLA_LOGIO_LOGIN_RETRIED : 0;
1118 if (logio->entry_status) { 1097 if (logio->entry_status) {
1119 DEBUG2(printk(KERN_WARNING 1098 DEBUG2(printk(KERN_WARNING
1120 "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n", 1099 "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
@@ -1122,10 +1101,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1122 logio->entry_status)); 1101 logio->entry_status));
1123 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio))); 1102 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
1124 1103
1125 data[0] = MBS_COMMAND_ERROR; 1104 goto logio_done;
1126 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1127 QLA_LOGIO_LOGIN_RETRIED: 0;
1128 goto done_post_logio_done_work;
1129 } 1105 }
1130 1106
1131 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1107 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
@@ -1135,8 +1111,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1135 le32_to_cpu(logio->io_parameter[0]))); 1111 le32_to_cpu(logio->io_parameter[0])));
1136 1112
1137 data[0] = MBS_COMMAND_COMPLETE; 1113 data[0] = MBS_COMMAND_COMPLETE;
1138 if (lio->ctx.type == SRB_LOGOUT_CMD) 1114 if (ctx->type != SRB_LOGIN_CMD)
1139 goto done_post_logio_done_work; 1115 goto logio_done;
1140 1116
1141 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1117 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1142 if (iop[0] & BIT_4) { 1118 if (iop[0] & BIT_4) {
@@ -1151,7 +1127,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1151 if (logio->io_parameter[9] || logio->io_parameter[10]) 1127 if (logio->io_parameter[9] || logio->io_parameter[10])
1152 fcport->supported_classes |= FC_COS_CLASS3; 1128 fcport->supported_classes |= FC_COS_CLASS3;
1153 1129
1154 goto done_post_logio_done_work; 1130 goto logio_done;
1155 } 1131 }
1156 1132
1157 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1133 iop[0] = le32_to_cpu(logio->io_parameter[0]);
@@ -1172,8 +1148,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1172 /* Fall through. */ 1148 /* Fall through. */
1173 default: 1149 default:
1174 data[0] = MBS_COMMAND_ERROR; 1150 data[0] = MBS_COMMAND_ERROR;
1175 data[1] = lio->flags & SRB_LOGIN_RETRIED ?
1176 QLA_LOGIO_LOGIN_RETRIED: 0;
1177 break; 1151 break;
1178 } 1152 }
1179 1153
@@ -1184,12 +1158,101 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1184 le32_to_cpu(logio->io_parameter[0]), 1158 le32_to_cpu(logio->io_parameter[0]),
1185 le32_to_cpu(logio->io_parameter[1]))); 1159 le32_to_cpu(logio->io_parameter[1])));
1186 1160
1187done_post_logio_done_work: 1161logio_done:
1188 lio->ctx.type == SRB_LOGIN_CMD ? 1162 lio->done(sp);
1189 qla2x00_post_async_login_done_work(fcport->vha, fcport, data): 1163}
1190 qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
1191 1164
1192 lio->ctx.free(sp); 1165static void
1166qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1167 struct tsk_mgmt_entry *tsk)
1168{
1169 const char func[] = "TMF-IOCB";
1170 const char *type;
1171 fc_port_t *fcport;
1172 srb_t *sp;
1173 struct srb_iocb *iocb;
1174 struct srb_ctx *ctx;
1175 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1176 int error = 1;
1177
1178 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1179 if (!sp)
1180 return;
1181
1182 ctx = sp->ctx;
1183 iocb = ctx->u.iocb_cmd;
1184 type = ctx->name;
1185 fcport = sp->fcport;
1186
1187 if (sts->entry_status) {
1188 DEBUG2(printk(KERN_WARNING
1189 "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
1190 fcport->vha->host_no, sp->handle, type,
1191 sts->entry_status));
1192 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1193 DEBUG2(printk(KERN_WARNING
1194 "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
1195 fcport->vha->host_no, sp->handle, type,
1196 sts->comp_status));
1197 } else if (!(le16_to_cpu(sts->scsi_status) &
1198 SS_RESPONSE_INFO_LEN_VALID)) {
1199 DEBUG2(printk(KERN_WARNING
1200 "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
1201 fcport->vha->host_no, sp->handle, type,
1202 sts->scsi_status));
1203 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1204 DEBUG2(printk(KERN_WARNING
1205 "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
1206 fcport->vha->host_no, sp->handle, type,
1207 sts->rsp_data_len));
1208 } else if (sts->data[3]) {
1209 DEBUG2(printk(KERN_WARNING
1210 "scsi(%ld:%x): Async-%s error - response(%x).\n",
1211 fcport->vha->host_no, sp->handle, type,
1212 sts->data[3]));
1213 } else {
1214 error = 0;
1215 }
1216
1217 if (error) {
1218 iocb->u.tmf.data = error;
1219 DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
1220 }
1221
1222 iocb->done(sp);
1223}
1224
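qla24xx_tm_iocb_entry() validates a task-management response in strictly widening order: IOCB entry status, then completion status, then the SS_RESPONSE_INFO_LEN_VALID flag, then a minimum 4-byte response-info length, and only then the FCP response code in data[3]. Here is a user-space sketch of the same ladder with a simplified status struct; the field names and the bit value are illustrative assumptions, not the firmware layout.

#include <stdint.h>
#include <stdio.h>

#define CS_COMPLETE                 0x0
#define SS_RESPONSE_INFO_LEN_VALID  0x400	/* illustrative bit value */

struct tm_status {
	uint8_t  entry_status;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint32_t rsp_data_len;
	uint8_t  data[8];	/* data[3] holds the FCP rsp_code */
};

static const char *tm_check(const struct tm_status *sts)
{
	if (sts->entry_status)
		return "bad entry status";
	if (sts->comp_status != CS_COMPLETE)
		return "bad completion status";
	if (!(sts->scsi_status & SS_RESPONSE_INFO_LEN_VALID))
		return "no response info";
	if (sts->rsp_data_len < 4)
		return "response info too short";
	if (sts->data[3])
		return "TMF rejected by target";
	return NULL;	/* success */
}

int main(void)
{
	struct tm_status ok = { .comp_status = CS_COMPLETE,
				.scsi_status = SS_RESPONSE_INFO_LEN_VALID,
				.rsp_data_len = 8 };
	const char *err = tm_check(&ok);
	puts(err ? err : "TMF complete");
	return 0;
}
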
1225static void
1226qla24xx_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1227 struct mrk_entry_24xx *mrk)
1228{
1229 const char func[] = "MRK-IOCB";
1230 const char *type;
1231 fc_port_t *fcport;
1232 srb_t *sp;
1233 struct srb_iocb *iocb;
1234 struct srb_ctx *ctx;
1235 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)mrk;
1236
1237 sp = qla2x00_get_sp_from_handle(vha, func, req, mrk);
1238 if (!sp)
1239 return;
1240
1241 ctx = sp->ctx;
1242 iocb = ctx->u.iocb_cmd;
1243 type = ctx->name;
1244 fcport = sp->fcport;
1245
1246 if (sts->entry_status) {
1247 iocb->u.marker.data = 1;
1248 DEBUG2(printk(KERN_WARNING
1249 "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
1250 fcport->vha->host_no, sp->handle, type,
1251 sts->entry_status));
1252 DEBUG2(qla2x00_dump_buffer((uint8_t *)mrk, sizeof(*sts)));
1253 }
1254
1255 iocb->done(sp);
1193} 1256}
1194 1257
1195/** 1258/**
@@ -1256,6 +1319,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1256 case MBX_IOCB_TYPE: 1319 case MBX_IOCB_TYPE:
1257 qla2x00_mbx_iocb_entry(vha, rsp->req, 1320 qla2x00_mbx_iocb_entry(vha, rsp->req,
1258 (struct mbx_entry *)pkt); 1321 (struct mbx_entry *)pkt);
1322 break;
1259 default: 1323 default:
1260 /* Type Not Supported. */ 1324 /* Type Not Supported. */
1261 DEBUG4(printk(KERN_WARNING 1325 DEBUG4(printk(KERN_WARNING
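
The single added break at new line 1322 above fixes a switch fallthrough: without it, every MBX IOCB completion also executed the default "Type Not Supported" branch. A two-line demonstration of the hazard, in plain C:

#include <stdio.h>

static void handle(int type)
{
	switch (type) {
	case 1:
		puts("MBX IOCB handled");
		break;		/* the fix: without this we fall through */
	default:
		printf("type %d not supported\n", type);
		break;
	}
}

int main(void)
{
	handle(1);	/* prints only "MBX IOCB handled" */
	handle(9);
	return 0;
}
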
@@ -1301,6 +1365,78 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
1301 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); 1365 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
1302} 1366}
1303 1367
1368struct scsi_dif_tuple {
1369 __be16 guard; /* Checksum */
1370 __be16 app_tag; /* Application identifier */
1371 __be32 ref_tag; /* Target LBA or indirect LBA */
1372};
1373
1374/*
1375 * Checks the guard or meta-data for the type of error
1376 * detected by the HBA. In case of errors, we set the
1377 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1378 * to indicate to the kernel that the HBA detected error.
1379 */
1380static inline void
1381qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1382{
1383 struct scsi_cmnd *cmd = sp->cmd;
1384 struct scsi_dif_tuple *ep =
1385 (struct scsi_dif_tuple *)&sts24->data[20];
1386 struct scsi_dif_tuple *ap =
1387 (struct scsi_dif_tuple *)&sts24->data[12];
1388 uint32_t e_ref_tag, a_ref_tag;
1389 uint16_t e_app_tag, a_app_tag;
1390 uint16_t e_guard, a_guard;
1391
1392 e_ref_tag = be32_to_cpu(ep->ref_tag);
1393 a_ref_tag = be32_to_cpu(ap->ref_tag);
1394 e_app_tag = be16_to_cpu(ep->app_tag);
1395 a_app_tag = be16_to_cpu(ap->app_tag);
1396 e_guard = be16_to_cpu(ep->guard);
1397 a_guard = be16_to_cpu(ap->guard);
1398
1399 DEBUG18(printk(KERN_DEBUG
1400 "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));
1401
1402 DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1403 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1404 " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
1405 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1406 a_app_tag, e_app_tag, a_guard, e_guard));
1407
1408
1409 /* check guard */
1410 if (e_guard != a_guard) {
1411 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1412 0x10, 0x1);
1413 set_driver_byte(cmd, DRIVER_SENSE);
1414 set_host_byte(cmd, DID_ABORT);
1415 cmd->result |= SAM_STAT_CHECK_CONDITION;
1416 return;
1417 }
1418
1419 /* check appl tag */
1420 if (e_app_tag != a_app_tag) {
1421 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1422 0x10, 0x2);
1423 set_driver_byte(cmd, DRIVER_SENSE);
1424 set_host_byte(cmd, DID_ABORT);
1425 cmd->result |= SAM_STAT_CHECK_CONDITION;
1426 return;
1427 }
1428
1429 /* check ref tag */
1430 if (e_ref_tag != a_ref_tag) {
1431 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1432 0x10, 0x3);
1433 set_driver_byte(cmd, DRIVER_SENSE);
1434 set_host_byte(cmd, DID_ABORT);
1435 cmd->result |= SAM_STAT_CHECK_CONDITION;
1436 return;
1437 }
1438}
1439
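The guard word checked first in qla2x00_handle_dif_error() is the CRC16 defined by T10 for DIF: polynomial 0x8BB7, zero initial value, no bit reflection. The kernel provides this as crc_t10dif(); the self-contained sketch below reimplements it bitwise purely for illustration and flags a corrupted sector the way the handler would.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* CRC-16/T10-DIF: poly 0x8BB7, init 0, not reflected, no xorout. */
static uint16_t crc_t10dif(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;
	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	uint8_t sector[512];
	memset(sector, 0xA5, sizeof(sector));

	uint16_t e_guard = crc_t10dif(sector, sizeof(sector));	/* expected */
	sector[100] ^= 0x01;					/* corrupt one byte */
	uint16_t a_guard = crc_t10dif(sector, sizeof(sector));	/* actual */

	/* Known check value from the CRC catalogue: "123456789" -> 0xD0DB. */
	printf("self-test: %04X\n", crc_t10dif((const uint8_t *)"123456789", 9));

	if (e_guard != a_guard)
		puts("guard mismatch -> ILLEGAL_REQUEST asc 0x10 ascq 0x1");
	return 0;
}
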
1304/** 1440/**
1305 * qla2x00_status_entry() - Process a Status IOCB entry. 1441 * qla2x00_status_entry() - Process a Status IOCB entry.
1306 * @ha: SCSI driver HA context 1442 * @ha: SCSI driver HA context
@@ -1316,6 +1452,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1316 struct sts_entry_24xx *sts24; 1452 struct sts_entry_24xx *sts24;
1317 uint16_t comp_status; 1453 uint16_t comp_status;
1318 uint16_t scsi_status; 1454 uint16_t scsi_status;
1455 uint16_t ox_id;
1319 uint8_t lscsi_status; 1456 uint8_t lscsi_status;
1320 int32_t resid; 1457 int32_t resid;
1321 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 1458 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
@@ -1324,6 +1461,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1324 uint32_t handle; 1461 uint32_t handle;
1325 uint16_t que; 1462 uint16_t que;
1326 struct req_que *req; 1463 struct req_que *req;
1464 int logit = 1;
1327 1465
1328 sts = (sts_entry_t *) pkt; 1466 sts = (sts_entry_t *) pkt;
1329 sts24 = (struct sts_entry_24xx *) pkt; 1467 sts24 = (struct sts_entry_24xx *) pkt;
@@ -1337,6 +1475,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1337 handle = (uint32_t) LSW(sts->handle); 1475 handle = (uint32_t) LSW(sts->handle);
1338 que = MSW(sts->handle); 1476 que = MSW(sts->handle);
1339 req = ha->req_q_map[que]; 1477 req = ha->req_q_map[que];
1478
1340 /* Fast path completion. */ 1479 /* Fast path completion. */
1341 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1480 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1342 qla2x00_process_completed_request(vha, req, handle); 1481 qla2x00_process_completed_request(vha, req, handle);
@@ -1352,9 +1491,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1352 sp = NULL; 1491 sp = NULL;
1353 1492
1354 if (sp == NULL) { 1493 if (sp == NULL) {
1355 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n", 1494 qla_printk(KERN_WARNING, ha,
1356 vha->host_no)); 1495 "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
1357 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n"); 1496 sts->handle);
1358 1497
1359 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1498 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1360 qla2xxx_wake_dpc(vha); 1499 qla2xxx_wake_dpc(vha);
@@ -1362,10 +1501,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1362 } 1501 }
1363 cp = sp->cmd; 1502 cp = sp->cmd;
1364 if (cp == NULL) { 1503 if (cp == NULL) {
1365 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1366 "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
1367 qla_printk(KERN_WARNING, ha, 1504 qla_printk(KERN_WARNING, ha,
1368 "Command is NULL: already returned to OS (sp=%p)\n", sp); 1505 "scsi(%ld): Command already returned (0x%x/%p).\n",
1506 vha->host_no, sts->handle, sp);
1369 1507
1370 return; 1508 return;
1371 } 1509 }
@@ -1374,6 +1512,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1374 1512
1375 fcport = sp->fcport; 1513 fcport = sp->fcport;
1376 1514
1515 ox_id = 0;
1377 sense_len = rsp_info_len = resid_len = fw_resid_len = 0; 1516 sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
1378 if (IS_FWI2_CAPABLE(ha)) { 1517 if (IS_FWI2_CAPABLE(ha)) {
1379 if (scsi_status & SS_SENSE_LEN_VALID) 1518 if (scsi_status & SS_SENSE_LEN_VALID)
@@ -1387,6 +1526,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1387 rsp_info = sts24->data; 1526 rsp_info = sts24->data;
1388 sense_data = sts24->data; 1527 sense_data = sts24->data;
1389 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 1528 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1529 ox_id = le16_to_cpu(sts24->ox_id);
1390 } else { 1530 } else {
1391 if (scsi_status & SS_SENSE_LEN_VALID) 1531 if (scsi_status & SS_SENSE_LEN_VALID)
1392 sense_len = le16_to_cpu(sts->req_sense_length); 1532 sense_len = le16_to_cpu(sts->req_sense_length);
@@ -1403,17 +1543,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1403 if (IS_FWI2_CAPABLE(ha)) 1543 if (IS_FWI2_CAPABLE(ha))
1404 sense_data += rsp_info_len; 1544 sense_data += rsp_info_len;
1405 if (rsp_info_len > 3 && rsp_info[3]) { 1545 if (rsp_info_len > 3 && rsp_info[3]) {
1406 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol " 1546 DEBUG2(qla_printk(KERN_INFO, ha,
1407 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..." 1547 "scsi(%ld:%d:%d): FCP I/O protocol failure "
1408 "retrying command\n", vha->host_no, 1548 "(0x%x/0x%x).\n", vha->host_no, cp->device->id,
1409 cp->device->channel, cp->device->id, 1549 cp->device->lun, rsp_info_len, rsp_info[3]));
1410 cp->device->lun, rsp_info_len, rsp_info[0],
1411 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1412 rsp_info[5], rsp_info[6], rsp_info[7]));
1413 1550
1414 cp->result = DID_BUS_BUSY << 16; 1551 cp->result = DID_BUS_BUSY << 16;
1415 qla2x00_sp_compl(ha, sp); 1552 goto out;
1416 return;
1417 } 1553 }
1418 } 1554 }
1419 1555
@@ -1440,12 +1576,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1440 ((unsigned)(scsi_bufflen(cp) - resid) < 1576 ((unsigned)(scsi_bufflen(cp) - resid) <
1441 cp->underflow)) { 1577 cp->underflow)) {
1442 qla_printk(KERN_INFO, ha, 1578 qla_printk(KERN_INFO, ha,
1443 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1579 "scsi(%ld:%d:%d): Mid-layer underflow "
1444 "detected (%x of %x bytes)...returning " 1580 "detected (0x%x of 0x%x bytes).\n",
1445 "error status.\n", vha->host_no, 1581 vha->host_no, cp->device->id,
1446 cp->device->channel, cp->device->id, 1582 cp->device->lun, resid, scsi_bufflen(cp));
1447 cp->device->lun, resid,
1448 scsi_bufflen(cp));
1449 1583
1450 cp->result = DID_ERROR << 16; 1584 cp->result = DID_ERROR << 16;
1451 break; 1585 break;
@@ -1454,12 +1588,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1454 cp->result = DID_OK << 16 | lscsi_status; 1588 cp->result = DID_OK << 16 | lscsi_status;
1455 1589
1456 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1590 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1457 DEBUG2(printk(KERN_INFO 1591 DEBUG2(qla_printk(KERN_INFO, ha,
1458 "scsi(%ld): QUEUE FULL status detected " 1592 "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1459 "0x%x-0x%x.\n", vha->host_no, comp_status, 1593 vha->host_no, cp->device->id, cp->device->lun));
1460 scsi_status));
1461 break; 1594 break;
1462 } 1595 }
1596 logit = 0;
1463 if (lscsi_status != SS_CHECK_CONDITION) 1597 if (lscsi_status != SS_CHECK_CONDITION)
1464 break; 1598 break;
1465 1599
@@ -1471,23 +1605,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1471 break; 1605 break;
1472 1606
1473 case CS_DATA_UNDERRUN: 1607 case CS_DATA_UNDERRUN:
1474 DEBUG2(printk(KERN_INFO
1475 "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
1476 "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
1477 vha->host_no, cp->device->id, cp->device->lun, comp_status,
1478 scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
1479 cp->underflow));
1480
1481 /* Use F/W calculated residual length. */ 1608 /* Use F/W calculated residual length. */
1482 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 1609 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1483 scsi_set_resid(cp, resid); 1610 scsi_set_resid(cp, resid);
1484 if (scsi_status & SS_RESIDUAL_UNDER) { 1611 if (scsi_status & SS_RESIDUAL_UNDER) {
1485 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1612 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1486 DEBUG2(printk( 1613 DEBUG2(qla_printk(KERN_INFO, ha,
1487 "scsi(%ld:%d:%d:%d) Dropped frame(s) " 1614 "scsi(%ld:%d:%d) Dropped frame(s) detected "
1488 "detected (%x of %x bytes)...residual " 1615 "(0x%x of 0x%x bytes).\n", vha->host_no,
1489 "length mismatch...retrying command.\n",
1490 vha->host_no, cp->device->channel,
1491 cp->device->id, cp->device->lun, resid, 1616 cp->device->id, cp->device->lun, resid,
1492 scsi_bufflen(cp))); 1617 scsi_bufflen(cp)));
1493 1618
@@ -1499,21 +1624,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1499 ((unsigned)(scsi_bufflen(cp) - resid) < 1624 ((unsigned)(scsi_bufflen(cp) - resid) <
1500 cp->underflow)) { 1625 cp->underflow)) {
1501 qla_printk(KERN_INFO, ha, 1626 qla_printk(KERN_INFO, ha,
1502 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1627 "scsi(%ld:%d:%d): Mid-layer underflow "
1503 "detected (%x of %x bytes)...returning " 1628 "detected (0x%x of 0x%x bytes).\n",
1504 "error status.\n", vha->host_no, 1629 vha->host_no, cp->device->id,
1505 cp->device->channel, cp->device->id,
1506 cp->device->lun, resid, scsi_bufflen(cp)); 1630 cp->device->lun, resid, scsi_bufflen(cp));
1507 1631
1508 cp->result = DID_ERROR << 16; 1632 cp->result = DID_ERROR << 16;
1509 break; 1633 break;
1510 } 1634 }
1511 } else if (!lscsi_status) { 1635 } else if (!lscsi_status) {
1512 DEBUG2(printk( 1636 DEBUG2(qla_printk(KERN_INFO, ha,
1513 "scsi(%ld:%d:%d:%d) Dropped frame(s) detected " 1637 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
1514 "(%x of %x bytes)...firmware reported underrun..." 1638 "of 0x%x bytes).\n", vha->host_no, cp->device->id,
1515 "retrying command.\n", vha->host_no,
1516 cp->device->channel, cp->device->id,
1517 cp->device->lun, resid, scsi_bufflen(cp))); 1639 cp->device->lun, resid, scsi_bufflen(cp)));
1518 1640
1519 cp->result = DID_ERROR << 16; 1641 cp->result = DID_ERROR << 16;
@@ -1521,6 +1643,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1521 } 1643 }
1522 1644
1523 cp->result = DID_OK << 16 | lscsi_status; 1645 cp->result = DID_OK << 16 | lscsi_status;
1646 logit = 0;
1524 1647
1525 /* 1648 /*
1526 * Check to see if SCSI Status is non zero. If so report SCSI 1649 * Check to see if SCSI Status is non zero. If so report SCSI
@@ -1528,10 +1651,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1528 */ 1651 */
1529 if (lscsi_status != 0) { 1652 if (lscsi_status != 0) {
1530 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1653 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1531 DEBUG2(printk(KERN_INFO 1654 DEBUG2(qla_printk(KERN_INFO, ha,
1532 "scsi(%ld): QUEUE FULL status detected " 1655 "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1533 "0x%x-0x%x.\n", vha->host_no, comp_status, 1656 vha->host_no, cp->device->id,
1534 scsi_status)); 1657 cp->device->lun));
1658 logit = 1;
1535 break; 1659 break;
1536 } 1660 }
1537 if (lscsi_status != SS_CHECK_CONDITION) 1661 if (lscsi_status != SS_CHECK_CONDITION)
@@ -1545,109 +1669,60 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1545 } 1669 }
1546 break; 1670 break;
1547 1671
1548 case CS_DATA_OVERRUN:
1549 DEBUG2(printk(KERN_INFO
1550 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1551 vha->host_no, cp->device->id, cp->device->lun, comp_status,
1552 scsi_status));
1553 DEBUG2(printk(KERN_INFO
1554 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1555 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1556 cp->cmnd[4], cp->cmnd[5]));
1557 DEBUG2(printk(KERN_INFO
1558 "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1559 "status!\n",
1560 cp->serial_number, scsi_bufflen(cp), resid_len));
1561
1562 cp->result = DID_ERROR << 16;
1563 break;
1564
1565 case CS_PORT_LOGGED_OUT: 1672 case CS_PORT_LOGGED_OUT:
1566 case CS_PORT_CONFIG_CHG: 1673 case CS_PORT_CONFIG_CHG:
1567 case CS_PORT_BUSY: 1674 case CS_PORT_BUSY:
1568 case CS_INCOMPLETE: 1675 case CS_INCOMPLETE:
1569 case CS_PORT_UNAVAILABLE: 1676 case CS_PORT_UNAVAILABLE:
1570 /* 1677 case CS_TIMEOUT:
1571 * If the port is in Target Down state, return all IOs for this
1572 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
1573 * retry_queue.
1574 */
1575 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1576 "pid=%ld, compl status=0x%x, port state=0x%x\n",
1577 vha->host_no, cp->device->id, cp->device->lun,
1578 cp->serial_number, comp_status,
1579 atomic_read(&fcport->state)));
1580
1581 /* 1678 /*
1582 * We are going to have the fc class block the rport 1679 * We are going to have the fc class block the rport
1583 * while we try to recover so instruct the mid layer 1680 * while we try to recover so instruct the mid layer
1584 * to requeue until the class decides how to handle this. 1681 * to requeue until the class decides how to handle this.
1585 */ 1682 */
1586 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1683 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1684
1685 if (comp_status == CS_TIMEOUT) {
1686 if (IS_FWI2_CAPABLE(ha))
1687 break;
1688 else if ((le16_to_cpu(sts->status_flags) &
1689 SF_LOGOUT_SENT) == 0)
1690 break;
1691 }
1692
1693 DEBUG2(qla_printk(KERN_INFO, ha,
1694 "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
1695 vha->host_no, cp->device->id, cp->device->lun,
1696 atomic_read(&fcport->state)));
1697
1587 if (atomic_read(&fcport->state) == FCS_ONLINE) 1698 if (atomic_read(&fcport->state) == FCS_ONLINE)
1588 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1699 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1589 break; 1700 break;
1590 1701
1591 case CS_RESET: 1702 case CS_RESET:
1592 DEBUG2(printk(KERN_INFO
1593 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1594 vha->host_no, comp_status, scsi_status));
1595
1596 cp->result = DID_RESET << 16;
1597 break;
1598
1599 case CS_ABORTED: 1703 case CS_ABORTED:
1600 /*
1601 * hv2.19.12 - DID_ABORT does not retry the request if we
1602 * aborted this request then abort otherwise it must be a
1603 * reset.
1604 */
1605 DEBUG2(printk(KERN_INFO
1606 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1607 vha->host_no, comp_status, scsi_status));
1608
1609 cp->result = DID_RESET << 16; 1704 cp->result = DID_RESET << 16;
1610 break; 1705 break;
1611 1706
1612 case CS_TIMEOUT: 1707 case CS_DIF_ERROR:
1613 /* 1708 qla2x00_handle_dif_error(sp, sts24);
1614 * We are going to have the fc class block the rport
1615 * while we try to recover so instruct the mid layer
1616 * to requeue until the class decides how to handle this.
1617 */
1618 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1619
1620 if (IS_FWI2_CAPABLE(ha)) {
1621 DEBUG2(printk(KERN_INFO
1622 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1623 "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1624 cp->device->id, cp->device->lun, comp_status,
1625 scsi_status));
1626 break;
1627 }
1628 DEBUG2(printk(KERN_INFO
1629 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1630 "sflags=%x.\n", vha->host_no, cp->device->channel,
1631 cp->device->id, cp->device->lun, comp_status, scsi_status,
1632 le16_to_cpu(sts->status_flags)));
1633
1634 /* Check to see if logout occurred. */
1635 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1636 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1637 break; 1709 break;
1638
1639 default: 1710 default:
1640 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1641 "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1642 qla_printk(KERN_INFO, ha,
1643 "Unknown status detected 0x%x-0x%x.\n",
1644 comp_status, scsi_status);
1645
1646 cp->result = DID_ERROR << 16; 1711 cp->result = DID_ERROR << 16;
1647 break; 1712 break;
1648 } 1713 }
1649 1714
1650 /* Place command on done queue. */ 1715out:
1716 if (logit)
1717 DEBUG2(qla_printk(KERN_INFO, ha,
1718 "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
1719 "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
1720 "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
1721 cp->device->id, cp->device->lun, comp_status, scsi_status,
1722 cp->result, ox_id, cp->serial_number, cp->cmnd[0],
1723 cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1724 resid_len, fw_resid_len));
1725
1651 if (rsp->status_srb == NULL) 1726 if (rsp->status_srb == NULL)
1652 qla2x00_sp_compl(ha, sp); 1727 qla2x00_sp_compl(ha, sp);
1653} 1728}
@@ -1806,6 +1881,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1806 struct rsp_que *rsp) 1881 struct rsp_que *rsp)
1807{ 1882{
1808 struct sts_entry_24xx *pkt; 1883 struct sts_entry_24xx *pkt;
1884 struct qla_hw_data *ha = vha->hw;
1809 1885
1810 if (!vha->flags.online) 1886 if (!vha->flags.online)
1811 return; 1887 return;
@@ -1846,6 +1922,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1846 qla24xx_logio_entry(vha, rsp->req, 1922 qla24xx_logio_entry(vha, rsp->req,
1847 (struct logio_entry_24xx *)pkt); 1923 (struct logio_entry_24xx *)pkt);
1848 break; 1924 break;
1925 case TSK_MGMT_IOCB_TYPE:
1926 qla24xx_tm_iocb_entry(vha, rsp->req,
1927 (struct tsk_mgmt_entry *)pkt);
1928 break;
1929 case MARKER_TYPE:
1930 qla24xx_marker_iocb_entry(vha, rsp->req,
1931 (struct mrk_entry_24xx *)pkt);
1932 break;
1849 case CT_IOCB_TYPE: 1933 case CT_IOCB_TYPE:
1850 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1934 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1851 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags); 1935 clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
@@ -1866,7 +1950,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1866 } 1950 }
1867 1951
1868 /* Adjust ring index */ 1952 /* Adjust ring index */
1869 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 1953 if (IS_QLA82XX(ha)) {
1954 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1955 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
1956 } else
1957 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
1870} 1958}
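
The response-queue out-pointer write now branches on chip family because the 82xx exposes its queue registers in a different layout. A small sketch of selecting a register block through a union, in the spirit of ha->iobase; the layout and names here are invented for illustration.

#include <stdint.h>
#include <stdio.h>

struct regs_24xx { uint32_t rsp_q_out; };
struct regs_82xx { uint32_t rsp_q_out[2]; };	/* per-function pair */

union chip_regs {
	struct regs_24xx isp24;
	struct regs_82xx isp82;
};

static void write_rsp_q_out(union chip_regs *reg, int is_82xx, uint32_t idx)
{
	if (is_82xx)
		reg->isp82.rsp_q_out[0] = idx;	/* models WRT_REG_DWORD(&reg->rsp_q_out[0], ...) */
	else
		reg->isp24.rsp_q_out = idx;
}

int main(void)
{
	union chip_regs regs = {0};
	write_rsp_q_out(&regs, 1, 42);
	printf("82xx out-pointer = %u\n", regs.isp82.rsp_q_out[0]);
	return 0;
}
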
1871 1959
1872static void 1960static void
@@ -2169,6 +2257,11 @@ static struct qla_init_msix_entry msix_entries[3] = {
2169 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 2257 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2170}; 2258};
2171 2259
2260static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2261 { "qla2xxx (default)", qla82xx_msix_default },
2262 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2263};
2264
2172static void 2265static void
2173qla24xx_disable_msix(struct qla_hw_data *ha) 2266qla24xx_disable_msix(struct qla_hw_data *ha)
2174{ 2267{
@@ -2195,7 +2288,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2195 struct qla_msix_entry *qentry; 2288 struct qla_msix_entry *qentry;
2196 2289
2197 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2290 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2198 GFP_KERNEL); 2291 GFP_KERNEL);
2199 if (!entries) 2292 if (!entries)
2200 return -ENOMEM; 2293 return -ENOMEM;
2201 2294
@@ -2240,8 +2333,15 @@ msix_failed:
2240 /* Enable MSI-X vectors for the base queue */ 2333 /* Enable MSI-X vectors for the base queue */
2241 for (i = 0; i < 2; i++) { 2334 for (i = 0; i < 2; i++) {
2242 qentry = &ha->msix_entries[i]; 2335 qentry = &ha->msix_entries[i];
2243 ret = request_irq(qentry->vector, msix_entries[i].handler, 2336 if (IS_QLA82XX(ha)) {
2244 0, msix_entries[i].name, rsp); 2337 ret = request_irq(qentry->vector,
2338 qla82xx_msix_entries[i].handler,
2339 0, qla82xx_msix_entries[i].name, rsp);
2340 } else {
2341 ret = request_irq(qentry->vector,
2342 msix_entries[i].handler,
2343 0, msix_entries[i].name, rsp);
2344 }
2245 if (ret) { 2345 if (ret) {
2246 qla_printk(KERN_WARNING, ha, 2346 qla_printk(KERN_WARNING, ha,
2247 "MSI-X: Unable to register handler -- %x/%d.\n", 2347 "MSI-X: Unable to register handler -- %x/%d.\n",
@@ -2272,7 +2372,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2272 2372
2273 /* If possible, enable MSI-X. */ 2373 /* If possible, enable MSI-X. */
2274 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2374 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
2275 !IS_QLA8432(ha) && !IS_QLA8001(ha)) 2375 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
2276 goto skip_msi; 2376 goto skip_msi;
2277 2377
2278 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 2378 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2302,7 +2402,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2302 goto clear_risc_ints; 2402 goto clear_risc_ints;
2303 } 2403 }
2304 qla_printk(KERN_WARNING, ha, 2404 qla_printk(KERN_WARNING, ha,
2305 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 2405 "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
2306skip_msix: 2406skip_msix:
2307 2407
2308 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 2408 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2313,7 +2413,9 @@ skip_msix:
2313 if (!ret) { 2413 if (!ret) {
2314 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n")); 2414 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2315 ha->flags.msi_enabled = 1; 2415 ha->flags.msi_enabled = 1;
2316 } 2416 } else
2417 qla_printk(KERN_WARNING, ha,
2418 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
2317skip_msi: 2419skip_msi:
2318 2420
2319 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 2421 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -2331,7 +2433,7 @@ clear_risc_ints:
2331 * FIXME: Noted that 8014s were being dropped during NK testing. 2433 * FIXME: Noted that 8014s were being dropped during NK testing.
2332 * Timing deltas during MSI-X/INTa transitions? 2434 * Timing deltas during MSI-X/INTa transitions?
2333 */ 2435 */
2334 if (IS_QLA81XX(ha)) 2436 if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
2335 goto fail; 2437 goto fail;
2336 spin_lock_irq(&ha->hardware_lock); 2438 spin_lock_irq(&ha->hardware_lock);
2337 if (IS_FWI2_CAPABLE(ha)) { 2439 if (IS_FWI2_CAPABLE(ha)) {
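
With the corrected messages above, the intent of qla2x00_request_irqs() reads cleanly: try MSI-X, fall back to MSI, fall back again to INTa, logging each demotion. Here is a compact model of that ladder; the enable functions are stubs, not the PCI API.

#include <stdio.h>

enum irq_mode { IRQ_MSIX, IRQ_MSI, IRQ_INTA };

/* Stubs standing in for pci_enable_msix()/pci_enable_msi(). */
static int enable_msix(void) { return -1; }	/* pretend MSI-X fails */
static int enable_msi(void)  { return 0; }

static enum irq_mode request_irqs(void)
{
	int ret = enable_msix();
	if (!ret)
		return IRQ_MSIX;
	fprintf(stderr, "MSI-X: falling back to MSI -- %d\n", ret);

	ret = enable_msi();
	if (!ret)
		return IRQ_MSI;
	fprintf(stderr, "MSI: falling back to INTa -- %d\n", ret);

	return IRQ_INTA;
}

int main(void)
{
	printf("mode=%d\n", request_irqs());
	return 0;
}
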
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 42eb7ffd5942..f3650d0434ca 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -49,6 +49,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
49 if (ha->pdev->error_state > pci_channel_io_frozen) 49 if (ha->pdev->error_state > pci_channel_io_frozen)
50 return QLA_FUNCTION_TIMEOUT; 50 return QLA_FUNCTION_TIMEOUT;
51 51
52 if (vha->device_flags & DFLG_DEV_FAILED) {
53 DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
54 "%s(%ld): Device in failed state, "
55 "timeout MBX Exiting.\n",
56 __func__, base_vha->host_no));
57 return QLA_FUNCTION_TIMEOUT;
58 }
59
52 reg = ha->iobase; 60 reg = ha->iobase;
53 io_lock_on = base_vha->flags.init_done; 61 io_lock_on = base_vha->flags.init_done;
54 62
@@ -85,7 +93,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
85 spin_lock_irqsave(&ha->hardware_lock, flags); 93 spin_lock_irqsave(&ha->hardware_lock, flags);
86 94
87 /* Load mailbox registers. */ 95 /* Load mailbox registers. */
88 if (IS_FWI2_CAPABLE(ha)) 96 if (IS_QLA82XX(ha))
97 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
98 else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
89 optr = (uint16_t __iomem *)&reg->isp24.mailbox0; 99 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
90 else 100 else
91 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0); 101 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -133,7 +143,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
133 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { 143 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
134 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 144 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
135 145
136 if (IS_FWI2_CAPABLE(ha)) 146 if (IS_QLA82XX(ha)) {
147 if (RD_REG_DWORD(&reg->isp82.hint) &
148 HINT_MBX_INT_PENDING) {
149 spin_unlock_irqrestore(&ha->hardware_lock,
150 flags);
151 DEBUG2_3_11(printk(KERN_INFO
152 "%s(%ld): Pending Mailbox timeout. "
153 "Exiting.\n", __func__, base_vha->host_no));
154 return QLA_FUNCTION_TIMEOUT;
155 }
156 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
157 } else if (IS_FWI2_CAPABLE(ha))
137 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); 158 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
138 else 159 else
139 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); 160 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -147,7 +168,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
147 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 168 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
148 base_vha->host_no, command)); 169 base_vha->host_no, command));
149 170
150 if (IS_FWI2_CAPABLE(ha)) 171 if (IS_QLA82XX(ha)) {
172 if (RD_REG_DWORD(&reg->isp82.hint) &
173 HINT_MBX_INT_PENDING) {
174 spin_unlock_irqrestore(&ha->hardware_lock,
175 flags);
176 DEBUG2_3_11(printk(KERN_INFO
177 "%s(%ld): Pending Mailbox timeout. "
178 "Exiting.\n", __func__, base_vha->host_no));
179 return QLA_FUNCTION_TIMEOUT;
180 }
181 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
182 } else if (IS_FWI2_CAPABLE(ha))
151 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); 183 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
152 else 184 else
153 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); 185 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
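
The two identical IS_QLA82XX() blocks above guard the host-interrupt doorbell with a pending check: if HINT_MBX_INT_PENDING is still set from an earlier command, that mailbox timed out and a new one must not be rung. A sketch of the test-before-set doorbell discipline; the register is modeled as a plain variable, not real MMIO.

#include <stdint.h>
#include <stdio.h>

#define HINT_MBX_INT_PENDING	0x1u

static uint32_t hint_reg;	/* stands in for reg->isp82.hint */

/* Returns 0 on success, -1 if a previous mailbox never completed. */
static int ring_mailbox_doorbell(void)
{
	if (hint_reg & HINT_MBX_INT_PENDING) {
		puts("Pending mailbox timeout - exiting");
		return -1;
	}
	hint_reg |= HINT_MBX_INT_PENDING;	/* firmware clears it when done */
	return 0;
}

int main(void)
{
	ring_mailbox_doorbell();		/* ok, sets the pending bit */
	if (ring_mailbox_doorbell() < 0)	/* fails: bit never cleared */
		return 1;
	return 0;
}
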
@@ -264,7 +296,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
264 296
265 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 297 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
266 clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 298 clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
267 if (qla2x00_abort_isp(base_vha)) { 299 if (ha->isp_ops->abort_isp(base_vha)) {
268 /* Failed. retry later. */ 300 /* Failed. retry later. */
269 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 301 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
270 } 302 }
@@ -711,7 +743,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
711 * Context: 743 * Context:
712 * Kernel context. 744 * Kernel context.
713 */ 745 */
714static int 746int
715qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, 747qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
716 dma_addr_t phys_addr, size_t size, uint32_t tov) 748 dma_addr_t phys_addr, size_t size, uint32_t tov)
717{ 749{
@@ -952,7 +984,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
952 mcp->mb[9] = vha->vp_idx; 984 mcp->mb[9] = vha->vp_idx;
953 mcp->out_mb = MBX_9|MBX_0; 985 mcp->out_mb = MBX_9|MBX_0;
954 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 986 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
955 if (IS_QLA81XX(vha->hw)) 987 if (IS_QLA8XXX_TYPE(vha->hw))
956 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; 988 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
957 mcp->tov = MBX_TOV_SECONDS; 989 mcp->tov = MBX_TOV_SECONDS;
958 mcp->flags = 0; 990 mcp->flags = 0;
@@ -978,7 +1010,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
978 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 1010 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
979 vha->host_no)); 1011 vha->host_no));
980 1012
981 if (IS_QLA81XX(vha->hw)) { 1013 if (IS_QLA8XXX_TYPE(vha->hw)) {
982 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1014 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
983 vha->fcoe_fcf_idx = mcp->mb[10]; 1015 vha->fcoe_fcf_idx = mcp->mb[10];
984 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; 1016 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
@@ -1076,6 +1108,10 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1076 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1108 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
1077 vha->host_no)); 1109 vha->host_no));
1078 1110
1111 if (IS_QLA82XX(ha) && ql2xdbwr)
1112 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
1113 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1114
1079 if (ha->flags.npiv_supported) 1115 if (ha->flags.npiv_supported)
1080 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 1116 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1081 else 1117 else
@@ -1408,7 +1444,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1408 1444
1409 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1445 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1410 1446
1411 if (IS_QLA81XX(vha->hw)) { 1447 if (IS_QLA8XXX_TYPE(vha->hw)) {
1412 /* Logout across all FCFs. */ 1448 /* Logout across all FCFs. */
1413 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1449 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1414 mcp->mb[1] = BIT_1; 1450 mcp->mb[1] = BIT_1;
@@ -2428,12 +2464,22 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2428int 2464int
2429qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag) 2465qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2430{ 2466{
2467 struct qla_hw_data *ha = fcport->vha->hw;
2468
2469 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2470 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
2471
2431 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 2472 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2432} 2473}
2433 2474
2434int 2475int
2435qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag) 2476qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2436{ 2477{
2478 struct qla_hw_data *ha = fcport->vha->hw;
2479
2480 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2481 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
2482
2437 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 2483 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2438} 2484}
2439 2485
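
Both TMF wrappers now consult ql2xasynctmfenable and IS_FWI2_CAPABLE() before choosing the asynchronous IOCB path, falling back to the synchronous mailbox TMF otherwise. The shape of that gate, modeled in plain C with the flag and both paths as stubs:

#include <stdio.h>

static int ql2xasynctmfenable = 1;	/* module parameter stand-in */
static int fwi2_capable = 1;

static int async_tm_cmd(void)   { puts("async TMF via IOCB");    return 0; }
static int issue_tmf_sync(void) { puts("sync TMF via mailbox");  return 0; }

static int lun_reset(void)
{
	if (ql2xasynctmfenable && fwi2_capable)
		return async_tm_cmd();
	return issue_tmf_sync();
}

int main(void)
{
	return lun_reset();
}
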
@@ -2740,6 +2786,48 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
2740} 2786}
2741 2787
2742int 2788int
2789qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2790 uint16_t *port_speed, uint16_t *mb)
2791{
2792 int rval;
2793 mbx_cmd_t mc;
2794 mbx_cmd_t *mcp = &mc;
2795
2796 if (!IS_IIDMA_CAPABLE(vha->hw))
2797 return QLA_FUNCTION_FAILED;
2798
2799 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2800
2801 mcp->mb[0] = MBC_PORT_PARAMS;
2802 mcp->mb[1] = loop_id;
2803 mcp->mb[2] = mcp->mb[3] = 0;
2804 mcp->mb[9] = vha->vp_idx;
2805 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2806 mcp->in_mb = MBX_3|MBX_1|MBX_0;
2807 mcp->tov = MBX_TOV_SECONDS;
2808 mcp->flags = 0;
2809 rval = qla2x00_mailbox_command(vha, mcp);
2810
2811 /* Return mailbox statuses. */
2812 if (mb != NULL) {
2813 mb[0] = mcp->mb[0];
2814 mb[1] = mcp->mb[1];
2815 mb[3] = mcp->mb[3];
2816 }
2817
2818 if (rval != QLA_SUCCESS) {
2819 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2820 vha->host_no, rval));
2821 } else {
2822 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2823 if (port_speed)
2824 *port_speed = mcp->mb[3];
2825 }
2826
2827 return rval;
2828}
2829
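qla2x00_get_idma_speed() follows the driver's mailbox convention: mb[] holds register values, while out_mb and in_mb are bitmaps (MBX_0, MBX_1, ...) naming which registers to load before the command and read back after it. A toy model of that convention, with the mailbox "hardware" reduced to an array; values and the fake firmware reply are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define MBX_0 (1u << 0)
#define MBX_1 (1u << 1)
#define MBX_3 (1u << 3)
#define MBX_9 (1u << 9)

struct mbx_cmd {
	uint16_t mb[32];
	uint32_t out_mb;	/* registers to write to the chip */
	uint32_t in_mb;		/* registers to read back */
};

static uint16_t chip_mb[32];	/* stand-in for the mailbox registers */

static void mailbox_command(struct mbx_cmd *mcp)
{
	for (int i = 0; i < 32; i++)
		if (mcp->out_mb & (1u << i))
			chip_mb[i] = mcp->mb[i];	/* load selected registers */
	chip_mb[3] = 0x0004;				/* pretend firmware reply: speed */
	for (int i = 0; i < 32; i++)
		if (mcp->in_mb & (1u << i))
			mcp->mb[i] = chip_mb[i];	/* read back selected registers */
}

int main(void)
{
	struct mbx_cmd mc = { .out_mb = MBX_9 | MBX_3 | MBX_1 | MBX_0,
			      .in_mb  = MBX_3 | MBX_1 | MBX_0 };
	mc.mb[1] = 0x81;				/* loop id */
	mailbox_command(&mc);
	printf("port speed = 0x%04x\n", mc.mb[3]);
	return 0;
}
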
2830int
2743qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 2831qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2744 uint16_t port_speed, uint16_t *mb) 2832 uint16_t port_speed, uint16_t *mb)
2745{ 2833{
@@ -2755,7 +2843,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2755 mcp->mb[0] = MBC_PORT_PARAMS; 2843 mcp->mb[0] = MBC_PORT_PARAMS;
2756 mcp->mb[1] = loop_id; 2844 mcp->mb[1] = loop_id;
2757 mcp->mb[2] = BIT_0; 2845 mcp->mb[2] = BIT_0;
2758 if (IS_QLA81XX(vha->hw)) 2846 if (IS_QLA8XXX_TYPE(vha->hw))
2759 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); 2847 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
2760 else 2848 else
2761 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); 2849 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
@@ -3544,7 +3632,7 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3544 mbx_cmd_t mc; 3632 mbx_cmd_t mc;
3545 mbx_cmd_t *mcp = &mc; 3633 mbx_cmd_t *mcp = &mc;
3546 3634
3547 if (!IS_QLA81XX(vha->hw)) 3635 if (!IS_QLA8XXX_TYPE(vha->hw))
3548 return QLA_FUNCTION_FAILED; 3636 return QLA_FUNCTION_FAILED;
3549 3637
3550 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3638 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3582,7 +3670,7 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3582 mbx_cmd_t mc; 3670 mbx_cmd_t mc;
3583 mbx_cmd_t *mcp = &mc; 3671 mbx_cmd_t *mcp = &mc;
3584 3672
3585 if (!IS_QLA81XX(vha->hw)) 3673 if (!IS_QLA8XXX_TYPE(vha->hw))
3586 return QLA_FUNCTION_FAILED; 3674 return QLA_FUNCTION_FAILED;
3587 3675
3588 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3676 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3643,7 +3731,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3643} 3731}
3644 3732
3645int 3733int
3646qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp) 3734qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3735 uint16_t *mresp)
3647{ 3736{
3648 int rval; 3737 int rval;
3649 mbx_cmd_t mc; 3738 mbx_cmd_t mc;
@@ -3678,7 +3767,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
3678 3767
3679 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 3768 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
3680 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 3769 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3681 if (IS_QLA81XX(vha->hw)) 3770 if (IS_QLA8XXX_TYPE(vha->hw))
3682 mcp->out_mb |= MBX_2; 3771 mcp->out_mb |= MBX_2;
3683 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 3772 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
3684 3773
@@ -3690,9 +3779,11 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
3690 3779
3691 if (rval != QLA_SUCCESS) { 3780 if (rval != QLA_SUCCESS) {
3692 DEBUG2(printk(KERN_WARNING 3781 DEBUG2(printk(KERN_WARNING
3693 "(%ld): failed=%x mb[0]=0x%x " 3782 "(%ld): failed=%x mb[0]=0x%x "
3694 "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x. \n", vha->host_no, rval, 3783 "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x "
3695 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19])); 3784 "mb[19]=0x%x.\n",
3785 vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3786 mcp->mb[3], mcp->mb[18], mcp->mb[19]));
3696 } else { 3787 } else {
3697 DEBUG2(printk(KERN_WARNING 3788 DEBUG2(printk(KERN_WARNING
3698 "scsi(%ld): done.\n", vha->host_no)); 3789 "scsi(%ld): done.\n", vha->host_no));
@@ -3706,7 +3797,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *
3706} 3797}
3707 3798
3708int 3799int
3709qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp) 3800qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3801 uint16_t *mresp)
3710{ 3802{
3711 int rval; 3803 int rval;
3712 mbx_cmd_t mc; 3804 mbx_cmd_t mc;
@@ -3718,9 +3810,10 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
3718 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3810 memset(mcp->mb, 0 , sizeof(mcp->mb));
3719 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 3811 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
3720 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ 3812 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
3721 if (IS_QLA81XX(ha)) 3813 if (IS_QLA8XXX_TYPE(ha)) {
3722 mcp->mb[1] |= BIT_15; 3814 mcp->mb[1] |= BIT_15;
3723 mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0; 3815 mcp->mb[2] = vha->fcoe_fcf_idx;
3816 }
3724 mcp->mb[16] = LSW(mreq->rcv_dma); 3817 mcp->mb[16] = LSW(mreq->rcv_dma);
3725 mcp->mb[17] = MSW(mreq->rcv_dma); 3818 mcp->mb[17] = MSW(mreq->rcv_dma);
3726 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 3819 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
@@ -3735,13 +3828,13 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
3735 3828
3736 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 3829 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
3737 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 3830 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3738 if (IS_QLA81XX(ha)) 3831 if (IS_QLA8XXX_TYPE(ha))
3739 mcp->out_mb |= MBX_2; 3832 mcp->out_mb |= MBX_2;
3740 3833
3741 mcp->in_mb = MBX_0; 3834 mcp->in_mb = MBX_0;
3742 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) 3835 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
3743 mcp->in_mb |= MBX_1; 3836 mcp->in_mb |= MBX_1;
3744 if (IS_QLA81XX(ha)) 3837 if (IS_QLA8XXX_TYPE(ha))
3745 mcp->in_mb |= MBX_3; 3838 mcp->in_mb |= MBX_3;
3746 3839
3747 mcp->tov = MBX_TOV_SECONDS; 3840 mcp->tov = MBX_TOV_SECONDS;
@@ -3764,8 +3857,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mres
3764 return rval; 3857 return rval;
3765} 3858}
3766int 3859int
3767qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic, 3860qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
3768 uint16_t *cmd_status)
3769{ 3861{
3770 int rval; 3862 int rval;
3771 mbx_cmd_t mc; 3863 mbx_cmd_t mc;
@@ -3782,8 +3874,6 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
3782 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 3874 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3783 rval = qla2x00_mailbox_command(ha, mcp); 3875 rval = qla2x00_mailbox_command(ha, mcp);
3784 3876
3785 /* Return mailbox statuses. */
3786 *cmd_status = mcp->mb[0];
3787 if (rval != QLA_SUCCESS) 3877 if (rval != QLA_SUCCESS)
3788 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no, 3878 DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
3789 rval)); 3879 rval));
@@ -3801,7 +3891,7 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3801 mbx_cmd_t *mcp = &mc; 3891 mbx_cmd_t *mcp = &mc;
3802 3892
3803 if (!IS_FWI2_CAPABLE(vha->hw)) 3893 if (!IS_FWI2_CAPABLE(vha->hw))
3804 return QLA_FUNCTION_FAILED; 3894 return QLA_FUNCTION_FAILED;
3805 3895
3806 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 3896 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
3807 3897
@@ -3836,7 +3926,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
3836 if (!IS_FWI2_CAPABLE(ha)) 3926 if (!IS_FWI2_CAPABLE(ha))
3837 return QLA_FUNCTION_FAILED; 3927 return QLA_FUNCTION_FAILED;
3838 3928
3839 DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no)); 3929 DEBUG11(qla_printk(KERN_INFO, ha,
3930 "%s(%ld): entered.\n", __func__, vha->host_no));
3840 3931
3841 mcp->mb[0] = MBC_DATA_RATE; 3932 mcp->mb[0] = MBC_DATA_RATE;
3842 mcp->mb[1] = 0; 3933 mcp->mb[1] = 0;
@@ -3857,3 +3948,122 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
3857 3948
3858 return rval; 3949 return rval;
3859} 3950}
3951
3952int
3953qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
3954 uint16_t *mb)
3955{
3956 int rval;
3957 mbx_cmd_t mc;
3958 mbx_cmd_t *mcp = &mc;
3959 struct qla_hw_data *ha = vha->hw;
3960
3961 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
3962 return QLA_FUNCTION_FAILED;
3963
3964 DEBUG11(printk(KERN_INFO
 3965	    "%s(%ld): entered.\n", __func__, vha->host_no));
3966
3967 mcp->mb[0] = MBC_PORT_PARAMS;
3968 mcp->mb[1] = loop_id;
3969 if (ha->flags.fcp_prio_enabled)
3970 mcp->mb[2] = BIT_1;
3971 else
3972 mcp->mb[2] = BIT_2;
3973 mcp->mb[4] = priority & 0xf;
3974 mcp->mb[9] = vha->vp_idx;
3975 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3976 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3977 mcp->tov = 30;
3978 mcp->flags = 0;
3979 rval = qla2x00_mailbox_command(vha, mcp);
3980 if (mb != NULL) {
3981 mb[0] = mcp->mb[0];
3982 mb[1] = mcp->mb[1];
3983 mb[3] = mcp->mb[3];
3984 mb[4] = mcp->mb[4];
3985 }
3986
3987 if (rval != QLA_SUCCESS) {
3988 DEBUG2_3_11(printk(KERN_WARNING
3989 "%s(%ld): failed=%x.\n", __func__,
3990 vha->host_no, rval));
3991 } else {
3992 DEBUG11(printk(KERN_INFO
3993 "%s(%ld): done.\n", __func__, vha->host_no));
3994 }
3995
3996 return rval;
3997}
3998
3999int
4000qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4001{
4002 int rval;
4003 struct qla_hw_data *ha = vha->hw;
4004 mbx_cmd_t mc;
4005 mbx_cmd_t *mcp = &mc;
4006
4007 if (!IS_FWI2_CAPABLE(ha))
4008 return QLA_FUNCTION_FAILED;
4009
4010 DEBUG11(qla_printk(KERN_INFO, ha,
4011 "%s(%ld): entered.\n", __func__, vha->host_no));
4012
4013 memset(mcp, 0, sizeof(mbx_cmd_t));
4014 mcp->mb[0] = MBC_TOGGLE_INTR;
4015 mcp->mb[1] = 1;
4016
4017 mcp->out_mb = MBX_1|MBX_0;
4018 mcp->in_mb = MBX_0;
4019 mcp->tov = 30;
4020 mcp->flags = 0;
4021
4022 rval = qla2x00_mailbox_command(vha, mcp);
4023 if (rval != QLA_SUCCESS) {
4024 DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
4025 "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
4026 vha->host_no, rval, mcp->mb[0]));
4027 } else {
4028 DEBUG11(qla_printk(KERN_INFO, ha,
4029 "%s(%ld): done.\n", __func__, vha->host_no));
4030 }
4031
4032 return rval;
4033}
4034
4035int
4036qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4037{
4038 int rval;
4039 struct qla_hw_data *ha = vha->hw;
4040 mbx_cmd_t mc;
4041 mbx_cmd_t *mcp = &mc;
4042
4043 if (!IS_QLA82XX(ha))
4044 return QLA_FUNCTION_FAILED;
4045
4046 DEBUG11(qla_printk(KERN_INFO, ha,
4047 "%s(%ld): entered.\n", __func__, vha->host_no));
4048
4049 memset(mcp, 0, sizeof(mbx_cmd_t));
4050 mcp->mb[0] = MBC_TOGGLE_INTR;
4051 mcp->mb[1] = 0;
4052
4053 mcp->out_mb = MBX_1|MBX_0;
4054 mcp->in_mb = MBX_0;
4055 mcp->tov = 30;
4056 mcp->flags = 0;
4057
4058 rval = qla2x00_mailbox_command(vha, mcp);
4059 if (rval != QLA_SUCCESS) {
4060 DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
4061 "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
4062 vha->host_no, rval, mcp->mb[0]));
4063 } else {
4064 DEBUG11(qla_printk(KERN_INFO, ha,
4065 "%s(%ld): done.\n", __func__, vha->host_no));
4066 }
4067
4068 return rval;
4069}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
new file mode 100644
index 000000000000..ff562de0e8e7
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -0,0 +1,3636 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8#include <linux/delay.h>
9#include <linux/pci.h>
10
11#define MASK(n) ((1ULL<<(n))-1)
12#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
13 ((addr >> 25) & 0x3ff))
14#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
15 ((addr >> 25) & 0x3ff))
16#define MS_WIN(addr) (addr & 0x0ffc0000)
17#define QLA82XX_PCI_MN_2M (0)
18#define QLA82XX_PCI_MS_2M (0x80000)
19#define QLA82XX_PCI_OCM0_2M (0xc0000)
20#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
21#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
22
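For orientation, a hand-worked, hypothetical user-space sketch (illustrative only) of how the window macros above split a target address into a window-select value and an in-window offset:

#include <stdio.h>

#define MASK(n)		((1ULL << (n)) - 1)
#define MN_WIN(addr)	(((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
#define GET_MEM_OFFS_2M(addr)	(addr & MASK(18))

int main(void)
{
	unsigned long long addr = 0x1040000ULL;

	/* window-select value written to the window register: 0x820000 */
	printf("MN_WIN = %#llx\n", (unsigned long long)MN_WIN(addr));
	/* byte offset within the mapped 2M window: 0 */
	printf("offset = %llx\n", (unsigned long long)GET_MEM_OFFS_2M(addr));
	return 0;
}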
23/* CRB window related */
24#define CRB_BLK(off) ((off >> 20) & 0x3f)
25#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
26#define CRB_WINDOW_2M (0x130060)
27#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
28#define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
29 ((off) & 0xf0000))
30#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
31#define CRB_INDIRECT_2M (0x1e0000UL)
32
33#define MAX_CRB_XFORM 60
34static unsigned long crb_addr_xform[MAX_CRB_XFORM];
35int qla82xx_crb_table_initialized;
36
37#define qla82xx_crb_addr_transform(name) \
38 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
39 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
40
41static void qla82xx_crb_addr_transform_setup(void)
42{
43 qla82xx_crb_addr_transform(XDMA);
44 qla82xx_crb_addr_transform(TIMR);
45 qla82xx_crb_addr_transform(SRE);
46 qla82xx_crb_addr_transform(SQN3);
47 qla82xx_crb_addr_transform(SQN2);
48 qla82xx_crb_addr_transform(SQN1);
49 qla82xx_crb_addr_transform(SQN0);
50 qla82xx_crb_addr_transform(SQS3);
51 qla82xx_crb_addr_transform(SQS2);
52 qla82xx_crb_addr_transform(SQS1);
53 qla82xx_crb_addr_transform(SQS0);
54 qla82xx_crb_addr_transform(RPMX7);
55 qla82xx_crb_addr_transform(RPMX6);
56 qla82xx_crb_addr_transform(RPMX5);
57 qla82xx_crb_addr_transform(RPMX4);
58 qla82xx_crb_addr_transform(RPMX3);
59 qla82xx_crb_addr_transform(RPMX2);
60 qla82xx_crb_addr_transform(RPMX1);
61 qla82xx_crb_addr_transform(RPMX0);
62 qla82xx_crb_addr_transform(ROMUSB);
63 qla82xx_crb_addr_transform(SN);
64 qla82xx_crb_addr_transform(QMN);
65 qla82xx_crb_addr_transform(QMS);
66 qla82xx_crb_addr_transform(PGNI);
67 qla82xx_crb_addr_transform(PGND);
68 qla82xx_crb_addr_transform(PGN3);
69 qla82xx_crb_addr_transform(PGN2);
70 qla82xx_crb_addr_transform(PGN1);
71 qla82xx_crb_addr_transform(PGN0);
72 qla82xx_crb_addr_transform(PGSI);
73 qla82xx_crb_addr_transform(PGSD);
74 qla82xx_crb_addr_transform(PGS3);
75 qla82xx_crb_addr_transform(PGS2);
76 qla82xx_crb_addr_transform(PGS1);
77 qla82xx_crb_addr_transform(PGS0);
78 qla82xx_crb_addr_transform(PS);
79 qla82xx_crb_addr_transform(PH);
80 qla82xx_crb_addr_transform(NIU);
81 qla82xx_crb_addr_transform(I2Q);
82 qla82xx_crb_addr_transform(EG);
83 qla82xx_crb_addr_transform(MN);
84 qla82xx_crb_addr_transform(MS);
85 qla82xx_crb_addr_transform(CAS2);
86 qla82xx_crb_addr_transform(CAS1);
87 qla82xx_crb_addr_transform(CAS0);
88 qla82xx_crb_addr_transform(CAM);
89 qla82xx_crb_addr_transform(C2C1);
90 qla82xx_crb_addr_transform(C2C0);
91 qla82xx_crb_addr_transform(SMB);
92 qla82xx_crb_addr_transform(OCM0);
93 /*
 94	 * Used only in P3; just define it for P2 also.
95 */
96 qla82xx_crb_addr_transform(I2C0);
97
98 qla82xx_crb_table_initialized = 1;
99}
100
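/* Each sub-block entry below is {valid, start_128M, end_128M, start_2M}:
 * a valid address range in the legacy 128M CRB view together with the
 * base it maps to in the 2M view; see qla82xx_pci_get_crb_addr_2M().
 */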
101struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
102 {{{0, 0, 0, 0} } },
103 {{{1, 0x0100000, 0x0102000, 0x120000},
104 {1, 0x0110000, 0x0120000, 0x130000},
105 {1, 0x0120000, 0x0122000, 0x124000},
106 {1, 0x0130000, 0x0132000, 0x126000},
107 {1, 0x0140000, 0x0142000, 0x128000},
108 {1, 0x0150000, 0x0152000, 0x12a000},
109 {1, 0x0160000, 0x0170000, 0x110000},
110 {1, 0x0170000, 0x0172000, 0x12e000},
111 {0, 0x0000000, 0x0000000, 0x000000},
112 {0, 0x0000000, 0x0000000, 0x000000},
113 {0, 0x0000000, 0x0000000, 0x000000},
114 {0, 0x0000000, 0x0000000, 0x000000},
115 {0, 0x0000000, 0x0000000, 0x000000},
116 {0, 0x0000000, 0x0000000, 0x000000},
117 {1, 0x01e0000, 0x01e0800, 0x122000},
118 {0, 0x0000000, 0x0000000, 0x000000} } } ,
119 {{{1, 0x0200000, 0x0210000, 0x180000} } },
120 {{{0, 0, 0, 0} } },
121 {{{1, 0x0400000, 0x0401000, 0x169000} } },
122 {{{1, 0x0500000, 0x0510000, 0x140000} } },
123 {{{1, 0x0600000, 0x0610000, 0x1c0000} } },
124 {{{1, 0x0700000, 0x0704000, 0x1b8000} } },
125 {{{1, 0x0800000, 0x0802000, 0x170000},
126 {0, 0x0000000, 0x0000000, 0x000000},
127 {0, 0x0000000, 0x0000000, 0x000000},
128 {0, 0x0000000, 0x0000000, 0x000000},
129 {0, 0x0000000, 0x0000000, 0x000000},
130 {0, 0x0000000, 0x0000000, 0x000000},
131 {0, 0x0000000, 0x0000000, 0x000000},
132 {0, 0x0000000, 0x0000000, 0x000000},
133 {0, 0x0000000, 0x0000000, 0x000000},
134 {0, 0x0000000, 0x0000000, 0x000000},
135 {0, 0x0000000, 0x0000000, 0x000000},
136 {0, 0x0000000, 0x0000000, 0x000000},
137 {0, 0x0000000, 0x0000000, 0x000000},
138 {0, 0x0000000, 0x0000000, 0x000000},
139 {0, 0x0000000, 0x0000000, 0x000000},
140 {1, 0x08f0000, 0x08f2000, 0x172000} } },
141 {{{1, 0x0900000, 0x0902000, 0x174000},
142 {0, 0x0000000, 0x0000000, 0x000000},
143 {0, 0x0000000, 0x0000000, 0x000000},
144 {0, 0x0000000, 0x0000000, 0x000000},
145 {0, 0x0000000, 0x0000000, 0x000000},
146 {0, 0x0000000, 0x0000000, 0x000000},
147 {0, 0x0000000, 0x0000000, 0x000000},
148 {0, 0x0000000, 0x0000000, 0x000000},
149 {0, 0x0000000, 0x0000000, 0x000000},
150 {0, 0x0000000, 0x0000000, 0x000000},
151 {0, 0x0000000, 0x0000000, 0x000000},
152 {0, 0x0000000, 0x0000000, 0x000000},
153 {0, 0x0000000, 0x0000000, 0x000000},
154 {0, 0x0000000, 0x0000000, 0x000000},
155 {0, 0x0000000, 0x0000000, 0x000000},
156 {1, 0x09f0000, 0x09f2000, 0x176000} } },
157 {{{0, 0x0a00000, 0x0a02000, 0x178000},
158 {0, 0x0000000, 0x0000000, 0x000000},
159 {0, 0x0000000, 0x0000000, 0x000000},
160 {0, 0x0000000, 0x0000000, 0x000000},
161 {0, 0x0000000, 0x0000000, 0x000000},
162 {0, 0x0000000, 0x0000000, 0x000000},
163 {0, 0x0000000, 0x0000000, 0x000000},
164 {0, 0x0000000, 0x0000000, 0x000000},
165 {0, 0x0000000, 0x0000000, 0x000000},
166 {0, 0x0000000, 0x0000000, 0x000000},
167 {0, 0x0000000, 0x0000000, 0x000000},
168 {0, 0x0000000, 0x0000000, 0x000000},
169 {0, 0x0000000, 0x0000000, 0x000000},
170 {0, 0x0000000, 0x0000000, 0x000000},
171 {0, 0x0000000, 0x0000000, 0x000000},
172 {1, 0x0af0000, 0x0af2000, 0x17a000} } },
173 {{{0, 0x0b00000, 0x0b02000, 0x17c000},
174 {0, 0x0000000, 0x0000000, 0x000000},
175 {0, 0x0000000, 0x0000000, 0x000000},
176 {0, 0x0000000, 0x0000000, 0x000000},
177 {0, 0x0000000, 0x0000000, 0x000000},
178 {0, 0x0000000, 0x0000000, 0x000000},
179 {0, 0x0000000, 0x0000000, 0x000000},
180 {0, 0x0000000, 0x0000000, 0x000000},
181 {0, 0x0000000, 0x0000000, 0x000000},
182 {0, 0x0000000, 0x0000000, 0x000000},
183 {0, 0x0000000, 0x0000000, 0x000000},
184 {0, 0x0000000, 0x0000000, 0x000000},
185 {0, 0x0000000, 0x0000000, 0x000000},
186 {0, 0x0000000, 0x0000000, 0x000000},
187 {0, 0x0000000, 0x0000000, 0x000000},
188 {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
189 {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
190 {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
191 {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
192 {{{1, 0x0f00000, 0x0f01000, 0x164000} } },
193 {{{0, 0x1000000, 0x1004000, 0x1a8000} } },
194 {{{1, 0x1100000, 0x1101000, 0x160000} } },
195 {{{1, 0x1200000, 0x1201000, 0x161000} } },
196 {{{1, 0x1300000, 0x1301000, 0x162000} } },
197 {{{1, 0x1400000, 0x1401000, 0x163000} } },
198 {{{1, 0x1500000, 0x1501000, 0x165000} } },
199 {{{1, 0x1600000, 0x1601000, 0x166000} } },
200 {{{0, 0, 0, 0} } },
201 {{{0, 0, 0, 0} } },
202 {{{0, 0, 0, 0} } },
203 {{{0, 0, 0, 0} } },
204 {{{0, 0, 0, 0} } },
205 {{{0, 0, 0, 0} } },
206 {{{1, 0x1d00000, 0x1d10000, 0x190000} } },
207 {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
208 {{{1, 0x1f00000, 0x1f10000, 0x150000} } },
209 {{{0} } },
210 {{{1, 0x2100000, 0x2102000, 0x120000},
211 {1, 0x2110000, 0x2120000, 0x130000},
212 {1, 0x2120000, 0x2122000, 0x124000},
213 {1, 0x2130000, 0x2132000, 0x126000},
214 {1, 0x2140000, 0x2142000, 0x128000},
215 {1, 0x2150000, 0x2152000, 0x12a000},
216 {1, 0x2160000, 0x2170000, 0x110000},
217 {1, 0x2170000, 0x2172000, 0x12e000},
218 {0, 0x0000000, 0x0000000, 0x000000},
219 {0, 0x0000000, 0x0000000, 0x000000},
220 {0, 0x0000000, 0x0000000, 0x000000},
221 {0, 0x0000000, 0x0000000, 0x000000},
222 {0, 0x0000000, 0x0000000, 0x000000},
223 {0, 0x0000000, 0x0000000, 0x000000},
224 {0, 0x0000000, 0x0000000, 0x000000},
225 {0, 0x0000000, 0x0000000, 0x000000} } },
226 {{{1, 0x2200000, 0x2204000, 0x1b0000} } },
227 {{{0} } },
228 {{{0} } },
229 {{{0} } },
230 {{{0} } },
231 {{{0} } },
232 {{{1, 0x2800000, 0x2804000, 0x1a4000} } },
233 {{{1, 0x2900000, 0x2901000, 0x16b000} } },
234 {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
235 {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
236 {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
237 {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
238 {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
239 {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
240 {{{1, 0x3000000, 0x3000400, 0x1adc00} } },
241 {{{0, 0x3100000, 0x3104000, 0x1a8000} } },
242 {{{1, 0x3200000, 0x3204000, 0x1d4000} } },
243 {{{1, 0x3300000, 0x3304000, 0x1a0000} } },
244 {{{0} } },
245 {{{1, 0x3500000, 0x3500400, 0x1ac000} } },
246 {{{1, 0x3600000, 0x3600400, 0x1ae000} } },
247 {{{1, 0x3700000, 0x3700400, 0x1ae400} } },
248 {{{1, 0x3800000, 0x3804000, 0x1d0000} } },
249 {{{1, 0x3900000, 0x3904000, 0x1b4000} } },
250 {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
251 {{{0} } },
252 {{{0} } },
253 {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
254 {{{1, 0x3e00000, 0x3e01000, 0x167000} } },
255 {{{1, 0x3f00000, 0x3f01000, 0x168000} } }
256};
257
258/*
259 * top 12 bits of crb internal address (hub, agent)
260 */
261unsigned qla82xx_crb_hub_agt[64] = {
262 0,
263 QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
264 QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
265 QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
266 0,
267 QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
268 QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
269 QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
270 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
271 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
272 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
273 QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
274 QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
275 QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
276 QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
277 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
278 QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
279 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
280 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
281 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
282 QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
283 QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
284 QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
285 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
286 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
287 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
288 QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
289 0,
290 QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
291 QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
292 0,
293 QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
294 0,
295 QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
296 QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
297 0,
298 0,
299 0,
300 0,
301 0,
302 QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
303 0,
304 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
305 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
306 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
307 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
308 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
309 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
310 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
311 QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
312 QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
313 QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
314 0,
315 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
316 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
317 QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
318 QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
319 0,
320 QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
321 QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
322 QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
323 0,
324 QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
325 0,
326};
327
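/* CRB_HI(), defined above, indexes qla82xx_crb_hub_agt[] by CRB block
 * number to build the value programmed into the 2M CRB window register.
 */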
328/* Device states */
329char *qdev_state[] = {
330 "Unknown",
331 "Cold",
332 "Initializing",
333 "Ready",
334 "Need Reset",
335 "Need Quiescent",
336 "Failed",
337 "Quiescent",
338};
339
340/*
341 * In: 'off' is offset from CRB space in 128M pci map
342 * Out: 'off' is 2M pci map addr
343 * side effect: lock crb window
344 */
345static void
346qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
347{
348 u32 win_read;
349
350 ha->crb_win = CRB_HI(*off);
351 writel(ha->crb_win,
 352	    (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
353
354 /* Read back value to make sure write has gone through before trying
355 * to use it.
356 */
 357	win_read = RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
358 if (win_read != ha->crb_win) {
359 DEBUG2(qla_printk(KERN_INFO, ha,
360 "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
361 "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
362 }
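	/* Rebase the 128M-view offset into the 2M BAR: keep the low 16
	 * bits (position within the selected window) and add the
	 * indirect-access region of the mapped PCI space.
	 */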
363 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
364}
365
366static inline unsigned long
367qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
368{
369 /* See if we are currently pointing to the region we want to use next */
370 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
 371		/* No need to change window. PCIX and PCIE regs are
 372		 * in both windows.
 373		 */
374 return off;
375 }
376
377 if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
378 /* We are in first CRB window */
379 if (ha->curr_window != 0)
380 WARN_ON(1);
381 return off;
382 }
383
384 if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
385 /* We are in second CRB window */
386 off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
387
388 if (ha->curr_window != 1)
389 return off;
390
391 /* We are in the QM or direct access
392 * register region - do nothing
393 */
394 if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
395 (off < QLA82XX_PCI_CAMQM_MAX))
396 return off;
397 }
398 /* strange address given */
399 qla_printk(KERN_WARNING, ha,
 400	    "%s: Warning: qla82xx_pci_set_crbwindow called with"
 401	    " an unknown address (%llx)\n", QLA2XXX_DRIVER_NAME, off);
402 return off;
403}
404
405int
406qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
407{
408 unsigned long flags = 0;
409 int rv;
410
411 rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
412
413 BUG_ON(rv == -1);
414
415 if (rv == 1) {
416 write_lock_irqsave(&ha->hw_lock, flags);
417 qla82xx_crb_win_lock(ha);
418 qla82xx_pci_set_crbwindow_2M(ha, &off);
419 }
420
421 writel(data, (void __iomem *)off);
422
423 if (rv == 1) {
424 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
425 write_unlock_irqrestore(&ha->hw_lock, flags);
426 }
427 return 0;
428}
429
430int
431qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
432{
433 unsigned long flags = 0;
434 int rv;
435 u32 data;
436
437 rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
438
439 BUG_ON(rv == -1);
440
441 if (rv == 1) {
442 write_lock_irqsave(&ha->hw_lock, flags);
443 qla82xx_crb_win_lock(ha);
444 qla82xx_pci_set_crbwindow_2M(ha, &off);
445 }
446 data = RD_REG_DWORD((void __iomem *)off);
447
448 if (rv == 1) {
449 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
450 write_unlock_irqrestore(&ha->hw_lock, flags);
451 }
452 return data;
453}
454
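/* Hardware semaphore convention used below: reading a PCIe SEM*_LOCK
 * register returns 1 once the semaphore has been acquired, and reading
 * the matching SEM*_UNLOCK register releases it; the new owner then
 * records its identity (port number or driver ID) in a scratch
 * register for debugging.
 */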
455#define CRB_WIN_LOCK_TIMEOUT 100000000
456int qla82xx_crb_win_lock(struct qla_hw_data *ha)
457{
458 int done = 0, timeout = 0;
459
460 while (!done) {
 461		/* acquire semaphore7 from PCI HW block */
462 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
463 if (done == 1)
464 break;
465 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
466 return -1;
467 timeout++;
468 }
469 qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
470 return 0;
471}
472
473#define IDC_LOCK_TIMEOUT 100000000
474int qla82xx_idc_lock(struct qla_hw_data *ha)
475{
476 int i;
477 int done = 0, timeout = 0;
478
479 while (!done) {
480 /* acquire semaphore5 from PCI HW block */
481 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
482 if (done == 1)
483 break;
484 if (timeout >= IDC_LOCK_TIMEOUT)
485 return -1;
486
487 timeout++;
488
489 /* Yield CPU */
490 if (!in_interrupt())
491 schedule();
492 else {
493 for (i = 0; i < 20; i++)
494 cpu_relax();
495 }
496 }
497
498 return 0;
499}
500
501void qla82xx_idc_unlock(struct qla_hw_data *ha)
502{
503 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
504}
505
506int
507qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
508{
509 struct crb_128M_2M_sub_block_map *m;
510
511 if (*off >= QLA82XX_CRB_MAX)
512 return -1;
513
514 if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
515 *off = (*off - QLA82XX_PCI_CAMQM) +
516 QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
517 return 0;
518 }
519
520 if (*off < QLA82XX_PCI_CRBSPACE)
521 return -1;
522
523 *off -= QLA82XX_PCI_CRBSPACE;
524
525 /* Try direct map */
526 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
527
528 if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
529 *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
530 return 0;
531 }
532 /* Not in direct map, use crb window */
533 return 1;
534}
535
536/* PCI Windowing for DDR regions. */
537#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
538 (((addr) <= (high)) && ((addr) >= (low)))
539/*
 540 * Check memory access boundary.
 541 * Used by the test agent; supports DDR access only for now.
542 */
543static unsigned long
544qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
545 unsigned long long addr, int size)
546{
547 if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
548 QLA82XX_ADDR_DDR_NET_MAX) ||
549 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
550 QLA82XX_ADDR_DDR_NET_MAX) ||
551 ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
552 return 0;
553 else
554 return 1;
555}
556
557int qla82xx_pci_set_window_warning_count;
558
559unsigned long
560qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
561{
562 int window;
563 u32 win_read;
564
565 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
566 QLA82XX_ADDR_DDR_NET_MAX)) {
567 /* DDR network side */
568 window = MN_WIN(addr);
569 ha->ddr_mn_window = window;
570 qla82xx_wr_32(ha,
571 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
572 win_read = qla82xx_rd_32(ha,
573 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
574 if ((win_read << 17) != window) {
575 qla_printk(KERN_WARNING, ha,
576 "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
577 __func__, window, win_read);
578 }
579 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
580 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
581 QLA82XX_ADDR_OCM0_MAX)) {
582 unsigned int temp1;
583 if ((addr & 0x00ff800) == 0xff800) {
584 qla_printk(KERN_WARNING, ha,
585 "%s: QM access not handled.\n", __func__);
586 addr = -1UL;
587 }
588 window = OCM_WIN(addr);
589 ha->ddr_mn_window = window;
590 qla82xx_wr_32(ha,
591 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
592 win_read = qla82xx_rd_32(ha,
593 ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
594 temp1 = ((window & 0x1FF) << 7) |
595 ((window & 0x0FFFE0000) >> 17);
596 if (win_read != temp1) {
597 qla_printk(KERN_WARNING, ha,
598 "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
599 __func__, temp1, win_read);
600 }
601 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
602
603 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
604 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
605 /* QDR network side */
606 window = MS_WIN(addr);
607 ha->qdr_sn_window = window;
608 qla82xx_wr_32(ha,
609 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
610 win_read = qla82xx_rd_32(ha,
611 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
612 if (win_read != window) {
613 qla_printk(KERN_WARNING, ha,
614 "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
615 __func__, window, win_read);
616 }
617 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
618 } else {
619 /*
 620		 * peg gdb frequently accesses memory that doesn't exist;
 621		 * this limits the chit-chat so debugging isn't slowed down.
622 */
623 if ((qla82xx_pci_set_window_warning_count++ < 8) ||
624 (qla82xx_pci_set_window_warning_count%64 == 0)) {
625 qla_printk(KERN_WARNING, ha,
 626			    "%s: Warning: %s Unknown address range!\n", __func__,
627 QLA2XXX_DRIVER_NAME);
628 }
629 addr = -1UL;
630 }
631 return addr;
632}
633
 634/* check if address is in the same window as the previous access */
635static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
636 unsigned long long addr)
637{
638 int window;
639 unsigned long long qdr_max;
640
641 qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
642
643 /* DDR network side */
644 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
645 QLA82XX_ADDR_DDR_NET_MAX))
646 BUG();
647 else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
648 QLA82XX_ADDR_OCM0_MAX))
649 return 1;
650 else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
651 QLA82XX_ADDR_OCM1_MAX))
652 return 1;
653 else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
654 /* QDR network side */
655 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
656 if (ha->qdr_sn_window == window)
657 return 1;
658 }
659 return 0;
660}
661
662static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
663 u64 off, void *data, int size)
664{
665 unsigned long flags;
666 void *addr = NULL;
667 int ret = 0;
668 u64 start;
669 uint8_t *mem_ptr = NULL;
670 unsigned long mem_base;
671 unsigned long mem_page;
672
673 write_lock_irqsave(&ha->hw_lock, flags);
674
675 /*
676 * If attempting to access unknown address or straddle hw windows,
677 * do not access.
678 */
679 start = qla82xx_pci_set_window(ha, off);
680 if ((start == -1UL) ||
681 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
682 write_unlock_irqrestore(&ha->hw_lock, flags);
683 qla_printk(KERN_ERR, ha,
684 "%s out of bound pci memory access. "
685 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
686 return -1;
687 }
688
689 write_unlock_irqrestore(&ha->hw_lock, flags);
690 mem_base = pci_resource_start(ha->pdev, 0);
691 mem_page = start & PAGE_MASK;
 692	/* Map two pages whenever the user tries to access addresses in
 693	 * two consecutive pages.
694 */
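	/* e.g. with 4 KB pages, start = 0xffc and size = 8 touches bytes
	 * 0xffc-0x1003 and therefore needs both pages mapped.
	 */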
695 if (mem_page != ((start + size - 1) & PAGE_MASK))
696 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
697 else
698 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
 699	if (!mem_ptr) {
700 *(u8 *)data = 0;
701 return -1;
702 }
703 addr = mem_ptr;
704 addr += start & (PAGE_SIZE - 1);
705 write_lock_irqsave(&ha->hw_lock, flags);
706
707 switch (size) {
708 case 1:
709 *(u8 *)data = readb(addr);
710 break;
711 case 2:
712 *(u16 *)data = readw(addr);
713 break;
714 case 4:
715 *(u32 *)data = readl(addr);
716 break;
717 case 8:
718 *(u64 *)data = readq(addr);
719 break;
720 default:
721 ret = -1;
722 break;
723 }
724 write_unlock_irqrestore(&ha->hw_lock, flags);
725
726 if (mem_ptr)
727 iounmap(mem_ptr);
728 return ret;
729}
730
731static int
732qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
733 u64 off, void *data, int size)
734{
735 unsigned long flags;
736 void *addr = NULL;
737 int ret = 0;
738 u64 start;
739 uint8_t *mem_ptr = NULL;
740 unsigned long mem_base;
741 unsigned long mem_page;
742
743 write_lock_irqsave(&ha->hw_lock, flags);
744
745 /*
746 * If attempting to access unknown address or straddle hw windows,
747 * do not access.
748 */
749 start = qla82xx_pci_set_window(ha, off);
750 if ((start == -1UL) ||
751 (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
752 write_unlock_irqrestore(&ha->hw_lock, flags);
753 qla_printk(KERN_ERR, ha,
754 "%s out of bound pci memory access. "
755 "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
756 return -1;
757 }
758
759 write_unlock_irqrestore(&ha->hw_lock, flags);
760 mem_base = pci_resource_start(ha->pdev, 0);
761 mem_page = start & PAGE_MASK;
 762	/* Map two pages whenever the user tries to access addresses in
 763	 * two consecutive pages.
764 */
765 if (mem_page != ((start + size - 1) & PAGE_MASK))
766 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
767 else
768 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
 769	if (!mem_ptr)
770 return -1;
771
772 addr = mem_ptr;
773 addr += start & (PAGE_SIZE - 1);
774 write_lock_irqsave(&ha->hw_lock, flags);
775
776 switch (size) {
777 case 1:
778 writeb(*(u8 *)data, addr);
779 break;
780 case 2:
781 writew(*(u16 *)data, addr);
782 break;
783 case 4:
784 writel(*(u32 *)data, addr);
785 break;
786 case 8:
787 writeq(*(u64 *)data, addr);
788 break;
789 default:
790 ret = -1;
791 break;
792 }
793 write_unlock_irqrestore(&ha->hw_lock, flags);
794 if (mem_ptr)
795 iounmap(mem_ptr);
796 return ret;
797}
798
799int
800qla82xx_wrmem(struct qla_hw_data *ha, u64 off, void *data, int size)
801{
802 int i, j, ret = 0, loop, sz[2], off0;
803 u32 temp;
804 u64 off8, mem_crb, tmpw, word[2] = {0, 0};
805#define MAX_CTL_CHECK 1000
806 /*
807 * If not MN, go check for MS or invalid.
808 */
809 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) {
810 mem_crb = QLA82XX_CRB_QDR_NET;
811 } else {
812 mem_crb = QLA82XX_CRB_DDR_NET;
813 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
814 return qla82xx_pci_mem_write_direct(ha, off,
815 data, size);
816 }
817
818 off8 = off & 0xfffffff8;
819 off0 = off & 0x7;
820 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
821 sz[1] = size - sz[0];
822 loop = ((off0 + size - 1) >> 3) + 1;
823
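	/* Worked example: off = 0x1006, size = 4 gives off8 = 0x1000,
	 * off0 = 6, sz[0] = 2, sz[1] = 2 and loop = 2; the write spans
	 * two 8-byte words, so both are read back first and merged below.
	 */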
824 if ((size != 8) || (off0 != 0)) {
825 for (i = 0; i < loop; i++) {
826 if (qla82xx_rdmem(ha, off8 + (i << 3), &word[i], 8))
827 return -1;
828 }
829 }
830
831 switch (size) {
832 case 1:
833 tmpw = *((u8 *)data);
834 break;
835 case 2:
836 tmpw = *((u16 *)data);
837 break;
838 case 4:
839 tmpw = *((u32 *)data);
840 break;
841 case 8:
842 default:
843 tmpw = *((u64 *)data);
844 break;
845 }
846
847 word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
848 word[0] |= tmpw << (off0 * 8);
849
850 if (loop == 2) {
851 word[1] &= ~(~0ULL << (sz[1] * 8));
852 word[1] |= tmpw >> (sz[0] * 8);
853 }
854
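	/* MIU test-agent handshake, one 8-byte word per iteration:
	 * program the 64-bit target address, load the write data, arm
	 * the control register with ENABLE|WRITE, kick it with
	 * START|ENABLE|WRITE, then poll until the BUSY bit clears
	 * (giving up after MAX_CTL_CHECK polls).
	 */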
855 for (i = 0; i < loop; i++) {
856 temp = off8 + (i << 3);
857 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
858 temp = 0;
859 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
860 temp = word[i] & 0xffffffff;
861 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
862 temp = (word[i] >> 32) & 0xffffffff;
863 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
864 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
865 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
866 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
867 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
868
869 for (j = 0; j < MAX_CTL_CHECK; j++) {
870 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
871 if ((temp & MIU_TA_CTL_BUSY) == 0)
872 break;
873 }
874
875 if (j >= MAX_CTL_CHECK) {
876 qla_printk(KERN_WARNING, ha,
877 "%s: Fail to write through agent\n",
878 QLA2XXX_DRIVER_NAME);
879 ret = -1;
880 break;
881 }
882 }
883 return ret;
884}
885
886int
887qla82xx_rdmem(struct qla_hw_data *ha, u64 off, void *data, int size)
888{
889 int i, j = 0, k, start, end, loop, sz[2], off0[2];
890 u32 temp;
891 u64 off8, val, mem_crb, word[2] = {0, 0};
892#define MAX_CTL_CHECK 1000
893
894 /*
895 * If not MN, go check for MS or invalid.
896 */
897 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
898 mem_crb = QLA82XX_CRB_QDR_NET;
899 else {
900 mem_crb = QLA82XX_CRB_DDR_NET;
901 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
902 return qla82xx_pci_mem_read_direct(ha, off,
903 data, size);
904 }
905
906 off8 = off & 0xfffffff8;
907 off0[0] = off & 0x7;
908 off0[1] = 0;
909 sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
910 sz[1] = size - sz[0];
911 loop = ((off0[0] + size - 1) >> 3) + 1;
912
913 for (i = 0; i < loop; i++) {
914 temp = off8 + (i << 3);
915 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
916 temp = 0;
917 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
918 temp = MIU_TA_CTL_ENABLE;
919 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
920 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
921 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
922
923 for (j = 0; j < MAX_CTL_CHECK; j++) {
924 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
925 if ((temp & MIU_TA_CTL_BUSY) == 0)
926 break;
927 }
928
929 if (j >= MAX_CTL_CHECK) {
930 qla_printk(KERN_INFO, ha,
931 "%s: Fail to read through agent\n",
932 QLA2XXX_DRIVER_NAME);
933 break;
934 }
935
936 start = off0[i] >> 2;
937 end = (off0[i] + sz[i] - 1) >> 2;
938 for (k = start; k <= end; k++) {
939 temp = qla82xx_rd_32(ha,
940 mem_crb + MIU_TEST_AGT_RDDATA(k));
941 word[i] |= ((u64)temp << (32 * k));
942 }
943 }
944
945 if (j >= MAX_CTL_CHECK)
946 return -1;
947
948 if (sz[0] == 8) {
949 val = word[0];
950 } else {
951 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
952 ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
953 }
954
955 switch (size) {
956 case 1:
957 *(u8 *)data = val;
958 break;
959 case 2:
960 *(u16 *)data = val;
961 break;
962 case 4:
963 *(u32 *)data = val;
964 break;
965 case 8:
966 *(u64 *)data = val;
967 break;
968 }
969 return 0;
970}
971
972#define MTU_FUDGE_FACTOR 100
973unsigned long qla82xx_decode_crb_addr(unsigned long addr)
974{
975 int i;
976 unsigned long base_addr, offset, pci_base;
977
978 if (!qla82xx_crb_table_initialized)
979 qla82xx_crb_addr_transform_setup();
980
981 pci_base = ADDR_ERROR;
982 base_addr = addr & 0xfff00000;
983 offset = addr & 0x000fffff;
984
985 for (i = 0; i < MAX_CRB_XFORM; i++) {
986 if (crb_addr_xform[i] == base_addr) {
987 pci_base = i << 20;
988 break;
989 }
990 }
991 if (pci_base == ADDR_ERROR)
992 return pci_base;
993 return pci_base + offset;
994}
995
996static long rom_max_timeout = 100;
997static long qla82xx_rom_lock_timeout = 100;
998
999int
1000qla82xx_rom_lock(struct qla_hw_data *ha)
1001{
1002 int done = 0, timeout = 0;
1003
1004 while (!done) {
1005 /* acquire semaphore2 from PCI HW block */
1006 done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
1007 if (done == 1)
1008 break;
1009 if (timeout >= qla82xx_rom_lock_timeout)
1010 return -1;
1011 timeout++;
1012 }
1013 qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
1014 return 0;
1015}
1016
1017int
1018qla82xx_wait_rom_busy(struct qla_hw_data *ha)
1019{
1020 long timeout = 0;
 1021	long done = 0;
1022
1023 while (done == 0) {
1024 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
1025 done &= 4;
1026 timeout++;
1027 if (timeout >= rom_max_timeout) {
1028 DEBUG(qla_printk(KERN_INFO, ha,
 1029			    "%s: Timeout reached waiting for rom busy\n",
1030 QLA2XXX_DRIVER_NAME));
1031 return -1;
1032 }
1033 }
1034 return 0;
1035}
1036
1037int
1038qla82xx_wait_rom_done(struct qla_hw_data *ha)
1039{
1040 long timeout = 0;
 1041	long done = 0;
1042
1043 while (done == 0) {
1044 done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
1045 done &= 2;
1046 timeout++;
1047 if (timeout >= rom_max_timeout) {
1048 DEBUG(qla_printk(KERN_INFO, ha,
 1049			    "%s: Timeout reached waiting for rom done\n",
1050 QLA2XXX_DRIVER_NAME));
1051 return -1;
1052 }
1053 }
1054 return 0;
1055}
1056
1057int
1058qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
1059{
1060 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
1061 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
1062 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
1063 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
1064 qla82xx_wait_rom_busy(ha);
1065 if (qla82xx_wait_rom_done(ha)) {
1066 qla_printk(KERN_WARNING, ha,
1067 "%s: Error waiting for rom done\n",
1068 QLA2XXX_DRIVER_NAME);
1069 return -1;
1070 }
1071 /* Reset abyte_cnt and dummy_byte_cnt */
1072 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
1073 udelay(10);
1074 cond_resched();
1075 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
1076 *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
1077 return 0;
1078}
1079
1080int
1081qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
1082{
1083 int ret, loops = 0;
1084
1085 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1086 udelay(100);
1087 schedule();
1088 loops++;
1089 }
1090 if (loops >= 50000) {
1091 qla_printk(KERN_INFO, ha,
1092 "%s: qla82xx_rom_lock failed\n",
1093 QLA2XXX_DRIVER_NAME);
1094 return -1;
1095 }
1096 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
1097 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
1098 return ret;
1099}
1100
1101int
1102qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
1103{
1104 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
1105 qla82xx_wait_rom_busy(ha);
1106 if (qla82xx_wait_rom_done(ha)) {
1107 qla_printk(KERN_WARNING, ha,
1108 "Error waiting for rom done\n");
1109 return -1;
1110 }
1111 *val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
1112 return 0;
1113}
1114
1115int
1116qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
1117{
1118 long timeout = 0;
 1119	uint32_t done = 1;
1120 uint32_t val;
1121 int ret = 0;
1122
1123 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
1124 while ((done != 0) && (ret == 0)) {
1125 ret = qla82xx_read_status_reg(ha, &val);
1126 done = val & 1;
1127 timeout++;
1128 udelay(10);
1129 cond_resched();
1130 if (timeout >= 50000) {
1131 qla_printk(KERN_WARNING, ha,
 1132			    "Timeout reached waiting for write finish\n");
1133 return -1;
1134 }
1135 }
1136 return ret;
1137}
1138
1139int
1140qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
1141{
1142 uint32_t val;
1143 qla82xx_wait_rom_busy(ha);
1144 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
1145 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
1146 qla82xx_wait_rom_busy(ha);
1147 if (qla82xx_wait_rom_done(ha))
1148 return -1;
1149 if (qla82xx_read_status_reg(ha, &val) != 0)
1150 return -1;
1151 if ((val & 2) != 2)
1152 return -1;
1153 return 0;
1154}
1155
1156int
1157qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
1158{
1159 if (qla82xx_flash_set_write_enable(ha))
1160 return -1;
1161 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
1162 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
1163 if (qla82xx_wait_rom_done(ha)) {
1164 qla_printk(KERN_WARNING, ha,
1165 "Error waiting for rom done\n");
1166 return -1;
1167 }
1168 return qla82xx_flash_wait_write_finish(ha);
1169}
1170
1171int
1172qla82xx_write_disable_flash(struct qla_hw_data *ha)
1173{
1174 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
1175 if (qla82xx_wait_rom_done(ha)) {
1176 qla_printk(KERN_WARNING, ha,
1177 "Error waiting for rom done\n");
1178 return -1;
1179 }
1180 return 0;
1181}
1182
1183int
1184ql82xx_rom_lock_d(struct qla_hw_data *ha)
1185{
1186 int loops = 0;
1187 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
1188 udelay(100);
1189 cond_resched();
1190 loops++;
1191 }
1192 if (loops >= 50000) {
1193 qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
1194 return -1;
1195 }
 1196	return 0;
1197}
1198
1199int
1200qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
1201 uint32_t data)
1202{
1203 int ret = 0;
1204
1205 ret = ql82xx_rom_lock_d(ha);
1206 if (ret < 0) {
1207 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
1208 return ret;
1209 }
1210
1211 if (qla82xx_flash_set_write_enable(ha))
1212 goto done_write;
1213
1214 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
1215 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
1216 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
1217 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
1218 qla82xx_wait_rom_busy(ha);
1219 if (qla82xx_wait_rom_done(ha)) {
1220 qla_printk(KERN_WARNING, ha,
1221 "Error waiting for rom done\n");
1222 ret = -1;
1223 goto done_write;
1224 }
1225
1226 ret = qla82xx_flash_wait_write_finish(ha);
1227
1228done_write:
1229 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
1230 return ret;
1231}
1232
 1233/* This routine performs the CRB initialization sequence
 1234 * to put the ISP into an operational state.
 1235 */
1236int qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1237{
1238 int addr, val;
1239 int i ;
1240 struct crb_addr_pair *buf;
1241 unsigned long off;
1242 unsigned offset, n;
1243 struct qla_hw_data *ha = vha->hw;
1244
1245 struct crb_addr_pair {
1246 long addr;
1247 long data;
1248 };
1249
 1250	/* Halt all the individual PEGs and other blocks of the ISP */
1251 qla82xx_rom_lock(ha);
1252 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
1253 /* don't reset CAM block on reset */
1254 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1255 else
1256 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1257 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
1258
 1259	/* Read the signature value from the flash.
 1260	 * Offset 0: Contains the signature (0xcafecafe)
 1261	 * Offset 4: Offset and number of addr/value pairs
 1262	 * that are present in the CRB initialize sequence
 1263	 */
1264 if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1265 qla82xx_rom_fast_read(ha, 4, &n) != 0) {
1266 qla_printk(KERN_WARNING, ha,
1267 "[ERROR] Reading crb_init area: n: %08x\n", n);
1268 return -1;
1269 }
1270
1271 /* Offset in flash = lower 16 bits
 1272	 * Number of entries = upper 16 bits
1273 */
1274 offset = n & 0xffffU;
1275 n = (n >> 16) & 0xffffU;
1276
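	/* e.g. n == 0x00400010 decodes to 0x40 (64) addr/value pairs
	 * starting at flash word offset 0x10; each pair is fetched below
	 * as two 32-bit reads at 8*i + 4*offset.
	 */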
 1277	/* the number of addr/value pairs should not exceed 1024 entries */
1278 if (n >= 1024) {
1279 qla_printk(KERN_WARNING, ha,
1280 "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
1281 QLA2XXX_DRIVER_NAME, __func__, n);
1282 return -1;
1283 }
1284
1285 qla_printk(KERN_INFO, ha,
1286 "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);
1287
1288 buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
1289 if (buf == NULL) {
1290 qla_printk(KERN_WARNING, ha,
1291 "%s: [ERROR] Unable to malloc memory.\n",
1292 QLA2XXX_DRIVER_NAME);
1293 return -1;
1294 }
1295
1296 for (i = 0; i < n; i++) {
1297 if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1298 qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
1299 kfree(buf);
1300 return -1;
1301 }
1302
1303 buf[i].addr = addr;
1304 buf[i].data = val;
1305 }
1306
1307 for (i = 0; i < n; i++) {
1308 /* Translate internal CRB initialization
1309 * address to PCI bus address
1310 */
1311 off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1312 QLA82XX_PCI_CRBSPACE;
 1313		/* Not all CRB addr/value pairs are to be written;
 1314		 * some of them are skipped.
 1315		 */
1316
1317 /* skipping cold reboot MAGIC */
1318 if (off == QLA82XX_CAM_RAM(0x1fc))
1319 continue;
1320
1321 /* do not reset PCI */
1322 if (off == (ROMUSB_GLB + 0xbc))
1323 continue;
1324
1325 /* skip core clock, so that firmware can increase the clock */
1326 if (off == (ROMUSB_GLB + 0xc8))
1327 continue;
1328
1329 /* skip the function enable register */
1330 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
1331 continue;
1332
1333 if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
1334 continue;
1335
1336 if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
1337 continue;
1338
1339 if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
1340 continue;
1341
1342 if (off == ADDR_ERROR) {
1343 qla_printk(KERN_WARNING, ha,
1344 "%s: [ERROR] Unknown addr: 0x%08lx\n",
1345 QLA2XXX_DRIVER_NAME, buf[i].addr);
1346 continue;
1347 }
1348
1349 if (off == (QLA82XX_CRB_PEG_NET_1 + 0x18)) {
1350 if (!QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision))
1351 buf[i].data = 0x1020;
1352 }
1353
1354 qla82xx_wr_32(ha, off, buf[i].data);
1355
 1356		/* The ISP requires a much bigger delay to settle down,
 1357		 * else crb_window returns 0xffffffff.
 1358		 */
1359 if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
1360 msleep(1000);
1361
 1362		/* The ISP requires a millisecond delay between
 1363		 * successive CRB register updates.
 1364		 */
1365 msleep(1);
1366 }
1367
1368 kfree(buf);
1369
1370 /* Resetting the data and instruction cache */
1371 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1372 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1373 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1374
1375 /* Clear all protocol processing engines */
1376 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
1377 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
1378 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
1379 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
1380 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
1381 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
1382 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1383 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1384 return 0;
1385}
1386
1387int qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
1388{
1389 u32 val = 0;
1390 val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
1391 val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
1392 if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
1393 qla_printk(KERN_INFO, ha,
1394 "Memory DIMM SPD not programmed. "
 1395		    "Assumed valid.\n");
1396 return 1;
1397 } else if (val) {
1398 qla_printk(KERN_INFO, ha,
 1399		    "Memory DIMM type incorrect. Info:%08X.\n", val);
1400 return 2;
1401 }
1402 return 0;
1403}
1404
1405int
1406qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
1407{
1408 int i;
1409 long size = 0;
1410 long flashaddr = BOOTLD_START, memaddr = BOOTLD_START;
1411 u64 data;
1412 u32 high, low;
1413 size = (IMAGE_START - BOOTLD_START) / 8;
1414
1415 for (i = 0; i < size; i++) {
1416 if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1417 (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
1418 return -1;
1419 }
1420 data = ((u64)high << 32) | low ;
1421 qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1422 flashaddr += 8;
1423 memaddr += 8;
1424
1425 if (i % 0x1000 == 0)
1426 msleep(1);
1427 }
1428 udelay(100);
1429 read_lock(&ha->hw_lock);
1430 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
1431 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1432 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1433 } else {
1434 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d);
1435 }
1436 read_unlock(&ha->hw_lock);
1437 return 0;
1438}
1439
1440int
1441qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
1442 u64 off, void *data, int size)
1443{
1444 int i, j = 0, k, start, end, loop, sz[2], off0[2];
1445 int shift_amount;
1446 uint32_t temp;
1447 uint64_t off8, val, mem_crb, word[2] = {0, 0};
1448
1449 /*
1450 * If not MN, go check for MS or invalid.
1451 */
1452
1453 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1454 mem_crb = QLA82XX_CRB_QDR_NET;
1455 else {
1456 mem_crb = QLA82XX_CRB_DDR_NET;
1457 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1458 return qla82xx_pci_mem_read_direct(ha,
1459 off, data, size);
1460 }
1461
1462 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
1463 off8 = off & 0xfffffff0;
1464 off0[0] = off & 0xf;
1465 sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
1466 shift_amount = 4;
1467 } else {
1468 off8 = off & 0xfffffff8;
1469 off0[0] = off & 0x7;
1470 sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
1471 shift_amount = 4;
1472 }
1473 loop = ((off0[0] + size - 1) >> shift_amount) + 1;
1474 off0[1] = 0;
1475 sz[1] = size - sz[0];
1476
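	/* Worked example (P3+): off = 0x1006, size = 4 gives
	 * off8 = 0x1000, off0[0] = 6, sz[0] = 4, sz[1] = 0 and loop = 1;
	 * a single 16-byte agent read covers the access and the requested
	 * bytes are shifted out of word[] below.
	 */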
 1477	/*
 1478	 * Don't lock here - qla82xx_wr_32()/qla82xx_rd_32() take the
 1479	 * hw_lock themselves for each access (this scheme is inherited
 1480	 * from the netxen 128M CRB-window code).
 1481	 */
1482
1483 for (i = 0; i < loop; i++) {
1484 temp = off8 + (i << shift_amount);
1485 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1486 temp = 0;
1487 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1488 temp = MIU_TA_CTL_ENABLE;
1489 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1490 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
1491 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1492
1493 for (j = 0; j < MAX_CTL_CHECK; j++) {
1494 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1495 if ((temp & MIU_TA_CTL_BUSY) == 0)
1496 break;
1497 }
1498
1499 if (j >= MAX_CTL_CHECK) {
1500 if (printk_ratelimit())
1501 dev_err(&ha->pdev->dev,
1502 "failed to read through agent\n");
1503 break;
1504 }
1505
1506 start = off0[i] >> 2;
1507 end = (off0[i] + sz[i] - 1) >> 2;
1508 for (k = start; k <= end; k++) {
1509 temp = qla82xx_rd_32(ha,
1510 mem_crb + MIU_TEST_AGT_RDDATA(k));
1511 word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1512 }
1513 }
1514
1515 /*
1516 * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
1517 * write_unlock_irqrestore(&adapter->adapter_lock, flags);
1518 */
1519
1520 if (j >= MAX_CTL_CHECK)
1521 return -1;
1522
1523 if ((off0[0] & 7) == 0) {
1524 val = word[0];
1525 } else {
1526 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1527 ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1528 }
1529
1530 switch (size) {
1531 case 1:
1532 *(uint8_t *)data = val;
1533 break;
1534 case 2:
1535 *(uint16_t *)data = val;
1536 break;
1537 case 4:
1538 *(uint32_t *)data = val;
1539 break;
1540 case 8:
1541 *(uint64_t *)data = val;
1542 break;
1543 }
1544 return 0;
1545}
1546
1547int
1548qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
1549 u64 off, void *data, int size)
1550{
1551 int i, j, ret = 0, loop, sz[2], off0;
1552 int scale, shift_amount, p3p, startword;
1553 uint32_t temp;
1554 uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
1555
1556 /*
1557 * If not MN, go check for MS or invalid.
1558 */
1559 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1560 mem_crb = QLA82XX_CRB_QDR_NET;
1561 else {
1562 mem_crb = QLA82XX_CRB_DDR_NET;
1563 if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
1564 return qla82xx_pci_mem_write_direct(ha,
1565 off, data, size);
1566 }
1567
1568 off0 = off & 0x7;
1569 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1570 sz[1] = size - sz[0];
1571
1572 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
1573 off8 = off & 0xfffffff0;
1574 loop = (((off & 0xf) + size - 1) >> 4) + 1;
1575 shift_amount = 4;
1576 scale = 2;
1577 p3p = 1;
1578 startword = (off & 0xf)/8;
1579 } else {
1580 off8 = off & 0xfffffff8;
1581 loop = ((off0 + size - 1) >> 3) + 1;
1582 shift_amount = 3;
1583 scale = 1;
1584 p3p = 0;
1585 startword = 0;
1586 }
1587
1588 if (p3p || (size != 8) || (off0 != 0)) {
1589 for (i = 0; i < loop; i++) {
1590 if (qla82xx_pci_mem_read_2M(ha, off8 +
1591 (i << shift_amount), &word[i * scale], 8))
1592 return -1;
1593 }
1594 }
1595
1596 switch (size) {
1597 case 1:
1598 tmpw = *((uint8_t *)data);
1599 break;
1600 case 2:
1601 tmpw = *((uint16_t *)data);
1602 break;
1603 case 4:
1604 tmpw = *((uint32_t *)data);
1605 break;
1606 case 8:
1607 default:
1608 tmpw = *((uint64_t *)data);
1609 break;
1610 }
1611
1612 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
1613 if (sz[0] == 8) {
1614 word[startword] = tmpw;
1615 } else {
1616 word[startword] &=
1617 ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1618 word[startword] |= tmpw << (off0 * 8);
1619 }
1620 if (sz[1] != 0) {
1621 word[startword+1] &= ~(~0ULL << (sz[1] * 8));
1622 word[startword+1] |= tmpw >> (sz[0] * 8);
1623 }
1624 } else {
1625 word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1626 word[startword] |= tmpw << (off0 * 8);
1627
1628 if (loop == 2) {
1629 word[1] &= ~(~0ULL << (sz[1] * 8));
1630 word[1] |= tmpw >> (sz[0] * 8);
1631 }
1632 }
1633
 1634	/*
 1635	 * Don't lock here - qla82xx_wr_32()/qla82xx_rd_32() take the
 1636	 * hw_lock themselves for each access (this scheme is inherited
 1637	 * from the netxen 128M CRB-window code).
 1638	 */
1639 for (i = 0; i < loop; i++) {
1640 temp = off8 + (i << shift_amount);
1641 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1642 temp = 0;
1643 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1644 temp = word[i * scale] & 0xffffffff;
1645 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1646 temp = (word[i * scale] >> 32) & 0xffffffff;
1647 qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1648 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
1649 temp = word[i*scale + 1] & 0xffffffff;
1650 qla82xx_wr_32(ha, mem_crb +
1651 MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
1652 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1653 qla82xx_wr_32(ha, mem_crb +
1654 MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
1655 }
1656
1657 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1658 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1659 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
1660 qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1661
1662 for (j = 0; j < MAX_CTL_CHECK; j++) {
1663 temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1664 if ((temp & MIU_TA_CTL_BUSY) == 0)
1665 break;
1666 }
1667
1668 if (j >= MAX_CTL_CHECK) {
1669 if (printk_ratelimit())
1670 dev_err(&ha->pdev->dev,
1671 "failed to write through agent\n");
1672 ret = -1;
1673 break;
1674 }
1675 }
1676
1677 return ret;
1678}
1679
1680/* PCI related functions */
1681char *
1682qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1683{
1684 int pcie_reg;
1685 struct qla_hw_data *ha = vha->hw;
1686 char lwstr[6];
1687 uint16_t lnk;
1688
1689 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
1690 pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
1691 ha->link_width = (lnk >> 4) & 0x3f;
1692
1693 strcpy(str, "PCIe (");
1694 strcat(str, "2.5Gb/s ");
1695 snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1696 strcat(str, lwstr);
1697 return str;
1698}
1699
1700int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1701{
1702 unsigned long val = 0;
1703 u32 control;
1704
1705 switch (region) {
1706 case 0:
1707 val = 0;
1708 break;
1709 case 1:
1710 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1711 val = control + QLA82XX_MSIX_TBL_SPACE;
1712 break;
1713 }
1714 return val;
1715}
1716
1717int qla82xx_pci_region_len(struct pci_dev *pdev, int region)
1718{
1719 unsigned long val = 0;
1720 u32 control;
1721 switch (region) {
1722 case 0:
1723 pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
1724 val = control;
1725 break;
1726 case 1:
1727 val = pci_resource_len(pdev, 0) -
1728 qla82xx_pci_region_offset(pdev, 1);
1729 break;
1730 }
1731 return val;
1732}
1733
1734int
1735qla82xx_iospace_config(struct qla_hw_data *ha)
1736{
1737 uint32_t len = 0;
1738
1739 if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
1740 qla_printk(KERN_WARNING, ha,
1741 "Failed to reserve selected regions (%s)\n",
1742 pci_name(ha->pdev));
1743 goto iospace_error_exit;
1744 }
1745
1746 /* Use MMIO operations for all accesses. */
1747 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1748 qla_printk(KERN_ERR, ha,
1749 "region #0 not an MMIO resource (%s), aborting\n",
1750 pci_name(ha->pdev));
1751 goto iospace_error_exit;
1752 }
1753
1754 len = pci_resource_len(ha->pdev, 0);
1755 ha->nx_pcibase =
1756 (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
1757 if (!ha->nx_pcibase) {
1758 qla_printk(KERN_ERR, ha,
1759 "cannot remap pcibase MMIO (%s), aborting\n",
1760 pci_name(ha->pdev));
1761 pci_release_regions(ha->pdev);
1762 goto iospace_error_exit;
1763 }
1764
1765 /* Mapping of IO base pointer */
1766 ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
1767 0xbc000 + (ha->pdev->devfn << 11));
1768
1769 if (!ql2xdbwr) {
1770 ha->nxdb_wr_ptr =
1771 (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
1772 (ha->pdev->devfn << 12)), 4);
1773 if (!ha->nxdb_wr_ptr) {
1774 qla_printk(KERN_ERR, ha,
1775 "cannot remap MMIO (%s), aborting\n",
1776 pci_name(ha->pdev));
1777 pci_release_regions(ha->pdev);
1778 goto iospace_error_exit;
1779 }
1780
 1781		/* Mapping of IO base pointer,
 1782		 * doorbell read and write pointers.
 1783		 */
1784 ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
1785 (ha->pdev->devfn * 8);
1786 } else {
1787 ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
1788 QLA82XX_CAMRAM_DB1 :
1789 QLA82XX_CAMRAM_DB2);
1790 }
1791
1792 ha->max_req_queues = ha->max_rsp_queues = 1;
1793 ha->msix_count = ha->max_rsp_queues + 1;
1794 return 0;
1795
1796iospace_error_exit:
1797 return -ENOMEM;
1798}
1799
1800/* GS related functions */
1801
1802/* Initialization related functions */
1803
1804/**
1805 * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
 1806 * @vha: HA context
1807 *
1808 * Returns 0 on success.
 1809 */
1810int
1811qla82xx_pci_config(scsi_qla_host_t *vha)
1812{
1813 struct qla_hw_data *ha = vha->hw;
1814 int ret;
1815
1816 pci_set_master(ha->pdev);
1817 ret = pci_set_mwi(ha->pdev);
1818 ha->chip_revision = ha->pdev->revision;
1819 return 0;
1820}
1821
1822/**
 1823 * qla82xx_reset_chip() - Disable adapter interrupts ahead of an ISP reset.
 1824 * @vha: HA context
 1825 *
 1826 * The reset itself is driven elsewhere; this only disables interrupts.
1827 */
1828void
1829qla82xx_reset_chip(scsi_qla_host_t *vha)
1830{
1831 struct qla_hw_data *ha = vha->hw;
1832 ha->isp_ops->disable_intrs(ha);
1833}
1834
1835void qla82xx_config_rings(struct scsi_qla_host *vha)
1836{
1837 struct qla_hw_data *ha = vha->hw;
1838 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1839 struct init_cb_81xx *icb;
1840 struct req_que *req = ha->req_q_map[0];
1841 struct rsp_que *rsp = ha->rsp_q_map[0];
1842
1843 /* Setup ring parameters in initialization control block. */
1844 icb = (struct init_cb_81xx *)ha->init_cb;
1845 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1846 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1847 icb->request_q_length = cpu_to_le16(req->length);
1848 icb->response_q_length = cpu_to_le16(rsp->length);
1849 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1850 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1851 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1852 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1853
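	/* Fixed initialization defaults: the standard 2112-byte FC frame
	 * payload and conservative throttle/exchange/login-retry values.
	 */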
1854 icb->version = 1;
1855 icb->frame_payload_size = 2112;
1856 icb->execution_throttle = 8;
1857 icb->exchange_count = 128;
1858 icb->login_retry_count = 8;
1859
1860 WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
1861 WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
1862 WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
1863}
1864
1865void qla82xx_reset_adapter(struct scsi_qla_host *vha)
1866{
1867 struct qla_hw_data *ha = vha->hw;
1868 vha->flags.online = 0;
1869 qla2x00_try_to_stop_firmware(vha);
1870 ha->isp_ops->disable_intrs(ha);
1871}
1872
1873int qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
1874{
1875 u64 *ptr64;
1876 u32 i, flashaddr, size;
1877 __le64 data;
1878
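	/* The blob carries a boot loader at BOOTLD_START followed by the
	 * firmware image at IMAGE_START; both are copied into adapter
	 * memory eight bytes at a time.
	 */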
1879 size = (IMAGE_START - BOOTLD_START) / 8;
1880
1881 ptr64 = (u64 *)&ha->hablob->fw->data[BOOTLD_START];
1882 flashaddr = BOOTLD_START;
1883
1884 for (i = 0; i < size; i++) {
1885 data = cpu_to_le64(ptr64[i]);
1886 qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8);
1887 flashaddr += 8;
1888 }
1889
1890 size = *(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET];
1891 size = (__force u32)cpu_to_le32(size) / 8;
1892 ptr64 = (u64 *)&ha->hablob->fw->data[IMAGE_START];
1893 flashaddr = FLASH_ADDR_START;
1894
1895 for (i = 0; i < size; i++) {
1896 data = cpu_to_le64(ptr64[i]);
1897
1898 if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
1899 return -EIO;
1900 flashaddr += 8;
1901 }
1902
1903 /* Write a magic value to CAMRAM register
1904 * at a specified offset to indicate
1905 * that all data is written and
1906 * ready for firmware to initialize.
1907 */
1908 qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), 0x12345678);
1909
1910 if (QLA82XX_IS_REVISION_P3PLUS(ha->chip_revision)) {
1911 qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1912 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1913 } else
1914 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001d);
1915 return 0;
1916}
1917
1918int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
1919{
1920 u32 val = 0;
1921 int retries = 60;
1922
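	/* Poll the command-peg handshake word for up to 30 seconds
	 * (60 retries x 500 ms); the firmware posts
	 * PHAN_INITIALIZE_COMPLETE/ACK once it is up.
	 */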
1923 do {
1924 read_lock(&ha->hw_lock);
1925 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1926 read_unlock(&ha->hw_lock);
1927
1928 switch (val) {
1929 case PHAN_INITIALIZE_COMPLETE:
1930 case PHAN_INITIALIZE_ACK:
1931 return QLA_SUCCESS;
1932 case PHAN_INITIALIZE_FAILED:
1933 break;
1934 default:
1935 break;
1936 }
1937 qla_printk(KERN_WARNING, ha,
1938		    "CRB_CMDPEG_STATE: 0x%x, retries left: %d\n",
1939		    val, retries);
1940
1941 msleep(500);
1942
1943 } while (--retries);
1944
1945 qla_printk(KERN_INFO, ha,
1946 "Cmd Peg initialization failed: 0x%x.\n", val);
1947
1948 qla82xx_check_for_bad_spd(ha);
1949 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1950 read_lock(&ha->hw_lock);
1951 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1952 read_unlock(&ha->hw_lock);
1953 return QLA_FUNCTION_FAILED;
1954}
1955
1956int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
1957{
1958 u32 val = 0;
1959 int retries = 60;
1960
1961 do {
1962 read_lock(&ha->hw_lock);
1963 val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1964 read_unlock(&ha->hw_lock);
1965
1966 switch (val) {
1967 case PHAN_INITIALIZE_COMPLETE:
1968 case PHAN_INITIALIZE_ACK:
1969 return QLA_SUCCESS;
1970 case PHAN_INITIALIZE_FAILED:
1971 break;
1972 default:
1973 break;
1974 }
1975
1976 qla_printk(KERN_WARNING, ha,
1977		    "CRB_RCVPEG_STATE: 0x%x, retries left: %d\n",
1978		    val, retries);
1979
1980 msleep(500);
1981
1982 } while (--retries);
1983
1984 qla_printk(KERN_INFO, ha,
1985 "Rcv Peg initialization failed: 0x%x.\n", val);
1986 read_lock(&ha->hw_lock);
1987 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1988 read_unlock(&ha->hw_lock);
1989 return QLA_FUNCTION_FAILED;
1990}
1991
1992/* ISR related functions */
1993uint32_t qla82xx_isr_int_target_mask_enable[8] = {
1994 ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
1995 ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
1996 ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
1997	ISR_INT_TARGET_MASK_F6, ISR_INT_TARGET_MASK_F7
1998};
1999
2000uint32_t qla82xx_isr_int_target_status[8] = {
2001 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
2002 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
2003 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
2004	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
2005};
2006
2007static struct qla82xx_legacy_intr_set legacy_intr[] = \
2008 QLA82XX_LEGACY_INTR_CONFIG;
2009
2010/*
2011 * qla82xx_mbx_completion() - Process mailbox command completions.
2012 * @vha: SCSI driver HA context
2013 * @mb0: Mailbox0 register
2014 */
2015void
2016qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2017{
2018 uint16_t cnt;
2019 uint16_t __iomem *wptr;
2020 struct qla_hw_data *ha = vha->hw;
2021 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2022 wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
2023
2024 /* Load return mailbox registers. */
2025 ha->flags.mbox_int = 1;
2026 ha->mailbox_out[0] = mb0;
2027
2028 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2029 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2030 wptr++;
2031 }
2032
2033 if (ha->mcp) {
2034 DEBUG3_11(printk(KERN_INFO "%s(%ld): "
2035 "Got mailbox completion. cmd=%x.\n",
2036 __func__, vha->host_no, ha->mcp->mb[0]));
2037 } else {
2038 qla_printk(KERN_INFO, ha,
2039 "%s(%ld): MBX pointer ERROR!\n",
2040 __func__, vha->host_no);
2041 }
2042}
2043
2044/*
2045 * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
2046 * @irq: interrupt number
2047 * @dev_id: SCSI driver HA context (response queue)
2048 *
2049 * Called by the system whenever the host adapter generates
2050 * an interrupt.
2051 *
2052 * Returns handled flag.
2053 */
2054irqreturn_t
2055qla82xx_intr_handler(int irq, void *dev_id)
2056{
2057 scsi_qla_host_t *vha;
2058 struct qla_hw_data *ha;
2059 struct rsp_que *rsp;
2060 struct device_reg_82xx __iomem *reg;
2061 int status = 0, status1 = 0;
2062 unsigned long flags;
2063 unsigned long iter;
2064 uint32_t stat;
2065 uint16_t mb[4];
2066
2067 rsp = (struct rsp_que *) dev_id;
2068 if (!rsp) {
2069 printk(KERN_INFO
2070 "%s(): NULL response queue pointer\n", __func__);
2071 return IRQ_NONE;
2072 }
2073 ha = rsp->hw;
2074
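	/* With legacy INTx the line may be shared: check the interrupt
	 * vector and state registers to confirm this interrupt is ours.
	 */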
2075 if (!ha->flags.msi_enabled) {
2076 status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2077 if (!(status & ha->nx_legacy_intr.int_vec_bit))
2078 return IRQ_NONE;
2079
2080 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
2081 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
2082 return IRQ_NONE;
2083 }
2084
2085 /* clear the interrupt */
2086 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2087
2088 /* read twice to ensure write is flushed */
2089 qla82xx_rd_32(ha, ISR_INT_VECTOR);
2090 qla82xx_rd_32(ha, ISR_INT_VECTOR);
2091
2092 reg = &ha->iobase->isp82;
2093
2094 spin_lock_irqsave(&ha->hardware_lock, flags);
2095 vha = pci_get_drvdata(ha->pdev);
2096 for (iter = 1; iter--; ) {
2097
2098 if (RD_REG_DWORD(&reg->host_int)) {
2099 stat = RD_REG_DWORD(&reg->host_status);
2100 if ((stat & HSRX_RISC_INT) == 0)
2101 break;
2102
2103 switch (stat & 0xff) {
2104 case 0x1:
2105 case 0x2:
2106 case 0x10:
2107 case 0x11:
2108 qla82xx_mbx_completion(vha, MSW(stat));
2109 status |= MBX_INTERRUPT;
2110 break;
2111 case 0x12:
2112 mb[0] = MSW(stat);
2113 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2114 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2115 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2116 qla2x00_async_event(vha, rsp, mb);
2117 break;
2118 case 0x13:
2119 qla24xx_process_response_queue(vha, rsp);
2120 break;
2121 default:
2122 DEBUG2(printk("scsi(%ld): "
2123 " Unrecognized interrupt type (%d).\n",
2124 vha->host_no, stat & 0xff));
2125 break;
2126 }
2127 }
2128 WRT_REG_DWORD(&reg->host_int, 0);
2129 }
2130 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2131 if (!ha->flags.msi_enabled)
2132 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2133
2134#ifdef QL_DEBUG_LEVEL_17
2135 if (!irq && ha->flags.eeh_busy)
2136 qla_printk(KERN_WARNING, ha,
2137 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2138 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2139#endif
2140
2141 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2142 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2143 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2144 complete(&ha->mbx_intr_comp);
2145 }
2146 return IRQ_HANDLED;
2147}
2148
2149irqreturn_t
2150qla82xx_msix_default(int irq, void *dev_id)
2151{
2152 scsi_qla_host_t *vha;
2153 struct qla_hw_data *ha;
2154 struct rsp_que *rsp;
2155 struct device_reg_82xx __iomem *reg;
2156 int status = 0;
2157 unsigned long flags;
2158 uint32_t stat;
2159 uint16_t mb[4];
2160
2161 rsp = (struct rsp_que *) dev_id;
2162 if (!rsp) {
2163 printk(KERN_INFO
2164 "%s(): NULL response queue pointer\n", __func__);
2165 return IRQ_NONE;
2166 }
2167 ha = rsp->hw;
2168
2169 reg = &ha->iobase->isp82;
2170
2171 spin_lock_irqsave(&ha->hardware_lock, flags);
2172 vha = pci_get_drvdata(ha->pdev);
2173 do {
2174 if (RD_REG_DWORD(&reg->host_int)) {
2175 stat = RD_REG_DWORD(&reg->host_status);
2176 if ((stat & HSRX_RISC_INT) == 0)
2177 break;
2178
2179 switch (stat & 0xff) {
2180 case 0x1:
2181 case 0x2:
2182 case 0x10:
2183 case 0x11:
2184 qla82xx_mbx_completion(vha, MSW(stat));
2185 status |= MBX_INTERRUPT;
2186 break;
2187 case 0x12:
2188 mb[0] = MSW(stat);
2189 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2190 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2191 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2192 qla2x00_async_event(vha, rsp, mb);
2193 break;
2194 case 0x13:
2195 qla24xx_process_response_queue(vha, rsp);
2196 break;
2197 default:
2198 DEBUG2(printk("scsi(%ld): "
2199 " Unrecognized interrupt type (%d).\n",
2200 vha->host_no, stat & 0xff));
2201 break;
2202 }
2203 }
2204 WRT_REG_DWORD(&reg->host_int, 0);
2205 } while (0);
2206
2207 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2208
2209#ifdef QL_DEBUG_LEVEL_17
2210 if (!irq && ha->flags.eeh_busy)
2211 qla_printk(KERN_WARNING, ha,
2212 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2213 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2214#endif
2215
2216 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2217 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2218 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2219 complete(&ha->mbx_intr_comp);
2220 }
2221 return IRQ_HANDLED;
2222}
2223
2224irqreturn_t
2225qla82xx_msix_rsp_q(int irq, void *dev_id)
2226{
2227 scsi_qla_host_t *vha;
2228 struct qla_hw_data *ha;
2229 struct rsp_que *rsp;
2230 struct device_reg_82xx __iomem *reg;
2231
2232 rsp = (struct rsp_que *) dev_id;
2233 if (!rsp) {
2234 printk(KERN_INFO
2235 "%s(): NULL response queue pointer\n", __func__);
2236 return IRQ_NONE;
2237 }
2238
2239 ha = rsp->hw;
2240 reg = &ha->iobase->isp82;
2241 spin_lock_irq(&ha->hardware_lock);
2242 vha = pci_get_drvdata(ha->pdev);
2243 qla24xx_process_response_queue(vha, rsp);
2244 WRT_REG_DWORD(&reg->host_int, 0);
2245 spin_unlock_irq(&ha->hardware_lock);
2246 return IRQ_HANDLED;
2247}
2248
2249void
2250qla82xx_poll(int irq, void *dev_id)
2251{
2252 scsi_qla_host_t *vha;
2253 struct qla_hw_data *ha;
2254 struct rsp_que *rsp;
2255 struct device_reg_82xx __iomem *reg;
2256 int status = 0;
2257 uint32_t stat;
2258 uint16_t mb[4];
2259 unsigned long flags;
2260
2261 rsp = (struct rsp_que *) dev_id;
2262 if (!rsp) {
2263 printk(KERN_INFO
2264 "%s(): NULL response queue pointer\n", __func__);
2265 return;
2266 }
2267 ha = rsp->hw;
2268
2269 reg = &ha->iobase->isp82;
2270 spin_lock_irqsave(&ha->hardware_lock, flags);
2271 vha = pci_get_drvdata(ha->pdev);
2272
2273 if (RD_REG_DWORD(&reg->host_int)) {
2274 stat = RD_REG_DWORD(&reg->host_status);
2275 switch (stat & 0xff) {
2276 case 0x1:
2277 case 0x2:
2278 case 0x10:
2279 case 0x11:
2280 qla82xx_mbx_completion(vha, MSW(stat));
2281 status |= MBX_INTERRUPT;
2282 break;
2283 case 0x12:
2284 mb[0] = MSW(stat);
2285 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2286 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2287 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2288 qla2x00_async_event(vha, rsp, mb);
2289 break;
2290 case 0x13:
2291 qla24xx_process_response_queue(vha, rsp);
2292 break;
2293 default:
2294 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2295 "(%d).\n",
2296 vha->host_no, stat & 0xff));
2297 break;
2298 }
2299 }
2300 WRT_REG_DWORD(&reg->host_int, 0);
2301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2302}
2303
2304void
2305qla82xx_enable_intrs(struct qla_hw_data *ha)
2306{
2307 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2308 qla82xx_mbx_intr_enable(vha);
2309 spin_lock_irq(&ha->hardware_lock);
2310 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2311 spin_unlock_irq(&ha->hardware_lock);
2312 ha->interrupts_on = 1;
2313}
2314
2315void
2316qla82xx_disable_intrs(struct qla_hw_data *ha)
2317{
2318 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2319 qla82xx_mbx_intr_disable(vha);
2320 spin_lock_irq(&ha->hardware_lock);
2321 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2322 spin_unlock_irq(&ha->hardware_lock);
2323 ha->interrupts_on = 0;
2324}
2325
2326void qla82xx_init_flags(struct qla_hw_data *ha)
2327{
2328 struct qla82xx_legacy_intr_set *nx_legacy_intr;
2329
2330 /* ISP 8021 initializations */
2331 rwlock_init(&ha->hw_lock);
2332 ha->qdr_sn_window = -1;
2333 ha->ddr_mn_window = -1;
2334 ha->curr_window = 255;
2335 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2336 nx_legacy_intr = &legacy_intr[ha->portnum];
2337 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2338 ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2339 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2340 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2341}
2342
2343static inline void
2344qla82xx_set_drv_active(scsi_qla_host_t *vha)
2345{
2346 uint32_t drv_active;
2347 struct qla_hw_data *ha = vha->hw;
2348
2349 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2350
2351 /* If reset value is all FF's, initialize DRV_ACTIVE */
2352 if (drv_active == 0xffffffff) {
2353 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 0);
2354 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2355 }
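	/* Each PCI function owns a 4-bit slot in DRV_ACTIVE; the low bit
	 * of the slot marks this driver instance as active.
	 */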
2356 drv_active |= (1 << (ha->portnum * 4));
2357 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2358}
2359
2360inline void
2361qla82xx_clear_drv_active(struct qla_hw_data *ha)
2362{
2363 uint32_t drv_active;
2364
2365 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2366 drv_active &= ~(1 << (ha->portnum * 4));
2367 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2368}
2369
2370static inline int
2371qla82xx_need_reset(struct qla_hw_data *ha)
2372{
2373 uint32_t drv_state;
2374 int rval;
2375
2376 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2377 rval = drv_state & (1 << (ha->portnum * 4));
2378 return rval;
2379}
2380
2381static inline void
2382qla82xx_set_rst_ready(struct qla_hw_data *ha)
2383{
2384 uint32_t drv_state;
2385 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2386
2387 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2388
2389 /* If reset value is all FF's, initialize DRV_STATE */
2390 if (drv_state == 0xffffffff) {
2391 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
2392 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2393 }
2394 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2395 qla_printk(KERN_INFO, ha,
2396 "%s(%ld):drv_state = 0x%x\n",
2397 __func__, vha->host_no, drv_state);
2398 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2399}
2400
2401static inline void
2402qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2403{
2404 uint32_t drv_state;
2405
2406 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2407 drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2408 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2409}
2410
2411static inline void
2412qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2413{
2414 uint32_t qsnt_state;
2415
2416 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2417 qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2418 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2419}
2420
2421int qla82xx_load_fw(scsi_qla_host_t *vha)
2422{
2423 int rst;
2424 struct fw_blob *blob;
2425 struct qla_hw_data *ha = vha->hw;
2426
2427 /* Put both the PEG CMD and RCV PEG to default state
2428 * of 0 before resetting the hardware
2429 */
2430 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2431 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2432
2433 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2434 qla_printk(KERN_ERR, ha,
2435 "%s: Error during CRB Initialization\n", __func__);
2436 return QLA_FUNCTION_FAILED;
2437 }
2438 udelay(500);
2439
2440 /* Bring QM and CAMRAM out of reset */
2441 rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2442 rst &= ~((1 << 28) | (1 << 24));
2443 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2444
2445 /*
2446 * FW Load priority:
2447 * 1) Operational firmware residing in flash.
2448 * 2) Firmware via request-firmware interface (.bin file).
2449 */
2450 if (ql2xfwloadbin == 2)
2451 goto try_blob_fw;
2452
2453 qla_printk(KERN_INFO, ha,
2454 "Attempting to load firmware from flash\n");
2455
2456 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2457		qla_printk(KERN_INFO, ha,
2458 "Firmware loaded successfully from flash\n");
2459 return QLA_SUCCESS;
2460 }
2461try_blob_fw:
2462 qla_printk(KERN_INFO, ha,
2463 "Attempting to load firmware from blob\n");
2464
2465 /* Load firmware blob. */
2466 blob = ha->hablob = qla2x00_request_firmware(vha);
2467 if (!blob) {
2468 qla_printk(KERN_ERR, ha,
2469 "Firmware image not present.\n");
2470 goto fw_load_failed;
2471 }
2472
2473 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2474		qla_printk(KERN_INFO, ha,
2475		    "%s: Firmware loaded successfully "
2476		    "from binary blob\n", __func__);
2477 return QLA_SUCCESS;
2478 } else {
2479 qla_printk(KERN_ERR, ha,
2480 "Firmware load failed from binary blob\n");
2481 blob->fw = NULL;
2482 blob = NULL;
2483 goto fw_load_failed;
2484 }
2486
2487fw_load_failed:
2488 return QLA_FUNCTION_FAILED;
2489}
2490
2491static int
2492qla82xx_start_firmware(scsi_qla_host_t *vha)
2493{
2494 int pcie_cap;
2495 uint16_t lnk;
2496 struct qla_hw_data *ha = vha->hw;
2497
2498 /* scrub dma mask expansion register */
2499 qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
2500
2501 /* Overwrite stale initialization register values */
2502 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2503 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2504
2505 if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2506 qla_printk(KERN_INFO, ha,
2507 "%s: Error trying to start fw!\n", __func__);
2508 return QLA_FUNCTION_FAILED;
2509 }
2510
2511 /* Handshake with the card before we register the devices. */
2512 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2513 qla_printk(KERN_INFO, ha,
2514 "%s: Error during card handshake!\n", __func__);
2515 return QLA_FUNCTION_FAILED;
2516 }
2517
2518 /* Negotiated Link width */
2519 pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2520 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2521 ha->link_width = (lnk >> 4) & 0x3f;
2522
2523 /* Synchronize with Receive peg */
2524 return qla82xx_check_rcvpeg_state(ha);
2525}
2526
2527static inline int
2528qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2529 uint16_t tot_dsds)
2530{
2531 uint32_t *cur_dsd = NULL;
2532 scsi_qla_host_t *vha;
2533 struct qla_hw_data *ha;
2534 struct scsi_cmnd *cmd;
2535 struct scatterlist *cur_seg;
2536 uint32_t *dsd_seg;
2537 void *next_dsd;
2538 uint8_t avail_dsds;
2539 uint8_t first_iocb = 1;
2540 uint32_t dsd_list_len;
2541 struct dsd_dma *dsd_ptr;
2542 struct ct6_dsd *ctx;
2543
2544 cmd = sp->cmd;
2545
2546	/* Update entry type to indicate Command Type 6 IOCB */
2547 *((uint32_t *)(&cmd_pkt->entry_type)) =
2548 __constant_cpu_to_le32(COMMAND_TYPE_6);
2549
2550 /* No data transfer */
2551 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2552 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2553 return 0;
2554 }
2555
2556 vha = sp->fcport->vha;
2557 ha = vha->hw;
2558
2559 /* Set transfer direction */
2560 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2561 cmd_pkt->control_flags =
2562 __constant_cpu_to_le16(CF_WRITE_DATA);
2563 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2564 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2565 cmd_pkt->control_flags =
2566 __constant_cpu_to_le16(CF_READ_DATA);
2567 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2568 }
2569
2570 cur_seg = scsi_sglist(cmd);
2571 ctx = sp->ctx;
2572
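	/* Command Type 6 transfers use indirect DSD lists: the IOCB
	 * points at the first list and each list chains to the next
	 * through its final descriptor slot.
	 */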
2573 while (tot_dsds) {
2574 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2575 QLA_DSDS_PER_IOCB : tot_dsds;
2576 tot_dsds -= avail_dsds;
2577 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2578
2579 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2580 struct dsd_dma, list);
2581 next_dsd = dsd_ptr->dsd_addr;
2582 list_del(&dsd_ptr->list);
2583 ha->gbl_dsd_avail--;
2584 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2585 ctx->dsd_use_cnt++;
2586 ha->gbl_dsd_inuse++;
2587
2588 if (first_iocb) {
2589 first_iocb = 0;
2590 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2591 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2592 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2593 *dsd_seg++ = dsd_list_len;
2594 } else {
2595 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2596 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2597 *cur_dsd++ = dsd_list_len;
2598 }
2599 cur_dsd = (uint32_t *)next_dsd;
2600 while (avail_dsds) {
2601 dma_addr_t sle_dma;
2602
2603 sle_dma = sg_dma_address(cur_seg);
2604 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2605 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2606 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2607 cur_seg++;
2608 avail_dsds--;
2609 }
2610 }
2611
2612 /* Null termination */
2613 *cur_dsd++ = 0;
2614 *cur_dsd++ = 0;
2615 *cur_dsd++ = 0;
2616 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2617 return 0;
2618}
2619
2620/*
2621 * qla82xx_calc_dsd_lists() - Determine number of DSD lists required
2622 * for Command Type 6.
2623 *
2624 * @dsds: number of data segment descriptors needed
2625 *
2626 * Returns the number of DSD lists needed to store @dsds.
2627 */
2628inline uint16_t
2629qla82xx_calc_dsd_lists(uint16_t dsds)
2630{
2631 uint16_t dsd_lists = 0;
2632
2633 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2634 if (dsds % QLA_DSDS_PER_IOCB)
2635 dsd_lists++;
2636 return dsd_lists;
2637}
2638
2639/*
2640 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2641 * @sp: command to send to the ISP
2642 *
2643 * Returns non-zero if a failure occurred, else zero.
2644 */
2645int
2646qla82xx_start_scsi(srb_t *sp)
2647{
2648 int ret, nseg;
2649 unsigned long flags;
2650 struct scsi_cmnd *cmd;
2651 uint32_t *clr_ptr;
2652 uint32_t index;
2653 uint32_t handle;
2654 uint16_t cnt;
2655 uint16_t req_cnt;
2656 uint16_t tot_dsds;
2657 struct device_reg_82xx __iomem *reg;
2658 uint32_t dbval;
2659 uint32_t *fcp_dl;
2660 uint8_t additional_cdb_len;
2661 struct ct6_dsd *ctx;
2662 struct scsi_qla_host *vha = sp->fcport->vha;
2663 struct qla_hw_data *ha = vha->hw;
2664 struct req_que *req = NULL;
2665 struct rsp_que *rsp = NULL;
2666
2667 /* Setup device pointers. */
2668 ret = 0;
2669 reg = &ha->iobase->isp82;
2670 cmd = sp->cmd;
2671 req = vha->req;
2672 rsp = ha->rsp_q_map[0];
2673
2674 /* So we know we haven't pci_map'ed anything yet */
2675 tot_dsds = 0;
2676
2677 dbval = 0x04 | (ha->portnum << 5);
2678
2679 /* Send marker if required */
2680 if (vha->marker_needed != 0) {
2681 if (qla2x00_marker(vha, req,
2682 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2683 return QLA_FUNCTION_FAILED;
2684 vha->marker_needed = 0;
2685 }
2686
2687 /* Acquire ring specific lock */
2688 spin_lock_irqsave(&ha->hardware_lock, flags);
2689
2690 /* Check for room in outstanding command list. */
2691 handle = req->current_outstanding_cmd;
2692 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2693 handle++;
2694 if (handle == MAX_OUTSTANDING_COMMANDS)
2695 handle = 1;
2696 if (!req->outstanding_cmds[handle])
2697 break;
2698 }
2699 if (index == MAX_OUTSTANDING_COMMANDS)
2700 goto queuing_error;
2701
2702 /* Map the sg table so we have an accurate count of sg entries needed */
2703 if (scsi_sg_count(cmd)) {
2704 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2705 scsi_sg_count(cmd), cmd->sc_data_direction);
2706 if (unlikely(!nseg))
2707 goto queuing_error;
2708 } else
2709 nseg = 0;
2710
2711 tot_dsds = nseg;
2712
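	/* Commands needing more than ql2xshiftctondsd segments are sent
	 * as Command Type 6 (indirect DSD lists); smaller commands use
	 * the inline Command Type 7 path below.
	 */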
2713 if (tot_dsds > ql2xshiftctondsd) {
2714 struct cmd_type_6 *cmd_pkt;
2715 uint16_t more_dsd_lists = 0;
2716 struct dsd_dma *dsd_ptr;
2717 uint16_t i;
2718
2719 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2720 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
2721 goto queuing_error;
2722
2723 if (more_dsd_lists <= ha->gbl_dsd_avail)
2724 goto sufficient_dsds;
2725 else
2726 more_dsd_lists -= ha->gbl_dsd_avail;
2727
2728 for (i = 0; i < more_dsd_lists; i++) {
2729 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2730 if (!dsd_ptr)
2731 goto queuing_error;
2732
2733 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2734 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2735 if (!dsd_ptr->dsd_addr) {
2736 kfree(dsd_ptr);
2737 goto queuing_error;
2738 }
2739 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2740 ha->gbl_dsd_avail++;
2741 }
2742
2743sufficient_dsds:
2744 req_cnt = 1;
2745
2746 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2747 if (!sp->ctx) {
2748 DEBUG(printk(KERN_INFO
2749 "%s(%ld): failed to allocate"
2750 " ctx.\n", __func__, vha->host_no));
2751 goto queuing_error;
2752 }
2753 memset(ctx, 0, sizeof(struct ct6_dsd));
2754 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2755 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2756 if (!ctx->fcp_cmnd) {
2757 DEBUG2_3(printk("%s(%ld): failed to allocate"
2758 " fcp_cmnd.\n", __func__, vha->host_no));
2759 goto queuing_error_fcp_cmnd;
2760 }
2761
2762 /* Initialize the DSD list and dma handle */
2763 INIT_LIST_HEAD(&ctx->dsd_list);
2764 ctx->dsd_use_cnt = 0;
2765
2766 if (cmd->cmd_len > 16) {
2767 additional_cdb_len = cmd->cmd_len - 16;
2768 if ((cmd->cmd_len % 4) != 0) {
2769 /* SCSI command bigger than 16 bytes must be
2770 * multiple of 4
2771 */
2772 goto queuing_error_fcp_cmnd;
2773 }
2774 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2775 } else {
2776 additional_cdb_len = 0;
2777 ctx->fcp_cmnd_len = 12 + 16 + 4;
2778 }
2779
2780 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2781 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2782
2783 /* Zero out remaining portion of packet. */
2784 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2785 clr_ptr = (uint32_t *)cmd_pkt + 2;
2786 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2787 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2788
2789 /* Set NPORT-ID and LUN number*/
2790 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2791 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2792 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2793 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2794 cmd_pkt->vp_index = sp->fcport->vp_idx;
2795
2796 /* Build IOCB segments */
2797 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2798 goto queuing_error_fcp_cmnd;
2799
2800 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2801
2802 /* build FCP_CMND IU */
2803 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2804 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2805 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2806
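		/* Bits 1:0 of additional_cdb_len double as the FCP_CMND
		 * read/write flags (1 = write, 2 = read).
		 */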
2807 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2808 ctx->fcp_cmnd->additional_cdb_len |= 1;
2809 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2810 ctx->fcp_cmnd->additional_cdb_len |= 2;
2811
2812 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2813
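		/* FCP_DL (the 4-byte data length) immediately follows the
		 * CDB in the FCP_CMND IU, hence the 12 + cdb + 4 sizing
		 * above.
		 */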
2814 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2815 additional_cdb_len);
2816 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2817
2818 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2819 cmd_pkt->fcp_cmnd_dseg_address[0] =
2820 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2821 cmd_pkt->fcp_cmnd_dseg_address[1] =
2822 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2823
2824 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2825 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2826 /* Set total data segment count. */
2827 cmd_pkt->entry_count = (uint8_t)req_cnt;
2828 /* Specify response queue number where
2829 * completion should happen
2830 */
2831 cmd_pkt->entry_status = (uint8_t) rsp->id;
2832 } else {
2833 struct cmd_type_7 *cmd_pkt;
2834 req_cnt = qla24xx_calc_iocbs(tot_dsds);
2835 if (req->cnt < (req_cnt + 2)) {
2836 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2837 &reg->req_q_out[0]);
2838 if (req->ring_index < cnt)
2839 req->cnt = cnt - req->ring_index;
2840 else
2841 req->cnt = req->length -
2842 (req->ring_index - cnt);
2843 }
2844 if (req->cnt < (req_cnt + 2))
2845 goto queuing_error;
2846
2847 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2848 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2849
2850 /* Zero out remaining portion of packet. */
2851 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2852 clr_ptr = (uint32_t *)cmd_pkt + 2;
2853 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2854 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2855
2856 /* Set NPORT-ID and LUN number*/
2857 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2858 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2859 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2860 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2861 cmd_pkt->vp_index = sp->fcport->vp_idx;
2862
2863 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2864 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2865 sizeof(cmd_pkt->lun));
2866
2867 /* Load SCSI command packet. */
2868 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2869 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2870
2871 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2872
2873 /* Build IOCB segments */
2874 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2875
2876 /* Set total data segment count. */
2877 cmd_pkt->entry_count = (uint8_t)req_cnt;
2878 /* Specify response queue number where
2879 * completion should happen.
2880 */
2881 cmd_pkt->entry_status = (uint8_t) rsp->id;
2882
2883 }
2884 /* Build command packet. */
2885 req->current_outstanding_cmd = handle;
2886 req->outstanding_cmds[handle] = sp;
2887 sp->handle = handle;
2888 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2889 req->cnt -= req_cnt;
2890 wmb();
2891
2892 /* Adjust ring index. */
2893 req->ring_index++;
2894 if (req->ring_index == req->length) {
2895 req->ring_index = 0;
2896 req->ring_ptr = req->ring;
2897 } else
2898 req->ring_ptr++;
2899
2900 sp->flags |= SRB_DMA_VALID;
2901
2902 /* Set chip new ring index. */
2903 /* write, read and verify logic */
2904 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2905 if (ql2xdbwr)
2906 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2907 else {
2908 WRT_REG_DWORD(
2909 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2910 dbval);
2911 wmb();
2912 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2913 WRT_REG_DWORD(
2914 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2915 dbval);
2916 wmb();
2917 }
2918 }
2919
2920 /* Manage unprocessed RIO/ZIO commands in response queue. */
2921 if (vha->flags.process_response_queue &&
2922 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2923 qla24xx_process_response_queue(vha, rsp);
2924
2925 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2926 return QLA_SUCCESS;
2927
2928queuing_error_fcp_cmnd:
2929 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2930queuing_error:
2931 if (tot_dsds)
2932 scsi_dma_unmap(cmd);
2933
2934 if (sp->ctx) {
2935 mempool_free(sp->ctx, ha->ctx_mempool);
2936 sp->ctx = NULL;
2937 }
2938 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2939
2940 return QLA_FUNCTION_FAILED;
2941}
2942
2943uint32_t *
2944qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2945 uint32_t length)
2946{
2947 uint32_t i;
2948 uint32_t val;
2949 struct qla_hw_data *ha = vha->hw;
2950
2951 /* Dword reads to flash. */
2952 for (i = 0; i < length/4; i++, faddr += 4) {
2953 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2954 qla_printk(KERN_WARNING, ha,
2955			    "ROM fast read failed\n");
2956 goto done_read;
2957 }
2958 dwptr[i] = __constant_cpu_to_le32(val);
2959 }
2960done_read:
2961 return dwptr;
2962}
2963
2964int
2965qla82xx_unprotect_flash(struct qla_hw_data *ha)
2966{
2967 int ret;
2968 uint32_t val;
2969
2970 ret = ql82xx_rom_lock_d(ha);
2971 if (ret < 0) {
2972 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2973 return ret;
2974 }
2975
2976 ret = qla82xx_read_status_reg(ha, &val);
2977 if (ret < 0)
2978 goto done_unprotect;
2979
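	/* Clear the serial flash block-protect bits (bits 4:2) to unlock
	 * all sectors for writing.
	 */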
2980 val &= ~(0x7 << 2);
2981 ret = qla82xx_write_status_reg(ha, val);
2982 if (ret < 0) {
2983 val |= (0x7 << 2);
2984 qla82xx_write_status_reg(ha, val);
2985 }
2986
2987 if (qla82xx_write_disable_flash(ha) != 0)
2988 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2989
2990done_unprotect:
2991 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
2992 return ret;
2993}
2994
2995int
2996qla82xx_protect_flash(struct qla_hw_data *ha)
2997{
2998 int ret;
2999 uint32_t val;
3000
3001 ret = ql82xx_rom_lock_d(ha);
3002 if (ret < 0) {
3003 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
3004 return ret;
3005 }
3006
3007 ret = qla82xx_read_status_reg(ha, &val);
3008 if (ret < 0)
3009 goto done_protect;
3010
3011 val |= (0x7 << 2);
3012 /* LOCK all sectors */
3013 ret = qla82xx_write_status_reg(ha, val);
3014 if (ret < 0)
3015 qla_printk(KERN_WARNING, ha, "Write status register failed\n");
3016
3017 if (qla82xx_write_disable_flash(ha) != 0)
3018 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
3019done_protect:
3020 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3021 return ret;
3022}
3023
3024int
3025qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3026{
3027 int ret = 0;
3028
3029 ret = ql82xx_rom_lock_d(ha);
3030 if (ret < 0) {
3031 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
3032 return ret;
3033 }
3034
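	/* Serial flash sector erase: set write enable, load the 3-byte
	 * sector address, issue M25P_INSTR_SE, then wait for completion.
	 */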
3035 qla82xx_flash_set_write_enable(ha);
3036 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
3037 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
3038 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3039
3040 if (qla82xx_wait_rom_done(ha)) {
3041 qla_printk(KERN_WARNING, ha,
3042 "Error waiting for rom done\n");
3043 ret = -1;
3044 goto done;
3045 }
3046 ret = qla82xx_flash_wait_write_finish(ha);
3047done:
3048 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3049 return ret;
3050}
3051
3052/*
3053 * Address and length are given in bytes.
3054 */
3055uint8_t *
3056qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3057 uint32_t offset, uint32_t length)
3058{
3059 scsi_block_requests(vha->host);
3060 qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
3061 scsi_unblock_requests(vha->host);
3062 return buf;
3063}
3064
3065static int
3066qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3067 uint32_t faddr, uint32_t dwords)
3068{
3069 int ret;
3070 uint32_t liter;
3071 uint32_t sec_mask, rest_addr;
3072 dma_addr_t optrom_dma;
3073 void *optrom = NULL;
3074 int page_mode = 0;
3075 struct qla_hw_data *ha = vha->hw;
3076
3077 ret = -1;
3078
3079 /* Prepare burst-capable write on supported ISPs. */
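	/* Note: page_mode is hard-coded to 0 above, so this burst path is
	 * effectively disabled and writes fall through to the
	 * dword-at-a-time loop below.
	 */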
3080 if (page_mode && !(faddr & 0xfff) &&
3081 dwords > OPTROM_BURST_DWORDS) {
3082 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3083 &optrom_dma, GFP_KERNEL);
3084 if (!optrom) {
3085 qla_printk(KERN_DEBUG, ha,
3086 "Unable to allocate memory for optrom "
3087 "burst write (%x KB).\n",
3088 OPTROM_BURST_SIZE / 1024);
3089 }
3090 }
3091
3092 rest_addr = ha->fdt_block_size - 1;
3093 sec_mask = ~rest_addr;
3094
3095 ret = qla82xx_unprotect_flash(ha);
3096 if (ret) {
3097 qla_printk(KERN_WARNING, ha,
3098 "Unable to unprotect flash for update.\n");
3099 goto write_done;
3100 }
3101
3102 for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3103 /* Are we at the beginning of a sector? */
3104 if ((faddr & rest_addr) == 0) {
3105
3106 ret = qla82xx_erase_sector(ha, faddr);
3107 if (ret) {
3108 DEBUG9(qla_printk(KERN_ERR, ha,
3109 "Unable to erase sector: "
3110 "address=%x.\n", faddr));
3111 break;
3112 }
3113 }
3114
3115 /* Go with burst-write. */
3116 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
3117 /* Copy data to DMA'ble buffer. */
3118 memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
3119
3120 ret = qla2x00_load_ram(vha, optrom_dma,
3121 (ha->flash_data_off | faddr),
3122 OPTROM_BURST_DWORDS);
3123 if (ret != QLA_SUCCESS) {
3124 qla_printk(KERN_WARNING, ha,
3125 "Unable to burst-write optrom segment "
3126 "(%x/%x/%llx).\n", ret,
3127 (ha->flash_data_off | faddr),
3128 (unsigned long long)optrom_dma);
3129 qla_printk(KERN_WARNING, ha,
3130 "Reverting to slow-write.\n");
3131
3132 dma_free_coherent(&ha->pdev->dev,
3133 OPTROM_BURST_SIZE, optrom, optrom_dma);
3134 optrom = NULL;
3135 } else {
3136 liter += OPTROM_BURST_DWORDS - 1;
3137 faddr += OPTROM_BURST_DWORDS - 1;
3138 dwptr += OPTROM_BURST_DWORDS - 1;
3139 continue;
3140 }
3141 }
3142
3143 ret = qla82xx_write_flash_dword(ha, faddr,
3144 cpu_to_le32(*dwptr));
3145 if (ret) {
3146			DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program "
3147			    "flash address=%x data=%x.\n", __func__,
3148			    vha->host_no, faddr, *dwptr));
3149 break;
3150 }
3151 }
3152
3153 ret = qla82xx_protect_flash(ha);
3154 if (ret)
3155 qla_printk(KERN_WARNING, ha,
3156 "Unable to protect flash after update.\n");
3157write_done:
3158 if (optrom)
3159 dma_free_coherent(&ha->pdev->dev,
3160 OPTROM_BURST_SIZE, optrom, optrom_dma);
3161 return ret;
3162}
3163
3164int
3165qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3166 uint32_t offset, uint32_t length)
3167{
3168 int rval;
3169
3170 /* Suspend HBA. */
3171 scsi_block_requests(vha->host);
3172 rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3173 length >> 2);
3174 scsi_unblock_requests(vha->host);
3175
3176	/* Convert ISP82xx return status to the generic driver codes */
3177 if (rval)
3178 rval = QLA_FUNCTION_FAILED;
3179 else
3180 rval = QLA_SUCCESS;
3181 return rval;
3182}
3183
3184void
3185qla82xx_start_iocbs(srb_t *sp)
3186{
3187 struct qla_hw_data *ha = sp->fcport->vha->hw;
3188 struct req_que *req = ha->req_q_map[0];
3189 struct device_reg_82xx __iomem *reg;
3190 uint32_t dbval;
3191
3192 /* Adjust ring index. */
3193 req->ring_index++;
3194 if (req->ring_index == req->length) {
3195 req->ring_index = 0;
3196 req->ring_ptr = req->ring;
3197 } else
3198 req->ring_ptr++;
3199
3200 reg = &ha->iobase->isp82;
3201 dbval = 0x04 | (ha->portnum << 5);
3202
3203 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
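	/* Write/read/verify: repost the doorbell until the chip echoes
	 * the value back through the doorbell read pointer.
	 */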
3204 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3205 wmb();
3206 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3207 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
3208 wmb();
3209 }
3210}
3211
3212/*
3213 * qla82xx_device_bootstrap
3214 * Initialize device, set DEV_READY, start fw
3215 *
3216 * Note:
3217 * IDC lock must be held upon entry
3218 *
3219 * Return:
3220 * Success : 0
3221 * Failed : 1
3222 */
3223static int
3224qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3225{
3226 int rval, i, timeout;
3227 uint32_t old_count, count;
3228 struct qla_hw_data *ha = vha->hw;
3229
3230 if (qla82xx_need_reset(ha))
3231 goto dev_initialize;
3232
3233 old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3234
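	/* If the peg-alive counter advances within ~2 seconds the
	 * firmware is already running and bootstrap can be skipped.
	 */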
3235 for (i = 0; i < 10; i++) {
3236 timeout = msleep_interruptible(200);
3237 if (timeout) {
3238 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3239 QLA82XX_DEV_FAILED);
3240 return QLA_FUNCTION_FAILED;
3241 }
3242
3243 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3244 if (count != old_count)
3245 goto dev_ready;
3246 }
3247
3248dev_initialize:
3249 /* set to DEV_INITIALIZING */
3250 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
3251 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3252
3253	/* Driver that sets device state to initializing sets IDC version */
3254 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
3255
3256 qla82xx_idc_unlock(ha);
3257 rval = qla82xx_start_firmware(vha);
3258 qla82xx_idc_lock(ha);
3259
3260 if (rval != QLA_SUCCESS) {
3261 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3262 qla82xx_clear_drv_active(ha);
3263 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3264 return rval;
3265 }
3266
3267dev_ready:
3268 qla_printk(KERN_INFO, ha, "HW State: READY\n");
3269 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3270
3271 return QLA_SUCCESS;
3272}
3273
3274static void
3275qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3276{
3277 struct qla_hw_data *ha = vha->hw;
3278
3279 /* Disable the board */
3280 qla_printk(KERN_INFO, ha, "Disabling the board\n");
3281
3282 /* Set DEV_FAILED flag to disable timer */
3283 vha->device_flags |= DFLG_DEV_FAILED;
3284 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3285 qla2x00_mark_all_devices_lost(vha, 0);
3286 vha->flags.online = 0;
3287 vha->flags.init_done = 0;
3288}
3289
3290/*
3291 * qla82xx_need_reset_handler
3292 * Code to start reset sequence
3293 *
3294 * Note:
3295 * IDC lock must be held upon entry
3296 *
3297 * Waits for all active functions to ack the reset (or for the
3298 * ack timeout), then forces the device state to QLA82XX_DEV_COLD
3299 * unless another function has already begun re-initialization.
3300 */
3301static void
3302qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3303{
3304 uint32_t dev_state, drv_state, drv_active;
3305 unsigned long reset_timeout;
3306 struct qla_hw_data *ha = vha->hw;
3307 struct req_que *req = ha->req_q_map[0];
3308
3309 if (vha->flags.online) {
3310 qla82xx_idc_unlock(ha);
3311 qla2x00_abort_isp_cleanup(vha);
3312 ha->isp_ops->get_flash_version(vha, req->ring);
3313 ha->isp_ops->nvram_config(vha);
3314 qla82xx_idc_lock(ha);
3315 }
3316
3317 qla82xx_set_rst_ready(ha);
3318
3319 /* wait for 10 seconds for reset ack from all functions */
3320 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3321
3322 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3323 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3324
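	/* Reset rendezvous: wait until every active function has set its
	 * rst-ready bit, i.e. DRV_STATE matches DRV_ACTIVE.
	 */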
3325 while (drv_state != drv_active) {
3326 if (time_after_eq(jiffies, reset_timeout)) {
3327 qla_printk(KERN_INFO, ha,
3328 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
3329 break;
3330 }
3331 qla82xx_idc_unlock(ha);
3332 msleep(1000);
3333 qla82xx_idc_lock(ha);
3334 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3335 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3336 }
3337
3338 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3339 qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
3340 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3341
3342 /* Force to DEV_COLD unless someone else is starting a reset */
3343 if (dev_state != QLA82XX_DEV_INITIALIZING) {
3344 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
3345 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3346 }
3347}
3348
3349static void
3350qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3351{
3352 uint32_t fw_heartbeat_counter, halt_status;
3353 struct qla_hw_data *ha = vha->hw;
3354
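	/* The firmware increments PEG_ALIVE_COUNTER while healthy; if it
	 * has not moved across two consecutive watchdog polls, assume the
	 * firmware is hung and schedule recovery.
	 */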
3355 fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3356 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3357 vha->seconds_since_last_heartbeat++;
3358 /* FW not alive after 2 seconds */
3359 if (vha->seconds_since_last_heartbeat == 2) {
3360 vha->seconds_since_last_heartbeat = 0;
3361 halt_status = qla82xx_rd_32(ha,
3362 QLA82XX_PEG_HALT_STATUS1);
3363 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3364 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3365 } else {
3366 qla_printk(KERN_INFO, ha,
3367 "scsi(%ld): %s - detect abort needed\n",
3368 vha->host_no, __func__);
3369 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3370 }
3371 qla2xxx_wake_dpc(vha);
3372 }
3373 }
3374 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3375}
3376
3377/*
3378 * qla82xx_device_state_handler
3379 * Main state handler
3380 *
3381 * Note:
3382 * IDC lock must be held upon entry
3383 *
3384 * Return:
3385 * Success : 0
3386 * Failed : 1
3387 */
3388int
3389qla82xx_device_state_handler(scsi_qla_host_t *vha)
3390{
3391 uint32_t dev_state;
3392 int rval = QLA_SUCCESS;
3393 unsigned long dev_init_timeout;
3394 struct qla_hw_data *ha = vha->hw;
3395
3396 qla82xx_idc_lock(ha);
3397 if (!vha->flags.init_done)
3398 qla82xx_set_drv_active(vha);
3399
3400 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3401 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
3402 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3403
3404 /* wait for 30 seconds for device to go ready */
3405 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3406
3407 while (1) {
3408
3409 if (time_after_eq(jiffies, dev_init_timeout)) {
3410 DEBUG(qla_printk(KERN_INFO, ha,
3411 "%s: device init failed!\n",
3412 QLA2XXX_DRIVER_NAME));
3413 rval = QLA_FUNCTION_FAILED;
3414 break;
3415 }
3416 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3417 qla_printk(KERN_INFO, ha,
3418 "2:Device state is 0x%x = %s\n", dev_state,
3419 dev_state < MAX_STATES ?
3420 qdev_state[dev_state] : "Unknown");
3421
3422 switch (dev_state) {
3423 case QLA82XX_DEV_READY:
3424 goto exit;
3425 case QLA82XX_DEV_COLD:
3426 rval = qla82xx_device_bootstrap(vha);
3427 goto exit;
3428 case QLA82XX_DEV_INITIALIZING:
3429 qla82xx_idc_unlock(ha);
3430 msleep(1000);
3431 qla82xx_idc_lock(ha);
3432 break;
3433 case QLA82XX_DEV_NEED_RESET:
3434 if (!ql2xdontresethba)
3435 qla82xx_need_reset_handler(vha);
3436 break;
3437 case QLA82XX_DEV_NEED_QUIESCENT:
3438 qla82xx_set_qsnt_ready(ha);
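			/* fall through - wait as in the quiescent state */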
3439 case QLA82XX_DEV_QUIESCENT:
3440 qla82xx_idc_unlock(ha);
3441 msleep(1000);
3442 qla82xx_idc_lock(ha);
3443 break;
3444 case QLA82XX_DEV_FAILED:
3445 qla82xx_dev_failed_handler(vha);
3446 rval = QLA_FUNCTION_FAILED;
3447 goto exit;
3448 default:
3449 qla82xx_idc_unlock(ha);
3450 msleep(1000);
3451 qla82xx_idc_lock(ha);
3452 }
3453 }
3454exit:
3455 qla82xx_idc_unlock(ha);
3456 return rval;
3457}
3458
3459void qla82xx_watchdog(scsi_qla_host_t *vha)
3460{
3461 uint32_t dev_state;
3462 struct qla_hw_data *ha = vha->hw;
3463
3464 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3465
3466 /* don't poll if reset is going on */
3467 if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
3468 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
3469 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
3470 if (dev_state == QLA82XX_DEV_NEED_RESET) {
3471 qla_printk(KERN_WARNING, ha,
3472 "%s(): Adapter reset needed!\n", __func__);
3473 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3474 qla2xxx_wake_dpc(vha);
3475 } else {
3476 qla82xx_check_fw_alive(vha);
3477 }
3478 }
3479}
3480
3481int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3482{
3483 int rval;
3484 rval = qla82xx_device_state_handler(vha);
3485 return rval;
3486}
3487
3488/*
3489 * qla82xx_abort_isp
3490 * Resets ISP and aborts all outstanding commands.
3491 *
3492 * Input:
3493 *	vha = adapter block pointer.
3494 *
3495 * Returns:
3496 * 0 = success
3497 */
3498int
3499qla82xx_abort_isp(scsi_qla_host_t *vha)
3500{
3501 int rval;
3502 struct qla_hw_data *ha = vha->hw;
3503 uint32_t dev_state;
3504
3505 if (vha->device_flags & DFLG_DEV_FAILED) {
3506 qla_printk(KERN_WARNING, ha,
3507 "%s(%ld): Device in failed state, "
3508 "Exiting.\n", __func__, vha->host_no);
3509 return QLA_SUCCESS;
3510 }
3511
3512 qla82xx_idc_lock(ha);
3513 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3514 if (dev_state == QLA82XX_DEV_READY) {
3515 qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
3516 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3517 QLA82XX_DEV_NEED_RESET);
3518 } else
3519 qla_printk(KERN_INFO, ha, "HW State: %s\n",
3520 dev_state < MAX_STATES ?
3521 qdev_state[dev_state] : "Unknown");
3522 qla82xx_idc_unlock(ha);
3523
3524 rval = qla82xx_device_state_handler(vha);
3525
3526 qla82xx_idc_lock(ha);
3527 qla82xx_clear_rst_ready(ha);
3528 qla82xx_idc_unlock(ha);
3529
3530 if (rval == QLA_SUCCESS)
3531 qla82xx_restart_isp(vha);
3532
3533 if (rval) {
3534 vha->flags.online = 1;
3535 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3536 if (ha->isp_abort_cnt == 0) {
3537 qla_printk(KERN_WARNING, ha,
3538 "ISP error recovery failed - "
3539 "board disabled\n");
3540 /*
3541 * The next call disables the board
3542 * completely.
3543 */
3544 ha->isp_ops->reset_adapter(vha);
3545 vha->flags.online = 0;
3546 clear_bit(ISP_ABORT_RETRY,
3547 &vha->dpc_flags);
3548 rval = QLA_SUCCESS;
3549 } else { /* schedule another ISP abort */
3550 ha->isp_abort_cnt--;
3551 DEBUG(qla_printk(KERN_INFO, ha,
3552 "qla%ld: ISP abort - retry remaining %d\n",
3553 vha->host_no, ha->isp_abort_cnt));
3554 rval = QLA_FUNCTION_FAILED;
3555 }
3556 } else {
3557 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3558 DEBUG(qla_printk(KERN_INFO, ha,
3559 "(%ld): ISP error recovery - retrying (%d) "
3560 "more times\n", vha->host_no, ha->isp_abort_cnt));
3561 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3562 rval = QLA_FUNCTION_FAILED;
3563 }
3564 }
3565 return rval;
3566}
3567
3568/*
3569 * qla82xx_fcoe_ctx_reset
3570 *	Performs a quick reset and aborts all outstanding commands.
3571 *	This performs only an FCoE context reset and avoids a full blown
3572 * chip reset.
3573 *
3574 * Input:
3575 *	vha = adapter block pointer.
3577 *
3578 * Returns:
3579 * 0 = success
3580 */
3581int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
3582{
3583 int rval = QLA_FUNCTION_FAILED;
3584
3585 if (vha->flags.online) {
3586 /* Abort all outstanding commands, so as to be requeued later */
3587 qla2x00_abort_isp_cleanup(vha);
3588 }
3589
3590 /* Stop currently executing firmware.
3591 * This will destroy existing FCoE context at the F/W end.
3592 */
3593 qla2x00_try_to_stop_firmware(vha);
3594
3595 /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
3596 rval = qla82xx_restart_isp(vha);
3597
3598 return rval;
3599}
3600
3601/*
3602 * qla2x00_wait_for_fcoe_ctx_reset
3603 * Wait till the FCoE context is reset.
3604 *
3605 * Note:
3606 * Does context switching here.
3607 * Release SPIN_LOCK (if any) before calling this routine.
3608 *
3609 * Return:
3610 * Success (fcoe_ctx reset is done) : 0
3611 * Failed (fcoe_ctx reset not completed within max loop timeout): 1
3612 */
3613int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
3614{
3615 int status = QLA_FUNCTION_FAILED;
3616 unsigned long wait_reset;
3617
3618 wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
3619 while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3620 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3621 && time_before(jiffies, wait_reset)) {
3622
3623 set_current_state(TASK_UNINTERRUPTIBLE);
3624 schedule_timeout(HZ);
3625
3626 if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
3627 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3628 status = QLA_SUCCESS;
3629 break;
3630 }
3631 }
3632 DEBUG2(printk(KERN_INFO
3633 "%s status=%d\n", __func__, status));
3634
3635 return status;
3636}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
new file mode 100644
index 000000000000..f8f99a5ea532
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -0,0 +1,889 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#ifndef __QLA_NX_H
8#define __QLA_NX_H
9
10/*
11 * Following are the states of the Phantom. The Phantom sets them and
12 * the host reads them to track initialization progress.
13 */
14#define PHAN_INITIALIZE_FAILED 0xffff
15#define PHAN_INITIALIZE_COMPLETE 0xff01
16
17/* Host writes the following to notify that it has done the init-handshake */
18#define PHAN_INITIALIZE_ACK 0xf00f
19#define PHAN_PEG_RCV_INITIALIZED 0xff01
20
21/*CRB_RELATED*/
22#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
23#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
24
25#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
29
30#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
31#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
32#define QLA82XX_HW_H2_CH_HUB_ADR 0x03
33#define QLA82XX_HW_H3_CH_HUB_ADR 0x01
34#define QLA82XX_HW_H4_CH_HUB_ADR 0x06
35#define QLA82XX_HW_H5_CH_HUB_ADR 0x07
36#define QLA82XX_HW_H6_CH_HUB_ADR 0x08
37
38/* Hub 0 */
39#define QLA82XX_HW_MN_CRB_AGT_ADR 0x15
40#define QLA82XX_HW_MS_CRB_AGT_ADR 0x25
41
42/* Hub 1 */
43#define QLA82XX_HW_PS_CRB_AGT_ADR 0x73
44#define QLA82XX_HW_QMS_CRB_AGT_ADR 0x00
45#define QLA82XX_HW_RPMX3_CRB_AGT_ADR 0x0b
46#define QLA82XX_HW_SQGS0_CRB_AGT_ADR 0x01
47#define QLA82XX_HW_SQGS1_CRB_AGT_ADR 0x02
48#define QLA82XX_HW_SQGS2_CRB_AGT_ADR 0x03
49#define QLA82XX_HW_SQGS3_CRB_AGT_ADR 0x04
50#define QLA82XX_HW_C2C0_CRB_AGT_ADR 0x58
51#define QLA82XX_HW_C2C1_CRB_AGT_ADR 0x59
52#define QLA82XX_HW_C2C2_CRB_AGT_ADR 0x5a
53#define QLA82XX_HW_RPMX2_CRB_AGT_ADR 0x0a
54#define QLA82XX_HW_RPMX4_CRB_AGT_ADR 0x0c
55#define QLA82XX_HW_RPMX7_CRB_AGT_ADR 0x0f
56#define QLA82XX_HW_RPMX9_CRB_AGT_ADR 0x12
57#define QLA82XX_HW_SMB_CRB_AGT_ADR 0x18
58
59/* Hub 2 */
60#define QLA82XX_HW_NIU_CRB_AGT_ADR 0x31
61#define QLA82XX_HW_I2C0_CRB_AGT_ADR 0x19
62#define QLA82XX_HW_I2C1_CRB_AGT_ADR 0x29
63
64#define QLA82XX_HW_SN_CRB_AGT_ADR 0x10
65#define QLA82XX_HW_I2Q_CRB_AGT_ADR 0x20
66#define QLA82XX_HW_LPC_CRB_AGT_ADR 0x22
67#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR 0x21
68#define QLA82XX_HW_QM_CRB_AGT_ADR 0x66
69#define QLA82XX_HW_SQG0_CRB_AGT_ADR 0x60
70#define QLA82XX_HW_SQG1_CRB_AGT_ADR 0x61
71#define QLA82XX_HW_SQG2_CRB_AGT_ADR 0x62
72#define QLA82XX_HW_SQG3_CRB_AGT_ADR 0x63
73#define QLA82XX_HW_RPMX1_CRB_AGT_ADR 0x09
74#define QLA82XX_HW_RPMX5_CRB_AGT_ADR 0x0d
75#define QLA82XX_HW_RPMX6_CRB_AGT_ADR 0x0e
76#define QLA82XX_HW_RPMX8_CRB_AGT_ADR 0x11
77
78/* Hub 3 */
79#define QLA82XX_HW_PH_CRB_AGT_ADR 0x1A
80#define QLA82XX_HW_SRE_CRB_AGT_ADR 0x50
81#define QLA82XX_HW_EG_CRB_AGT_ADR 0x51
82#define QLA82XX_HW_RPMX0_CRB_AGT_ADR 0x08
83
84/* Hub 4 */
85#define QLA82XX_HW_PEGN0_CRB_AGT_ADR 0x40
86#define QLA82XX_HW_PEGN1_CRB_AGT_ADR 0x41
87#define QLA82XX_HW_PEGN2_CRB_AGT_ADR 0x42
88#define QLA82XX_HW_PEGN3_CRB_AGT_ADR 0x43
89#define QLA82XX_HW_PEGNI_CRB_AGT_ADR 0x44
90#define QLA82XX_HW_PEGND_CRB_AGT_ADR 0x45
91#define QLA82XX_HW_PEGNC_CRB_AGT_ADR 0x46
92#define QLA82XX_HW_PEGR0_CRB_AGT_ADR 0x47
93#define QLA82XX_HW_PEGR1_CRB_AGT_ADR 0x48
94#define QLA82XX_HW_PEGR2_CRB_AGT_ADR 0x49
95#define QLA82XX_HW_PEGR3_CRB_AGT_ADR 0x4a
96#define QLA82XX_HW_PEGN4_CRB_AGT_ADR 0x4b
97
98/* Hub 5 */
99#define QLA82XX_HW_PEGS0_CRB_AGT_ADR 0x40
100#define QLA82XX_HW_PEGS1_CRB_AGT_ADR 0x41
101#define QLA82XX_HW_PEGS2_CRB_AGT_ADR 0x42
102#define QLA82XX_HW_PEGS3_CRB_AGT_ADR 0x43
103#define QLA82XX_HW_PEGSI_CRB_AGT_ADR 0x44
104#define QLA82XX_HW_PEGSD_CRB_AGT_ADR 0x45
105#define QLA82XX_HW_PEGSC_CRB_AGT_ADR 0x46
106
107/* Hub 6 */
108#define QLA82XX_HW_CAS0_CRB_AGT_ADR 0x46
109#define QLA82XX_HW_CAS1_CRB_AGT_ADR 0x47
110#define QLA82XX_HW_CAS2_CRB_AGT_ADR 0x48
111#define QLA82XX_HW_CAS3_CRB_AGT_ADR 0x49
112#define QLA82XX_HW_NCM_CRB_AGT_ADR 0x16
113#define QLA82XX_HW_TMR_CRB_AGT_ADR 0x17
114#define QLA82XX_HW_XDMA_CRB_AGT_ADR 0x05
115#define QLA82XX_HW_OCM0_CRB_AGT_ADR 0x06
116#define QLA82XX_HW_OCM1_CRB_AGT_ADR 0x07
117
118/* This field defines PCI/X adr [25:20] of agents on the CRB */
119/* */
120#define QLA82XX_HW_PX_MAP_CRB_PH 0
121#define QLA82XX_HW_PX_MAP_CRB_PS 1
122#define QLA82XX_HW_PX_MAP_CRB_MN 2
123#define QLA82XX_HW_PX_MAP_CRB_MS 3
124#define QLA82XX_HW_PX_MAP_CRB_SRE 5
125#define QLA82XX_HW_PX_MAP_CRB_NIU 6
126#define QLA82XX_HW_PX_MAP_CRB_QMN 7
127#define QLA82XX_HW_PX_MAP_CRB_SQN0 8
128#define QLA82XX_HW_PX_MAP_CRB_SQN1 9
129#define QLA82XX_HW_PX_MAP_CRB_SQN2 10
130#define QLA82XX_HW_PX_MAP_CRB_SQN3 11
131#define QLA82XX_HW_PX_MAP_CRB_QMS 12
132#define QLA82XX_HW_PX_MAP_CRB_SQS0 13
133#define QLA82XX_HW_PX_MAP_CRB_SQS1 14
134#define QLA82XX_HW_PX_MAP_CRB_SQS2 15
135#define QLA82XX_HW_PX_MAP_CRB_SQS3 16
136#define QLA82XX_HW_PX_MAP_CRB_PGN0 17
137#define QLA82XX_HW_PX_MAP_CRB_PGN1 18
138#define QLA82XX_HW_PX_MAP_CRB_PGN2 19
139#define QLA82XX_HW_PX_MAP_CRB_PGN3 20
140#define QLA82XX_HW_PX_MAP_CRB_PGN4 QLA82XX_HW_PX_MAP_CRB_SQS2
141#define QLA82XX_HW_PX_MAP_CRB_PGND 21
142#define QLA82XX_HW_PX_MAP_CRB_PGNI 22
143#define QLA82XX_HW_PX_MAP_CRB_PGS0 23
144#define QLA82XX_HW_PX_MAP_CRB_PGS1 24
145#define QLA82XX_HW_PX_MAP_CRB_PGS2 25
146#define QLA82XX_HW_PX_MAP_CRB_PGS3 26
147#define QLA82XX_HW_PX_MAP_CRB_PGSD 27
148#define QLA82XX_HW_PX_MAP_CRB_PGSI 28
149#define QLA82XX_HW_PX_MAP_CRB_SN 29
150#define QLA82XX_HW_PX_MAP_CRB_EG 31
151#define QLA82XX_HW_PX_MAP_CRB_PH2 32
152#define QLA82XX_HW_PX_MAP_CRB_PS2 33
153#define QLA82XX_HW_PX_MAP_CRB_CAM 34
154#define QLA82XX_HW_PX_MAP_CRB_CAS0 35
155#define QLA82XX_HW_PX_MAP_CRB_CAS1 36
156#define QLA82XX_HW_PX_MAP_CRB_CAS2 37
157#define QLA82XX_HW_PX_MAP_CRB_C2C0 38
158#define QLA82XX_HW_PX_MAP_CRB_C2C1 39
159#define QLA82XX_HW_PX_MAP_CRB_TIMR 40
160#define QLA82XX_HW_PX_MAP_CRB_RPMX1 42
161#define QLA82XX_HW_PX_MAP_CRB_RPMX2 43
162#define QLA82XX_HW_PX_MAP_CRB_RPMX3 44
163#define QLA82XX_HW_PX_MAP_CRB_RPMX4 45
164#define QLA82XX_HW_PX_MAP_CRB_RPMX5 46
165#define QLA82XX_HW_PX_MAP_CRB_RPMX6 47
166#define QLA82XX_HW_PX_MAP_CRB_RPMX7 48
167#define QLA82XX_HW_PX_MAP_CRB_XDMA 49
168#define QLA82XX_HW_PX_MAP_CRB_I2Q 50
169#define QLA82XX_HW_PX_MAP_CRB_ROMUSB 51
170#define QLA82XX_HW_PX_MAP_CRB_CAS3 52
171#define QLA82XX_HW_PX_MAP_CRB_RPMX0 53
172#define QLA82XX_HW_PX_MAP_CRB_RPMX8 54
173#define QLA82XX_HW_PX_MAP_CRB_RPMX9 55
174#define QLA82XX_HW_PX_MAP_CRB_OCM0 56
175#define QLA82XX_HW_PX_MAP_CRB_OCM1 57
176#define QLA82XX_HW_PX_MAP_CRB_SMB 58
177#define QLA82XX_HW_PX_MAP_CRB_I2C0 59
178#define QLA82XX_HW_PX_MAP_CRB_I2C1 60
179#define QLA82XX_HW_PX_MAP_CRB_LPC 61
180#define QLA82XX_HW_PX_MAP_CRB_PGNC 62
181#define QLA82XX_HW_PX_MAP_CRB_PGR0 63
182#define QLA82XX_HW_PX_MAP_CRB_PGR1 4
183#define QLA82XX_HW_PX_MAP_CRB_PGR2 30
184#define QLA82XX_HW_PX_MAP_CRB_PGR3 41
185
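
The window arithmetic implied by this table can be made concrete. The following standalone sketch (illustrative only, not part of the header; it re-derives values from QLA82XX_PCI_CRBSPACE and the 1 MB window size that QLA82XX_PCI_CRB_WINDOW() uses further below) shows how a PX map index selects a CRB window:

#include <stdio.h>

/* Values taken from this header: QLA82XX_PCI_CRBSPACE and the 1 MB
 * window size used by QLA82XX_PCI_CRB_WINDOW(). */
#define CRBSPACE   0x06000000UL
#define WINDOWSIZE 0x00100000UL

int main(void)
{
	/* QLA82XX_HW_PX_MAP_CRB_NIU == 6, so the NIU window begins at
	 * 0x06000000 + 6 * 0x00100000 = 0x06600000. */
	printf("NIU CRB window base: 0x%08lx\n", CRBSPACE + 6 * WINDOWSIZE);
	return 0;
}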
186/* This field defines the CRB address bits [31:20] of the agents, composed as (hub << 7) | agent. */
188
189#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
190 QLA82XX_HW_MN_CRB_AGT_ADR)
191#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
192 QLA82XX_HW_PH_CRB_AGT_ADR)
193#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
194 QLA82XX_HW_MS_CRB_AGT_ADR)
195#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
196 QLA82XX_HW_PS_CRB_AGT_ADR)
197#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
198 QLA82XX_HW_SS_CRB_AGT_ADR)
199#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
200 QLA82XX_HW_RPMX3_CRB_AGT_ADR)
201#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
202 QLA82XX_HW_QMS_CRB_AGT_ADR)
203#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
204 QLA82XX_HW_SQGS0_CRB_AGT_ADR)
205#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
206 QLA82XX_HW_SQGS1_CRB_AGT_ADR)
207#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
208 QLA82XX_HW_SQGS2_CRB_AGT_ADR)
209#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
210 QLA82XX_HW_SQGS3_CRB_AGT_ADR)
211#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
212 QLA82XX_HW_C2C0_CRB_AGT_ADR)
213#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
214 QLA82XX_HW_C2C1_CRB_AGT_ADR)
215#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
216 QLA82XX_HW_RPMX2_CRB_AGT_ADR)
217#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
218 QLA82XX_HW_RPMX4_CRB_AGT_ADR)
219#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
220 QLA82XX_HW_RPMX7_CRB_AGT_ADR)
221#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9 ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
222 QLA82XX_HW_RPMX9_CRB_AGT_ADR)
223#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
224 QLA82XX_HW_SMB_CRB_AGT_ADR)
225#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
226 QLA82XX_HW_NIU_CRB_AGT_ADR)
227#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
228 QLA82XX_HW_I2C0_CRB_AGT_ADR)
229#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1 ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
230 QLA82XX_HW_I2C1_CRB_AGT_ADR)
231#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
232 QLA82XX_HW_SRE_CRB_AGT_ADR)
233#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
234 QLA82XX_HW_EG_CRB_AGT_ADR)
235#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
236 QLA82XX_HW_RPMX0_CRB_AGT_ADR)
237#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
238 QLA82XX_HW_QM_CRB_AGT_ADR)
239#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
240 QLA82XX_HW_SQG0_CRB_AGT_ADR)
241#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
242 QLA82XX_HW_SQG1_CRB_AGT_ADR)
243#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
244 QLA82XX_HW_SQG2_CRB_AGT_ADR)
245#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
246 QLA82XX_HW_SQG3_CRB_AGT_ADR)
247#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
248 QLA82XX_HW_RPMX1_CRB_AGT_ADR)
249#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
250 QLA82XX_HW_RPMX5_CRB_AGT_ADR)
251#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
252 QLA82XX_HW_RPMX6_CRB_AGT_ADR)
253#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
254 QLA82XX_HW_RPMX8_CRB_AGT_ADR)
255#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
256 QLA82XX_HW_CAS0_CRB_AGT_ADR)
257#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
258 QLA82XX_HW_CAS1_CRB_AGT_ADR)
259#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
260 QLA82XX_HW_CAS2_CRB_AGT_ADR)
261#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3 ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
262 QLA82XX_HW_CAS3_CRB_AGT_ADR)
263#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
264 QLA82XX_HW_PEGNI_CRB_AGT_ADR)
265#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
266 QLA82XX_HW_PEGND_CRB_AGT_ADR)
267#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
268 QLA82XX_HW_PEGN0_CRB_AGT_ADR)
269#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
270 QLA82XX_HW_PEGN1_CRB_AGT_ADR)
271#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
272 QLA82XX_HW_PEGN2_CRB_AGT_ADR)
273#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
274 QLA82XX_HW_PEGN3_CRB_AGT_ADR)
275#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
276 QLA82XX_HW_PEGN4_CRB_AGT_ADR)
277#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
278 QLA82XX_HW_PEGNC_CRB_AGT_ADR)
279#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
280 QLA82XX_HW_PEGR0_CRB_AGT_ADR)
281#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
282 QLA82XX_HW_PEGR1_CRB_AGT_ADR)
283#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
284 QLA82XX_HW_PEGR2_CRB_AGT_ADR)
285#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3 ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
286 QLA82XX_HW_PEGR3_CRB_AGT_ADR)
287#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
288 QLA82XX_HW_PEGSI_CRB_AGT_ADR)
289#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
290 QLA82XX_HW_PEGSD_CRB_AGT_ADR)
291#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
292 QLA82XX_HW_PEGS0_CRB_AGT_ADR)
293#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
294 QLA82XX_HW_PEGS1_CRB_AGT_ADR)
295#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
296 QLA82XX_HW_PEGS2_CRB_AGT_ADR)
297#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3 ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
298 QLA82XX_HW_PEGS3_CRB_AGT_ADR)
299#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
300 QLA82XX_HW_PEGSC_CRB_AGT_ADR)
301#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
302 QLA82XX_HW_NCM_CRB_AGT_ADR)
303#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
304 QLA82XX_HW_TMR_CRB_AGT_ADR)
305#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
306 QLA82XX_HW_XDMA_CRB_AGT_ADR)
307#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
308 QLA82XX_HW_SN_CRB_AGT_ADR)
309#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
310 QLA82XX_HW_I2Q_CRB_AGT_ADR)
311#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
312 QLA82XX_HW_ROMUSB_CRB_AGT_ADR)
313#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
314 QLA82XX_HW_OCM0_CRB_AGT_ADR)
315#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1 ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
316 QLA82XX_HW_OCM1_CRB_AGT_ADR)
317#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
318 QLA82XX_HW_LPC_CRB_AGT_ADR)
319
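To make the composition explicit: each QLA82XX_HW_CRB_HUB_AGT_ADR_* value packs a hub address above a 7-bit agent address. A minimal standalone sketch (the hub value below is a placeholder for the example; the real QLA82XX_HW_Hn_CH_HUB_ADR constants are defined earlier in this header):

#include <stdio.h>

#define EXAMPLE_HUB_ADR 0x03	/* placeholder; see QLA82XX_HW_Hn_CH_HUB_ADR */
#define NIU_AGT_ADR     0x31	/* QLA82XX_HW_NIU_CRB_AGT_ADR from above */

int main(void)
{
	/* Hub in the upper bits, agent in bits [6:0]; the result is what
	 * the chip expects in CRB address bits [31:20]. */
	unsigned int hub_agt = (EXAMPLE_HUB_ADR << 7) | NIU_AGT_ADR;
	printf("CRB hub/agent field: 0x%03x\n", hub_agt);
	return 0;
}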
320#define ROMUSB_GLB (QLA82XX_CRB_ROMUSB + 0x00000)
321#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
322#define QLA82XX_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
323#define QLA82XX_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
324#define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
325
326#define ROMUSB_ROM (QLA82XX_CRB_ROMUSB + 0x10000)
327#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
328#define QLA82XX_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
329#define QLA82XX_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
330#define QLA82XX_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
331#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
332#define QLA82XX_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
333
334/* Lock IDs for ROM lock */
335#define ROM_LOCK_DRIVER 0x0d417340
336
337#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */
338#define QLA82XX_PCI_CRB_WINDOW(A) \
339 (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
340#define QLA82XX_CRB_C2C_0 \
341 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0)
342#define QLA82XX_CRB_C2C_1 \
343 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1)
344#define QLA82XX_CRB_C2C_2 \
345 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2)
346#define QLA82XX_CRB_CAM \
347 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM)
348#define QLA82XX_CRB_CASPER \
349 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS)
350#define QLA82XX_CRB_CASPER_0 \
351 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0)
352#define QLA82XX_CRB_CASPER_1 \
353 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1)
354#define QLA82XX_CRB_CASPER_2 \
355 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2)
356#define QLA82XX_CRB_DDR_MD \
357 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS)
358#define QLA82XX_CRB_DDR_NET \
359 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN)
360#define QLA82XX_CRB_EPG \
361 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG)
362#define QLA82XX_CRB_I2Q \
363 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q)
364#define QLA82XX_CRB_NIU \
365 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU)
366
367#define QLA82XX_CRB_PCIX_HOST \
368 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH)
369#define QLA82XX_CRB_PCIX_HOST2 \
370 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2)
371#define QLA82XX_CRB_PCIX_MD \
372 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS)
373#define QLA82XX_CRB_PCIE \
374 QLA82XX_CRB_PCIX_MD
375
376/* window 1 pcie slot */
377#define QLA82XX_CRB_PCIE2 \
378 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2)
379#define QLA82XX_CRB_PEG_MD_0 \
380 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0)
381#define QLA82XX_CRB_PEG_MD_1 \
382 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1)
383#define QLA82XX_CRB_PEG_MD_2 \
384 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2)
385#define QLA82XX_CRB_PEG_MD_3 \
386 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
389#define QLA82XX_CRB_PEG_MD_D \
390 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD)
391#define QLA82XX_CRB_PEG_MD_I \
392 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI)
393#define QLA82XX_CRB_PEG_NET_0 \
394 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0)
395#define QLA82XX_CRB_PEG_NET_1 \
396 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1)
397#define QLA82XX_CRB_PEG_NET_2 \
398 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2)
399#define QLA82XX_CRB_PEG_NET_3 \
400 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3)
401#define QLA82XX_CRB_PEG_NET_4 \
402 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4)
403#define QLA82XX_CRB_PEG_NET_D \
404 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND)
405#define QLA82XX_CRB_PEG_NET_I \
406 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI)
407#define QLA82XX_CRB_PQM_MD \
408 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS)
409#define QLA82XX_CRB_PQM_NET \
410 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN)
411#define QLA82XX_CRB_QDR_MD \
412 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS)
413#define QLA82XX_CRB_QDR_NET \
414 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN)
415#define QLA82XX_CRB_ROMUSB \
416 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB)
417#define QLA82XX_CRB_RPMX_0 \
418 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0)
419#define QLA82XX_CRB_RPMX_1 \
420 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1)
421#define QLA82XX_CRB_RPMX_2 \
422 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2)
423#define QLA82XX_CRB_RPMX_3 \
424 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3)
425#define QLA82XX_CRB_RPMX_4 \
426 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4)
427#define QLA82XX_CRB_RPMX_5 \
428 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5)
429#define QLA82XX_CRB_RPMX_6 \
430 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6)
431#define QLA82XX_CRB_RPMX_7 \
432 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7)
433#define QLA82XX_CRB_SQM_MD_0 \
434 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0)
435#define QLA82XX_CRB_SQM_MD_1 \
436 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1)
437#define QLA82XX_CRB_SQM_MD_2 \
438 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2)
439#define QLA82XX_CRB_SQM_MD_3 \
440 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3)
441#define QLA82XX_CRB_SQM_NET_0 \
442 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0)
443#define QLA82XX_CRB_SQM_NET_1 \
444 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1)
445#define QLA82XX_CRB_SQM_NET_2 \
446 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2)
447#define QLA82XX_CRB_SQM_NET_3 \
448 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3)
449#define QLA82XX_CRB_SRE \
450 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE)
451#define QLA82XX_CRB_TIMER \
452 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR)
453#define QLA82XX_CRB_XDMA \
454 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA)
455#define QLA82XX_CRB_I2C0 \
456 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0)
457#define QLA82XX_CRB_I2C1 \
458 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1)
459#define QLA82XX_CRB_OCM0 \
460 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0)
461#define QLA82XX_CRB_SMB \
462 QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB)
463#define QLA82XX_CRB_MAX \
464 QLA82XX_PCI_CRB_WINDOW(64)
465
466/*
467 * ====================== BASE ADDRESSES ON-CHIP ======================
468 * Base addresses of major components on-chip.
469 * ====================== BASE ADDRESSES ON-CHIP ======================
470 */
471#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL)
472#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
473
474/* IMBUS address bit used to indicate a host address. This bit is
475 * eliminated by the PCIe BAR and BAR select before presentation
476 * over PCIe. */
477/* Host memory via IMBUS */
478#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
479#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
480#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
481#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL)
482#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL)
483#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
484#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
485#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
486
487#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
488#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
489
490#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
491#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
492#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000
493#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff
494#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000
495#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
496#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
497
498/*
499 * Register offsets for MN
500 */
501#define MIU_CONTROL (0x000)
502#define MIU_TAG (0x004)
503#define MIU_TEST_AGT_CTRL (0x090)
504#define MIU_TEST_AGT_ADDR_LO (0x094)
505#define MIU_TEST_AGT_ADDR_HI (0x098)
506#define MIU_TEST_AGT_WRDATA_LO (0x0a0)
507#define MIU_TEST_AGT_WRDATA_HI (0x0a4)
508#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i)))
509#define MIU_TEST_AGT_RDDATA_LO (0x0a8)
510#define MIU_TEST_AGT_RDDATA_HI (0x0ac)
511#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i)))
512#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
513#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
514
515/* MIU_TEST_AGT_CTRL flags; these work for the SIU as well. */
516#define MIU_TA_CTL_START 1
517#define MIU_TA_CTL_ENABLE 2
518#define MIU_TA_CTL_WRITE 4
519#define MIU_TA_CTL_BUSY 8
520
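These control bits drive a simple start/poll handshake against the MN test agent. Below is a standalone sketch of that sequence (register access is stubbed here for illustration; in the driver the reads and writes go through its CRB accessors, with the offsets above taken relative to the MN window):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[0x100];	/* stand-in for MMIO space */
static uint32_t rd32(uint32_t off) { return fake_regs[off >> 2]; }
static void wr32(uint32_t off, uint32_t val) { fake_regs[off >> 2] = val; }

#define MIU_TEST_AGT_CTRL	0x090
#define MIU_TEST_AGT_ADDR_LO	0x094
#define MIU_TEST_AGT_ADDR_HI	0x098
#define MIU_TEST_AGT_RDDATA_LO	0x0a8
#define MIU_TEST_AGT_RDDATA_HI	0x0ac
#define MIU_TA_CTL_START	1
#define MIU_TA_CTL_ENABLE	2
#define MIU_TA_CTL_BUSY		8
#define MAX_CTL_CHECK		1000

int main(void)
{
	int i;

	wr32(MIU_TEST_AGT_ADDR_LO, 0x00100000);	/* target DDR address */
	wr32(MIU_TEST_AGT_ADDR_HI, 0);
	wr32(MIU_TEST_AGT_CTRL, MIU_TA_CTL_ENABLE);
	wr32(MIU_TEST_AGT_CTRL, MIU_TA_CTL_START | MIU_TA_CTL_ENABLE);
	for (i = 0; i < MAX_CTL_CHECK; i++)	/* wait for BUSY to clear */
		if (!(rd32(MIU_TEST_AGT_CTRL) & MIU_TA_CTL_BUSY))
			break;
	printf("read data: 0x%08x%08x\n",
	    rd32(MIU_TEST_AGT_RDDATA_HI), rd32(MIU_TEST_AGT_RDDATA_LO));
	return 0;
}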
521/* CAM RAM */
522# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
523# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
524
525#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
526#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff
527#define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24))
528#define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8))
529#define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac))
530#define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0))
531
532#define QLA82XX_CAMRAM_DB1 (QLA82XX_CAM_RAM(0x1b8))
533#define QLA82XX_CAMRAM_DB2 (QLA82XX_CAM_RAM(0x1bc))
534
535#define HALT_STATUS_UNRECOVERABLE 0x80000000
536#define HALT_STATUS_RECOVERABLE 0x40000000
537
538/* Driver Coexistence Defines */
539#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
540#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
541#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
542#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
543#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
544#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
546
547/* Every driver should use these device states. */
548#define QLA82XX_DEV_COLD 1
549#define QLA82XX_DEV_INITIALIZING 2
550#define QLA82XX_DEV_READY 3
551#define QLA82XX_DEV_NEED_RESET 4
552#define QLA82XX_DEV_NEED_QUIESCENT 5
553#define QLA82XX_DEV_FAILED 6
554#define QLA82XX_DEV_QUIESCENT 7
555#define MAX_STATES 8 /* Increment if new state added */
556
557#define QLA82XX_IDC_VERSION 1
558#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30
559#define QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT 10
560
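The states above back a simple polled handshake: a driver reads QLA82XX_CRB_DEV_STATE and waits, up to QLA82XX_ROM_DEV_INIT_TIMEOUT seconds, for another function to bring the device to QLA82XX_DEV_READY. A standalone sketch of that loop (the register read is stubbed; the real driver uses its own CRB read helper and sleeps between polls):

#include <stdio.h>

#define QLA82XX_DEV_READY		3
#define QLA82XX_ROM_DEV_INIT_TIMEOUT	30	/* seconds */

/* Stub standing in for a CRB read of QLA82XX_CRB_DEV_STATE. */
static int read_dev_state(void) { return QLA82XX_DEV_READY; }

int main(void)
{
	int elapsed;

	for (elapsed = 0; elapsed < QLA82XX_ROM_DEV_INIT_TIMEOUT; elapsed++) {
		if (read_dev_state() == QLA82XX_DEV_READY) {
			printf("device ready after %d s\n", elapsed);
			return 0;
		}
		/* the driver would sleep ~1 s here between polls */
	}
	printf("device init timed out\n");
	return 1;
}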
561#define QLA82XX_ROM_LOCK_ID (QLA82XX_CAM_RAM(0x100))
562#define QLA82XX_CRB_WIN_LOCK_ID (QLA82XX_CAM_RAM(0x124))
563#define QLA82XX_FW_VERSION_MAJOR (QLA82XX_CAM_RAM(0x150))
564#define QLA82XX_FW_VERSION_MINOR (QLA82XX_CAM_RAM(0x154))
565#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
566#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
567
568#define PCIE_CHICKEN3 (0x120c8)
569#define PCIE_SETUP_FUNCTION (0x12040)
570#define PCIE_SETUP_FUNCTION2 (0x12048)
571
572#define QLA82XX_PCIX_PS_REG(reg) (QLA82XX_CRB_PCIX_MD + (reg))
573#define QLA82XX_PCIX_PS2_REG(reg) (QLA82XX_CRB_PCIE2 + (reg))
574
575#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
576#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
577#define PCIE_SEM5_LOCK (0x1c028) /* Coexistence lock */
578#define PCIE_SEM5_UNLOCK (0x1c02c) /* Coexistence unlock */
579#define PCIE_SEM7_LOCK (0x1c038) /* CRB window lock */
580#define PCIE_SEM7_UNLOCK (0x1c03c) /* CRB window unlock */
581
582/* Driver states */
583#define QLA82XX_DRVST_NOT_RDY 0
584#define QLA82XX_DRVST_RST_RDY 1
585#define QLA82XX_DRVST_QSNT_RDY 2
586
587/*
588 * The PCI DeviceID for our board.
589 */
590#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021
591
592#define QLA82XX_MSIX_TBL_SPACE 8192
593#define QLA82XX_PCI_REG_MSIX_TBL 0x44
594#define QLA82XX_PCI_MSIX_CONTROL 0x40
595
596struct crb_128M_2M_sub_block_map {
597 unsigned valid;
598 unsigned start_128M;
599 unsigned end_128M;
600 unsigned start_2M;
601};
602
603struct crb_128M_2M_block_map {
604 struct crb_128M_2M_sub_block_map sub_block[16];
605};
606
607struct crb_addr_pair {
608 long addr;
609 long data;
610};
611
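The sub-block map encodes how the legacy 128M CRB address space folds into the 2M PCI window. A standalone sketch of the translation one entry performs (the mapping values below are made up for the example; the driver keeps a full table of such entries):

#include <stdio.h>

struct crb_128M_2M_sub_block_map {
	unsigned valid;
	unsigned start_128M;
	unsigned end_128M;
	unsigned start_2M;
};

static long map_to_2M(const struct crb_128M_2M_sub_block_map *m,
    unsigned off_128M)
{
	if (!m->valid || off_128M < m->start_128M || off_128M >= m->end_128M)
		return -1;	/* not covered by this sub-block */
	return (long)(m->start_2M + (off_128M - m->start_128M));
}

int main(void)
{
	/* Sample entry: one 64 KB sub-block. */
	struct crb_128M_2M_sub_block_map m = {
		.valid = 1, .start_128M = 0x100000,
		.end_128M = 0x110000, .start_2M = 0x020000,
	};

	printf("0x100400 -> 0x%06lx\n", map_to_2M(&m, 0x100400));
	return 0;
}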
612#define ADDR_ERROR ((unsigned long) 0xffffffff)
613#define MAX_CTL_CHECK 1000
614
615/***************************************************************************
616 * PCI related defines.
617 **************************************************************************/
618
619/*
620 * Interrupt related defines.
621 */
622#define PCIX_TARGET_STATUS (0x10118)
623#define PCIX_TARGET_STATUS_F1 (0x10160)
624#define PCIX_TARGET_STATUS_F2 (0x10164)
625#define PCIX_TARGET_STATUS_F3 (0x10168)
626#define PCIX_TARGET_STATUS_F4 (0x10360)
627#define PCIX_TARGET_STATUS_F5 (0x10364)
628#define PCIX_TARGET_STATUS_F6 (0x10368)
629#define PCIX_TARGET_STATUS_F7 (0x1036c)
630
631#define PCIX_TARGET_MASK (0x10128)
632#define PCIX_TARGET_MASK_F1 (0x10170)
633#define PCIX_TARGET_MASK_F2 (0x10174)
634#define PCIX_TARGET_MASK_F3 (0x10178)
635#define PCIX_TARGET_MASK_F4 (0x10370)
636#define PCIX_TARGET_MASK_F5 (0x10374)
637#define PCIX_TARGET_MASK_F6 (0x10378)
638#define PCIX_TARGET_MASK_F7 (0x1037c)
639
640/*
641 * Message Signaled Interrupts
642 */
643#define PCIX_MSI_F0 (0x13000)
644#define PCIX_MSI_F1 (0x13004)
645#define PCIX_MSI_F2 (0x13008)
646#define PCIX_MSI_F3 (0x1300c)
647#define PCIX_MSI_F4 (0x13010)
648#define PCIX_MSI_F5 (0x13014)
649#define PCIX_MSI_F6 (0x13018)
650#define PCIX_MSI_F7 (0x1301c)
651#define PCIX_MSI_F(FUNC) (0x13000 + ((FUNC) * 4))
652#define PCIX_INT_VECTOR (0x10100)
653#define PCIX_INT_MASK (0x10104)
654
655/*
656 * Interrupt state machine and other bits.
657 */
658#define PCIE_MISCCFG_RC (0x1206c)
659
660#define ISR_INT_TARGET_STATUS \
661 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
662#define ISR_INT_TARGET_STATUS_F1 \
663 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
664#define ISR_INT_TARGET_STATUS_F2 \
665 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
666#define ISR_INT_TARGET_STATUS_F3 \
667 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
668#define ISR_INT_TARGET_STATUS_F4 \
669 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
670#define ISR_INT_TARGET_STATUS_F5 \
671 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
672#define ISR_INT_TARGET_STATUS_F6 \
673 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
674#define ISR_INT_TARGET_STATUS_F7 \
675 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
676
677#define ISR_INT_TARGET_MASK \
678 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
679#define ISR_INT_TARGET_MASK_F1 \
680 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
681#define ISR_INT_TARGET_MASK_F2 \
682 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
683#define ISR_INT_TARGET_MASK_F3 \
684 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
685#define ISR_INT_TARGET_MASK_F4 \
686 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
687#define ISR_INT_TARGET_MASK_F5 \
688 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
689#define ISR_INT_TARGET_MASK_F6 \
690 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
691#define ISR_INT_TARGET_MASK_F7 \
692 (QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
693
694#define ISR_INT_VECTOR \
695 (QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
696#define ISR_INT_MASK \
697 (QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
698#define ISR_INT_STATE_REG \
699 (QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
700
701#define ISR_MSI_INT_TRIGGER(FUNC) \
702 (QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
703
704#define ISR_IS_LEGACY_INTR_IDLE(VAL) (((VAL) & 0x300) == 0)
705#define ISR_IS_LEGACY_INTR_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
706
707/*
708 * PCI Interrupt Vector Values.
709 */
710#define PCIX_INT_VECTOR_BIT_F0 0x0080
711#define PCIX_INT_VECTOR_BIT_F1 0x0100
712#define PCIX_INT_VECTOR_BIT_F2 0x0200
713#define PCIX_INT_VECTOR_BIT_F3 0x0400
714#define PCIX_INT_VECTOR_BIT_F4 0x0800
715#define PCIX_INT_VECTOR_BIT_F5 0x1000
716#define PCIX_INT_VECTOR_BIT_F6 0x2000
717#define PCIX_INT_VECTOR_BIT_F7 0x4000
718
719struct qla82xx_legacy_intr_set {
720 uint32_t int_vec_bit;
721 uint32_t tgt_status_reg;
722 uint32_t tgt_mask_reg;
723 uint32_t pci_int_reg;
724};
725
726#define QLA82XX_LEGACY_INTR_CONFIG \
727{ \
728 { \
729 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
730 .tgt_status_reg = ISR_INT_TARGET_STATUS, \
731 .tgt_mask_reg = ISR_INT_TARGET_MASK, \
732 .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
733 \
734 { \
735 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
736 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
737 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
738 .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
739 \
740 { \
741 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
742 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
743 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
744 .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
745 \
746 { \
747 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
748 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
749 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
750 .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
751 \
752 { \
753 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
754 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
755 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
756 .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
757 \
758 { \
759 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
760 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
761 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
762 .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
763 \
764 { \
765 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
766 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
767 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
768 .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
769 \
770 { \
771 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
772 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
773 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
774 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
775}
776
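In use, QLA82XX_LEGACY_INTR_CONFIG initializes a per-function lookup table, and the interrupt handler indexes it by PCI function to find its vector bit and status/mask registers. A sketch of that pattern (reduced here to the vector-bit field, with the register offsets elided, so it stays self-contained):

#include <stdint.h>
#include <stdio.h>

struct legacy_intr_set {
	uint32_t int_vec_bit;
};

/* Reduced stand-in for QLA82XX_LEGACY_INTR_CONFIG: the vector bits
 * for PCI functions 0..7 as defined above. */
static const struct legacy_intr_set legacy_intr[8] = {
	{ 0x0080 }, { 0x0100 }, { 0x0200 }, { 0x0400 },
	{ 0x0800 }, { 0x1000 }, { 0x2000 }, { 0x4000 },
};

int main(void)
{
	unsigned int pci_func = 2;	/* example function number */

	/* An ISR checks its own bit in PCIX_INT_VECTOR before claiming
	 * the interrupt, then acks via its per-function registers. */
	printf("function %u uses vector bit 0x%04x\n",
	    pci_func, legacy_intr[pci_func].int_vec_bit);
	return 0;
}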
777#define BOOTLD_START 0x10000
778#define IMAGE_START 0x100000
779#define FLASH_ADDR_START 0x43000
780
781/* Magic number to let user know flash is programmed */
782#define QLA82XX_BDINFO_MAGIC 0x12345678
783#define FW_SIZE_OFFSET (0x3e840c)
784
785#define QLA82XX_IS_REVISION_P3PLUS(_rev_) ((_rev_) >= 0x50)
786#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
787#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
788
789#ifndef readq
790static inline u64 readq(void __iomem *addr)
791{
792 return readl(addr) | (((u64) readl(addr + 4)) << 32LL); /* two 32-bit reads; not atomic */
793}
794#endif
795
796#ifndef writeq
797static inline void writeq(u64 val, void __iomem *addr)
798{
799 writel(((u32) (val)), (addr));
800 writel(((u32) (val >> 32)), (addr + 4));
801}
802#endif
803
804/* Request and response queue size */
805#define REQUEST_ENTRY_CNT_82XX 128 /* Number of request entries. */
806#define RESPONSE_ENTRY_CNT_82XX 128 /* Number of response entries.*/
807
808/*
809 * ISP 8021 I/O Register Set structure definitions.
810 */
811struct device_reg_82xx {
812 uint32_t req_q_out[64]; /* Request Queue Out-Pointer (64 * 4 bytes) */
813 uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */
814 uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */
815
816 uint16_t mailbox_in[32]; /* Mailbox In registers */
817 uint16_t unused_1[32];
818 uint32_t hint; /* Host interrupt register */
819#define HINT_MBX_INT_PENDING BIT_0
820 uint16_t unused_2[62];
821 uint16_t mailbox_out[32]; /* Mailbox Out registers */
822 uint32_t unused_3[48];
823
824 uint32_t host_status; /* host status */
825#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
826#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
827 uint32_t host_int; /* Interrupt status. */
828#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */
829};
830
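Putting the bit definitions below to work: an interrupt handler first samples host_status, bails out if the RISC is not interrupting, and treats HSRX_RISC_PAUSED as an error path. A standalone sketch (the BIT_n macros and the register read are stubbed in for illustration; the driver reads host_status from device_reg_82xx over MMIO):

#include <stdint.h>
#include <stdio.h>

#define BIT_8	(1u << 8)
#define BIT_15	(1u << 15)
#define HSRX_RISC_INT		BIT_15	/* RISC to Host interrupt */
#define HSRX_RISC_PAUSED	BIT_8	/* RISC paused */

/* Stub for reading device_reg_82xx.host_status. */
static uint32_t read_host_status(void) { return HSRX_RISC_INT; }

int main(void)
{
	uint32_t stat = read_host_status();

	if (!(stat & HSRX_RISC_INT)) {
		printf("not our interrupt\n");
		return 0;
	}
	if (stat & HSRX_RISC_PAUSED)
		printf("RISC paused - error recovery needed\n");
	else
		printf("normal completion path\n");
	return 0;
}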
831struct fcp_cmnd {
832 struct scsi_lun lun;
833 uint8_t crn;
834 uint8_t task_attribute;
835 uint8_t task_managment;
836 uint8_t additional_cdb_len;
837 uint8_t cdb[260]; /* 256 bytes for the CDB plus 4 for FCP_DL */
838};
839
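The variable-length CDB layout above implies how the transmitted FCP_CMND length is computed: the 12 fixed bytes before the CDB, the CDB itself, and the trailing 4-byte FCP_DL. A worked sketch (the 16-byte base CDB and this arithmetic mirror how the driver sizes the payload; treat it as illustrative rather than a quote of driver code):

#include <stdio.h>

int main(void)
{
	unsigned int cdb_len = 32;	/* example: a 32-byte CDB */
	unsigned int extra_cdb = cdb_len - 16;	/* bytes beyond the base CDB */

	/* 12 fixed header bytes (lun/crn/attrs/len) + CDB + 4-byte FCP_DL */
	unsigned int fcp_cmnd_len = 12 + cdb_len + 4;

	printf("extra CDB bytes=%u, fcp_cmnd_len=%u\n",
	    extra_cdb, fcp_cmnd_len);
	return 0;
}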
840struct dsd_dma {
841 struct list_head list;
842 dma_addr_t dsd_list_dma;
843 void *dsd_addr;
844};
845
846#define QLA_DSDS_PER_IOCB 37
847#define QLA_DSD_SIZE 12
848struct ct6_dsd {
849 uint16_t fcp_cmnd_len;
850 dma_addr_t fcp_cmnd_dma;
851 struct fcp_cmnd *fcp_cmnd;
852 int dsd_use_cnt;
853 struct list_head dsd_list;
854};
855
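QLA_DSDS_PER_IOCB and QLA_DSD_SIZE drive a simple sizing computation: each continuation descriptor list holds 37 twelve-byte DSDs, so the number of lists a command needs is a ceiling division over its scatter/gather count. Worked out standalone:

#include <stdio.h>

#define QLA_DSDS_PER_IOCB 37
#define QLA_DSD_SIZE      12

int main(void)
{
	unsigned int tot_dsds = 100;	/* example S/G element count */
	unsigned int dsd_lists =
	    (tot_dsds + QLA_DSDS_PER_IOCB - 1) / QLA_DSDS_PER_IOCB;

	printf("%u DSDs -> %u list(s), %u bytes per full list\n",
	    tot_dsds, dsd_lists, QLA_DSDS_PER_IOCB * QLA_DSD_SIZE);
	return 0;
}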
856#define MBC_TOGGLE_INTR 0x10
857
858/* Flash offset */
859#define FLT_REG_BOOTLOAD_82XX 0x72
860#define FLT_REG_BOOT_CODE_82XX 0x78
861#define FLT_REG_FW_82XX 0x74
862#define FLT_REG_GOLD_FW_82XX 0x75
863#define FLT_REG_VPD_82XX 0x81
864
865#define FA_VPD_SIZE_82XX 0x400
866
867#define FA_FLASH_LAYOUT_ADDR_82 0xFC400
868
869/******************************************************************************
870 *
871 * Definitions specific to M25P flash
872 *
873 ******************************************************************************/
874
875/* Instructions */
876#define M25P_INSTR_WREN 0x06
877#define M25P_INSTR_WRDI 0x04
878#define M25P_INSTR_RDID 0x9f
879#define M25P_INSTR_RDSR 0x05
880#define M25P_INSTR_WRSR 0x01
881#define M25P_INSTR_READ 0x03
882#define M25P_INSTR_FAST_READ 0x0b
883#define M25P_INSTR_PP 0x02
884#define M25P_INSTR_SE 0xd8
885#define M25P_INSTR_BE 0xc7
886#define M25P_INSTR_DP 0xb9
887#define M25P_INSTR_RES 0xab
888
889#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48c37e38ed01..be1a8fcbb1fb 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -24,11 +24,18 @@
  */
 char qla2x00_version_str[40];
 
+static int apidev_major;
+
 /*
  * SRB allocation cache
  */
 static struct kmem_cache *srb_cachep;
 
+/*
+ * CT6 CTX allocation cache
+ */
+static struct kmem_cache *ctx_cachep;
+
 int ql2xlogintimeout = 20;
 module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xlogintimeout,
@@ -65,13 +72,19 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
 		"Option to enable extended error logging, "
 		"Default is 0 - no logging. 1 - log errors.");
 
+int ql2xshiftctondsd = 6;
+module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xshiftctondsd,
+		"Set to control shifting of command type processing "
+		"based on total number of SG elements.");
+
 static void qla2x00_free_device(scsi_qla_host_t *);
 
 int ql2xfdmienable=1;
 module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xfdmienable,
-		"Enables FDMI registratons "
-		"Default is 0 - no FDMI. 1 - perfom FDMI.");
+		"Enables FDMI registrations. "
+		"0 - no FDMI. Default is 1 - perform FDMI.");
 
 #define MAX_Q_DEPTH	32
 static int ql2xmaxqdepth = MAX_Q_DEPTH;
@@ -79,6 +92,19 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
 		"Maximum queue depth to report for target devices.");
 
+/* Do not change the value of this after module load */
+int ql2xenabledif = 1;
+module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenabledif,
+		" Enable T10-CRC-DIF "
+		" Default is 0 - No DIF Support. 1 - Enable it");
+
+int ql2xenablehba_err_chk;
+module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenablehba_err_chk,
+		" Enable T10-CRC-DIF Error isolation by HBA"
+		" Default is 0 - Error isolation disabled, 1 - Enable it");
+
 int ql2xiidmaenable=1;
 module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xiidmaenable,
@@ -114,6 +140,32 @@ MODULE_PARM_DESC(ql2xetsenable,
 		"Enables firmware ETS burst."
 		"Default is 0 - skip ETS enablement.");
 
+int ql2xdbwr;
+module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdbwr,
+	"Option to specify scheme for request queue posting\n"
+	" 0 -- Regular doorbell.\n"
+	" 1 -- CAMRAM doorbell (faster).\n");
+
+int ql2xdontresethba;
+module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xdontresethba,
+	"Option to specify reset behaviour\n"
+	" 0 (Default) -- Reset on failure.\n"
+	" 1 -- Do not reset on failure.\n");
+
+int ql2xtargetreset = 1;
+module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xtargetreset,
+	"Enable target reset."
+	"Default is 1 - use hw defaults.");
+
+
+int ql2xasynctmfenable;
+module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xasynctmfenable,
+	"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
+	"Default is 0 - Issue TM IOCBs via mailbox mechanism.");
 /*
  * SCSI host template entry points
  */
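All of the parameters added above are tunable at load time; for example, `modprobe qla2xxx ql2xdbwr=1 ql2xdontresethba=1` would select the CAMRAM doorbell scheme and suppress automatic resets on failure (illustrative invocation, assuming the qla2xxx module name used throughout this driver).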
@@ -183,6 +235,10 @@ qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
 static inline void
 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
 {
+	/* Currently used for 82XX only. */
+	if (vha->device_flags & DFLG_DEV_FAILED)
+		return;
+
 	mod_timer(&vha->timer, jiffies + interval * HZ);
 }
 
@@ -500,6 +556,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 	if (fcport->drport)
 		goto qc24_target_busy;
 
+	if (!vha->flags.difdix_supported &&
+	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+		DEBUG2(qla_printk(KERN_ERR, ha,
+		    "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
+		    cmd->cmnd[0]));
+		cmd->result = DID_NO_CONNECT << 16;
+		goto qc24_fail_command;
+	}
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
 		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
@@ -618,6 +682,50 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
 	return (return_status);
 }
 
+/*
+ * qla2x00_wait_for_reset_ready
+ *    Wait till the HBA is online after going through
+ *    <= MAX_RETRIES_OF_ISP_ABORT, or until the HBA is disabled
+ *    (i.e. marked offline) or flash operations are in progress.
+ *
+ * Input:
+ *    ha - pointer to host adapter structure
+ *
+ * Note:
+ *    Does context switching - release the SPIN_LOCK
+ *    (if any) before calling this routine.
+ *
+ * Return:
+ *    Success (Adapter is online/no flash ops) : 0
+ *    Failed  (Adapter is offline/disabled/flash ops in progress) : 1
+ */
+int
+qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
+{
+	int return_status;
+	unsigned long wait_online;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+	    ha->optrom_state != QLA_SWAITING ||
+	    ha->dpc_active) && time_before(jiffies, wait_online))
+		msleep(1000);
+
+	if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING)
+		return_status = QLA_SUCCESS;
+	else
+		return_status = QLA_FUNCTION_FAILED;
+
+	DEBUG2(printk("%s return_status=%d\n", __func__, return_status));
+
+	return return_status;
+}
+
 int
 qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
 {
@@ -739,7 +847,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 
 		if (sp == NULL)
 			continue;
-		if (sp->ctx)
+		if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
+		    !IS_PROT_IO(sp))
 			continue;
 		if (sp->cmd != cmd)
 			continue;
@@ -805,7 +914,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
 		sp = req->outstanding_cmds[cnt];
 		if (!sp)
 			continue;
-		if (sp->ctx)
+		if ((sp->ctx) && !IS_PROT_IO(sp))
 			continue;
 		if (vha->vp_idx != sp->fcport->vha->vp_idx)
 			continue;
@@ -834,6 +943,24 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
 	return status;
 }
 
+void qla82xx_wait_for_pending_commands(scsi_qla_host_t *vha)
+{
+	int cnt;
+	srb_t *sp;
+	struct req_que *req = vha->req;
+
+	DEBUG2(qla_printk(KERN_INFO, vha->hw,
+		"Waiting for pending commands\n"));
+	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+		sp = req->outstanding_cmds[cnt];
+		if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+			sp, WAIT_HOST) == QLA_SUCCESS) {
+			DEBUG2(qla_printk(KERN_INFO, vha->hw,
+				"Done wait for pending commands\n"));
+		}
+	}
+}
+
 static char *reset_errors[] = {
 	"HBA not online",
 	"HBA not ready",
@@ -1004,7 +1131,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 	qla_printk(KERN_INFO, ha,
 	    "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
 
-	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+	if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
 		goto eh_host_reset_lock;
 
 	/*
@@ -1020,11 +1147,19 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 		if (qla2x00_vp_abort_isp(vha))
 			goto eh_host_reset_lock;
 	} else {
+		if (IS_QLA82XX(vha->hw)) {
+			if (!qla82xx_fcoe_ctx_reset(vha)) {
+				/* Ctx reset success */
+				ret = SUCCESS;
+				goto eh_host_reset_lock;
+			}
+			/* fall thru if ctx reset failed */
+		}
 		if (ha->wq)
 			flush_workqueue(ha->wq);
 
 		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
-		if (qla2x00_abort_isp(base_vha)) {
+		if (ha->isp_ops->abort_isp(base_vha)) {
 			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 			/* failed. schedule dpc to try */
 			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
@@ -1064,7 +1199,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 	struct fc_port *fcport;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (ha->flags.enable_target_reset) {
+	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (fcport->port_type != FCT_TARGET)
 				continue;
@@ -1078,7 +1213,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 		}
 	}
 
-	if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
+	if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
 		ret = qla2x00_full_login_lip(vha);
 		if (ret != QLA_SUCCESS) {
 			DEBUG2_3(printk("%s(%ld): failed: "
@@ -1125,23 +1260,28 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 			sp = req->outstanding_cmds[cnt];
 			if (sp) {
 				req->outstanding_cmds[cnt] = NULL;
-				if (!sp->ctx) {
+				if (!sp->ctx ||
+				    (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
+				    IS_PROT_IO(sp)) {
 					sp->cmd->result = res;
 					qla2x00_sp_compl(ha, sp);
 				} else {
 					ctx = sp->ctx;
-					if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
-						del_timer_sync(&ctx->timer);
-						ctx->free(sp);
+					if (ctx->type == SRB_LOGIN_CMD ||
+					    ctx->type == SRB_LOGOUT_CMD) {
+						ctx->u.iocb_cmd->free(sp);
 					} else {
-						struct srb_bsg* sp_bsg = (struct srb_bsg*)sp->ctx;
-						if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
+						struct fc_bsg_job *bsg_job =
+						    ctx->u.bsg_job;
+						if (bsg_job->request->msgcode
+						    == FC_BSG_HST_CT)
 							kfree(sp->fcport);
-						sp_bsg->bsg_job->req->errors = 0;
-						sp_bsg->bsg_job->reply->result = res;
-						sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
+						bsg_job->req->errors = 0;
+						bsg_job->reply->result = res;
+						bsg_job->job_done(bsg_job);
 						kfree(sp->ctx);
-						mempool_free(sp, ha->srb_mempool);
+						mempool_free(sp,
+						    ha->srb_mempool);
 					}
 				}
 			}
@@ -1379,6 +1519,7 @@ static struct isp_operations qla2100_isp_ops = {
 	.write_optrom		= qla2x00_write_optrom_data,
 	.get_flash_version	= qla2x00_get_flash_version,
 	.start_scsi		= qla2x00_start_scsi,
+	.abort_isp		= qla2x00_abort_isp,
 };
 
 static struct isp_operations qla2300_isp_ops = {
@@ -1414,6 +1555,7 @@ static struct isp_operations qla2300_isp_ops = {
 	.write_optrom		= qla2x00_write_optrom_data,
 	.get_flash_version	= qla2x00_get_flash_version,
 	.start_scsi		= qla2x00_start_scsi,
+	.abort_isp		= qla2x00_abort_isp,
 };
 
 static struct isp_operations qla24xx_isp_ops = {
@@ -1449,6 +1591,7 @@ static struct isp_operations qla24xx_isp_ops = {
 	.write_optrom		= qla24xx_write_optrom_data,
 	.get_flash_version	= qla24xx_get_flash_version,
 	.start_scsi		= qla24xx_start_scsi,
+	.abort_isp		= qla2x00_abort_isp,
 };
 
 static struct isp_operations qla25xx_isp_ops = {
@@ -1483,7 +1626,8 @@ static struct isp_operations qla25xx_isp_ops = {
 	.read_optrom		= qla25xx_read_optrom_data,
 	.write_optrom		= qla24xx_write_optrom_data,
 	.get_flash_version	= qla24xx_get_flash_version,
-	.start_scsi		= qla24xx_start_scsi,
+	.start_scsi		= qla24xx_dif_start_scsi,
+	.abort_isp		= qla2x00_abort_isp,
 };
 
 static struct isp_operations qla81xx_isp_ops = {
@@ -1519,6 +1663,43 @@ static struct isp_operations qla81xx_isp_ops = {
 	.write_optrom		= qla24xx_write_optrom_data,
 	.get_flash_version	= qla24xx_get_flash_version,
 	.start_scsi		= qla24xx_start_scsi,
+	.abort_isp		= qla2x00_abort_isp,
+};
+
+static struct isp_operations qla82xx_isp_ops = {
+	.pci_config		= qla82xx_pci_config,
+	.reset_chip		= qla82xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla82xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla81xx_nvram_config,
+	.update_fw_options	= qla24xx_update_fw_options,
+	.load_risc		= qla82xx_load_risc,
+	.pci_info_str		= qla82xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla82xx_intr_handler,
+	.enable_intrs		= qla82xx_enable_intrs,
+	.disable_intrs		= qla82xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= qla24xx_read_nvram_data,
+	.write_nvram		= qla24xx_write_nvram_data,
+	.fw_dump		= qla24xx_fw_dump,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= qla24xx_beacon_blink,
+	.read_optrom		= qla82xx_read_optrom_data,
+	.write_optrom		= qla82xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla82xx_start_scsi,
+	.abort_isp		= qla82xx_abort_isp,
 };
 
 static inline void
@@ -1607,10 +1788,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
 		ha->device_type |= DT_IIDMA;
 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
 		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP8021:
+		ha->device_type |= DT_ISP8021;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		/* Initialize 82XX ISP flags */
+		qla82xx_init_flags(ha);
+		break;
 	}
 
-	/* Get adapter physical port no from interrupt pin register. */
-	pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+	if (IS_QLA82XX(ha))
+		ha->port_no = !(ha->portnum & 1);
+	else
+		/* Get adapter physical port no from interrupt pin register. */
+		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+
 	if (ha->port_no & 1)
 		ha->flags.port0 = 1;
 	else
@@ -1624,6 +1817,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 	uint16_t msix;
 	int cpus;
 
+	if (IS_QLA82XX(ha))
+		return qla82xx_iospace_config(ha);
+
 	if (pci_request_selected_regions(ha->pdev, ha->bars,
 	    QLA2XXX_DRIVER_NAME)) {
 		qla_printk(KERN_WARNING, ha,
@@ -1767,7 +1963,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
-	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 		mem_only = 1;
 	}
@@ -1897,6 +2094,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
 		ha->nvram_conf_off = ~0;
 		ha->nvram_data_off = ~0;
+	} else if (IS_QLA82XX(ha)) {
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_82XX;
+		rsp_length = RESPONSE_ENTRY_CNT_82XX;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_82XX;
+		ha->isp_ops = &qla82xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
 	}
 
 	mutex_init(&ha->vport_lock);
@@ -1969,6 +2179,7 @@ que_init:
 		    " pointers\n");
 		goto probe_init_failed;
 	}
+
 	ha->rsp_q_map[0] = rsp;
 	ha->req_q_map[0] = req;
 	rsp->req = req;
@@ -1987,6 +2198,12 @@ que_init:
 		rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
 	}
 
+	if (IS_QLA82XX(ha)) {
+		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
+		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
+		rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
+	}
+
 	if (qla2x00_initialize_adapter(base_vha)) {
 		qla_printk(KERN_WARNING, ha,
 		    "Failed to initialize adapter\n");
@@ -1995,6 +2212,14 @@ que_init:
 		    "Adapter flags %x.\n",
 		    base_vha->host_no, base_vha->device_flags));
 
+		if (IS_QLA82XX(ha)) {
+			qla82xx_idc_lock(ha);
+			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+				QLA82XX_DEV_FAILED);
+			qla82xx_idc_unlock(ha);
+			qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+		}
+
 		ret = -ENODEV;
 		goto probe_failed;
 	}
@@ -2033,6 +2258,24 @@ skip_dpc:
 	DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
 	    base_vha->host_no, ha));
 
+	if (IS_QLA25XX(ha) && ql2xenabledif) {
+		if (ha->fw_attributes & BIT_4) {
+			base_vha->flags.difdix_supported = 1;
+			DEBUG18(qla_printk(KERN_INFO, ha,
+			    "Registering for DIF/DIX type 1 and 3"
+			    " protection.\n"));
+			scsi_host_set_prot(host,
+			    SHOST_DIF_TYPE1_PROTECTION
+			    | SHOST_DIF_TYPE3_PROTECTION
+			    | SHOST_DIX_TYPE1_PROTECTION
+			    | SHOST_DIX_TYPE3_PROTECTION);
+			scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC);
+		} else
+			base_vha->flags.difdix_supported = 0;
+	}
+
+	ha->isp_ops->enable_intrs(ha);
+
 	ret = scsi_add_host(host, &pdev->dev);
 	if (ret)
 		goto probe_failed;
@@ -2040,8 +2283,6 @@ skip_dpc:
 	base_vha->flags.init_done = 1;
 	base_vha->flags.online = 1;
 
-	ha->isp_ops->enable_intrs(ha);
-
 	scsi_scan_host(host);
 
 	qla2x00_alloc_sysfs_attr(base_vha);
@@ -2083,9 +2324,17 @@ probe_failed:
 	scsi_host_put(base_vha->host);
 
 probe_hw_failed:
-	if (ha->iobase)
-		iounmap(ha->iobase);
-
+	if (IS_QLA82XX(ha)) {
+		qla82xx_idc_lock(ha);
+		qla82xx_clear_drv_active(ha);
+		qla82xx_idc_unlock(ha);
+		iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+		if (!ql2xdbwr)
+			iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+	} else {
+		if (ha->iobase)
+			iounmap(ha->iobase);
+	}
 	pci_release_selected_regions(ha->pdev, ha->bars);
 	kfree(ha);
 	ha = NULL;
@@ -2152,11 +2401,17 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
 	scsi_host_put(base_vha->host);
 
-	if (ha->iobase)
-		iounmap(ha->iobase);
+	if (IS_QLA82XX(ha)) {
+		iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+		if (!ql2xdbwr)
+			iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
+	} else {
+		if (ha->iobase)
+			iounmap(ha->iobase);
 
 	if (ha->mqiobase)
 		iounmap(ha->mqiobase);
+	}
 
 	pci_release_selected_regions(ha->pdev, ha->bars);
 	kfree(ha);
@@ -2205,8 +2460,10 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 	vha->flags.online = 0;
 
 	/* turn-off interrupts on the card */
-	if (ha->interrupts_on)
+	if (ha->interrupts_on) {
+		vha->flags.init_done = 0;
 		ha->isp_ops->disable_intrs(ha);
+	}
 
 	qla2x00_free_irqs(vha);
 
@@ -2351,10 +2608,25 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 	if (!ha->srb_mempool)
 		goto fail_free_gid_list;
 
+	if (IS_QLA82XX(ha)) {
+		/* Allocate cache for CT6 Ctx. */
+		if (!ctx_cachep) {
+			ctx_cachep = kmem_cache_create("qla2xxx_ctx",
+				sizeof(struct ct6_dsd), 0,
+				SLAB_HWCACHE_ALIGN, NULL);
+			if (!ctx_cachep)
+				goto fail_free_gid_list;
+		}
+		ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+			ctx_cachep);
+		if (!ha->ctx_mempool)
+			goto fail_free_srb_mempool;
+	}
+
 	/* Get memory for cached NVRAM */
 	ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
 	if (!ha->nvram)
-		goto fail_free_srb_mempool;
+		goto fail_free_ctx_mempool;
 
 	snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
 	    ha->pdev->device);
@@ -2363,6 +2635,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 	if (!ha->s_dma_pool)
 		goto fail_free_nvram;
 
+	if (IS_QLA82XX(ha) || ql2xenabledif) {
+		ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+			DSD_LIST_DMA_POOL_SIZE, 8, 0);
+		if (!ha->dl_dma_pool) {
+			qla_printk(KERN_WARNING, ha,
+			    "Memory Allocation failed - dl_dma_pool\n");
+			goto fail_s_dma_pool;
+		}
+
+		ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+			FCP_CMND_DMA_POOL_SIZE, 8, 0);
+		if (!ha->fcp_cmnd_dma_pool) {
+			qla_printk(KERN_WARNING, ha,
+			    "Memory Allocation failed - fcp_cmnd_dma_pool\n");
+			goto fail_dl_dma_pool;
+		}
+	}
+
 	/* Allocate memory for SNS commands */
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		/* Get consistent memory allocated for SNS commands */
@@ -2429,16 +2719,28 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 	ha->npiv_info = NULL;
 
 	/* Get consistent memory allocated for EX-INIT-CB. */
-	if (IS_QLA81XX(ha)) {
+	if (IS_QLA8XXX_TYPE(ha)) {
 		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
 		    &ha->ex_init_cb_dma);
 		if (!ha->ex_init_cb)
 			goto fail_ex_init_cb;
 	}
 
+	INIT_LIST_HEAD(&ha->gbl_dsd_list);
+
+	/* Get consistent memory allocated for Async Port-Database. */
+	if (!IS_FWI2_CAPABLE(ha)) {
+		ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+			&ha->async_pd_dma);
+		if (!ha->async_pd)
+			goto fail_async_pd;
+	}
+
 	INIT_LIST_HEAD(&ha->vp_list);
 	return 1;
 
+fail_async_pd:
+	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
 fail_ex_init_cb:
 	kfree(ha->npiv_info);
 fail_npiv_info:
@@ -2465,11 +2767,24 @@ fail_free_ms_iocb:
2465 ha->ms_iocb = NULL; 2767 ha->ms_iocb = NULL;
2466 ha->ms_iocb_dma = 0; 2768 ha->ms_iocb_dma = 0;
2467fail_dma_pool: 2769fail_dma_pool:
2770 if (IS_QLA82XX(ha) || ql2xenabledif) {
2771 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2772 ha->fcp_cmnd_dma_pool = NULL;
2773 }
2774fail_dl_dma_pool:
2775 if (IS_QLA82XX(ha) || ql2xenabledif) {
2776 dma_pool_destroy(ha->dl_dma_pool);
2777 ha->dl_dma_pool = NULL;
2778 }
2779fail_s_dma_pool:
2468 dma_pool_destroy(ha->s_dma_pool); 2780 dma_pool_destroy(ha->s_dma_pool);
2469 ha->s_dma_pool = NULL; 2781 ha->s_dma_pool = NULL;
2470fail_free_nvram: 2782fail_free_nvram:
2471 kfree(ha->nvram); 2783 kfree(ha->nvram);
2472 ha->nvram = NULL; 2784 ha->nvram = NULL;
2785fail_free_ctx_mempool:
2786 mempool_destroy(ha->ctx_mempool);
2787 ha->ctx_mempool = NULL;
2473fail_free_srb_mempool: 2788fail_free_srb_mempool:
2474 mempool_destroy(ha->srb_mempool); 2789 mempool_destroy(ha->srb_mempool);
2475 ha->srb_mempool = NULL; 2790 ha->srb_mempool = NULL;
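Worth noting in the error path above: the new fail_* labels keep the unwind strictly LIFO, so a failure at any allocation releases exactly the resources acquired before it and nothing after. A compact illustration of the idiom with hypothetical resources:

#include <linux/slab.h>

static int acquire_all(void **pa, void **pb, void **pc)
{
	void *a, *b, *c;

	a = kmalloc(32, GFP_KERNEL);
	if (!a)
		goto fail;
	b = kmalloc(32, GFP_KERNEL);
	if (!b)
		goto fail_a;
	c = kmalloc(32, GFP_KERNEL);
	if (!c)
		goto fail_b;
	*pa = a; *pb = b; *pc = c;	/* caller now owns all three */
	return 0;

fail_b:
	kfree(b);	/* undo step 2 */
fail_a:
	kfree(a);	/* undo step 1 */
fail:
	return -ENOMEM;
}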
@@ -2538,7 +2853,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2538 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2853 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2539 2854
2540 if (ha->ex_init_cb) 2855 if (ha->ex_init_cb)
2541 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 2856 dma_pool_free(ha->s_dma_pool,
2857 ha->ex_init_cb, ha->ex_init_cb_dma);
2858
2859 if (ha->async_pd)
2860 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
2542 2861
2543 if (ha->s_dma_pool) 2862 if (ha->s_dma_pool)
2544 dma_pool_destroy(ha->s_dma_pool); 2863 dma_pool_destroy(ha->s_dma_pool);
@@ -2547,14 +2866,39 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2547 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2866 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2548 ha->gid_list_dma); 2867 ha->gid_list_dma);
2549 2868
2869 if (IS_QLA82XX(ha)) {
2870 if (!list_empty(&ha->gbl_dsd_list)) {
2871 struct dsd_dma *dsd_ptr, *tdsd_ptr;
2872
 2873 /* clean up the previously allocated pool */
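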
2874 list_for_each_entry_safe(dsd_ptr,
2875 tdsd_ptr, &ha->gbl_dsd_list, list) {
2876 dma_pool_free(ha->dl_dma_pool,
2877 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
2878 list_del(&dsd_ptr->list);
2879 kfree(dsd_ptr);
2880 }
2881 }
2882 }
2883
2884 if (ha->dl_dma_pool)
2885 dma_pool_destroy(ha->dl_dma_pool);
2886
2887 if (ha->fcp_cmnd_dma_pool)
2888 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2889
2890 if (ha->ctx_mempool)
2891 mempool_destroy(ha->ctx_mempool);
2892
2550 if (ha->init_cb) 2893 if (ha->init_cb)
2551 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 2894 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2552 ha->init_cb, ha->init_cb_dma); 2895 ha->init_cb, ha->init_cb_dma);
2553 vfree(ha->optrom_buffer); 2896 vfree(ha->optrom_buffer);
2554 kfree(ha->nvram); 2897 kfree(ha->nvram);
2555 kfree(ha->npiv_info); 2898 kfree(ha->npiv_info);
2556 2899
2557 ha->srb_mempool = NULL; 2900 ha->srb_mempool = NULL;
2901 ha->ctx_mempool = NULL;
2558 ha->eft = NULL; 2902 ha->eft = NULL;
2559 ha->eft_dma = 0; 2903 ha->eft_dma = 0;
2560 ha->sns_cmd = NULL; 2904 ha->sns_cmd = NULL;
@@ -2567,8 +2911,12 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2567 ha->init_cb_dma = 0; 2911 ha->init_cb_dma = 0;
2568 ha->ex_init_cb = NULL; 2912 ha->ex_init_cb = NULL;
2569 ha->ex_init_cb_dma = 0; 2913 ha->ex_init_cb_dma = 0;
2914 ha->async_pd = NULL;
2915 ha->async_pd_dma = 0;
2570 2916
2571 ha->s_dma_pool = NULL; 2917 ha->s_dma_pool = NULL;
2918 ha->dl_dma_pool = NULL;
2919 ha->fcp_cmnd_dma_pool = NULL;
2572 2920
2573 ha->gid_list = NULL; 2921 ha->gid_list = NULL;
2574 ha->gid_list_dma = 0; 2922 ha->gid_list_dma = 0;
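The gbl_dsd_list drain added above uses list_for_each_entry_safe(), the required form when entries are freed during the walk: the second cursor caches the successor before the current node is deleted. A minimal sketch with a hypothetical node type:

#include <linux/list.h>
#include <linux/slab.h>

struct node {
	struct list_head list;
};

static void drain(struct list_head *head)
{
	struct node *n, *tmp;

	/* tmp already points at the next entry, so freeing n is safe */
	list_for_each_entry_safe(n, tmp, head, list) {
		list_del(&n->list);
		kfree(n);
	}
}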
@@ -2691,6 +3039,8 @@ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
2691qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE); 3039qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
2692qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 3040qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
2693qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); 3041qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
3042qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
3043qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
2694 3044
2695int 3045int
2696qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code) 3046qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -2760,6 +3110,14 @@ qla2x00_do_work(struct scsi_qla_host *vha)
2760 qla2x00_async_logout_done(vha, e->u.logio.fcport, 3110 qla2x00_async_logout_done(vha, e->u.logio.fcport,
2761 e->u.logio.data); 3111 e->u.logio.data);
2762 break; 3112 break;
3113 case QLA_EVT_ASYNC_ADISC:
3114 qla2x00_async_adisc(vha, e->u.logio.fcport,
3115 e->u.logio.data);
3116 break;
3117 case QLA_EVT_ASYNC_ADISC_DONE:
3118 qla2x00_async_adisc_done(vha, e->u.logio.fcport,
3119 e->u.logio.data);
3120 break;
2763 case QLA_EVT_UEVENT: 3121 case QLA_EVT_UEVENT:
2764 qla2x00_uevent_emit(vha, e->u.uevent.code); 3122 qla2x00_uevent_emit(vha, e->u.uevent.code);
2765 break; 3123 break;
@@ -2785,9 +3143,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
2785 * If the port is not ONLINE then try to login 3143 * If the port is not ONLINE then try to login
2786 * to it if we haven't run out of retries. 3144 * to it if we haven't run out of retries.
2787 */ 3145 */
2788 if (atomic_read(&fcport->state) != 3146 if (atomic_read(&fcport->state) != FCS_ONLINE &&
2789 FCS_ONLINE && fcport->login_retry) { 3147 fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
2790
2791 fcport->login_retry--; 3148 fcport->login_retry--;
2792 if (fcport->flags & FCF_FABRIC_DEVICE) { 3149 if (fcport->flags & FCF_FABRIC_DEVICE) {
2793 if (fcport->flags & FCF_FCP2_DEVICE) 3150 if (fcport->flags & FCF_FCP2_DEVICE)
@@ -2798,6 +3155,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
2798 fcport->d_id.b.al_pa); 3155 fcport->d_id.b.al_pa);
2799 3156
2800 if (IS_ALOGIO_CAPABLE(ha)) { 3157 if (IS_ALOGIO_CAPABLE(ha)) {
3158 fcport->flags |= FCF_ASYNC_SENT;
2801 data[0] = 0; 3159 data[0] = 0;
2802 data[1] = QLA_LOGIO_LOGIN_RETRIED; 3160 data[1] = QLA_LOGIO_LOGIN_RETRIED;
2803 status = qla2x00_post_async_login_work( 3161 status = qla2x00_post_async_login_work(
@@ -2896,6 +3254,45 @@ qla2x00_do_dpc(void *data)
2896 3254
2897 qla2x00_do_work(base_vha); 3255 qla2x00_do_work(base_vha);
2898 3256
3257 if (IS_QLA82XX(ha)) {
3258 if (test_and_clear_bit(ISP_UNRECOVERABLE,
3259 &base_vha->dpc_flags)) {
3260 qla82xx_idc_lock(ha);
3261 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3262 QLA82XX_DEV_FAILED);
3263 qla82xx_idc_unlock(ha);
3264 qla_printk(KERN_INFO, ha,
3265 "HW State: FAILED\n");
3266 qla82xx_device_state_handler(base_vha);
3267 continue;
3268 }
3269
3270 if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
3271 &base_vha->dpc_flags)) {
3272
3273 DEBUG(printk(KERN_INFO
3274 "scsi(%ld): dpc: sched "
3275 "qla82xx_fcoe_ctx_reset ha = %p\n",
3276 base_vha->host_no, ha));
3277 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
3278 &base_vha->dpc_flags))) {
3279 if (qla82xx_fcoe_ctx_reset(base_vha)) {
3280 /* FCoE-ctx reset failed.
 3281 * Escalate to a chip reset.
 3282 */
3283 set_bit(ISP_ABORT_NEEDED,
3284 &base_vha->dpc_flags);
3285 }
3286 clear_bit(ABORT_ISP_ACTIVE,
3287 &base_vha->dpc_flags);
3288 }
3289
3290 DEBUG(printk("scsi(%ld): dpc:"
3291 " qla82xx_fcoe_ctx_reset end\n",
3292 base_vha->host_no));
3293 }
3294 }
3295
2899 if (test_and_clear_bit(ISP_ABORT_NEEDED, 3296 if (test_and_clear_bit(ISP_ABORT_NEEDED,
2900 &base_vha->dpc_flags)) { 3297 &base_vha->dpc_flags)) {
2901 3298
@@ -2905,7 +3302,7 @@ qla2x00_do_dpc(void *data)
2905 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 3302 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
2906 &base_vha->dpc_flags))) { 3303 &base_vha->dpc_flags))) {
2907 3304
2908 if (qla2x00_abort_isp(base_vha)) { 3305 if (ha->isp_ops->abort_isp(base_vha)) {
2909 /* failed. retry later */ 3306 /* failed. retry later */
2910 set_bit(ISP_ABORT_NEEDED, 3307 set_bit(ISP_ABORT_NEEDED,
2911 &base_vha->dpc_flags); 3308 &base_vha->dpc_flags);
@@ -3038,11 +3435,31 @@ static void
3038qla2x00_sp_free_dma(srb_t *sp) 3435qla2x00_sp_free_dma(srb_t *sp)
3039{ 3436{
3040 struct scsi_cmnd *cmd = sp->cmd; 3437 struct scsi_cmnd *cmd = sp->cmd;
3438 struct qla_hw_data *ha = sp->fcport->vha->hw;
3041 3439
3042 if (sp->flags & SRB_DMA_VALID) { 3440 if (sp->flags & SRB_DMA_VALID) {
3043 scsi_dma_unmap(cmd); 3441 scsi_dma_unmap(cmd);
3044 sp->flags &= ~SRB_DMA_VALID; 3442 sp->flags &= ~SRB_DMA_VALID;
3045 } 3443 }
3444
3445 if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
3446 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
3447 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
3448 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
3449 }
3450
3451 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
 3452 /* List is guaranteed to have elements */
3453 qla2x00_clean_dsd_pool(ha, sp);
3454 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
3455 }
3456
3457 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
3458 dma_pool_free(ha->dl_dma_pool, sp->ctx,
3459 ((struct crc_context *)sp->ctx)->crc_ctx_dma);
3460 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
3461 }
3462
3046 CMD_SP(cmd) = NULL; 3463 CMD_SP(cmd) = NULL;
3047} 3464}
3048 3465
@@ -3053,8 +3470,18 @@ qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3053 3470
3054 qla2x00_sp_free_dma(sp); 3471 qla2x00_sp_free_dma(sp);
3055 3472
3056 mempool_free(sp, ha->srb_mempool); 3473 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
3474 struct ct6_dsd *ctx = sp->ctx;
3475 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
3476 ctx->fcp_cmnd_dma);
3477 list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
3478 ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
3479 ha->gbl_dsd_avail += ctx->dsd_use_cnt;
3480 mempool_free(sp->ctx, ha->ctx_mempool);
3481 sp->ctx = NULL;
3482 }
3057 3483
3484 mempool_free(sp, ha->srb_mempool);
3058 cmd->scsi_done(cmd); 3485 cmd->scsi_done(cmd);
3059} 3486}
3060 3487
@@ -3079,6 +3506,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
3079 struct qla_hw_data *ha = vha->hw; 3506 struct qla_hw_data *ha = vha->hw;
3080 struct req_que *req; 3507 struct req_que *req;
3081 3508
3509 if (IS_QLA82XX(ha))
3510 qla82xx_watchdog(vha);
3511
3082 /* Hardware read to raise pending EEH errors during mailbox waits. */ 3512 /* Hardware read to raise pending EEH errors during mailbox waits. */
3083 if (!pci_channel_offline(ha->pdev)) 3513 if (!pci_channel_offline(ha->pdev))
3084 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 3514 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
@@ -3143,7 +3573,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3143 sp = req->outstanding_cmds[index]; 3573 sp = req->outstanding_cmds[index];
3144 if (!sp) 3574 if (!sp)
3145 continue; 3575 continue;
3146 if (sp->ctx) 3576 if (sp->ctx && !IS_PROT_IO(sp))
3147 continue; 3577 continue;
3148 sfcp = sp->fcport; 3578 sfcp = sp->fcport;
3149 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 3579 if (!(sfcp->flags & FCF_FCP2_DEVICE))
@@ -3193,6 +3623,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3193 start_dpc || 3623 start_dpc ||
3194 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || 3624 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
3195 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || 3625 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
3626 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
3627 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
3196 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || 3628 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3197 test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) 3629 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
3198 qla2xxx_wake_dpc(vha); 3630 qla2xxx_wake_dpc(vha);
@@ -3202,7 +3634,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3202 3634
3203/* Firmware interface routines. */ 3635/* Firmware interface routines. */
3204 3636
3205#define FW_BLOBS 7 3637#define FW_BLOBS 8
3206#define FW_ISP21XX 0 3638#define FW_ISP21XX 0
3207#define FW_ISP22XX 1 3639#define FW_ISP22XX 1
3208#define FW_ISP2300 2 3640#define FW_ISP2300 2
@@ -3210,6 +3642,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3210#define FW_ISP24XX 4 3642#define FW_ISP24XX 4
3211#define FW_ISP25XX 5 3643#define FW_ISP25XX 5
3212#define FW_ISP81XX 6 3644#define FW_ISP81XX 6
3645#define FW_ISP82XX 7
3213 3646
3214#define FW_FILE_ISP21XX "ql2100_fw.bin" 3647#define FW_FILE_ISP21XX "ql2100_fw.bin"
3215#define FW_FILE_ISP22XX "ql2200_fw.bin" 3648#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -3218,6 +3651,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3218#define FW_FILE_ISP24XX "ql2400_fw.bin" 3651#define FW_FILE_ISP24XX "ql2400_fw.bin"
3219#define FW_FILE_ISP25XX "ql2500_fw.bin" 3652#define FW_FILE_ISP25XX "ql2500_fw.bin"
3220#define FW_FILE_ISP81XX "ql8100_fw.bin" 3653#define FW_FILE_ISP81XX "ql8100_fw.bin"
3654#define FW_FILE_ISP82XX "ql8200_fw.bin"
3221 3655
3222static DEFINE_MUTEX(qla_fw_lock); 3656static DEFINE_MUTEX(qla_fw_lock);
3223 3657
@@ -3229,6 +3663,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
3229 { .name = FW_FILE_ISP24XX, }, 3663 { .name = FW_FILE_ISP24XX, },
3230 { .name = FW_FILE_ISP25XX, }, 3664 { .name = FW_FILE_ISP25XX, },
3231 { .name = FW_FILE_ISP81XX, }, 3665 { .name = FW_FILE_ISP81XX, },
3666 { .name = FW_FILE_ISP82XX, },
3232}; 3667};
3233 3668
3234struct fw_blob * 3669struct fw_blob *
@@ -3252,6 +3687,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
3252 blob = &qla_fw_blobs[FW_ISP25XX]; 3687 blob = &qla_fw_blobs[FW_ISP25XX];
3253 } else if (IS_QLA81XX(ha)) { 3688 } else if (IS_QLA81XX(ha)) {
3254 blob = &qla_fw_blobs[FW_ISP81XX]; 3689 blob = &qla_fw_blobs[FW_ISP81XX];
3690 } else if (IS_QLA82XX(ha)) {
3691 blob = &qla_fw_blobs[FW_ISP82XX];
3255 } 3692 }
3256 3693
3257 mutex_lock(&qla_fw_lock); 3694 mutex_lock(&qla_fw_lock);
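The blob table above maps chip families to firmware file names, loaded lazily under qla_fw_lock so each image is fetched at most once. A sketch of the request_firmware() half of that pattern; the file name and device pointer are placeholders:

#include <linux/firmware.h>

static const struct firmware *my_load_fw(struct device *dev)
{
	const struct firmware *fw = NULL;

	/* Pins the image in memory until release_firmware() is called. */
	if (request_firmware(&fw, "my_chip_fw.bin", dev))
		return NULL;

	return fw;	/* caller eventually calls release_firmware(fw) */
}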
@@ -3392,11 +3829,10 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3392 msleep(1000); 3829 msleep(1000);
3393 3830
3394 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3831 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3395 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS) 3832 if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
3396 ret = PCI_ERS_RESULT_RECOVERED; 3833 ret = PCI_ERS_RESULT_RECOVERED;
3397 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 3834 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3398 3835
3399 pci_cleanup_aer_uncorrect_error_status(pdev);
3400 3836
3401 DEBUG17(qla_printk(KERN_WARNING, ha, 3837 DEBUG17(qla_printk(KERN_WARNING, ha,
3402 "slot_reset-return:ret=%x\n", ret)); 3838 "slot_reset-return:ret=%x\n", ret));
@@ -3420,6 +3856,8 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
3420 "from slot/link_reset"); 3856 "from slot/link_reset");
3421 } 3857 }
3422 3858
3859 pci_cleanup_aer_uncorrect_error_status(pdev);
3860
3423 ha->flags.eeh_busy = 0; 3861 ha->flags.eeh_busy = 0;
3424} 3862}
3425 3863
@@ -3445,6 +3883,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
3445 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 3883 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
3446 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 3884 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
3447 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 3885 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
3886 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
3448 { 0 }, 3887 { 0 },
3449}; 3888};
3450MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 3889MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -3460,6 +3899,10 @@ static struct pci_driver qla2xxx_pci_driver = {
3460 .err_handler = &qla2xxx_err_handler, 3899 .err_handler = &qla2xxx_err_handler,
3461}; 3900};
3462 3901
3902static struct file_operations apidev_fops = {
3903 .owner = THIS_MODULE,
3904};
3905
3463/** 3906/**
3464 * qla2x00_module_init - Module initialization. 3907 * qla2x00_module_init - Module initialization.
3465 **/ 3908 **/
@@ -3488,6 +3931,13 @@ qla2x00_module_init(void)
3488 kmem_cache_destroy(srb_cachep); 3931 kmem_cache_destroy(srb_cachep);
3489 return -ENODEV; 3932 return -ENODEV;
3490 } 3933 }
3934
3935 apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
3936 if (apidev_major < 0) {
3937 printk(KERN_WARNING "qla2xxx: Unable to register char device "
3938 "%s\n", QLA2XXX_APIDEV);
3939 }
3940
3491 qla2xxx_transport_vport_template = 3941 qla2xxx_transport_vport_template =
3492 fc_attach_transport(&qla2xxx_transport_vport_functions); 3942 fc_attach_transport(&qla2xxx_transport_vport_functions);
3493 if (!qla2xxx_transport_vport_template) { 3943 if (!qla2xxx_transport_vport_template) {
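Passing 0 as the major number to register_chrdev(), as above, asks the kernel to pick a free major dynamically; the return value is that major on success or a negative errno. The driver deliberately warns and continues on failure, since the char device is only a management side channel. A minimal sketch of the same idiom:

#include <linux/fs.h>
#include <linux/module.h>

static int my_major;

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
};

static int my_register(void)
{
	my_major = register_chrdev(0 /* dynamic major */, "my_apidev", &my_fops);
	if (my_major < 0)
		printk(KERN_WARNING "my_apidev: no char device (%d)\n", my_major);
	return 0;	/* non-fatal, mirroring the hunk above */
}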
@@ -3513,9 +3963,12 @@ qla2x00_module_init(void)
3513static void __exit 3963static void __exit
3514qla2x00_module_exit(void) 3964qla2x00_module_exit(void)
3515{ 3965{
3966 unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
3516 pci_unregister_driver(&qla2xxx_pci_driver); 3967 pci_unregister_driver(&qla2xxx_pci_driver);
3517 qla2x00_release_firmware(); 3968 qla2x00_release_firmware();
3518 kmem_cache_destroy(srb_cachep); 3969 kmem_cache_destroy(srb_cachep);
3970 if (ctx_cachep)
3971 kmem_cache_destroy(ctx_cachep);
3519 fc_release_transport(qla2xxx_transport_template); 3972 fc_release_transport(qla2xxx_transport_template);
3520 fc_release_transport(qla2xxx_transport_vport_template); 3973 fc_release_transport(qla2xxx_transport_vport_template);
3521} 3974}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 8b3de4e54c28..de92504d7585 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -423,9 +423,6 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
423/* Flash Manipulation Routines */ 423/* Flash Manipulation Routines */
424/*****************************************************************************/ 424/*****************************************************************************/
425 425
426#define OPTROM_BURST_SIZE 0x1000
427#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
428
429static inline uint32_t 426static inline uint32_t
430flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr) 427flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
431{ 428{
@@ -565,6 +562,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
565 *start = FA_FLASH_LAYOUT_ADDR; 562 *start = FA_FLASH_LAYOUT_ADDR;
566 else if (IS_QLA81XX(ha)) 563 else if (IS_QLA81XX(ha))
567 *start = FA_FLASH_LAYOUT_ADDR_81; 564 *start = FA_FLASH_LAYOUT_ADDR_81;
565 else if (IS_QLA82XX(ha)) {
566 *start = FA_FLASH_LAYOUT_ADDR_82;
567 goto end;
568 }
568 /* Begin with first PCI expansion ROM header. */ 569 /* Begin with first PCI expansion ROM header. */
569 buf = (uint8_t *)req->ring; 570 buf = (uint8_t *)req->ring;
570 dcode = (uint32_t *)req->ring; 571 dcode = (uint32_t *)req->ring;
@@ -648,6 +649,12 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
648 const uint32_t def_npiv_conf1[] = 649 const uint32_t def_npiv_conf1[] =
649 { FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR, 650 { FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
650 FA_NPIV_CONF1_ADDR_81 }; 651 FA_NPIV_CONF1_ADDR_81 };
652 const uint32_t fcp_prio_cfg0[] =
653 { FA_FCP_PRIO0_ADDR, FA_FCP_PRIO0_ADDR_25,
654 0 };
655 const uint32_t fcp_prio_cfg1[] =
656 { FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25,
657 0 };
651 uint32_t def; 658 uint32_t def;
652 uint16_t *wptr; 659 uint16_t *wptr;
653 uint16_t cnt, chksum; 660 uint16_t cnt, chksum;
@@ -703,10 +710,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
703 break; 710 break;
704 case FLT_REG_VPD_0: 711 case FLT_REG_VPD_0:
705 ha->flt_region_vpd_nvram = start; 712 ha->flt_region_vpd_nvram = start;
713 if (IS_QLA82XX(ha))
714 break;
706 if (ha->flags.port0) 715 if (ha->flags.port0)
707 ha->flt_region_vpd = start; 716 ha->flt_region_vpd = start;
708 break; 717 break;
709 case FLT_REG_VPD_1: 718 case FLT_REG_VPD_1:
719 if (IS_QLA82XX(ha))
720 break;
710 if (!ha->flags.port0) 721 if (!ha->flags.port0)
711 ha->flt_region_vpd = start; 722 ha->flt_region_vpd = start;
712 break; 723 break;
@@ -732,6 +743,29 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
732 case FLT_REG_GOLD_FW: 743 case FLT_REG_GOLD_FW:
733 ha->flt_region_gold_fw = start; 744 ha->flt_region_gold_fw = start;
734 break; 745 break;
746 case FLT_REG_FCP_PRIO_0:
747 if (ha->flags.port0)
748 ha->flt_region_fcp_prio = start;
749 break;
750 case FLT_REG_FCP_PRIO_1:
751 if (!ha->flags.port0)
752 ha->flt_region_fcp_prio = start;
753 break;
754 case FLT_REG_BOOT_CODE_82XX:
755 ha->flt_region_boot = start;
756 break;
757 case FLT_REG_FW_82XX:
758 ha->flt_region_fw = start;
759 break;
760 case FLT_REG_GOLD_FW_82XX:
761 ha->flt_region_gold_fw = start;
762 break;
763 case FLT_REG_BOOTLOAD_82XX:
764 ha->flt_region_bootload = start;
765 break;
766 case FLT_REG_VPD_82XX:
767 ha->flt_region_vpd = start;
768 break;
735 } 769 }
736 } 770 }
737 goto done; 771 goto done;
@@ -750,12 +784,14 @@ no_flash_data:
750 ha->flt_region_boot = def_boot[def]; 784 ha->flt_region_boot = def_boot[def];
751 ha->flt_region_vpd_nvram = def_vpd_nvram[def]; 785 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
752 ha->flt_region_vpd = ha->flags.port0 ? 786 ha->flt_region_vpd = ha->flags.port0 ?
753 def_vpd0[def]: def_vpd1[def]; 787 def_vpd0[def] : def_vpd1[def];
754 ha->flt_region_nvram = ha->flags.port0 ? 788 ha->flt_region_nvram = ha->flags.port0 ?
755 def_nvram0[def]: def_nvram1[def]; 789 def_nvram0[def] : def_nvram1[def];
756 ha->flt_region_fdt = def_fdt[def]; 790 ha->flt_region_fdt = def_fdt[def];
757 ha->flt_region_npiv_conf = ha->flags.port0 ? 791 ha->flt_region_npiv_conf = ha->flags.port0 ?
758 def_npiv_conf0[def]: def_npiv_conf1[def]; 792 def_npiv_conf0[def] : def_npiv_conf1[def];
793 ha->flt_region_fcp_prio = ha->flags.port0 ?
794 fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
759done: 795done:
760 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 796 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
761 "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x " 797 "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
@@ -775,7 +811,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
775 uint16_t *wptr; 811 uint16_t *wptr;
776 struct qla_fdt_layout *fdt; 812 struct qla_fdt_layout *fdt;
777 uint8_t man_id, flash_id; 813 uint8_t man_id, flash_id;
778 uint16_t mid, fid; 814 uint16_t mid = 0, fid = 0;
779 struct qla_hw_data *ha = vha->hw; 815 struct qla_hw_data *ha = vha->hw;
780 struct req_que *req = ha->req_q_map[0]; 816 struct req_que *req = ha->req_q_map[0];
781 817
@@ -816,6 +852,10 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
816 goto done; 852 goto done;
817no_flash_data: 853no_flash_data:
818 loc = locations[0]; 854 loc = locations[0];
855 if (IS_QLA82XX(ha)) {
856 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
857 goto done;
858 }
819 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); 859 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
820 mid = man_id; 860 mid = man_id;
821 fid = flash_id; 861 fid = flash_id;
@@ -853,6 +893,31 @@ done:
853 ha->fdt_block_size)); 893 ha->fdt_block_size));
854} 894}
855 895
896static void
897qla2xxx_get_idc_param(scsi_qla_host_t *vha)
898{
899#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
900 uint32_t *wptr;
901 struct qla_hw_data *ha = vha->hw;
902 struct req_que *req = ha->req_q_map[0];
903
904 if (!IS_QLA82XX(ha))
905 return;
906
907 wptr = (uint32_t *)req->ring;
908 ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
 909 QLA82XX_IDC_PARAM_ADDR, 8);
910
911 if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
912 ha->nx_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
913 ha->nx_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
914 } else {
915 ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
916 ha->nx_reset_timeout = le32_to_cpu(*wptr);
917 }
918 return;
919}
920
856int 921int
857qla2xxx_get_flash_info(scsi_qla_host_t *vha) 922qla2xxx_get_flash_info(scsi_qla_host_t *vha)
858{ 923{
@@ -860,7 +925,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
860 uint32_t flt_addr; 925 uint32_t flt_addr;
861 struct qla_hw_data *ha = vha->hw; 926 struct qla_hw_data *ha = vha->hw;
862 927
863 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 928 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
864 return QLA_SUCCESS; 929 return QLA_SUCCESS;
865 930
866 ret = qla2xxx_find_flt_start(vha, &flt_addr); 931 ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -869,6 +934,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
869 934
870 qla2xxx_get_flt_info(vha, flt_addr); 935 qla2xxx_get_flt_info(vha, flt_addr);
871 qla2xxx_get_fdt_info(vha); 936 qla2xxx_get_fdt_info(vha);
937 qla2xxx_get_idc_param(vha);
872 938
873 return QLA_SUCCESS; 939 return QLA_SUCCESS;
874} 940}
@@ -885,7 +951,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
885 struct qla_npiv_entry *entry; 951 struct qla_npiv_entry *entry;
886 struct qla_hw_data *ha = vha->hw; 952 struct qla_hw_data *ha = vha->hw;
887 953
888 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 954 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha))
889 return; 955 return;
890 956
891 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, 957 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1178,6 +1244,9 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1178 uint32_t *dwptr; 1244 uint32_t *dwptr;
1179 struct qla_hw_data *ha = vha->hw; 1245 struct qla_hw_data *ha = vha->hw;
1180 1246
1247 if (IS_QLA82XX(ha))
1248 return buf;
1249
1181 /* Dword reads to flash. */ 1250 /* Dword reads to flash. */
1182 dwptr = (uint32_t *)buf; 1251 dwptr = (uint32_t *)buf;
1183 for (i = 0; i < bytes >> 2; i++, naddr++) 1252 for (i = 0; i < bytes >> 2; i++, naddr++)
@@ -1233,6 +1302,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1233 1302
1234 ret = QLA_SUCCESS; 1303 ret = QLA_SUCCESS;
1235 1304
1305 if (IS_QLA82XX(ha))
1306 return ret;
1307
1236 /* Enable flash write. */ 1308 /* Enable flash write. */
1237 WRT_REG_DWORD(&reg->ctrl_status, 1309 WRT_REG_DWORD(&reg->ctrl_status,
1238 RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE); 1310 RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -1344,6 +1416,9 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
1344 struct qla_hw_data *ha = vha->hw; 1416 struct qla_hw_data *ha = vha->hw;
1345 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1417 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1346 1418
1419 if (IS_QLA82XX(ha))
1420 return;
1421
1347 spin_lock_irqsave(&ha->hardware_lock, flags); 1422 spin_lock_irqsave(&ha->hardware_lock, flags);
1348 1423
1349 /* Save the Original GPIOE. */ 1424 /* Save the Original GPIOE. */
@@ -1525,6 +1600,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
1525 struct qla_hw_data *ha = vha->hw; 1600 struct qla_hw_data *ha = vha->hw;
1526 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1601 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1527 1602
1603 if (IS_QLA82XX(ha))
1604 return QLA_SUCCESS;
1605
1528 if (ha->beacon_blink_led == 0) { 1606 if (ha->beacon_blink_led == 0) {
1529 /* Enable firmware for update */ 1607 /* Enable firmware for update */
1530 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; 1608 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
@@ -1567,6 +1645,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
1567 struct qla_hw_data *ha = vha->hw; 1645 struct qla_hw_data *ha = vha->hw;
1568 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1646 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1569 1647
1648 if (IS_QLA82XX(ha))
1649 return QLA_SUCCESS;
1650
1570 ha->beacon_blink_led = 0; 1651 ha->beacon_blink_led = 0;
1571 ha->beacon_color_state = QLA_LED_ALL_ON; 1652 ha->beacon_color_state = QLA_LED_ALL_ON;
1572 1653
@@ -2576,6 +2657,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2576 int i; 2657 int i;
2577 struct qla_hw_data *ha = vha->hw; 2658 struct qla_hw_data *ha = vha->hw;
2578 2659
2660 if (IS_QLA82XX(ha))
2661 return ret;
2662
2579 if (!mbuf) 2663 if (!mbuf)
2580 return QLA_FUNCTION_FAILED; 2664 return QLA_FUNCTION_FAILED;
2581 2665
@@ -2722,3 +2806,50 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
2722 2806
2723 return 0; 2807 return 0;
2724} 2808}
2809
2810int
2811qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
2812{
2813 int len, max_len;
2814 uint32_t fcp_prio_addr;
2815 struct qla_hw_data *ha = vha->hw;
2816
2817 if (!ha->fcp_prio_cfg) {
2818 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
2819 if (!ha->fcp_prio_cfg) {
2820 qla_printk(KERN_WARNING, ha,
2821 "Unable to allocate memory for fcp priority data "
2822 "(%x).\n", FCP_PRIO_CFG_SIZE);
2823 return QLA_FUNCTION_FAILED;
2824 }
2825 }
2826 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
2827
2828 fcp_prio_addr = ha->flt_region_fcp_prio;
2829
2830 /* first read the fcp priority data header from flash */
2831 ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
2832 fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
2833
2834 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0))
2835 goto fail;
2836
2837 /* read remaining FCP CMD config data from flash */
2838 fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
2839 len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
2840 max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
2841
2842 ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0],
2843 fcp_prio_addr << 2, (len < max_len ? len : max_len));
2844
2845 /* revalidate the entire FCP priority config data, including entries */
2846 if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1))
2847 goto fail;
2848
2849 ha->flags.fcp_prio_enabled = 1;
2850 return QLA_SUCCESS;
2851fail:
2852 vfree(ha->fcp_prio_cfg);
2853 ha->fcp_prio_cfg = NULL;
2854 return QLA_FUNCTION_FAILED;
2855}
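qla24xx_read_fcp_prio_cfg() above is a two-phase flash read: validate the fixed-size header, size the variable entry region from num_entries, clamp it to the buffer, then revalidate the whole image. The clamp is the safety-critical step, since num_entries comes from flash and cannot be trusted. A sketch of just that sizing logic with stand-in constants:

#include <linux/kernel.h>
#include <linux/types.h>

#define CFG_SIZE 4096	/* stand-in for FCP_PRIO_CFG_SIZE */
#define HDR_SIZE 32	/* stand-in for FCP_PRIO_CFG_HDR_SIZE */
#define ENTRY_SIZE 32	/* stand-in for FCP_PRIO_CFG_ENTRY_SIZE */

static size_t entry_bytes(u16 num_entries)
{
	size_t len = (size_t)num_entries * ENTRY_SIZE;
	size_t max_len = CFG_SIZE - HDR_SIZE;

	/* never read past the buffer, whatever the on-flash count says */
	return min(len, max_len);
}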
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 81b5f29254e2..428802616e33 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -114,6 +114,7 @@
114 */ 114 */
115#define MAC_ADDR_LEN 6 /* in bytes */ 115#define MAC_ADDR_LEN 6 /* in bytes */
116#define IP_ADDR_LEN 4 /* in bytes */ 116#define IP_ADDR_LEN 4 /* in bytes */
117#define IPv6_ADDR_LEN 16 /* IPv6 address size */
117#define DRIVER_NAME "qla4xxx" 118#define DRIVER_NAME "qla4xxx"
118 119
119#define MAX_LINKED_CMDS_PER_LUN 3 120#define MAX_LINKED_CMDS_PER_LUN 3
@@ -147,6 +148,8 @@
147 148
148#define MAX_RESET_HA_RETRIES 2 149#define MAX_RESET_HA_RETRIES 2
149 150
151#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
152
150/* 153/*
151 * SCSI Request Block structure (srb) that is placed 154 * SCSI Request Block structure (srb) that is placed
152 * on cmd->SCp location of every I/O [We have 22 bytes available] 155 * on cmd->SCp location of every I/O [We have 22 bytes available]
@@ -169,7 +172,7 @@ struct srb {
169 172
170 struct scsi_cmnd *cmd; /* (4) SCSI command block */ 173 struct scsi_cmnd *cmd; /* (4) SCSI command block */
171 dma_addr_t dma_handle; /* (4) for unmap of single transfers */ 174 dma_addr_t dma_handle; /* (4) for unmap of single transfers */
172 atomic_t ref_count; /* reference count for this srb */ 175 struct kref srb_ref; /* reference count for this srb */
173 uint32_t fw_ddb_index; 176 uint32_t fw_ddb_index;
174 uint8_t err_id; /* error id */ 177 uint8_t err_id; /* error id */
175#define SRB_ERR_PORT 1 /* Request failed because "port down" */ 178#define SRB_ERR_PORT 1 /* Request failed because "port down" */
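Swapping atomic_t ref_count for struct kref above (with qla4xxx_srb_compl() becoming a kref release callback in the ql4_glbl.h hunk below) moves the srb onto the standard kernel refcount idiom: the final kref_put() invokes the release function exactly once. A minimal sketch with a hypothetical object:

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref ref;
};

static void obj_release(struct kref *kr)
{
	/* recover the enclosing object from its embedded kref */
	kfree(container_of(kr, struct obj, ref));
}

static void demo(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return;
	kref_init(&o->ref);		/* refcount = 1 */
	kref_get(&o->ref);		/* e.g. firmware holds a reference */
	kref_put(&o->ref, obj_release);	/* 2 -> 1, release not called */
	kref_put(&o->ref, obj_release);	/* 1 -> 0, obj_release() runs */
}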
@@ -220,7 +223,7 @@ struct ddb_entry {
220 223
221 uint16_t os_target_id; /* Target ID */ 224 uint16_t os_target_id; /* Target ID */
222 uint16_t fw_ddb_index; /* DDB firmware index */ 225 uint16_t fw_ddb_index; /* DDB firmware index */
223 uint8_t reserved[2]; 226 uint16_t options;
224 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ 227 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
225 228
226 uint32_t CmdSn; 229 uint32_t CmdSn;
@@ -245,10 +248,18 @@ struct ddb_entry {
245 248
246 uint16_t port; 249 uint16_t port;
247 uint32_t tpgt; 250 uint32_t tpgt;
248 uint8_t ip_addr[ISCSI_IPADDR_SIZE]; 251 uint8_t ip_addr[IP_ADDR_LEN];
249 uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */ 252 uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
250 uint8_t iscsi_alias[0x20]; 253 uint8_t iscsi_alias[0x20];
251 uint8_t isid[6]; 254 uint8_t isid[6];
255 uint16_t iscsi_max_burst_len;
256 uint16_t iscsi_max_outsnd_r2t;
257 uint16_t iscsi_first_burst_len;
258 uint16_t iscsi_max_rcv_data_seg_len;
259 uint16_t iscsi_max_snd_data_seg_len;
260
261 struct in6_addr remote_ipv6_addr;
262 struct in6_addr link_local_ipv6_addr;
252}; 263};
253 264
254/* 265/*
@@ -301,6 +312,7 @@ struct scsi_qla_host {
301#define DPC_ISNS_RESTART 7 /* 0x00000080 */ 312#define DPC_ISNS_RESTART 7 /* 0x00000080 */
302#define DPC_AEN 9 /* 0x00000200 */ 313#define DPC_AEN 9 /* 0x00000200 */
303#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */ 314#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
315#define DPC_LINK_CHANGED 18 /* 0x00040000 */
304 316
305 struct Scsi_Host *host; /* pointer to host data */ 317 struct Scsi_Host *host; /* pointer to host data */
306 uint32_t tot_ddbs; 318 uint32_t tot_ddbs;
@@ -320,8 +332,7 @@ struct scsi_qla_host {
320#define MIN_IOBASE_LEN 0x100 332#define MIN_IOBASE_LEN 0x100
321 333
322 uint16_t req_q_count; 334 uint16_t req_q_count;
323 uint8_t marker_needed; 335 uint8_t rsvd1[2];
324 uint8_t rsvd1;
325 336
326 unsigned long host_no; 337 unsigned long host_no;
327 338
@@ -441,8 +452,35 @@ struct scsi_qla_host {
441 452
442 /* Saved srb for status continuation entry processing */ 453 /* Saved srb for status continuation entry processing */
443 struct srb *status_srb; 454 struct srb *status_srb;
455
456 /* IPv6 support info from InitFW */
457 uint8_t acb_version;
458 uint8_t ipv4_addr_state;
459 uint16_t ipv4_options;
460
461 uint32_t resvd2;
462 uint32_t ipv6_options;
463 uint32_t ipv6_addl_options;
464 uint8_t ipv6_link_local_state;
465 uint8_t ipv6_addr0_state;
466 uint8_t ipv6_addr1_state;
467 uint8_t ipv6_default_router_state;
468 struct in6_addr ipv6_link_local_addr;
469 struct in6_addr ipv6_addr0;
470 struct in6_addr ipv6_addr1;
471 struct in6_addr ipv6_default_router_addr;
444}; 472};
445 473
474static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
475{
476 return ((ha->ipv4_options & IPOPT_IPv4_PROTOCOL_ENABLE) != 0);
477}
478
479static inline int is_ipv6_enabled(struct scsi_qla_host *ha)
480{
481 return ((ha->ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
482}
483
446static inline int is_qla4010(struct scsi_qla_host *ha) 484static inline int is_qla4010(struct scsi_qla_host *ha)
447{ 485{
448 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010; 486 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 9cd7a608df38..855226e08665 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -215,6 +215,7 @@ union external_hw_config_reg {
215/* Mailbox command definitions */ 215/* Mailbox command definitions */
216#define MBOX_CMD_ABOUT_FW 0x0009 216#define MBOX_CMD_ABOUT_FW 0x0009
217#define MBOX_CMD_PING 0x000B 217#define MBOX_CMD_PING 0x000B
218#define MBOX_CMD_ABORT_TASK 0x0015
218#define MBOX_CMD_LUN_RESET 0x0016 219#define MBOX_CMD_LUN_RESET 0x0016
219#define MBOX_CMD_TARGET_WARM_RESET 0x0017 220#define MBOX_CMD_TARGET_WARM_RESET 0x0017
220#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E 221#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
@@ -258,13 +259,15 @@ union external_hw_config_reg {
258/* Mailbox 1 */ 259/* Mailbox 1 */
259#define FW_STATE_READY 0x0000 260#define FW_STATE_READY 0x0000
260#define FW_STATE_CONFIG_WAIT 0x0001 261#define FW_STATE_CONFIG_WAIT 0x0001
261#define FW_STATE_WAIT_LOGIN 0x0002 262#define FW_STATE_WAIT_AUTOCONNECT 0x0002
262#define FW_STATE_ERROR 0x0004 263#define FW_STATE_ERROR 0x0004
263#define FW_STATE_DHCP_IN_PROGRESS 0x0008 264#define FW_STATE_CONFIGURING_IP 0x0008
264 265
265/* Mailbox 3 */ 266/* Mailbox 3 */
266#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001 267#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001
267#define FW_ADDSTATE_DHCP_ENABLED 0x0002 268#define FW_ADDSTATE_DHCPv4_ENABLED 0x0002
269#define FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED 0x0004
270#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008
268#define FW_ADDSTATE_LINK_UP 0x0010 271#define FW_ADDSTATE_LINK_UP 0x0010
269#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020 272#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
270#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B 273#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
@@ -320,6 +323,8 @@ union external_hw_config_reg {
320/* Host Adapter Initialization Control Block (from host) */ 323/* Host Adapter Initialization Control Block (from host) */
321struct addr_ctrl_blk { 324struct addr_ctrl_blk {
322 uint8_t version; /* 00 */ 325 uint8_t version; /* 00 */
326#define IFCB_VER_MIN 0x01
327#define IFCB_VER_MAX 0x02
323 uint8_t control; /* 01 */ 328 uint8_t control; /* 01 */
324 329
325 uint16_t fw_options; /* 02-03 */ 330 uint16_t fw_options; /* 02-03 */
@@ -351,11 +356,16 @@ struct addr_ctrl_blk {
351 uint16_t iscsi_opts; /* 30-31 */ 356 uint16_t iscsi_opts; /* 30-31 */
352 uint16_t ipv4_tcp_opts; /* 32-33 */ 357 uint16_t ipv4_tcp_opts; /* 32-33 */
353 uint16_t ipv4_ip_opts; /* 34-35 */ 358 uint16_t ipv4_ip_opts; /* 34-35 */
359#define IPOPT_IPv4_PROTOCOL_ENABLE 0x8000
354 360
355 uint16_t iscsi_max_pdu_size; /* 36-37 */ 361 uint16_t iscsi_max_pdu_size; /* 36-37 */
356 uint8_t ipv4_tos; /* 38 */ 362 uint8_t ipv4_tos; /* 38 */
357 uint8_t ipv4_ttl; /* 39 */ 363 uint8_t ipv4_ttl; /* 39 */
358 uint8_t acb_version; /* 3A */ 364 uint8_t acb_version; /* 3A */
365#define ACB_NOT_SUPPORTED 0x00
366#define ACB_SUPPORTED 0x02 /* Capable of ACB Version 2
367 Features */
368
359 uint8_t res2; /* 3B */ 369 uint8_t res2; /* 3B */
360 uint16_t def_timeout; /* 3C-3D */ 370 uint16_t def_timeout; /* 3C-3D */
361 uint16_t iscsi_fburst_len; /* 3E-3F */ 371 uint16_t iscsi_fburst_len; /* 3E-3F */
@@ -397,16 +407,35 @@ struct addr_ctrl_blk {
397 uint32_t cookie; /* 200-203 */ 407 uint32_t cookie; /* 200-203 */
398 uint16_t ipv6_port; /* 204-205 */ 408 uint16_t ipv6_port; /* 204-205 */
399 uint16_t ipv6_opts; /* 206-207 */ 409 uint16_t ipv6_opts; /* 206-207 */
410#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000
411
400 uint16_t ipv6_addtl_opts; /* 208-209 */ 412 uint16_t ipv6_addtl_opts; /* 208-209 */
413#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB
414 Only */
415#define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR 0x0001
416
401 uint16_t ipv6_tcp_opts; /* 20A-20B */ 417 uint16_t ipv6_tcp_opts; /* 20A-20B */
402 uint8_t ipv6_tcp_wsf; /* 20C */ 418 uint8_t ipv6_tcp_wsf; /* 20C */
403 uint16_t ipv6_flow_lbl; /* 20D-20F */ 419 uint16_t ipv6_flow_lbl; /* 20D-20F */
404 uint8_t ipv6_gw_addr[16]; /* 210-21F */ 420 uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
405 uint16_t ipv6_vlan_tag; /* 220-221 */ 421 uint16_t ipv6_vlan_tag; /* 220-221 */
406 uint8_t ipv6_lnk_lcl_addr_state;/* 222 */ 422 uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
407 uint8_t ipv6_addr0_state; /* 223 */ 423 uint8_t ipv6_addr0_state; /* 223 */
408 uint8_t ipv6_addr1_state; /* 224 */ 424 uint8_t ipv6_addr1_state; /* 224 */
409 uint8_t ipv6_gw_state; /* 225 */ 425#define IP_ADDRSTATE_UNCONFIGURED 0
426#define IP_ADDRSTATE_INVALID 1
427#define IP_ADDRSTATE_ACQUIRING 2
428#define IP_ADDRSTATE_TENTATIVE 3
429#define IP_ADDRSTATE_DEPRICATED 4
430#define IP_ADDRSTATE_PREFERRED 5
431#define IP_ADDRSTATE_DISABLING 6
432
433 uint8_t ipv6_dflt_rtr_state; /* 225 */
434#define IPV6_RTRSTATE_UNKNOWN 0
435#define IPV6_RTRSTATE_MANUAL 1
436#define IPV6_RTRSTATE_ADVERTISED 3
437#define IPV6_RTRSTATE_STALE 4
438
410 uint8_t ipv6_traffic_class; /* 226 */ 439 uint8_t ipv6_traffic_class; /* 226 */
411 uint8_t ipv6_hop_limit; /* 227 */ 440 uint8_t ipv6_hop_limit; /* 227 */
412 uint8_t ipv6_if_id[8]; /* 228-22F */ 441 uint8_t ipv6_if_id[8]; /* 228-22F */
@@ -424,7 +453,7 @@ struct addr_ctrl_blk {
424 453
425struct init_fw_ctrl_blk { 454struct init_fw_ctrl_blk {
426 struct addr_ctrl_blk pri; 455 struct addr_ctrl_blk pri;
427 struct addr_ctrl_blk sec; 456/* struct addr_ctrl_blk sec;*/
428}; 457};
429 458
430/*************************************************************************/ 459/*************************************************************************/
@@ -433,6 +462,9 @@ struct dev_db_entry {
433 uint16_t options; /* 00-01 */ 462 uint16_t options; /* 00-01 */
434#define DDB_OPT_DISC_SESSION 0x10 463#define DDB_OPT_DISC_SESSION 0x10
435#define DDB_OPT_TARGET 0x02 /* device is a target */ 464#define DDB_OPT_TARGET 0x02 /* device is a target */
465#define DDB_OPT_IPV6_DEVICE 0x100
466#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
467#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
436 468
437 uint16_t exec_throttle; /* 02-03 */ 469 uint16_t exec_throttle; /* 02-03 */
438 uint16_t exec_count; /* 04-05 */ 470 uint16_t exec_count; /* 04-05 */
@@ -468,7 +500,7 @@ struct dev_db_entry {
468 * pointer to a string so we 500 * pointer to a string so we
469 * don't have to reserve soooo 501 * don't have to reserve soooo
470 * much RAM */ 502 * much RAM */
471 uint8_t ipv6_addr[0x10];/* 1A0-1AF */ 503 uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
472 uint8_t res5[0x10]; /* 1B0-1BF */ 504 uint8_t res5[0x10]; /* 1B0-1BF */
473 uint16_t ddb_link; /* 1C0-1C1 */ 505 uint16_t ddb_link; /* 1C0-1C1 */
474 uint16_t chap_tbl_idx; /* 1C2-1C3 */ 506 uint16_t chap_tbl_idx; /* 1C2-1C3 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 96ebfb021f6c..c4636f6cb3cb 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -25,6 +25,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
25int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha); 25int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
26int qla4xxx_relogin_device(struct scsi_qla_host * ha, 26int qla4xxx_relogin_device(struct scsi_qla_host * ha,
27 struct ddb_entry * ddb_entry); 27 struct ddb_entry * ddb_entry);
28int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
28int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, 29int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
29 int lun); 30 int lun);
30int qla4xxx_reset_target(struct scsi_qla_host * ha, 31int qla4xxx_reset_target(struct scsi_qla_host * ha,
@@ -65,13 +66,14 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
65int qla4xxx_init_rings(struct scsi_qla_host * ha); 66int qla4xxx_init_rings(struct scsi_qla_host * ha);
66struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, 67struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
67 uint32_t index); 68 uint32_t index);
68void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb); 69void qla4xxx_srb_compl(struct kref *ref);
69int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha); 70int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
70int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha, 71int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
71 uint32_t fw_ddb_index, uint32_t state); 72 uint32_t state, uint32_t conn_error);
72void qla4xxx_dump_buffer(void *b, uint32_t size); 73void qla4xxx_dump_buffer(void *b, uint32_t size);
73int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, 74int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
74 struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod); 75 struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
76int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err);
75 77
76extern int ql4xextended_error_logging; 78extern int ql4xextended_error_logging;
77extern int ql4xdiscoverywait; 79extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 92329a461c68..5510df8a7fa6 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -189,6 +189,78 @@ static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
189 return qla4xxx_get_firmware_status(ha); 189 return qla4xxx_get_firmware_status(ha);
190} 190}
191 191
192static uint8_t
193qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
194{
195 uint8_t ipv4_wait = 0;
196 uint8_t ipv6_wait = 0;
 197 int8_t ip_address[IPv6_ADDR_LEN] = {0};
198
 199 /* If both IPv4 and IPv6 are enabled, only one IP
 200 * address may have been acquired so far, so check
 201 * whether we need to wait for another */
202 if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) {
203 if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) &&
204 ((ha->addl_fw_state &
205 FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
206 ipv4_wait = 1;
207 }
208 if (((ha->ipv6_addl_options &
209 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
210 ((ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) ||
211 (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) ||
212 (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) {
213
214 ipv6_wait = 1;
215
216 if ((ha->ipv6_link_local_state ==
217 IP_ADDRSTATE_PREFERRED) ||
218 (ha->ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) ||
219 (ha->ipv6_addr1_state == IP_ADDRSTATE_PREFERRED)) {
220 DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
221 "Preferred IP configured."
222 " Don't wait!\n", ha->host_no,
223 __func__));
224 ipv6_wait = 0;
225 }
226 if (memcmp(&ha->ipv6_default_router_addr, ip_address,
227 IPv6_ADDR_LEN) == 0) {
228 DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
229 "No Router configured. "
230 "Don't wait!\n", ha->host_no,
231 __func__));
232 ipv6_wait = 0;
233 }
234 if ((ha->ipv6_default_router_state ==
235 IPV6_RTRSTATE_MANUAL) &&
236 (ha->ipv6_link_local_state ==
237 IP_ADDRSTATE_TENTATIVE) &&
238 (memcmp(&ha->ipv6_link_local_addr,
239 &ha->ipv6_default_router_addr, 4) == 0)) {
240 DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
241 "IP configured. Don't wait!\n",
242 ha->host_no, __func__));
243 ipv6_wait = 0;
244 }
245 }
246 if (ipv4_wait || ipv6_wait) {
247 DEBUG2(printk("scsi%ld: %s: Wait for additional "
248 "IP(s) \"", ha->host_no, __func__));
249 if (ipv4_wait)
250 DEBUG2(printk("IPv4 "));
251 if (ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING)
252 DEBUG2(printk("IPv6LinkLocal "));
253 if (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING)
254 DEBUG2(printk("IPv6Addr0 "));
255 if (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING)
256 DEBUG2(printk("IPv6Addr1 "));
257 DEBUG2(printk("\"\n"));
258 }
259 }
260
 261 return ipv4_wait | ipv6_wait;
262}
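Boiled down, qla4xxx_wait_for_ip_config() above keeps the init loop waiting only while an enabled address family is still acquiring and none of the early-exit conditions (a preferred address already configured, no router configured, or a manual link-local router) has been seen. A condensed and deliberately simplified sketch of that predicate:

#include <linux/types.h>

struct ip_state {
	bool v4_dhcp_enabled, v4_lease_acquired;
	bool v6_nd_enabled, v6_acquiring, v6_preferred, v6_router_known;
};

static bool must_wait(const struct ip_state *s)
{
	bool v4_wait = s->v4_dhcp_enabled && !s->v4_lease_acquired;
	bool v6_wait = s->v6_nd_enabled && s->v6_acquiring &&
		       !s->v6_preferred && s->v6_router_known;

	return v4_wait || v6_wait;
}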
263
192static int qla4xxx_fw_ready(struct scsi_qla_host *ha) 264static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
193{ 265{
194 uint32_t timeout_count; 266 uint32_t timeout_count;
@@ -226,38 +298,80 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
226 continue; 298 continue;
227 } 299 }
228 300
301 if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
302 DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
303 "AUTOCONNECT in progress\n",
304 ha->host_no, __func__));
305 }
306
307 if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
308 DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
309 " CONFIGURING IP\n",
310 ha->host_no, __func__));
 311 /*
 312 * Check the link state after 15 secs; if the link is
 313 * still DOWN then the cable is unplugged. After 15 secs,
 314 * ignore the "DHCP in Progress/CONFIGURING IP" bit when
 315 * deciding whether the firmware is in the ready state.
 316 * This applies to both 2.x and 3.x firmware.
 317 */
318 if (timeout_count <= (ADAPTER_INIT_TOV - 15)) {
319 if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) {
320 DEBUG2(printk(KERN_INFO "scsi%ld: %s:"
321 " LINK UP (Cable plugged)\n",
322 ha->host_no, __func__));
323 } else if (ha->firmware_state &
324 (FW_STATE_CONFIGURING_IP |
325 FW_STATE_READY)) {
326 DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
327 "LINK DOWN (Cable unplugged)\n",
328 ha->host_no, __func__));
329 ha->firmware_state = FW_STATE_READY;
330 }
331 }
332 }
333
229 if (ha->firmware_state == FW_STATE_READY) { 334 if (ha->firmware_state == FW_STATE_READY) {
230 DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n")); 335 /* If DHCP IP Addr is available, retrieve it now. */
231 /* The firmware is ready to process SCSI commands. */ 336 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR,
232 DEBUG2(dev_info(&ha->pdev->dev, 337 &ha->dpc_flags))
233 "scsi%ld: %s: MEDIA TYPE - %s\n", 338 qla4xxx_get_dhcp_ip_address(ha);
234 ha->host_no, 339
235 __func__, (ha->addl_fw_state & 340 if (!qla4xxx_wait_for_ip_config(ha) ||
236 FW_ADDSTATE_OPTICAL_MEDIA) 341 timeout_count == 1) {
237 != 0 ? "OPTICAL" : "COPPER")); 342 DEBUG2(dev_info(&ha->pdev->dev,
238 DEBUG2(dev_info(&ha->pdev->dev, 343 "Firmware Ready..\n"));
239 "scsi%ld: %s: DHCP STATE Enabled " 344 /* The firmware is ready to process SCSI
240 "%s\n", 345 commands. */
241 ha->host_no, __func__, 346 DEBUG2(dev_info(&ha->pdev->dev,
242 (ha->addl_fw_state & 347 "scsi%ld: %s: MEDIA TYPE"
243 FW_ADDSTATE_DHCP_ENABLED) != 0 ? 348 " - %s\n", ha->host_no,
244 "YES" : "NO")); 349 __func__, (ha->addl_fw_state &
245 DEBUG2(dev_info(&ha->pdev->dev, 350 FW_ADDSTATE_OPTICAL_MEDIA)
246 "scsi%ld: %s: LINK %s\n", 351 != 0 ? "OPTICAL" : "COPPER"));
247 ha->host_no, __func__, 352 DEBUG2(dev_info(&ha->pdev->dev,
248 (ha->addl_fw_state & 353 "scsi%ld: %s: DHCPv4 STATE"
249 FW_ADDSTATE_LINK_UP) != 0 ? 354 " Enabled %s\n", ha->host_no,
250 "UP" : "DOWN")); 355 __func__, (ha->addl_fw_state &
251 DEBUG2(dev_info(&ha->pdev->dev, 356 FW_ADDSTATE_DHCPv4_ENABLED) != 0 ?
252 "scsi%ld: %s: iSNS Service " 357 "YES" : "NO"));
253 "Started %s\n", 358 DEBUG2(dev_info(&ha->pdev->dev,
254 ha->host_no, __func__, 359 "scsi%ld: %s: LINK %s\n",
255 (ha->addl_fw_state & 360 ha->host_no, __func__,
256 FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ? 361 (ha->addl_fw_state &
257 "YES" : "NO")); 362 FW_ADDSTATE_LINK_UP) != 0 ?
258 363 "UP" : "DOWN"));
259 ready = 1; 364 DEBUG2(dev_info(&ha->pdev->dev,
260 break; 365 "scsi%ld: %s: iSNS Service "
366 "Started %s\n",
367 ha->host_no, __func__,
368 (ha->addl_fw_state &
369 FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
370 "YES" : "NO"));
371
372 ready = 1;
373 break;
374 }
261 } 375 }
262 DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - " 376 DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
263 "seconds expired= %d\n", ha->host_no, __func__, 377 "seconds expired= %d\n", ha->host_no, __func__,
@@ -272,15 +386,19 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
272 msleep(1000); 386 msleep(1000);
273 } /* end of for */ 387 } /* end of for */
274 388
275 if (timeout_count == 0) 389 if (timeout_count <= 0)
276 DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n", 390 DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
277 ha->host_no, __func__)); 391 ha->host_no, __func__));
278 392
279 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) { 393 if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
280 DEBUG2(printk("scsi%ld: %s: FW is reporting its waiting to" 394 DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting "
281 " grab an IP address from DHCP server\n", 395 "it's waiting to configure an IP address\n",
282 ha->host_no, __func__)); 396 ha->host_no, __func__));
283 ready = 1; 397 ready = 1;
398 } else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
399 DEBUG2(printk("scsi%ld: %s: FW initialized, but "
400 "auto-discovery still in process\n",
401 ha->host_no, __func__));
284 } 402 }
285 403
286 return ready; 404 return ready;
@@ -387,6 +505,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
387 struct dev_db_entry *fw_ddb_entry = NULL; 505 struct dev_db_entry *fw_ddb_entry = NULL;
388 dma_addr_t fw_ddb_entry_dma; 506 dma_addr_t fw_ddb_entry_dma;
389 int status = QLA_ERROR; 507 int status = QLA_ERROR;
508 uint32_t conn_err;
390 509
391 if (ddb_entry == NULL) { 510 if (ddb_entry == NULL) {
392 DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no, 511 DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
@@ -407,7 +526,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
407 526
408 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry, 527 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
409 fw_ddb_entry_dma, NULL, NULL, 528 fw_ddb_entry_dma, NULL, NULL,
410 &ddb_entry->fw_ddb_device_state, NULL, 529 &ddb_entry->fw_ddb_device_state, &conn_err,
411 &ddb_entry->tcp_source_port_num, 530 &ddb_entry->tcp_source_port_num,
412 &ddb_entry->connection_id) == 531 &ddb_entry->connection_id) ==
413 QLA_ERROR) { 532 QLA_ERROR) {
@@ -419,6 +538,7 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
419 } 538 }
420 539
421 status = QLA_SUCCESS; 540 status = QLA_SUCCESS;
541 ddb_entry->options = le16_to_cpu(fw_ddb_entry->options);
422 ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid); 542 ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
423 ddb_entry->task_mgmt_timeout = 543 ddb_entry->task_mgmt_timeout =
424 le16_to_cpu(fw_ddb_entry->def_timeout); 544 le16_to_cpu(fw_ddb_entry->def_timeout);
@@ -442,11 +562,44 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
442 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0], 562 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
443 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr))); 563 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
444 564
445 DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n", 565 ddb_entry->iscsi_max_burst_len = fw_ddb_entry->iscsi_max_burst_len;
446 ha->host_no, __func__, fw_ddb_index, 566 ddb_entry->iscsi_max_outsnd_r2t = fw_ddb_entry->iscsi_max_outsnd_r2t;
447 ddb_entry->fw_ddb_device_state, status)); 567 ddb_entry->iscsi_first_burst_len = fw_ddb_entry->iscsi_first_burst_len;
448 568 ddb_entry->iscsi_max_rcv_data_seg_len =
449 exit_update_ddb: 569 fw_ddb_entry->iscsi_max_rcv_data_seg_len;
570 ddb_entry->iscsi_max_snd_data_seg_len =
571 fw_ddb_entry->iscsi_max_snd_data_seg_len;
572
573 if (ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
574 memcpy(&ddb_entry->remote_ipv6_addr,
575 fw_ddb_entry->ip_addr,
576 min(sizeof(ddb_entry->remote_ipv6_addr),
577 sizeof(fw_ddb_entry->ip_addr)));
578 memcpy(&ddb_entry->link_local_ipv6_addr,
579 fw_ddb_entry->link_local_ipv6_addr,
580 min(sizeof(ddb_entry->link_local_ipv6_addr),
581 sizeof(fw_ddb_entry->link_local_ipv6_addr)));
582
583 DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d "
584 "State %04x ConnErr %08x IP %pI6 "
585 ":%04d \"%s\"\n",
586 __func__, fw_ddb_index,
587 ddb_entry->os_target_id,
588 ddb_entry->fw_ddb_device_state,
589 conn_err, fw_ddb_entry->ip_addr,
590 le16_to_cpu(fw_ddb_entry->port),
591 fw_ddb_entry->iscsi_name));
592 } else
593 DEBUG2(dev_info(&ha->pdev->dev, "%s: DDB[%d] osIdx = %d "
594 "State %04x ConnErr %08x IP %pI4 "
595 ":%04d \"%s\"\n",
596 __func__, fw_ddb_index,
597 ddb_entry->os_target_id,
598 ddb_entry->fw_ddb_device_state,
599 conn_err, fw_ddb_entry->ip_addr,
600 le16_to_cpu(fw_ddb_entry->port),
601 fw_ddb_entry->iscsi_name));
602exit_update_ddb:
450 if (fw_ddb_entry) 603 if (fw_ddb_entry)
451 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 604 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
452 fw_ddb_entry, fw_ddb_entry_dma); 605 fw_ddb_entry, fw_ddb_entry_dma);
@@ -492,6 +645,40 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
492} 645}
493 646
494/** 647/**
648 * qla4_is_relogin_allowed - Are we allowed to login?
649 * @ha: Pointer to host adapter structure.
650 * @conn_err: Last connection error associated with the ddb
651 *
652 * This routine tests the given connection error to determine if
653 * we are allowed to login.
654 **/
655int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
656{
657 uint32_t err_code, login_rsp_sts_class;
658 int relogin = 1;
659
660 err_code = ((conn_err & 0x00ff0000) >> 16);
661 login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8);
662 if (err_code == 0x1c || err_code == 0x06) {
663 DEBUG2(dev_info(&ha->pdev->dev,
664 ": conn_err=0x%08x, send target completed"
665 " or access denied failure\n", conn_err));
666 relogin = 0;
667 }
668 if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) {
669 /* Login Response PDU returned an error.
670 Login Response Status in Error Code Detail
671 indicates login should not be retried.*/
672 DEBUG2(dev_info(&ha->pdev->dev,
673 ": conn_err=0x%08x, do not retry relogin\n",
674 conn_err));
675 relogin = 0;
676 }
677
678 return relogin;
679}
680
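For reference, qla4_is_relogin_allowed() above treats conn_err as a packed status word: bits 23:16 carry the firmware error code and bits 15:8 the iSCSI Login Response status class. A minimal standalone sketch of that decode (masks taken from the function above; the helper names are illustrative only):

	#include <stdint.h>

	/* Extract the firmware error code (bits 23:16). */
	static inline uint8_t conn_err_code(uint32_t conn_err)
	{
		return (conn_err & 0x00ff0000) >> 16;
	}

	/* Extract the Login Response status class (bits 15:8). */
	static inline uint8_t conn_err_login_sts_class(uint32_t conn_err)
	{
		return (conn_err & 0x0000ff00) >> 8;
	}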
681/**
495 * qla4xxx_configure_ddbs - builds driver ddb list 682 * qla4xxx_configure_ddbs - builds driver ddb list
496 * @ha: Pointer to host adapter structure. 683 * @ha: Pointer to host adapter structure.
497 * 684 *
@@ -505,18 +692,30 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
505 uint32_t fw_ddb_index = 0; 692 uint32_t fw_ddb_index = 0;
506 uint32_t next_fw_ddb_index = 0; 693 uint32_t next_fw_ddb_index = 0;
507 uint32_t ddb_state; 694 uint32_t ddb_state;
508 uint32_t conn_err, err_code; 695 uint32_t conn_err;
509 struct ddb_entry *ddb_entry; 696 struct ddb_entry *ddb_entry;
697 struct dev_db_entry *fw_ddb_entry = NULL;
698 dma_addr_t fw_ddb_entry_dma;
699 uint32_t ipv6_device;
510 uint32_t new_tgt; 700 uint32_t new_tgt;
511 701
702 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
703 &fw_ddb_entry_dma, GFP_KERNEL);
704 if (fw_ddb_entry == NULL) {
705 DEBUG2(dev_info(&ha->pdev->dev, "%s: DMA alloc failed\n",
706 __func__));
707 return QLA_ERROR;
708 }
709
512 dev_info(&ha->pdev->dev, "Initializing DDBs ...\n"); 710 dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
513 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; 711 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
514 fw_ddb_index = next_fw_ddb_index) { 712 fw_ddb_index = next_fw_ddb_index) {
515 /* First, let's see if a device exists here */ 713 /* First, let's see if a device exists here */
516 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL, 714 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
517 &next_fw_ddb_index, &ddb_state, 715 0, NULL, &next_fw_ddb_index,
518 &conn_err, NULL, NULL) == 716 &ddb_state, &conn_err,
519 QLA_ERROR) { 717 NULL, NULL) ==
718 QLA_ERROR) {
520 DEBUG2(printk("scsi%ld: %s: get_ddb_entry, " 719 DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
521 "fw_ddb_index %d failed", ha->host_no, 720 "fw_ddb_index %d failed", ha->host_no,
522 __func__, fw_ddb_index)); 721 __func__, fw_ddb_index));
@@ -533,18 +732,19 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
533 /* Try and login to device */ 732 /* Try and login to device */
534 DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n", 733 DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
535 ha->host_no, __func__, fw_ddb_index)); 734 ha->host_no, __func__, fw_ddb_index));
536 err_code = ((conn_err & 0x00ff0000) >> 16); 735 ipv6_device = le16_to_cpu(fw_ddb_entry->options) &
537 if (err_code == 0x1c || err_code == 0x06) { 736 DDB_OPT_IPV6_DEVICE;
538 DEBUG2(printk("scsi%ld: %s send target " 737 if (qla4_is_relogin_allowed(ha, conn_err) &&
539 "completed " 738 ((!ipv6_device &&
540 "or access denied failure\n", 739 *((uint32_t *)fw_ddb_entry->ip_addr))
541 ha->host_no, __func__)); 740 || ipv6_device)) {
542 } else {
543 qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0); 741 qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
544 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, 742 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
545 NULL, 0, NULL, &next_fw_ddb_index, 743 NULL, 0, NULL,
546 &ddb_state, &conn_err, NULL, NULL) 744 &next_fw_ddb_index,
547 == QLA_ERROR) { 745 &ddb_state, &conn_err,
746 NULL, NULL)
747 == QLA_ERROR) {
548 DEBUG2(printk("scsi%ld: %s:" 748 DEBUG2(printk("scsi%ld: %s:"
549 "get_ddb_entry %d failed\n", 749 "get_ddb_entry %d failed\n",
550 ha->host_no, 750 ha->host_no,
@@ -599,7 +799,6 @@ next_one:
599struct qla4_relog_scan { 799struct qla4_relog_scan {
600 int halt_wait; 800 int halt_wait;
601 uint32_t conn_err; 801 uint32_t conn_err;
602 uint32_t err_code;
603 uint32_t fw_ddb_index; 802 uint32_t fw_ddb_index;
604 uint32_t next_fw_ddb_index; 803 uint32_t next_fw_ddb_index;
605 uint32_t fw_ddb_device_state; 804 uint32_t fw_ddb_device_state;
@@ -609,18 +808,7 @@ static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs)
609{ 808{
610 struct ddb_entry *ddb_entry; 809 struct ddb_entry *ddb_entry;
611 810
612 /* 811 if (qla4_is_relogin_allowed(ha, rs->conn_err)) {
613 * Don't want to do a relogin if connection
614 * error is 0x1c.
615 */
616 rs->err_code = ((rs->conn_err & 0x00ff0000) >> 16);
617 if (rs->err_code == 0x1c || rs->err_code == 0x06) {
618 DEBUG2(printk(
619 "scsi%ld: %s send target"
620 " completed or "
621 "access denied failure\n",
622 ha->host_no, __func__));
623 } else {
624 /* We either have a device that is in 812 /* We either have a device that is in
625 * the process of relogging in or a 813 * the process of relogging in or a
626 * device that is waiting to be 814 * device that is waiting to be
@@ -908,7 +1096,7 @@ static void qla4x00_pci_config(struct scsi_qla_host *ha)
908static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha) 1096static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
909{ 1097{
910 int status = QLA_ERROR; 1098 int status = QLA_ERROR;
911 uint32_t max_wait_time; 1099 unsigned long max_wait_time;
912 unsigned long flags; 1100 unsigned long flags;
913 uint32_t mbox_status; 1101 uint32_t mbox_status;
914 1102
@@ -940,7 +1128,10 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
940 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1128 spin_unlock_irqrestore(&ha->hardware_lock, flags);
941 1129
942 /* Wait for firmware to come UP. */ 1130 /* Wait for firmware to come UP. */
943 max_wait_time = FIRMWARE_UP_TOV * 4; 1131 DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for "
1132 "boot firmware to complete...\n",
1133 ha->host_no, __func__, FIRMWARE_UP_TOV));
1134 max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ);
944 do { 1135 do {
945 uint32_t ctrl_status; 1136 uint32_t ctrl_status;
946 1137
@@ -954,16 +1145,15 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
954 if (mbox_status == MBOX_STS_COMMAND_COMPLETE) 1145 if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
955 break; 1146 break;
956 1147
957 DEBUG2(printk("scsi%ld: %s: Waiting for boot firmware to " 1148 DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
958 "complete... ctrl_sts=0x%x, remaining=%d\n", 1149 "firmware to complete... ctrl_sts=0x%x\n",
959 ha->host_no, __func__, ctrl_status, 1150 ha->host_no, __func__, ctrl_status));
960 max_wait_time));
961 1151
962 msleep(250); 1152 msleep_interruptible(250);
963 } while ((max_wait_time--)); 1153 } while (!time_after_eq(jiffies, max_wait_time));
964 1154
965 if (mbox_status == MBOX_STS_COMMAND_COMPLETE) { 1155 if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
966 DEBUG(printk("scsi%ld: %s: Firmware has started\n", 1156 DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n",
967 ha->host_no, __func__)); 1157 ha->host_no, __func__));
968 1158
969 spin_lock_irqsave(&ha->hardware_lock, flags); 1159 spin_lock_irqsave(&ha->hardware_lock, flags);
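The hunk above replaces the old decrementing poll counter with the kernel's jiffies deadline idiom, so the wait tracks wall-clock time even if individual sleeps overrun. A minimal sketch of the pattern, assuming a timeout expressed in seconds (function and condition names are illustrative, not from the driver):

	#include <linux/jiffies.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Poll cond() for up to tov seconds, sleeping between polls. */
	static int poll_until_deadline(int (*cond)(void), unsigned int tov)
	{
		unsigned long deadline = jiffies + tov * HZ;

		do {
			if (cond())
				return 0;		/* condition met */
			msleep_interruptible(250);	/* yield between polls */
		} while (!time_after_eq(jiffies, deadline));

		return -ETIMEDOUT;			/* deadline passed */
	}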
@@ -1141,6 +1331,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1141 int status = QLA_ERROR; 1331 int status = QLA_ERROR;
1142 int8_t ip_address[IP_ADDR_LEN] = {0} ; 1332 int8_t ip_address[IP_ADDR_LEN] = {0} ;
1143 1333
1334 clear_bit(AF_ONLINE, &ha->flags);
1144 ha->eeprom_cmd_data = 0; 1335 ha->eeprom_cmd_data = 0;
1145 1336
1146 qla4x00_pci_config(ha); 1337 qla4x00_pci_config(ha);
@@ -1166,7 +1357,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1166 * the ddb_list and wait for DHCP lease acquired aen to come in 1357 * the ddb_list and wait for DHCP lease acquired aen to come in
1167 * followed by 0x8014 aen" to trigger the tgt discovery process. 1358 * followed by 0x8014 aen" to trigger the tgt discovery process.
1168 */ 1359 */
1169 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) 1360 if (ha->firmware_state & FW_STATE_CONFIGURING_IP)
1170 goto exit_init_online; 1361 goto exit_init_online;
1171 1362
1172 /* Skip device discovery if ip and subnet is zero */ 1363 /* Skip device discovery if ip and subnet is zero */
@@ -1270,8 +1461,8 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
1270 * 1461 *
1271 * This routine processes a Device Database Changed AEN Event. 1462 * This routine processes a Device Database Changed AEN Event.
1272 **/ 1463 **/
1273int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, 1464int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
1274 uint32_t fw_ddb_index, uint32_t state) 1465 uint32_t state, uint32_t conn_err)
1275{ 1466{
1276 struct ddb_entry * ddb_entry; 1467 struct ddb_entry * ddb_entry;
1277 uint32_t old_fw_ddb_device_state; 1468 uint32_t old_fw_ddb_device_state;
@@ -1318,19 +1509,24 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1318 * the device came back. 1509 * the device came back.
1319 */ 1510 */
1320 } else { 1511 } else {
1321 /* Device went away, try to relogin. */ 1512 /* Device went away, mark device missing */
1322 /* Mark device missing */ 1513 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) {
1323 if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) 1514 DEBUG2(dev_info(&ha->pdev->dev, "%s mark missing "
1515 "ddb_entry 0x%p sess 0x%p conn 0x%p\n",
1516 __func__, ddb_entry,
1517 ddb_entry->sess, ddb_entry->conn));
1324 qla4xxx_mark_device_missing(ha, ddb_entry); 1518 qla4xxx_mark_device_missing(ha, ddb_entry);
1519 }
1520
1325 /* 1521 /*
1326 * Relogin if device state changed to a not active state. 1522 * Relogin if device state changed to a not active state.
1327 * However, do not relogin if this aen is a result of an IOCTL 1523 * However, do not relogin if a RELOGIN is in process, or
1328 * logout (DF_NO_RELOGIN) or if this is a discovered device. 1524 * we are not allowed to relogin to this DDB.
1329 */ 1525 */
1330 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && 1526 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
1331 !test_bit(DF_RELOGIN, &ddb_entry->flags) && 1527 !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
1332 !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) && 1528 !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) &&
1333 !test_bit(DF_ISNS_DISCOVERED, &ddb_entry->flags)) { 1529 qla4_is_relogin_allowed(ha, conn_err)) {
1334 /* 1530 /*
1335 * This triggers a relogin. After the relogin_timer 1531 * This triggers a relogin. After the relogin_timer
1336 * expires, the relogin gets scheduled. We must wait a 1532 * expires, the relogin gets scheduled. We must wait a
@@ -1338,7 +1534,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1338 * with failed device_state or a logout response before 1534 * with failed device_state or a logout response before
1339 * we can issue another relogin. 1535 * we can issue another relogin.
1340 */ 1536 */
1341 /* Firmware padds this timeout: (time2wait +1). 1537 /* Firmware pads this timeout: (time2wait +1).
1342 * Driver retry to login should be longer than F/W. 1538 * Driver retry to login should be longer than F/W.
1343 * Otherwise F/W will fail 1539 * Otherwise F/W will fail
1344 * set_ddb() mbx cmd with 0x4005 since it still 1540 * set_ddb() mbx cmd with 0x4005 since it still
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index e0c32159749c..e66f3f263f49 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -299,7 +299,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
299 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); 299 qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
300 wmb(); 300 wmb();
301 301
302 srb->cmd->host_scribble = (unsigned char *)srb; 302 srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;
303 303
304 /* update counters */ 304 /* update counters */
305 srb->state = SRB_ACTIVE_STATE; 305 srb->state = SRB_ACTIVE_STATE;
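This hunk stops storing the srb pointer in host_scribble and stashes the request-queue index instead; the new abort path can then hand the firmware an index rather than dereferencing a possibly stale pointer. A hedged sketch of the round-trip, mirroring the casts used above (MAX_SRBS is the driver's sentinel for a command the firmware has already completed):

	/* Producer side: stash the integer index in the opaque cookie. */
	cmd->host_scribble = (unsigned char *)(unsigned long)index;

	/* Consumer side (e.g. the abort handler): recover the index. */
	index = (unsigned long)(unsigned char *)cmd->host_scribble;
	if (index == MAX_SRBS)
		return status;	/* completion already posted by firmware */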
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index c196d55eae39..596c3031483c 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -97,7 +97,7 @@ qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
97 97
98 /* Place command on done queue. */ 98 /* Place command on done queue. */
99 if (srb->req_sense_len == 0) { 99 if (srb->req_sense_len == 0) {
100 qla4xxx_srb_compl(ha, srb); 100 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
101 ha->status_srb = NULL; 101 ha->status_srb = NULL;
102 } 102 }
103} 103}
@@ -329,7 +329,7 @@ status_entry_exit:
329 /* complete the request, if not waiting for status_continuation pkt */ 329 /* complete the request, if not waiting for status_continuation pkt */
330 srb->cc_stat = sts_entry->completionStatus; 330 srb->cc_stat = sts_entry->completionStatus;
331 if (ha->status_srb == NULL) 331 if (ha->status_srb == NULL)
332 qla4xxx_srb_compl(ha, srb); 332 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
333} 333}
334 334
335/** 335/**
@@ -393,7 +393,7 @@ static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
393 /* RETRY normally by sending it back with 393 /* RETRY normally by sending it back with
394 * DID_BUS_BUSY */ 394 * DID_BUS_BUSY */
395 srb->cmd->result = DID_BUS_BUSY << 16; 395 srb->cmd->result = DID_BUS_BUSY << 16;
396 qla4xxx_srb_compl(ha, srb); 396 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
397 break; 397 break;
398 398
399 case ET_CONTINUE: 399 case ET_CONTINUE:
@@ -498,15 +498,22 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
498 break; 498 break;
499 499
500 case MBOX_ASTS_LINK_UP: 500 case MBOX_ASTS_LINK_UP:
501 DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
502 ha->host_no, mbox_status));
503 set_bit(AF_LINK_UP, &ha->flags); 501 set_bit(AF_LINK_UP, &ha->flags);
502 if (test_bit(AF_INIT_DONE, &ha->flags))
503 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
504
505 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
506 " LINK UP\n", ha->host_no,
507 mbox_status));
504 break; 508 break;
505 509
506 case MBOX_ASTS_LINK_DOWN: 510 case MBOX_ASTS_LINK_DOWN:
507 DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
508 ha->host_no, mbox_status));
509 clear_bit(AF_LINK_UP, &ha->flags); 511 clear_bit(AF_LINK_UP, &ha->flags);
512 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
513
514 DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x Adapter"
515 " LINK DOWN\n", ha->host_no,
516 mbox_status));
510 break; 517 break;
511 518
512 case MBOX_ASTS_HEARTBEAT: 519 case MBOX_ASTS_HEARTBEAT:
@@ -831,7 +838,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
831 qla4xxx_reinitialize_ddb_list(ha); 838 qla4xxx_reinitialize_ddb_list(ha);
832 } else if (mbox_sts[1] == 1) { /* Specific device. */ 839 } else if (mbox_sts[1] == 1) { /* Specific device. */
833 qla4xxx_process_ddb_changed(ha, mbox_sts[2], 840 qla4xxx_process_ddb_changed(ha, mbox_sts[2],
834 mbox_sts[3]); 841 mbox_sts[3], mbox_sts[4]);
835 } 842 }
836 break; 843 break;
837 } 844 }
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index caeb7d10ae04..75496fb0ae75 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -172,108 +172,207 @@ mbox_exit:
172 return status; 172 return status;
173} 173}
174 174
175uint8_t
176qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
177 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
178{
179 memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
180 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
181 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
182 mbox_cmd[1] = 0;
183 mbox_cmd[2] = LSDW(init_fw_cb_dma);
184 mbox_cmd[3] = MSDW(init_fw_cb_dma);
185 mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
186 mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;
187
188 if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
189 QLA_SUCCESS) {
190 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
191 "MBOX_CMD_INITIALIZE_FIRMWARE"
192 " failed w/ status %04X\n",
193 ha->host_no, __func__, mbox_sts[0]));
194 return QLA_ERROR;
195 }
196 return QLA_SUCCESS;
197}
198
199uint8_t
200qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
201 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
202{
203 memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
204 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
205 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
206 mbox_cmd[2] = LSDW(init_fw_cb_dma);
207 mbox_cmd[3] = MSDW(init_fw_cb_dma);
208 mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
209
210 if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
211 QLA_SUCCESS) {
212 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
213 "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
214 " failed w/ status %04X\n",
215 ha->host_no, __func__, mbox_sts[0]));
216 return QLA_ERROR;
217 }
218 return QLA_SUCCESS;
219}
220
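Both mailbox helpers above hand the firmware the 64-bit DMA address of the control block as two 32-bit mailbox registers. A sketch of the split, assuming LSDW()/MSDW() take the low and high dwords respectively (the macro bodies shown here are illustrative; the driver supplies its own definitions):

	#include <linux/types.h>

	#define LSDW(x)	((u32)((u64)(x) & 0xffffffff))	/* low 32 bits */
	#define MSDW(x)	((u32)(((u64)(x)) >> 32))	/* high 32 bits */

	/* mbox_cmd[2]/[3] carry the buffer address, low dword first. */
	mbox_cmd[2] = LSDW(init_fw_cb_dma);
	mbox_cmd[3] = MSDW(init_fw_cb_dma);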
221void
222qla4xxx_update_local_ip(struct scsi_qla_host *ha,
223 struct addr_ctrl_blk *init_fw_cb)
224{
225 /* Save IPv4 Address Info */
226 memcpy(ha->ip_address, init_fw_cb->ipv4_addr,
227 min(sizeof(ha->ip_address), sizeof(init_fw_cb->ipv4_addr)));
228 memcpy(ha->subnet_mask, init_fw_cb->ipv4_subnet,
229 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->ipv4_subnet)));
230 memcpy(ha->gateway, init_fw_cb->ipv4_gw_addr,
231 min(sizeof(ha->gateway), sizeof(init_fw_cb->ipv4_gw_addr)));
232
233 if (is_ipv6_enabled(ha)) {
234 /* Save IPv6 Address */
235 ha->ipv6_link_local_state = init_fw_cb->ipv6_lnk_lcl_addr_state;
236 ha->ipv6_addr0_state = init_fw_cb->ipv6_addr0_state;
237 ha->ipv6_addr1_state = init_fw_cb->ipv6_addr1_state;
238 ha->ipv6_default_router_state = init_fw_cb->ipv6_dflt_rtr_state;
239 ha->ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
240 ha->ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
241
242 memcpy(&ha->ipv6_link_local_addr.in6_u.u6_addr8[8],
243 init_fw_cb->ipv6_if_id,
244 min(sizeof(ha->ipv6_link_local_addr)/2,
245 sizeof(init_fw_cb->ipv6_if_id)));
246 memcpy(&ha->ipv6_addr0, init_fw_cb->ipv6_addr0,
247 min(sizeof(ha->ipv6_addr0),
248 sizeof(init_fw_cb->ipv6_addr0)));
249 memcpy(&ha->ipv6_addr1, init_fw_cb->ipv6_addr1,
250 min(sizeof(ha->ipv6_addr1),
251 sizeof(init_fw_cb->ipv6_addr1)));
252 memcpy(&ha->ipv6_default_router_addr,
253 init_fw_cb->ipv6_dflt_rtr_addr,
254 min(sizeof(ha->ipv6_default_router_addr),
255 sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
256 }
257}
258
259uint8_t
260qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
261 uint32_t *mbox_cmd,
262 uint32_t *mbox_sts,
263 struct addr_ctrl_blk *init_fw_cb,
264 dma_addr_t init_fw_cb_dma)
265{
266 if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
267 != QLA_SUCCESS) {
268 DEBUG2(printk(KERN_WARNING
269 "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
270 ha->host_no, __func__));
271 return QLA_ERROR;
272 }
273
274 DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));
275
276 /* Save some info in adapter structure. */
277 ha->acb_version = init_fw_cb->acb_version;
278 ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
279 ha->tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
280 ha->ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
281 ha->ipv4_addr_state = le16_to_cpu(init_fw_cb->ipv4_addr_state);
282 ha->heartbeat_interval = init_fw_cb->hb_interval;
283 memcpy(ha->name_string, init_fw_cb->iscsi_name,
284 min(sizeof(ha->name_string),
285 sizeof(init_fw_cb->iscsi_name)));
286 /*memcpy(ha->alias, init_fw_cb->Alias,
287 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
288
 289	/* Save Command Line Parameter info */
290 ha->port_down_retry_count = le16_to_cpu(init_fw_cb->conn_ka_timeout);
291 ha->discovery_wait = ql4xdiscoverywait;
292
293 if (ha->acb_version == ACB_SUPPORTED) {
294 ha->ipv6_options = init_fw_cb->ipv6_opts;
295 ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts;
296 }
297 qla4xxx_update_local_ip(ha, init_fw_cb);
298
299 return QLA_SUCCESS;
300}
301
175/** 302/**
176 * qla4xxx_initialize_fw_cb - initializes firmware control block. 303 * qla4xxx_initialize_fw_cb - initializes firmware control block.
177 * @ha: Pointer to host adapter structure. 304 * @ha: Pointer to host adapter structure.
178 **/ 305 **/
179int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) 306int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
180{ 307{
181 struct init_fw_ctrl_blk *init_fw_cb; 308 struct addr_ctrl_blk *init_fw_cb;
182 dma_addr_t init_fw_cb_dma; 309 dma_addr_t init_fw_cb_dma;
183 uint32_t mbox_cmd[MBOX_REG_COUNT]; 310 uint32_t mbox_cmd[MBOX_REG_COUNT];
184 uint32_t mbox_sts[MBOX_REG_COUNT]; 311 uint32_t mbox_sts[MBOX_REG_COUNT];
185 int status = QLA_ERROR; 312 int status = QLA_ERROR;
186 313
187 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 314 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
188 sizeof(struct init_fw_ctrl_blk), 315 sizeof(struct addr_ctrl_blk),
189 &init_fw_cb_dma, GFP_KERNEL); 316 &init_fw_cb_dma, GFP_KERNEL);
190 if (init_fw_cb == NULL) { 317 if (init_fw_cb == NULL) {
191 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", 318 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
192 ha->host_no, __func__)); 319 ha->host_no, __func__));
193 return 10; 320 return 10;
194 } 321 }
195 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk)); 322 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
196 323
197 /* Get Initialize Firmware Control Block. */ 324 /* Get Initialize Firmware Control Block. */
198 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 325 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
199 memset(&mbox_sts, 0, sizeof(mbox_sts)); 326 memset(&mbox_sts, 0, sizeof(mbox_sts));
200 327
201 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; 328 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
202 mbox_cmd[2] = LSDW(init_fw_cb_dma);
203 mbox_cmd[3] = MSDW(init_fw_cb_dma);
204 mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
205
206 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
207 QLA_SUCCESS) { 329 QLA_SUCCESS) {
208 dma_free_coherent(&ha->pdev->dev, 330 dma_free_coherent(&ha->pdev->dev,
209 sizeof(struct init_fw_ctrl_blk), 331 sizeof(struct addr_ctrl_blk),
210 init_fw_cb, init_fw_cb_dma); 332 init_fw_cb, init_fw_cb_dma);
211 return status; 333 goto exit_init_fw_cb;
212 } 334 }
213 335
214 /* Initialize request and response queues. */ 336 /* Initialize request and response queues. */
215 qla4xxx_init_rings(ha); 337 qla4xxx_init_rings(ha);
216 338
217 /* Fill in the request and response queue information. */ 339 /* Fill in the request and response queue information. */
218 init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out); 340 init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
219 init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in); 341 init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
220 init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH); 342 init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
221 init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH); 343 init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
222 init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma)); 344 init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
223 init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma)); 345 init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
224 init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma)); 346 init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
225 init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma)); 347 init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
226 init_fw_cb->pri.shdwreg_addr_lo = 348 init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
227 cpu_to_le32(LSDW(ha->shadow_regs_dma)); 349 init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));
228 init_fw_cb->pri.shdwreg_addr_hi =
229 cpu_to_le32(MSDW(ha->shadow_regs_dma));
230 350
231 /* Set up required options. */ 351 /* Set up required options. */
232 init_fw_cb->pri.fw_options |= 352 init_fw_cb->fw_options |=
233 __constant_cpu_to_le16(FWOPT_SESSION_MODE | 353 __constant_cpu_to_le16(FWOPT_SESSION_MODE |
234 FWOPT_INITIATOR_MODE); 354 FWOPT_INITIATOR_MODE);
235 init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); 355 init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
236
237 /* Save some info in adapter structure. */
238 ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options);
239 ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts);
240 ha->heartbeat_interval = init_fw_cb->pri.hb_interval;
241 memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
242 min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
243 memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
244 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
245 memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
246 min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
247 memcpy(ha->name_string, init_fw_cb->pri.iscsi_name,
248 min(sizeof(ha->name_string),
249 sizeof(init_fw_cb->pri.iscsi_name)));
250 /*memcpy(ha->alias, init_fw_cb->Alias,
251 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
252
253	/* Save Command Line Parameter info */
254 ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout);
255 ha->discovery_wait = ql4xdiscoverywait;
256 356
257 /* Send Initialize Firmware Control Block. */ 357 if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
258 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 358 != QLA_SUCCESS) {
259 memset(&mbox_sts, 0, sizeof(mbox_sts)); 359 DEBUG2(printk(KERN_WARNING
260 360 "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
261 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; 361 ha->host_no, __func__));
262 mbox_cmd[1] = 0; 362 goto exit_init_fw_cb;
263 mbox_cmd[2] = LSDW(init_fw_cb_dma); 363 }
264 mbox_cmd[3] = MSDW(init_fw_cb_dma);
265 mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
266 364
267 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) == 365 if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
268 QLA_SUCCESS) 366 init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
269 status = QLA_SUCCESS; 367 DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
270 else { 368 ha->host_no, __func__));
271 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE " 369 goto exit_init_fw_cb;
272 "failed w/ status %04X\n", ha->host_no, __func__,
273 mbox_sts[0]));
274 } 370 }
275 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk), 371 status = QLA_SUCCESS;
276 init_fw_cb, init_fw_cb_dma); 372
373exit_init_fw_cb:
374 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
375 init_fw_cb, init_fw_cb_dma);
277 376
278 return status; 377 return status;
279} 378}
@@ -284,13 +383,13 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
284 **/ 383 **/
285int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha) 384int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
286{ 385{
287 struct init_fw_ctrl_blk *init_fw_cb; 386 struct addr_ctrl_blk *init_fw_cb;
288 dma_addr_t init_fw_cb_dma; 387 dma_addr_t init_fw_cb_dma;
289 uint32_t mbox_cmd[MBOX_REG_COUNT]; 388 uint32_t mbox_cmd[MBOX_REG_COUNT];
290 uint32_t mbox_sts[MBOX_REG_COUNT]; 389 uint32_t mbox_sts[MBOX_REG_COUNT];
291 390
292 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, 391 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
293 sizeof(struct init_fw_ctrl_blk), 392 sizeof(struct addr_ctrl_blk),
294 &init_fw_cb_dma, GFP_KERNEL); 393 &init_fw_cb_dma, GFP_KERNEL);
295 if (init_fw_cb == NULL) { 394 if (init_fw_cb == NULL) {
296 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, 395 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
@@ -299,35 +398,21 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
299 } 398 }
300 399
301 /* Get Initialize Firmware Control Block. */ 400 /* Get Initialize Firmware Control Block. */
302 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 401 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
303 memset(&mbox_sts, 0, sizeof(mbox_sts)); 402 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
304
305 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
306 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
307 mbox_cmd[2] = LSDW(init_fw_cb_dma);
308 mbox_cmd[3] = MSDW(init_fw_cb_dma);
309 mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
310
311 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
312 QLA_SUCCESS) { 403 QLA_SUCCESS) {
313 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", 404 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
314 ha->host_no, __func__)); 405 ha->host_no, __func__));
315 dma_free_coherent(&ha->pdev->dev, 406 dma_free_coherent(&ha->pdev->dev,
316 sizeof(struct init_fw_ctrl_blk), 407 sizeof(struct addr_ctrl_blk),
317 init_fw_cb, init_fw_cb_dma); 408 init_fw_cb, init_fw_cb_dma);
318 return QLA_ERROR; 409 return QLA_ERROR;
319 } 410 }
320 411
321 /* Save IP Address. */ 412 /* Save IP Address. */
322 memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr, 413 qla4xxx_update_local_ip(ha, init_fw_cb);
323 min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr))); 414 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
324 memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet, 415 init_fw_cb, init_fw_cb_dma);
325 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
326 memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
327 min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
328
329 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
330 init_fw_cb, init_fw_cb_dma);
331 416
332 return QLA_SUCCESS; 417 return QLA_SUCCESS;
333} 418}
@@ -409,6 +494,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
409 uint16_t *connection_id) 494 uint16_t *connection_id)
410{ 495{
411 int status = QLA_ERROR; 496 int status = QLA_ERROR;
497 uint16_t options;
412 uint32_t mbox_cmd[MBOX_REG_COUNT]; 498 uint32_t mbox_cmd[MBOX_REG_COUNT];
413 uint32_t mbox_sts[MBOX_REG_COUNT]; 499 uint32_t mbox_sts[MBOX_REG_COUNT];
414 500
@@ -441,14 +527,26 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
441 goto exit_get_fwddb; 527 goto exit_get_fwddb;
442 } 528 }
443 if (fw_ddb_entry) { 529 if (fw_ddb_entry) {
444 dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d " 530 options = le16_to_cpu(fw_ddb_entry->options);
445 "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n", 531 if (options & DDB_OPT_IPV6_DEVICE) {
446 fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3], 532 dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
447 mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0], 533 "Next %d State %04x ConnErr %08x %pI6 "
448 fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2], 534 ":%04d \"%s\"\n", __func__, fw_ddb_index,
449 fw_ddb_entry->ip_addr[3], 535 mbox_sts[0], mbox_sts[2], mbox_sts[3],
450 le16_to_cpu(fw_ddb_entry->port), 536 mbox_sts[4], mbox_sts[5],
451 fw_ddb_entry->iscsi_name); 537 fw_ddb_entry->ip_addr,
538 le16_to_cpu(fw_ddb_entry->port),
539 fw_ddb_entry->iscsi_name);
540 } else {
541 dev_info(&ha->pdev->dev, "%s: DDB[%d] MB0 %04x Tot %d "
542 "Next %d State %04x ConnErr %08x %pI4 "
543 ":%04d \"%s\"\n", __func__, fw_ddb_index,
544 mbox_sts[0], mbox_sts[2], mbox_sts[3],
545 mbox_sts[4], mbox_sts[5],
546 fw_ddb_entry->ip_addr,
547 le16_to_cpu(fw_ddb_entry->port),
548 fw_ddb_entry->iscsi_name);
549 }
452 } 550 }
453 if (num_valid_ddb_entries) 551 if (num_valid_ddb_entries)
454 *num_valid_ddb_entries = mbox_sts[2]; 552 *num_valid_ddb_entries = mbox_sts[2];
@@ -664,6 +762,59 @@ exit_get_event_log:
664} 762}
665 763
666/** 764/**
765 * qla4xxx_abort_task - issues Abort Task
766 * @ha: Pointer to host adapter structure.
767 * @srb: Pointer to srb entry
768 *
 769 * This routine issues an Abort Task for the command associated
 770 * with the given srb. The caller must ensure that the srb and its
 771 * command pointers are valid before calling this routine.
772 **/
773int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
774{
775 uint32_t mbox_cmd[MBOX_REG_COUNT];
776 uint32_t mbox_sts[MBOX_REG_COUNT];
777 struct scsi_cmnd *cmd = srb->cmd;
778 int status = QLA_SUCCESS;
779 unsigned long flags = 0;
780 uint32_t index;
781
782 /*
783 * Send abort task command to ISP, so that the ISP will return
784 * request with ABORT status
785 */
786 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
787 memset(&mbox_sts, 0, sizeof(mbox_sts));
788
789 spin_lock_irqsave(&ha->hardware_lock, flags);
790 index = (unsigned long)(unsigned char *)cmd->host_scribble;
791 spin_unlock_irqrestore(&ha->hardware_lock, flags);
792
793 /* Firmware already posted completion on response queue */
794 if (index == MAX_SRBS)
795 return status;
796
797 mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
798 mbox_cmd[1] = srb->fw_ddb_index;
799 mbox_cmd[2] = index;
800 /* Immediate Command Enable */
801 mbox_cmd[5] = 0x01;
802
803 qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
804 &mbox_sts[0]);
805 if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
806 status = QLA_ERROR;
807
808 DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%d: abort task FAILED: "
809 "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
810 ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
811 mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
812 }
813
814 return status;
815}
816
817/**
667 * qla4xxx_reset_lun - issues LUN Reset 818 * qla4xxx_reset_lun - issues LUN Reset
668 * @ha: Pointer to host adapter structure. 819 * @ha: Pointer to host adapter structure.
669 * @db_entry: Pointer to device database entry 820 * @db_entry: Pointer to device database entry
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2ccad36bee9f..38b1d38afca5 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -74,6 +74,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
74 */ 74 */
75static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, 75static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
76 void (*done) (struct scsi_cmnd *)); 76 void (*done) (struct scsi_cmnd *));
77static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
77static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); 78static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
78static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); 79static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
79static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); 80static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
@@ -88,6 +89,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
88 .proc_name = DRIVER_NAME, 89 .proc_name = DRIVER_NAME,
89 .queuecommand = qla4xxx_queuecommand, 90 .queuecommand = qla4xxx_queuecommand,
90 91
92 .eh_abort_handler = qla4xxx_eh_abort,
91 .eh_device_reset_handler = qla4xxx_eh_device_reset, 93 .eh_device_reset_handler = qla4xxx_eh_device_reset,
92 .eh_target_reset_handler = qla4xxx_eh_target_reset, 94 .eh_target_reset_handler = qla4xxx_eh_target_reset,
93 .eh_host_reset_handler = qla4xxx_eh_host_reset, 95 .eh_host_reset_handler = qla4xxx_eh_host_reset,
@@ -384,12 +386,12 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
384 if (!srb) 386 if (!srb)
385 return srb; 387 return srb;
386 388
387 atomic_set(&srb->ref_count, 1); 389 kref_init(&srb->srb_ref);
388 srb->ha = ha; 390 srb->ha = ha;
389 srb->ddb = ddb_entry; 391 srb->ddb = ddb_entry;
390 srb->cmd = cmd; 392 srb->cmd = cmd;
391 srb->flags = 0; 393 srb->flags = 0;
392 cmd->SCp.ptr = (void *)srb; 394 CMD_SP(cmd) = (void *)srb;
393 cmd->scsi_done = done; 395 cmd->scsi_done = done;
394 396
395 return srb; 397 return srb;
@@ -403,12 +405,14 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
403 scsi_dma_unmap(cmd); 405 scsi_dma_unmap(cmd);
404 srb->flags &= ~SRB_DMA_VALID; 406 srb->flags &= ~SRB_DMA_VALID;
405 } 407 }
406 cmd->SCp.ptr = NULL; 408 CMD_SP(cmd) = NULL;
407} 409}
408 410
409void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb) 411void qla4xxx_srb_compl(struct kref *ref)
410{ 412{
413 struct srb *srb = container_of(ref, struct srb, srb_ref);
411 struct scsi_cmnd *cmd = srb->cmd; 414 struct scsi_cmnd *cmd = srb->cmd;
415 struct scsi_qla_host *ha = srb->ha;
412 416
413 qla4xxx_srb_free_dma(ha, srb); 417 qla4xxx_srb_free_dma(ha, srb);
414 418
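qla4xxx_srb_compl() is now a kref release callback: the srb carries a struct kref instead of an open-coded atomic counter, and the last kref_put() frees it. The generic pattern, sketched minimally with illustrative names:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref ref;
		/* ... payload ... */
	};

	/* Runs exactly once, when the final reference is dropped. */
	static void obj_release(struct kref *kref)
	{
		struct obj *o = container_of(kref, struct obj, ref);

		kfree(o);
	}

	/* kref_init(&o->ref) on create; kref_get(&o->ref) per extra user;
	 * kref_put(&o->ref, obj_release) when each user is done. */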
@@ -685,6 +689,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
685 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) || 689 test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
686 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 690 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
687 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) || 691 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
692 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
688 test_bit(DPC_AEN, &ha->dpc_flags)) && 693 test_bit(DPC_AEN, &ha->dpc_flags)) &&
689 ha->dpc_thread) { 694 ha->dpc_thread) {
690 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" 695 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
@@ -886,11 +891,10 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
886 srb = qla4xxx_del_from_active_array(ha, i); 891 srb = qla4xxx_del_from_active_array(ha, i);
887 if (srb != NULL) { 892 if (srb != NULL) {
888 srb->cmd->result = DID_RESET << 16; 893 srb->cmd->result = DID_RESET << 16;
889 qla4xxx_srb_compl(ha, srb); 894 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
890 } 895 }
891 } 896 }
892 spin_unlock_irqrestore(&ha->hardware_lock, flags); 897 spin_unlock_irqrestore(&ha->hardware_lock, flags);
893
894} 898}
895 899
896/** 900/**
@@ -1069,6 +1073,54 @@ static void qla4xxx_do_dpc(struct work_struct *work)
1069 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 1073 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
1070 qla4xxx_get_dhcp_ip_address(ha); 1074 qla4xxx_get_dhcp_ip_address(ha);
1071 1075
1076 /* ---- link change? --- */
1077 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
1078 if (!test_bit(AF_LINK_UP, &ha->flags)) {
1079 /* ---- link down? --- */
1080 list_for_each_entry_safe(ddb_entry, dtemp,
1081 &ha->ddb_list, list) {
1082 if (atomic_read(&ddb_entry->state) ==
1083 DDB_STATE_ONLINE)
1084 qla4xxx_mark_device_missing(ha,
1085 ddb_entry);
1086 }
1087 } else {
1088 /* ---- link up? --- *
1089 * F/W will auto login to all devices ONLY ONCE after
1090 * link up during driver initialization and runtime
1091 * fatal error recovery. Therefore, the driver must
1092 * manually relogin to devices when recovering from
1093 * connection failures, logouts, expired KATO, etc. */
1094
1095 list_for_each_entry_safe(ddb_entry, dtemp,
1096 &ha->ddb_list, list) {
1097 if ((atomic_read(&ddb_entry->state) ==
1098 DDB_STATE_MISSING) ||
1099 (atomic_read(&ddb_entry->state) ==
1100 DDB_STATE_DEAD)) {
1101 if (ddb_entry->fw_ddb_device_state ==
1102 DDB_DS_SESSION_ACTIVE) {
1103 atomic_set(&ddb_entry->state,
1104 DDB_STATE_ONLINE);
1105 dev_info(&ha->pdev->dev,
1106 "scsi%ld: %s: ddb[%d]"
1107 " os[%d] marked"
1108 " ONLINE\n",
1109 ha->host_no, __func__,
1110 ddb_entry->fw_ddb_index,
1111 ddb_entry->os_target_id);
1112
1113 iscsi_unblock_session(
1114 ddb_entry->sess);
1115 } else
1116 qla4xxx_relogin_device(
1117 ha, ddb_entry);
1118 }
1119
1120 }
1121 }
1122 }
1123
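DPC_LINK_CHANGED follows the driver's usual deferred-work convention: interrupt context only records the event as a bit, and the DPC worker consumes it exactly once with test_and_clear_bit(). Sketched generically (the queueing call is an assumption based on the surrounding code, not a quote from it):

	/* ISR side: record the event and kick the worker. */
	set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
	queue_work(ha->dpc_thread, &ha->dpc_work);

	/* Worker side: consume the event at most once. */
	if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags))
		handle_link_change(ha);	/* illustrative helper */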
1072 /* ---- relogin device? --- */ 1124 /* ---- relogin device? --- */
1073 if (adapter_up(ha) && 1125 if (adapter_up(ha) &&
1074 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { 1126 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
@@ -1430,12 +1482,14 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
1430struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index) 1482struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index)
1431{ 1483{
1432 struct srb *srb = NULL; 1484 struct srb *srb = NULL;
1433 struct scsi_cmnd *cmd; 1485 struct scsi_cmnd *cmd = NULL;
1434 1486
1435 if (!(cmd = scsi_host_find_tag(ha->host, index))) 1487 cmd = scsi_host_find_tag(ha->host, index);
1488 if (!cmd)
1436 return srb; 1489 return srb;
1437 1490
1438 if (!(srb = (struct srb *)cmd->host_scribble)) 1491 srb = (struct srb *)CMD_SP(cmd);
1492 if (!srb)
1439 return srb; 1493 return srb;
1440 1494
1441 /* update counters */ 1495 /* update counters */
@@ -1443,14 +1497,15 @@ struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t in
1443 ha->req_q_count += srb->iocb_cnt; 1497 ha->req_q_count += srb->iocb_cnt;
1444 ha->iocb_cnt -= srb->iocb_cnt; 1498 ha->iocb_cnt -= srb->iocb_cnt;
1445 if (srb->cmd) 1499 if (srb->cmd)
1446 srb->cmd->host_scribble = NULL; 1500 srb->cmd->host_scribble =
1501 (unsigned char *)(unsigned long) MAX_SRBS;
1447 } 1502 }
1448 return srb; 1503 return srb;
1449} 1504}
1450 1505
1451/** 1506/**
1452 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware 1507 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
1453 * @ha: actual ha whose done queue will contain the comd returned by firmware. 1508 * @ha: Pointer to host adapter structure.
1454 * @cmd: Scsi Command to wait on. 1509 * @cmd: Scsi Command to wait on.
1455 * 1510 *
1456 * This routine waits for the command to be returned by the Firmware 1511 * This routine waits for the command to be returned by the Firmware
@@ -1465,7 +1520,7 @@ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
1465 1520
1466 do { 1521 do {
1467 /* Checking to see if its returned to OS */ 1522 /* Checking to see if its returned to OS */
1468 rp = (struct srb *) cmd->SCp.ptr; 1523 rp = (struct srb *) CMD_SP(cmd);
1469 if (rp == NULL) { 1524 if (rp == NULL) {
1470 done++; 1525 done++;
1471 break; 1526 break;
@@ -1534,6 +1589,62 @@ static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
1534} 1589}
1535 1590
1536/** 1591/**
1592 * qla4xxx_eh_abort - callback for abort task.
1593 * @cmd: Pointer to Linux's SCSI command structure
1594 *
1595 * This routine is called by the Linux OS to abort the specified
1596 * command.
1597 **/
1598static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
1599{
1600 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
1601 unsigned int id = cmd->device->id;
1602 unsigned int lun = cmd->device->lun;
1603 unsigned long serial = cmd->serial_number;
1604 struct srb *srb = NULL;
1605 int ret = SUCCESS;
1606 int wait = 0;
1607
1608 dev_info(&ha->pdev->dev,
1609 "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
1610 ha->host_no, id, lun, cmd, serial);
1611
1612 srb = (struct srb *) CMD_SP(cmd);
1613
1614 if (!srb)
1615 return SUCCESS;
1616
1617 kref_get(&srb->srb_ref);
1618
1619 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
1620 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
1621 ha->host_no, id, lun));
1622 ret = FAILED;
1623 } else {
1624 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
1625 ha->host_no, id, lun));
1626 wait = 1;
1627 }
1628
1629 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
1630
1631 /* Wait for command to complete */
1632 if (wait) {
1633 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
1634 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
1635 ha->host_no, id, lun));
1636 ret = FAILED;
1637 }
1638 }
1639
1640 dev_info(&ha->pdev->dev,
1641 "scsi%ld:%d:%d: Abort command - %s\n",
 1642	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
1643
1644 return ret;
1645}
1646
1647/**
1537 * qla4xxx_eh_device_reset - callback for target reset. 1648 * qla4xxx_eh_device_reset - callback for target reset.
1538 * @cmd: Pointer to Linux's SCSI command structure 1649 * @cmd: Pointer to Linux's SCSI command structure
1539 * 1650 *
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 6980cb279c81..28a6c494a2e8 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,5 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.01.00-k9" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k1"
9
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1c08f6164658..ad0ed212db4a 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -67,6 +67,9 @@
67#include "scsi_priv.h" 67#include "scsi_priv.h"
68#include "scsi_logging.h" 68#include "scsi_logging.h"
69 69
70#define CREATE_TRACE_POINTS
71#include <trace/events/scsi.h>
72
70static void scsi_done(struct scsi_cmnd *cmd); 73static void scsi_done(struct scsi_cmnd *cmd);
71 74
72/* 75/*
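CREATE_TRACE_POINTS must be defined in exactly one compilation unit before the trace header is included; it expands the TRACE_EVENT() declarations in <trace/events/scsi.h> into the actual tracepoint definitions, while every other includer gets only the trace_scsi_*() call stubs. The shape of the convention, for reference:

	/* One .c file only (here scsi.c): instantiate the tracepoints. */
	#define CREATE_TRACE_POINTS
	#include <trace/events/scsi.h>

	/* Everywhere else: plain include, declarations only. */
	#include <trace/events/scsi.h>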
@@ -747,10 +750,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
747 cmd->result = (DID_NO_CONNECT << 16); 750 cmd->result = (DID_NO_CONNECT << 16);
748 scsi_done(cmd); 751 scsi_done(cmd);
749 } else { 752 } else {
753 trace_scsi_dispatch_cmd_start(cmd);
750 rtn = host->hostt->queuecommand(cmd, scsi_done); 754 rtn = host->hostt->queuecommand(cmd, scsi_done);
751 } 755 }
752 spin_unlock_irqrestore(host->host_lock, flags); 756 spin_unlock_irqrestore(host->host_lock, flags);
753 if (rtn) { 757 if (rtn) {
758 trace_scsi_dispatch_cmd_error(cmd, rtn);
754 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && 759 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
755 rtn != SCSI_MLQUEUE_TARGET_BUSY) 760 rtn != SCSI_MLQUEUE_TARGET_BUSY)
756 rtn = SCSI_MLQUEUE_HOST_BUSY; 761 rtn = SCSI_MLQUEUE_HOST_BUSY;
@@ -781,6 +786,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
781 */ 786 */
782static void scsi_done(struct scsi_cmnd *cmd) 787static void scsi_done(struct scsi_cmnd *cmd)
783{ 788{
789 trace_scsi_dispatch_cmd_done(cmd);
784 blk_complete_request(cmd->request); 790 blk_complete_request(cmd->request);
785} 791}
786 792
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3a5bfd10b2cb..136329b4027b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -12,7 +12,7 @@
12 * SAS disks. 12 * SAS disks.
13 * 13 *
14 * 14 *
15 * For documentation see http://www.torque.net/sg/sdebug26.html 15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
16 * 16 *
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421] 17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809] 18 * dpg: work for devfs large number of disks [20010809]
@@ -58,8 +58,8 @@
58#include "sd.h" 58#include "sd.h"
59#include "scsi_logging.h" 59#include "scsi_logging.h"
60 60
61#define SCSI_DEBUG_VERSION "1.81" 61#define SCSI_DEBUG_VERSION "1.82"
62static const char * scsi_debug_version_date = "20070104"; 62static const char * scsi_debug_version_date = "20100324";
63 63
64/* Additional Sense Code (ASC) */ 64/* Additional Sense Code (ASC) */
65#define NO_ADDITIONAL_SENSE 0x0 65#define NO_ADDITIONAL_SENSE 0x0
@@ -108,6 +108,7 @@ static const char * scsi_debug_version_date = "20070104";
108#define DEF_ATO 1 108#define DEF_ATO 1
109#define DEF_PHYSBLK_EXP 0 109#define DEF_PHYSBLK_EXP 0
110#define DEF_LOWEST_ALIGNED 0 110#define DEF_LOWEST_ALIGNED 0
111#define DEF_OPT_BLKS 64
111#define DEF_UNMAP_MAX_BLOCKS 0 112#define DEF_UNMAP_MAX_BLOCKS 0
112#define DEF_UNMAP_MAX_DESC 0 113#define DEF_UNMAP_MAX_DESC 0
113#define DEF_UNMAP_GRANULARITY 0 114#define DEF_UNMAP_GRANULARITY 0
@@ -147,12 +148,18 @@ static const char * scsi_debug_version_date = "20070104";
147#define SAM2_LUN_ADDRESS_METHOD 0 148#define SAM2_LUN_ADDRESS_METHOD 0
148#define SAM2_WLUN_REPORT_LUNS 0xc101 149#define SAM2_WLUN_REPORT_LUNS 0xc101
149 150
151/* Can queue up to this number of commands. Typically commands that
152 * that have a non-zero delay are queued. */
153#define SCSI_DEBUG_CANQUEUE 255
154
150static int scsi_debug_add_host = DEF_NUM_HOST; 155static int scsi_debug_add_host = DEF_NUM_HOST;
151static int scsi_debug_delay = DEF_DELAY; 156static int scsi_debug_delay = DEF_DELAY;
152static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB; 157static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
153static int scsi_debug_every_nth = DEF_EVERY_NTH; 158static int scsi_debug_every_nth = DEF_EVERY_NTH;
154static int scsi_debug_max_luns = DEF_MAX_LUNS; 159static int scsi_debug_max_luns = DEF_MAX_LUNS;
160static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
155static int scsi_debug_num_parts = DEF_NUM_PARTS; 161static int scsi_debug_num_parts = DEF_NUM_PARTS;
162static int scsi_debug_no_uld = 0;
156static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */ 163static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
157static int scsi_debug_opts = DEF_OPTS; 164static int scsi_debug_opts = DEF_OPTS;
158static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; 165static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
@@ -169,6 +176,7 @@ static int scsi_debug_guard = DEF_GUARD;
169static int scsi_debug_ato = DEF_ATO; 176static int scsi_debug_ato = DEF_ATO;
170static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; 177static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
171static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; 178static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
179static int scsi_debug_opt_blks = DEF_OPT_BLKS;
172static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; 180static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
173static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; 181static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
174static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; 182static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
@@ -192,7 +200,6 @@ static int sdebug_sectors_per; /* sectors per cylinder */
192 200
193#define SDEBUG_SENSE_LEN 32 201#define SDEBUG_SENSE_LEN 32
194 202
195#define SCSI_DEBUG_CANQUEUE 255
196#define SCSI_DEBUG_MAX_CMD_LEN 32 203#define SCSI_DEBUG_MAX_CMD_LEN 32
197 204
198struct sdebug_dev_info { 205struct sdebug_dev_info {
@@ -699,9 +706,13 @@ static int inquiry_evpd_b0(unsigned char * arr)
699 unsigned int gran; 706 unsigned int gran;
700 707
701 memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); 708 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
709
710 /* Optimal transfer length granularity */
702 gran = 1 << scsi_debug_physblk_exp; 711 gran = 1 << scsi_debug_physblk_exp;
703 arr[2] = (gran >> 8) & 0xff; 712 arr[2] = (gran >> 8) & 0xff;
704 arr[3] = gran & 0xff; 713 arr[3] = gran & 0xff;
714
715 /* Maximum Transfer Length */
705 if (sdebug_store_sectors > 0x400) { 716 if (sdebug_store_sectors > 0x400) {
706 arr[4] = (sdebug_store_sectors >> 24) & 0xff; 717 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
707 arr[5] = (sdebug_store_sectors >> 16) & 0xff; 718 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
@@ -709,6 +720,9 @@ static int inquiry_evpd_b0(unsigned char * arr)
709 arr[7] = sdebug_store_sectors & 0xff; 720 arr[7] = sdebug_store_sectors & 0xff;
710 } 721 }
711 722
723 /* Optimal Transfer Length */
724 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
725
712 if (scsi_debug_unmap_max_desc) { 726 if (scsi_debug_unmap_max_desc) {
713 unsigned int blocks; 727 unsigned int blocks;
714 728
@@ -717,15 +731,20 @@ static int inquiry_evpd_b0(unsigned char * arr)
717 else 731 else
718 blocks = 0xffffffff; 732 blocks = 0xffffffff;
719 733
734 /* Maximum Unmap LBA Count */
720 put_unaligned_be32(blocks, &arr[16]); 735 put_unaligned_be32(blocks, &arr[16]);
736
737 /* Maximum Unmap Block Descriptor Count */
721 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); 738 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
722 } 739 }
723 740
741 /* Unmap Granularity Alignment */
724 if (scsi_debug_unmap_alignment) { 742 if (scsi_debug_unmap_alignment) {
725 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]); 743 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
726 arr[28] |= 0x80; /* UGAVALID */ 744 arr[28] |= 0x80; /* UGAVALID */
727 } 745 }
728 746
747 /* Optimal Unmap Granularity */
729 if (scsi_debug_unmap_granularity) { 748 if (scsi_debug_unmap_granularity) {
730 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); 749 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
731 return 0x3c; /* Mandatory page length for thin provisioning */ 750 return 0x3c; /* Mandatory page length for thin provisioning */
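The Block Limits VPD page (B0h) fields added above are big-endian on the wire, which is why each value goes through put_unaligned_be32() rather than a plain store. What that helper does for a value val, spelled out for one field:

	/* put_unaligned_be32(val, &arr[8]) stores val MSB-first: */
	arr[8]  = (val >> 24) & 0xff;
	arr[9]  = (val >> 16) & 0xff;
	arr[10] = (val >>  8) & 0xff;
	arr[11] = val & 0xff;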
@@ -2266,7 +2285,7 @@ static void timer_intr_handler(unsigned long indx)
2266 struct sdebug_queued_cmd * sqcp; 2285 struct sdebug_queued_cmd * sqcp;
2267 unsigned long iflags; 2286 unsigned long iflags;
2268 2287
2269 if (indx >= SCSI_DEBUG_CANQUEUE) { 2288 if (indx >= scsi_debug_max_queue) {
2270 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too " 2289 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2271 "large\n"); 2290 "large\n");
2272 return; 2291 return;
@@ -2380,6 +2399,8 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
2380 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING, 2399 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2381 sdp->host->cmd_per_lun); 2400 sdp->host->cmd_per_lun);
2382 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024); 2401 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2402 if (scsi_debug_no_uld)
2403 sdp->no_uld_attach = 1;
2383 return 0; 2404 return 0;
2384} 2405}
2385 2406
@@ -2406,7 +2427,7 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2406 struct sdebug_queued_cmd *sqcp; 2427 struct sdebug_queued_cmd *sqcp;
2407 2428
2408 spin_lock_irqsave(&queued_arr_lock, iflags); 2429 spin_lock_irqsave(&queued_arr_lock, iflags);
2409 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { 2430 for (k = 0; k < scsi_debug_max_queue; ++k) {
2410 sqcp = &queued_arr[k]; 2431 sqcp = &queued_arr[k];
2411 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) { 2432 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2412 del_timer_sync(&sqcp->cmnd_timer); 2433 del_timer_sync(&sqcp->cmnd_timer);
@@ -2416,7 +2437,7 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2416 } 2437 }
2417 } 2438 }
2418 spin_unlock_irqrestore(&queued_arr_lock, iflags); 2439 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2419 return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0; 2440 return (k < scsi_debug_max_queue) ? 1 : 0;
2420} 2441}
2421 2442
2422/* Deletes (stops) timers of all queued commands */ 2443/* Deletes (stops) timers of all queued commands */
@@ -2427,7 +2448,7 @@ static void stop_all_queued(void)
2427 struct sdebug_queued_cmd *sqcp; 2448 struct sdebug_queued_cmd *sqcp;
2428 2449
2429 spin_lock_irqsave(&queued_arr_lock, iflags); 2450 spin_lock_irqsave(&queued_arr_lock, iflags);
2430 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { 2451 for (k = 0; k < scsi_debug_max_queue; ++k) {
2431 sqcp = &queued_arr[k]; 2452 sqcp = &queued_arr[k];
2432 if (sqcp->in_use && sqcp->a_cmnd) { 2453 if (sqcp->in_use && sqcp->a_cmnd) {
2433 del_timer_sync(&sqcp->cmnd_timer); 2454 del_timer_sync(&sqcp->cmnd_timer);
@@ -2533,7 +2554,7 @@ static void __init init_all_queued(void)
2533 struct sdebug_queued_cmd * sqcp; 2554 struct sdebug_queued_cmd * sqcp;
2534 2555
2535 spin_lock_irqsave(&queued_arr_lock, iflags); 2556 spin_lock_irqsave(&queued_arr_lock, iflags);
2536 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { 2557 for (k = 0; k < scsi_debug_max_queue; ++k) {
2537 sqcp = &queued_arr[k]; 2558 sqcp = &queued_arr[k];
2538 init_timer(&sqcp->cmnd_timer); 2559 init_timer(&sqcp->cmnd_timer);
2539 sqcp->in_use = 0; 2560 sqcp->in_use = 0;
@@ -2625,12 +2646,12 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
2625 struct sdebug_queued_cmd * sqcp = NULL; 2646 struct sdebug_queued_cmd * sqcp = NULL;
2626 2647
2627 spin_lock_irqsave(&queued_arr_lock, iflags); 2648 spin_lock_irqsave(&queued_arr_lock, iflags);
2628 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) { 2649 for (k = 0; k < scsi_debug_max_queue; ++k) {
2629 sqcp = &queued_arr[k]; 2650 sqcp = &queued_arr[k];
2630 if (! sqcp->in_use) 2651 if (! sqcp->in_use)
2631 break; 2652 break;
2632 } 2653 }
2633 if (k >= SCSI_DEBUG_CANQUEUE) { 2654 if (k >= scsi_debug_max_queue) {
2634 spin_unlock_irqrestore(&queued_arr_lock, iflags); 2655 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2635 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n"); 2656 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2636 return 1; /* report busy to mid level */ 2657 return 1; /* report busy to mid level */
@@ -2662,7 +2683,9 @@ module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2662module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); 2683module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2663module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); 2684module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2664module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); 2685module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2686module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2665module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); 2687module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2688module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2666module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); 2689module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2667module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR); 2690module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2668module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR); 2691module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
@@ -2677,6 +2700,7 @@ module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2677module_param_named(guard, scsi_debug_guard, int, S_IRUGO); 2700module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2678module_param_named(ato, scsi_debug_ato, int, S_IRUGO); 2701module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2679module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); 2702module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2703module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2680module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); 2704module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2681module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); 2705module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2682module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); 2706module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
@@ -2695,7 +2719,9 @@ MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2695MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); 2719MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2696MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); 2720MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2697MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); 2721MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2722MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2698MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); 2723MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2724MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2699MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); 2725MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2700MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)"); 2726MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2701MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)"); 2727MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
@@ -2705,6 +2731,7 @@ MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)")
2705MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); 2731MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2706MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); 2732MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2707MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); 2733MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2734MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2708MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); 2735MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2709MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); 2736MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2710MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); 2737MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
@@ -2970,6 +2997,31 @@ static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
2970DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show, 2997DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
2971 sdebug_max_luns_store); 2998 sdebug_max_luns_store);
2972 2999
3000static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3001{
3002 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3003}
3004static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3005 const char * buf, size_t count)
3006{
3007 int n;
3008
3009 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3010 (n <= SCSI_DEBUG_CANQUEUE)) {
3011 scsi_debug_max_queue = n;
3012 return count;
3013 }
3014 return -EINVAL;
3015}
3016DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3017 sdebug_max_queue_store);
3018
3019static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3020{
3021 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3022}
3023DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3024
2973static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf) 3025static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
2974{ 3026{
2975 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level); 3027 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
@@ -3107,7 +3159,9 @@ static int do_create_driverfs_files(void)
3107 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 3159 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3108 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw); 3160 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3109 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 3161 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3162 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3110 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0); 3163 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3164 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3111 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 3165 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3112 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 3166 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3113 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 3167 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
@@ -3139,7 +3193,9 @@ static void do_remove_driverfs_files(void)
3139 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype); 3193 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3140 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 3194 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3141 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 3195 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3196 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3142 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0); 3197 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3198 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3143 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 3199 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3144 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw); 3200 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3145 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 3201 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
@@ -3830,12 +3886,13 @@ static int sdebug_driver_probe(struct device * dev)
3830 3886
3831 sdbg_host = to_sdebug_host(dev); 3887 sdbg_host = to_sdebug_host(dev);
3832 3888
3833 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 3889 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3834 if (NULL == hpnt) { 3890 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3835 printk(KERN_ERR "%s: scsi_register failed\n", __func__); 3891 if (NULL == hpnt) {
3836 error = -ENODEV; 3892 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
3893 error = -ENODEV;
3837 return error; 3894 return error;
3838 } 3895 }
3839 3896
3840 sdbg_host->shost = hpnt; 3897 sdbg_host->shost = hpnt;
3841 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host; 3898 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
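
The new max_queue attribute follows a common driver-sysfs pattern: the store method accepts a decimal value only when it parses cleanly and lies within the compile-time queue bound, and rejects everything else with -EINVAL. A minimal userspace sketch of that validation logic; the 255 cap is an assumption taken from the parameter description above, not a value confirmed by this hunk.

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	#define SCSI_DEBUG_CANQUEUE 255	/* stand-in, consistent with "1 to 255" above */

	static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;

	/* Mirrors sdebug_max_queue_store(): accept 1..SCSI_DEBUG_CANQUEUE, else -EINVAL */
	static int store_max_queue(const char *buf, size_t count)
	{
		int n;

		if ((count > 0) && (sscanf(buf, "%d", &n) == 1) &&
		    (n > 0) && (n <= SCSI_DEBUG_CANQUEUE)) {
			scsi_debug_max_queue = n;
			return (int)count;
		}
		return -EINVAL;
	}

	int main(void)
	{
		printf("store \"64\"  -> %d\n", store_max_queue("64", 2));	/* 2: accepted */
		printf("store \"999\" -> %d\n", store_max_queue("999", 3));	/* -EINVAL */
		printf("max_queue is now %d\n", scsi_debug_max_queue);
		return 0;
	}

Note that the probe hunk above copies scsi_debug_max_queue into the host template's can_queue before scsi_host_alloc(), so the sysfs bound and the midlayer queue depth stay in step.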
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 7ad53fa42766..a5d630f5f519 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -39,6 +39,8 @@
39#include "scsi_logging.h" 39#include "scsi_logging.h"
40#include "scsi_transport_api.h" 40#include "scsi_transport_api.h"
41 41
42#include <trace/events/scsi.h>
43
42#define SENSE_TIMEOUT (10*HZ) 44#define SENSE_TIMEOUT (10*HZ)
43 45
44/* 46/*
@@ -52,6 +54,7 @@
52void scsi_eh_wakeup(struct Scsi_Host *shost) 54void scsi_eh_wakeup(struct Scsi_Host *shost)
53{ 55{
54 if (shost->host_busy == shost->host_failed) { 56 if (shost->host_busy == shost->host_failed) {
57 trace_scsi_eh_wakeup(shost);
55 wake_up_process(shost->ehandler); 58 wake_up_process(shost->ehandler);
56 SCSI_LOG_ERROR_RECOVERY(5, 59 SCSI_LOG_ERROR_RECOVERY(5,
57 printk("Waking error handler thread\n")); 60 printk("Waking error handler thread\n"));
@@ -127,6 +130,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
127 struct scsi_cmnd *scmd = req->special; 130 struct scsi_cmnd *scmd = req->special;
128 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED; 131 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
129 132
133 trace_scsi_dispatch_cmd_timeout(scmd);
130 scsi_log_completion(scmd, TIMEOUT_ERROR); 134 scsi_log_completion(scmd, TIMEOUT_ERROR);
131 135
132 if (scmd->device->host->transportt->eh_timed_out) 136 if (scmd->device->host->transportt->eh_timed_out)
@@ -970,9 +974,10 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
970 "0x%p\n", current->comm, 974 "0x%p\n", current->comm,
971 scmd)); 975 scmd));
972 rtn = scsi_try_to_abort_cmd(scmd); 976 rtn = scsi_try_to_abort_cmd(scmd);
973 if (rtn == SUCCESS) { 977 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
974 scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; 978 scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
975 if (!scsi_device_online(scmd->device) || 979 if (!scsi_device_online(scmd->device) ||
980 rtn == FAST_IO_FAIL ||
976 !scsi_eh_tur(scmd)) { 981 !scsi_eh_tur(scmd)) {
977 scsi_eh_finish_cmd(scmd, done_q); 982 scsi_eh_finish_cmd(scmd, done_q);
978 } 983 }
@@ -1099,8 +1104,9 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1099 " 0x%p\n", current->comm, 1104 " 0x%p\n", current->comm,
1100 sdev)); 1105 sdev));
1101 rtn = scsi_try_bus_device_reset(bdr_scmd); 1106 rtn = scsi_try_bus_device_reset(bdr_scmd);
1102 if (rtn == SUCCESS) { 1107 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1103 if (!scsi_device_online(sdev) || 1108 if (!scsi_device_online(sdev) ||
1109 rtn == FAST_IO_FAIL ||
1104 !scsi_eh_tur(bdr_scmd)) { 1110 !scsi_eh_tur(bdr_scmd)) {
1105 list_for_each_entry_safe(scmd, next, 1111 list_for_each_entry_safe(scmd, next,
1106 work_q, eh_entry) { 1112 work_q, eh_entry) {
@@ -1163,10 +1169,11 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1163 "to target %d\n", 1169 "to target %d\n",
1164 current->comm, id)); 1170 current->comm, id));
1165 rtn = scsi_try_target_reset(tgtr_scmd); 1171 rtn = scsi_try_target_reset(tgtr_scmd);
1166 if (rtn == SUCCESS) { 1172 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1167 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1173 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1168 if (id == scmd_id(scmd)) 1174 if (id == scmd_id(scmd))
1169 if (!scsi_device_online(scmd->device) || 1175 if (!scsi_device_online(scmd->device) ||
1176 rtn == FAST_IO_FAIL ||
1170 !scsi_eh_tur(tgtr_scmd)) 1177 !scsi_eh_tur(tgtr_scmd))
1171 scsi_eh_finish_cmd(scmd, 1178 scsi_eh_finish_cmd(scmd,
1172 done_q); 1179 done_q);
@@ -1222,10 +1229,11 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1222 " %d\n", current->comm, 1229 " %d\n", current->comm,
1223 channel)); 1230 channel));
1224 rtn = scsi_try_bus_reset(chan_scmd); 1231 rtn = scsi_try_bus_reset(chan_scmd);
1225 if (rtn == SUCCESS) { 1232 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1226 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1233 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1227 if (channel == scmd_channel(scmd)) 1234 if (channel == scmd_channel(scmd))
1228 if (!scsi_device_online(scmd->device) || 1235 if (!scsi_device_online(scmd->device) ||
1236 rtn == FAST_IO_FAIL ||
1229 !scsi_eh_tur(scmd)) 1237 !scsi_eh_tur(scmd))
1230 scsi_eh_finish_cmd(scmd, 1238 scsi_eh_finish_cmd(scmd,
1231 done_q); 1239 done_q);
@@ -1259,9 +1267,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
1259 , current->comm)); 1267 , current->comm));
1260 1268
1261 rtn = scsi_try_host_reset(scmd); 1269 rtn = scsi_try_host_reset(scmd);
1262 if (rtn == SUCCESS) { 1270 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1263 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1271 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1264 if (!scsi_device_online(scmd->device) || 1272 if (!scsi_device_online(scmd->device) ||
1273 rtn == FAST_IO_FAIL ||
1265 (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) || 1274 (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
1266 !scsi_eh_tur(scmd)) 1275 !scsi_eh_tur(scmd))
1267 scsi_eh_finish_cmd(scmd, done_q); 1276 scsi_eh_finish_cmd(scmd, done_q);
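
Every hunk in this file repeats one decision: a recovery action that returned FAST_IO_FAIL is treated like SUCCESS for bookkeeping, except that the follow-up TEST UNIT READY probe is skipped and the command is finished immediately. A compilable sketch of that predicate; the function name and the two boolean inputs are illustrative stand-ins for scsi_device_online()/scsi_eh_tur(), not kernel API, and the constants are the values from include/scsi/scsi.h.

	#include <stdbool.h>
	#include <stdio.h>

	#define SUCCESS      0x2002	/* from include/scsi/scsi.h */
	#define FAST_IO_FAIL 0x2009	/* added by the scsi.h hunk further down */

	/* Should the command be finished (moved to done_q) after a recovery action? */
	static bool eh_action_settled(int rtn, bool device_online, bool tur_ok)
	{
		if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
			return false;	/* the action itself failed; escalate further */
		/* FAST_IO_FAIL finishes the command without probing the device */
		return !device_online || rtn == FAST_IO_FAIL || !tur_ok;
	}

	int main(void)
	{
		printf("%d\n", eh_action_settled(SUCCESS, true, true));	/* 0: leave for retry */
		printf("%d\n", eh_action_settled(SUCCESS, true, false));	/* 1: finish */
		printf("%d\n", eh_action_settled(FAST_IO_FAIL, true, true));	/* 1: finish, no TUR */
		return 0;
	}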
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 38518b088073..c992ecf4e372 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -459,8 +459,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
459 found_target->reap_ref++; 459 found_target->reap_ref++;
460 spin_unlock_irqrestore(shost->host_lock, flags); 460 spin_unlock_irqrestore(shost->host_lock, flags);
461 if (found_target->state != STARGET_DEL) { 461 if (found_target->state != STARGET_DEL) {
462 put_device(parent); 462 put_device(dev);
463 kfree(starget);
464 return found_target; 463 return found_target;
465 } 464 }
466 /* Unfortunately, we found a dying target; need to 465 /* Unfortunately, we found a dying target; need to
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 429c9b73e3e4..c23ab978c3ba 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -474,7 +474,7 @@ static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
474 474
475 475
476/* 476/*
477 * sdev_rd_attr: create a function and attribute variable for a 477 * sdev_rw_attr: create a function and attribute variable for a
478 * read/write field. 478 * read/write field.
479 */ 479 */
480#define sdev_rw_attr(field, format_string) \ 480#define sdev_rw_attr(field, format_string) \
@@ -486,7 +486,7 @@ sdev_store_##field (struct device *dev, struct device_attribute *attr, \
486{ \ 486{ \
487 struct scsi_device *sdev; \ 487 struct scsi_device *sdev; \
488 sdev = to_scsi_device(dev); \ 488 sdev = to_scsi_device(dev); \
489 snscanf (buf, 20, format_string, &sdev->field); \ 489 sscanf (buf, format_string, &sdev->field); \
490 return count; \ 490 return count; \
491} \ 491} \
492static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); 492static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
@@ -853,9 +853,6 @@ static int scsi_target_add(struct scsi_target *starget)
853 error = device_add(&starget->dev); 853 error = device_add(&starget->dev);
854 if (error) { 854 if (error) {
855 dev_err(&starget->dev, "target device_add failed, error %d\n", error); 855 dev_err(&starget->dev, "target device_add failed, error %d\n", error);
856 get_device(&starget->dev);
857 scsi_target_reap(starget);
858 put_device(&starget->dev);
859 return error; 856 return error;
860 } 857 }
861 transport_add_device(&starget->dev); 858 transport_add_device(&starget->dev);
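
The snscanf-to-sscanf fix above lands inside sdev_rw_attr(), a macro that stamps out a show/store pair per scsi_device field via token pasting. A userspace mock of the same pattern, with struct scsi_device reduced to the one field used here; the kernel version wraps the generated functions in DEVICE_ATTR instead.

	#include <stdio.h>

	struct scsi_device { int queue_depth; };

	#define sdev_rw_attr(field, format_string)				\
	static int show_##field(struct scsi_device *sdev, char *buf)		\
	{									\
		return sprintf(buf, format_string, sdev->field);		\
	}									\
	static int store_##field(struct scsi_device *sdev, const char *buf)	\
	{									\
		return sscanf(buf, format_string, &sdev->field);		\
	}

	sdev_rw_attr(queue_depth, "%d\n")

	int main(void)
	{
		struct scsi_device sdev = { 0 };
		char out[32];

		store_queue_depth(&sdev, "31\n");
		show_queue_depth(&sdev, out);
		fputs(out, stdout);	/* prints 31 */
		return 0;
	}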
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
new file mode 100644
index 000000000000..b587289cfacb
--- /dev/null
+++ b/drivers/scsi/scsi_trace.c
@@ -0,0 +1,284 @@
1/*
2 * Copyright (C) 2010 FUJITSU LIMITED
3 * Copyright (C) 2010 Tomohiro Kusumi <kusumi.tomohiro@jp.fujitsu.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include <linux/kernel.h>
19#include <linux/trace_seq.h>
20#include <trace/events/scsi.h>
21
22#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
23#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
24
25static const char *
26scsi_trace_misc(struct trace_seq *, unsigned char *, int);
27
28static const char *
29scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
30{
31 const char *ret = p->buffer + p->len;
32 sector_t lba = 0, txlen = 0;
33
34 lba |= ((cdb[1] & 0x1F) << 16);
35 lba |= (cdb[2] << 8);
36 lba |= cdb[3];
37 txlen = cdb[4];
38
39 trace_seq_printf(p, "lba=%llu txlen=%llu",
40 (unsigned long long)lba, (unsigned long long)txlen);
41 trace_seq_putc(p, 0);
42
43 return ret;
44}
45
46static const char *
47scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
48{
49 const char *ret = p->buffer + p->len;
50 sector_t lba = 0, txlen = 0;
51
52 lba |= (cdb[2] << 24);
53 lba |= (cdb[3] << 16);
54 lba |= (cdb[4] << 8);
55 lba |= cdb[5];
56 txlen |= (cdb[7] << 8);
57 txlen |= cdb[8];
58
59 trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
60 (unsigned long long)lba, (unsigned long long)txlen,
61 cdb[1] >> 5);
62 trace_seq_putc(p, 0);
63
64 return ret;
65}
66
67static const char *
68scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
69{
70 const char *ret = p->buffer + p->len;
71 sector_t lba = 0, txlen = 0;
72
73 lba |= (cdb[2] << 24);
74 lba |= (cdb[3] << 16);
75 lba |= (cdb[4] << 8);
76 lba |= cdb[5];
77 txlen |= (cdb[6] << 24);
78 txlen |= (cdb[7] << 16);
79 txlen |= (cdb[8] << 8);
80 txlen |= cdb[9];
81
82 trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
83 (unsigned long long)lba, (unsigned long long)txlen,
84 cdb[1] >> 5);
85 trace_seq_putc(p, 0);
86
87 return ret;
88}
89
90static const char *
91scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
92{
93 const char *ret = p->buffer + p->len;
94 sector_t lba = 0, txlen = 0;
95
96 lba |= ((u64)cdb[2] << 56);
97 lba |= ((u64)cdb[3] << 48);
98 lba |= ((u64)cdb[4] << 40);
99 lba |= ((u64)cdb[5] << 32);
100 lba |= (cdb[6] << 24);
101 lba |= (cdb[7] << 16);
102 lba |= (cdb[8] << 8);
103 lba |= cdb[9];
104 txlen |= (cdb[10] << 24);
105 txlen |= (cdb[11] << 16);
106 txlen |= (cdb[12] << 8);
107 txlen |= cdb[13];
108
109 trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
110 (unsigned long long)lba, (unsigned long long)txlen,
111 cdb[1] >> 5);
112
113 if (cdb[0] == WRITE_SAME_16)
114 trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
115
116 trace_seq_putc(p, 0);
117
118 return ret;
119}
120
121static const char *
122scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
123{
124 const char *ret = p->buffer + p->len, *cmd;
125 sector_t lba = 0, txlen = 0;
126 u32 ei_lbrt = 0;
127
128 switch (SERVICE_ACTION32(cdb)) {
129 case READ_32:
130 cmd = "READ";
131 break;
132 case VERIFY_32:
133 cmd = "VERIFY";
134 break;
135 case WRITE_32:
136 cmd = "WRITE";
137 break;
138 case WRITE_SAME_32:
139 cmd = "WRITE_SAME";
140 break;
141 default:
142 trace_seq_printf(p, "UNKNOWN");
143 goto out;
144 }
145
146 lba |= ((u64)cdb[12] << 56);
147 lba |= ((u64)cdb[13] << 48);
148 lba |= ((u64)cdb[14] << 40);
149 lba |= ((u64)cdb[15] << 32);
150 lba |= (cdb[16] << 24);
151 lba |= (cdb[17] << 16);
152 lba |= (cdb[18] << 8);
153 lba |= cdb[19];
154 ei_lbrt |= (cdb[20] << 24);
155 ei_lbrt |= (cdb[21] << 16);
156 ei_lbrt |= (cdb[22] << 8);
157 ei_lbrt |= cdb[23];
158 txlen |= (cdb[28] << 24);
159 txlen |= (cdb[29] << 16);
160 txlen |= (cdb[30] << 8);
161 txlen |= cdb[31];
162
163 trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
164 cmd, (unsigned long long)lba,
165 (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
166
167 if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
168 trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
169
170out:
171 trace_seq_putc(p, 0);
172
173 return ret;
174}
175
176static const char *
177scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
178{
179 const char *ret = p->buffer + p->len;
180 unsigned int regions = cdb[7] << 8 | cdb[8];
181
182 trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
183 trace_seq_putc(p, 0);
184
185 return ret;
186}
187
188static const char *
189scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
190{
191 const char *ret = p->buffer + p->len, *cmd;
192 sector_t lba = 0;
193 u32 alloc_len = 0;
194
195 switch (SERVICE_ACTION16(cdb)) {
196 case SAI_READ_CAPACITY_16:
197 cmd = "READ_CAPACITY_16";
198 break;
199 case SAI_GET_LBA_STATUS:
200 cmd = "GET_LBA_STATUS";
201 break;
202 default:
203 trace_seq_printf(p, "UNKNOWN");
204 goto out;
205 }
206
207 lba |= ((u64)cdb[2] << 56);
208 lba |= ((u64)cdb[3] << 48);
209 lba |= ((u64)cdb[4] << 40);
210 lba |= ((u64)cdb[5] << 32);
211 lba |= (cdb[6] << 24);
212 lba |= (cdb[7] << 16);
213 lba |= (cdb[8] << 8);
214 lba |= cdb[9];
215 alloc_len |= (cdb[10] << 24);
216 alloc_len |= (cdb[11] << 16);
217 alloc_len |= (cdb[12] << 8);
218 alloc_len |= cdb[13];
219
220 trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
221 (unsigned long long)lba, alloc_len);
222
223out:
224 trace_seq_putc(p, 0);
225
226 return ret;
227}
228
229static const char *
230scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
231{
232 switch (SERVICE_ACTION32(cdb)) {
233 case READ_32:
234 case VERIFY_32:
235 case WRITE_32:
236 case WRITE_SAME_32:
237 return scsi_trace_rw32(p, cdb, len);
238 default:
239 return scsi_trace_misc(p, cdb, len);
240 }
241}
242
243static const char *
244scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
245{
246 const char *ret = p->buffer + p->len;
247
248 trace_seq_printf(p, "-");
249 trace_seq_putc(p, 0);
250
251 return ret;
252}
253
254const char *
255scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
256{
257 switch (cdb[0]) {
258 case READ_6:
259 case WRITE_6:
260 return scsi_trace_rw6(p, cdb, len);
261 case READ_10:
262 case VERIFY:
263 case WRITE_10:
264 case WRITE_SAME:
265 return scsi_trace_rw10(p, cdb, len);
266 case READ_12:
267 case VERIFY_12:
268 case WRITE_12:
269 return scsi_trace_rw12(p, cdb, len);
270 case READ_16:
271 case VERIFY_16:
272 case WRITE_16:
273 case WRITE_SAME_16:
274 return scsi_trace_rw16(p, cdb, len);
275 case UNMAP:
276 return scsi_trace_unmap(p, cdb, len);
277 case SERVICE_ACTION_IN:
278 return scsi_trace_service_action_in(p, cdb, len);
279 case VARIABLE_LENGTH_CMD:
280 return scsi_trace_varlen(p, cdb, len);
281 default:
282 return scsi_trace_misc(p, cdb, len);
283 }
284}
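
All of the rw decoders above share one idiom: SCSI CDBs carry multi-byte fields big-endian, so each byte is shifted into place by hand. A standalone check of the READ(10) layout that scsi_trace_rw10() parses (opcode 0x28; the sample field values are arbitrary):

	#include <stdio.h>

	int main(void)
	{
		unsigned char cdb[10] = {
			0x28,			/* READ_10 */
			0x20,			/* RDPROTECT = 1 in the top three bits */
			0x00, 0x00, 0x10, 0x00,	/* logical block address 4096 */
			0x00,			/* group number */
			0x00, 0x08,		/* transfer length: 8 blocks */
			0x00,			/* control */
		};
		unsigned long long lba, txlen;

		lba = ((unsigned long long)cdb[2] << 24) | (cdb[3] << 16) |
		      (cdb[4] << 8) | cdb[5];
		txlen = (cdb[7] << 8) | cdb[8];

		printf("lba=%llu txlen=%llu protect=%u\n",
		       lba, txlen, (unsigned)(cdb[1] >> 5));
		/* prints: lba=4096 txlen=8 protect=1 */
		return 0;
	}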
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6cfffc88022a..06813789145c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -834,7 +834,7 @@ static ssize_t
834store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, 834store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
835 const char *buf, size_t count) 835 const char *buf, size_t count)
836{ 836{
837 int val; 837 unsigned long val;
838 struct fc_rport *rport = transport_class_to_rport(dev); 838 struct fc_rport *rport = transport_class_to_rport(dev);
839 struct Scsi_Host *shost = rport_to_shost(rport); 839 struct Scsi_Host *shost = rport_to_shost(rport);
840 struct fc_internal *i = to_fc_internal(shost->transportt); 840 struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -848,6 +848,12 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
848 return -EINVAL; 848 return -EINVAL;
849 849
850 /* 850 /*
851 * Check for overflow; dev_loss_tmo is u32
852 */
853 if (val > UINT_MAX)
854 return -EINVAL;
855
856 /*
851 * If fast_io_fail is off we have to cap 857 * If fast_io_fail is off we have to cap
852 * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT 858 * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
853 */ 859 */
@@ -2865,7 +2871,7 @@ void
2865fc_remote_port_delete(struct fc_rport *rport) 2871fc_remote_port_delete(struct fc_rport *rport)
2866{ 2872{
2867 struct Scsi_Host *shost = rport_to_shost(rport); 2873 struct Scsi_Host *shost = rport_to_shost(rport);
2868 int timeout = rport->dev_loss_tmo; 2874 unsigned long timeout = rport->dev_loss_tmo;
2869 unsigned long flags; 2875 unsigned long flags;
2870 2876
2871 /* 2877 /*
@@ -3191,23 +3197,33 @@ fc_scsi_scan_rport(struct work_struct *work)
3191 * 3197 *
3192 * This routine can be called from a FC LLD scsi_eh callback. It 3198 * This routine can be called from a FC LLD scsi_eh callback. It
3193 * blocks the scsi_eh thread until the fc_rport leaves the 3199 * blocks the scsi_eh thread until the fc_rport leaves the
3194 * FC_PORTSTATE_BLOCKED. This is necessary to avoid the scsi_eh 3200 * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
3195 * failing recovery actions for blocked rports which would lead to 3201 * necessary to avoid the scsi_eh failing recovery actions for blocked
3196 * offlined SCSI devices. 3202 * rports which would lead to offlined SCSI devices.
3203 *
3204 * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
 3205 * FAST_IO_FAIL if the fast_io_fail_tmo fired; this should be
3206 * passed back to scsi_eh.
3197 */ 3207 */
3198void fc_block_scsi_eh(struct scsi_cmnd *cmnd) 3208int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3199{ 3209{
3200 struct Scsi_Host *shost = cmnd->device->host; 3210 struct Scsi_Host *shost = cmnd->device->host;
3201 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 3211 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3202 unsigned long flags; 3212 unsigned long flags;
3203 3213
3204 spin_lock_irqsave(shost->host_lock, flags); 3214 spin_lock_irqsave(shost->host_lock, flags);
3205 while (rport->port_state == FC_PORTSTATE_BLOCKED) { 3215 while (rport->port_state == FC_PORTSTATE_BLOCKED &&
3216 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
3206 spin_unlock_irqrestore(shost->host_lock, flags); 3217 spin_unlock_irqrestore(shost->host_lock, flags);
3207 msleep(1000); 3218 msleep(1000);
3208 spin_lock_irqsave(shost->host_lock, flags); 3219 spin_lock_irqsave(shost->host_lock, flags);
3209 } 3220 }
3210 spin_unlock_irqrestore(shost->host_lock, flags); 3221 spin_unlock_irqrestore(shost->host_lock, flags);
3222
3223 if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
3224 return FAST_IO_FAIL;
3225
3226 return 0;
3211} 3227}
3212EXPORT_SYMBOL(fc_block_scsi_eh); 3228EXPORT_SYMBOL(fc_block_scsi_eh);
3213 3229
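
With fc_block_scsi_eh() now returning an int, an FC LLD's scsi_eh callback can hand FAST_IO_FAIL straight back to the midlayer, where the scsi_error.c hunks above honor it. A sketch of such a caller, assuming the usual kernel headers; my_eh_device_reset_handler() and my_hw_reset_lun() are hypothetical driver code, and only fc_block_scsi_eh() and the return codes are real.

	/* Hypothetical LLD error-handler callback; sketch only. */
	static int my_eh_device_reset_handler(struct scsi_cmnd *cmnd)
	{
		int rtn;

		rtn = fc_block_scsi_eh(cmnd);	/* waits out FC_PORTSTATE_BLOCKED */
		if (rtn == FAST_IO_FAIL)
			return rtn;		/* rport fast-failed: skip the reset */

		return my_hw_reset_lun(cmnd) ? FAILED : SUCCESS;
	}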
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index de6c60320f6f..829cc37abc41 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1434,6 +1434,8 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
1434#error RC16_LEN must not be more than SD_BUF_SIZE 1434#error RC16_LEN must not be more than SD_BUF_SIZE
1435#endif 1435#endif
1436 1436
1437#define READ_CAPACITY_RETRIES_ON_RESET 10
1438
1437static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, 1439static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1438 unsigned char *buffer) 1440 unsigned char *buffer)
1439{ 1441{
@@ -1441,7 +1443,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1441 struct scsi_sense_hdr sshdr; 1443 struct scsi_sense_hdr sshdr;
1442 int sense_valid = 0; 1444 int sense_valid = 0;
1443 int the_result; 1445 int the_result;
1444 int retries = 3; 1446 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
1445 unsigned int alignment; 1447 unsigned int alignment;
1446 unsigned long long lba; 1448 unsigned long long lba;
1447 unsigned sector_size; 1449 unsigned sector_size;
@@ -1470,6 +1472,13 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1470 * Invalid Field in CDB, just retry 1472 * Invalid Field in CDB, just retry
1471 * silently with RC10 */ 1473 * silently with RC10 */
1472 return -EINVAL; 1474 return -EINVAL;
1475 if (sense_valid &&
1476 sshdr.sense_key == UNIT_ATTENTION &&
1477 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
1478 /* Device reset might occur several times,
1479 * give it one more chance */
1480 if (--reset_retries > 0)
1481 continue;
1473 } 1482 }
1474 retries--; 1483 retries--;
1475 1484
@@ -1528,7 +1537,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1528 struct scsi_sense_hdr sshdr; 1537 struct scsi_sense_hdr sshdr;
1529 int sense_valid = 0; 1538 int sense_valid = 0;
1530 int the_result; 1539 int the_result;
1531 int retries = 3; 1540 int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
1532 sector_t lba; 1541 sector_t lba;
1533 unsigned sector_size; 1542 unsigned sector_size;
1534 1543
@@ -1544,8 +1553,16 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1544 if (media_not_present(sdkp, &sshdr)) 1553 if (media_not_present(sdkp, &sshdr))
1545 return -ENODEV; 1554 return -ENODEV;
1546 1555
1547 if (the_result) 1556 if (the_result) {
1548 sense_valid = scsi_sense_valid(&sshdr); 1557 sense_valid = scsi_sense_valid(&sshdr);
1558 if (sense_valid &&
1559 sshdr.sense_key == UNIT_ATTENTION &&
1560 sshdr.asc == 0x29 && sshdr.ascq == 0x00)
1561 /* Device reset might occur several times,
1562 * give it one more chance */
1563 if (--reset_retries > 0)
1564 continue;
1565 }
1549 retries--; 1566 retries--;
1550 1567
1551 } while (the_result && retries); 1568 } while (the_result && retries);
@@ -1574,6 +1591,8 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
1574 1591
1575static int sd_try_rc16_first(struct scsi_device *sdp) 1592static int sd_try_rc16_first(struct scsi_device *sdp)
1576{ 1593{
1594 if (sdp->host->max_cmd_len < 16)
1595 return 0;
1577 if (sdp->scsi_level > SCSI_SPC_2) 1596 if (sdp->scsi_level > SCSI_SPC_2)
1578 return 1; 1597 return 1;
1579 if (scsi_device_protection(sdp)) 1598 if (scsi_device_protection(sdp))
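
Both read_capacity_16() and read_capacity_10() now keep two counters: three ordinary retries, plus a separate, larger budget for the "power on, reset, or bus device reset occurred" unit attention (sense 06h/29h/00h), which a device may legitimately raise several times in a row. A userspace sketch of that control flow; the issue callback stands in for the actual READ CAPACITY submission.

	#include <stdbool.h>
	#include <stdio.h>

	#define READ_CAPACITY_RETRIES_ON_RESET 10

	struct sense { bool valid; int key, asc, ascq; };

	static bool is_reset_ua(const struct sense *s)
	{
		return s->valid && s->key == 0x06 &&	/* UNIT ATTENTION */
		       s->asc == 0x29 && s->ascq == 0x00;
	}

	static int fail_twice_then_ok(struct sense *s)
	{
		static int calls;

		if (calls++ < 2) {
			s->valid = true; s->key = 0x06; s->asc = 0x29; s->ascq = 0x00;
			return 1;	/* CHECK CONDITION with a reset unit attention */
		}
		s->valid = false;
		return 0;
	}

	static int read_capacity(int (*issue)(struct sense *))
	{
		int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
		int result;
		struct sense s;

		do {
			result = issue(&s);
			if (result && is_reset_ua(&s) && --reset_retries > 0)
				continue;	/* reset UA: retry without spending 'retries' */
			retries--;
		} while (result && retries);

		return result;
	}

	int main(void)
	{
		printf("read_capacity -> %d\n", read_capacity(fail_twice_then_ok));
		return 0;
	}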
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 5fda881c2470..b701bf2cc187 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -2224,14 +2224,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
2224 2224
2225} 2225}
2226 2226
2227void
2228wd33c93_release(void)
2229{
2230}
2231
2232EXPORT_SYMBOL(wd33c93_host_reset); 2227EXPORT_SYMBOL(wd33c93_host_reset);
2233EXPORT_SYMBOL(wd33c93_init); 2228EXPORT_SYMBOL(wd33c93_init);
2234EXPORT_SYMBOL(wd33c93_release);
2235EXPORT_SYMBOL(wd33c93_abort); 2229EXPORT_SYMBOL(wd33c93_abort);
2236EXPORT_SYMBOL(wd33c93_queuecommand); 2230EXPORT_SYMBOL(wd33c93_queuecommand);
2237EXPORT_SYMBOL(wd33c93_intr); 2231EXPORT_SYMBOL(wd33c93_intr);
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 00123f2383d7..1ed5f3bf388e 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -348,6 +348,5 @@ int wd33c93_queuecommand (struct scsi_cmnd *cmd,
348void wd33c93_intr (struct Scsi_Host *instance); 348void wd33c93_intr (struct Scsi_Host *instance);
349int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); 349int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
350int wd33c93_host_reset (struct scsi_cmnd *); 350int wd33c93_host_reset (struct scsi_cmnd *);
351void wd33c93_release(void);
352 351
353#endif /* WD33C93_H */ 352#endif /* WD33C93_H */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c0f4b364c711..c3c5aaaae53a 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -25,6 +25,9 @@ const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
25const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, 25const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
26 const struct trace_print_flags *symbol_array); 26 const struct trace_print_flags *symbol_array);
27 27
28const char *ftrace_print_hex_seq(struct trace_seq *p,
29 const unsigned char *buf, int len);
30
28/* 31/*
29 * The trace entry - the most basic unit of tracing. This is what 32 * The trace entry - the most basic unit of tracing. This is what
30 * is printed in the end as a single line in the trace output, such as: 33 * is printed in the end as a single line in the trace output, such as:
diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild
index b3a0ee6b2f1c..f2b94918994d 100644
--- a/include/scsi/Kbuild
+++ b/include/scsi/Kbuild
@@ -1,4 +1,3 @@
1header-y += scsi.h
2header-y += scsi_netlink.h 1header-y += scsi_netlink.h
3header-y += scsi_netlink_fc.h 2header-y += scsi_netlink_fc.h
4header-y += scsi_bsg_fc.h 3header-y += scsi_bsg_fc.h
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h
index 747e2c7d88d6..8e9b222251c2 100644
--- a/include/scsi/fc/fc_fcp.h
+++ b/include/scsi/fc/fc_fcp.h
@@ -76,6 +76,7 @@ struct fcp_cmnd32 {
76#define FCP_PTA_HEADQ 1 /* head of queue task attribute */ 76#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
77#define FCP_PTA_ORDERED 2 /* ordered task attribute */ 77#define FCP_PTA_ORDERED 2 /* ordered task attribute */
78#define FCP_PTA_ACA 4 /* auto. contingent allegiance */ 78#define FCP_PTA_ACA 4 /* auto. contingent allegiance */
79#define FCP_PTA_MASK 7 /* mask for task attribute field */
79#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */ 80#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
80#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */ 81#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
81 82
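
FCP_PTA_MASK exists because fc_pri_ta packs two fields into one byte: the task attribute in the low three bits and the priority above FCP_PRI_SHIFT. A small demonstration of splitting it, with values copied from the defines above and an arbitrary sample priority:

	#include <stdio.h>

	#define FCP_PTA_ORDERED 2	/* values as defined in fc_fcp.h above */
	#define FCP_PTA_MASK    7
	#define FCP_PRI_SHIFT   3

	int main(void)
	{
		unsigned char fc_pri_ta = (5 << FCP_PRI_SHIFT) | FCP_PTA_ORDERED;

		printf("task attr=%d priority=%d\n",
		       fc_pri_ta & FCP_PTA_MASK,	/* 2 -> ORDERED */
		       fc_pri_ta >> FCP_PRI_SHIFT);	/* 5 */
		return 0;
	}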
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
index 8eb0a0fc0a71..9b4867c9c2d2 100644
--- a/include/scsi/fc_encode.h
+++ b/include/scsi/fc_encode.h
@@ -74,7 +74,7 @@ static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp)
74 adisc->adisc_cmd = ELS_ADISC; 74 adisc->adisc_cmd = ELS_ADISC;
75 put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn); 75 put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn);
76 put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn); 76 put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn);
77 hton24(adisc->adisc_port_id, fc_host_port_id(lport->host)); 77 hton24(adisc->adisc_port_id, lport->port_id);
78} 78}
79 79
80/** 80/**
@@ -127,15 +127,13 @@ static inline int fc_ct_fill(struct fc_lport *lport,
127 127
128 case FC_NS_RFT_ID: 128 case FC_NS_RFT_ID:
129 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft)); 129 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft));
130 hton24(ct->payload.rft.fid.fp_fid, 130 hton24(ct->payload.rft.fid.fp_fid, lport->port_id);
131 fc_host_port_id(lport->host));
132 ct->payload.rft.fts = lport->fcts; 131 ct->payload.rft.fts = lport->fcts;
133 break; 132 break;
134 133
135 case FC_NS_RFF_ID: 134 case FC_NS_RFF_ID:
136 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id)); 135 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id));
137 hton24(ct->payload.rff.fr_fid.fp_fid, 136 hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id);
138 fc_host_port_id(lport->host));
139 ct->payload.rff.fr_type = FC_TYPE_FCP; 137 ct->payload.rff.fr_type = FC_TYPE_FCP;
140 if (lport->service_params & FCP_SPPF_INIT_FCN) 138 if (lport->service_params & FCP_SPPF_INIT_FCN)
141 ct->payload.rff.fr_feat = FCP_FEAT_INIT; 139 ct->payload.rff.fr_feat = FCP_FEAT_INIT;
@@ -145,16 +143,14 @@ static inline int fc_ct_fill(struct fc_lport *lport,
145 143
146 case FC_NS_RNN_ID: 144 case FC_NS_RNN_ID:
147 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id)); 145 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id));
148 hton24(ct->payload.rn.fr_fid.fp_fid, 146 hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id);
149 fc_host_port_id(lport->host));
150 put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn); 147 put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn);
151 break; 148 break;
152 149
153 case FC_NS_RSPN_ID: 150 case FC_NS_RSPN_ID:
154 len = strnlen(fc_host_symbolic_name(lport->host), 255); 151 len = strnlen(fc_host_symbolic_name(lport->host), 255);
155 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len); 152 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len);
156 hton24(ct->payload.spn.fr_fid.fp_fid, 153 hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
157 fc_host_port_id(lport->host));
158 strncpy(ct->payload.spn.fr_name, 154 strncpy(ct->payload.spn.fr_name,
159 fc_host_symbolic_name(lport->host), len); 155 fc_host_symbolic_name(lport->host), len);
160 ct->payload.spn.fr_name_len = len; 156 ct->payload.spn.fr_name_len = len;
@@ -268,7 +264,7 @@ static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp)
268 logo = fc_frame_payload_get(fp, sizeof(*logo)); 264 logo = fc_frame_payload_get(fp, sizeof(*logo));
269 memset(logo, 0, sizeof(*logo)); 265 memset(logo, 0, sizeof(*logo));
270 logo->fl_cmd = ELS_LOGO; 266 logo->fl_cmd = ELS_LOGO;
271 hton24(logo->fl_n_port_id, fc_host_port_id(lport->host)); 267 hton24(logo->fl_n_port_id, lport->port_id);
272 logo->fl_n_port_wwn = htonll(lport->wwpn); 268 logo->fl_n_port_wwn = htonll(lport->wwpn);
273} 269}
274 270
@@ -295,7 +291,7 @@ static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp)
295 rec = fc_frame_payload_get(fp, sizeof(*rec)); 291 rec = fc_frame_payload_get(fp, sizeof(*rec));
296 memset(rec, 0, sizeof(*rec)); 292 memset(rec, 0, sizeof(*rec));
297 rec->rec_cmd = ELS_REC; 293 rec->rec_cmd = ELS_REC;
298 hton24(rec->rec_s_id, fc_host_port_id(lport->host)); 294 hton24(rec->rec_s_id, lport->port_id);
299 rec->rec_ox_id = htons(ep->oxid); 295 rec->rec_ox_id = htons(ep->oxid);
300 rec->rec_rx_id = htons(ep->rxid); 296 rec->rec_rx_id = htons(ep->rxid);
301} 297}
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 4b912eee33e5..7495c0ba67ee 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -47,13 +47,18 @@
47#define ntohll(x) be64_to_cpu(x) 47#define ntohll(x) be64_to_cpu(x)
48#define htonll(x) cpu_to_be64(x) 48#define htonll(x) cpu_to_be64(x)
49 49
50#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
51 50
52#define hton24(p, v) do { \ 51static inline u32 ntoh24(const u8 *p)
53 p[0] = (((v) >> 16) & 0xFF); \ 52{
54 p[1] = (((v) >> 8) & 0xFF); \ 53 return (p[0] << 16) | (p[1] << 8) | p[2];
55 p[2] = ((v) & 0xFF); \ 54}
56 } while (0) 55
56static inline void hton24(u8 *p, u32 v)
57{
58 p[0] = (v >> 16) & 0xff;
59 p[1] = (v >> 8) & 0xff;
60 p[2] = v & 0xff;
61}
57 62
58/** 63/**
59 * enum fc_lport_state - Local port states 64 * enum fc_lport_state - Local port states
@@ -775,6 +780,7 @@ struct fc_disc {
775 * @dev_stats: FCoE device stats (TODO: libfc should not be 780 * @dev_stats: FCoE device stats (TODO: libfc should not be
776 * FCoE aware) 781 * FCoE aware)
777 * @retry_count: Number of retries in the current state 782 * @retry_count: Number of retries in the current state
783 * @port_id: FC Port ID
778 * @wwpn: World Wide Port Name 784 * @wwpn: World Wide Port Name
779 * @wwnn: World Wide Node Name 785 * @wwnn: World Wide Node Name
780 * @service_params: Common service parameters 786 * @service_params: Common service parameters
@@ -821,6 +827,7 @@ struct fc_lport {
821 u8 retry_count; 827 u8 retry_count;
822 828
823 /* Fabric information */ 829 /* Fabric information */
830 u32 port_id;
824 u64 wwpn; 831 u64 wwpn;
825 u64 wwnn; 832 u64 wwnn;
826 unsigned int service_params; 833 unsigned int service_params;
@@ -918,15 +925,6 @@ static inline void fc_lport_free_stats(struct fc_lport *lport)
918} 925}
919 926
920/** 927/**
921 * fc_lport_get_stats() - Get a local port's statistics
922 * @lport: The local port whose statistics are to be retreived
923 */
924static inline struct fcoe_dev_stats *fc_lport_get_stats(struct fc_lport *lport)
925{
926 return per_cpu_ptr(lport->dev_stats, smp_processor_id());
927}
928
929/**
930 * lport_priv() - Return the private data from a local port 928 * lport_priv() - Return the private data from a local port
931 * @lport: The local port whose private data is to be retrieved 929 * @lport: The local port whose private data is to be retrieved
932 */ 930 */
@@ -1053,7 +1051,6 @@ void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id);
1053 * Functions for fc_functions_template 1051 * Functions for fc_functions_template
1054 */ 1052 */
1055void fc_get_host_speed(struct Scsi_Host *); 1053void fc_get_host_speed(struct Scsi_Host *);
1056void fc_get_host_port_type(struct Scsi_Host *);
1057void fc_get_host_port_state(struct Scsi_Host *); 1054void fc_get_host_port_state(struct Scsi_Host *);
1058void fc_set_rport_loss_tmo(struct fc_rport *, u32 timeout); 1055void fc_set_rport_loss_tmo(struct fc_rport *, u32 timeout);
1059struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *); 1056struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
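
Replacing the ntoh24/hton24 macros with inline functions gives single evaluation of arguments and real type checking at no runtime cost. The two inlines are plain C, so the round-trip can be checked in userspace verbatim; only the u8/u32 typedefs are supplied here to stand in for the kernel's.

	#include <assert.h>
	#include <stdio.h>

	typedef unsigned char u8;
	typedef unsigned int  u32;

	/* The inlines exactly as introduced above */
	static inline u32 ntoh24(const u8 *p)
	{
		return (p[0] << 16) | (p[1] << 8) | p[2];
	}

	static inline void hton24(u8 *p, u32 v)
	{
		p[0] = (v >> 16) & 0xff;
		p[1] = (v >> 8) & 0xff;
		p[2] = v & 0xff;
	}

	int main(void)
	{
		u8 fid[3];

		hton24(fid, 0x010203);		/* e.g. an FC_ID / lport->port_id */
		assert(ntoh24(fid) == 0x010203);
		printf("%02x.%02x.%02x\n", fid[0], fid[1], fid[2]);
		return 0;
	}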
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index c603f4a7e7fc..ec13f51531f8 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -29,6 +29,8 @@
29#include <scsi/fc/fc_fcoe.h> 29#include <scsi/fc/fc_fcoe.h>
30#include <scsi/libfc.h> 30#include <scsi/libfc.h>
31 31
32#define FCOE_MAX_CMD_LEN 16 /* Supported CDB length */
33
32/* 34/*
33 * FIP tunable parameters. 35 * FIP tunable parameters.
34 */ 36 */
@@ -65,14 +67,12 @@ enum fip_state {
65 * @port_ka_time: time of next port keep-alive. 67 * @port_ka_time: time of next port keep-alive.
66 * @ctlr_ka_time: time of next controller keep-alive. 68 * @ctlr_ka_time: time of next controller keep-alive.
67 * @timer: timer struct used for all delayed events. 69 * @timer: timer struct used for all delayed events.
68 * @link_work: &work_struct for doing FCF selection. 70 * @timer_work: &work_struct for doing keep-alives and resets.
69 * @recv_work: &work_struct for receiving FIP frames. 71 * @recv_work: &work_struct for receiving FIP frames.
70 * @fip_recv_list: list of received FIP frames. 72 * @fip_recv_list: list of received FIP frames.
71 * @user_mfs: configured maximum FC frame size, including FC header. 73 * @user_mfs: configured maximum FC frame size, including FC header.
72 * @flogi_oxid: exchange ID of most recent fabric login. 74 * @flogi_oxid: exchange ID of most recent fabric login.
73 * @flogi_count: number of FLOGI attempts in AUTO mode. 75 * @flogi_count: number of FLOGI attempts in AUTO mode.
74 * @link: current link status for libfc.
75 * @last_link: last link state reported to libfc.
76 * @map_dest: use the FC_MAP mode for destination MAC addresses. 76 * @map_dest: use the FC_MAP mode for destination MAC addresses.
77 * @spma: supports SPMA server-provided MACs mode 77 * @spma: supports SPMA server-provided MACs mode
78 * @send_ctlr_ka: need to send controller keep alive 78 * @send_ctlr_ka: need to send controller keep alive
@@ -100,14 +100,12 @@ struct fcoe_ctlr {
100 unsigned long port_ka_time; 100 unsigned long port_ka_time;
101 unsigned long ctlr_ka_time; 101 unsigned long ctlr_ka_time;
102 struct timer_list timer; 102 struct timer_list timer;
103 struct work_struct link_work; 103 struct work_struct timer_work;
104 struct work_struct recv_work; 104 struct work_struct recv_work;
105 struct sk_buff_head fip_recv_list; 105 struct sk_buff_head fip_recv_list;
106 u16 user_mfs; 106 u16 user_mfs;
107 u16 flogi_oxid; 107 u16 flogi_oxid;
108 u8 flogi_count; 108 u8 flogi_count;
109 u8 link;
110 u8 last_link;
111 u8 reset_req; 109 u8 reset_req;
112 u8 map_dest; 110 u8 map_dest;
113 u8 spma; 111 u8 spma;
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 8b4deca996ad..9ae5c613131b 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -114,6 +114,7 @@ struct scsi_cmnd;
114#define READ_12 0xa8 114#define READ_12 0xa8
115#define WRITE_12 0xaa 115#define WRITE_12 0xaa
116#define WRITE_VERIFY_12 0xae 116#define WRITE_VERIFY_12 0xae
117#define VERIFY_12 0xaf
117#define SEARCH_HIGH_12 0xb0 118#define SEARCH_HIGH_12 0xb0
118#define SEARCH_EQUAL_12 0xb1 119#define SEARCH_EQUAL_12 0xb1
119#define SEARCH_LOW_12 0xb2 120#define SEARCH_LOW_12 0xb2
@@ -134,6 +135,7 @@ struct scsi_cmnd;
134#define MO_SET_TARGET_PGS 0x0a 135#define MO_SET_TARGET_PGS 0x0a
135/* values for variable length command */ 136/* values for variable length command */
136#define READ_32 0x09 137#define READ_32 0x09
138#define VERIFY_32 0x0a
137#define WRITE_32 0x0b 139#define WRITE_32 0x0b
138#define WRITE_SAME_32 0x0d 140#define WRITE_SAME_32 0x0d
139 141
@@ -423,6 +425,7 @@ static inline int scsi_is_wlun(unsigned int lun)
423#define ADD_TO_MLQUEUE 0x2006 425#define ADD_TO_MLQUEUE 0x2006
424#define TIMEOUT_ERROR 0x2007 426#define TIMEOUT_ERROR 0x2007
425#define SCSI_RETURN_NOT_HANDLED 0x2008 427#define SCSI_RETURN_NOT_HANDLED 0x2008
428#define FAST_IO_FAIL 0x2009
426 429
427/* 430/*
428 * Midlevel queue return values. 431 * Midlevel queue return values.
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 8e86a94faf06..87d81b3ce564 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -807,6 +807,6 @@ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
807struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel, 807struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
808 struct fc_vport_identifiers *); 808 struct fc_vport_identifiers *);
809int fc_vport_terminate(struct fc_vport *vport); 809int fc_vport_terminate(struct fc_vport *vport);
810void fc_block_scsi_eh(struct scsi_cmnd *cmnd); 810int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
811 811
812#endif /* SCSI_TRANSPORT_FC_H */ 812#endif /* SCSI_TRANSPORT_FC_H */
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
new file mode 100644
index 000000000000..25fbefdf2f2e
--- /dev/null
+++ b/include/trace/events/scsi.h
@@ -0,0 +1,345 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM scsi
3
4#if !defined(_TRACE_SCSI_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SCSI_H
6
7#include <scsi/scsi_cmnd.h>
8#include <scsi/scsi_host.h>
9#include <linux/tracepoint.h>
10#include <linux/trace_seq.h>
11
12#define scsi_opcode_name(opcode) { opcode, #opcode }
13#define show_opcode_name(val) \
14 __print_symbolic(val, \
15 scsi_opcode_name(TEST_UNIT_READY), \
16 scsi_opcode_name(REZERO_UNIT), \
17 scsi_opcode_name(REQUEST_SENSE), \
18 scsi_opcode_name(FORMAT_UNIT), \
19 scsi_opcode_name(READ_BLOCK_LIMITS), \
20 scsi_opcode_name(REASSIGN_BLOCKS), \
21 scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \
22 scsi_opcode_name(READ_6), \
23 scsi_opcode_name(WRITE_6), \
24 scsi_opcode_name(SEEK_6), \
25 scsi_opcode_name(READ_REVERSE), \
26 scsi_opcode_name(WRITE_FILEMARKS), \
27 scsi_opcode_name(SPACE), \
28 scsi_opcode_name(INQUIRY), \
29 scsi_opcode_name(RECOVER_BUFFERED_DATA), \
30 scsi_opcode_name(MODE_SELECT), \
31 scsi_opcode_name(RESERVE), \
32 scsi_opcode_name(RELEASE), \
33 scsi_opcode_name(COPY), \
34 scsi_opcode_name(ERASE), \
35 scsi_opcode_name(MODE_SENSE), \
36 scsi_opcode_name(START_STOP), \
37 scsi_opcode_name(RECEIVE_DIAGNOSTIC), \
38 scsi_opcode_name(SEND_DIAGNOSTIC), \
39 scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \
40 scsi_opcode_name(SET_WINDOW), \
41 scsi_opcode_name(READ_CAPACITY), \
42 scsi_opcode_name(READ_10), \
43 scsi_opcode_name(WRITE_10), \
44 scsi_opcode_name(SEEK_10), \
45 scsi_opcode_name(POSITION_TO_ELEMENT), \
46 scsi_opcode_name(WRITE_VERIFY), \
47 scsi_opcode_name(VERIFY), \
48 scsi_opcode_name(SEARCH_HIGH), \
49 scsi_opcode_name(SEARCH_EQUAL), \
50 scsi_opcode_name(SEARCH_LOW), \
51 scsi_opcode_name(SET_LIMITS), \
52 scsi_opcode_name(PRE_FETCH), \
53 scsi_opcode_name(READ_POSITION), \
54 scsi_opcode_name(SYNCHRONIZE_CACHE), \
55 scsi_opcode_name(LOCK_UNLOCK_CACHE), \
56 scsi_opcode_name(READ_DEFECT_DATA), \
57 scsi_opcode_name(MEDIUM_SCAN), \
58 scsi_opcode_name(COMPARE), \
59 scsi_opcode_name(COPY_VERIFY), \
60 scsi_opcode_name(WRITE_BUFFER), \
61 scsi_opcode_name(READ_BUFFER), \
62 scsi_opcode_name(UPDATE_BLOCK), \
63 scsi_opcode_name(READ_LONG), \
64 scsi_opcode_name(WRITE_LONG), \
65 scsi_opcode_name(CHANGE_DEFINITION), \
66 scsi_opcode_name(WRITE_SAME), \
67 scsi_opcode_name(UNMAP), \
68 scsi_opcode_name(READ_TOC), \
69 scsi_opcode_name(LOG_SELECT), \
70 scsi_opcode_name(LOG_SENSE), \
71 scsi_opcode_name(XDWRITEREAD_10), \
72 scsi_opcode_name(MODE_SELECT_10), \
73 scsi_opcode_name(RESERVE_10), \
74 scsi_opcode_name(RELEASE_10), \
75 scsi_opcode_name(MODE_SENSE_10), \
76 scsi_opcode_name(PERSISTENT_RESERVE_IN), \
77 scsi_opcode_name(PERSISTENT_RESERVE_OUT), \
78 scsi_opcode_name(VARIABLE_LENGTH_CMD), \
79 scsi_opcode_name(REPORT_LUNS), \
80 scsi_opcode_name(MAINTENANCE_IN), \
81 scsi_opcode_name(MAINTENANCE_OUT), \
82 scsi_opcode_name(MOVE_MEDIUM), \
83 scsi_opcode_name(EXCHANGE_MEDIUM), \
84 scsi_opcode_name(READ_12), \
85 scsi_opcode_name(WRITE_12), \
86 scsi_opcode_name(WRITE_VERIFY_12), \
87 scsi_opcode_name(SEARCH_HIGH_12), \
88 scsi_opcode_name(SEARCH_EQUAL_12), \
89 scsi_opcode_name(SEARCH_LOW_12), \
90 scsi_opcode_name(READ_ELEMENT_STATUS), \
91 scsi_opcode_name(SEND_VOLUME_TAG), \
92 scsi_opcode_name(WRITE_LONG_2), \
93 scsi_opcode_name(READ_16), \
94 scsi_opcode_name(WRITE_16), \
95 scsi_opcode_name(VERIFY_16), \
96 scsi_opcode_name(WRITE_SAME_16), \
97 scsi_opcode_name(SERVICE_ACTION_IN), \
98 scsi_opcode_name(SAI_READ_CAPACITY_16), \
99 scsi_opcode_name(SAI_GET_LBA_STATUS), \
100 scsi_opcode_name(MI_REPORT_TARGET_PGS), \
101 scsi_opcode_name(MO_SET_TARGET_PGS), \
102 scsi_opcode_name(READ_32), \
103 scsi_opcode_name(WRITE_32), \
104 scsi_opcode_name(WRITE_SAME_32), \
105 scsi_opcode_name(ATA_16), \
106 scsi_opcode_name(ATA_12))
107
108#define scsi_hostbyte_name(result) { result, #result }
109#define show_hostbyte_name(val) \
110 __print_symbolic(val, \
111 scsi_hostbyte_name(DID_OK), \
112 scsi_hostbyte_name(DID_NO_CONNECT), \
113 scsi_hostbyte_name(DID_BUS_BUSY), \
114 scsi_hostbyte_name(DID_TIME_OUT), \
115 scsi_hostbyte_name(DID_BAD_TARGET), \
116 scsi_hostbyte_name(DID_ABORT), \
117 scsi_hostbyte_name(DID_PARITY), \
118 scsi_hostbyte_name(DID_ERROR), \
119 scsi_hostbyte_name(DID_RESET), \
120 scsi_hostbyte_name(DID_BAD_INTR), \
121 scsi_hostbyte_name(DID_PASSTHROUGH), \
122 scsi_hostbyte_name(DID_SOFT_ERROR), \
123 scsi_hostbyte_name(DID_IMM_RETRY), \
124 scsi_hostbyte_name(DID_REQUEUE), \
125 scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \
126 scsi_hostbyte_name(DID_TRANSPORT_FAILFAST))
127
128#define scsi_driverbyte_name(result) { result, #result }
129#define show_driverbyte_name(val) \
130 __print_symbolic(val, \
131 scsi_driverbyte_name(DRIVER_OK), \
132 scsi_driverbyte_name(DRIVER_BUSY), \
133 scsi_driverbyte_name(DRIVER_SOFT), \
134 scsi_driverbyte_name(DRIVER_MEDIA), \
135 scsi_driverbyte_name(DRIVER_ERROR), \
136 scsi_driverbyte_name(DRIVER_INVALID), \
137 scsi_driverbyte_name(DRIVER_TIMEOUT), \
138 scsi_driverbyte_name(DRIVER_HARD), \
139 scsi_driverbyte_name(DRIVER_SENSE))
140
141#define scsi_msgbyte_name(result) { result, #result }
142#define show_msgbyte_name(val) \
143 __print_symbolic(val, \
144 scsi_msgbyte_name(COMMAND_COMPLETE), \
145 scsi_msgbyte_name(EXTENDED_MESSAGE), \
146 scsi_msgbyte_name(SAVE_POINTERS), \
147 scsi_msgbyte_name(RESTORE_POINTERS), \
148 scsi_msgbyte_name(DISCONNECT), \
149 scsi_msgbyte_name(INITIATOR_ERROR), \
150 scsi_msgbyte_name(ABORT_TASK_SET), \
151 scsi_msgbyte_name(MESSAGE_REJECT), \
152 scsi_msgbyte_name(NOP), \
153 scsi_msgbyte_name(MSG_PARITY_ERROR), \
154 scsi_msgbyte_name(LINKED_CMD_COMPLETE), \
155 scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \
156 scsi_msgbyte_name(TARGET_RESET), \
157 scsi_msgbyte_name(ABORT_TASK), \
158 scsi_msgbyte_name(CLEAR_TASK_SET), \
159 scsi_msgbyte_name(INITIATE_RECOVERY), \
160 scsi_msgbyte_name(RELEASE_RECOVERY), \
161 scsi_msgbyte_name(CLEAR_ACA), \
162 scsi_msgbyte_name(LOGICAL_UNIT_RESET), \
163 scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \
164 scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \
165 scsi_msgbyte_name(ORDERED_QUEUE_TAG), \
166 scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \
167 scsi_msgbyte_name(ACA), \
168 scsi_msgbyte_name(QAS_REQUEST), \
169 scsi_msgbyte_name(BUS_DEVICE_RESET), \
170 scsi_msgbyte_name(ABORT))
171
172#define scsi_statusbyte_name(result) { result, #result }
173#define show_statusbyte_name(val) \
174 __print_symbolic(val, \
175 scsi_statusbyte_name(SAM_STAT_GOOD), \
176 scsi_statusbyte_name(SAM_STAT_CHECK_CONDITION), \
177 scsi_statusbyte_name(SAM_STAT_CONDITION_MET), \
178 scsi_statusbyte_name(SAM_STAT_BUSY), \
179 scsi_statusbyte_name(SAM_STAT_INTERMEDIATE), \
180 scsi_statusbyte_name(SAM_STAT_INTERMEDIATE_CONDITION_MET), \
181 scsi_statusbyte_name(SAM_STAT_RESERVATION_CONFLICT), \
182 scsi_statusbyte_name(SAM_STAT_COMMAND_TERMINATED), \
183 scsi_statusbyte_name(SAM_STAT_TASK_SET_FULL), \
184 scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE), \
185 scsi_statusbyte_name(SAM_STAT_TASK_ABORTED))
186
187const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int);
188#define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len)
189
190TRACE_EVENT(scsi_dispatch_cmd_start,
191
192 TP_PROTO(struct scsi_cmnd *cmd),
193
194 TP_ARGS(cmd),
195
196 TP_STRUCT__entry(
197 __field( unsigned int, host_no )
198 __field( unsigned int, channel )
199 __field( unsigned int, id )
200 __field( unsigned int, lun )
201 __field( unsigned int, opcode )
202 __field( unsigned int, cmd_len )
203 __field( unsigned int, data_sglen )
204 __field( unsigned int, prot_sglen )
205 __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
206 ),
207
208 TP_fast_assign(
209 __entry->host_no = cmd->device->host->host_no;
210 __entry->channel = cmd->device->channel;
211 __entry->id = cmd->device->id;
212 __entry->lun = cmd->device->lun;
213 __entry->opcode = cmd->cmnd[0];
214 __entry->cmd_len = cmd->cmd_len;
215 __entry->data_sglen = scsi_sg_count(cmd);
216 __entry->prot_sglen = scsi_prot_sg_count(cmd);
217 memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
218 ),
219
220 TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
221 " cmnd=(%s %s raw=%s)",
222 __entry->host_no, __entry->channel, __entry->id,
223 __entry->lun, __entry->data_sglen, __entry->prot_sglen,
224 show_opcode_name(__entry->opcode),
225 __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
226 __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
227);

TRACE_EVENT(scsi_dispatch_cmd_error,

	TP_PROTO(struct scsi_cmnd *cmd, int rtn),

	TP_ARGS(cmd, rtn),

	TP_STRUCT__entry(
		__field( unsigned int,	host_no	)
		__field( unsigned int,	channel	)
		__field( unsigned int,	id	)
		__field( unsigned int,	lun	)
		__field( int,		rtn	)
		__field( unsigned int,	opcode	)
		__field( unsigned int,	cmd_len	)
		__field( unsigned int,	data_sglen )
		__field( unsigned int,	prot_sglen )
		__dynamic_array(unsigned char,	cmnd, cmd->cmd_len)
	),

	TP_fast_assign(
		__entry->host_no	= cmd->device->host->host_no;
		__entry->channel	= cmd->device->channel;
		__entry->id		= cmd->device->id;
		__entry->lun		= cmd->device->lun;
		__entry->rtn		= rtn;
		__entry->opcode		= cmd->cmnd[0];
		__entry->cmd_len	= cmd->cmd_len;
		__entry->data_sglen	= scsi_sg_count(cmd);
		__entry->prot_sglen	= scsi_prot_sg_count(cmd);
		memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
	),

	TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
		  " cmnd=(%s %s raw=%s) rtn=%d",
		  __entry->host_no, __entry->channel, __entry->id,
		  __entry->lun, __entry->data_sglen, __entry->prot_sglen,
		  show_opcode_name(__entry->opcode),
		  __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
		  __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
		  __entry->rtn)
);
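The error event carries the dispatch return code rtn alongside the command. A hedged sketch of the intended firing condition (assumed here from the event's arguments, not stated in the patch):

/* Hypothetical: fire the error event only when dispatch failed. */
static void example_dispatch_failed(struct scsi_cmnd *cmd, int rtn)
{
	if (rtn)
		trace_scsi_dispatch_cmd_error(cmd, rtn);
}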

DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,

	TP_PROTO(struct scsi_cmnd *cmd),

	TP_ARGS(cmd),

	TP_STRUCT__entry(
		__field( unsigned int,	host_no	)
		__field( unsigned int,	channel	)
		__field( unsigned int,	id	)
		__field( unsigned int,	lun	)
		__field( int,		result	)
		__field( unsigned int,	opcode	)
		__field( unsigned int,	cmd_len	)
		__field( unsigned int,	data_sglen )
		__field( unsigned int,	prot_sglen )
		__dynamic_array(unsigned char,	cmnd, cmd->cmd_len)
	),

	TP_fast_assign(
		__entry->host_no	= cmd->device->host->host_no;
		__entry->channel	= cmd->device->channel;
		__entry->id		= cmd->device->id;
		__entry->lun		= cmd->device->lun;
		__entry->result		= cmd->result;
		__entry->opcode		= cmd->cmnd[0];
		__entry->cmd_len	= cmd->cmd_len;
		__entry->data_sglen	= scsi_sg_count(cmd);
		__entry->prot_sglen	= scsi_prot_sg_count(cmd);
		memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
	),

	TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u "	\
		  "prot_sgl=%u cmnd=(%s %s raw=%s) result=(driver=%s host=%s " \
		  "message=%s status=%s)",
		  __entry->host_no, __entry->channel, __entry->id,
		  __entry->lun, __entry->data_sglen, __entry->prot_sglen,
		  show_opcode_name(__entry->opcode),
		  __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
		  __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
		  show_driverbyte_name(((__entry->result) >> 24) & 0xff),
		  show_hostbyte_name(((__entry->result) >> 16) & 0xff),
		  show_msgbyte_name(((__entry->result) >> 8) & 0xff),
		  show_statusbyte_name(__entry->result & 0xff))
);
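The four show_*byte_name() lookups above slice cmd->result into its driver, host, message, and status bytes. The same decomposition as a standalone helper, written here purely for illustration:

/* Illustration only: mirrors the shifts used in TP_printk above. */
static inline void scsi_result_bytes(int result,
				     unsigned char *drv, unsigned char *host,
				     unsigned char *msg, unsigned char *stat)
{
	*drv  = (result >> 24) & 0xff;	/* driver byte  */
	*host = (result >> 16) & 0xff;	/* host byte    */
	*msg  = (result >> 8) & 0xff;	/* message byte */
	*stat = result & 0xff;		/* status byte  */
}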

DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done,
	     TP_PROTO(struct scsi_cmnd *cmd),
	     TP_ARGS(cmd));

DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_timeout,
	     TP_PROTO(struct scsi_cmnd *cmd),
	     TP_ARGS(cmd));
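Worth noting about the two definitions above:

/* DEFINE_EVENT() stamps a new tracepoint name out of an existing
 * DECLARE_EVENT_CLASS() template, so the done and timeout events share
 * one field layout and one print routine instead of duplicating them. */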

TRACE_EVENT(scsi_eh_wakeup,

	TP_PROTO(struct Scsi_Host *shost),

	TP_ARGS(shost),

	TP_STRUCT__entry(
		__field( unsigned int,	host_no	)
	),

	TP_fast_assign(
		__entry->host_no = shost->host_no;
	),

	TP_printk("host_no=%u", __entry->host_no)
);

#endif /* _TRACE_SCSI_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index ea6f9d4a20e9..c48320b3dabd 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -198,6 +198,9 @@
 		ftrace_print_symbols_seq(p, value, symbols);	\
 	})
 
+#undef __print_hex
+#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace enum print_line_t				\
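A scoping note on the new macro (illustrative, not part of the patch):

/* __print_hex() is only usable inside TP_printk(): it expands to
 * ftrace_print_hex_seq(p, buf, buf_len), where 'p' is the trace_seq
 * the generated output function has in scope. The scsi.h usage
 *
 *	__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len)
 *
 * therefore resolves to a ftrace_print_hex_seq() call on the CDB bytes. */
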
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 8e46b3323cdc..9cb5df5dc656 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -209,6 +209,7 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c)
 
 	return 1;
 }
+EXPORT_SYMBOL(trace_seq_putc);
 
 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
 {
@@ -355,6 +356,21 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 }
 EXPORT_SYMBOL(ftrace_print_symbols_seq);
 
+const char *
+ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
+{
+	int i;
+	const char *ret = p->buffer + p->len;
+
+	for (i = 0; i < buf_len; i++)
+		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
+
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL(ftrace_print_hex_seq);
+
 #ifdef CONFIG_KRETPROBES
 static inline const char *kretprobed(const char *name)
 {
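A worked example of the output format (input bytes chosen arbitrarily):

/* For buf = { 0x28, 0x00, 0xff } and buf_len = 3, the loop's
 * "%s%2.2x" format emits "28 00 ff": two lowercase hex digits per
 * byte, space-separated, then NUL-terminated by trace_seq_putc(),
 * with 'ret' pointing at the string's start in the trace_seq buffer. */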