author	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-25 10:40:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-25 10:40:30 -0400
commit	d49f8a52b15bf35db778035340d8a673149f9f93 (patch)
tree	7a60b3298377f3b243bd4b414aabe9ff6d54dd37
parent	bd6bf7c10484f026505814b690104cdef27ed460 (diff)
parent	a0db8a7516d9eb9ebb7400df21fc061fe472b8ad (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual drivers: UFS, esp_scsi, NCR5380,
  qla2xxx, lpfc, libsas, hisi_sas.

  In addition there's a set of mostly small updates to the target
  subsystem, a set of conversions to the generic DMA API, which do have
  some potential for issues in the older drivers but we'll handle those
  as case by case fixes.

  A new myrs driver for the DAC960/mylex raid controllers to replace
  the block based DAC960 which is also being removed by Jens in this
  merge window.

  Plus the usual slew of trivial changes"

[ "myrs" stands for "MYlex Raid Scsi". Obviously. Silly of me to even
  wonder. There's also a "myrb" driver, where the 'b' stands for
  'block'. Truly, somebody has got mad naming skillz.  - Linus ]

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (237 commits)
  scsi: myrs: Fix the processor absent message in processor_show()
  scsi: myrs: Fix a logical vs bitwise bug
  scsi: hisi_sas: Fix NULL pointer dereference
  scsi: myrs: fix build failure on 32 bit
  scsi: fnic: replace gross legacy tag hack with blk-mq hack
  scsi: mesh: switch to generic DMA API
  scsi: ips: switch to generic DMA API
  scsi: smartpqi: fully convert to the generic DMA API
  scsi: vmw_pscsi: switch to generic DMA API
  scsi: snic: switch to generic DMA API
  scsi: qla4xxx: fully convert to the generic DMA API
  scsi: qla2xxx: fully convert to the generic DMA API
  scsi: qla1280: switch to generic DMA API
  scsi: qedi: fully convert to the generic DMA API
  scsi: qedf: fully convert to the generic DMA API
  scsi: pm8001: switch to generic DMA API
  scsi: nsp32: switch to generic DMA API
  scsi: mvsas: fully convert to the generic DMA API
  scsi: mvumi: switch to generic DMA API
  scsi: mpt3sas: switch to generic DMA API
  ...
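[ The DMA API conversions in this pull all follow the same mechanical
  pattern. A minimal sketch of that pattern, assuming a PCI driver like
  the ones touched here (the helper name is hypothetical, not from this
  diff): ]

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical helper illustrating the legacy -> generic DMA API
	 * conversion pattern seen throughout the commits below. */
	static int example_setup_dma(struct pci_dev *pdev, void **cpu_addr,
				     dma_addr_t *dma_handle, size_t size)
	{
		/* Was: pci_set_dma_mask() + pci_set_consistent_dma_mask(),
		 * tried at 64 bit with a 32-bit fallback on failure. */
		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
			return -ENODEV;

		/* Was: pci_alloc_consistent(pdev, size, dma_handle);
		 * the generic call takes the struct device and a gfp_t. */
		*cpu_addr = dma_alloc_coherent(&pdev->dev, size, dma_handle,
					       GFP_KERNEL);
		if (!*cpu_addr)
			return -ENOMEM;

		return 0;
	}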
-rw-r--r-- Documentation/scsi/ufs.txt | 20
-rw-r--r-- MAINTAINERS | 9
-rw-r--r-- drivers/message/fusion/lsi/mpi_cnfg.h | 2
-rw-r--r-- drivers/message/fusion/mptbase.c | 12
-rw-r--r-- drivers/message/fusion/mptsas.c | 8
-rw-r--r-- drivers/scsi/3w-9xxx.c | 50
-rw-r--r-- drivers/scsi/3w-sas.c | 38
-rw-r--r-- drivers/scsi/3w-xxxx.c | 20
-rw-r--r-- drivers/scsi/3w-xxxx.h | 1
-rw-r--r-- drivers/scsi/53c700.h | 2
-rw-r--r-- drivers/scsi/BusLogic.c | 36
-rw-r--r-- drivers/scsi/FlashPoint.c | 6
-rw-r--r-- drivers/scsi/Kconfig | 35
-rw-r--r-- drivers/scsi/Makefile | 2
-rw-r--r-- drivers/scsi/NCR5380.c | 167
-rw-r--r-- drivers/scsi/NCR5380.h | 2
-rw-r--r-- drivers/scsi/a100u2w.c | 20
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 7
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 2
-rw-r--r-- drivers/scsi/advansys.c | 4
-rw-r--r-- drivers/scsi/aic7xxx/aic7770.c | 6
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx.h | 6
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_core.c | 44
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_pci.c | 8
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx.h | 6
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_93cx6.c | 6
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_core.c | 41
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_pci.c | 7
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm.h | 4
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | 4
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y | 4
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l | 4
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm_scan.l | 4
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c | 4
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h | 4
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_init.c | 9
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_scb.c | 5
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_task.c | 46
-rw-r--r-- drivers/scsi/am53c974.c | 54
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 29
-rw-r--r-- drivers/scsi/atp870u.c | 6
-rw-r--r-- drivers/scsi/be2iscsi/be_cmds.c | 10
-rw-r--r-- drivers/scsi/be2iscsi/be_iscsi.c | 15
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 74
-rw-r--r-- drivers/scsi/be2iscsi/be_mgmt.c | 27
-rw-r--r-- drivers/scsi/bfa/bfa_defs_svc.h | 2
-rw-r--r-- drivers/scsi/bfa/bfa_fcbuild.c | 108
-rw-r--r-- drivers/scsi/bfa/bfa_fcbuild.h | 9
-rw-r--r-- drivers/scsi/bfa/bfad_im.h | 2
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 3
-rw-r--r-- drivers/scsi/csiostor/csio_init.c | 7
-rw-r--r-- drivers/scsi/csiostor/csio_lnode.c | 6
-rw-r--r-- drivers/scsi/csiostor/csio_scsi.c | 12
-rw-r--r-- drivers/scsi/csiostor/csio_wr.c | 17
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 154
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.h | 3
-rw-r--r-- drivers/scsi/dc395x.c | 191
-rw-r--r-- drivers/scsi/esp_scsi.c | 286
-rw-r--r-- drivers/scsi/esp_scsi.h | 38
-rw-r--r-- drivers/scsi/fnic/fnic_fcs.c | 41
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 19
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 99
-rw-r--r-- drivers/scsi/fnic/vnic_dev.c | 26
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 3
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 161
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 1
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 15
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 15
-rw-r--r-- drivers/scsi/hpsa.c | 148
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 1
-rw-r--r-- drivers/scsi/ips.c | 81
-rw-r--r-- drivers/scsi/isci/host.c | 8
-rw-r--r-- drivers/scsi/isci/host.h | 2
-rw-r--r-- drivers/scsi/isci/request.c | 4
-rw-r--r-- drivers/scsi/isci/task.c | 4
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 3
-rw-r--r-- drivers/scsi/jazz_esp.c | 30
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 6
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 22
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_discover.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 22
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 29
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 111
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 344
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.h | 36
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 7
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 11
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 45
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 103
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 19
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 20
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 34
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 310
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 14
-rw-r--r-- drivers/scsi/mac_esp.c | 217
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 117
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.h | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 153
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 16
-rw-r--r-- drivers/scsi/mesh.c | 8
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 1189
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 9
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_config.c | 89
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 527
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 1487
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_transport.c | 355
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 101
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_warpdrive.c | 70
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 21
-rw-r--r-- drivers/scsi/mvsas/mv_sas.c | 12
-rw-r--r-- drivers/scsi/mvumi.c | 89
-rw-r--r-- drivers/scsi/myrb.c | 3656
-rw-r--r-- drivers/scsi/myrb.h | 958
-rw-r--r-- drivers/scsi/myrs.c | 3268
-rw-r--r-- drivers/scsi/myrs.h | 1134
-rw-r--r-- drivers/scsi/nsp32.c | 18
-rw-r--r-- drivers/scsi/osd/osd_initiator.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_defs.h | 8
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 31
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.h | 4
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 31
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 49
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 3
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 119
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.h | 9
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 8
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 8
-rw-r--r-- drivers/scsi/qla1280.c | 26
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 587
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 32
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 17
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 536
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 412
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 23
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 43
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 84
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 52
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 7
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 317
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 542
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 51
-rw-r--r-- drivers/scsi/qla4xxx/ql4_init.c | 10
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 25
-rw-r--r-- drivers/scsi/raid_class.c | 4
-rw-r--r-- drivers/scsi/scsi_error.c | 3
-rw-r--r-- drivers/scsi/scsi_lib.c | 5
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 2
-rw-r--r-- drivers/scsi/sg.c | 2
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 100
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sis.c | 11
-rw-r--r-- drivers/scsi/snic/snic_disc.c | 7
-rw-r--r-- drivers/scsi/snic/snic_io.c | 25
-rw-r--r-- drivers/scsi/snic/snic_main.c | 24
-rw-r--r-- drivers/scsi/snic/snic_scsi.c | 15
-rw-r--r-- drivers/scsi/snic/vnic_dev.c | 29
-rw-r--r-- drivers/scsi/sun3x_esp.c | 30
-rw-r--r-- drivers/scsi/sun_esp.c | 61
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_hipd.c | 15
-rw-r--r-- drivers/scsi/ufs/Kconfig | 19
-rw-r--r-- drivers/scsi/ufs/Makefile | 3
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.c | 54
-rw-r--r-- drivers/scsi/ufs/ufs.h | 94
-rw-r--r-- drivers/scsi/ufs/ufs_bsg.c | 210
-rw-r--r-- drivers/scsi/ufs/ufs_bsg.h | 23
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 431
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 12
-rw-r--r-- drivers/scsi/ufs/ufshci.h | 25
-rw-r--r-- drivers/scsi/vmw_pvscsi.c | 77
-rw-r--r-- drivers/scsi/zorro_esp.c | 290
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_cm.c | 23
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl0.c | 15
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl1.c | 17
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl2.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl2.h | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_stat.c | 4
-rw-r--r-- drivers/target/iscsi/iscsi_target_util.c | 44
-rw-r--r-- drivers/target/iscsi/iscsi_target_util.h | 1
-rw-r--r-- drivers/target/target_core_iblock.c | 58
-rw-r--r-- drivers/target/target_core_iblock.h | 1
-rw-r--r-- drivers/target/target_core_sbc.c | 23
-rw-r--r-- drivers/target/target_core_transport.c | 19
-rw-r--r-- drivers/target/target_core_xcopy.c | 3
-rw-r--r-- include/linux/wait.h | 20
-rw-r--r-- include/target/iscsi/iscsi_target_core.h | 6
-rw-r--r-- include/target/iscsi/iscsi_target_stat.h | 4
-rw-r--r-- include/target/target_core_base.h | 1
-rw-r--r-- include/uapi/scsi/scsi_bsg_ufs.h | 106
200 files changed, 16045 insertions, 5954 deletions
diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.txt
index 41a6164592aa..520b5b033256 100644
--- a/Documentation/scsi/ufs.txt
+++ b/Documentation/scsi/ufs.txt
@@ -128,6 +128,26 @@ The current UFSHCD implementation supports following functionality,
 In this version of UFSHCD Query requests and power management
 functionality are not implemented.
 
+4. BSG Support
+------------------
+
+This transport driver supports exchanging UFS protocol information units
+(UPIUs) with a UFS device. Typically, user space will allocate
+struct ufs_bsg_request and struct ufs_bsg_reply (see ufs_bsg.h) as
+request_upiu and reply_upiu respectively. Filling those UPIUs should
+be done in accordance with JEDEC spec UFS2.1 paragraph 10.7.
+*Caveat emptor*: The driver makes no further input validations and sends the
+UPIU to the device as it is. Open the bsg device in /dev/ufs-bsg and
+send SG_IO with the applicable sg_io_v4:
+
+	io_hdr_v4.guard = 'Q';
+	io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
+	io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
+	io_hdr_v4.response = (__u64)reply_upiu;
+	io_hdr_v4.max_response_len = reply_len;
+	io_hdr_v4.request_len = request_len;
+	io_hdr_v4.request = (__u64)request_upiu;
+
 UFS Specifications can be found at,
 UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
 UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
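[ A fuller user-space sketch of the BSG flow documented above, assuming
  the uapi headers added by this series; the helper name and the minimal
  error handling are hypothetical, not part of this diff: ]

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>	/* SG_IO */

	/* struct ufs_bsg_request / struct ufs_bsg_reply come from the
	 * new include/uapi/scsi/scsi_bsg_ufs.h added in this merge. */
	static int send_upiu(void *request_upiu, __u32 request_len,
			     void *reply_upiu, __u32 reply_len)
	{
		struct sg_io_v4 io_hdr_v4;
		int fd, ret;

		memset(&io_hdr_v4, 0, sizeof(io_hdr_v4));
		io_hdr_v4.guard = 'Q';
		io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
		io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io_hdr_v4.response = (__u64)(uintptr_t)reply_upiu;
		io_hdr_v4.max_response_len = reply_len;
		io_hdr_v4.request_len = request_len;
		io_hdr_v4.request = (__u64)(uintptr_t)request_upiu;

		fd = open("/dev/ufs-bsg", O_RDWR);	/* node per the text above */
		if (fd < 0)
			return -1;

		ret = ioctl(fd, SG_IO, &io_hdr_v4);
		close(fd);
		return ret;
	}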
diff --git a/MAINTAINERS b/MAINTAINERS
index 16fb17ce1475..46a7656bf96b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4055,7 +4055,7 @@ M: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/cxlflash/
-F:	include/uapi/scsi/cxlflash_ioctls.h
+F:	include/uapi/scsi/cxlflash_ioctl.h
 F:	Documentation/powerpc/cxlflash.txt
 
 CYBERPRO FB DRIVER
@@ -10011,6 +10011,13 @@ S: Supported
 F:	drivers/gpu/drm/mxsfb/
 F:	Documentation/devicetree/bindings/display/mxsfb.txt
 
+MYLEX DAC960 PCI RAID Controller
+M:	Hannes Reinecke <hare@kernel.org>
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	drivers/scsi/myrb.*
+F:	drivers/scsi/myrs.*
+
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
 M:	Chris Lee <christopher.lee@cspi.com>
 L:	netdev@vger.kernel.org
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 059997f8ebce..178f414ea8f9 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -2004,7 +2004,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_6
 	U64	LinkFailureCount;	/* 50h */
 	U64	LossOfSyncCount;	/* 58h */
 	U64	LossOfSignalCount;	/* 60h */
-	U64	PrimativeSeqErrCount;	/* 68h */
+	U64	PrimitiveSeqErrCount;	/* 68h */
 	U64	InvalidTxWordCount;	/* 70h */
 	U64	InvalidCrcCount;	/* 78h */
 	U64	FcpInitiatorIoCount;	/* 80h */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e6b4ae558767..ba551d8dfba4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -335,11 +335,11 @@ static int mpt_remove_dead_ioc_func(void *arg)
 	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
 	struct pci_dev *pdev;
 
-	if ((ioc == NULL))
+	if (!ioc)
 		return -1;
 
 	pdev = ioc->pcidev;
-	if ((pdev == NULL))
+	if (!pdev)
 		return -1;
 
 	pci_stop_and_remove_bus_device_locked(pdev);
@@ -7570,11 +7570,11 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
 		u8 phy_num = (u8)(evData0);
 		u8 port_num = (u8)(evData0 >> 8);
 		u8 port_width = (u8)(evData0 >> 16);
-		u8 primative = (u8)(evData0 >> 24);
+		u8 primitive = (u8)(evData0 >> 24);
 		snprintf(evStr, EVENT_DESCR_STR_SZ,
-		    "SAS Broadcase Primative: phy=%d port=%d "
-		    "width=%d primative=0x%02x",
-		    phy_num, port_num, port_width, primative);
+		    "SAS Broadcast Primitive: phy=%d port=%d "
+		    "width=%d primitive=0x%02x",
+		    phy_num, port_num, port_width, primitive);
 		break;
 	}
 
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b8cf2658649e..9b404fc69c90 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -129,7 +129,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
 static void mptsas_send_expander_event(struct fw_event_work *fw_event);
 static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
 static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
-static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
+static void mptsas_broadcast_primitive_work(struct fw_event_work *fw_event);
 static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
 static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
 void mptsas_schedule_target_reset(void *ioc);
@@ -1665,7 +1665,7 @@ mptsas_firmware_event_work(struct work_struct *work)
 		mptsas_free_fw_event(ioc, fw_event);
 		break;
 	case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
-		mptsas_broadcast_primative_work(fw_event);
+		mptsas_broadcast_primitive_work(fw_event);
 		break;
 	case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
 		mptsas_send_expander_event(fw_event);
@@ -4826,13 +4826,13 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
 }
 
 /**
- * mptsas_broadcast_primative_work - Handle broadcast primitives
+ * mptsas_broadcast_primitive_work - Handle broadcast primitives
  * @work: work queue payload containing info describing the event
  *
  * this will be handled in workqueue context.
  */
 static void
-mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
 {
 	MPT_ADAPTER *ioc = fw_event->ioc;
 	MPT_FRAME_HDR *mf;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 27521fc3ef5a..05293babb031 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -518,7 +518,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 	unsigned long *cpu_addr;
 	int retval = 1;
 
-	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 	if (!cpu_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 		goto out;
@@ -526,7 +527,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 
 	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
-		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+				cpu_addr, dma_handle);
 		goto out;
 	}
 
@@ -1027,16 +1029,16 @@ out:
 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
 {
 	if (tw_dev->command_packet_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
-			sizeof(TW_Command_Full)*TW_Q_LENGTH,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+			sizeof(TW_Command_Full) * TW_Q_LENGTH,
 			tw_dev->command_packet_virt[0],
 			tw_dev->command_packet_phys[0]);
 
 	if (tw_dev->generic_buffer_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
-			TW_SECTOR_SIZE*TW_Q_LENGTH,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+			TW_SECTOR_SIZE * TW_Q_LENGTH,
 			tw_dev->generic_buffer_virt[0],
 			tw_dev->generic_buffer_phys[0]);
 
 	kfree(tw_dev->event_queue[0]);
 } /* End twa_free_device_extension() */
@@ -2015,14 +2017,12 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
 	if (!host) {
@@ -2237,14 +2237,12 @@ static int twa_resume(struct pci_dev *pdev)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	/* Initialize the card */
 	if (twa_reset_sequence(tw_dev, 0)) {
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 40c1e6e64f58..266bdac75304 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -644,8 +644,8 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 	unsigned long *cpu_addr;
 	int retval = 1;
 
-	cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
-					 &dma_handle);
+	cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
+			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 	if (!cpu_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 		goto out;
@@ -899,19 +899,19 @@ out:
 static void twl_free_device_extension(TW_Device_Extension *tw_dev)
 {
 	if (tw_dev->command_packet_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 			sizeof(TW_Command_Full)*TW_Q_LENGTH,
 			tw_dev->command_packet_virt[0],
 			tw_dev->command_packet_phys[0]);
 
 	if (tw_dev->generic_buffer_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 			TW_SECTOR_SIZE*TW_Q_LENGTH,
 			tw_dev->generic_buffer_virt[0],
 			tw_dev->generic_buffer_phys[0]);
 
 	if (tw_dev->sense_buffer_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 			sizeof(TW_Command_Apache_Header)*
 			TW_Q_LENGTH,
 			tw_dev->sense_buffer_virt[0],
@@ -1571,14 +1571,12 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
 	if (!host) {
@@ -1805,14 +1803,12 @@ static int twl_resume(struct pci_dev *pdev)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	/* Initialize the card */
 	if (twl_reset_sequence(tw_dev, 0)) {
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 471366945bd4..a58257645e94 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -834,15 +834,17 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 
 	dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n");
 
-	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 	if (cpu_addr == NULL) {
-		printk(KERN_WARNING "3w-xxxx: pci_alloc_consistent() failed.\n");
+		printk(KERN_WARNING "3w-xxxx: dma_alloc_coherent() failed.\n");
 		return 1;
 	}
 
 	if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) {
 		printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n");
-		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+				cpu_addr, dma_handle);
 		return 1;
 	}
 
@@ -1062,10 +1064,16 @@ static void tw_free_device_extension(TW_Device_Extension *tw_dev)
 
 	/* Free command packet and generic buffer memory */
 	if (tw_dev->command_packet_virtual_address[0])
-		pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command)*TW_Q_LENGTH, tw_dev->command_packet_virtual_address[0], tw_dev->command_packet_physical_address[0]);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+				sizeof(TW_Command) * TW_Q_LENGTH,
+				tw_dev->command_packet_virtual_address[0],
+				tw_dev->command_packet_physical_address[0]);
 
 	if (tw_dev->alignment_virtual_address[0])
-		pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Sector)*TW_Q_LENGTH, tw_dev->alignment_virtual_address[0], tw_dev->alignment_physical_address[0]);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+				sizeof(TW_Sector) * TW_Q_LENGTH,
+				tw_dev->alignment_virtual_address[0],
+				tw_dev->alignment_physical_address[0]);
 } /* End tw_free_device_extension() */
 
 /* This function will send an initconnection command to controller */
@@ -2260,7 +2268,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 
 	pci_set_master(pdev);
 
-	retval = pci_set_dma_mask(pdev, TW_DMA_MASK);
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (retval) {
 		printk(KERN_WARNING "3w-xxxx: Failed to set dma mask.");
 		goto out_disable_device;
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 69e80c1ed1ca..bd87fbacfbc7 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -230,7 +230,6 @@ static unsigned char tw_sense_table[][4] =
 #define TW_IOCTL_TIMEOUT	25 /* 25 seconds */
 #define TW_IOCTL_CHRDEV_TIMEOUT	60 /* 60 seconds */
 #define TW_IOCTL_CHRDEV_FREE	-1
-#define TW_DMA_MASK		DMA_BIT_MASK(32)
 #define TW_MAX_CDB_LEN		16
 
 /* Bitmask macros to eliminate bitfields */
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 0c9a100af667..05fe439b66af 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -90,7 +90,7 @@ struct NCR_700_Device_Parameters {
 /* The SYNC negotiation sequence looks like:
  *
  * If DEV_NEGOTIATED_SYNC not set, tack and SDTR message on to the
- * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
+ * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTIATION
  * If we get an SDTR reply, work out the SXFER parameters, squirrel
  * them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set
  * DEV_NEGOTIATED_SYNC. If we get a REJECT msg, squirrel
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 0d4ffe0ae306..9cee941f97d6 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -201,8 +201,8 @@ static bool __init blogic_create_initccbs(struct blogic_adapter *adapter)
 	dma_addr_t blkp;
 
 	while (adapter->alloc_ccbs < adapter->initccbs) {
-		blk_pointer = pci_alloc_consistent(adapter->pci_device,
-				blk_size, &blkp);
+		blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+				blk_size, &blkp, GFP_KERNEL);
 		if (blk_pointer == NULL) {
 			blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
 					adapter);
@@ -227,15 +227,16 @@ static void blogic_destroy_ccbs(struct blogic_adapter *adapter)
 		next_ccb = ccb->next_all;
 		if (ccb->allocgrp_head) {
 			if (lastccb)
-				pci_free_consistent(adapter->pci_device,
+				dma_free_coherent(&adapter->pci_device->dev,
 					lastccb->allocgrp_size, lastccb,
 					lastccb->allocgrp_head);
 			lastccb = ccb;
 		}
 	}
 	if (lastccb)
-		pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
-			lastccb, lastccb->allocgrp_head);
+		dma_free_coherent(&adapter->pci_device->dev,
+			lastccb->allocgrp_size, lastccb,
+			lastccb->allocgrp_head);
 }
 
 
@@ -256,8 +257,8 @@ static void blogic_create_addlccbs(struct blogic_adapter *adapter,
 	if (addl_ccbs <= 0)
 		return;
 	while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
-		blk_pointer = pci_alloc_consistent(adapter->pci_device,
-				blk_size, &blkp);
+		blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+				blk_size, &blkp, GFP_KERNEL);
 		if (blk_pointer == NULL)
 			break;
 		blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
@@ -318,8 +319,8 @@ static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
 	if (ccb->command != NULL)
 		scsi_dma_unmap(ccb->command);
 	if (dma_unmap)
-		pci_unmap_single(adapter->pci_device, ccb->sensedata,
-			 ccb->sense_datalen, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata,
+			 ccb->sense_datalen, DMA_FROM_DEVICE);
 
 	ccb->command = NULL;
 	ccb->status = BLOGIC_CCB_FREE;
@@ -712,7 +713,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 		if (pci_enable_device(pci_device))
 			continue;
 
-		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+		if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
 			continue;
 
 		bus = pci_device->bus->number;
@@ -895,7 +896,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 		if (pci_enable_device(pci_device))
 			continue;
 
-		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+		if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
 			continue;
 
 		bus = pci_device->bus->number;
@@ -952,7 +953,7 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
 		if (pci_enable_device(pci_device))
 			continue;
 
-		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+		if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
 			continue;
 
 		bus = pci_device->bus->number;
@@ -2040,7 +2041,7 @@ static void blogic_relres(struct blogic_adapter *adapter)
 	   Release any allocated memory structs not released elsewhere
 	 */
 	if (adapter->mbox_space)
-		pci_free_consistent(adapter->pci_device, adapter->mbox_sz,
+		dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz,
 			adapter->mbox_space, adapter->mbox_space_handle);
 	pci_dev_put(adapter->pci_device);
 	adapter->mbox_space = NULL;
@@ -2092,8 +2093,9 @@ static bool blogic_initadapter(struct blogic_adapter *adapter)
 	   Initialize the Outgoing and Incoming Mailbox pointers.
 	 */
 	adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox));
-	adapter->mbox_space = pci_alloc_consistent(adapter->pci_device,
-		adapter->mbox_sz, &adapter->mbox_space_handle);
+	adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev,
+		adapter->mbox_sz, &adapter->mbox_space_handle,
+		GFP_KERNEL);
 	if (adapter->mbox_space == NULL)
 		return blogic_failure(adapter, "MAILBOX ALLOCATION");
 	adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space;
@@ -3183,9 +3185,9 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
 	memcpy(ccb->cdb, cdb, cdblen);
 	ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
 	ccb->command = command;
-	sense_buf = pci_map_single(adapter->pci_device,
+	sense_buf = dma_map_single(&adapter->pci_device->dev,
 		command->sense_buffer, ccb->sense_datalen,
-		PCI_DMA_FROMDEVICE);
+		DMA_FROM_DEVICE);
 	if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
 		blogic_err("DMA mapping for sense data buffer failed\n",
 			adapter);
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 867b864f5047..0f17bd51088a 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -2944,7 +2944,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
 	}
 
 	if (currSCCB->Lun == 0x00) {
-		if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {
+		if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
 
 			currTar_Info->TarStatus |=
 			    (unsigned char)SYNC_SUPPORTED;
@@ -2953,8 +2953,8 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
 			    ~EE_SYNC_MASK;
 		}
 
-		else if ((currSCCB->Sccb_scsistat ==
-			  SELECT_WN_ST)) {
+		else if (currSCCB->Sccb_scsistat ==
+			 SELECT_WN_ST) {
 
 			currTar_Info->TarStatus =
 			    (currTar_Info->
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7c097006c54d..70988c381268 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -42,6 +42,9 @@ config SCSI_DMA
 	bool
 	default n
 
+config SCSI_ESP_PIO
+	bool
+
 config SCSI_NETLINK
 	bool
 	default n
@@ -557,6 +560,36 @@ config SCSI_FLASHPOINT
 	  substantial, so users of MultiMaster Host Adapters may not
 	  wish to include it.
 
+config SCSI_MYRB
+	tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)"
+	depends on PCI
+	select RAID_ATTRS
+	help
+	  This driver adds support for the Mylex DAC960, AcceleRAID, and
+	  eXtremeRAID PCI RAID controllers. This driver supports the
+	  older, block based interface.
+	  This driver is a reimplementation of the original DAC960
+	  driver. If you have used the DAC960 driver you should enable
+	  this module.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called myrb.
+
+config SCSI_MYRS
+	tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
+	depends on PCI
+	select RAID_ATTRS
+	help
+	  This driver adds support for the Mylex DAC960, AcceleRAID, and
+	  eXtremeRAID PCI RAID controllers. This driver supports the
+	  newer, SCSI-based interface only.
+	  This driver is a reimplementation of the original DAC960
+	  driver. If you have used the DAC960 driver you should enable
+	  this module.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called myrs.
+
 config VMWARE_PVSCSI
 	tristate "VMware PVSCSI driver support"
 	depends on PCI && SCSI && X86
@@ -1332,6 +1365,7 @@ config SCSI_ZORRO_ESP
 	tristate "Zorro ESP SCSI support"
 	depends on ZORRO && SCSI
 	select SCSI_SPI_ATTRS
+	select SCSI_ESP_PIO
 	help
 	  Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
 	  expansion boards for the Amiga.
@@ -1374,6 +1408,7 @@ config SCSI_MAC_ESP
 	tristate "Macintosh NCR53c9[46] SCSI"
 	depends on MAC && SCSI
 	select SCSI_SPI_ATTRS
+	select SCSI_ESP_PIO
 	help
 	  This is the NCR 53c9x SCSI controller found on most of the 68040
 	  based Macintoshes.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6d71b2a9592b..fcb41ae329c4 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -106,6 +106,8 @@ obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
 obj-$(CONFIG_SCSI_QLOGICPTI)	+= qlogicpti.o
 obj-$(CONFIG_SCSI_MESH)		+= mesh.o
 obj-$(CONFIG_SCSI_MAC53C94)	+= mac53c94.o
+obj-$(CONFIG_SCSI_MYRB)		+= myrb.o
+obj-$(CONFIG_SCSI_MYRS)		+= myrs.o
 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
 obj-$(CONFIG_SCSI_3W_9XXX)	+= 3w-9xxx.o
 obj-$(CONFIG_SCSI_3W_SAS)	+= 3w-sas.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 90ea0f5d9bdb..8429c855701f 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -131,6 +131,7 @@
 
 static int do_abort(struct Scsi_Host *);
 static void do_reset(struct Scsi_Host *);
+static void bus_reset_cleanup(struct Scsi_Host *);
 
 /**
  * initialize_SCp - init the scsi pointer field
@@ -513,16 +514,15 @@ static void complete_cmd(struct Scsi_Host *instance,
 
 	if (hostdata->sensing == cmd) {
 		/* Autosense processing ends here */
-		if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
+		if (status_byte(cmd->result) != GOOD) {
 			scsi_eh_restore_cmnd(cmd, &hostdata->ses);
-			set_host_byte(cmd, DID_ERROR);
-		} else
+		} else {
 			scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+			set_driver_byte(cmd, DRIVER_SENSE);
+		}
 		hostdata->sensing = NULL;
 	}
 
-	hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-
 	cmd->scsi_done(cmd);
 }
 
@@ -884,7 +884,14 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
 		/* Probably Bus Reset */
 		NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 
-		dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+		if (sr & SR_RST) {
+			/* Certainly Bus Reset */
+			shost_printk(KERN_WARNING, instance,
+				     "bus reset interrupt\n");
+			bus_reset_cleanup(instance);
+		} else {
+			dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+		}
 #ifdef SUN3_SCSI_VME
 		dregs->csr |= CSR_DMA_ENABLE;
 #endif
@@ -902,20 +909,16 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
 	return IRQ_RETVAL(handled);
 }
 
-/*
- * Function : int NCR5380_select(struct Scsi_Host *instance,
- * struct scsi_cmnd *cmd)
- *
- * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
- * including ARBITRATION, SELECTION, and initial message out for
- * IDENTIFY and queue messages.
+/**
+ * NCR5380_select - attempt arbitration and selection for a given command
+ * @instance: the Scsi_Host instance
+ * @cmd: the scsi_cmnd to execute
  *
- * Inputs : instance - instantiation of the 5380 driver on which this
- * target lives, cmd - SCSI command to execute.
+ * This routine establishes an I_T_L nexus for a SCSI command. This involves
+ * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message.
  *
- * Returns cmd if selection failed but should be retried,
- * NULL if selection failed and should not be retried, or
- * NULL if selection succeeded (hostdata->connected == cmd).
+ * Returns true if the operation should be retried.
+ * Returns false if it should not be retried.
  *
  * Side effects :
  * If bus busy, arbitration failed, etc, NCR5380_select() will exit
@@ -923,16 +926,15 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
  * SELECT_ENABLE will be set appropriately, the NCR5380
  * will cease to drive any SCSI bus signals.
  *
- * If successful : I_T_L or I_T_L_Q nexus will be established,
- * instance->connected will be set to cmd.
+ * If successful : the I_T_L nexus will be established, and
+ * hostdata->connected will be set to cmd.
  * SELECT interrupt will be disabled.
  *
  * If failed (no target) : cmd->scsi_done() will be called, and the
  * cmd->result host byte set to DID_BAD_TARGET.
  */
 
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
-                                        struct scsi_cmnd *cmd)
+static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 	__releases(&hostdata->lock) __acquires(&hostdata->lock)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -940,6 +942,9 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	unsigned char *data;
 	int len;
 	int err;
+	bool ret = true;
+	bool can_disconnect = instance->irq != NO_IRQ &&
+			      cmd->cmnd[0] != REQUEST_SENSE;
 
 	NCR5380_dprint(NDEBUG_ARBITRATION, instance);
 	dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -948,7 +953,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	/*
 	 * Arbitration and selection phases are slow and involve dropping the
 	 * lock, so we have to watch out for EH. An exception handler may
-	 * change 'selecting' to NULL. This function will then return NULL
+	 * change 'selecting' to NULL. This function will then return false
 	 * so that the caller will forget about 'cmd'. (During information
 	 * transfer phases, EH may change 'connected' to NULL.)
 	 */
@@ -984,7 +989,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	if (!hostdata->selecting) {
 		/* Command was aborted */
 		NCR5380_write(MODE_REG, MR_BASE);
-		goto out;
+		return false;
 	}
 	if (err < 0) {
 		NCR5380_write(MODE_REG, MR_BASE);
@@ -1033,7 +1038,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	if (!hostdata->selecting) {
 		NCR5380_write(MODE_REG, MR_BASE);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		goto out;
+		return false;
 	}
 
 	dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
@@ -1116,13 +1121,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 		spin_lock_irq(&hostdata->lock);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
 		/* Can't touch cmd if it has been reclaimed by the scsi ML */
-		if (hostdata->selecting) {
-			cmd->result = DID_BAD_TARGET << 16;
-			complete_cmd(instance, cmd);
-			dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
-			cmd = NULL;
-		}
+		if (!hostdata->selecting)
+			return false;
+
+		cmd->result = DID_BAD_TARGET << 16;
+		complete_cmd(instance, cmd);
+		dsprintk(NDEBUG_SELECTION, instance,
+			"target did not respond within 250ms\n");
+		ret = false;
 		goto out;
 	}
 
@@ -1155,12 +1163,12 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	}
 	if (!hostdata->selecting) {
 		do_abort(instance);
-		goto out;
+		return false;
 	}
 
 	dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
 	         scmd_id(cmd));
-	tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
+	tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun);
 
 	len = 1;
 	data = tmp;
@@ -1171,7 +1179,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 		cmd->result = DID_ERROR << 16;
 		complete_cmd(instance, cmd);
 		dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
-		cmd = NULL;
+		ret = false;
 		goto out;
 	}
 
@@ -1186,13 +1194,13 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 
 	initialize_SCp(cmd);
 
-	cmd = NULL;
+	ret = false;
 
 out:
 	if (!hostdata->selecting)
 		return NULL;
 	hostdata->selecting = NULL;
-	return cmd;
+	return ret;
 }
 
 /*
@@ -1711,6 +1719,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 			cmd->result = DID_ERROR << 16;
 			complete_cmd(instance, cmd);
 			hostdata->connected = NULL;
+			hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 			return;
 #endif
 		case PHASE_DATAIN:
@@ -1793,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 			         cmd, scmd_id(cmd), cmd->device->lun);
 
 			hostdata->connected = NULL;
+			hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 
 			cmd->result &= ~0xffff;
 			cmd->result |= cmd->SCp.Status;
@@ -1951,6 +1961,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 			NCR5380_transfer_pio(instance, &phase, &len, &data);
 			if (msgout == ABORT) {
 				hostdata->connected = NULL;
+				hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 				cmd->result = DID_ERROR << 16;
 				complete_cmd(instance, cmd);
 				maybe_release_dma_irq(instance);
@@ -2014,8 +2025,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	NCR5380_write(MODE_REG, MR_BASE);
 
 	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
-	dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
+	if (!target_mask || target_mask & (target_mask - 1)) {
+		shost_printk(KERN_WARNING, instance,
+			     "reselect: bad target_mask 0x%02x\n", target_mask);
+		return;
+	}
 
 	/*
 	 * At this point, we have detected that our SCSI ID is on the bus,
@@ -2029,6 +2043,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
 	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+		shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		return;
 	}
@@ -2040,6 +2055,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 
 	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+		if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
+			/* BUS FREE phase */
+			return;
+		shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
 		do_abort(instance);
 		return;
 	}
@@ -2101,13 +2120,16 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
2101 dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance, 2120 dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
2102 "reselect: removed %p from disconnected queue\n", tmp); 2121 "reselect: removed %p from disconnected queue\n", tmp);
2103 } else { 2122 } else {
2123 int target = ffs(target_mask) - 1;
2124
2104 shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n", 2125 shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
2105 target_mask, lun); 2126 target_mask, lun);
2106 /* 2127 /*
2107 * Since we have an established nexus that we can't do anything 2128 * Since we have an established nexus that we can't do anything
2108 * with, we must abort it. 2129 * with, we must abort it.
2109 */ 2130 */
2110 do_abort(instance); 2131 if (do_abort(instance) == 0)
2132 hostdata->busy[target] &= ~(1 << lun);
2111 return; 2133 return;
2112 } 2134 }
2113 2135
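[Two details in this hunk: ffs() returns the 1-based index of the lowest set bit, so ffs(target_mask) - 1 maps the one-hot ID mask back to a target number; and the busy bit is cleared only when do_abort() returns 0, which by the driver's convention indicates the ABORT message actually went out on the bus.

    /* ffs(0x20) == 6, so this yields target 5 for mask 0x20. */
    int target = ffs(target_mask) - 1;
]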
@@ -2272,15 +2294,16 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
2272 if (list_del_cmd(&hostdata->autosense, cmd)) { 2294 if (list_del_cmd(&hostdata->autosense, cmd)) {
2273 dsprintk(NDEBUG_ABORT, instance, 2295 dsprintk(NDEBUG_ABORT, instance,
2274 "abort: removed %p from sense queue\n", cmd); 2296 "abort: removed %p from sense queue\n", cmd);
2275 set_host_byte(cmd, DID_ERROR);
2276 complete_cmd(instance, cmd); 2297 complete_cmd(instance, cmd);
2277 } 2298 }
2278 2299
2279out: 2300out:
2280 if (result == FAILED) 2301 if (result == FAILED)
2281 dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd); 2302 dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
2282 else 2303 else {
2304 hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
2283 dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd); 2305 dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
2306 }
2284 2307
2285 queue_work(hostdata->work_q, &hostdata->main_task); 2308 queue_work(hostdata->work_q, &hostdata->main_task);
2286 maybe_release_dma_irq(instance); 2309 maybe_release_dma_irq(instance);
@@ -2290,31 +2313,12 @@ out:
2290} 2313}
2291 2314
2292 2315
2293/** 2316static void bus_reset_cleanup(struct Scsi_Host *instance)
2294 * NCR5380_host_reset - reset the SCSI host
2295 * @cmd: SCSI command undergoing EH
2296 *
2297 * Returns SUCCESS
2298 */
2299
2300static int NCR5380_host_reset(struct scsi_cmnd *cmd)
2301{ 2317{
2302 struct Scsi_Host *instance = cmd->device->host;
2303 struct NCR5380_hostdata *hostdata = shost_priv(instance); 2318 struct NCR5380_hostdata *hostdata = shost_priv(instance);
2304 int i; 2319 int i;
2305 unsigned long flags;
2306 struct NCR5380_cmd *ncmd; 2320 struct NCR5380_cmd *ncmd;
2307 2321
2308 spin_lock_irqsave(&hostdata->lock, flags);
2309
2310#if (NDEBUG & NDEBUG_ANY)
2311 scmd_printk(KERN_INFO, cmd, __func__);
2312#endif
2313 NCR5380_dprint(NDEBUG_ANY, instance);
2314 NCR5380_dprint_phase(NDEBUG_ANY, instance);
2315
2316 do_reset(instance);
2317
2318 /* reset NCR registers */ 2322 /* reset NCR registers */
2319 NCR5380_write(MODE_REG, MR_BASE); 2323 NCR5380_write(MODE_REG, MR_BASE);
2320 NCR5380_write(TARGET_COMMAND_REG, 0); 2324 NCR5380_write(TARGET_COMMAND_REG, 0);
@@ -2326,11 +2330,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
2326 * commands! 2330 * commands!
2327 */ 2331 */
2328 2332
2329 if (list_del_cmd(&hostdata->unissued, cmd)) {
2330 cmd->result = DID_RESET << 16;
2331 cmd->scsi_done(cmd);
2332 }
2333
2334 if (hostdata->selecting) { 2333 if (hostdata->selecting) {
2335 hostdata->selecting->result = DID_RESET << 16; 2334 hostdata->selecting->result = DID_RESET << 16;
2336 complete_cmd(instance, hostdata->selecting); 2335 complete_cmd(instance, hostdata->selecting);
@@ -2348,7 +2347,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
2348 list_for_each_entry(ncmd, &hostdata->autosense, list) { 2347 list_for_each_entry(ncmd, &hostdata->autosense, list) {
2349 struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); 2348 struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
2350 2349
2351 set_host_byte(cmd, DID_RESET);
2352 cmd->scsi_done(cmd); 2350 cmd->scsi_done(cmd);
2353 } 2351 }
2354 INIT_LIST_HEAD(&hostdata->autosense); 2352 INIT_LIST_HEAD(&hostdata->autosense);
@@ -2365,6 +2363,41 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
2365 2363
2366 queue_work(hostdata->work_q, &hostdata->main_task); 2364 queue_work(hostdata->work_q, &hostdata->main_task);
2367 maybe_release_dma_irq(instance); 2365 maybe_release_dma_irq(instance);
2366}
2367
2368/**
2369 * NCR5380_host_reset - reset the SCSI host
2370 * @cmd: SCSI command undergoing EH
2371 *
2372 * Returns SUCCESS
2373 */
2374
2375static int NCR5380_host_reset(struct scsi_cmnd *cmd)
2376{
2377 struct Scsi_Host *instance = cmd->device->host;
2378 struct NCR5380_hostdata *hostdata = shost_priv(instance);
2379 unsigned long flags;
2380 struct NCR5380_cmd *ncmd;
2381
2382 spin_lock_irqsave(&hostdata->lock, flags);
2383
2384#if (NDEBUG & NDEBUG_ANY)
2385 shost_printk(KERN_INFO, instance, __func__);
2386#endif
2387 NCR5380_dprint(NDEBUG_ANY, instance);
2388 NCR5380_dprint_phase(NDEBUG_ANY, instance);
2389
2390 list_for_each_entry(ncmd, &hostdata->unissued, list) {
2391 struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
2392
2393 scmd->result = DID_RESET << 16;
2394 scmd->scsi_done(scmd);
2395 }
2396 INIT_LIST_HEAD(&hostdata->unissued);
2397
2398 do_reset(instance);
2399 bus_reset_cleanup(instance);
2400
2368 spin_unlock_irqrestore(&hostdata->lock, flags); 2401 spin_unlock_irqrestore(&hostdata->lock, flags);
2369 2402
2370 return SUCCESS; 2403 return SUCCESS;
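[The refactor splits the old monolithic NCR5380_host_reset() in two. Commands still on the unissued queue never touched the bus, so the EH handler now completes them with DID_RESET directly; bus_reset_cleanup() handles everything tied to bus state (chip registers, the selecting/connected commands, the disconnected and autosense lists, the busy[] bitmap). A simplified sketch of the resulting order of operations, all under the host lock (assumption: debug printing omitted):

    spin_lock_irqsave(&hostdata->lock, flags);
    /* 1. fail queued-but-unissued commands with DID_RESET   */
    /* 2. do_reset(instance): pulse RST on the SCSI bus      */
    /* 3. bus_reset_cleanup(instance): reset chip registers,
     *    reap all in-flight per-bus state                   */
    spin_unlock_irqrestore(&hostdata->lock, flags);
]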
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 31096a0b0fdd..efca509b92b0 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -275,7 +275,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
275static void NCR5380_main(struct work_struct *work); 275static void NCR5380_main(struct work_struct *work);
276static const char *NCR5380_info(struct Scsi_Host *instance); 276static const char *NCR5380_info(struct Scsi_Host *instance);
277static void NCR5380_reselect(struct Scsi_Host *instance); 277static void NCR5380_reselect(struct Scsi_Host *instance);
278static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); 278static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
279static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); 279static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
280static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); 280static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
281static int NCR5380_poll_politely2(struct NCR5380_hostdata *, 281static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 23b17621b6d2..00072ed9540b 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1094,7 +1094,7 @@ static int inia100_probe_one(struct pci_dev *pdev,
1094 1094
1095 if (pci_enable_device(pdev)) 1095 if (pci_enable_device(pdev))
1096 goto out; 1096 goto out;
1097 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 1097 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
1098 printk(KERN_WARNING "Unable to set 32bit DMA " 1098 printk(KERN_WARNING "Unable to set 32bit DMA "
1099 "on inia100 adapter, ignoring.\n"); 1099 "on inia100 adapter, ignoring.\n");
1100 goto out_disable_device; 1100 goto out_disable_device;
@@ -1124,7 +1124,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
1124 1124
1125 /* Get total memory needed for SCB */ 1125 /* Get total memory needed for SCB */
1126 sz = ORC_MAXQUEUE * sizeof(struct orc_scb); 1126 sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
1127 host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys); 1127 host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
1128 GFP_KERNEL);
1128 if (!host->scb_virt) { 1129 if (!host->scb_virt) {
1129 printk("inia100: SCB memory allocation error\n"); 1130 printk("inia100: SCB memory allocation error\n");
1130 goto out_host_put; 1131 goto out_host_put;
@@ -1132,7 +1133,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
1132 1133
1133 /* Get total memory needed for ESCB */ 1134 /* Get total memory needed for ESCB */
1134 sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); 1135 sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
1135 host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys); 1136 host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
1137 GFP_KERNEL);
1136 if (!host->escb_virt) { 1138 if (!host->escb_virt) {
1137 printk("inia100: ESCB memory allocation error\n"); 1139 printk("inia100: ESCB memory allocation error\n");
1138 goto out_free_scb_array; 1140 goto out_free_scb_array;
@@ -1177,10 +1179,12 @@ static int inia100_probe_one(struct pci_dev *pdev,
1177out_free_irq: 1179out_free_irq:
1178 free_irq(shost->irq, shost); 1180 free_irq(shost->irq, shost);
1179out_free_escb_array: 1181out_free_escb_array:
1180 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb), 1182 dma_free_coherent(&pdev->dev,
1183 ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
1181 host->escb_virt, host->escb_phys); 1184 host->escb_virt, host->escb_phys);
1182out_free_scb_array: 1185out_free_scb_array:
1183 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb), 1186 dma_free_coherent(&pdev->dev,
1187 ORC_MAXQUEUE * sizeof(struct orc_scb),
1184 host->scb_virt, host->scb_phys); 1188 host->scb_virt, host->scb_phys);
1185out_host_put: 1189out_host_put:
1186 scsi_host_put(shost); 1190 scsi_host_put(shost);
@@ -1200,9 +1204,11 @@ static void inia100_remove_one(struct pci_dev *pdev)
1200 scsi_remove_host(shost); 1204 scsi_remove_host(shost);
1201 1205
1202 free_irq(shost->irq, shost); 1206 free_irq(shost->irq, shost);
1203 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb), 1207 dma_free_coherent(&pdev->dev,
1208 ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
1204 host->escb_virt, host->escb_phys); 1209 host->escb_virt, host->escb_phys);
1205 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb), 1210 dma_free_coherent(&pdev->dev,
1211 ORC_MAXQUEUE * sizeof(struct orc_scb),
1206 host->scb_virt, host->scb_phys); 1212 host->scb_virt, host->scb_phys);
1207 release_region(shost->io_port, 256); 1213 release_region(shost->io_port, 256);
1208 1214
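[The a100u2w hunks are representative of the tree-wide conversion in this pull: the pci_* DMA wrappers are thin shims over the generic DMA API, so each call rewrites mechanically against &pdev->dev, with the allocation GFP flag becoming explicit:

    /* pci_set_dma_mask(pdev, m)          -> dma_set_mask(&pdev->dev, m)
     * pci_zalloc_consistent(pdev, sz, &h)
     *     -> dma_zalloc_coherent(&pdev->dev, sz, &h, GFP_KERNEL)
     * pci_free_consistent(pdev, sz, v, h)
     *     -> dma_free_coherent(&pdev->dev, sz, v, h)
     */
]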
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 6e356325d8d9..bd7f352c28f3 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3480,7 +3480,6 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
3480 3480
3481static void aac_srb_callback(void *context, struct fib * fibptr) 3481static void aac_srb_callback(void *context, struct fib * fibptr)
3482{ 3482{
3483 struct aac_dev *dev;
3484 struct aac_srb_reply *srbreply; 3483 struct aac_srb_reply *srbreply;
3485 struct scsi_cmnd *scsicmd; 3484 struct scsi_cmnd *scsicmd;
3486 3485
@@ -3491,8 +3490,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
3491 3490
3492 BUG_ON(fibptr == NULL); 3491 BUG_ON(fibptr == NULL);
3493 3492
3494 dev = fibptr->dev;
3495
3496 srbreply = (struct aac_srb_reply *) fib_data(fibptr); 3493 srbreply = (struct aac_srb_reply *) fib_data(fibptr);
3497 3494
3498 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ 3495 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
@@ -3921,13 +3918,11 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
3921 3918
3922static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg) 3919static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
3923{ 3920{
3924 struct aac_dev *dev;
3925 unsigned long byte_count = 0; 3921 unsigned long byte_count = 0;
3926 int nseg; 3922 int nseg;
3927 struct scatterlist *sg; 3923 struct scatterlist *sg;
3928 int i; 3924 int i;
3929 3925
3930 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3931 // Get rid of old data 3926 // Get rid of old data
3932 psg->count = 0; 3927 psg->count = 0;
3933 psg->sg[0].addr = 0; 3928 psg->sg[0].addr = 0;
@@ -3963,14 +3958,12 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
3963 3958
3964static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg) 3959static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
3965{ 3960{
3966 struct aac_dev *dev;
3967 unsigned long byte_count = 0; 3961 unsigned long byte_count = 0;
3968 u64 addr; 3962 u64 addr;
3969 int nseg; 3963 int nseg;
3970 struct scatterlist *sg; 3964 struct scatterlist *sg;
3971 int i; 3965 int i;
3972 3966
3973 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
3974 // Get rid of old data 3967 // Get rid of old data
3975 psg->count = 0; 3968 psg->count = 0;
3976 psg->sg[0].addr[0] = 0; 3969 psg->sg[0].addr[0] = 0;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6e1b022a823d..1e77d96a18f2 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2586,9 +2586,7 @@ int aac_acquire_irq(struct aac_dev *dev)
2586void aac_free_irq(struct aac_dev *dev) 2586void aac_free_irq(struct aac_dev *dev)
2587{ 2587{
2588 int i; 2588 int i;
2589 int cpu;
2590 2589
2591 cpu = cpumask_first(cpu_online_mask);
2592 if (aac_is_src(dev)) { 2590 if (aac_is_src(dev)) {
2593 if (dev->max_msix > 1) { 2591 if (dev->max_msix > 1) {
2594 for (i = 0; i < dev->max_msix; i++) 2592 for (i = 0; i < dev->max_msix; i++)
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 713f69033f20..223ef6f4e258 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -5949,7 +5949,6 @@ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
5949static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp) 5949static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
5950{ 5950{
5951 struct asc_board *boardp = adv_dvc_varp->drv_ptr; 5951 struct asc_board *boardp = adv_dvc_varp->drv_ptr;
5952 u32 srb_tag;
5953 adv_req_t *reqp; 5952 adv_req_t *reqp;
5954 adv_sgblk_t *sgblkp; 5953 adv_sgblk_t *sgblkp;
5955 struct scsi_cmnd *scp; 5954 struct scsi_cmnd *scp;
@@ -5965,7 +5964,6 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
5965 * completed. The adv_req_t structure actually contains the 5964 * completed. The adv_req_t structure actually contains the
5966 * completed ADV_SCSI_REQ_Q structure. 5965 * completed ADV_SCSI_REQ_Q structure.
5967 */ 5966 */
5968 srb_tag = le32_to_cpu(scsiqp->srb_tag);
5969 scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag); 5967 scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag);
5970 5968
5971 ASC_DBG(1, "scp 0x%p\n", scp); 5969 ASC_DBG(1, "scp 0x%p\n", scp);
@@ -6448,7 +6446,7 @@ static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
6448 sdtr_data = 6446 sdtr_data =
6449 AscCalSDTRData(asc_dvc, ext_msg.xfer_period, 6447 AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
6450 ext_msg.req_ack_offset); 6448 ext_msg.req_ack_offset);
6451 if ((sdtr_data == 0xFF)) { 6449 if (sdtr_data == 0xFF) {
6452 6450
6453 q_cntl |= QC_MSG_OUT; 6451 q_cntl |= QC_MSG_OUT;
6454 asc_dvc->init_sdtr &= ~target_id; 6452 asc_dvc->init_sdtr &= ~target_id;
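[The aacraid and advansys hunks above are compiler-warning cleanups rather than behavior changes: locals that are assigned but never read are deleted, and the doubled parentheses in the advansys comparison go away because ((a == b)) trips clang's -Wparentheses-equality (extra parentheses around an equality usually signal a mistyped assignment test). The adv_isr_callback() change is typical:

    /* Removed dead store:
     *   srb_tag = le32_to_cpu(scsiqp->srb_tag);   (never read again)
     * The lookup that actually consumes the tag remains: */
    scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag);
]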
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
index 5000bd69c13f..176704b24e6a 100644
--- a/drivers/scsi/aic7xxx/aic7770.c
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -42,15 +42,9 @@
42 * $FreeBSD$ 42 * $FreeBSD$
43 */ 43 */
44 44
45#ifdef __linux__
46#include "aic7xxx_osm.h" 45#include "aic7xxx_osm.h"
47#include "aic7xxx_inline.h" 46#include "aic7xxx_inline.h"
48#include "aic7xxx_93cx6.h" 47#include "aic7xxx_93cx6.h"
49#else
50#include <dev/aic7xxx/aic7xxx_osm.h>
51#include <dev/aic7xxx/aic7xxx_inline.h>
52#include <dev/aic7xxx/aic7xxx_93cx6.h>
53#endif
54 48
55#define ID_AIC7770 0x04907770 49#define ID_AIC7770 0x04907770
56#define ID_AHA_274x 0x04907771 50#define ID_AHA_274x 0x04907771
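[This is the first of a long run of identical hunks across the aic7xxx/aic79xx files and the aicasm tool: the driver began life as source shared with FreeBSD, so every include was selected per-OS. In-kernel builds always define __linux__, which leaves the #else branch as dead code:

    /* Before (the FreeBSD branch never compiled in-kernel): */
    #ifdef __linux__
    #include "aic7xxx_osm.h"
    #else
    #include <dev/aic7xxx/aic7xxx_osm.h>
    #endif

    /* After: */
    #include "aic7xxx_osm.h"

The same reasoning removes the #ifndef __linux__ blocks (bus_dmamap_t fields, the ahd/ahc DMA tag setup) in the hunks that follow.]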
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 31f2bb9d7146..9a515551641c 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -607,9 +607,6 @@ struct scb {
607 ahd_io_ctx_t io_ctx; 607 ahd_io_ctx_t io_ctx;
608 struct ahd_softc *ahd_softc; 608 struct ahd_softc *ahd_softc;
609 scb_flag flags; 609 scb_flag flags;
610#ifndef __linux__
611 bus_dmamap_t dmamap;
612#endif
613 struct scb_platform_data *platform_data; 610 struct scb_platform_data *platform_data;
614 struct map_node *hscb_map; 611 struct map_node *hscb_map;
615 struct map_node *sg_map; 612 struct map_node *sg_map;
@@ -1056,9 +1053,6 @@ struct ahd_completion
1056struct ahd_softc { 1053struct ahd_softc {
1057 bus_space_tag_t tags[2]; 1054 bus_space_tag_t tags[2];
1058 bus_space_handle_t bshs[2]; 1055 bus_space_handle_t bshs[2];
1059#ifndef __linux__
1060 bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
1061#endif
1062 struct scb_data scb_data; 1056 struct scb_data scb_data;
1063 1057
1064 struct hardware_scb *next_queued_hscb; 1058 struct hardware_scb *next_queued_hscb;
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 2d82ec85753e..9ee75c9a9aa1 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -40,16 +40,9 @@
40 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $ 40 * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
41 */ 41 */
42 42
43#ifdef __linux__
44#include "aic79xx_osm.h" 43#include "aic79xx_osm.h"
45#include "aic79xx_inline.h" 44#include "aic79xx_inline.h"
46#include "aicasm/aicasm_insformat.h" 45#include "aicasm/aicasm_insformat.h"
47#else
48#include <dev/aic7xxx/aic79xx_osm.h>
49#include <dev/aic7xxx/aic79xx_inline.h>
50#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
51#endif
52
53 46
54/***************************** Lookup Tables **********************************/ 47/***************************** Lookup Tables **********************************/
55static const char *const ahd_chip_names[] = 48static const char *const ahd_chip_names[] =
@@ -59,7 +52,6 @@ static const char *const ahd_chip_names[] =
59 "aic7902", 52 "aic7902",
60 "aic7901A" 53 "aic7901A"
61}; 54};
62static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
63 55
64/* 56/*
65 * Hardware error codes. 57 * Hardware error codes.
@@ -6172,17 +6164,11 @@ ahd_free(struct ahd_softc *ahd)
6172 case 2: 6164 case 2:
6173 ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat); 6165 ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
6174 case 1: 6166 case 1:
6175#ifndef __linux__
6176 ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
6177#endif
6178 break; 6167 break;
6179 case 0: 6168 case 0:
6180 break; 6169 break;
6181 } 6170 }
6182 6171
6183#ifndef __linux__
6184 ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
6185#endif
6186 ahd_platform_free(ahd); 6172 ahd_platform_free(ahd);
6187 ahd_fini_scbdata(ahd); 6173 ahd_fini_scbdata(ahd);
6188 for (i = 0; i < AHD_NUM_TARGETS; i++) { 6174 for (i = 0; i < AHD_NUM_TARGETS; i++) {
@@ -6934,9 +6920,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6934 for (i = 0; i < newcount; i++) { 6920 for (i = 0; i < newcount; i++) {
6935 struct scb_platform_data *pdata; 6921 struct scb_platform_data *pdata;
6936 u_int col_tag; 6922 u_int col_tag;
6937#ifndef __linux__
6938 int error;
6939#endif
6940 6923
6941 next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC); 6924 next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
6942 if (next_scb == NULL) 6925 if (next_scb == NULL)
@@ -6970,15 +6953,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
6970 next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg); 6953 next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
6971 next_scb->ahd_softc = ahd; 6954 next_scb->ahd_softc = ahd;
6972 next_scb->flags = SCB_FLAG_NONE; 6955 next_scb->flags = SCB_FLAG_NONE;
6973#ifndef __linux__
6974 error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
6975 &next_scb->dmamap);
6976 if (error != 0) {
6977 kfree(next_scb);
6978 kfree(pdata);
6979 break;
6980 }
6981#endif
6982 next_scb->hscb->tag = ahd_htole16(scb_data->numscbs); 6956 next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
6983 col_tag = scb_data->numscbs ^ 0x100; 6957 col_tag = scb_data->numscbs ^ 0x100;
6984 next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag); 6958 next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
@@ -7091,24 +7065,6 @@ ahd_init(struct ahd_softc *ahd)
7091 if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0) 7065 if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
7092 ahd->features &= ~AHD_TARGETMODE; 7066 ahd->features &= ~AHD_TARGETMODE;
7093 7067
7094#ifndef __linux__
7095 /* DMA tag for mapping buffers into device visible space. */
7096 if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
7097 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
7098 /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
7099 ? (dma_addr_t)0x7FFFFFFFFFULL
7100 : BUS_SPACE_MAXADDR_32BIT,
7101 /*highaddr*/BUS_SPACE_MAXADDR,
7102 /*filter*/NULL, /*filterarg*/NULL,
7103 /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
7104 /*nsegments*/AHD_NSEG,
7105 /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
7106 /*flags*/BUS_DMA_ALLOCNOW,
7107 &ahd->buffer_dmat) != 0) {
7108 return (ENOMEM);
7109 }
7110#endif
7111
7112 ahd->init_level++; 7068 ahd->init_level++;
7113 7069
7114 /* 7070 /*
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index cc9bd26f5d1a..8397ae93f7dd 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -41,14 +41,8 @@
41 * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $ 41 * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $
42 */ 42 */
43 43
44#ifdef __linux__
45#include "aic79xx_osm.h" 44#include "aic79xx_osm.h"
46#include "aic79xx_inline.h" 45#include "aic79xx_inline.h"
47#else
48#include <dev/aic7xxx/aic79xx_osm.h>
49#include <dev/aic7xxx/aic79xx_inline.h>
50#endif
51
52#include "aic79xx_pci.h" 46#include "aic79xx_pci.h"
53 47
54static inline uint64_t 48static inline uint64_t
@@ -294,13 +288,11 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
294int 288int
295ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry) 289ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
296{ 290{
297 struct scb_data *shared_scb_data;
298 u_int command; 291 u_int command;
299 uint32_t devconfig; 292 uint32_t devconfig;
300 uint16_t subvendor; 293 uint16_t subvendor;
301 int error; 294 int error;
302 295
303 shared_scb_data = NULL;
304 ahd->description = entry->name; 296 ahd->description = entry->name;
305 /* 297 /*
306 * Record if this is an HP board. 298 * Record if this is an HP board.
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 4ce4e903a759..5614921b4041 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -568,9 +568,6 @@ struct scb {
568 ahc_io_ctx_t io_ctx; 568 ahc_io_ctx_t io_ctx;
569 struct ahc_softc *ahc_softc; 569 struct ahc_softc *ahc_softc;
570 scb_flag flags; 570 scb_flag flags;
571#ifndef __linux__
572 bus_dmamap_t dmamap;
573#endif
574 struct scb_platform_data *platform_data; 571 struct scb_platform_data *platform_data;
575 struct sg_map_node *sg_map; 572 struct sg_map_node *sg_map;
576 struct ahc_dma_seg *sg_list; 573 struct ahc_dma_seg *sg_list;
@@ -906,9 +903,6 @@ typedef void ahc_callback_t (void *);
906struct ahc_softc { 903struct ahc_softc {
907 bus_space_tag_t tag; 904 bus_space_tag_t tag;
908 bus_space_handle_t bsh; 905 bus_space_handle_t bsh;
909#ifndef __linux__
910 bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
911#endif
912 struct scb_data *scb_data; 906 struct scb_data *scb_data;
913 907
914 struct scb *next_queued_scb; 908 struct scb *next_queued_scb;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 9e85a7ef9c8e..cc9e41967ce4 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -64,15 +64,9 @@
64 * bit to be sent from the chip. 64 * bit to be sent from the chip.
65 */ 65 */
66 66
67#ifdef __linux__
68#include "aic7xxx_osm.h" 67#include "aic7xxx_osm.h"
69#include "aic7xxx_inline.h" 68#include "aic7xxx_inline.h"
70#include "aic7xxx_93cx6.h" 69#include "aic7xxx_93cx6.h"
71#else
72#include <dev/aic7xxx/aic7xxx_osm.h>
73#include <dev/aic7xxx/aic7xxx_inline.h>
74#include <dev/aic7xxx/aic7xxx_93cx6.h>
75#endif
76 70
77/* 71/*
78 * Right now, we only have to read the SEEPROM. But we make it easier to 72 * Right now, we only have to read the SEEPROM. But we make it easier to
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 915a34f141e4..f3362f4ab16e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -40,15 +40,9 @@
40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $ 40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
41 */ 41 */
42 42
43#ifdef __linux__
44#include "aic7xxx_osm.h" 43#include "aic7xxx_osm.h"
45#include "aic7xxx_inline.h" 44#include "aic7xxx_inline.h"
46#include "aicasm/aicasm_insformat.h" 45#include "aicasm/aicasm_insformat.h"
47#else
48#include <dev/aic7xxx/aic7xxx_osm.h>
49#include <dev/aic7xxx/aic7xxx_inline.h>
50#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
51#endif
52 46
53/***************************** Lookup Tables **********************************/ 47/***************************** Lookup Tables **********************************/
54static const char *const ahc_chip_names[] = { 48static const char *const ahc_chip_names[] = {
@@ -67,7 +61,6 @@ static const char *const ahc_chip_names[] = {
67 "aic7892", 61 "aic7892",
68 "aic7899" 62 "aic7899"
69}; 63};
70static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
71 64
72/* 65/*
73 * Hardware error codes. 66 * Hardware error codes.
@@ -4509,17 +4502,11 @@ ahc_free(struct ahc_softc *ahc)
4509 case 2: 4502 case 2:
4510 ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); 4503 ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
4511 case 1: 4504 case 1:
4512#ifndef __linux__
4513 ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
4514#endif
4515 break; 4505 break;
4516 case 0: 4506 case 0:
4517 break; 4507 break;
4518 } 4508 }
4519 4509
4520#ifndef __linux__
4521 ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
4522#endif
4523 ahc_platform_free(ahc); 4510 ahc_platform_free(ahc);
4524 ahc_fini_scbdata(ahc); 4511 ahc_fini_scbdata(ahc);
4525 for (i = 0; i < AHC_NUM_TARGETS; i++) { 4512 for (i = 0; i < AHC_NUM_TARGETS; i++) {
@@ -5005,9 +4992,7 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
5005 newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); 4992 newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
5006 for (i = 0; i < newcount; i++) { 4993 for (i = 0; i < newcount; i++) {
5007 struct scb_platform_data *pdata; 4994 struct scb_platform_data *pdata;
5008#ifndef __linux__ 4995
5009 int error;
5010#endif
5011 pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); 4996 pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
5012 if (pdata == NULL) 4997 if (pdata == NULL)
5013 break; 4998 break;
@@ -5021,12 +5006,6 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
5021 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); 5006 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
5022 next_scb->ahc_softc = ahc; 5007 next_scb->ahc_softc = ahc;
5023 next_scb->flags = SCB_FREE; 5008 next_scb->flags = SCB_FREE;
5024#ifndef __linux__
5025 error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
5026 &next_scb->dmamap);
5027 if (error != 0)
5028 break;
5029#endif
5030 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; 5009 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
5031 next_scb->hscb->tag = ahc->scb_data->numscbs; 5010 next_scb->hscb->tag = ahc->scb_data->numscbs;
5032 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, 5011 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
@@ -5325,24 +5304,6 @@ ahc_init(struct ahc_softc *ahc)
5325 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) 5304 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
5326 ahc->features &= ~AHC_TARGETMODE; 5305 ahc->features &= ~AHC_TARGETMODE;
5327 5306
5328#ifndef __linux__
5329 /* DMA tag for mapping buffers into device visible space. */
5330 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
5331 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
5332 /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
5333 ? (dma_addr_t)0x7FFFFFFFFFULL
5334 : BUS_SPACE_MAXADDR_32BIT,
5335 /*highaddr*/BUS_SPACE_MAXADDR,
5336 /*filter*/NULL, /*filterarg*/NULL,
5337 /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
5338 /*nsegments*/AHC_NSEG,
5339 /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
5340 /*flags*/BUS_DMA_ALLOCNOW,
5341 &ahc->buffer_dmat) != 0) {
5342 return (ENOMEM);
5343 }
5344#endif
5345
5346 ahc->init_level++; 5307 ahc->init_level++;
5347 5308
5348 /* 5309 /*
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 673e826d7adb..656f680c7802 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -42,16 +42,9 @@
42 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $ 42 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $
43 */ 43 */
44 44
45#ifdef __linux__
46#include "aic7xxx_osm.h" 45#include "aic7xxx_osm.h"
47#include "aic7xxx_inline.h" 46#include "aic7xxx_inline.h"
48#include "aic7xxx_93cx6.h" 47#include "aic7xxx_93cx6.h"
49#else
50#include <dev/aic7xxx/aic7xxx_osm.h>
51#include <dev/aic7xxx/aic7xxx_inline.h>
52#include <dev/aic7xxx/aic7xxx_93cx6.h>
53#endif
54
55#include "aic7xxx_pci.h" 48#include "aic7xxx_pci.h"
56 49
57static inline uint64_t 50static inline uint64_t
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.h b/drivers/scsi/aic7xxx/aicasm/aicasm.h
index 51678dd46ff7..716a2aefc925 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.h
@@ -42,11 +42,7 @@
42 * $FreeBSD$ 42 * $FreeBSD$
43 */ 43 */
44 44
45#ifdef __linux__
46#include "../queue.h" 45#include "../queue.h"
47#else
48#include <sys/queue.h>
49#endif
50 46
51#ifndef TRUE 47#ifndef TRUE
52#define TRUE 1 48#define TRUE 1
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index f1586a437906..924d55a8acbf 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -52,11 +52,7 @@
52#include <string.h> 52#include <string.h>
53#include <sysexits.h> 53#include <sysexits.h>
54 54
55#ifdef __linux__
56#include "../queue.h" 55#include "../queue.h"
57#else
58#include <sys/queue.h>
59#endif
60 56
61#include "aicasm.h" 57#include "aicasm.h"
62#include "aicasm_symbol.h" 58#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
index 708326df0766..8c0479865f04 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -52,11 +52,7 @@
52#include <string.h> 52#include <string.h>
53#include <sysexits.h> 53#include <sysexits.h>
54 54
55#ifdef __linux__
56#include "../queue.h" 55#include "../queue.h"
57#else
58#include <sys/queue.h>
59#endif
60 56
61#include "aicasm.h" 57#include "aicasm.h"
62#include "aicasm_symbol.h" 58#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
index c0457b8c3b77..98e9959c6907 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
@@ -51,11 +51,7 @@
51#include <stdio.h> 51#include <stdio.h>
52#include <string.h> 52#include <string.h>
53#include <sysexits.h> 53#include <sysexits.h>
54#ifdef __linux__
55#include "../queue.h" 54#include "../queue.h"
56#else
57#include <sys/queue.h>
58#endif
59 55
60#include "aicasm.h" 56#include "aicasm.h"
61#include "aicasm_symbol.h" 57#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 93c8667cd704..c78d4f68eea5 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -51,11 +51,7 @@
51#include <stdio.h> 51#include <stdio.h>
52#include <string.h> 52#include <string.h>
53#include <sysexits.h> 53#include <sysexits.h>
54#ifdef __linux__
55#include "../queue.h" 54#include "../queue.h"
56#else
57#include <sys/queue.h>
58#endif
59 55
60#include "aicasm.h" 56#include "aicasm.h"
61#include "aicasm_symbol.h" 57#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index 232aff1fe784..975fcfcc0d8f 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -44,11 +44,7 @@
44 44
45#include <sys/types.h> 45#include <sys/types.h>
46 46
47#ifdef __linux__
48#include "aicdb.h" 47#include "aicdb.h"
49#else
50#include <db.h>
51#endif
52#include <fcntl.h> 48#include <fcntl.h>
53#include <inttypes.h> 49#include <inttypes.h>
54#include <regex.h> 50#include <regex.h>
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 34bbcad7f83f..7bf7fd5953ac 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -42,11 +42,7 @@
42 * $FreeBSD$ 42 * $FreeBSD$
43 */ 43 */
44 44
45#ifdef __linux__
46#include "../queue.h" 45#include "../queue.h"
47#else
48#include <sys/queue.h>
49#endif
50 46
51typedef enum { 47typedef enum {
52 UNINITIALIZED, 48 UNINITIALIZED,
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 1391e5f35918..41c4d8abdd4a 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -771,13 +771,8 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
771 goto Err_remove; 771 goto Err_remove;
772 772
773 err = -ENODEV; 773 err = -ENODEV;
774 if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64)) 774 if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
775 && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64))) 775 dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
776 ;
777 else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))
778 && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)))
779 ;
780 else {
781 asd_printk("no suitable DMA mask for %s\n", pci_name(dev)); 776 asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
782 goto Err_remove; 777 goto Err_remove;
783 } 778 }
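[Beyond the wrapper swap, the aic94xx probe collapses the old four-branch mask negotiation into the standard probe-time idiom: dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, so the driver tries 64-bit, falls back to 32-bit, and errors out only if both fail:

    if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
            asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
            goto Err_remove;
    }
]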
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 22873ce8bbfa..91ea87dfb700 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -724,9 +724,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
724 switch (pd->max_sas_lrate) { 724 switch (pd->max_sas_lrate) {
725 case SAS_LINK_RATE_6_0_GBPS: 725 case SAS_LINK_RATE_6_0_GBPS:
726 *speed_mask &= ~SAS_SPEED_60_DIS; 726 *speed_mask &= ~SAS_SPEED_60_DIS;
727 /* fall through*/
727 default: 728 default:
728 case SAS_LINK_RATE_3_0_GBPS: 729 case SAS_LINK_RATE_3_0_GBPS:
729 *speed_mask &= ~SAS_SPEED_30_DIS; 730 *speed_mask &= ~SAS_SPEED_30_DIS;
731 /* fall through*/
730 case SAS_LINK_RATE_1_5_GBPS: 732 case SAS_LINK_RATE_1_5_GBPS:
731 *speed_mask &= ~SAS_SPEED_15_DIS; 733 *speed_mask &= ~SAS_SPEED_15_DIS;
732 } 734 }
@@ -734,6 +736,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
734 switch (pd->min_sas_lrate) { 736 switch (pd->min_sas_lrate) {
735 case SAS_LINK_RATE_6_0_GBPS: 737 case SAS_LINK_RATE_6_0_GBPS:
736 *speed_mask |= SAS_SPEED_30_DIS; 738 *speed_mask |= SAS_SPEED_30_DIS;
739 /* fall through*/
737 case SAS_LINK_RATE_3_0_GBPS: 740 case SAS_LINK_RATE_3_0_GBPS:
738 *speed_mask |= SAS_SPEED_15_DIS; 741 *speed_mask |= SAS_SPEED_15_DIS;
739 default: 742 default:
@@ -745,6 +748,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
745 switch (pd->max_sata_lrate) { 748 switch (pd->max_sata_lrate) {
746 case SAS_LINK_RATE_3_0_GBPS: 749 case SAS_LINK_RATE_3_0_GBPS:
747 *speed_mask &= ~SATA_SPEED_30_DIS; 750 *speed_mask &= ~SATA_SPEED_30_DIS;
751 /* fall through*/
748 default: 752 default:
749 case SAS_LINK_RATE_1_5_GBPS: 753 case SAS_LINK_RATE_1_5_GBPS:
750 *speed_mask &= ~SATA_SPEED_15_DIS; 754 *speed_mask &= ~SATA_SPEED_15_DIS;
@@ -803,6 +807,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
803 807
804 /* link reset retries, this should be nominal */ 808 /* link reset retries, this should be nominal */
805 control_phy->link_reset_retries = 10; 809 control_phy->link_reset_retries = 10;
810 /* fall through */
806 811
807 case RELEASE_SPINUP_HOLD: /* 0x02 */ 812 case RELEASE_SPINUP_HOLD: /* 0x02 */
808 /* decide the func_mask */ 813 /* decide the func_mask */
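[The added comments mark intentional case cascades for gcc's -Wimplicit-fallthrough: the compiler accepts a /* fall through */ comment (later kernels use the fallthrough pseudo-keyword) as proof the missing break is deliberate. The speed-mask switches depend on exactly this cascade, each link rate enabling itself and every slower rate:

    switch (pd->max_sas_lrate) {
    case SAS_LINK_RATE_6_0_GBPS:
            *speed_mask &= ~SAS_SPEED_60_DIS;
            /* fall through */
    default:
    case SAS_LINK_RATE_3_0_GBPS:
            *speed_mask &= ~SAS_SPEED_30_DIS;
            /* fall through */
    case SAS_LINK_RATE_1_5_GBPS:
            *speed_mask &= ~SAS_SPEED_15_DIS;
    }
]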
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index cdd4ab683be9..7fea344531f6 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -42,13 +42,13 @@ static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
42 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); 42 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
43} 43}
44 44
45/* PCI_DMA_... to our direction translation. 45/* DMA_... to our direction translation.
46 */ 46 */
47static const u8 data_dir_flags[] = { 47static const u8 data_dir_flags[] = {
48 [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ 48 [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
49 [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */ 49 [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
50 [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */ 50 [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
51 [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ 51 [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
52}; 52};
53 53
54static int asd_map_scatterlist(struct sas_task *task, 54static int asd_map_scatterlist(struct sas_task *task,
@@ -60,12 +60,12 @@ static int asd_map_scatterlist(struct sas_task *task,
60 struct scatterlist *sc; 60 struct scatterlist *sc;
61 int num_sg, res; 61 int num_sg, res;
62 62
63 if (task->data_dir == PCI_DMA_NONE) 63 if (task->data_dir == DMA_NONE)
64 return 0; 64 return 0;
65 65
66 if (task->num_scatter == 0) { 66 if (task->num_scatter == 0) {
67 void *p = task->scatter; 67 void *p = task->scatter;
68 dma_addr_t dma = pci_map_single(asd_ha->pcidev, p, 68 dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
69 task->total_xfer_len, 69 task->total_xfer_len,
70 task->data_dir); 70 task->data_dir);
71 sg_arr[0].bus_addr = cpu_to_le64((u64)dma); 71 sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
@@ -79,7 +79,7 @@ static int asd_map_scatterlist(struct sas_task *task,
79 if (sas_protocol_ata(task->task_proto)) 79 if (sas_protocol_ata(task->task_proto))
80 num_sg = task->num_scatter; 80 num_sg = task->num_scatter;
81 else 81 else
82 num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, 82 num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
83 task->num_scatter, task->data_dir); 83 task->num_scatter, task->data_dir);
84 if (num_sg == 0) 84 if (num_sg == 0)
85 return -ENOMEM; 85 return -ENOMEM;
@@ -126,8 +126,8 @@ static int asd_map_scatterlist(struct sas_task *task,
126 return 0; 126 return 0;
127err_unmap: 127err_unmap:
128 if (sas_protocol_ata(task->task_proto)) 128 if (sas_protocol_ata(task->task_proto))
129 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, 129 dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
130 task->data_dir); 130 task->num_scatter, task->data_dir);
131 return res; 131 return res;
132} 132}
133 133
@@ -136,21 +136,21 @@ static void asd_unmap_scatterlist(struct asd_ascb *ascb)
136 struct asd_ha_struct *asd_ha = ascb->ha; 136 struct asd_ha_struct *asd_ha = ascb->ha;
137 struct sas_task *task = ascb->uldd_task; 137 struct sas_task *task = ascb->uldd_task;
138 138
139 if (task->data_dir == PCI_DMA_NONE) 139 if (task->data_dir == DMA_NONE)
140 return; 140 return;
141 141
142 if (task->num_scatter == 0) { 142 if (task->num_scatter == 0) {
143 dma_addr_t dma = (dma_addr_t) 143 dma_addr_t dma = (dma_addr_t)
144 le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr); 144 le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
145 pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len, 145 dma_unmap_single(&ascb->ha->pcidev->dev, dma,
146 task->data_dir); 146 task->total_xfer_len, task->data_dir);
147 return; 147 return;
148 } 148 }
149 149
150 asd_free_coherent(asd_ha, ascb->sg_arr); 150 asd_free_coherent(asd_ha, ascb->sg_arr);
151 if (task->task_proto != SAS_PROTOCOL_STP) 151 if (task->task_proto != SAS_PROTOCOL_STP)
152 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, 152 dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
153 task->data_dir); 153 task->num_scatter, task->data_dir);
154} 154}
155 155
156/* ---------- Task complete tasklet ---------- */ 156/* ---------- Task complete tasklet ---------- */
@@ -436,10 +436,10 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
436 struct domain_device *dev = task->dev; 436 struct domain_device *dev = task->dev;
437 struct scb *scb; 437 struct scb *scb;
438 438
439 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, 439 dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
440 PCI_DMA_TODEVICE); 440 DMA_TO_DEVICE);
441 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, 441 dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
442 PCI_DMA_FROMDEVICE); 442 DMA_FROM_DEVICE);
443 443
444 scb = ascb->scb; 444 scb = ascb->scb;
445 445
@@ -471,10 +471,10 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a)
471 struct sas_task *task = a->uldd_task; 471 struct sas_task *task = a->uldd_task;
472 472
473 BUG_ON(!task); 473 BUG_ON(!task);
474 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, 474 dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
475 PCI_DMA_TODEVICE); 475 DMA_TO_DEVICE);
476 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, 476 dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
477 PCI_DMA_FROMDEVICE); 477 DMA_FROM_DEVICE);
478} 478}
479 479
480/* ---------- SSP ---------- */ 480/* ---------- SSP ---------- */
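[The aic94xx_task conversion relies on the legacy PCI direction constants having the same numeric values as enum dma_data_direction, so re-indexing data_dir_flags[] and passing task->data_dir straight through is purely mechanical:

    /* Numeric values, shown for reference:
     * PCI_DMA_BIDIRECTIONAL == DMA_BIDIRECTIONAL == 0
     * PCI_DMA_TODEVICE      == DMA_TO_DEVICE     == 1
     * PCI_DMA_FROMDEVICE    == DMA_FROM_DEVICE   == 2
     * PCI_DMA_NONE          == DMA_NONE          == 3
     */
]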
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index d81ca66e24d6..27c0a4a937d9 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -96,9 +96,7 @@ static void pci_esp_dma_drain(struct esp *esp);
96 96
97static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp) 97static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
98{ 98{
99 struct pci_dev *pdev = esp->dev; 99 return dev_get_drvdata(esp->dev);
100
101 return pci_get_drvdata(pdev);
102} 100}
103 101
104static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg) 102static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
@@ -116,30 +114,6 @@ static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
116 return iowrite32(val, esp->regs + (reg * 4UL)); 114 return iowrite32(val, esp->regs + (reg * 4UL));
117} 115}
118 116
119static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
120 size_t sz, int dir)
121{
122 return pci_map_single(esp->dev, buf, sz, dir);
123}
124
125static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
126 int num_sg, int dir)
127{
128 return pci_map_sg(esp->dev, sg, num_sg, dir);
129}
130
131static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
132 size_t sz, int dir)
133{
134 pci_unmap_single(esp->dev, addr, sz, dir);
135}
136
137static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
138 int num_sg, int dir)
139{
140 pci_unmap_sg(esp->dev, sg, num_sg, dir);
141}
142
143static int pci_esp_irq_pending(struct esp *esp) 117static int pci_esp_irq_pending(struct esp *esp)
144{ 118{
145 struct pci_esp_priv *pep = pci_esp_get_priv(esp); 119 struct pci_esp_priv *pep = pci_esp_get_priv(esp);
@@ -295,10 +269,6 @@ static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
295static const struct esp_driver_ops pci_esp_ops = { 269static const struct esp_driver_ops pci_esp_ops = {
296 .esp_write8 = pci_esp_write8, 270 .esp_write8 = pci_esp_write8,
297 .esp_read8 = pci_esp_read8, 271 .esp_read8 = pci_esp_read8,
298 .map_single = pci_esp_map_single,
299 .map_sg = pci_esp_map_sg,
300 .unmap_single = pci_esp_unmap_single,
301 .unmap_sg = pci_esp_unmap_sg,
302 .irq_pending = pci_esp_irq_pending, 272 .irq_pending = pci_esp_irq_pending,
303 .reset_dma = pci_esp_reset_dma, 273 .reset_dma = pci_esp_reset_dma,
304 .dma_drain = pci_esp_dma_drain, 274 .dma_drain = pci_esp_dma_drain,
@@ -375,18 +345,18 @@ static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
375 345
376static void dc390_check_eeprom(struct esp *esp) 346static void dc390_check_eeprom(struct esp *esp)
377{ 347{
348 struct pci_dev *pdev = to_pci_dev(esp->dev);
378 u8 EEbuf[128]; 349 u8 EEbuf[128];
379 u16 *ptr = (u16 *)EEbuf, wval = 0; 350 u16 *ptr = (u16 *)EEbuf, wval = 0;
380 int i; 351 int i;
381 352
382 dc390_read_eeprom((struct pci_dev *)esp->dev, ptr); 353 dc390_read_eeprom(pdev, ptr);
383 354
384 for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++) 355 for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
385 wval += *ptr; 356 wval += *ptr;
386 357
387 /* no Tekram EEprom found */ 358 /* no Tekram EEprom found */
388 if (wval != 0x1234) { 359 if (wval != 0x1234) {
389 struct pci_dev *pdev = esp->dev;
390 dev_printk(KERN_INFO, &pdev->dev, 360 dev_printk(KERN_INFO, &pdev->dev,
391 "No valid Tekram EEprom found\n"); 361 "No valid Tekram EEprom found\n");
392 return; 362 return;
@@ -411,7 +381,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
411 return -ENODEV; 381 return -ENODEV;
412 } 382 }
413 383
414 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 384 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
415 dev_printk(KERN_INFO, &pdev->dev, 385 dev_printk(KERN_INFO, &pdev->dev,
416 "failed to set 32bit DMA mask\n"); 386 "failed to set 32bit DMA mask\n");
417 goto fail_disable_device; 387 goto fail_disable_device;
@@ -435,7 +405,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
435 405
436 esp = shost_priv(shost); 406 esp = shost_priv(shost);
437 esp->host = shost; 407 esp->host = shost;
438 esp->dev = pdev; 408 esp->dev = &pdev->dev;
439 esp->ops = &pci_esp_ops; 409 esp->ops = &pci_esp_ops;
440 /* 410 /*
441 * The am53c974 HBA has a design flaw of generating 411 * The am53c974 HBA has a design flaw of generating
@@ -467,8 +437,8 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
467 437
468 pci_set_master(pdev); 438 pci_set_master(pdev);
469 439
470 esp->command_block = pci_alloc_consistent(pdev, 16, 440 esp->command_block = dma_alloc_coherent(&pdev->dev, 16,
471 &esp->command_block_dma); 441 &esp->command_block_dma, GFP_KERNEL);
472 if (!esp->command_block) { 442 if (!esp->command_block) {
473 dev_printk(KERN_ERR, &pdev->dev, 443 dev_printk(KERN_ERR, &pdev->dev,
474 "failed to allocate command block\n"); 444 "failed to allocate command block\n");
@@ -498,7 +468,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
498 /* Assume 40MHz clock */ 468 /* Assume 40MHz clock */
499 esp->cfreq = 40000000; 469 esp->cfreq = 40000000;
500 470
501 err = scsi_esp_register(esp, &pdev->dev); 471 err = scsi_esp_register(esp);
502 if (err) 472 if (err)
503 goto fail_free_irq; 473 goto fail_free_irq;
504 474
@@ -508,8 +478,8 @@ fail_free_irq:
508 free_irq(pdev->irq, esp); 478 free_irq(pdev->irq, esp);
509fail_unmap_command_block: 479fail_unmap_command_block:
510 pci_set_drvdata(pdev, NULL); 480 pci_set_drvdata(pdev, NULL);
511 pci_free_consistent(pdev, 16, esp->command_block, 481 dma_free_coherent(&pdev->dev, 16, esp->command_block,
512 esp->command_block_dma); 482 esp->command_block_dma);
513fail_unmap_regs: 483fail_unmap_regs:
514 pci_iounmap(pdev, esp->regs); 484 pci_iounmap(pdev, esp->regs);
515fail_release_regions: 485fail_release_regions:
@@ -532,8 +502,8 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
532 scsi_esp_unregister(esp); 502 scsi_esp_unregister(esp);
533 free_irq(pdev->irq, esp); 503 free_irq(pdev->irq, esp);
534 pci_set_drvdata(pdev, NULL); 504 pci_set_drvdata(pdev, NULL);
535 pci_free_consistent(pdev, 16, esp->command_block, 505 dma_free_coherent(&pdev->dev, 16, esp->command_block,
536 esp->command_block_dma); 506 esp->command_block_dma);
537 pci_iounmap(pdev, esp->regs); 507 pci_iounmap(pdev, esp->regs);
538 pci_release_regions(pdev); 508 pci_release_regions(pdev);
539 pci_disable_device(pdev); 509 pci_disable_device(pdev);
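[In am53c974 the per-driver .map_single/.map_sg/.unmap_* callbacks vanish from esp_driver_ops entirely, and esp->dev becomes a plain struct device, presumably so the shared esp_scsi core can call dma_map_single()/dma_map_sg() on it directly for every backend. Where PCI specifics are still needed, the driver casts back:

    struct pci_dev *pdev = to_pci_dev(esp->dev);   /* PCI-only paths */
]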
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 12316ef4c893..d4404eea24fb 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1317,13 +1317,10 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
1317 1317
1318static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error) 1318static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
1319{ 1319{
1320 int id, lun;
1321 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { 1320 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
1322 if (pCCB->startdone == ARCMSR_CCB_ABORTED) { 1321 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
1323 struct scsi_cmnd *abortcmd = pCCB->pcmd; 1322 struct scsi_cmnd *abortcmd = pCCB->pcmd;
1324 if (abortcmd) { 1323 if (abortcmd) {
1325 id = abortcmd->device->id;
1326 lun = abortcmd->device->lun;
1327 abortcmd->result |= DID_ABORT << 16; 1324 abortcmd->result |= DID_ABORT << 16;
1328 arcmsr_ccb_complete(pCCB); 1325 arcmsr_ccb_complete(pCCB);
1329 printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n", 1326 printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
@@ -1798,7 +1795,7 @@ static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
1798 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0); 1795 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1799 if (!arcmsr_hbaA_wait_msgint_ready(acb)) { 1796 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1800 printk(KERN_NOTICE 1797 printk(KERN_NOTICE
1801 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n" 1798 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1802 , acb->host->host_no); 1799 , acb->host->host_no);
1803 } 1800 }
1804} 1801}
@@ -1811,7 +1808,7 @@ static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
1811 1808
1812 if (!arcmsr_hbaB_wait_msgint_ready(acb)) { 1809 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1813 printk(KERN_NOTICE 1810 printk(KERN_NOTICE
1814 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n" 1811 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1815 , acb->host->host_no); 1812 , acb->host->host_no);
1816 } 1813 }
1817} 1814}
@@ -1824,7 +1821,7 @@ static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
1824 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); 1821 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
1825 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { 1822 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1826 printk(KERN_NOTICE 1823 printk(KERN_NOTICE
1827 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n" 1824 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
1828 , pACB->host->host_no); 1825 , pACB->host->host_no);
1829 } 1826 }
1830 return; 1827 return;
@@ -1837,7 +1834,7 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
1837 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1834 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1838 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0); 1835 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
1839 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) 1836 if (!arcmsr_hbaD_wait_msgint_ready(pACB))
1840 pr_notice("arcmsr%d: wait 'stop adapter background rebulid' " 1837 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
1841 "timeout\n", pACB->host->host_no); 1838 "timeout\n", pACB->host->host_no);
1842} 1839}
1843 1840
@@ -1850,7 +1847,7 @@ static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
1850 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; 1847 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1851 writel(pACB->out_doorbell, &reg->iobound_doorbell); 1848 writel(pACB->out_doorbell, &reg->iobound_doorbell);
1852 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { 1849 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1853 pr_notice("arcmsr%d: wait 'stop adapter background rebulid' " 1850 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
1854 "timeout\n", pACB->host->host_no); 1851 "timeout\n", pACB->host->host_no);
1855 } 1852 }
1856} 1853}
@@ -3927,7 +3924,7 @@ static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
3927 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0); 3924 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
3928 if (!arcmsr_hbaA_wait_msgint_ready(acb)) { 3925 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3929 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 3926 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
3930 rebulid' timeout \n", acb->host->host_no); 3927 rebuild' timeout \n", acb->host->host_no);
3931 } 3928 }
3932} 3929}
3933 3930
@@ -3938,7 +3935,7 @@ static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
3938 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell); 3935 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
3939 if (!arcmsr_hbaB_wait_msgint_ready(acb)) { 3936 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3940 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 3937 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
3941 rebulid' timeout \n",acb->host->host_no); 3938 rebuild' timeout \n",acb->host->host_no);
3942 } 3939 }
3943} 3940}
3944 3941
@@ -3950,7 +3947,7 @@ static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
3950 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell); 3947 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
3951 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { 3948 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
3952 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 3949 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
3953 rebulid' timeout \n", pACB->host->host_no); 3950 rebuild' timeout \n", pACB->host->host_no);
3954 } 3951 }
3955 return; 3952 return;
3956} 3953}
@@ -3963,7 +3960,7 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
3963 writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0); 3960 writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
3964 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { 3961 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
3965 pr_notice("arcmsr%d: wait 'start adapter " 3962 pr_notice("arcmsr%d: wait 'start adapter "
3966 "background rebulid' timeout\n", pACB->host->host_no); 3963 "background rebuild' timeout\n", pACB->host->host_no);
3967 } 3964 }
3968} 3965}
3969 3966
@@ -3977,7 +3974,7 @@ static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
3977 writel(pACB->out_doorbell, &pmu->iobound_doorbell); 3974 writel(pACB->out_doorbell, &pmu->iobound_doorbell);
3978 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { 3975 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
3979 pr_notice("arcmsr%d: wait 'start adapter " 3976 pr_notice("arcmsr%d: wait 'start adapter "
3980 "background rebulid' timeout \n", pACB->host->host_no); 3977 "background rebuild' timeout \n", pACB->host->host_no);
3981 } 3978 }
3982} 3979}
3983 3980
@@ -4135,9 +4132,9 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
4135 pci_read_config_byte(acb->pdev, i, &value[i]); 4132 pci_read_config_byte(acb->pdev, i, &value[i]);
4136 } 4133 }
4137 /* hardware reset signal */ 4134 /* hardware reset signal */
4138 if ((acb->dev_id == 0x1680)) { 4135 if (acb->dev_id == 0x1680) {
4139 writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]); 4136 writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
4140 } else if ((acb->dev_id == 0x1880)) { 4137 } else if (acb->dev_id == 0x1880) {
4141 do { 4138 do {
4142 count++; 4139 count++;
4143 writel(0xF, &pmuC->write_sequence); 4140 writel(0xF, &pmuC->write_sequence);
@@ -4161,7 +4158,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
4161 } while (((readl(&pmuE->host_diagnostic_3xxx) & 4158 } while (((readl(&pmuE->host_diagnostic_3xxx) &
4162 ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5)); 4159 ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
4163 writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx); 4160 writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
4164 } else if ((acb->dev_id == 0x1214)) { 4161 } else if (acb->dev_id == 0x1214) {
4165 writel(0x20, pmuD->reset_request); 4162 writel(0x20, pmuD->reset_request);
4166 } else { 4163 } else {
4167 pci_write_config_byte(acb->pdev, 0x84, 0x20); 4164 pci_write_config_byte(acb->pdev, 0x84, 0x20);
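
[Note on the arcmsr hunks above: besides correcting the long-standing "rebulid" spelling in the background-rebuild timeout messages, they drop the doubled parentheses around the device-ID comparisons. The parentheses matter to clang, whose -Wparentheses-equality reserves that spelling for the assignment-in-condition idiom. A minimal sketch of the distinction, with illustrative names only:]

	/* Sketch: why `if ((x == y))` draws a warning. Doubled parentheses
	 * are the conventional way to say "yes, I really meant an
	 * assignment in this condition", so using them on a plain
	 * comparison reads like a typo for `=`. */
	static int is_arc1680(unsigned int dev_id)
	{
		if (dev_id == 0x1680)	/* plain comparison: clean */
			return 1;
		/* if ((dev_id == 0x1680)) compiles identically but warns;
		 * if ((dev_id = probe_id())) is what the parens are for. */
		return 0;
	}
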
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 8996d2329e11..802d15018ec0 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1193,7 +1193,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
1193 for (k = 0; k < 16; k++) { 1193 for (k = 0; k < 16; k++) {
1194 if (!atp_dev->id[j][k].prd_table) 1194 if (!atp_dev->id[j][k].prd_table)
1195 continue; 1195 continue;
1196 pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus); 1196 dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
1197 atp_dev->id[j][k].prd_table = NULL; 1197 atp_dev->id[j][k].prd_table = NULL;
1198 } 1198 }
1199 } 1199 }
@@ -1205,7 +1205,7 @@ static int atp870u_init_tables(struct Scsi_Host *host)
1205 int c,k; 1205 int c,k;
1206 for(c=0;c < 2;c++) { 1206 for(c=0;c < 2;c++) {
1207 for(k=0;k<16;k++) { 1207 for(k=0;k<16;k++) {
1208 atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus)); 1208 atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL);
1209 if (!atp_dev->id[c][k].prd_table) { 1209 if (!atp_dev->id[c][k].prd_table) {
1210 printk("atp870u_init_tables fail\n"); 1210 printk("atp870u_init_tables fail\n");
1211 atp870u_free_tables(host); 1211 atp870u_free_tables(host);
@@ -1509,7 +1509,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1509 if (err) 1509 if (err)
1510 goto fail; 1510 goto fail;
1511 1511
1512 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 1512 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
1513 printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); 1513 printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
1514 err = -EIO; 1514 err = -EIO;
1515 goto disable_device; 1515 goto disable_device;
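
[Note: the atp870u hunks above show the pattern most of this series repeats. pci_alloc_consistent(), pci_free_consistent() and pci_set_dma_mask() are PCI-only wrappers; the generic DMA API takes the underlying struct device plus an explicit gfp_t. One behavioural nuance the conversion makes visible: pci_alloc_consistent() implied GFP_ATOMIC, while these call sites can sleep and now pass GFP_KERNEL. A minimal sketch of the converted shape, with placeholder names and a placeholder 1024-byte size:]

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Sketch only: one coherent buffer allocated the generic-DMA way. */
	static void *my_alloc_prd(struct pci_dev *pdev, dma_addr_t *bus)
	{
		/* was: pci_alloc_consistent(pdev, 1024, bus), which implied
		 * GFP_ATOMIC; probe context may sleep, so GFP_KERNEL is fine */
		return dma_alloc_coherent(&pdev->dev, 1024, bus, GFP_KERNEL);
	}

	static void my_free_prd(struct pci_dev *pdev, void *va, dma_addr_t bus)
	{
		/* was: pci_free_consistent(pdev, 1024, va, bus) */
		dma_free_coherent(&pdev->dev, 1024, va, bus);
	}
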
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index c10aac4dbc5e..0a6972ee94d7 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -520,7 +520,7 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
520 **/ 520 **/
521 tag_mem = &ctrl->ptag_state[tag].tag_mem_state; 521 tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
522 if (tag_mem->size) { 522 if (tag_mem->size) {
523 pci_free_consistent(ctrl->pdev, tag_mem->size, 523 dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
524 tag_mem->va, tag_mem->dma); 524 tag_mem->va, tag_mem->dma);
525 tag_mem->size = 0; 525 tag_mem->size = 0;
526 } 526 }
@@ -1269,12 +1269,12 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
1269 struct be_sge *sge = nonembedded_sgl(wrb); 1269 struct be_sge *sge = nonembedded_sgl(wrb);
1270 int status = 0; 1270 int status = 0;
1271 1271
1272 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev, 1272 nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
1273 sizeof(struct be_mgmt_controller_attributes), 1273 sizeof(struct be_mgmt_controller_attributes),
1274 &nonemb_cmd.dma); 1274 &nonemb_cmd.dma, GFP_KERNEL);
1275 if (nonemb_cmd.va == NULL) { 1275 if (nonemb_cmd.va == NULL) {
1276 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 1276 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1277 "BG_%d : pci_alloc_consistent failed in %s\n", 1277 "BG_%d : dma_alloc_coherent failed in %s\n",
1278 __func__); 1278 __func__);
1279 return -ENOMEM; 1279 return -ENOMEM;
1280 } 1280 }
@@ -1314,7 +1314,7 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
1314 "BG_%d : Failed in beiscsi_check_supported_fw\n"); 1314 "BG_%d : Failed in beiscsi_check_supported_fw\n");
1315 mutex_unlock(&ctrl->mbox_lock); 1315 mutex_unlock(&ctrl->mbox_lock);
1316 if (nonemb_cmd.va) 1316 if (nonemb_cmd.va)
1317 pci_free_consistent(ctrl->pdev, nonemb_cmd.size, 1317 dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
1318 nonemb_cmd.va, nonemb_cmd.dma); 1318 nonemb_cmd.va, nonemb_cmd.dma);
1319 1319
1320 return status; 1320 return status;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index c8f0a2144b44..96b96e2ab91a 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -771,7 +771,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
771 status = beiscsi_get_initiator_name(phba, buf, false); 771 status = beiscsi_get_initiator_name(phba, buf, false);
772 if (status < 0) { 772 if (status < 0) {
773 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 773 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
774 "BS_%d : Retreiving Initiator Name Failed\n"); 774 "BS_%d : Retrieving Initiator Name Failed\n");
775 status = 0; 775 status = 0;
776 } 776 }
777 } 777 }
@@ -1071,9 +1071,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1071 else 1071 else
1072 req_memsize = sizeof(struct tcp_connect_and_offload_in_v1); 1072 req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
1073 1073
1074 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 1074 nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
1075 req_memsize, 1075 req_memsize,
1076 &nonemb_cmd.dma); 1076 &nonemb_cmd.dma, GFP_KERNEL);
1077 if (nonemb_cmd.va == NULL) { 1077 if (nonemb_cmd.va == NULL) {
1078 1078
1079 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 1079 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -1091,7 +1091,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1091 "BS_%d : mgmt_open_connection Failed for cid=%d\n", 1091 "BS_%d : mgmt_open_connection Failed for cid=%d\n",
1092 beiscsi_ep->ep_cid); 1092 beiscsi_ep->ep_cid);
1093 1093
1094 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1094 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
1095 nonemb_cmd.va, nonemb_cmd.dma); 1095 nonemb_cmd.va, nonemb_cmd.dma);
1096 beiscsi_free_ep(beiscsi_ep); 1096 beiscsi_free_ep(beiscsi_ep);
1097 return -EAGAIN; 1097 return -EAGAIN;
@@ -1104,8 +1104,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1104 "BS_%d : mgmt_open_connection Failed"); 1104 "BS_%d : mgmt_open_connection Failed");
1105 1105
1106 if (ret != -EBUSY) 1106 if (ret != -EBUSY)
1107 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1107 dma_free_coherent(&phba->ctrl.pdev->dev,
1108 nonemb_cmd.va, nonemb_cmd.dma); 1108 nonemb_cmd.size, nonemb_cmd.va,
1109 nonemb_cmd.dma);
1109 1110
1110 beiscsi_free_ep(beiscsi_ep); 1111 beiscsi_free_ep(beiscsi_ep);
1111 return ret; 1112 return ret;
@@ -1118,7 +1119,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
1118 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1119 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1119 "BS_%d : mgmt_open_connection Success\n"); 1120 "BS_%d : mgmt_open_connection Success\n");
1120 1121
1121 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1122 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
1122 nonemb_cmd.va, nonemb_cmd.dma); 1123 nonemb_cmd.va, nonemb_cmd.dma);
1123 return 0; 1124 return 0;
1124} 1125}
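
[Note: beiscsi_open_conn() above must free its nonembedded command buffer on three separate exits: when mgmt_open_connection fails, when the completion wait fails (except for -EBUSY, where the completion handler inherits the buffer), and on success. The series keeps the driver's structure; a single unwind point is the more conventional kernel shape for this. A sketch under the same ownership rules, where issue_cmd() is hypothetical:]

	#include <linux/dma-mapping.h>

	/* Hypothetical command issuer: returns -EBUSY when the completion
	 * path takes over ownership of the buffer. */
	static int issue_cmd(void *va, dma_addr_t bus, size_t size);

	static int my_issue_nonemb_cmd(struct device *dev, size_t size)
	{
		dma_addr_t bus;
		void *va;
		int rc;

		va = dma_alloc_coherent(dev, size, &bus, GFP_KERNEL);
		if (!va)
			return -ENOMEM;

		rc = issue_cmd(va, bus, size);
		if (rc == -EBUSY)
			return rc;	/* completion handler now owns va */

		dma_free_coherent(dev, size, va, bus);	/* success or failure */
		return rc;
	}
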
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index a3019d8a7402..effb6fc95af4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -511,18 +511,9 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
511 } 511 }
512 512
513 pci_set_master(pcidev); 513 pci_set_master(pcidev);
514 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); 514 ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
515 if (ret) { 515 if (ret) {
516 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); 516 ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
517 if (ret) {
518 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
519 goto pci_region_release;
520 } else {
521 ret = pci_set_consistent_dma_mask(pcidev,
522 DMA_BIT_MASK(32));
523 }
524 } else {
525 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
526 if (ret) { 517 if (ret) {
527 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 518 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
528 goto pci_region_release; 519 goto pci_region_release;
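
[Note: the nine-line ladder deleted above existed only because the streaming and coherent masks used to be set by separate calls. dma_set_mask_and_coherent() sets both at once, so the 64-bit attempt with a 32-bit fallback becomes two calls; csio_pci_init() further down compresses the same fallback into a single condition. A sketch of that terse form for probe-time code, names illustrative:]

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Sketch: prefer 64-bit DMA addressing, fall back to 32-bit. */
	static int my_set_dma_masks(struct pci_dev *pdev)
	{
		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			return -ENODEV;
		}
		return 0;	/* whichever mask stuck first is in effect */
	}
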
@@ -550,9 +541,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
550 if (status) 541 if (status)
551 return status; 542 return status;
552 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 543 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
553 mbox_mem_alloc->va = pci_alloc_consistent(pdev, 544 mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
554 mbox_mem_alloc->size, 545 mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
555 &mbox_mem_alloc->dma);
556 if (!mbox_mem_alloc->va) { 546 if (!mbox_mem_alloc->va) {
557 beiscsi_unmap_pci_function(phba); 547 beiscsi_unmap_pci_function(phba);
558 return -ENOMEM; 548 return -ENOMEM;
@@ -1866,7 +1856,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
1866{ 1856{
1867 struct be_queue_info *cq; 1857 struct be_queue_info *cq;
1868 struct sol_cqe *sol; 1858 struct sol_cqe *sol;
1869 struct dmsg_cqe *dmsg;
1870 unsigned int total = 0; 1859 unsigned int total = 0;
1871 unsigned int num_processed = 0; 1860 unsigned int num_processed = 0;
1872 unsigned short code = 0, cid = 0; 1861 unsigned short code = 0, cid = 0;
@@ -1939,7 +1928,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
1939 "BM_%d : Received %s[%d] on CID : %d\n", 1928 "BM_%d : Received %s[%d] on CID : %d\n",
1940 cqe_desc[code], code, cid); 1929 cqe_desc[code], code, cid);
1941 1930
1942 dmsg = (struct dmsg_cqe *)sol;
1943 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 1931 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1944 break; 1932 break;
1945 case UNSOL_HDR_NOTIFY: 1933 case UNSOL_HDR_NOTIFY:
@@ -2304,11 +2292,11 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2304 2292
2305 /* Map addr only if there is data_count */ 2293 /* Map addr only if there is data_count */
2306 if (dsp_value) { 2294 if (dsp_value) {
2307 io_task->mtask_addr = pci_map_single(phba->pcidev, 2295 io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
2308 task->data, 2296 task->data,
2309 task->data_count, 2297 task->data_count,
2310 PCI_DMA_TODEVICE); 2298 DMA_TO_DEVICE);
2311 if (pci_dma_mapping_error(phba->pcidev, 2299 if (dma_mapping_error(&phba->pcidev->dev,
2312 io_task->mtask_addr)) 2300 io_task->mtask_addr))
2313 return -ENOMEM; 2301 return -ENOMEM;
2314 io_task->mtask_data_count = task->data_count; 2302 io_task->mtask_data_count = task->data_count;
@@ -2519,10 +2507,9 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2519 BEISCSI_MAX_FRAGS_INIT); 2507 BEISCSI_MAX_FRAGS_INIT);
2520 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2508 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2521 do { 2509 do {
2522 mem_arr->virtual_address = pci_alloc_consistent( 2510 mem_arr->virtual_address =
2523 phba->pcidev, 2511 dma_alloc_coherent(&phba->pcidev->dev,
2524 curr_alloc_size, 2512 curr_alloc_size, &bus_add, GFP_KERNEL);
2525 &bus_add);
2526 if (!mem_arr->virtual_address) { 2513 if (!mem_arr->virtual_address) {
2527 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2514 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2528 goto free_mem; 2515 goto free_mem;
@@ -2560,7 +2547,7 @@ free_mem:
2560 mem_descr->num_elements = j; 2547 mem_descr->num_elements = j;
2561 while ((i) || (j)) { 2548 while ((i) || (j)) {
2562 for (j = mem_descr->num_elements; j > 0; j--) { 2549 for (j = mem_descr->num_elements; j > 0; j--) {
2563 pci_free_consistent(phba->pcidev, 2550 dma_free_coherent(&phba->pcidev->dev,
2564 mem_descr->mem_array[j - 1].size, 2551 mem_descr->mem_array[j - 1].size,
2565 mem_descr->mem_array[j - 1]. 2552 mem_descr->mem_array[j - 1].
2566 virtual_address, 2553 virtual_address,
@@ -3031,9 +3018,9 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3031 eq = &phwi_context->be_eq[i].q; 3018 eq = &phwi_context->be_eq[i].q;
3032 mem = &eq->dma_mem; 3019 mem = &eq->dma_mem;
3033 phwi_context->be_eq[i].phba = phba; 3020 phwi_context->be_eq[i].phba = phba;
3034 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3021 eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
3035 num_eq_pages * PAGE_SIZE, 3022 num_eq_pages * PAGE_SIZE,
3036 &paddr); 3023 &paddr, GFP_KERNEL);
3037 if (!eq_vaddress) { 3024 if (!eq_vaddress) {
3038 ret = -ENOMEM; 3025 ret = -ENOMEM;
3039 goto create_eq_error; 3026 goto create_eq_error;
@@ -3069,7 +3056,7 @@ create_eq_error:
3069 eq = &phwi_context->be_eq[i].q; 3056 eq = &phwi_context->be_eq[i].q;
3070 mem = &eq->dma_mem; 3057 mem = &eq->dma_mem;
3071 if (mem->va) 3058 if (mem->va)
3072 pci_free_consistent(phba->pcidev, num_eq_pages 3059 dma_free_coherent(&phba->pcidev->dev, num_eq_pages
3073 * PAGE_SIZE, 3060 * PAGE_SIZE,
3074 mem->va, mem->dma); 3061 mem->va, mem->dma);
3075 } 3062 }
@@ -3097,9 +3084,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3097 pbe_eq->cq = cq; 3084 pbe_eq->cq = cq;
3098 pbe_eq->phba = phba; 3085 pbe_eq->phba = phba;
3099 mem = &cq->dma_mem; 3086 mem = &cq->dma_mem;
3100 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3087 cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
3101 num_cq_pages * PAGE_SIZE, 3088 num_cq_pages * PAGE_SIZE,
3102 &paddr); 3089 &paddr, GFP_KERNEL);
3103 if (!cq_vaddress) { 3090 if (!cq_vaddress) {
3104 ret = -ENOMEM; 3091 ret = -ENOMEM;
3105 goto create_cq_error; 3092 goto create_cq_error;
@@ -3134,7 +3121,7 @@ create_cq_error:
3134 cq = &phwi_context->be_cq[i]; 3121 cq = &phwi_context->be_cq[i];
3135 mem = &cq->dma_mem; 3122 mem = &cq->dma_mem;
3136 if (mem->va) 3123 if (mem->va)
3137 pci_free_consistent(phba->pcidev, num_cq_pages 3124 dma_free_coherent(&phba->pcidev->dev, num_cq_pages
3138 * PAGE_SIZE, 3125 * PAGE_SIZE,
3139 mem->va, mem->dma); 3126 mem->va, mem->dma);
3140 } 3127 }
@@ -3326,7 +3313,7 @@ static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3326{ 3313{
3327 struct be_dma_mem *mem = &q->dma_mem; 3314 struct be_dma_mem *mem = &q->dma_mem;
3328 if (mem->va) { 3315 if (mem->va) {
3329 pci_free_consistent(phba->pcidev, mem->size, 3316 dma_free_coherent(&phba->pcidev->dev, mem->size,
3330 mem->va, mem->dma); 3317 mem->va, mem->dma);
3331 mem->va = NULL; 3318 mem->va = NULL;
3332 } 3319 }
@@ -3341,7 +3328,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3341 q->len = len; 3328 q->len = len;
3342 q->entry_size = entry_size; 3329 q->entry_size = entry_size;
3343 mem->size = len * entry_size; 3330 mem->size = len * entry_size;
3344 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); 3331 mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
3332 GFP_KERNEL);
3345 if (!mem->va) 3333 if (!mem->va)
3346 return -ENOMEM; 3334 return -ENOMEM;
3347 return 0; 3335 return 0;
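
[Note: be_queue_alloc() above moves from pci_zalloc_consistent() to dma_zalloc_coherent(), the zeroing variant of dma_alloc_coherent() current at the time of this merge, so queues still come back zero-filled. Later kernels zero in dma_alloc_coherent() itself, and the zalloc variant was subsequently dropped. Minimal sketch, names illustrative:]

	#include <linux/dma-mapping.h>

	/* Sketch: zero-initialised coherent queue memory. */
	static void *my_alloc_queue(struct device *dev, size_t qsize,
				    dma_addr_t *bus)
	{
		/* was: pci_zalloc_consistent(pdev, qsize, bus) */
		return dma_zalloc_coherent(dev, qsize, bus, GFP_KERNEL);
	}
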
@@ -3479,7 +3467,7 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3479 &ctrl->ptag_state[tag].tag_state)) { 3467 &ctrl->ptag_state[tag].tag_state)) {
3480 ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; 3468 ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
3481 if (ptag_mem->size) { 3469 if (ptag_mem->size) {
3482 pci_free_consistent(ctrl->pdev, 3470 dma_free_coherent(&ctrl->pdev->dev,
3483 ptag_mem->size, 3471 ptag_mem->size,
3484 ptag_mem->va, 3472 ptag_mem->va,
3485 ptag_mem->dma); 3473 ptag_mem->dma);
@@ -3880,7 +3868,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
3880 j = 0; 3868 j = 0;
3881 for (i = 0; i < SE_MEM_MAX; i++) { 3869 for (i = 0; i < SE_MEM_MAX; i++) {
3882 for (j = mem_descr->num_elements; j > 0; j--) { 3870 for (j = mem_descr->num_elements; j > 0; j--) {
3883 pci_free_consistent(phba->pcidev, 3871 dma_free_coherent(&phba->pcidev->dev,
3884 mem_descr->mem_array[j - 1].size, 3872 mem_descr->mem_array[j - 1].size,
3885 mem_descr->mem_array[j - 1].virtual_address, 3873 mem_descr->mem_array[j - 1].virtual_address,
3886 (unsigned long)mem_descr->mem_array[j - 1]. 3874 (unsigned long)mem_descr->mem_array[j - 1].
@@ -4255,10 +4243,10 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4255 } 4243 }
4256 4244
4257 if (io_task->mtask_addr) { 4245 if (io_task->mtask_addr) {
4258 pci_unmap_single(phba->pcidev, 4246 dma_unmap_single(&phba->pcidev->dev,
4259 io_task->mtask_addr, 4247 io_task->mtask_addr,
4260 io_task->mtask_data_count, 4248 io_task->mtask_data_count,
4261 PCI_DMA_TODEVICE); 4249 DMA_TO_DEVICE);
4262 io_task->mtask_addr = 0; 4250 io_task->mtask_addr = 0;
4263 } 4251 }
4264} 4252}
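
[Note: the hwi_write_buffer() and beiscsi_free_mgmt_task_handles() hunks above are the streaming-DMA half of the conversion: pci_map_single()/PCI_DMA_TODEVICE become dma_map_single()/DMA_TO_DEVICE, every mapping is validated with dma_mapping_error() before the hardware sees it, and the direction passed to dma_unmap_single() must match the mapping. A sketch of that lifecycle with stand-in names:]

	#include <linux/dma-mapping.h>

	/* Sketch: map a driver buffer for device reads, verify, later unmap. */
	static int my_map_task_data(struct device *dev, void *data, size_t len,
				    dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;	/* never hand a bad handle to HW */
		*out = addr;
		return 0;
	}

	static void my_unmap_task_data(struct device *dev, dma_addr_t addr,
				       size_t len)
	{
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	}
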
@@ -4852,9 +4840,9 @@ static int beiscsi_bsg_request(struct bsg_job *job)
4852 4840
4853 switch (bsg_req->msgcode) { 4841 switch (bsg_req->msgcode) {
4854 case ISCSI_BSG_HST_VENDOR: 4842 case ISCSI_BSG_HST_VENDOR:
4855 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 4843 nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
4856 job->request_payload.payload_len, 4844 job->request_payload.payload_len,
4857 &nonemb_cmd.dma); 4845 &nonemb_cmd.dma, GFP_KERNEL);
4858 if (nonemb_cmd.va == NULL) { 4846 if (nonemb_cmd.va == NULL) {
4859 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4847 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4860 "BM_%d : Failed to allocate memory for " 4848 "BM_%d : Failed to allocate memory for "
@@ -4867,7 +4855,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
4867 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4855 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4868 "BM_%d : MBX Tag Allocation Failed\n"); 4856 "BM_%d : MBX Tag Allocation Failed\n");
4869 4857
4870 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4858 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
4871 nonemb_cmd.va, nonemb_cmd.dma); 4859 nonemb_cmd.va, nonemb_cmd.dma);
4872 return -EAGAIN; 4860 return -EAGAIN;
4873 } 4861 }
@@ -4881,7 +4869,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
4881 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 4869 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
4882 clear_bit(MCC_TAG_STATE_RUNNING, 4870 clear_bit(MCC_TAG_STATE_RUNNING,
4883 &phba->ctrl.ptag_state[tag].tag_state); 4871 &phba->ctrl.ptag_state[tag].tag_state);
4884 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4872 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
4885 nonemb_cmd.va, nonemb_cmd.dma); 4873 nonemb_cmd.va, nonemb_cmd.dma);
4886 return -EIO; 4874 return -EIO;
4887 } 4875 }
@@ -4898,7 +4886,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
4898 bsg_reply->result = status; 4886 bsg_reply->result = status;
4899 bsg_job_done(job, bsg_reply->result, 4887 bsg_job_done(job, bsg_reply->result,
4900 bsg_reply->reply_payload_rcv_len); 4888 bsg_reply->reply_payload_rcv_len);
4901 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4889 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
4902 nonemb_cmd.va, nonemb_cmd.dma); 4890 nonemb_cmd.va, nonemb_cmd.dma);
4903 if (status || extd_status) { 4891 if (status || extd_status) {
4904 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4892 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -5754,7 +5742,7 @@ free_twq:
5754 beiscsi_cleanup_port(phba); 5742 beiscsi_cleanup_port(phba);
5755 beiscsi_free_mem(phba); 5743 beiscsi_free_mem(phba);
5756free_port: 5744free_port:
5757 pci_free_consistent(phba->pcidev, 5745 dma_free_coherent(&phba->pcidev->dev,
5758 phba->ctrl.mbox_mem_alloced.size, 5746 phba->ctrl.mbox_mem_alloced.size,
5759 phba->ctrl.mbox_mem_alloced.va, 5747 phba->ctrl.mbox_mem_alloced.va,
5760 phba->ctrl.mbox_mem_alloced.dma); 5748 phba->ctrl.mbox_mem_alloced.dma);
@@ -5798,7 +5786,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
5798 5786
5799 /* ctrl uninit */ 5787 /* ctrl uninit */
5800 beiscsi_unmap_pci_function(phba); 5788 beiscsi_unmap_pci_function(phba);
5801 pci_free_consistent(phba->pcidev, 5789 dma_free_coherent(&phba->pcidev->dev,
5802 phba->ctrl.mbox_mem_alloced.size, 5790 phba->ctrl.mbox_mem_alloced.size,
5803 phba->ctrl.mbox_mem_alloced.va, 5791 phba->ctrl.mbox_mem_alloced.va,
5804 phba->ctrl.mbox_mem_alloced.dma); 5792 phba->ctrl.mbox_mem_alloced.dma);
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 8fdc07b6c686..ca7b7bbc8371 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -284,7 +284,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
284 return rc; 284 return rc;
285 285
286free_cmd: 286free_cmd:
287 pci_free_consistent(ctrl->pdev, nonemb_cmd->size, 287 dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
288 nonemb_cmd->va, nonemb_cmd->dma); 288 nonemb_cmd->va, nonemb_cmd->dma);
289 return rc; 289 return rc;
290} 290}
@@ -293,7 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
293 struct be_dma_mem *cmd, 293 struct be_dma_mem *cmd,
294 u8 subsystem, u8 opcode, u32 size) 294 u8 subsystem, u8 opcode, u32 size)
295{ 295{
296 cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma); 296 cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
297 GFP_KERNEL);
297 if (!cmd->va) { 298 if (!cmd->va) {
298 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 299 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
299 "BG_%d : Failed to allocate memory for if info\n"); 300 "BG_%d : Failed to allocate memory for if info\n");
@@ -315,7 +316,7 @@ static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
315 __beiscsi_mcc_compl_status(phba, tag, NULL, NULL); 316 __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
316 tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; 317 tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
317 if (tag_mem->size) { 318 if (tag_mem->size) {
318 pci_free_consistent(phba->pcidev, tag_mem->size, 319 dma_free_coherent(&phba->pcidev->dev, tag_mem->size,
319 tag_mem->va, tag_mem->dma); 320 tag_mem->va, tag_mem->dma);
320 tag_mem->size = 0; 321 tag_mem->size = 0;
321 } 322 }
@@ -761,7 +762,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
761 "BG_%d : Memory Allocation Failure\n"); 762 "BG_%d : Memory Allocation Failure\n");
762 763
763 /* Free the DMA memory for the IOCTL issuing */ 764 /* Free the DMA memory for the IOCTL issuing */
764 pci_free_consistent(phba->ctrl.pdev, 765 dma_free_coherent(&phba->ctrl.pdev->dev,
765 nonemb_cmd.size, 766 nonemb_cmd.size,
766 nonemb_cmd.va, 767 nonemb_cmd.va,
767 nonemb_cmd.dma); 768 nonemb_cmd.dma);
@@ -780,7 +781,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
780 ioctl_size += sizeof(struct be_cmd_req_hdr); 781 ioctl_size += sizeof(struct be_cmd_req_hdr);
781 782
782 /* Free the previous allocated DMA memory */ 783 /* Free the previous allocated DMA memory */
783 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 784 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
784 nonemb_cmd.va, 785 nonemb_cmd.va,
785 nonemb_cmd.dma); 786 nonemb_cmd.dma);
786 787
@@ -869,7 +870,7 @@ static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
869 status); 870 status);
870 boot_work = 0; 871 boot_work = 0;
871 } 872 }
872 pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size, 873 dma_free_coherent(&phba->ctrl.pdev->dev, bs->nonemb_cmd.size,
873 bs->nonemb_cmd.va, bs->nonemb_cmd.dma); 874 bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
874 bs->nonemb_cmd.va = NULL; 875 bs->nonemb_cmd.va = NULL;
875 break; 876 break;
@@ -1012,9 +1013,10 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
1012 1013
1013 nonemb_cmd = &phba->boot_struct.nonemb_cmd; 1014 nonemb_cmd = &phba->boot_struct.nonemb_cmd;
1014 nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp); 1015 nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
1015 nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev, 1016 nonemb_cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
1016 nonemb_cmd->size, 1017 nonemb_cmd->size,
1017 &nonemb_cmd->dma); 1018 &nonemb_cmd->dma,
1019 GFP_KERNEL);
1018 if (!nonemb_cmd->va) { 1020 if (!nonemb_cmd->va) {
1019 mutex_unlock(&ctrl->mbox_lock); 1021 mutex_unlock(&ctrl->mbox_lock);
1020 return 0; 1022 return 0;
@@ -1508,9 +1510,10 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
1508 return -EINVAL; 1510 return -EINVAL;
1509 1511
1510 nonemb_cmd.size = sizeof(union be_invldt_cmds_params); 1512 nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
1511 nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, 1513 nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
1512 nonemb_cmd.size, 1514 nonemb_cmd.size,
1513 &nonemb_cmd.dma); 1515 &nonemb_cmd.dma,
1516 GFP_KERNEL);
1514 if (!nonemb_cmd.va) { 1517 if (!nonemb_cmd.va) {
1515 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 1518 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
1516 "BM_%d : invldt_cmds_params alloc failed\n"); 1519 "BM_%d : invldt_cmds_params alloc failed\n");
@@ -1521,7 +1524,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
1521 wrb = alloc_mcc_wrb(phba, &tag); 1524 wrb = alloc_mcc_wrb(phba, &tag);
1522 if (!wrb) { 1525 if (!wrb) {
1523 mutex_unlock(&ctrl->mbox_lock); 1526 mutex_unlock(&ctrl->mbox_lock);
1524 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1527 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
1525 nonemb_cmd.va, nonemb_cmd.dma); 1528 nonemb_cmd.va, nonemb_cmd.dma);
1526 return -ENOMEM; 1529 return -ENOMEM;
1527 } 1530 }
@@ -1548,7 +1551,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
1548 1551
1549 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); 1552 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
1550 if (rc != -EBUSY) 1553 if (rc != -EBUSY)
1551 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1554 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
1552 nonemb_cmd.va, nonemb_cmd.dma); 1555 nonemb_cmd.va, nonemb_cmd.dma);
1553 return rc; 1556 return rc;
1554} 1557}
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 3d0c96a5c873..c19c26e0e405 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1453,7 +1453,7 @@ union bfa_aen_data_u {
1453struct bfa_aen_entry_s { 1453struct bfa_aen_entry_s {
1454 struct list_head qe; 1454 struct list_head qe;
1455 enum bfa_aen_category aen_category; 1455 enum bfa_aen_category aen_category;
1456 u32 aen_type; 1456 int aen_type;
1457 union bfa_aen_data_u aen_data; 1457 union bfa_aen_data_u aen_data;
1458 u64 aen_tv_sec; 1458 u64 aen_tv_sec;
1459 u64 aen_tv_usec; 1459 u64 aen_tv_usec;
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index d3b00a475aeb..2de5d514e99c 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -190,27 +190,6 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
190 fchs->ox_id = ox_id; 190 fchs->ox_id = ox_id;
191} 191}
192 192
193enum fc_parse_status
194fc_els_rsp_parse(struct fchs_s *fchs, int len)
195{
196 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
197 struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
198
199 len = len;
200
201 switch (els_cmd->els_code) {
202 case FC_ELS_LS_RJT:
203 if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
204 return FC_PARSE_BUSY;
205 else
206 return FC_PARSE_FAILURE;
207
208 case FC_ELS_ACC:
209 return FC_PARSE_OK;
210 }
211 return FC_PARSE_OK;
212}
213
214static void 193static void
215fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) 194fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
216{ 195{
@@ -831,18 +810,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
831} 810}
832 811
833u16 812u16
834fc_logo_rsp_parse(struct fchs_s *fchs, int len)
835{
836 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
837
838 len = len;
839 if (els_cmd->els_code != FC_ELS_ACC)
840 return FC_PARSE_FAILURE;
841
842 return FC_PARSE_OK;
843}
844
845u16
846fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, 813fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
847 wwn_t port_name, wwn_t node_name, u16 pdu_size) 814 wwn_t port_name, wwn_t node_name, u16 pdu_size)
848{ 815{
@@ -908,40 +875,6 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
908} 875}
909 876
910u16 877u16
911fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
912{
913 struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
914 int num_pages = 0;
915 int page = 0;
916
917 len = len;
918
919 if (prlo->command != FC_ELS_ACC)
920 return FC_PARSE_FAILURE;
921
922 num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
923
924 for (page = 0; page < num_pages; page++) {
925 if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
926 return FC_PARSE_FAILURE;
927
928 if (prlo->prlo_acc_params[page].opa_valid != 0)
929 return FC_PARSE_FAILURE;
930
931 if (prlo->prlo_acc_params[page].rpa_valid != 0)
932 return FC_PARSE_FAILURE;
933
934 if (prlo->prlo_acc_params[page].orig_process_assc != 0)
935 return FC_PARSE_FAILURE;
936
937 if (prlo->prlo_acc_params[page].resp_process_assc != 0)
938 return FC_PARSE_FAILURE;
939 }
940 return FC_PARSE_OK;
941
942}
943
944u16
945fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, 878fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
946 int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id) 879 int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
947{ 880{
@@ -972,47 +905,6 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
972} 905}
973 906
974u16 907u16
975fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
976{
977 struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
978 int num_pages = 0;
979 int page = 0;
980
981 len = len;
982
983 if (tprlo->command != FC_ELS_ACC)
984 return FC_PARSE_ACC_INVAL;
985
986 num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
987
988 for (page = 0; page < num_pages; page++) {
989 if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
990 return FC_PARSE_NOT_FCP;
991 if (tprlo->tprlo_acc_params[page].opa_valid != 0)
992 return FC_PARSE_OPAFLAG_INVAL;
993 if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
994 return FC_PARSE_RPAFLAG_INVAL;
995 if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
996 return FC_PARSE_OPA_INVAL;
997 if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
998 return FC_PARSE_RPA_INVAL;
999 }
1000 return FC_PARSE_OK;
1001}
1002
1003enum fc_parse_status
1004fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
1005{
1006 struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
1007
1008 len = len;
1009 if (els_cmd->els_code != FC_ELS_ACC)
1010 return FC_PARSE_FAILURE;
1011
1012 return FC_PARSE_OK;
1013}
1014
1015u16
1016fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id, 908fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
1017 u32 reason_code, u32 reason_expl) 909 u32 reason_code, u32 reason_expl)
1018{ 910{
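
[Note: all of the parsers deleted from bfa_fcbuild.c above had lost their last callers, and each carried the `len = len;` self-assignment, an old trick for silencing unused-parameter warnings. Deleting the dead functions is the right fix; where a fixed callback prototype genuinely forces an unused parameter, the kernel's attribute is the cleaner spelling. Illustrative sketch:]

	#include <linux/compiler.h>

	/* Sketch: keep a prototype-mandated parameter without `len = len;`. */
	static int my_rsp_parse(void *frame, int len __always_unused)
	{
		/* only `frame` is examined; `len` exists for the signature */
		return 0;
	}
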
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index b109a8813401..ac08d0b5b89a 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -163,7 +163,6 @@ enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
163 163
164u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id, 164u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
165 u32 s_id, u16 ox_id, u16 rrq_oxid); 165 u32 s_id, u16 ox_id, u16 rrq_oxid);
166enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
167 166
168u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id, 167u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
169 u16 ox_id, u8 *name); 168 u16 ox_id, u8 *name);
@@ -276,8 +275,6 @@ void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
276void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 275void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
277 __be16 ox_id); 276 __be16 ox_id);
278 277
279enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
280
281enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len, 278enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
282 wwn_t port_name); 279 wwn_t port_name);
283 280
@@ -297,8 +294,6 @@ u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
297u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, 294u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
298 u32 d_id, u32 s_id, __be16 ox_id, int num_pages); 295 u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
299 296
300u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
301
302u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 297u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
303 u16 ox_id, wwn_t port_name, wwn_t node_name, 298 u16 ox_id, wwn_t port_name, wwn_t node_name,
304 u16 pdu_size); 299 u16 pdu_size);
@@ -308,14 +303,10 @@ u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
308u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 303u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
309 u16 ox_id, int num_pages); 304 u16 ox_id, int num_pages);
310 305
311u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
312
313u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 306u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
314 u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type, 307 u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
315 u32 tpr_id); 308 u32 tpr_id);
316 309
317u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
318
319u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, 310u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
320 __be16 ox_id, u32 reason_code, u32 reason_expl); 311 __be16 ox_id, u32 reason_code, u32 reason_expl);
321 312
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index e61ed8dad0b4..bd4ac187fd8e 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -143,7 +143,7 @@ struct bfad_im_s {
143static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry, 143static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
144 struct bfad_s *drv, int cnt, 144 struct bfad_s *drv, int cnt,
145 enum bfa_aen_category cat, 145 enum bfa_aen_category cat,
146 enum bfa_ioc_aen_event evt) 146 int evt)
147{ 147{
148 struct timespec64 ts; 148 struct timespec64 ts;
149 149
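
[Note: the paired bfa changes above (u32 aen_type in struct bfa_aen_entry_s, int evt in bfad_im_post_vendor_event()) address one field receiving values from several unrelated AEN enum types; typing it as any single enum makes every other caller an implicit enum conversion. Plain int, with the category carried alongside, keeps all callers well-typed. A small self-contained illustration, with enums invented for the example:]

	/* Sketch: two unrelated event enums funnelled through one helper. */
	enum ioc_aen_event  { IOC_AEN_HBGOOD = 1, IOC_AEN_HBFAIL };
	enum port_aen_event { PORT_AEN_ONLINE = 1, PORT_AEN_OFFLINE };

	static void post_vendor_event(int cat, int evt)
	{
		/* `evt` is int on purpose: `cat` says which enum it is */
	}

	static void example(void)
	{
		post_vendor_event(0, IOC_AEN_HBFAIL);
		post_vendor_event(1, PORT_AEN_OFFLINE);
	}
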
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 27c8d6ba05bb..cd160f2ec75d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -432,7 +432,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
432 struct fcoe_rcv_info *fr; 432 struct fcoe_rcv_info *fr;
433 struct fcoe_percpu_s *bg; 433 struct fcoe_percpu_s *bg;
434 struct sk_buff *tmp_skb; 434 struct sk_buff *tmp_skb;
435 unsigned short oxid;
436 435
437 interface = container_of(ptype, struct bnx2fc_interface, 436 interface = container_of(ptype, struct bnx2fc_interface,
438 fcoe_packet_type); 437 fcoe_packet_type);
@@ -466,8 +465,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
466 skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); 465 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
467 fh = (struct fc_frame_header *) skb_transport_header(skb); 466 fh = (struct fc_frame_header *) skb_transport_header(skb);
468 467
469 oxid = ntohs(fh->fh_ox_id);
470
471 fr = fcoe_dev_from_skb(skb); 468 fr = fcoe_dev_from_skb(skb);
472 fr->fr_dev = lport; 469 fr->fr_dev = lport;
473 470
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 66b230bee7bc..1a458ce08210 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -210,11 +210,8 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
210 pci_set_master(pdev); 210 pci_set_master(pdev);
211 pci_try_set_mwi(pdev); 211 pci_try_set_mwi(pdev);
212 212
213 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 213 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
214 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 214 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
215 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
216 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
217 } else {
218 dev_err(&pdev->dev, "No suitable DMA available.\n"); 215 dev_err(&pdev->dev, "No suitable DMA available.\n");
219 goto err_release_regions; 216 goto err_release_regions;
220 } 217 }
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index cc5611efc7a9..66e58f0a75dc 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1845,8 +1845,8 @@ csio_ln_fdmi_init(struct csio_lnode *ln)
1845 /* Allocate Dma buffers for FDMI response Payload */ 1845 /* Allocate Dma buffers for FDMI response Payload */
1846 dma_buf = &ln->mgmt_req->dma_buf; 1846 dma_buf = &ln->mgmt_req->dma_buf;
1847 dma_buf->len = 2048; 1847 dma_buf->len = 2048;
1848 dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len, 1848 dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
1849 &dma_buf->paddr); 1849 &dma_buf->paddr, GFP_KERNEL);
1850 if (!dma_buf->vaddr) { 1850 if (!dma_buf->vaddr) {
1851 csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n"); 1851 csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
1852 kfree(ln->mgmt_req); 1852 kfree(ln->mgmt_req);
@@ -1873,7 +1873,7 @@ csio_ln_fdmi_exit(struct csio_lnode *ln)
1873 1873
1874 dma_buf = &ln->mgmt_req->dma_buf; 1874 dma_buf = &ln->mgmt_req->dma_buf;
1875 if (dma_buf->vaddr) 1875 if (dma_buf->vaddr)
1876 pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr, 1876 dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
1877 dma_buf->paddr); 1877 dma_buf->paddr);
1878 1878
1879 kfree(ln->mgmt_req); 1879 kfree(ln->mgmt_req);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index dab0d3f9bee1..8c15b7acb4b7 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2349,8 +2349,8 @@ csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
2349 } 2349 }
2350 2350
2351 /* Allocate Dma buffers for DDP */ 2351 /* Allocate Dma buffers for DDP */
2352 ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size, 2352 ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
2353 &ddp_desc->paddr); 2353 &ddp_desc->paddr, GFP_KERNEL);
2354 if (!ddp_desc->vaddr) { 2354 if (!ddp_desc->vaddr) {
2355 csio_err(hw, 2355 csio_err(hw,
2356 "SCSI response DMA buffer (ddp) allocation" 2356 "SCSI response DMA buffer (ddp) allocation"
@@ -2372,8 +2372,8 @@ no_mem:
2372 list_for_each(tmp, &scm->ddp_freelist) { 2372 list_for_each(tmp, &scm->ddp_freelist) {
2373 ddp_desc = (struct csio_dma_buf *) tmp; 2373 ddp_desc = (struct csio_dma_buf *) tmp;
2374 tmp = csio_list_prev(tmp); 2374 tmp = csio_list_prev(tmp);
2375 pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr, 2375 dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
2376 ddp_desc->paddr); 2376 ddp_desc->vaddr, ddp_desc->paddr);
2377 list_del_init(&ddp_desc->list); 2377 list_del_init(&ddp_desc->list);
2378 kfree(ddp_desc); 2378 kfree(ddp_desc);
2379 } 2379 }
@@ -2399,8 +2399,8 @@ csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
2399 list_for_each(tmp, &scm->ddp_freelist) { 2399 list_for_each(tmp, &scm->ddp_freelist) {
2400 ddp_desc = (struct csio_dma_buf *) tmp; 2400 ddp_desc = (struct csio_dma_buf *) tmp;
2401 tmp = csio_list_prev(tmp); 2401 tmp = csio_list_prev(tmp);
2402 pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr, 2402 dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
2403 ddp_desc->paddr); 2403 ddp_desc->vaddr, ddp_desc->paddr);
2404 list_del_init(&ddp_desc->list); 2404 list_del_init(&ddp_desc->list);
2405 kfree(ddp_desc); 2405 kfree(ddp_desc);
2406 } 2406 }
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 5022e82ccc4f..dc12933533d5 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -124,8 +124,8 @@ csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
124 124
125 while (n--) { 125 while (n--) {
126 buf->len = sge->sge_fl_buf_size[sreg]; 126 buf->len = sge->sge_fl_buf_size[sreg];
127 buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len, 127 buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
128 &buf->paddr); 128 &buf->paddr, GFP_KERNEL);
129 if (!buf->vaddr) { 129 if (!buf->vaddr) {
130 csio_err(hw, "Could only fill %d buffers!\n", n + 1); 130 csio_err(hw, "Could only fill %d buffers!\n", n + 1);
131 return -ENOMEM; 131 return -ENOMEM;
@@ -233,7 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
233 233
234 q = wrm->q_arr[free_idx]; 234 q = wrm->q_arr[free_idx];
235 235
236 q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart); 236 q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
237 GFP_KERNEL);
237 if (!q->vstart) { 238 if (!q->vstart) {
238 csio_err(hw, 239 csio_err(hw,
239 "Failed to allocate DMA memory for " 240 "Failed to allocate DMA memory for "
@@ -1703,14 +1704,14 @@ csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
1703 buf = &q->un.fl.bufs[j]; 1704 buf = &q->un.fl.bufs[j];
1704 if (!buf->vaddr) 1705 if (!buf->vaddr)
1705 continue; 1706 continue;
1706 pci_free_consistent(hw->pdev, buf->len, 1707 dma_free_coherent(&hw->pdev->dev,
1707 buf->vaddr, 1708 buf->len, buf->vaddr,
1708 buf->paddr); 1709 buf->paddr);
1709 } 1710 }
1710 kfree(q->un.fl.bufs); 1711 kfree(q->un.fl.bufs);
1711 } 1712 }
1712 pci_free_consistent(hw->pdev, q->size, 1713 dma_free_coherent(&hw->pdev->dev, q->size,
1713 q->vstart, q->pstart); 1714 q->vstart, q->pstart);
1714 } 1715 }
1715 kfree(q); 1716 kfree(q);
1716 } 1717 }
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 211da1d5a869..064ef5735182 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -35,6 +35,11 @@ static unsigned int dbg_level;
35 35
36#include "../libcxgbi.h" 36#include "../libcxgbi.h"
37 37
38#ifdef CONFIG_CHELSIO_T4_DCB
39#include <net/dcbevent.h>
40#include "cxgb4_dcb.h"
41#endif
42
38#define DRV_MODULE_NAME "cxgb4i" 43#define DRV_MODULE_NAME "cxgb4i"
39#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver" 44#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver"
40#define DRV_MODULE_VERSION "0.9.5-ko" 45#define DRV_MODULE_VERSION "0.9.5-ko"
@@ -155,6 +160,15 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
155 .session_recovery_timedout = iscsi_session_recovery_timedout, 160 .session_recovery_timedout = iscsi_session_recovery_timedout,
156}; 161};
157 162
163#ifdef CONFIG_CHELSIO_T4_DCB
164static int
165cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);
166
167static struct notifier_block cxgb4_dcb_change = {
168 .notifier_call = cxgb4_dcb_change_notify,
169};
170#endif
171
158static struct scsi_transport_template *cxgb4i_stt; 172static struct scsi_transport_template *cxgb4i_stt;
159 173
160/* 174/*
@@ -574,6 +588,9 @@ static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
574 int nparams, flowclen16, flowclen; 588 int nparams, flowclen16, flowclen;
575 589
576 nparams = FLOWC_WR_NPARAMS_MIN; 590 nparams = FLOWC_WR_NPARAMS_MIN;
591#ifdef CONFIG_CHELSIO_T4_DCB
592 nparams++;
593#endif
577 flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); 594 flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
578 flowclen16 = DIV_ROUND_UP(flowclen, 16); 595 flowclen16 = DIV_ROUND_UP(flowclen, 16);
579 flowclen = flowclen16 * 16; 596 flowclen = flowclen16 * 16;
@@ -595,6 +612,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
595 struct fw_flowc_wr *flowc; 612 struct fw_flowc_wr *flowc;
596 int nparams, flowclen16, flowclen; 613 int nparams, flowclen16, flowclen;
597 614
615#ifdef CONFIG_CHELSIO_T4_DCB
616 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
617#endif
598 flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen); 618 flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
599 skb = alloc_wr(flowclen, 0, GFP_ATOMIC); 619 skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
600 flowc = (struct fw_flowc_wr *)skb->head; 620 flowc = (struct fw_flowc_wr *)skb->head;
@@ -622,6 +642,17 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
622 flowc->mnemval[8].val = 0; 642 flowc->mnemval[8].val = 0;
623 flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; 643 flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
624 flowc->mnemval[8].val = 16384; 644 flowc->mnemval[8].val = 16384;
645#ifdef CONFIG_CHELSIO_T4_DCB
646 flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
647 if (vlan == CPL_L2T_VLAN_NONE) {
648 pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
649 csk->tid);
650 flowc->mnemval[9].val = cpu_to_be32(0);
651 } else {
652 flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
653 VLAN_PRIO_SHIFT);
654 }
655#endif
625 656
626 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); 657 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
627 658
@@ -1600,6 +1631,46 @@ static void release_offload_resources(struct cxgbi_sock *csk)
1600 csk->dst = NULL; 1631 csk->dst = NULL;
1601} 1632}
1602 1633
1634#ifdef CONFIG_CHELSIO_T4_DCB
1635static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
1636{
1637 return ndev->dcbnl_ops->getstate(ndev);
1638}
1639
1640static int select_priority(int pri_mask)
1641{
1642 if (!pri_mask)
1643 return 0;
1644 return (ffs(pri_mask) - 1);
1645}
1646
1647static u8 get_iscsi_dcb_priority(struct net_device *ndev)
1648{
1649 int rv;
1650 u8 caps;
1651
1652 struct dcb_app iscsi_dcb_app = {
1653 .protocol = 3260
1654 };
1655
1656 rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
1657 if (rv)
1658 return 0;
1659
1660 if (caps & DCB_CAP_DCBX_VER_IEEE) {
1661 iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
1662 rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
1663 } else if (caps & DCB_CAP_DCBX_VER_CEE) {
1664 iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
1665 rv = dcb_getapp(ndev, &iscsi_dcb_app);
1666 }
1667
1668 log_debug(1 << CXGBI_DBG_ISCSI,
1669 "iSCSI priority is set to %u\n", select_priority(rv));
1670 return select_priority(rv);
1671}
1672#endif
1673
1603static int init_act_open(struct cxgbi_sock *csk) 1674static int init_act_open(struct cxgbi_sock *csk)
1604{ 1675{
1605 struct cxgbi_device *cdev = csk->cdev; 1676 struct cxgbi_device *cdev = csk->cdev;
@@ -1613,7 +1684,9 @@ static int init_act_open(struct cxgbi_sock *csk)
1613 unsigned int size, size6; 1684 unsigned int size, size6;
1614 unsigned int linkspeed; 1685 unsigned int linkspeed;
1615 unsigned int rcv_winf, snd_winf; 1686 unsigned int rcv_winf, snd_winf;
1616 1687#ifdef CONFIG_CHELSIO_T4_DCB
1688 u8 priority = 0;
1689#endif
1617 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1690 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1618 "csk 0x%p,%u,0x%lx,%u.\n", 1691 "csk 0x%p,%u,0x%lx,%u.\n",
1619 csk, csk->state, csk->flags, csk->tid); 1692 csk, csk->state, csk->flags, csk->tid);
@@ -1647,7 +1720,15 @@ static int init_act_open(struct cxgbi_sock *csk)
1647 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); 1720 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1648 cxgbi_sock_get(csk); 1721 cxgbi_sock_get(csk);
1649 1722
1723#ifdef CONFIG_CHELSIO_T4_DCB
1724 if (get_iscsi_dcb_state(ndev))
1725 priority = get_iscsi_dcb_priority(ndev);
1726
1727 csk->dcb_priority = priority;
1728 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
1729#else
1650 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); 1730 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
1731#endif
1651 if (!csk->l2t) { 1732 if (!csk->l2t) {
1652 pr_err("%s, cannot alloc l2t.\n", ndev->name); 1733 pr_err("%s, cannot alloc l2t.\n", ndev->name);
1653 goto rel_resource_without_clip; 1734 goto rel_resource_without_clip;
@@ -2146,6 +2227,70 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
2146 return 0; 2227 return 0;
2147} 2228}
2148 2229
2230#ifdef CONFIG_CHELSIO_T4_DCB
2231static int
2232cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
2233 void *data)
2234{
2235 int i, port = 0xFF;
2236 struct net_device *ndev;
2237 struct cxgbi_device *cdev = NULL;
2238 struct dcb_app_type *iscsi_app = data;
2239 struct cxgbi_ports_map *pmap;
2240 u8 priority;
2241
2242 if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
2243 if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
2244 return NOTIFY_DONE;
2245
2246 priority = iscsi_app->app.priority;
2247 } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
2248 if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
2249 return NOTIFY_DONE;
2250
2251 if (!iscsi_app->app.priority)
2252 return NOTIFY_DONE;
2253
2254 priority = ffs(iscsi_app->app.priority) - 1;
2255 } else {
2256 return NOTIFY_DONE;
2257 }
2258
2259 if (iscsi_app->app.protocol != 3260)
2260 return NOTIFY_DONE;
2261
2262 log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
2263 iscsi_app->ifindex, priority);
2264
2265 ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
2266 if (!ndev)
2267 return NOTIFY_DONE;
2268
2269 cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
2270
2271 dev_put(ndev);
2272 if (!cdev)
2273 return NOTIFY_DONE;
2274
2275 pmap = &cdev->pmap;
2276
2277 for (i = 0; i < pmap->used; i++) {
2278 if (pmap->port_csk[i]) {
2279 struct cxgbi_sock *csk = pmap->port_csk[i];
2280
2281 if (csk->dcb_priority != priority) {
2282 iscsi_conn_failure(csk->user_data,
2283 ISCSI_ERR_CONN_FAILED);
2284 pr_info("Restarting iSCSI connection %p with "
2285 "priority %u->%u.\n", csk,
2286 csk->dcb_priority, priority);
2287 }
2288 }
2289 }
2290 return NOTIFY_OK;
2291}
2292#endif
2293
2149static int __init cxgb4i_init_module(void) 2294static int __init cxgb4i_init_module(void)
2150{ 2295{
2151 int rc; 2296 int rc;
@@ -2157,11 +2302,18 @@ static int __init cxgb4i_init_module(void)
2157 return rc; 2302 return rc;
2158 cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); 2303 cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
2159 2304
2305#ifdef CONFIG_CHELSIO_T4_DCB
2306 pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
2307 register_dcbevent_notifier(&cxgb4_dcb_change);
2308#endif
2160 return 0; 2309 return 0;
2161} 2310}
2162 2311
2163static void __exit cxgb4i_exit_module(void) 2312static void __exit cxgb4i_exit_module(void)
2164{ 2313{
2314#ifdef CONFIG_CHELSIO_T4_DCB
2315 unregister_dcbevent_notifier(&cxgb4_dcb_change);
2316#endif
2165 cxgb4_unregister_uld(CXGB4_ULD_ISCSI); 2317 cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
2166 cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); 2318 cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
2167 cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt); 2319 cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
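
[Note: the cxgb4i DCB support added above follows the standard notifier pattern: a notifier_block registered in module init via register_dcbevent_notifier(), a callback that returns NOTIFY_DONE for events it does not own (wrong DCBX selector, wrong protocol) and NOTIFY_OK once handled, and unregistration in module exit. Port 3260 is the iSCSI entry the driver matches in the DCB application table. A skeleton of that shape, with the callback body elided:]

	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <net/dcbnl.h>
	#include <net/dcbevent.h>

	/* Sketch: minimal dcbevent listener with the same lifecycle as
	 * cxgb4i's; the priority handling is a placeholder. */
	static int my_dcb_notify(struct notifier_block *nb, unsigned long val,
				 void *data)
	{
		struct dcb_app_type *app = data;

		if (app->app.protocol != 3260)
			return NOTIFY_DONE;	/* not an iSCSI entry */
		/* ... re-resolve priority, restart affected connections ... */
		return NOTIFY_OK;
	}

	static struct notifier_block my_dcb_nb = {
		.notifier_call = my_dcb_notify,
	};

	static int __init my_init(void)
	{
		register_dcbevent_notifier(&my_dcb_nb);
		return 0;
	}

	static void __exit my_exit(void)
	{
		unregister_dcbevent_notifier(&my_dcb_nb);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");
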
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index dcb190e75343..5d5d8b50d842 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -120,6 +120,9 @@ struct cxgbi_sock {
120 int wr_max_cred; 120 int wr_max_cred;
121 int wr_cred; 121 int wr_cred;
122 int wr_una_cred; 122 int wr_una_cred;
123#ifdef CONFIG_CHELSIO_T4_DCB
124 u8 dcb_priority;
125#endif
123 unsigned char hcrc_len; 126 unsigned char hcrc_len;
124 unsigned char dcrc_len; 127 unsigned char dcrc_len;
125 128
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 1ed2cd82129d..8c55ec6e1827 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -753,105 +753,6 @@ static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
753 return NULL; 753 return NULL;
754} 754}
755 755
756
757static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
758{
759 struct list_head *head = &acb->srb_free_list;
760 struct ScsiReqBlk *srb = NULL;
761
762 if (!list_empty(head)) {
763 srb = list_entry(head->next, struct ScsiReqBlk, list);
764 list_del(head->next);
765 dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
766 }
767 return srb;
768}
769
770
771static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
772{
773 dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
774 list_add_tail(&srb->list, &acb->srb_free_list);
775}
776
777
778static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
779 struct ScsiReqBlk *srb)
780{
781 dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
782 srb->cmd, dcb->target_id, dcb->target_lun, srb);
783 list_add(&srb->list, &dcb->srb_waiting_list);
784}
785
786
787static void srb_waiting_append(struct DeviceCtlBlk *dcb,
788 struct ScsiReqBlk *srb)
789{
790 dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
791 srb->cmd, dcb->target_id, dcb->target_lun, srb);
792 list_add_tail(&srb->list, &dcb->srb_waiting_list);
793}
794
795
796static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
797{
798 dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
799 srb->cmd, dcb->target_id, dcb->target_lun, srb);
800 list_add_tail(&srb->list, &dcb->srb_going_list);
801}
802
803
804static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
805{
806 struct ScsiReqBlk *i;
807 struct ScsiReqBlk *tmp;
808 dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
809 srb->cmd, dcb->target_id, dcb->target_lun, srb);
810
811 list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
812 if (i == srb) {
813 list_del(&srb->list);
814 break;
815 }
816}
817
818
819static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
820 struct ScsiReqBlk *srb)
821{
822 struct ScsiReqBlk *i;
823 struct ScsiReqBlk *tmp;
824 dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
825 srb->cmd, dcb->target_id, dcb->target_lun, srb);
826
827 list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
828 if (i == srb) {
829 list_del(&srb->list);
830 break;
831 }
832}
833
834
835static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
836 struct ScsiReqBlk *srb)
837{
838 dprintkdbg(DBG_0,
839 "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
840 srb->cmd, dcb->target_id, dcb->target_lun, srb);
841 list_move(&srb->list, &dcb->srb_waiting_list);
842}
843
844
845static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
846 struct ScsiReqBlk *srb)
847{
848 dprintkdbg(DBG_0,
849 "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
850 srb->cmd, dcb->target_id, dcb->target_lun, srb);
851 list_move(&srb->list, &dcb->srb_going_list);
852}
853
854
855/* Sets the timer to wake us up */ 756/* Sets the timer to wake us up */
856static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to) 757static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
857{ 758{
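
[Note: the dc395x deletion above removes ten wrappers that only logged and then called a single <linux/list.h> primitive; the follow-on hunks inline those primitives directly. Three shapes replace them: pop the first free entry with list_first_entry_or_null() plus list_del(), move between queues with list_move(), and append with list_add_tail(). A sketch with a stand-in request type:]

	#include <linux/list.h>

	/* Sketch: the list operations that replace dc395x's wrappers;
	 * "struct my_req" stands in for struct ScsiReqBlk. */
	struct my_req {
		struct list_head list;
	};

	static struct my_req *my_get_free(struct list_head *free_list)
	{
		struct my_req *req = list_first_entry_or_null(free_list,
							      struct my_req,
							      list);
		if (req)
			list_del(&req->list);	/* pop it off the free list */
		return req;
	}

	static void my_queue(struct my_req *req, struct list_head *waiting)
	{
		list_add_tail(&req->list, waiting);	/* FIFO append */
	}

	static void my_promote(struct my_req *req, struct list_head *going)
	{
		list_move(&req->list, going);	/* unlink + add in one call */
	}
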
@@ -923,7 +824,7 @@ static void waiting_process_next(struct AdapterCtlBlk *acb)
923 824
924 /* Try to send to the bus */ 825 /* Try to send to the bus */
925 if (!start_scsi(acb, pos, srb)) 826 if (!start_scsi(acb, pos, srb))
926 srb_waiting_to_going_move(pos, srb); 827 list_move(&srb->list, &pos->srb_going_list);
927 else 828 else
928 waiting_set_timer(acb, HZ/50); 829 waiting_set_timer(acb, HZ/50);
929 break; 830 break;
@@ -960,15 +861,15 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
960 if (dcb->max_command <= list_size(&dcb->srb_going_list) || 861 if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
961 acb->active_dcb || 862 acb->active_dcb ||
962 (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) { 863 (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
963 srb_waiting_append(dcb, srb); 864 list_add_tail(&srb->list, &dcb->srb_waiting_list);
964 waiting_process_next(acb); 865 waiting_process_next(acb);
965 return; 866 return;
966 } 867 }
967 868
968 if (!start_scsi(acb, dcb, srb)) 869 if (!start_scsi(acb, dcb, srb)) {
969 srb_going_append(dcb, srb); 870 list_add_tail(&srb->list, &dcb->srb_going_list);
970 else { 871 } else {
971 srb_waiting_insert(dcb, srb); 872 list_add(&srb->list, &dcb->srb_waiting_list);
972 waiting_set_timer(acb, HZ / 50); 873 waiting_set_timer(acb, HZ / 50);
973 } 874 }
974} 875}
@@ -1045,10 +946,8 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
1045 sgp->length++; 946 sgp->length++;
1046 } 947 }
1047 948
1048 srb->sg_bus_addr = pci_map_single(dcb->acb->dev, 949 srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
1049 srb->segment_x, 950 srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
1050 SEGMENTX_LEN,
1051 PCI_DMA_TODEVICE);
1052 951
1053 dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n", 952 dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
1054 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN); 953 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
@@ -1116,9 +1015,9 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
1116 cmd->scsi_done = done; 1015 cmd->scsi_done = done;
1117 cmd->result = 0; 1016 cmd->result = 0;
1118 1017
1119 srb = srb_get_free(acb); 1018 srb = list_first_entry_or_null(&acb->srb_free_list,
1120 if (!srb) 1019 struct ScsiReqBlk, list);
1121 { 1020 if (!srb) {
1122 /* 1021 /*
1123 * Return 1 since we are unable to queue this command at this 1022 * Return 1 since we are unable to queue this command at this
1124 * point in time. 1023 * point in time.
@@ -1126,12 +1025,13 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
1126 dprintkdbg(DBG_0, "queue_command: No free srb's\n"); 1025 dprintkdbg(DBG_0, "queue_command: No free srb's\n");
1127 return 1; 1026 return 1;
1128 } 1027 }
1028 list_del(&srb->list);
1129 1029
1130 build_srb(cmd, dcb, srb); 1030 build_srb(cmd, dcb, srb);
1131 1031
1132 if (!list_empty(&dcb->srb_waiting_list)) { 1032 if (!list_empty(&dcb->srb_waiting_list)) {
1133 /* append to waiting queue */ 1033 /* append to waiting queue */
1134 srb_waiting_append(dcb, srb); 1034 list_add_tail(&srb->list, &dcb->srb_waiting_list);
1135 waiting_process_next(acb); 1035 waiting_process_next(acb);
1136 } else { 1036 } else {
1137 /* process immediately */ 1037 /* process immediately */
@@ -1376,11 +1276,11 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
1376 1276
1377 srb = find_cmd(cmd, &dcb->srb_waiting_list); 1277 srb = find_cmd(cmd, &dcb->srb_waiting_list);
1378 if (srb) { 1278 if (srb) {
1379 srb_waiting_remove(dcb, srb); 1279 list_del(&srb->list);
1380 pci_unmap_srb_sense(acb, srb); 1280 pci_unmap_srb_sense(acb, srb);
1381 pci_unmap_srb(acb, srb); 1281 pci_unmap_srb(acb, srb);
1382 free_tag(dcb, srb); 1282 free_tag(dcb, srb);
1383 srb_free_insert(acb, srb); 1283 list_add_tail(&srb->list, &acb->srb_free_list);
1384 dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n"); 1284 dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
1385 cmd->result = DID_ABORT << 16; 1285 cmd->result = DID_ABORT << 16;
1386 return SUCCESS; 1286 return SUCCESS;
@@ -1969,14 +1869,15 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
1969 xferred -= psge->length; 1869 xferred -= psge->length;
1970 } else { 1870 } else {
1971 /* Partial SG entry done */ 1871 /* Partial SG entry done */
1872 dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
1873 srb->sg_bus_addr, SEGMENTX_LEN,
1874 DMA_TO_DEVICE);
1972 psge->length -= xferred; 1875 psge->length -= xferred;
1973 psge->address += xferred; 1876 psge->address += xferred;
1974 srb->sg_index = idx; 1877 srb->sg_index = idx;
1975 pci_dma_sync_single_for_device(srb->dcb-> 1878 dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
1976 acb->dev, 1879 srb->sg_bus_addr, SEGMENTX_LEN,
1977 srb->sg_bus_addr, 1880 DMA_TO_DEVICE);
1978 SEGMENTX_LEN,
1979 PCI_DMA_TODEVICE);
1980 break; 1881 break;
1981 } 1882 }
1982 psge++; 1883 psge++;
@@ -3083,7 +2984,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
3083 goto disc1; 2984 goto disc1;
3084 } 2985 }
3085 free_tag(dcb, srb); 2986 free_tag(dcb, srb);
3086 srb_going_to_waiting_move(dcb, srb); 2987 list_move(&srb->list, &dcb->srb_waiting_list);
3087 dprintkdbg(DBG_KG, 2988 dprintkdbg(DBG_KG,
3088 "disconnect: (0x%p) Retry\n", 2989 "disconnect: (0x%p) Retry\n",
3089 srb->cmd); 2990 srb->cmd);
@@ -3148,7 +3049,7 @@ static void reselect(struct AdapterCtlBlk *acb)
3148 3049
3149 srb->state = SRB_READY; 3050 srb->state = SRB_READY;
3150 free_tag(dcb, srb); 3051 free_tag(dcb, srb);
3151 srb_going_to_waiting_move(dcb, srb); 3052 list_move(&srb->list, &dcb->srb_waiting_list);
3152 waiting_set_timer(acb, HZ / 20); 3053 waiting_set_timer(acb, HZ / 20);
3153 3054
3154 /* return; */ 3055 /* return; */
@@ -3271,9 +3172,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
3271 /* unmap DC395x SG list */ 3172 /* unmap DC395x SG list */
3272 dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n", 3173 dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
3273 srb->sg_bus_addr, SEGMENTX_LEN); 3174 srb->sg_bus_addr, SEGMENTX_LEN);
3274 pci_unmap_single(acb->dev, srb->sg_bus_addr, 3175 dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
3275 SEGMENTX_LEN, 3176 DMA_TO_DEVICE);
3276 PCI_DMA_TODEVICE);
3277 dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n", 3177 dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
3278 scsi_sg_count(cmd), scsi_bufflen(cmd)); 3178 scsi_sg_count(cmd), scsi_bufflen(cmd));
3279 /* unmap the sg segments */ 3179 /* unmap the sg segments */
@@ -3291,8 +3191,8 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
3291 /* Unmap sense buffer */ 3191 /* Unmap sense buffer */
3292 dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n", 3192 dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
3293 srb->segment_x[0].address); 3193 srb->segment_x[0].address);
3294 pci_unmap_single(acb->dev, srb->segment_x[0].address, 3194 dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
3295 srb->segment_x[0].length, PCI_DMA_FROMDEVICE); 3195 srb->segment_x[0].length, DMA_FROM_DEVICE);
3296 /* Restore SG stuff */ 3196 /* Restore SG stuff */
3297 srb->total_xfer_length = srb->xferred; 3197 srb->total_xfer_length = srb->xferred;
3298 srb->segment_x[0].address = 3198 srb->segment_x[0].address =
@@ -3411,7 +3311,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3411 tempcnt--; 3311 tempcnt--;
3412 dcb->max_command = tempcnt; 3312 dcb->max_command = tempcnt;
3413 free_tag(dcb, srb); 3313 free_tag(dcb, srb);
3414 srb_going_to_waiting_move(dcb, srb); 3314 list_move(&srb->list, &dcb->srb_waiting_list);
3415 waiting_set_timer(acb, HZ / 20); 3315 waiting_set_timer(acb, HZ / 20);
3416 srb->adapter_status = 0; 3316 srb->adapter_status = 0;
3417 srb->target_status = 0; 3317 srb->target_status = 0;
@@ -3447,14 +3347,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3447 } 3347 }
3448 } 3348 }
3449 3349
3450 if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
3451 pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
3452 scsi_sg_count(cmd), dir);
3453
3454 ckc_only = 0; 3350 ckc_only = 0;
3455/* Check Error Conditions */ 3351/* Check Error Conditions */
3456 ckc_e: 3352 ckc_e:
3457 3353
3354 pci_unmap_srb(acb, srb);
3355
3458 if (cmd->cmnd[0] == INQUIRY) { 3356 if (cmd->cmnd[0] == INQUIRY) {
3459 unsigned char *base = NULL; 3357 unsigned char *base = NULL;
3460 struct ScsiInqData *ptr; 3358 struct ScsiInqData *ptr;
@@ -3498,16 +3396,14 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3498 cmd->cmnd[0], srb->total_xfer_length); 3396 cmd->cmnd[0], srb->total_xfer_length);
3499 } 3397 }
3500 3398
3501 srb_going_remove(dcb, srb); 3399 if (srb != acb->tmp_srb) {
3502 /* Add to free list */ 3400 /* Add to free list */
3503 if (srb == acb->tmp_srb)
3504 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
3505 else {
3506 dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n", 3401 dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
3507 cmd, cmd->result); 3402 cmd, cmd->result);
3508 srb_free_insert(acb, srb); 3403 list_move_tail(&srb->list, &acb->srb_free_list);
3404 } else {
3405 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
3509 } 3406 }
3510 pci_unmap_srb(acb, srb);
3511 3407
3512 cmd->scsi_done(cmd); 3408 cmd->scsi_done(cmd);
3513 waiting_process_next(acb); 3409 waiting_process_next(acb);
@@ -3535,9 +3431,9 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3535 result = MK_RES(0, did_flag, 0, 0); 3431 result = MK_RES(0, did_flag, 0, 0);
3536 printk("G:%p(%02i-%i) ", p, 3432 printk("G:%p(%02i-%i) ", p,
3537 p->device->id, (u8)p->device->lun); 3433 p->device->id, (u8)p->device->lun);
3538 srb_going_remove(dcb, srb); 3434 list_del(&srb->list);
3539 free_tag(dcb, srb); 3435 free_tag(dcb, srb);
3540 srb_free_insert(acb, srb); 3436 list_add_tail(&srb->list, &acb->srb_free_list);
3541 p->result = result; 3437 p->result = result;
3542 pci_unmap_srb_sense(acb, srb); 3438 pci_unmap_srb_sense(acb, srb);
3543 pci_unmap_srb(acb, srb); 3439 pci_unmap_srb(acb, srb);
@@ -3565,8 +3461,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3565 result = MK_RES(0, did_flag, 0, 0); 3461 result = MK_RES(0, did_flag, 0, 0);
3566 printk("W:%p<%02i-%i>", p, p->device->id, 3462 printk("W:%p<%02i-%i>", p, p->device->id,
3567 (u8)p->device->lun); 3463 (u8)p->device->lun);
3568 srb_waiting_remove(dcb, srb); 3464 list_move_tail(&srb->list, &acb->srb_free_list);
3569 srb_free_insert(acb, srb);
3570 p->result = result; 3465 p->result = result;
3571 pci_unmap_srb_sense(acb, srb); 3466 pci_unmap_srb_sense(acb, srb);
3572 pci_unmap_srb(acb, srb); 3467 pci_unmap_srb(acb, srb);
@@ -3692,9 +3587,9 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3692 srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE; 3587 srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
3693 srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE; 3588 srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
3694 /* Map sense buffer */ 3589 /* Map sense buffer */
3695 srb->segment_x[0].address = 3590 srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
3696 pci_map_single(acb->dev, cmd->sense_buffer, 3591 cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
3697 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); 3592 DMA_FROM_DEVICE);
3698 dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n", 3593 dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
3699 cmd->sense_buffer, srb->segment_x[0].address, 3594 cmd->sense_buffer, srb->segment_x[0].address,
3700 SCSI_SENSE_BUFFERSIZE); 3595 SCSI_SENSE_BUFFERSIZE);
@@ -3705,7 +3600,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3705 dprintkl(KERN_DEBUG, 3600 dprintkl(KERN_DEBUG,
3706 "request_sense: (0x%p) failed <%02i-%i>\n", 3601 "request_sense: (0x%p) failed <%02i-%i>\n",
3707 srb->cmd, dcb->target_id, dcb->target_lun); 3602 srb->cmd, dcb->target_id, dcb->target_lun);
3708 srb_going_to_waiting_move(dcb, srb); 3603 list_move(&srb->list, &dcb->srb_waiting_list);
3709 waiting_set_timer(acb, HZ / 100); 3604 waiting_set_timer(acb, HZ / 100);
3710 } 3605 }
3711} 3606}
@@ -4392,7 +4287,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
4392 4287
4393 /* link static array of srbs into the srb free list */ 4288 /* link static array of srbs into the srb free list */
4394 for (i = 0; i < acb->srb_count - 1; i++) 4289 for (i = 0; i < acb->srb_count - 1; i++)
4395 srb_free_insert(acb, &acb->srb_array[i]); 4290 list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
4396} 4291}
4397 4292
4398 4293
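The dc395x hunks above fold the driver's one-line queue wrappers (srb_waiting_append(), srb_going_to_waiting_move(), and friends) into direct <linux/list.h> calls at the call sites. A minimal sketch of the resulting pattern, using a hypothetical struct rather than the driver's ScsiReqBlk:

#include <linux/list.h>

struct req {
	struct list_head list;	/* links the request into exactly one queue */
};

static void retry_later(struct list_head *waiting, struct req *r)
{
	/* list_move() is a list_del() plus list_add() in one step */
	list_move(&r->list, waiting);
}

static void complete_req(struct list_head *free_list, struct req *r)
{
	/* move the finished request from "going" straight to the free list */
	list_move_tail(&r->list, free_list);
}

Because list_move()/list_move_tail() unlink before relinking, a request is never on two queues at once, which is exactly the invariant the old wrappers maintained by hand.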
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index c3fc34b9964d..ac7da9db7317 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -369,19 +369,28 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
 {
 	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 	struct scatterlist *sg = scsi_sglist(cmd);
-	int dir = cmd->sc_data_direction;
-	int total, i;
+	int total = 0, i;
 
-	if (dir == DMA_NONE)
+	if (cmd->sc_data_direction == DMA_NONE)
 		return;
 
-	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
+	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+		/*
+		 * For pseudo DMA and PIO we need the virtual address instead of
+		 * a dma address, so perform an identity mapping.
+		 */
+		spriv->num_sg = scsi_sg_count(cmd);
+		for (i = 0; i < spriv->num_sg; i++) {
+			sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
+			total += sg_dma_len(&sg[i]);
+		}
+	} else {
+		spriv->num_sg = scsi_dma_map(cmd);
+		for (i = 0; i < spriv->num_sg; i++)
+			total += sg_dma_len(&sg[i]);
+	}
 	spriv->cur_residue = sg_dma_len(sg);
 	spriv->cur_sg = sg;
-
-	total = 0;
-	for (i = 0; i < spriv->u.num_sg; i++)
-		total += sg_dma_len(&sg[i]);
 	spriv->tot_residue = total;
 }
 
@@ -441,13 +450,8 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
 
 static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 {
-	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
-	int dir = cmd->sc_data_direction;
-
-	if (dir == DMA_NONE)
-		return;
-
-	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
+	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+		scsi_dma_unmap(cmd);
 }
 
 static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -478,17 +482,6 @@ static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 	spriv->tot_residue = ent->saved_tot_residue;
 }
 
-static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
-{
-	if (cmd->cmd_len == 6 ||
-	    cmd->cmd_len == 10 ||
-	    cmd->cmd_len == 12) {
-		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
-	} else {
-		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
-	}
-}
-
 static void esp_write_tgt_config3(struct esp *esp, int tgt)
 {
 	if (esp->rev > ESP100A) {
@@ -624,6 +617,26 @@ static void esp_free_lun_tag(struct esp_cmd_entry *ent,
 	}
 }
 
+static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+	ent->sense_ptr = ent->cmd->sense_buffer;
+	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+		ent->sense_dma = (uintptr_t)ent->sense_ptr;
+		return;
+	}
+
+	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
+					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+}
+
+static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+		dma_unmap_single(esp->dev, ent->sense_dma,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+	ent->sense_ptr = NULL;
+}
+
 /* When a contingent allegiance conditon is created, we force feed a
  * REQUEST_SENSE command to the device to fetch the sense data.  I
  * tried many other schemes, relying on the scsi error handling layer
@@ -645,12 +658,7 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
 	if (!ent->sense_ptr) {
 		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
 				  tgt, lun);
-
-		ent->sense_ptr = cmd->sense_buffer;
-		ent->sense_dma = esp->ops->map_single(esp,
-						      ent->sense_ptr,
-						      SCSI_SENSE_BUFFERSIZE,
-						      DMA_FROM_DEVICE);
+		esp_map_sense(esp, ent);
 	}
 	ent->saved_sense_ptr = ent->sense_ptr;
 
@@ -717,10 +725,10 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
 static void esp_maybe_execute_command(struct esp *esp)
 {
 	struct esp_target_data *tp;
-	struct esp_lun_data *lp;
 	struct scsi_device *dev;
 	struct scsi_cmnd *cmd;
 	struct esp_cmd_entry *ent;
+	bool select_and_stop = false;
 	int tgt, lun, i;
 	u32 val, start_cmd;
 	u8 *p;
@@ -743,7 +751,6 @@ static void esp_maybe_execute_command(struct esp *esp)
 	tgt = dev->id;
 	lun = dev->lun;
 	tp = &esp->target[tgt];
-	lp = dev->hostdata;
 
 	list_move(&ent->list, &esp->active_cmds);
 
@@ -752,7 +759,8 @@ static void esp_maybe_execute_command(struct esp *esp)
 	esp_map_dma(esp, cmd);
 	esp_save_pointers(esp, ent);
 
-	esp_check_command_len(esp, cmd);
+	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
+		select_and_stop = true;
 
 	p = esp->command_block;
 
@@ -793,42 +801,22 @@ static void esp_maybe_execute_command(struct esp *esp)
 			tp->flags &= ~ESP_TGT_CHECK_NEGO;
 		}
 
-		/* Process it like a slow command. */
-		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
-			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+		/* If there are multiple message bytes, use Select and Stop */
+		if (esp->msg_out_len)
+			select_and_stop = true;
 	}
 
 build_identify:
-	/* If we don't have a lun-data struct yet, we're probing
-	 * so do not disconnect.  Also, do not disconnect unless
-	 * we have a tag on this command.
-	 */
-	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
-		*p++ = IDENTIFY(1, lun);
-	else
-		*p++ = IDENTIFY(0, lun);
+	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);
 
 	if (ent->tag[0] && esp->rev == ESP100) {
 		/* ESP100 lacks select w/atn3 command, use select
 		 * and stop instead.
 		 */
-		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+		select_and_stop = true;
 	}
 
-	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
-		start_cmd = ESP_CMD_SELA;
-		if (ent->tag[0]) {
-			*p++ = ent->tag[0];
-			*p++ = ent->tag[1];
-
-			start_cmd = ESP_CMD_SA3;
-		}
-
-		for (i = 0; i < cmd->cmd_len; i++)
-			*p++ = cmd->cmnd[i];
-
-		esp->select_state = ESP_SELECT_BASIC;
-	} else {
+	if (select_and_stop) {
 		esp->cmd_bytes_left = cmd->cmd_len;
 		esp->cmd_bytes_ptr = &cmd->cmnd[0];
 
@@ -843,6 +831,19 @@ build_identify:
 
 		start_cmd = ESP_CMD_SELAS;
 		esp->select_state = ESP_SELECT_MSGOUT;
+	} else {
+		start_cmd = ESP_CMD_SELA;
+		if (ent->tag[0]) {
+			*p++ = ent->tag[0];
+			*p++ = ent->tag[1];
+
+			start_cmd = ESP_CMD_SA3;
+		}
+
+		for (i = 0; i < cmd->cmd_len; i++)
+			*p++ = cmd->cmnd[i];
+
+		esp->select_state = ESP_SELECT_BASIC;
 	}
 	val = tgt;
 	if (esp->rev == FASHME)
@@ -902,9 +903,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
 	}
 
 	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
-		esp->ops->unmap_single(esp, ent->sense_dma,
-				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
-		ent->sense_ptr = NULL;
+		esp_unmap_sense(esp, ent);
 
 		/* Restore the message/status bytes to what we actually
 		 * saw originally.  Also, report that we are providing
@@ -965,7 +964,7 @@ static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_
 	cmd->scsi_done = done;
 
 	spriv = ESP_CMD_PRIV(cmd);
-	spriv->u.dma_addr = ~(dma_addr_t)0x0;
+	spriv->num_sg = 0;
 
 	list_add_tail(&ent->list, &esp->queued_cmds);
 
@@ -1252,14 +1251,10 @@ static int esp_finish_select(struct esp *esp)
 		esp_unmap_dma(esp, cmd);
 		esp_free_lun_tag(ent, cmd->device->hostdata);
 		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
-		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
 		esp->cmd_bytes_ptr = NULL;
 		esp->cmd_bytes_left = 0;
 	} else {
-		esp->ops->unmap_single(esp, ent->sense_dma,
-				       SCSI_SENSE_BUFFERSIZE,
-				       DMA_FROM_DEVICE);
-		ent->sense_ptr = NULL;
+		esp_unmap_sense(esp, ent);
 	}
 
 	/* Now that the state is unwound properly, put back onto
@@ -1303,9 +1298,8 @@ static int esp_finish_select(struct esp *esp)
 			esp_flush_fifo(esp);
 		}
 
-		/* If we are doing a slow command, negotiation, etc.
-		 * we'll do the right thing as we transition to the
-		 * next phase.
+		/* If we are doing a Select And Stop command, negotiation, etc.
+		 * we'll do the right thing as we transition to the next phase.
 		 */
 		esp_event(esp, ESP_EVENT_CHECK_PHASE);
 		return 0;
@@ -1338,6 +1332,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
 
 	bytes_sent = esp->data_dma_len;
 	bytes_sent -= ecount;
+	bytes_sent -= esp->send_cmd_residual;
 
 	/*
 	 * The am53c974 has a DMA 'pecularity'. The doc states:
@@ -1358,7 +1353,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
 			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 			u8 *ptr;
 
-			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
+			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
 						  &offset, &count);
 			if (likely(ptr)) {
 				*(ptr + offset) = bval;
@@ -2039,11 +2034,8 @@ static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
 	esp_free_lun_tag(ent, cmd->device->hostdata);
 	cmd->result = DID_RESET << 16;
 
-	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
-		esp->ops->unmap_single(esp, ent->sense_dma,
-				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
-		ent->sense_ptr = NULL;
-	}
+	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
+		esp_unmap_sense(esp, ent);
 
 	cmd->scsi_done(cmd);
 	list_del(&ent->list);
@@ -2382,7 +2374,7 @@ static const char *esp_chip_names[] = {
 
 static struct scsi_transport_template *esp_transport_template;
 
-int scsi_esp_register(struct esp *esp, struct device *dev)
+int scsi_esp_register(struct esp *esp)
 {
 	static int instance;
 	int err;
@@ -2402,10 +2394,10 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
 
 	esp_bootup_reset(esp);
 
-	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
+	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
 		   esp->host->unique_id, esp->regs, esp->dma_regs,
 		   esp->host->irq);
-	dev_printk(KERN_INFO, dev,
+	dev_printk(KERN_INFO, esp->dev,
 		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
 		   esp->host->unique_id, esp_chip_names[esp->rev],
 		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
@@ -2413,7 +2405,7 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
 	/* Let the SCSI bus reset settle. */
 	ssleep(esp_bus_reset_settle);
 
-	err = scsi_add_host(esp->host, dev);
+	err = scsi_add_host(esp->host, esp->dev);
 	if (err)
 		return err;
 
@@ -2790,3 +2782,131 @@ MODULE_PARM_DESC(esp_debug,
 
 module_init(esp_init);
 module_exit(esp_exit);
+
+#ifdef CONFIG_SCSI_ESP_PIO
+static inline unsigned int esp_wait_for_fifo(struct esp *esp)
+{
+	int i = 500000;
+
+	do {
+		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+		if (fbytes)
+			return fbytes;
+
+		udelay(1);
+	} while (--i);
+
+	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
+		     esp_read8(ESP_STATUS));
+	return 0;
+}
+
+static inline int esp_wait_for_intr(struct esp *esp)
+{
+	int i = 500000;
+
+	do {
+		esp->sreg = esp_read8(ESP_STATUS);
+		if (esp->sreg & ESP_STAT_INTR)
+			return 0;
+
+		udelay(1);
+	} while (--i);
+
+	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
+		     esp->sreg);
+	return 1;
+}
+
+#define ESP_FIFO_SIZE 16
+
+void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+		      u32 dma_count, int write, u8 cmd)
+{
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+	cmd &= ~ESP_CMD_DMA;
+	esp->send_cmd_error = 0;
+
+	if (write) {
+		u8 *dst = (u8 *)addr;
+		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
+		scsi_esp_cmd(esp, cmd);
+
+		while (1) {
+			if (!esp_wait_for_fifo(esp))
+				break;
+
+			*dst++ = readb(esp->fifo_reg);
+			--esp_count;
+
+			if (!esp_count)
+				break;
+
+			if (esp_wait_for_intr(esp)) {
+				esp->send_cmd_error = 1;
+				break;
+			}
+
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
+				break;
+
+			esp->ireg = esp_read8(ESP_INTRPT);
+			if (esp->ireg & mask) {
+				esp->send_cmd_error = 1;
+				break;
+			}
+
+			if (phase == ESP_MIP)
+				esp_write8(ESP_CMD_MOK, ESP_CMD);
+
+			esp_write8(ESP_CMD_TI, ESP_CMD);
+		}
+	} else {
+		unsigned int n = ESP_FIFO_SIZE;
+		u8 *src = (u8 *)addr;
+
+		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+		if (n > esp_count)
+			n = esp_count;
+		writesb(esp->fifo_reg, src, n);
+		src += n;
+		esp_count -= n;
+
+		scsi_esp_cmd(esp, cmd);
+
+		while (esp_count) {
+			if (esp_wait_for_intr(esp)) {
+				esp->send_cmd_error = 1;
+				break;
+			}
+
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
+				break;
+
+			esp->ireg = esp_read8(ESP_INTRPT);
+			if (esp->ireg & ~ESP_INTR_BSERV) {
+				esp->send_cmd_error = 1;
+				break;
+			}
+
+			n = ESP_FIFO_SIZE -
+			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
+
+			if (n > esp_count)
+				n = esp_count;
+			writesb(esp->fifo_reg, src, n);
+			src += n;
+			esp_count -= n;
+
+			esp_write8(ESP_CMD_TI, ESP_CMD);
+		}
+	}
+
+	esp->send_cmd_residual = esp_count;
+}
+EXPORT_SYMBOL(esp_send_pio_cmd);
+#endif
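The esp_map_dma()/esp_unmap_dma() rework above replaces the per-driver map_sg/unmap_sg hooks with either scsi_dma_map()/scsi_dma_unmap() or, for ESP_FLAG_NO_DMA_MAP hosts, an identity mapping: the transfer loop then reads a plain CPU pointer out of sg_dma_address(). A hedged sketch of just that identity mapping, as a standalone helper (hypothetical, not the driver's):

#include <linux/scatterlist.h>

static void identity_map_sg(struct scatterlist *sg, int num_sg)
{
	int i;

	for (i = 0; i < num_sg; i++)
		/* no IOMMU/DMA translation: reuse the kernel virtual address */
		sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
}

This only works because PIO/pseudo-DMA code paths never hand these "addresses" to a bus master; they dereference them on the CPU, as esp_send_pio_cmd() above does via its addr argument.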
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 8163dca2071b..aa87a6b72dcc 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -249,11 +249,7 @@
 #define SYNC_DEFP_FAST            0x19   /* 10mb/s */
 
 struct esp_cmd_priv {
-	union {
-		dma_addr_t dma_addr;
-		int num_sg;
-	} u;
-
+	int num_sg;
 	int cur_residue;
 	struct scatterlist *cur_sg;
 	int tot_residue;
@@ -363,19 +359,6 @@ struct esp_driver_ops {
 	void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
 	u8 (*esp_read8)(struct esp *esp, unsigned long reg);
 
-	/* Map and unmap DMA memory.  Eventually the driver will be
-	 * converted to the generic DMA API as soon as SBUS is able to
-	 * cope with that.  At such time we can remove this.
-	 */
-	dma_addr_t (*map_single)(struct esp *esp, void *buf,
-				 size_t sz, int dir);
-	int (*map_sg)(struct esp *esp, struct scatterlist *sg,
-		      int num_sg, int dir);
-	void (*unmap_single)(struct esp *esp, dma_addr_t addr,
-			     size_t sz, int dir);
-	void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
-			 int num_sg, int dir);
-
 	/* Return non-zero if there is an IRQ pending.  Usually this
 	 * status bit lives in the DMA controller sitting in front of
 	 * the ESP.  This has to be accurate or else the ESP interrupt
@@ -435,7 +418,7 @@ struct esp {
 	const struct esp_driver_ops *ops;
 
 	struct Scsi_Host *host;
-	void *dev;
+	struct device *dev;
 
 	struct esp_cmd_entry *active_cmd;
 
@@ -490,11 +473,11 @@ struct esp {
 	u32 flags;
 #define ESP_FLAG_DIFFERENTIAL     0x00000001
 #define ESP_FLAG_RESETTING        0x00000002
-#define ESP_FLAG_DOING_SLOWCMD    0x00000004
 #define ESP_FLAG_WIDE_CAPABLE     0x00000008
 #define ESP_FLAG_QUICKIRQ_CHECK   0x00000010
 #define ESP_FLAG_DISABLE_SYNC     0x00000020
 #define ESP_FLAG_USE_FIFO         0x00000040
+#define ESP_FLAG_NO_DMA_MAP       0x00000080
 
 	u8 select_state;
 #define ESP_SELECT_NONE           0x00 /* Not selecting */
@@ -532,7 +515,7 @@ struct esp {
 	u32 min_period;
 	u32 radelay;
 
-	/* Slow command state. */
+	/* ESP_CMD_SELAS command state */
 	u8 *cmd_bytes_ptr;
 	int cmd_bytes_left;
 
@@ -540,6 +523,11 @@ struct esp {
 
 	void *dma;
 	int dmarev;
+
+	/* These are used by esp_send_pio_cmd() */
+	u8 __iomem *fifo_reg;
+	int send_cmd_error;
+	u32 send_cmd_residual;
 };
 
 /* A front-end driver for the ESP chip should do the following in
@@ -568,16 +556,18 @@ struct esp {
  *    example, the DMA engine has to be reset before ESP can
  *    be programmed.
  * 11) If necessary, call dev_set_drvdata() as needed.
- * 12) Call scsi_esp_register() with prepared 'esp' structure
- *     and a device pointer if possible.
+ * 12) Call scsi_esp_register() with prepared 'esp' structure.
 * 13) Check scsi_esp_register() return value, release all resources
 *     if an error was returned.
 */
 extern struct scsi_host_template scsi_esp_template;
-extern int scsi_esp_register(struct esp *, struct device *);
+extern int scsi_esp_register(struct esp *);
 
 extern void scsi_esp_unregister(struct esp *);
 extern irqreturn_t scsi_esp_intr(int, void *);
 extern void scsi_esp_cmd(struct esp *, u8);
 
+extern void esp_send_pio_cmd(struct esp *esp, u32 dma_addr, u32 esp_count,
+			     u32 dma_count, int write, u8 cmd);
+
 #endif /* !(_ESP_SCSI_H) */
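Taken together, the esp_scsi.h changes above shrink the front-end contract: esp->dev becomes a real struct device *, the four map/unmap ops disappear, and a PIO host instead sets ESP_FLAG_NO_DMA_MAP plus fifo_reg. A sketch of how a front end might wire this up; the probe shape and FIFO_OFFSET are invented for illustration:

static int example_esp_probe(struct platform_device *pdev, struct esp *esp,
			     void __iomem *regs)
{
	esp->dev = &pdev->dev;			/* was void *, now struct device * */
	esp->regs = regs;
	esp->fifo_reg = regs + FIFO_OFFSET;	/* consumed by esp_send_pio_cmd() */
	esp->flags |= ESP_FLAG_NO_DMA_MAP;	/* PIO: identity-map sg lists */

	return scsi_esp_register(esp);		/* device pointer now comes from esp->dev */
}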
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index c7bf316d8e83..844ef688fa91 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -836,8 +836,8 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 	u32 fcp_bytes_written = 0;
 	unsigned long flags;
 
-	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
-			 PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_FROM_DEVICE);
 	skb = buf->os_buf;
 	fp = (struct fc_frame *)skb;
 	buf->os_buf = NULL;
@@ -977,9 +977,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
 	skb_put(skb, len);
-	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
-
-	if (pci_dma_mapping_error(fnic->pdev, pa)) {
+	pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
 		r = -ENOMEM;
 		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
 		goto free_skb;
@@ -998,8 +997,8 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 	struct fc_frame *fp = buf->os_buf;
 	struct fnic *fnic = vnic_dev_priv(rq->vdev);
 
-	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
-			 PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_FROM_DEVICE);
 
 	dev_kfree_skb(fp_skb(fp));
 	buf->os_buf = NULL;
@@ -1018,7 +1017,6 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	struct ethhdr *eth_hdr;
 	struct vlan_ethhdr *vlan_hdr;
 	unsigned long flags;
-	int r;
 
 	if (!fnic->vlan_hw_insert) {
 		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1038,11 +1036,10 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		}
 	}
 
-	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
-	r = pci_dma_mapping_error(fnic->pdev, pa);
-	if (r) {
-		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+	pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
+			    DMA_TO_DEVICE);
+	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
+		printk(KERN_ERR "DMA mapping failed\n");
 		goto free_skb;
 	}
 
@@ -1058,7 +1055,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 
 irq_restore:
 	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
-	pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
 free_skb:
 	kfree_skb(skb);
 }
@@ -1115,9 +1112,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 	if (FC_FCOE_VER)
 		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
 
-	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
-
-	if (pci_dma_mapping_error(fnic->pdev, pa)) {
+	pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
 		ret = -ENOMEM;
 		printk(KERN_ERR "DMA map failed with error %d\n", ret);
 		goto free_skb_on_err;
@@ -1131,8 +1127,7 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 	spin_lock_irqsave(&fnic->wq_lock[0], flags);
 
 	if (!vnic_wq_desc_avail(wq)) {
-		pci_unmap_single(fnic->pdev, pa,
-				 tot_len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
 		ret = -1;
 		goto irq_restore;
 	}
@@ -1247,8 +1242,8 @@ static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
 	struct fc_frame *fp = (struct fc_frame *)skb;
 	struct fnic *fnic = vnic_dev_priv(wq->vdev);
 
-	pci_unmap_single(fnic->pdev, buf->dma_addr,
-			 buf->len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_TO_DEVICE);
 	dev_kfree_skb_irq(fp_skb(fp));
 	buf->os_buf = NULL;
 }
@@ -1290,8 +1285,8 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 	struct fc_frame *fp = buf->os_buf;
 	struct fnic *fnic = vnic_dev_priv(wq->vdev);
 
-	pci_unmap_single(fnic->pdev, buf->dma_addr,
-			 buf->len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_TO_DEVICE);
 
 	dev_kfree_skb(fp_skb(fp));
 	buf->os_buf = NULL;
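Every fnic_fcs.c hunk above is the same mechanical conversion: pci_map_single()/pci_unmap_single() on the pci_dev become dma_map_single()/dma_unmap_single() on &pdev->dev, PCI_DMA_* constants become enum dma_data_direction values, and failures are checked with dma_mapping_error(). The pattern in isolation, as a sketch with a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int map_one_buffer(struct pci_dev *pdev, void *buf, size_t len,
			  dma_addr_t *out)
{
	/* streaming mapping, CPU writes then device reads */
	dma_addr_t pa = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(&pdev->dev, pa))
		return -ENOMEM;	/* never hand a failed mapping to hardware */
	*out = pa;
	return 0;
}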
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index e52599f44170..cc461fd7bef1 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -611,30 +611,15 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * limitation for the device.  Try 64-bit first, and
 	 * fail to 32-bit.
 	 */
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			shost_printk(KERN_ERR, fnic->lport->host,
 				     "No usable DMA configuration "
 				     "aborting\n");
 			goto err_out_release_regions;
 		}
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			shost_printk(KERN_ERR, fnic->lport->host,
-				     "Unable to obtain 32-bit DMA "
-				     "for consistent allocations, aborting.\n");
-			goto err_out_release_regions;
-		}
-	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (err) {
-			shost_printk(KERN_ERR, fnic->lport->host,
-				     "Unable to obtain 64-bit DMA "
-				     "for consistent allocations, aborting.\n");
-			goto err_out_release_regions;
-		}
 	}
 
 	/* Map vNIC resources from BAR0 */
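dma_set_mask_and_coherent() sets both the streaming and the coherent DMA mask in one call, which is what lets the probe path above drop the separate pci_set_consistent_dma_mask() branches entirely. The usual try-64-fall-back-to-32 shape, as a minimal sketch:

#include <linux/dma-mapping.h>

static int set_dma_masks(struct device *dev)
{
	/* prefer 64-bit addressing; fall back for hosts that cannot do it */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}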
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8cbd3c9f0b4c..96acfcecd540 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -126,17 +126,17 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
 				   struct scsi_cmnd *sc)
 {
 	if (io_req->sgl_list_pa)
-		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
 				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 	scsi_dma_unmap(sc);
 
 	if (io_req->sgl_cnt)
 		mempool_free(io_req->sgl_list_alloc,
 			     fnic->io_sgl_pool[io_req->sgl_type]);
 	if (io_req->sense_buf_pa)
-		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
-				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }
 
 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
@@ -330,7 +330,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 	int flags;
 	u8 exch_flags;
 	struct scsi_lun fc_lun;
-	int r;
 
 	if (sg_count) {
 		/* For each SGE, create a device desc entry */
@@ -342,30 +341,25 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 			desc++;
 		}
 
-		io_req->sgl_list_pa = pci_map_single
-			(fnic->pdev,
-			 io_req->sgl_list,
-			 sizeof(io_req->sgl_list[0]) * sg_count,
-			 PCI_DMA_TODEVICE);
-
-		r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
-		if (r) {
-			printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+		io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
+				io_req->sgl_list,
+				sizeof(io_req->sgl_list[0]) * sg_count,
+				DMA_TO_DEVICE);
+		if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
+			printk(KERN_ERR "DMA mapping failed\n");
 			return SCSI_MLQUEUE_HOST_BUSY;
 		}
 	}
 
-	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
 					      sc->sense_buffer,
 					      SCSI_SENSE_BUFFERSIZE,
-					      PCI_DMA_FROMDEVICE);
-
-	r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
-	if (r) {
-		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+					      DMA_FROM_DEVICE);
+	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
+		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
 			sizeof(io_req->sgl_list[0]) * sg_count,
-			PCI_DMA_TODEVICE);
-		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+			DMA_TO_DEVICE);
+		printk(KERN_ERR "DMA mapping failed\n");
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
@@ -2272,33 +2266,17 @@ clean_pending_aborts_end:
 static inline int
 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-	int tag, ret = SCSI_NO_TAG;
-
-	BUG_ON(!bqt);
-	if (!bqt) {
-		pr_err("Tags are not supported\n");
-		goto end;
-	}
-
-	do {
-		tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
-		if (tag >= bqt->max_depth) {
-			pr_err("Tag allocation failure\n");
-			goto end;
-		}
-	} while (test_and_set_bit(tag, bqt->tag_map));
+	struct request_queue *q = sc->request->q;
+	struct request *dummy;
 
-	bqt->tag_index[tag] = sc->request;
-	sc->request->tag = tag;
-	sc->tag = tag;
-	if (!sc->request->special)
-		sc->request->special = sc;
+	dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
+	if (IS_ERR(dummy))
+		return SCSI_NO_TAG;
 
-	ret = tag;
+	sc->tag = sc->request->tag = dummy->tag;
+	sc->request->special = sc;
 
-end:
-	return ret;
+	return dummy->tag;
 }
 
 /**
@@ -2308,20 +2286,9 @@ end:
 static inline void
 fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-	int tag = sc->request->tag;
+	struct request *dummy = sc->request->special;
 
-	if (tag == SCSI_NO_TAG)
-		return;
-
-	BUG_ON(!bqt || !bqt->tag_index[tag]);
-	if (!bqt)
-		return;
-
-	bqt->tag_index[tag] = NULL;
-	clear_bit(tag, bqt->tag_map);
-
-	return;
+	blk_mq_free_request(dummy);
 }
 
 /*
@@ -2380,19 +2347,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	tag = sc->request->tag;
 	if (unlikely(tag < 0)) {
 		/*
-		 * XXX(hch): current the midlayer fakes up a struct
-		 * request for the explicit reset ioctls, and those
-		 * don't have a tag allocated to them.  The below
-		 * code pokes into midlayer structures to paper over
-		 * this design issue, but that won't work for blk-mq.
-		 *
-		 * Either someone who can actually test the hardware
-		 * will have to come up with a similar hack for the
-		 * blk-mq case, or we'll have to bite the bullet and
-		 * fix the way the EH ioctls work for real, but until
-		 * that happens we fail these explicit requests here.
+		 * Really should fix the midlayer to pass in a proper
+		 * request for ioctls...
 		 */
-
 		tag = fnic_scsi_host_start_tag(fnic, sc);
 		if (unlikely(tag == SCSI_NO_TAG))
 			goto fnic_device_reset_end;
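The tag hack above works because blk-mq can hand out a tag without a real I/O: the reset-ioctl path, which arrives with no tagged request, allocates a throwaway request purely for its ->tag and frees it when the reset completes. A sketch of that trick (hypothetical helper, error handling trimmed):

#include <linux/blk-mq.h>

static int borrow_tag(struct request_queue *q)
{
	struct request *dummy;

	/* BLK_MQ_REQ_NOWAIT: fail immediately instead of sleeping for a tag */
	dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(dummy))
		return -1;
	/* ... use dummy->tag, then release it with blk_mq_free_request() ... */
	return dummy->tag;
}

The allocated request is never issued; it exists only so the tag space stays consistent with in-flight commands.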
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index ba69d6112fa1..434447ea24b8 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -195,9 +195,9 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 {
 	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
 
-	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
 		ring->size_unaligned,
-		&ring->base_addr_unaligned);
+		&ring->base_addr_unaligned, GFP_KERNEL);
 
 	if (!ring->descs_unaligned) {
 		printk(KERN_ERR
@@ -221,7 +221,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 {
 	if (ring->descs) {
-		pci_free_consistent(vdev->pdev,
+		dma_free_coherent(&vdev->pdev->dev,
 			ring->size_unaligned,
 			ring->descs_unaligned,
 			ring->base_addr_unaligned);
@@ -298,9 +298,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
 	int err = 0;
 
 	if (!vdev->fw_info) {
-		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_fw_info),
-			&vdev->fw_info_pa);
+			&vdev->fw_info_pa, GFP_KERNEL);
 		if (!vdev->fw_info)
 			return -ENOMEM;
 
@@ -361,8 +361,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
 	int wait = 1000;
 
 	if (!vdev->stats) {
-		vdev->stats = pci_alloc_consistent(vdev->pdev,
-			sizeof(struct vnic_stats), &vdev->stats_pa);
+		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+			sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
 		if (!vdev->stats)
 			return -ENOMEM;
 	}
@@ -523,9 +523,9 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
 	int wait = 1000;
 
 	if (!vdev->notify) {
-		vdev->notify = pci_alloc_consistent(vdev->pdev,
+		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
 			sizeof(struct vnic_devcmd_notify),
-			&vdev->notify_pa);
+			&vdev->notify_pa, GFP_KERNEL);
 		if (!vdev->notify)
 			return -ENOMEM;
 	}
@@ -647,21 +647,21 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
 {
 	if (vdev) {
 		if (vdev->notify)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_notify),
 				vdev->notify,
 				vdev->notify_pa);
 		if (vdev->linkstatus)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(u32),
 				vdev->linkstatus,
 				vdev->linkstatus_pa);
 		if (vdev->stats)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_stats),
 				vdev->stats, vdev->stats_pa);
 		if (vdev->fw_info)
-			pci_free_consistent(vdev->pdev,
+			dma_free_coherent(&vdev->pdev->dev,
 				sizeof(struct vnic_devcmd_fw_info),
 				vdev->fw_info, vdev->fw_info_pa);
 		kfree(vdev);
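The vnic_dev.c hunks above are again mechanical: the legacy pci_alloc_consistent(pdev, sz, &pa) wrapper was dma_alloc_coherent(&pdev->dev, sz, &pa, GFP_ATOMIC) in disguise, so spelling out GFP_KERNEL, as the converted code does, is a mild relaxation that is safe in these process-context allocation paths. A sketch of the shape:

#include <linux/dma-mapping.h>

static void *alloc_ring(struct device *dev, size_t sz, dma_addr_t *pa)
{
	/* coherent memory: visible to CPU and device without explicit syncs */
	return dma_alloc_coherent(dev, sz, pa, GFP_KERNEL);
}

Freeing mirrors it with dma_free_coherent(dev, sz, cpu_addr, *pa), exactly as in vnic_dev_unregister() above.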
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 6c7d2e201abe..0ddb53c8a2e2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -34,6 +34,7 @@
 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
 #define HISI_SAS_RESET_BIT	0
 #define HISI_SAS_REJECT_CMD_BIT	1
+#define HISI_SAS_RESERVED_IPTT_CNT  96
 
 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
 #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -217,7 +218,7 @@ struct hisi_sas_hw {
 	int (*hw_init)(struct hisi_hba *hisi_hba);
 	void (*setup_itct)(struct hisi_hba *hisi_hba,
 			   struct hisi_sas_device *device);
-	int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx,
+	int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
 				struct domain_device *device);
 	struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
 	void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
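HISI_SAS_RESERVED_IPTT_CNT partitions the hardware tag (IPTT) space: the bottom of the range mirrors the block layer's tags one-to-one, and only the top 96 entries are handed out from the driver's own bitmap, for internal commands that carry no scsi_cmnd. A sketch of that allocation policy, with illustrative numbers and a hypothetical helper:

#include <linux/bitops.h>
#include <scsi/scsi_cmnd.h>

#define MAX_COMMANDS		4096	/* illustrative queue depth */
#define RESERVED_IPTT_CNT	96

static int alloc_iptt(unsigned long *bitmap, struct scsi_cmnd *cmd)
{
	int idx;

	if (cmd)			/* normal I/O: reuse the blk-mq tag */
		return cmd->request->tag;

	/* internal command: search only the reserved window at the top */
	idx = find_next_zero_bit(bitmap, MAX_COMMANDS,
				 MAX_COMMANDS - RESERVED_IPTT_CNT);
	if (idx >= MAX_COMMANDS)
		return -1;
	set_bit(idx, bitmap);
	return idx;
}

Because normal I/O tags are owned by blk-mq, the driver's bitmap (and its lock) is touched only for the reserved window, which is what the hisi_sas_main.c hunks below implement.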
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index a4e2e6aa9a6b..b3f01d5b821b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -183,7 +183,14 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
 
 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
 {
-	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+	unsigned long flags;
+
+	if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
+	    hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
+		spin_lock_irqsave(&hisi_hba->lock, flags);
+		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+		spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	}
 }
 
 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
@@ -193,24 +200,34 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
 	set_bit(slot_idx, bitmap);
 }
 
-static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
+static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
+				     struct scsi_cmnd *scsi_cmnd)
 {
-	unsigned int index;
+	int index;
 	void *bitmap = hisi_hba->slot_index_tags;
+	unsigned long flags;
+
+	if (scsi_cmnd)
+		return scsi_cmnd->request->tag;
 
+	spin_lock_irqsave(&hisi_hba->lock, flags);
 	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
 			hisi_hba->last_slot_index + 1);
 	if (index >= hisi_hba->slot_index_count) {
-		index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
-					   0);
-		if (index >= hisi_hba->slot_index_count)
+		index = find_next_zero_bit(bitmap,
+				hisi_hba->slot_index_count,
+				hisi_hba->hw->max_command_entries -
+				HISI_SAS_RESERVED_IPTT_CNT);
+		if (index >= hisi_hba->slot_index_count) {
+			spin_unlock_irqrestore(&hisi_hba->lock, flags);
 			return -SAS_QUEUE_FULL;
+		}
 	}
 	hisi_sas_slot_index_set(hisi_hba, index);
-	*slot_idx = index;
 	hisi_hba->last_slot_index = index;
+	spin_unlock_irqrestore(&hisi_hba->lock, flags);
 
-	return 0;
+	return index;
 }
 
 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
@@ -249,9 +266,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
 
 	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
 
-	spin_lock_irqsave(&hisi_hba->lock, flags);
 	hisi_sas_slot_index_free(hisi_hba, slot->idx);
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
 }
 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
 
@@ -287,13 +302,13 @@ static int hisi_sas_task_prep(struct sas_task *task,
 			      int *pass)
 {
 	struct domain_device *device = task->dev;
-	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+	struct hisi_hba *hisi_hba;
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	struct hisi_sas_port *port;
 	struct hisi_sas_slot *slot;
 	struct hisi_sas_cmd_hdr *cmd_hdr_base;
 	struct asd_sas_port *sas_port = device->port;
-	struct device *dev = hisi_hba->dev;
+	struct device *dev;
 	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
 	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
 	struct hisi_sas_dq *dq;
@@ -314,6 +329,9 @@ static int hisi_sas_task_prep(struct sas_task *task,
 		return -ECOMM;
 	}
 
+	hisi_hba = dev_to_hisi_hba(device);
+	dev = hisi_hba->dev;
+
 	if (DEV_IS_GONE(sas_dev)) {
 		if (sas_dev)
 			dev_info(dev, "task prep: device %d not ready\n",
@@ -381,16 +399,27 @@ static int hisi_sas_task_prep(struct sas_task *task,
 		goto err_out_dma_unmap;
 	}
 
-	spin_lock_irqsave(&hisi_hba->lock, flags);
 	if (hisi_hba->hw->slot_index_alloc)
-		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
-						    device);
-	else
-		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
-	if (rc)
+		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+	else {
+		struct scsi_cmnd *scsi_cmnd = NULL;
+
+		if (task->uldd_task) {
+			struct ata_queued_cmd *qc;
+
+			if (dev_is_sata(device)) {
+				qc = task->uldd_task;
+				scsi_cmnd = qc->scsicmd;
+			} else {
+				scsi_cmnd = task->uldd_task;
+			}
+		}
+		rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
+	}
+	if (rc < 0)
 		goto err_out_dma_unmap;
 
+	slot_idx = rc;
 	slot = &hisi_hba->slot_info[slot_idx];
 
 	spin_lock_irqsave(&dq->lock, flags);
@@ -451,9 +480,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
 	return 0;
 
 err_out_tag:
-	spin_lock_irqsave(&hisi_hba->lock, flags);
 	hisi_sas_slot_index_free(hisi_hba, slot_idx);
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
 	if (!sas_protocol_ata(task->task_proto)) {
 		if (task->num_scatter) {
@@ -904,6 +931,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
 	_r.maximum_linkrate = max;
 	_r.minimum_linkrate = min;
 
+	sas_phy->phy->maximum_linkrate = max;
+	sas_phy->phy->minimum_linkrate = min;
+
 	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
 	msleep(100);
 	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
@@ -950,8 +980,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
 
 static void hisi_sas_task_done(struct sas_task *task)
 {
-	if (!del_timer(&task->slow_task->timer))
-		return;
+	del_timer(&task->slow_task->timer);
 	complete(&task->slow_task->completion);
 }
 
@@ -960,13 +989,17 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
 	struct sas_task_slow *slow = from_timer(slow, t, timer);
961 struct sas_task *task = slow->task; 990 struct sas_task *task = slow->task;
962 unsigned long flags; 991 unsigned long flags;
992 bool is_completed = true;
963 993
964 spin_lock_irqsave(&task->task_state_lock, flags); 994 spin_lock_irqsave(&task->task_state_lock, flags);
965 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) 995 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
966 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 996 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
997 is_completed = false;
998 }
967 spin_unlock_irqrestore(&task->task_state_lock, flags); 999 spin_unlock_irqrestore(&task->task_state_lock, flags);
968 1000
969 complete(&task->slow_task->completion); 1001 if (!is_completed)
1002 complete(&task->slow_task->completion);
970} 1003}
971 1004
972#define TASK_TIMEOUT 20 1005#define TASK_TIMEOUT 20
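Taken together, the two hunks above make completion fire exactly once: hisi_sas_task_done() now always completes after deleting the timer, and hisi_sas_tmf_timedout() completes only when it was the one that marked the task aborted (the I/O completion path checks SAS_TASK_STATE_ABORTED and backs off). A condensed sketch of the timeout side, keeping the driver's field names but dropping unrelated bookkeeping; it will not compile outside a kernel tree:

static void tmf_timedout_sketch(struct sas_task *task)
{
	bool is_completed = true;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;	/* normal completion has not run */
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* complete() only if hisi_sas_task_done() did not already */
	if (!is_completed)
		complete(&task->slow_task->completion);
}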
@@ -1019,8 +1052,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
1019 struct hisi_sas_slot *slot = task->lldd_task; 1052 struct hisi_sas_slot *slot = task->lldd_task;
1020 1053
1021 dev_err(dev, "abort tmf: TMF task timeout and not done\n"); 1054 dev_err(dev, "abort tmf: TMF task timeout and not done\n");
1022 if (slot) 1055 if (slot) {
1056 struct hisi_sas_cq *cq =
1057 &hisi_hba->cq[slot->dlvry_queue];
1058 /*
1059 * flush tasklet to avoid free'ing task
1060 * before using task in IO completion
1061 */
1062 tasklet_kill(&cq->tasklet);
1023 slot->task = NULL; 1063 slot->task = NULL;
1064 }
1024 1065
1025 goto ex_err; 1066 goto ex_err;
1026 } else 1067 } else
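The tasklet_kill() added above is a pattern that recurs throughout this series: before slot->task is cleared, the completion tasklet of the slot's delivery queue is flushed, so a completion already running on another CPU cannot dereference the task being torn down. In isolation (kernel-style sketch, field names as in the driver):

static void clear_timed_out_slot(struct hisi_hba *hisi_hba,
				 struct hisi_sas_slot *slot)
{
	struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

	/*
	 * tasklet_kill() waits for a currently-running tasklet to return,
	 * so past this point no completion can still be using slot->task.
	 */
	tasklet_kill(&cq->tasklet);
	slot->task = NULL;
}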
@@ -1396,6 +1437,17 @@ static int hisi_sas_abort_task(struct sas_task *task)
1396 1437
1397 spin_lock_irqsave(&task->task_state_lock, flags); 1438 spin_lock_irqsave(&task->task_state_lock, flags);
1398 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1439 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1440 struct hisi_sas_slot *slot = task->lldd_task;
1441 struct hisi_sas_cq *cq;
1442
1443 if (slot) {
1444 /*
1445 * flush tasklet to avoid free'ing task
1446 * before using task in IO completion
1447 */
1448 cq = &hisi_hba->cq[slot->dlvry_queue];
1449 tasklet_kill(&cq->tasklet);
1450 }
1399 spin_unlock_irqrestore(&task->task_state_lock, flags); 1451 spin_unlock_irqrestore(&task->task_state_lock, flags);
1400 rc = TMF_RESP_FUNC_COMPLETE; 1452 rc = TMF_RESP_FUNC_COMPLETE;
1401 goto out; 1453 goto out;
@@ -1451,12 +1503,19 @@ static int hisi_sas_abort_task(struct sas_task *task)
1451 /* SMP */ 1503 /* SMP */
1452 struct hisi_sas_slot *slot = task->lldd_task; 1504 struct hisi_sas_slot *slot = task->lldd_task;
1453 u32 tag = slot->idx; 1505 u32 tag = slot->idx;
1506 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
1454 1507
1455 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1508 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1456 HISI_SAS_INT_ABT_CMD, tag); 1509 HISI_SAS_INT_ABT_CMD, tag);
1457 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1510 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1458 task->lldd_task) 1511 task->lldd_task) {
1459 hisi_sas_do_release_task(hisi_hba, task, slot); 1512 /*
1513 * flush tasklet to avoid free'ing task
1514 * before using task in IO completion
1515 */
1516 tasklet_kill(&cq->tasklet);
1517 slot->task = NULL;
1518 }
1460 } 1519 }
1461 1520
1462out: 1521out:
@@ -1705,14 +1764,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1705 port = to_hisi_sas_port(sas_port); 1764 port = to_hisi_sas_port(sas_port);
1706 1765
1707 /* simply get a slot and send abort command */ 1766 /* simply get a slot and send abort command */
1708 spin_lock_irqsave(&hisi_hba->lock, flags); 1767 rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
1709 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 1768 if (rc < 0)
1710 if (rc) {
1711 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1712 goto err_out; 1769 goto err_out;
1713 }
1714 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1715 1770
1771 slot_idx = rc;
1716 slot = &hisi_hba->slot_info[slot_idx]; 1772 slot = &hisi_hba->slot_info[slot_idx];
1717 1773
1718 spin_lock_irqsave(&dq->lock, flags_dq); 1774 spin_lock_irqsave(&dq->lock, flags_dq);
@@ -1748,7 +1804,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1748 spin_lock_irqsave(&task->task_state_lock, flags); 1804 spin_lock_irqsave(&task->task_state_lock, flags);
1749 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 1805 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1750 spin_unlock_irqrestore(&task->task_state_lock, flags); 1806 spin_unlock_irqrestore(&task->task_state_lock, flags);
1751
1752 WRITE_ONCE(slot->ready, 1); 1807 WRITE_ONCE(slot->ready, 1);
1753 /* send abort command to the chip */ 1808 /* send abort command to the chip */
1754 spin_lock_irqsave(&dq->lock, flags); 1809 spin_lock_irqsave(&dq->lock, flags);
@@ -1759,9 +1814,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1759 return 0; 1814 return 0;
1760 1815
1761err_out_tag: 1816err_out_tag:
1762 spin_lock_irqsave(&hisi_hba->lock, flags);
1763 hisi_sas_slot_index_free(hisi_hba, slot_idx); 1817 hisi_sas_slot_index_free(hisi_hba, slot_idx);
1764 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1765err_out: 1818err_out:
1766 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); 1819 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
1767 1820
@@ -1823,8 +1876,16 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1823 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1876 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1824 struct hisi_sas_slot *slot = task->lldd_task; 1877 struct hisi_sas_slot *slot = task->lldd_task;
1825 1878
1826 if (slot) 1879 if (slot) {
1880 struct hisi_sas_cq *cq =
1881 &hisi_hba->cq[slot->dlvry_queue];
1882 /*
1883 * flush tasklet to avoid free'ing task
1884 * before using task in IO completion
1885 */
1886 tasklet_kill(&cq->tasklet);
1827 slot->task = NULL; 1887 slot->task = NULL;
1888 }
1828 dev_err(dev, "internal task abort: timeout and not done.\n"); 1889 dev_err(dev, "internal task abort: timeout and not done.\n");
1829 res = -EIO; 1890 res = -EIO;
1830 goto exit; 1891 goto exit;
@@ -1861,10 +1922,6 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
1861 hisi_sas_port_notify_formed(sas_phy); 1922 hisi_sas_port_notify_formed(sas_phy);
1862} 1923}
1863 1924
1864static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
1865{
1866}
1867
1868static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 1925static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
1869 u8 reg_index, u8 reg_count, u8 *write_data) 1926 u8 reg_index, u8 reg_count, u8 *write_data)
1870{ 1927{
@@ -1954,10 +2011,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
1954 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2011 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
1955 .lldd_lu_reset = hisi_sas_lu_reset, 2012 .lldd_lu_reset = hisi_sas_lu_reset,
1956 .lldd_query_task = hisi_sas_query_task, 2013 .lldd_query_task = hisi_sas_query_task,
1957 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2014 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
1958 .lldd_port_formed = hisi_sas_port_formed, 2015 .lldd_port_formed = hisi_sas_port_formed,
1959 .lldd_port_deformed = hisi_sas_port_deformed,
1960 .lldd_write_gpio = hisi_sas_write_gpio, 2016 .lldd_write_gpio = hisi_sas_write_gpio,
1961}; 2017};
1962 2018
1963void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2019void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -2120,6 +2176,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
2120 hisi_sas_init_mem(hisi_hba); 2176 hisi_sas_init_mem(hisi_hba);
2121 2177
2122 hisi_sas_slot_index_init(hisi_hba); 2178 hisi_sas_slot_index_init(hisi_hba);
2179 hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
2180 HISI_SAS_RESERVED_IPTT_CNT;
2123 2181
2124 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2182 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
2125 if (!hisi_hba->wq) { 2183 if (!hisi_hba->wq) {
@@ -2323,8 +2381,15 @@ int hisi_sas_probe(struct platform_device *pdev,
2323 shost->max_channel = 1; 2381 shost->max_channel = 1;
2324 shost->max_cmd_len = 16; 2382 shost->max_cmd_len = 16;
2325 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT); 2383 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2326 shost->can_queue = hisi_hba->hw->max_command_entries; 2384 if (hisi_hba->hw->slot_index_alloc) {
2327 shost->cmd_per_lun = hisi_hba->hw->max_command_entries; 2385 shost->can_queue = hisi_hba->hw->max_command_entries;
2386 shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
2387 } else {
2388 shost->can_queue = hisi_hba->hw->max_command_entries -
2389 HISI_SAS_RESERVED_IPTT_CNT;
2390 shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
2391 HISI_SAS_RESERVED_IPTT_CNT;
2392 }
2328 2393
2329 sha->sas_ha_name = DRV_NAME; 2394 sha->sas_ha_name = DRV_NAME;
2330 sha->dev = hisi_hba->dev; 2395 sha->dev = hisi_hba->dev;
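The probe-time arithmetic falls straight out of the split tag space: hardware with its own slot_index_alloc() quirk (v2) keeps the full queue depth, everything else advertises only the non-reserved tags to the block layer. With assumed values (the real constants live in hisi_sas.h):

#include <stdio.h>

#define MAX_COMMAND_ENTRIES		4096	/* assumed for illustration */
#define HISI_SAS_RESERVED_IPTT_CNT	96	/* assumed for illustration */

int main(void)
{
	int can_queue = MAX_COMMAND_ENTRIES - HISI_SAS_RESERVED_IPTT_CNT;

	/* blk-mq owns tags [0, can_queue); the driver's bitmap hands out
	 * [can_queue, MAX_COMMAND_ENTRIES) for TMFs and internal aborts */
	printf("can_queue=%d, reserved IPTTs=%d..%d\n",
	       can_queue, can_queue, MAX_COMMAND_ENTRIES - 1);
	return 0;
}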
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 8f60f0e04599..f0e457e6884e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1809,7 +1809,6 @@ static struct scsi_host_template sht_v1_hw = {
1809 .scan_start = hisi_sas_scan_start, 1809 .scan_start = hisi_sas_scan_start,
1810 .change_queue_depth = sas_change_queue_depth, 1810 .change_queue_depth = sas_change_queue_depth,
1811 .bios_param = sas_bios_param, 1811 .bios_param = sas_bios_param,
1812 .can_queue = 1,
1813 .this_id = -1, 1812 .this_id = -1,
1814 .sg_tablesize = SG_ALL, 1813 .sg_tablesize = SG_ALL,
1815 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 1814 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9c5c5a601332..cc36b6473e98 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -770,7 +770,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
770 770
771/* This function needs to be protected from pre-emption. */ 771/* This function needs to be protected from pre-emption. */
772static int 772static int
773slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, 773slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
774 struct domain_device *device) 774 struct domain_device *device)
775{ 775{
776 int sata_dev = dev_is_sata(device); 776 int sata_dev = dev_is_sata(device);
@@ -778,6 +778,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
778 struct hisi_sas_device *sas_dev = device->lldd_dev; 778 struct hisi_sas_device *sas_dev = device->lldd_dev;
779 int sata_idx = sas_dev->sata_idx; 779 int sata_idx = sas_dev->sata_idx;
780 int start, end; 780 int start, end;
781 unsigned long flags;
781 782
782 if (!sata_dev) { 783 if (!sata_dev) {
783 /* 784 /*
@@ -801,11 +802,14 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
801 end = 64 * (sata_idx + 2); 802 end = 64 * (sata_idx + 2);
802 } 803 }
803 804
805 spin_lock_irqsave(&hisi_hba->lock, flags);
804 while (1) { 806 while (1) {
805 start = find_next_zero_bit(bitmap, 807 start = find_next_zero_bit(bitmap,
806 hisi_hba->slot_index_count, start); 808 hisi_hba->slot_index_count, start);
807 if (start >= end) 809 if (start >= end) {
810 spin_unlock_irqrestore(&hisi_hba->lock, flags);
808 return -SAS_QUEUE_FULL; 811 return -SAS_QUEUE_FULL;
812 }
809 /* 813 /*
810 * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0. 814 * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
811 */ 815 */
@@ -815,8 +819,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
815 } 819 }
816 820
817 set_bit(start, bitmap); 821 set_bit(start, bitmap);
818 *slot_idx = start; 822 spin_unlock_irqrestore(&hisi_hba->lock, flags);
819 return 0; 823 return start;
820} 824}
821 825
822static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx) 826static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
@@ -2483,7 +2487,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
2483 } 2487 }
2484 2488
2485out: 2489out:
2486 hisi_sas_slot_task_free(hisi_hba, task, slot);
2487 sts = ts->stat; 2490 sts = ts->stat;
2488 spin_lock_irqsave(&task->task_state_lock, flags); 2491 spin_lock_irqsave(&task->task_state_lock, flags);
2489 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 2492 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -2493,6 +2496,7 @@ out:
2493 } 2496 }
2494 task->task_state_flags |= SAS_TASK_STATE_DONE; 2497 task->task_state_flags |= SAS_TASK_STATE_DONE;
2495 spin_unlock_irqrestore(&task->task_state_lock, flags); 2498 spin_unlock_irqrestore(&task->task_state_lock, flags);
2499 hisi_sas_slot_task_free(hisi_hba, task, slot);
2496 2500
2497 if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { 2501 if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
2498 spin_lock_irqsave(&device->done_lock, flags); 2502 spin_lock_irqsave(&device->done_lock, flags);
@@ -3560,7 +3564,6 @@ static struct scsi_host_template sht_v2_hw = {
3560 .scan_start = hisi_sas_scan_start, 3564 .scan_start = hisi_sas_scan_start,
3561 .change_queue_depth = sas_change_queue_depth, 3565 .change_queue_depth = sas_change_queue_depth,
3562 .bios_param = sas_bios_param, 3566 .bios_param = sas_bios_param,
3563 .can_queue = 1,
3564 .this_id = -1, 3567 .this_id = -1,
3565 .sg_tablesize = SG_ALL, 3568 .sg_tablesize = SG_ALL,
3566 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 3569 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
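Note also the reordering in slot_complete_v2_hw() above (mirrored in the v3 hunk below): hisi_sas_slot_task_free() now runs only after SAS_TASK_STATE_DONE is set, so an abort racing with completion either sees DONE or finds the slot still intact, never a recycled slot with a stale task. Condensed, as a kernel-style sketch:

static void complete_tail_sketch(struct hisi_hba *hisi_hba,
				 struct sas_task *task,
				 struct hisi_sas_slot *slot)
{
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* recycle the slot only after DONE is visible to abort paths */
	hisi_sas_slot_task_free(hisi_hba, task, slot);
}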
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 08b503e274b8..bd4ce38b98d2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -127,6 +127,7 @@
127#define PHY_CTRL_RESET_OFF 0 127#define PHY_CTRL_RESET_OFF 0
128#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) 128#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
129#define SL_CFG (PORT_BASE + 0x84) 129#define SL_CFG (PORT_BASE + 0x84)
130#define AIP_LIMIT (PORT_BASE + 0x90)
130#define SL_CONTROL (PORT_BASE + 0x94) 131#define SL_CONTROL (PORT_BASE + 0x94)
131#define SL_CONTROL_NOTIFY_EN_OFF 0 132#define SL_CONTROL_NOTIFY_EN_OFF 0
132#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) 133#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
@@ -431,6 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
431 (u32)((1ULL << hisi_hba->queue_count) - 1)); 432 (u32)((1ULL << hisi_hba->queue_count) - 1));
432 hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); 433 hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
433 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); 434 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
435 hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
434 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); 436 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
435 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); 437 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
436 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); 438 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
@@ -441,7 +443,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
441 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); 443 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
442 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); 444 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
443 if (pdev->revision >= 0x21) 445 if (pdev->revision >= 0x21)
444 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff); 446 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7aff);
445 else 447 else
446 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff); 448 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
447 hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); 449 hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
@@ -495,6 +497,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
495 497
496 /* used for 12G negotiate */ 498 /* used for 12G negotiate */
497 hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); 499 hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
500 hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
498 } 501 }
499 502
500 for (i = 0; i < hisi_hba->queue_count; i++) { 503 for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -1751,7 +1754,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1751 } 1754 }
1752 1755
1753out: 1756out:
1754 hisi_sas_slot_task_free(hisi_hba, task, slot);
1755 sts = ts->stat; 1757 sts = ts->stat;
1756 spin_lock_irqsave(&task->task_state_lock, flags); 1758 spin_lock_irqsave(&task->task_state_lock, flags);
1757 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 1759 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -1761,6 +1763,7 @@ out:
1761 } 1763 }
1762 task->task_state_flags |= SAS_TASK_STATE_DONE; 1764 task->task_state_flags |= SAS_TASK_STATE_DONE;
1763 spin_unlock_irqrestore(&task->task_state_lock, flags); 1765 spin_unlock_irqrestore(&task->task_state_lock, flags);
1766 hisi_sas_slot_task_free(hisi_hba, task, slot);
1764 1767
1765 if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) { 1768 if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
1766 spin_lock_irqsave(&device->done_lock, flags); 1769 spin_lock_irqsave(&device->done_lock, flags);
@@ -2098,7 +2101,6 @@ static struct scsi_host_template sht_v3_hw = {
2098 .scan_start = hisi_sas_scan_start, 2101 .scan_start = hisi_sas_scan_start,
2099 .change_queue_depth = sas_change_queue_depth, 2102 .change_queue_depth = sas_change_queue_depth,
2100 .bios_param = sas_bios_param, 2103 .bios_param = sas_bios_param,
2101 .can_queue = 1,
2102 .this_id = -1, 2104 .this_id = -1,
2103 .sg_tablesize = SG_ALL, 2105 .sg_tablesize = SG_ALL,
2104 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 2106 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
@@ -2108,6 +2110,7 @@ static struct scsi_host_template sht_v3_hw = {
2108 .target_destroy = sas_target_destroy, 2110 .target_destroy = sas_target_destroy,
2109 .ioctl = sas_ioctl, 2111 .ioctl = sas_ioctl,
2110 .shost_attrs = host_attrs, 2112 .shost_attrs = host_attrs,
2113 .tag_alloc_policy = BLK_TAG_ALLOC_RR,
2111}; 2114};
2112 2115
2113static const struct hisi_sas_hw hisi_sas_v3_hw = { 2116static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -2245,8 +2248,10 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2245 shost->max_channel = 1; 2248 shost->max_channel = 1;
2246 shost->max_cmd_len = 16; 2249 shost->max_cmd_len = 16;
2247 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT); 2250 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2248 shost->can_queue = hisi_hba->hw->max_command_entries; 2251 shost->can_queue = hisi_hba->hw->max_command_entries -
2249 shost->cmd_per_lun = hisi_hba->hw->max_command_entries; 2252 HISI_SAS_RESERVED_IPTT_CNT;
2253 shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
2254 HISI_SAS_RESERVED_IPTT_CNT;
2250 2255
2251 sha->sas_ha_name = DRV_NAME; 2256 sha->sas_ha_name = DRV_NAME;
2252 sha->dev = dev; 2257 sha->dev = dev;
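With blk-mq tags now mapping one-to-one onto hardware IPTTs, v3 also opts into round-robin tag allocation; the FIFO default would hand the lowest free tag straight back, while BLK_TAG_ALLOC_RR spaces out reuse of a just-freed IPTT. In template form (fragment only; a real scsi_host_template has many more fields, and the name here is hypothetical):

static struct scsi_host_template example_sht = {
	.name			= "example_sas",
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	/* rotate through the tag space instead of lowest-free-first */
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};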
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c120929d4ffe..c9cccf35e9d7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2240,8 +2240,8 @@ static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2240 2240
2241 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; 2241 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2242 chain_size = le32_to_cpu(cp->sg[0].length); 2242 chain_size = le32_to_cpu(cp->sg[0].length);
2243 temp64 = pci_map_single(h->pdev, chain_block, chain_size, 2243 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
2244 PCI_DMA_TODEVICE); 2244 DMA_TO_DEVICE);
2245 if (dma_mapping_error(&h->pdev->dev, temp64)) { 2245 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2246 /* prevent subsequent unmapping */ 2246 /* prevent subsequent unmapping */
2247 cp->sg->address = 0; 2247 cp->sg->address = 0;
@@ -2261,7 +2261,7 @@ static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2261 chain_sg = cp->sg; 2261 chain_sg = cp->sg;
2262 temp64 = le64_to_cpu(chain_sg->address); 2262 temp64 = le64_to_cpu(chain_sg->address);
2263 chain_size = le32_to_cpu(cp->sg[0].length); 2263 chain_size = le32_to_cpu(cp->sg[0].length);
2264 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); 2264 dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
2265} 2265}
2266 2266
2267static int hpsa_map_sg_chain_block(struct ctlr_info *h, 2267static int hpsa_map_sg_chain_block(struct ctlr_info *h,
@@ -2277,8 +2277,8 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2277 chain_len = sizeof(*chain_sg) * 2277 chain_len = sizeof(*chain_sg) *
2278 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); 2278 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2279 chain_sg->Len = cpu_to_le32(chain_len); 2279 chain_sg->Len = cpu_to_le32(chain_len);
2280 temp64 = pci_map_single(h->pdev, chain_block, chain_len, 2280 temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
2281 PCI_DMA_TODEVICE); 2281 DMA_TO_DEVICE);
2282 if (dma_mapping_error(&h->pdev->dev, temp64)) { 2282 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2283 /* prevent subsequent unmapping */ 2283 /* prevent subsequent unmapping */
2284 chain_sg->Addr = cpu_to_le64(0); 2284 chain_sg->Addr = cpu_to_le64(0);
@@ -2297,8 +2297,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2297 return; 2297 return;
2298 2298
2299 chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; 2299 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2300 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), 2300 dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
2301 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE); 2301 le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
2302} 2302}
2303 2303
2304 2304
@@ -2759,13 +2759,13 @@ static void complete_scsi_command(struct CommandList *cp)
2759 return hpsa_cmd_free_and_done(h, cp, cmd); 2759 return hpsa_cmd_free_and_done(h, cp, cmd);
2760} 2760}
2761 2761
2762static void hpsa_pci_unmap(struct pci_dev *pdev, 2762static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
2763 struct CommandList *c, int sg_used, int data_direction) 2763 int sg_used, enum dma_data_direction data_direction)
2764{ 2764{
2765 int i; 2765 int i;
2766 2766
2767 for (i = 0; i < sg_used; i++) 2767 for (i = 0; i < sg_used; i++)
2768 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), 2768 dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
2769 le32_to_cpu(c->SG[i].Len), 2769 le32_to_cpu(c->SG[i].Len),
2770 data_direction); 2770 data_direction);
2771} 2771}
@@ -2774,17 +2774,17 @@ static int hpsa_map_one(struct pci_dev *pdev,
2774 struct CommandList *cp, 2774 struct CommandList *cp,
2775 unsigned char *buf, 2775 unsigned char *buf,
2776 size_t buflen, 2776 size_t buflen,
2777 int data_direction) 2777 enum dma_data_direction data_direction)
2778{ 2778{
2779 u64 addr64; 2779 u64 addr64;
2780 2780
2781 if (buflen == 0 || data_direction == PCI_DMA_NONE) { 2781 if (buflen == 0 || data_direction == DMA_NONE) {
2782 cp->Header.SGList = 0; 2782 cp->Header.SGList = 0;
2783 cp->Header.SGTotal = cpu_to_le16(0); 2783 cp->Header.SGTotal = cpu_to_le16(0);
2784 return 0; 2784 return 0;
2785 } 2785 }
2786 2786
2787 addr64 = pci_map_single(pdev, buf, buflen, data_direction); 2787 addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
2788 if (dma_mapping_error(&pdev->dev, addr64)) { 2788 if (dma_mapping_error(&pdev->dev, addr64)) {
2789 /* Prevent subsequent unmap of something never mapped */ 2789 /* Prevent subsequent unmap of something never mapped */
2790 cp->Header.SGList = 0; 2790 cp->Header.SGList = 0;
@@ -2845,7 +2845,8 @@ static u32 lockup_detected(struct ctlr_info *h)
2845 2845
2846#define MAX_DRIVER_CMD_RETRIES 25 2846#define MAX_DRIVER_CMD_RETRIES 25
2847static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, 2847static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2848 struct CommandList *c, int data_direction, unsigned long timeout_msecs) 2848 struct CommandList *c, enum dma_data_direction data_direction,
2849 unsigned long timeout_msecs)
2849{ 2850{
2850 int backoff_time = 10, retry_count = 0; 2851 int backoff_time = 10, retry_count = 0;
2851 int rc; 2852 int rc;
@@ -2969,8 +2970,8 @@ static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
2969 rc = -1; 2970 rc = -1;
2970 goto out; 2971 goto out;
2971 } 2972 }
2972 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 2973 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
2973 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 2974 NO_TIMEOUT);
2974 if (rc) 2975 if (rc)
2975 goto out; 2976 goto out;
2976 ei = c->err_info; 2977 ei = c->err_info;
@@ -3022,8 +3023,8 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3022 rc = -1; 3023 rc = -1;
3023 goto out; 3024 goto out;
3024 } 3025 }
3025 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3026 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3026 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3027 NO_TIMEOUT);
3027 if (rc) 3028 if (rc)
3028 goto out; 3029 goto out;
3029 ei = c->err_info; 3030 ei = c->err_info;
@@ -3306,8 +3307,8 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
3306 cmd_free(h, c); 3307 cmd_free(h, c);
3307 return -1; 3308 return -1;
3308 } 3309 }
3309 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3310 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3310 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3311 NO_TIMEOUT);
3311 if (rc) 3312 if (rc)
3312 goto out; 3313 goto out;
3313 ei = c->err_info; 3314 ei = c->err_info;
@@ -3349,8 +3350,8 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3349 c->Request.CDB[2] = bmic_device_index & 0xff; 3350 c->Request.CDB[2] = bmic_device_index & 0xff;
3350 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 3351 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3351 3352
3352 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3353 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3353 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3354 NO_TIMEOUT);
3354 if (rc) 3355 if (rc)
3355 goto out; 3356 goto out;
3356 ei = c->err_info; 3357 ei = c->err_info;
@@ -3377,8 +3378,8 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
3377 if (rc) 3378 if (rc)
3378 goto out; 3379 goto out;
3379 3380
3380 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3381 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3381 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3382 NO_TIMEOUT);
3382 if (rc) 3383 if (rc)
3383 goto out; 3384 goto out;
3384 ei = c->err_info; 3385 ei = c->err_info;
@@ -3408,7 +3409,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3408 c->Request.CDB[2] = bmic_device_index & 0xff; 3409 c->Request.CDB[2] = bmic_device_index & 0xff;
3409 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 3410 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3410 3411
3411 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, 3412 hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3412 NO_TIMEOUT); 3413 NO_TIMEOUT);
3413 ei = c->err_info; 3414 ei = c->err_info;
3414 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3415 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
@@ -3484,7 +3485,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
3484 else 3485 else
3485 c->Request.CDB[5] = 0; 3486 c->Request.CDB[5] = 0;
3486 3487
3487 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, 3488 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3488 NO_TIMEOUT); 3489 NO_TIMEOUT);
3489 if (rc) 3490 if (rc)
3490 goto out; 3491 goto out;
@@ -3731,8 +3732,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3731 } 3732 }
3732 if (extended_response) 3733 if (extended_response)
3733 c->Request.CDB[1] = extended_response; 3734 c->Request.CDB[1] = extended_response;
3734 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3735 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
3735 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 3736 NO_TIMEOUT);
3736 if (rc) 3737 if (rc)
3737 goto out; 3738 goto out;
3738 ei = c->err_info; 3739 ei = c->err_info;
@@ -6320,8 +6321,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6320 6321
6321 /* Fill in the scatter gather information */ 6322 /* Fill in the scatter gather information */
6322 if (iocommand.buf_size > 0) { 6323 if (iocommand.buf_size > 0) {
6323 temp64 = pci_map_single(h->pdev, buff, 6324 temp64 = dma_map_single(&h->pdev->dev, buff,
6324 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 6325 iocommand.buf_size, DMA_BIDIRECTIONAL);
6325 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { 6326 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6326 c->SG[0].Addr = cpu_to_le64(0); 6327 c->SG[0].Addr = cpu_to_le64(0);
6327 c->SG[0].Len = cpu_to_le32(0); 6328 c->SG[0].Len = cpu_to_le32(0);
@@ -6335,7 +6336,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6335 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 6336 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6336 NO_TIMEOUT); 6337 NO_TIMEOUT);
6337 if (iocommand.buf_size > 0) 6338 if (iocommand.buf_size > 0)
6338 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); 6339 hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
6339 check_ioctl_unit_attention(h, c); 6340 check_ioctl_unit_attention(h, c);
6340 if (rc) { 6341 if (rc) {
6341 rc = -EIO; 6342 rc = -EIO;
@@ -6381,13 +6382,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6381 return -EINVAL; 6382 return -EINVAL;
6382 if (!capable(CAP_SYS_RAWIO)) 6383 if (!capable(CAP_SYS_RAWIO))
6383 return -EPERM; 6384 return -EPERM;
6384 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); 6385 ioc = vmemdup_user(argp, sizeof(*ioc));
6385 if (!ioc) { 6386 if (IS_ERR(ioc)) {
6386 status = -ENOMEM; 6387 status = PTR_ERR(ioc);
6387 goto cleanup1;
6388 }
6389 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6390 status = -EFAULT;
6391 goto cleanup1; 6388 goto cleanup1;
6392 } 6389 }
6393 if ((ioc->buf_size < 1) && 6390 if ((ioc->buf_size < 1) &&
@@ -6447,14 +6444,14 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6447 if (ioc->buf_size > 0) { 6444 if (ioc->buf_size > 0) {
6448 int i; 6445 int i;
6449 for (i = 0; i < sg_used; i++) { 6446 for (i = 0; i < sg_used; i++) {
6450 temp64 = pci_map_single(h->pdev, buff[i], 6447 temp64 = dma_map_single(&h->pdev->dev, buff[i],
6451 buff_size[i], PCI_DMA_BIDIRECTIONAL); 6448 buff_size[i], DMA_BIDIRECTIONAL);
6452 if (dma_mapping_error(&h->pdev->dev, 6449 if (dma_mapping_error(&h->pdev->dev,
6453 (dma_addr_t) temp64)) { 6450 (dma_addr_t) temp64)) {
6454 c->SG[i].Addr = cpu_to_le64(0); 6451 c->SG[i].Addr = cpu_to_le64(0);
6455 c->SG[i].Len = cpu_to_le32(0); 6452 c->SG[i].Len = cpu_to_le32(0);
6456 hpsa_pci_unmap(h->pdev, c, i, 6453 hpsa_pci_unmap(h->pdev, c, i,
6457 PCI_DMA_BIDIRECTIONAL); 6454 DMA_BIDIRECTIONAL);
6458 status = -ENOMEM; 6455 status = -ENOMEM;
6459 goto cleanup0; 6456 goto cleanup0;
6460 } 6457 }
@@ -6467,7 +6464,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6467 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 6464 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6468 NO_TIMEOUT); 6465 NO_TIMEOUT);
6469 if (sg_used) 6466 if (sg_used)
6470 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); 6467 hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
6471 check_ioctl_unit_attention(h, c); 6468 check_ioctl_unit_attention(h, c);
6472 if (status) { 6469 if (status) {
6473 status = -EIO; 6470 status = -EIO;
@@ -6505,7 +6502,7 @@ cleanup1:
6505 kfree(buff); 6502 kfree(buff);
6506 } 6503 }
6507 kfree(buff_size); 6504 kfree(buff_size);
6508 kfree(ioc); 6505 kvfree(ioc);
6509 return status; 6506 return status;
6510} 6507}
6511 6508
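The passthru-ioctl hunk above folds the kmalloc()+copy_from_user() pair into vmemdup_user(), which duplicates the user buffer and may fall back to vmalloc() for larger sizes, hence the matching kfree() to kvfree() switch in the cleanup path. The shape of the pattern, with a hypothetical payload type:

static struct payload *dup_ioctl_arg(void __user *argp)
{
	struct payload *p;

	p = vmemdup_user(argp, sizeof(*p));	/* kmalloc- or vmalloc-backed */
	if (IS_ERR(p))
		return p;	/* caller tests IS_ERR() / PTR_ERR() */

	/* ... validate the copied fields here ... */
	return p;		/* release with kvfree(), never plain kfree() */
}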
@@ -6579,7 +6576,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6579 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 6576 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6580 int cmd_type) 6577 int cmd_type)
6581{ 6578{
6582 int pci_dir = XFER_NONE; 6579 enum dma_data_direction dir = DMA_NONE;
6583 6580
6584 c->cmd_type = CMD_IOCTL_PEND; 6581 c->cmd_type = CMD_IOCTL_PEND;
6585 c->scsi_cmd = SCSI_CMD_BUSY; 6582 c->scsi_cmd = SCSI_CMD_BUSY;
@@ -6785,18 +6782,18 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6785 6782
6786 switch (GET_DIR(c->Request.type_attr_dir)) { 6783 switch (GET_DIR(c->Request.type_attr_dir)) {
6787 case XFER_READ: 6784 case XFER_READ:
6788 pci_dir = PCI_DMA_FROMDEVICE; 6785 dir = DMA_FROM_DEVICE;
6789 break; 6786 break;
6790 case XFER_WRITE: 6787 case XFER_WRITE:
6791 pci_dir = PCI_DMA_TODEVICE; 6788 dir = DMA_TO_DEVICE;
6792 break; 6789 break;
6793 case XFER_NONE: 6790 case XFER_NONE:
6794 pci_dir = PCI_DMA_NONE; 6791 dir = DMA_NONE;
6795 break; 6792 break;
6796 default: 6793 default:
6797 pci_dir = PCI_DMA_BIDIRECTIONAL; 6794 dir = DMA_BIDIRECTIONAL;
6798 } 6795 }
6799 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) 6796 if (hpsa_map_one(h->pdev, c, buff, size, dir))
6800 return -1; 6797 return -1;
6801 return 0; 6798 return 0;
6802} 6799}
@@ -6992,13 +6989,13 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6992 * CCISS commands, so they must be allocated from the lower 4GiB of 6989 * CCISS commands, so they must be allocated from the lower 4GiB of
6993 * memory. 6990 * memory.
6994 */ 6991 */
6995 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 6992 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
6996 if (err) { 6993 if (err) {
6997 iounmap(vaddr); 6994 iounmap(vaddr);
6998 return err; 6995 return err;
6999 } 6996 }
7000 6997
7001 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 6998 cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
7002 if (cmd == NULL) { 6999 if (cmd == NULL) {
7003 iounmap(vaddr); 7000 iounmap(vaddr);
7004 return -ENOMEM; 7001 return -ENOMEM;
@@ -7047,7 +7044,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7047 return -ETIMEDOUT; 7044 return -ETIMEDOUT;
7048 } 7045 }
7049 7046
7050 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 7047 dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
7051 7048
7052 if (tag & HPSA_ERROR_BIT) { 7049 if (tag & HPSA_ERROR_BIT) {
7053 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 7050 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
@@ -7914,7 +7911,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
7914 kfree(h->cmd_pool_bits); 7911 kfree(h->cmd_pool_bits);
7915 h->cmd_pool_bits = NULL; 7912 h->cmd_pool_bits = NULL;
7916 if (h->cmd_pool) { 7913 if (h->cmd_pool) {
7917 pci_free_consistent(h->pdev, 7914 dma_free_coherent(&h->pdev->dev,
7918 h->nr_cmds * sizeof(struct CommandList), 7915 h->nr_cmds * sizeof(struct CommandList),
7919 h->cmd_pool, 7916 h->cmd_pool,
7920 h->cmd_pool_dhandle); 7917 h->cmd_pool_dhandle);
@@ -7922,7 +7919,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
7922 h->cmd_pool_dhandle = 0; 7919 h->cmd_pool_dhandle = 0;
7923 } 7920 }
7924 if (h->errinfo_pool) { 7921 if (h->errinfo_pool) {
7925 pci_free_consistent(h->pdev, 7922 dma_free_coherent(&h->pdev->dev,
7926 h->nr_cmds * sizeof(struct ErrorInfo), 7923 h->nr_cmds * sizeof(struct ErrorInfo),
7927 h->errinfo_pool, 7924 h->errinfo_pool,
7928 h->errinfo_pool_dhandle); 7925 h->errinfo_pool_dhandle);
@@ -7936,12 +7933,12 @@ static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7936 h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG), 7933 h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
7937 sizeof(unsigned long), 7934 sizeof(unsigned long),
7938 GFP_KERNEL); 7935 GFP_KERNEL);
7939 h->cmd_pool = pci_alloc_consistent(h->pdev, 7936 h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
7940 h->nr_cmds * sizeof(*h->cmd_pool), 7937 h->nr_cmds * sizeof(*h->cmd_pool),
7941 &(h->cmd_pool_dhandle)); 7938 &h->cmd_pool_dhandle, GFP_KERNEL);
7942 h->errinfo_pool = pci_alloc_consistent(h->pdev, 7939 h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
7943 h->nr_cmds * sizeof(*h->errinfo_pool), 7940 h->nr_cmds * sizeof(*h->errinfo_pool),
7944 &(h->errinfo_pool_dhandle)); 7941 &h->errinfo_pool_dhandle, GFP_KERNEL);
7945 if ((h->cmd_pool_bits == NULL) 7942 if ((h->cmd_pool_bits == NULL)
7946 || (h->cmd_pool == NULL) 7943 || (h->cmd_pool == NULL)
7947 || (h->errinfo_pool == NULL)) { 7944 || (h->errinfo_pool == NULL)) {
@@ -8068,7 +8065,7 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
8068 for (i = 0; i < h->nreply_queues; i++) { 8065 for (i = 0; i < h->nreply_queues; i++) {
8069 if (!h->reply_queue[i].head) 8066 if (!h->reply_queue[i].head)
8070 continue; 8067 continue;
8071 pci_free_consistent(h->pdev, 8068 dma_free_coherent(&h->pdev->dev,
8072 h->reply_queue_size, 8069 h->reply_queue_size,
8073 h->reply_queue[i].head, 8070 h->reply_queue[i].head,
8074 h->reply_queue[i].busaddr); 8071 h->reply_queue[i].busaddr);
@@ -8594,11 +8591,11 @@ reinit_after_soft_reset:
8594 number_of_controllers++; 8591 number_of_controllers++;
8595 8592
8596 /* configure PCI DMA stuff */ 8593 /* configure PCI DMA stuff */
8597 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 8594 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
8598 if (rc == 0) { 8595 if (rc == 0) {
8599 dac = 1; 8596 dac = 1;
8600 } else { 8597 } else {
8601 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 8598 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
8602 if (rc == 0) { 8599 if (rc == 0) {
8603 dac = 0; 8600 dac = 0;
8604 } else { 8601 } else {
@@ -8797,8 +8794,8 @@ static void hpsa_flush_cache(struct ctlr_info *h)
8797 RAID_CTLR_LUNID, TYPE_CMD)) { 8794 RAID_CTLR_LUNID, TYPE_CMD)) {
8798 goto out; 8795 goto out;
8799 } 8796 }
8800 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8797 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8801 PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); 8798 DEFAULT_TIMEOUT);
8802 if (rc) 8799 if (rc)
8803 goto out; 8800 goto out;
8804 if (c->err_info->CommandStatus != 0) 8801 if (c->err_info->CommandStatus != 0)
@@ -8833,8 +8830,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
8833 RAID_CTLR_LUNID, TYPE_CMD)) 8830 RAID_CTLR_LUNID, TYPE_CMD))
8834 goto errout; 8831 goto errout;
8835 8832
8836 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8833 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8837 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 8834 NO_TIMEOUT);
8838 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8835 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8839 goto errout; 8836 goto errout;
8840 8837
@@ -8845,8 +8842,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
8845 RAID_CTLR_LUNID, TYPE_CMD)) 8842 RAID_CTLR_LUNID, TYPE_CMD))
8846 goto errout; 8843 goto errout;
8847 8844
8848 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8845 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
8849 PCI_DMA_TODEVICE, NO_TIMEOUT); 8846 NO_TIMEOUT);
8850 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8847 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8851 goto errout; 8848 goto errout;
8852 8849
@@ -8855,8 +8852,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
8855 RAID_CTLR_LUNID, TYPE_CMD)) 8852 RAID_CTLR_LUNID, TYPE_CMD))
8856 goto errout; 8853 goto errout;
8857 8854
8858 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8855 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
8859 PCI_DMA_FROMDEVICE, NO_TIMEOUT); 8856 NO_TIMEOUT);
8860 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8857 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8861 goto errout; 8858 goto errout;
8862 8859
@@ -9228,9 +9225,9 @@ static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9228 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % 9225 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9229 IOACCEL1_COMMANDLIST_ALIGNMENT); 9226 IOACCEL1_COMMANDLIST_ALIGNMENT);
9230 h->ioaccel_cmd_pool = 9227 h->ioaccel_cmd_pool =
9231 pci_alloc_consistent(h->pdev, 9228 dma_alloc_coherent(&h->pdev->dev,
9232 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), 9229 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9233 &(h->ioaccel_cmd_pool_dhandle)); 9230 &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
9234 9231
9235 h->ioaccel1_blockFetchTable = 9232 h->ioaccel1_blockFetchTable =
9236 kmalloc(((h->ioaccel_maxsg + 1) * 9233 kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9281,9 +9278,9 @@ static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9281 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % 9278 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9282 IOACCEL2_COMMANDLIST_ALIGNMENT); 9279 IOACCEL2_COMMANDLIST_ALIGNMENT);
9283 h->ioaccel2_cmd_pool = 9280 h->ioaccel2_cmd_pool =
9284 pci_alloc_consistent(h->pdev, 9281 dma_alloc_coherent(&h->pdev->dev,
9285 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), 9282 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9286 &(h->ioaccel2_cmd_pool_dhandle)); 9283 &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
9287 9284
9288 h->ioaccel2_blockFetchTable = 9285 h->ioaccel2_blockFetchTable =
9289 kmalloc(((h->ioaccel_maxsg + 1) * 9286 kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9356,9 +9353,10 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9356 h->reply_queue_size = h->max_commands * sizeof(u64); 9353 h->reply_queue_size = h->max_commands * sizeof(u64);
9357 9354
9358 for (i = 0; i < h->nreply_queues; i++) { 9355 for (i = 0; i < h->nreply_queues; i++) {
9359 h->reply_queue[i].head = pci_alloc_consistent(h->pdev, 9356 h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
9360 h->reply_queue_size, 9357 h->reply_queue_size,
9361 &(h->reply_queue[i].busaddr)); 9358 &h->reply_queue[i].busaddr,
9359 GFP_KERNEL);
9362 if (!h->reply_queue[i].head) { 9360 if (!h->reply_queue[i].head) {
9363 rc = -ENOMEM; 9361 rc = -ENOMEM;
9364 goto clean1; /* rq, ioaccel */ 9362 goto clean1; /* rq, ioaccel */
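Every hpsa hunk above is an instance of the same mechanical conversion to the generic DMA API: pci_map_single(pdev, ...) becomes dma_map_single(&pdev->dev, ...), the PCI_DMA_* constants become the corresponding enum dma_data_direction values, and pci_alloc_consistent() becomes dma_alloc_coherent() with the GFP flag spelled out (the legacy wrapper implied GFP_ATOMIC). The streaming half of the pattern, reduced to its core as a kernel-style sketch:

static int send_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;		/* never unmap a failed mapping */

	/* ... hand addr to the hardware, wait for the command ... */

	dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
	return 0;
}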
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index f42a619198c4..e63aadd10dfd 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2266,7 +2266,6 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
2266 /* 2266 /*
2267 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port 2267 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
2268 */ 2268 */
2269 target_wait_for_sess_cmds(se_sess);
2270 target_remove_session(se_sess); 2269 target_remove_session(se_sess);
2271 tport->ibmv_nexus = NULL; 2270 tport->ibmv_nexus = NULL;
2272 kfree(nexus); 2271 kfree(nexus);
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bd6ac6b5980a..ee8a1ecd58fd 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -208,7 +208,7 @@ module_param(ips, charp, 0);
208 208
209#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \ 209#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
210 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \ 210 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
211 PCI_DMA_BIDIRECTIONAL : \ 211 DMA_BIDIRECTIONAL : \
212 scb->scsi_cmd->sc_data_direction) 212 scb->scsi_cmd->sc_data_direction)
213 213
214#ifdef IPS_DEBUG 214#ifdef IPS_DEBUG
@@ -1529,11 +1529,12 @@ ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
1529 if (ha->ioctl_data && length <= ha->ioctl_len) 1529 if (ha->ioctl_data && length <= ha->ioctl_len)
1530 return 0; 1530 return 0;
1531 /* there is no buffer or it's not big enough, allocate a new one */ 1531 /* there is no buffer or it's not big enough, allocate a new one */
1532 bigger_buf = pci_alloc_consistent(ha->pcidev, length, &dma_busaddr); 1532 bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
1533 GFP_KERNEL);
1533 if (bigger_buf) { 1534 if (bigger_buf) {
1534 /* free the old memory */ 1535 /* free the old memory */
1535 pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data, 1536 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
1536 ha->ioctl_busaddr); 1537 ha->ioctl_data, ha->ioctl_busaddr);
1537 /* use the new memory */ 1538 /* use the new memory */
1538 ha->ioctl_data = (char *) bigger_buf; 1539 ha->ioctl_data = (char *) bigger_buf;
1539 ha->ioctl_len = length; 1540 ha->ioctl_len = length;
@@ -1678,9 +1679,8 @@ ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1678 } else if (!ha->flash_data) { 1679 } else if (!ha->flash_data) {
1679 datasize = pt->CoppCP.cmd.flashfw.total_packets * 1680 datasize = pt->CoppCP.cmd.flashfw.total_packets *
1680 pt->CoppCP.cmd.flashfw.count; 1681 pt->CoppCP.cmd.flashfw.count;
1681 ha->flash_data = pci_alloc_consistent(ha->pcidev, 1682 ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
1682 datasize, 1683 datasize, &ha->flash_busaddr, GFP_KERNEL);
1683 &ha->flash_busaddr);
1684 if (!ha->flash_data){ 1684 if (!ha->flash_data){
1685 printk(KERN_WARNING "Unable to allocate a flash buffer\n"); 1685 printk(KERN_WARNING "Unable to allocate a flash buffer\n");
1686 return IPS_FAILURE; 1686 return IPS_FAILURE;
@@ -1858,7 +1858,7 @@ ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
1858 1858
1859 scb->data_len = ha->flash_datasize; 1859 scb->data_len = ha->flash_datasize;
1860 scb->data_busaddr = 1860 scb->data_busaddr =
1861 pci_map_single(ha->pcidev, ha->flash_data, scb->data_len, 1861 dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
1862 IPS_DMA_DIR(scb)); 1862 IPS_DMA_DIR(scb));
1863 scb->flags |= IPS_SCB_MAP_SINGLE; 1863 scb->flags |= IPS_SCB_MAP_SINGLE;
1864 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb); 1864 scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
@@ -1880,8 +1880,8 @@ ips_free_flash_copperhead(ips_ha_t * ha)
1880 if (ha->flash_data == ips_FlashData) 1880 if (ha->flash_data == ips_FlashData)
1881 test_and_clear_bit(0, &ips_FlashDataInUse); 1881 test_and_clear_bit(0, &ips_FlashDataInUse);
1882 else if (ha->flash_data) 1882 else if (ha->flash_data)
1883 pci_free_consistent(ha->pcidev, ha->flash_len, ha->flash_data, 1883 dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
1884 ha->flash_busaddr); 1884 ha->flash_data, ha->flash_busaddr);
1885 ha->flash_data = NULL; 1885 ha->flash_data = NULL;
1886} 1886}
1887 1887
@@ -3485,6 +3485,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
3485 3485
3486 case START_STOP: 3486 case START_STOP:
3487 scb->scsi_cmd->result = DID_OK << 16; 3487 scb->scsi_cmd->result = DID_OK << 16;
3488 break;
3488 3489
3489 case TEST_UNIT_READY: 3490 case TEST_UNIT_READY:
3490 case INQUIRY: 3491 case INQUIRY:
@@ -4212,7 +4213,7 @@ ips_free(ips_ha_t * ha)
4212 4213
4213 if (ha) { 4214 if (ha) {
4214 if (ha->enq) { 4215 if (ha->enq) {
4215 pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ), 4216 dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
4216 ha->enq, ha->enq_busaddr); 4217 ha->enq, ha->enq_busaddr);
4217 ha->enq = NULL; 4218 ha->enq = NULL;
4218 } 4219 }
@@ -4221,7 +4222,7 @@ ips_free(ips_ha_t * ha)
4221 ha->conf = NULL; 4222 ha->conf = NULL;
4222 4223
4223 if (ha->adapt) { 4224 if (ha->adapt) {
4224 pci_free_consistent(ha->pcidev, 4225 dma_free_coherent(&ha->pcidev->dev,
4225 sizeof (IPS_ADAPTER) + 4226 sizeof (IPS_ADAPTER) +
4226 sizeof (IPS_IO_CMD), ha->adapt, 4227 sizeof (IPS_IO_CMD), ha->adapt,
4227 ha->adapt->hw_status_start); 4228 ha->adapt->hw_status_start);
@@ -4229,7 +4230,7 @@ ips_free(ips_ha_t * ha)
4229 } 4230 }
4230 4231
4231 if (ha->logical_drive_info) { 4232 if (ha->logical_drive_info) {
4232 pci_free_consistent(ha->pcidev, 4233 dma_free_coherent(&ha->pcidev->dev,
4233 sizeof (IPS_LD_INFO), 4234 sizeof (IPS_LD_INFO),
4234 ha->logical_drive_info, 4235 ha->logical_drive_info,
4235 ha->logical_drive_info_dma_addr); 4236 ha->logical_drive_info_dma_addr);
@@ -4243,7 +4244,7 @@ ips_free(ips_ha_t * ha)
4243 ha->subsys = NULL; 4244 ha->subsys = NULL;
4244 4245
4245 if (ha->ioctl_data) { 4246 if (ha->ioctl_data) {
4246 pci_free_consistent(ha->pcidev, ha->ioctl_len, 4247 dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
4247 ha->ioctl_data, ha->ioctl_busaddr); 4248 ha->ioctl_data, ha->ioctl_busaddr);
4248 ha->ioctl_data = NULL; 4249 ha->ioctl_data = NULL;
4249 ha->ioctl_datasize = 0; 4250 ha->ioctl_datasize = 0;
@@ -4276,11 +4277,11 @@ static int
4276ips_deallocatescbs(ips_ha_t * ha, int cmds) 4277ips_deallocatescbs(ips_ha_t * ha, int cmds)
4277{ 4278{
4278 if (ha->scbs) { 4279 if (ha->scbs) {
4279 pci_free_consistent(ha->pcidev, 4280 dma_free_coherent(&ha->pcidev->dev,
4280 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds, 4281 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
4281 ha->scbs->sg_list.list, 4282 ha->scbs->sg_list.list,
4282 ha->scbs->sg_busaddr); 4283 ha->scbs->sg_busaddr);
4283 pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds, 4284 dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
4284 ha->scbs, ha->scbs->scb_busaddr); 4285 ha->scbs, ha->scbs->scb_busaddr);
4285 ha->scbs = NULL; 4286 ha->scbs = NULL;
4286 } /* end if */ 4287 } /* end if */
@@ -4307,17 +4308,16 @@ ips_allocatescbs(ips_ha_t * ha)
4307 METHOD_TRACE("ips_allocatescbs", 1); 4308 METHOD_TRACE("ips_allocatescbs", 1);
4308 4309
4309 /* Allocate memory for the SCBs */ 4310 /* Allocate memory for the SCBs */
4310 ha->scbs = 4311 ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
4311 pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t), 4312 ha->max_cmds * sizeof (ips_scb_t),
4312 &command_dma); 4313 &command_dma, GFP_KERNEL);
4313 if (ha->scbs == NULL) 4314 if (ha->scbs == NULL)
4314 return 0; 4315 return 0;
4315 ips_sg.list = 4316 ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
4316 pci_alloc_consistent(ha->pcidev, 4317 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
4317 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * 4318 &sg_dma, GFP_KERNEL);
4318 ha->max_cmds, &sg_dma);
4319 if (ips_sg.list == NULL) { 4319 if (ips_sg.list == NULL) {
4320 pci_free_consistent(ha->pcidev, 4320 dma_free_coherent(&ha->pcidev->dev,
4321 ha->max_cmds * sizeof (ips_scb_t), ha->scbs, 4321 ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
4322 command_dma); 4322 command_dma);
4323 return 0; 4323 return 0;
@@ -4446,8 +4446,8 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
4446 if (scb->flags & IPS_SCB_MAP_SG) 4446 if (scb->flags & IPS_SCB_MAP_SG)
4447 scsi_dma_unmap(scb->scsi_cmd); 4447 scsi_dma_unmap(scb->scsi_cmd);
4448 else if (scb->flags & IPS_SCB_MAP_SINGLE) 4448 else if (scb->flags & IPS_SCB_MAP_SINGLE)
4449 pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len, 4449 dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
4450 IPS_DMA_DIR(scb)); 4450 scb->data_len, IPS_DMA_DIR(scb));
4451 4451
4452 /* check to make sure this is not our "special" scb */ 4452 /* check to make sure this is not our "special" scb */
4453 if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) { 4453 if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
@@ -4559,7 +4559,8 @@ ips_flush_and_reset(ips_ha_t *ha)
4559 dma_addr_t command_dma; 4559 dma_addr_t command_dma;
4560 4560
4561 /* Create a usuable SCB */ 4561 /* Create a usuable SCB */
4562 scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma); 4562 scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
4563 &command_dma, GFP_KERNEL);
4563 if (scb) { 4564 if (scb) {
4564 memset(scb, 0, sizeof(ips_scb_t)); 4565 memset(scb, 0, sizeof(ips_scb_t));
4565 ips_init_scb(ha, scb); 4566 ips_init_scb(ha, scb);
@@ -4594,7 +4595,7 @@ ips_flush_and_reset(ips_ha_t *ha)
4594 /* Now RESET and INIT the adapter */ 4595 /* Now RESET and INIT the adapter */
4595 (*ha->func.reset) (ha); 4596 (*ha->func.reset) (ha);
4596 4597
4597 pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma); 4598 dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
4598 return; 4599 return;
4599} 4600}
4600 4601
@@ -6926,29 +6927,30 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
 	 * are guaranteed to be < 4G.
 	 */
 	if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
-	    !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
+	    !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
 		(ha)->flags |= IPS_HA_ENH_SG;
 	} else {
-		if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
+		if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
 			printk(KERN_WARNING "Unable to set DMA Mask\n");
 			return ips_abort_init(ha, index);
 		}
 	}
 	if(ips_cd_boot && !ips_FlashData){
-		ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
-						     &ips_flashbusaddr);
+		ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
+				PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
 	}
 
-	ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
-				       &ha->enq_busaddr);
+	ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
+			&ha->enq_busaddr, GFP_KERNEL);
 	if (!ha->enq) {
 		IPS_PRINTK(KERN_WARNING, pci_dev,
 			   "Unable to allocate host inquiry structure\n");
 		return ips_abort_init(ha, index);
 	}
 
-	ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
-					 sizeof (IPS_IO_CMD), &dma_address);
+	ha->adapt = dma_alloc_coherent(&pci_dev->dev,
+			sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
+			&dma_address, GFP_KERNEL);
 	if (!ha->adapt) {
 		IPS_PRINTK(KERN_WARNING, pci_dev,
 			   "Unable to allocate host adapt & dummy structures\n");
@@ -6959,7 +6961,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
 
 
 
-	ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
+	ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
+			sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
 	if (!ha->logical_drive_info) {
 		IPS_PRINTK(KERN_WARNING, pci_dev,
 			   "Unable to allocate logical drive info structure\n");
@@ -6997,8 +7000,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
 	if (ips_ioctlsize < PAGE_SIZE)
 		ips_ioctlsize = PAGE_SIZE;
 
-	ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize,
-					      &ha->ioctl_busaddr);
+	ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
+			&ha->ioctl_busaddr, GFP_KERNEL);
 	ha->ioctl_len = ips_ioctlsize;
 	if (!ha->ioctl_data) {
 		IPS_PRINTK(KERN_WARNING, pci_dev,
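
The ips.c hunks above all follow the same mechanical translation from the
legacy PCI DMA wrappers to the generic DMA API; a minimal sketch of the
pattern (the pdev/size/handle names here are illustrative, not from the
patch):

	/* legacy: PCI-only wrapper, implied GFP_ATOMIC, no gfp argument */
	buf = pci_alloc_consistent(pdev, size, &handle);
	pci_free_consistent(pdev, size, buf, handle);

	/* generic: takes a struct device and an explicit gfp_t */
	buf = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);
	dma_free_coherent(&pdev->dev, size, buf, handle);

Since these call sites can sleep, the conversion passes GFP_KERNEL rather
than the GFP_ATOMIC the old wrapper implied.
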
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 1ee3868ade07..7b5deae68d33 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
  * the task management request.
  * @task_request: the handle to the task request object to start.
  */
-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
-					       struct isci_remote_device *idev,
-					       struct isci_request *ireq)
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
+					  struct isci_remote_device *idev,
+					  struct isci_request *ireq)
 {
 	enum sci_status status;
 
@@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
2728 "%s: SCIC Controller starting task from invalid " 2728 "%s: SCIC Controller starting task from invalid "
2729 "state\n", 2729 "state\n",
2730 __func__); 2730 __func__);
2731 return SCI_TASK_FAILURE_INVALID_STATE; 2731 return SCI_FAILURE_INVALID_STATE;
2732 } 2732 }
2733 2733
2734 status = sci_remote_device_start_task(ihost, idev, ireq); 2734 status = sci_remote_device_start_task(ihost, idev, ireq);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index b3539928073c..6bc3f022630a 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -489,7 +489,7 @@ enum sci_status sci_controller_start_io(
 	struct isci_remote_device *idev,
 	struct isci_request *ireq);
 
-enum sci_task_status sci_controller_start_task(
+enum sci_status sci_controller_start_task(
 	struct isci_host *ihost,
 	struct isci_remote_device *idev,
 	struct isci_request *ireq);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ed197bc8e801..2f151708b59a 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
 
 	if (status == SCI_SUCCESS) {
 		if (ireq->stp.rsp.status & ATA_ERR)
-			status = SCI_IO_FAILURE_RESPONSE_VALID;
+			status = SCI_FAILURE_IO_RESPONSE_VALID;
 	} else {
-		status = SCI_IO_FAILURE_RESPONSE_VALID;
+		status = SCI_FAILURE_IO_RESPONSE_VALID;
 	}
 
 	if (status != SCI_SUCCESS) {
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 6dcaed0c1fc8..fb6eba331ac6 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 			  struct isci_tmf *tmf, unsigned long timeout_ms)
 {
 	DECLARE_COMPLETION_ONSTACK(completion);
-	enum sci_task_status status = SCI_TASK_FAILURE;
+	enum sci_status status = SCI_FAILURE;
 	struct isci_request *ireq;
 	int ret = TMF_RESP_FUNC_FAILED;
 	unsigned long flags;
@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
 	/* start the TMF io. */
 	status = sci_controller_start_task(ihost, idev, ireq);
 
-	if (status != SCI_TASK_SUCCESS) {
+	if (status != SCI_SUCCESS) {
 		dev_dbg(&ihost->pdev->dev,
 			"%s: start_io failed - status = 0x%x, request = %p\n",
 			__func__,
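
The isci hunks fold the separate enum sci_task_status values into the
common enum sci_status, so callers compare against a single status space;
a minimal sketch of the before/after call-site shape (names from the
patch, surrounding code elided):

	/* before: mixed enums, so cross-enum comparisons went unchecked */
	enum sci_task_status status = sci_controller_start_task(ihost, idev, ireq);
	if (status != SCI_TASK_SUCCESS) { /* ... */ }

	/* after: one enum covers both the I/O and task management paths */
	enum sci_status status = sci_controller_start_task(ihost, idev, ireq);
	if (status != SCI_SUCCESS) { /* ... */ }
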
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b025a0b74341..23354f206533 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -800,7 +800,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
 		return rc;
 
 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
-						 &addr, param, buf);
+						 &addr,
+						 (enum iscsi_param)param, buf);
 	default:
 		return iscsi_host_get_param(shost, param, buf);
 	}
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 6eb5ff3e2e61..1ad28262b00a 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -38,30 +38,6 @@ static u8 jazz_esp_read8(struct esp *esp, unsigned long reg)
 	return *(volatile u8 *)(esp->regs + reg);
 }
 
-static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
-				      size_t sz, int dir)
-{
-	return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-			   int num_sg, int dir)
-{
-	return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-				  size_t sz, int dir)
-{
-	dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-			      int num_sg, int dir)
-{
-	dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
 static int jazz_esp_irq_pending(struct esp *esp)
 {
 	if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
@@ -117,10 +93,6 @@ static int jazz_esp_dma_error(struct esp *esp)
 static const struct esp_driver_ops jazz_esp_ops = {
 	.esp_write8	=	jazz_esp_write8,
 	.esp_read8	=	jazz_esp_read8,
-	.map_single	=	jazz_esp_map_single,
-	.map_sg		=	jazz_esp_map_sg,
-	.unmap_single	=	jazz_esp_unmap_single,
-	.unmap_sg	=	jazz_esp_unmap_sg,
 	.irq_pending	=	jazz_esp_irq_pending,
 	.reset_dma	=	jazz_esp_reset_dma,
 	.dma_drain	=	jazz_esp_dma_drain,
@@ -182,7 +154,7 @@ static int esp_jazz_probe(struct platform_device *dev)
 
 	dev_set_drvdata(&dev->dev, esp);
 
-	err = scsi_esp_register(esp, &dev->dev);
+	err = scsi_esp_register(esp);
 	if (err)
 		goto fail_free_irq;
 
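
With the esp_scsi core converted to the generic DMA API, per-driver
map/unmap callbacks become pure boilerplate: the core can call
dma_map_single()/dma_map_sg() itself on the struct device the driver
supplies. That is also why scsi_esp_register() loses its dev argument in
this hunk; a plausible reading of the new contract (a sketch, assuming
esp->dev now carries the generic struct device, which this hunk does not
show directly):

	/* driver records its device once at probe time... */
	esp->dev = &dev->dev;
	err = scsi_esp_register(esp);

	/* ...and the core maps buffers against it directly, e.g. */
	addr = dma_map_single(esp->dev, buf, sz, dir);
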
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 4fae253d4f3d..b1bd283be51c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1872,7 +1872,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 	struct fc_lport *lport = shost_priv(shost);
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
 	struct fc_fcp_pkt *fsp;
-	struct fc_rport_libfc_priv *rpriv;
 	int rval;
 	int rc = 0;
 	struct fc_stats *stats;
@@ -1894,8 +1893,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 		goto out;
 	}
 
-	rpriv = rport->dd_data;
-
 	if (!fc_fcp_lport_queue_ready(lport)) {
 		if (lport->qfull) {
 			if (fc_fcp_can_queue_ramp_down(lport))
@@ -2295,8 +2292,7 @@ int fc_setup_fcp(void)
 
 void fc_destroy_fcp(void)
 {
-	if (scsi_pkt_cachep)
-		kmem_cache_destroy(scsi_pkt_cachep);
+	kmem_cache_destroy(scsi_pkt_cachep);
 }
 
 /**
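
The fc_destroy_fcp() change relies on kmem_cache_destroy() accepting a
NULL pointer as a no-op, so the guard added nothing; the same idiom
applies generally (sketch):

	/* no NULL check needed: kmem_cache_destroy(NULL) simply returns */
	kmem_cache_destroy(scsi_pkt_cachep);
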
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 372387a450df..1e1c0f1b9e69 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1038,8 +1038,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 		struct fc_els_ls_rjt *rjt;
 
 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
-		FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
-			     rjt->er_reason, rjt->er_explan);
+		if (!rjt)
+			FC_RPORT_DBG(rdata, "PLOGI bad response\n");
+		else
+			FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+				     rjt->er_reason, rjt->er_explan);
 		fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
 	}
 out:
1045out: 1048out:
@@ -1158,8 +1161,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 	op = fc_frame_payload_op(fp);
 	if (op == ELS_LS_ACC) {
 		pp = fc_frame_payload_get(fp, sizeof(*pp));
-		if (!pp)
+		if (!pp) {
+			fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
 			goto out;
+		}
 
 		resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
 		FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
@@ -1172,8 +1177,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 			fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
 			goto out;
 		}
-		if (pp->prli.prli_spp_len < sizeof(pp->spp))
+		if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
+			fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
 			goto out;
+		}
 
 		fcp_parm = ntohl(pp->spp.spp_params);
 		if (fcp_parm & FCP_SPPF_RETRY)
@@ -1211,8 +1218,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 
 	} else {
 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
-		FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
-			     rjt->er_reason, rjt->er_explan);
+		if (!rjt)
+			FC_RPORT_DBG(rdata, "PRLI bad response\n");
+		else
+			FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+				     rjt->er_reason, rjt->er_explan);
 		fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
 	}
 
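
All four fc_rport.c hunks harden the same pattern: fc_frame_payload_get()
returns NULL when the frame is shorter than the requested payload, so the
result must be checked before dereferencing reject codes; a minimal sketch
of the defensive shape (names from the patch):

	rjt = fc_frame_payload_get(fp, sizeof(*rjt));
	if (!rjt)
		FC_RPORT_DBG(rdata, "PLOGI bad response\n");
	else
		FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
			     rjt->er_reason, rjt->er_explan);
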
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 64a958a99f6a..4f6cdf53e913 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -654,7 +654,7 @@ void sas_probe_sata(struct asd_sas_port *port)
 	/* if libata could not bring the link up, don't surface
 	 * the device
 	 */
-	if (ata_dev_disabled(sas_to_ata_dev(dev)))
+	if (!ata_dev_enabled(sas_to_ata_dev(dev)))
 		sas_fail_probe(dev, __func__, -ENODEV);
 }
 
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 0148ae62a52a..dde433aa59c2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -260,7 +260,7 @@ static void sas_suspend_devices(struct work_struct *work)
 	 * phy_list is not being mutated
 	 */
 	list_for_each_entry(phy, &port->phy_list, port_phy_el) {
-		if (si->dft->lldd_port_formed)
+		if (si->dft->lldd_port_deformed)
 			si->dft->lldd_port_deformed(phy);
 		phy->suspended = 1;
 		port->suspended = 1;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fadc99cb60df..0d1f72752ca2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t)
 	unsigned long flags;
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
-	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
 		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+		complete(&task->slow_task->completion);
+	}
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	complete(&task->slow_task->completion);
 }
 
 static void smp_task_done(struct sas_task *task)
 {
-	if (!del_timer(&task->slow_task->timer))
-		return;
+	del_timer(&task->slow_task->timer);
 	complete(&task->slow_task->completion);
 }
 
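
The smp_task_timedout() change closes a race: signalling the completion
outside the lock allowed the normal completion path to run concurrently
and complete a task that had already been marked done. Moving complete()
under task_state_lock, conditional on SAS_TASK_STATE_DONE not being set,
serializes the two paths (sketch of the fixed shape, from the patch):

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		complete(&task->slow_task->completion);
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
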
@@ -2054,14 +2053,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 		return res;
 	}
 
-	/* delete the old link */
-	if (SAS_ADDR(phy->attached_sas_addr) &&
-	    SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
-		SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
-			    SAS_ADDR(dev->sas_addr), phy_id,
-			    SAS_ADDR(phy->attached_sas_addr));
-		sas_unregister_devs_sas_addr(dev, phy_id, last);
-	}
+	/* we always have to delete the old device when we went here */
+	SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+		    SAS_ADDR(dev->sas_addr), phy_id,
+		    SAS_ADDR(phy->attached_sas_addr));
+	sas_unregister_devs_sas_addr(dev, phy_id, last);
 
 	return sas_discover_new(dev, phy_id);
 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 43732e8d1347..c1eb2b00ca7f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -52,7 +52,7 @@ struct lpfc_sli2_slim;
 	downloads using bsg */
 
 #define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
-#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
 #define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
 #define LPFC_MIN_SG_SEG_CNT	32	/* sg element count per scsi cmnd */
@@ -583,6 +583,25 @@ struct lpfc_mbox_ext_buf_ctx {
 	struct list_head ext_dmabuf_list;
 };
 
+struct lpfc_ras_fwlog {
+	uint8_t *fwlog_buff;
+	uint32_t fw_buffcount; /* Buffer size posted to FW */
+#define LPFC_RAS_BUFF_ENTERIES	16	/* Each entry can hold max of 64k */
+#define LPFC_RAS_MAX_ENTRY_SIZE	(64 * 1024)
+#define LPFC_RAS_MIN_BUFF_POST_SIZE	(256 * 1024)
+#define LPFC_RAS_MAX_BUFF_POST_SIZE	(1024 * 1024)
+	uint32_t fw_loglevel;	/* Log level set */
+	struct lpfc_dmabuf lwpd;
+	struct list_head fwlog_buff_list;
+
+	/* RAS support status on adapter */
+	bool ras_hwsupport;	/* RAS Support available on HW or not */
+	bool ras_enabled;	/* Ras Enabled for the function */
+#define LPFC_RAS_DISABLE_LOGGING 0x00
+#define LPFC_RAS_ENABLE_LOGGING 0x01
+	bool ras_active;	/* RAS logging running state */
+};
+
 struct lpfc_hba {
 	/* SCSI interface function jump table entries */
 	int (*lpfc_new_scsi_buf)
@@ -790,6 +809,7 @@ struct lpfc_hba {
 	uint32_t cfg_total_seg_cnt;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_nvme_seg_cnt;
+	uint32_t cfg_scsi_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
 	uint64_t cfg_soft_wwnn;
 	uint64_t cfg_soft_wwpn;
@@ -833,6 +853,9 @@ struct lpfc_hba {
 #define LPFC_FDMI_SUPPORT	1	/* FDMI supported? */
 	uint32_t cfg_enable_SmartSAN;
 	uint32_t cfg_enable_mds_diags;
+	uint32_t cfg_ras_fwlog_level;
+	uint32_t cfg_ras_fwlog_buffsize;
+	uint32_t cfg_ras_fwlog_func;
 	uint32_t cfg_enable_fc4_type;
 	uint32_t cfg_enable_bbcr;	/* Enable BB Credit Recovery */
 	uint32_t cfg_enable_dpp;	/* Enable Direct Packet Push */
@@ -963,6 +986,7 @@ struct lpfc_hba {
 	uint32_t intr_mode;
 #define LPFC_INTR_ERROR	0xFFFFFFFF
 	struct list_head port_list;
+	spinlock_t port_list_lock;	/* lock for port_list mutations */
 	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
 	uint16_t max_vpi;		/* Maximum virtual nports */
 #define LPFC_MAX_VPI	0xFFFF		/* Max number of VPI supported */
@@ -1092,6 +1116,9 @@ struct lpfc_hba {
 	struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
 	uint32_t ctx_idx;
 
+	/* RAS Support */
+	struct lpfc_ras_fwlog ras_fwlog;
+
 	uint8_t menlo_flag;	/* menlo generic flags */
 #define HBA_MENLO_SUPPORT	0x1 /* HBA supports menlo commands */
 	uint32_t iocb_cnt;
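
The lpfc_ras_fwlog bookkeeping implies a simple sizing rule: the host
buffer is posted to firmware as LPFC_RAS_MAX_ENTRY_SIZE (64k) chunks, so
the chunk count follows from the configured buffer size; a hedged sketch
of the arithmetic (the fw_buffcount derivation is an assumption based on
the constants above, not shown in this hunk):

	/* cfg_ras_fwlog_buffsize is in 1/4 MB units, range [1..4] */
	uint32_t size = phba->cfg_ras_fwlog_buffsize * LPFC_RAS_MIN_BUFF_POST_SIZE;
	uint32_t count = size / LPFC_RAS_MAX_ENTRY_SIZE;	/* 64k DMA buffers */
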
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1a6ed9b0a249..dda7f450b96d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5358,15 +5358,74 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 4096. The default value is
- * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
- * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * This value can be set to values between 64 and 4096. The default value
+ * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
+ * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
  * Because of the additional overhead involved in setting up T10-DIF,
  * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
  * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
-	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+module_param(lpfc_sg_seg_cnt, uint, 0444);
+MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
+
+/**
+ * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
+ *    configured for the adapter
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int len;
+
+	len = snprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
+		       phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
+
+	len += snprintf(buf + len, PAGE_SIZE, "Cfg: %d  SCSI: %d  NVME: %d\n",
+			phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
+			phba->cfg_nvme_seg_cnt);
+	return len;
+}
+
+static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
+
+/**
+ * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
+{
+	if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
+		phba->cfg_sg_seg_cnt = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
+			"be set to %d, allowed range is [%d, %d]\n",
+			val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
+	phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+	return -EINVAL;
+}
 
 /*
  * lpfc_enable_mds_diags: Enable MDS Diagnostics
@@ -5377,6 +5436,31 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
 LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
 
 /*
+ * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
+ *	0 = Disable firmware logging (default)
+ *	[1-4] = Multiple of 1/4th Mb of host memory for FW logging
+ * Value range [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+
+/*
+ * lpfc_ras_fwlog_level: Firmware logging verbosity level
+ * Valid only if firmware logging is enabled
+ * 0(Least Verbosity) 4 (most verbosity)
+ * Value range is [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
+
+/*
+ * lpfc_ras_fwlog_func: Firmware logging enabled on function number
+ * Default function which has RAS support : 0
+ * Value Range is [0..7].
+ * FW logging is a global action and enablement is via a specific
+ * port.
+ */
+LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
+
+/*
  * lpfc_enable_bbcr: Enable BB Credit Recovery
  *       0 = BB Credit Recovery disabled
  *       1 = BB Credit Recovery enabled (default)
@@ -5501,6 +5585,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_protocol,
 	&dev_attr_lpfc_xlane_supported,
 	&dev_attr_lpfc_enable_mds_diags,
+	&dev_attr_lpfc_ras_fwlog_buffsize,
+	&dev_attr_lpfc_ras_fwlog_level,
+	&dev_attr_lpfc_ras_fwlog_func,
 	&dev_attr_lpfc_enable_bbcr,
 	&dev_attr_lpfc_enable_dpp,
 	NULL,
@@ -6587,6 +6674,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_sli_mode_init(phba, lpfc_sli_mode);
 	phba->cfg_enable_dss = 1;
 	lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
+	lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
+	lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
+	lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
+
+
+	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+	 * accommodate 512K and 1M IOs in a single nvme buf and supply
+	 * enough NVME LS iocb buffers for larger connectivity counts.
+	 */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+		phba->cfg_iocb_cnt = 5;
+	}
+
 	return;
 }
 
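
Replacing the LPFC_ATTR_R() macro with an explicit module_param plus a
custom show routine lets the sysfs file report the derived per-protocol
counts instead of echoing the raw parameter; the pattern is the standard
read-only device attribute idiom (sketch, with a hypothetical field name):

	static ssize_t lpfc_foo_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
	{
		/* some_value is a placeholder for whatever is reported */
		return snprintf(buf, PAGE_SIZE, "%d\n", some_value);
	}
	static DEVICE_ATTR_RO(lpfc_foo);	/* creates dev_attr_lpfc_foo */
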
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 90745feca808..7bd7ae86bed5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -27,6 +27,7 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -2843,9 +2844,6 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
 
 	if (nocopydata) {
 		bpl->tus.f.bdeFlags = 0;
-		pci_dma_sync_single_for_device(phba->pcidev,
-			dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
-
 	} else {
 		memset((uint8_t *)dmp->dma.virt, 0, cnt);
 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
@@ -5309,6 +5307,330 @@ job_error:
 }
 
 /**
+ * lpfc_check_fwlog_support: Check FW log support on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if FW Logging support by the adapter
+ **/
+int
+lpfc_check_fwlog_support(struct lpfc_hba *phba)
+{
+	struct lpfc_ras_fwlog *ras_fwlog = NULL;
+
+	ras_fwlog = &phba->ras_fwlog;
+
+	if (ras_fwlog->ras_hwsupport == false)
+		return -EACCES;
+	else if (ras_fwlog->ras_enabled == false)
+		return -EPERM;
+	else
+		return 0;
+}
+
+/**
+ * lpfc_bsg_get_ras_config: Get RAS configuration settings
+ * @job: fc_bsg_job to handle
+ *
+ * Get RAS configuration values set.
+ **/
+static int
+lpfc_bsg_get_ras_config(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_bsg_get_ras_config_reply *ras_reply;
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct lpfc_bsg_ras_req)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"6181 Received RAS_LOG request "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto ras_job_error;
+	}
+
+	/* Check FW log status */
+	rc = lpfc_check_fwlog_support(phba);
+	if (rc == -EACCES || rc == -EPERM)
+		goto ras_job_error;
+
+	ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+	/* Current logging state */
+	if (ras_fwlog->ras_active == true)
+		ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
+	else
+		ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+
+	ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
+	ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
+
+ras_job_error:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+
+	/* complete the job back to userspace */
+	bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
+ * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Disable FW logging into host memory on the adapter. To
+ * be done before reading logs from the host memory.
+ **/
+static void
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+{
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+	ras_fwlog->ras_active = false;
+
+	/* Disable FW logging to host memory */
+	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+}
+
+/**
+ * lpfc_bsg_set_ras_config: Set FW logging parameters
+ * @job: fc_bsg_job to handle
+ *
+ * Set log-level parameters for FW-logging in host memory
+ **/
+static int
+lpfc_bsg_set_ras_config(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_bsg_set_ras_config_req *ras_req;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	uint8_t action = 0, log_level = 0;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct lpfc_bsg_set_ras_config_req)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"6182 Received RAS_LOG request "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto ras_job_error;
+	}
+
+	/* Check FW log status */
+	rc = lpfc_check_fwlog_support(phba);
+	if (rc == -EACCES || rc == -EPERM)
+		goto ras_job_error;
+
+	ras_req = (struct lpfc_bsg_set_ras_config_req *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+	action = ras_req->action;
+	log_level = ras_req->log_level;
+
+	if (action == LPFC_RASACTION_STOP_LOGGING) {
+		/* Check if already disabled */
+		if (ras_fwlog->ras_active == false) {
+			rc = -ESRCH;
+			goto ras_job_error;
+		}
+
+		/* Disable logging */
+		lpfc_ras_stop_fwlog(phba);
+	} else {
+		/*action = LPFC_RASACTION_START_LOGGING*/
+		if (ras_fwlog->ras_active == true) {
+			rc = -EINPROGRESS;
+			goto ras_job_error;
+		}
+
+		/* Enable logging */
+		rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
+					      LPFC_RAS_ENABLE_LOGGING);
+		if (rc)
+			rc = -EINVAL;
+	}
+ras_job_error:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+
+	/* complete the job back to userspace */
+	bsg_job_done(job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_lwpd: Get log write position data
+ * @job: fc_bsg_job to handle
+ *
+ * Get Offset/Wrap count of the log message written
+ * in host memory
+ **/
+static int
+lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_bsg_get_ras_lwpd *ras_reply;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	uint32_t lwpd_offset = 0;
+	uint64_t wrap_value = 0;
+	int rc = 0;
+
+	rc = lpfc_check_fwlog_support(phba);
+	if (rc == -EACCES || rc == -EPERM)
+		goto ras_job_error;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct lpfc_bsg_ras_req)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"6183 Received RAS_LOG request "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto ras_job_error;
+	}
+
+	ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+	lwpd_offset = *((uint32_t *)ras_fwlog->lwpd.virt) & 0xffffffff;
+	ras_reply->offset = be32_to_cpu(lwpd_offset);
+
+	wrap_value = *((uint64_t *)ras_fwlog->lwpd.virt);
+	ras_reply->wrap_count = be32_to_cpu((wrap_value >> 32) & 0xffffffff);
+
+ras_job_error:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+
+	/* complete the job back to userspace */
+	bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_fwlog: Read FW log
+ * @job: fc_bsg_job to handle
+ *
+ * Copy the FW log into the passed buffer.
+ **/
+static int
+lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_bsg_get_fwlog_req *ras_req;
+	uint32_t rd_offset, rd_index, offset, pending_wlen;
+	uint32_t boundary = 0, align_len = 0, write_len = 0;
+	void *dest, *src, *fwlog_buff;
+	struct lpfc_ras_fwlog *ras_fwlog = NULL;
+	struct lpfc_dmabuf *dmabuf, *next;
+	int rc = 0;
+
+	ras_fwlog = &phba->ras_fwlog;
+
+	rc = lpfc_check_fwlog_support(phba);
+	if (rc == -EACCES || rc == -EPERM)
+		goto ras_job_error;
+
+	/* Logging to be stopped before reading */
+	if (ras_fwlog->ras_active == true) {
+		rc = -EINPROGRESS;
+		goto ras_job_error;
+	}
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct lpfc_bsg_get_fwlog_req)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"6184 Received RAS_LOG request "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto ras_job_error;
+	}
+
+	ras_req = (struct lpfc_bsg_get_fwlog_req *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+	rd_offset = ras_req->read_offset;
+
+	/* Allocate memory to read fw log*/
+	fwlog_buff = vmalloc(ras_req->read_size);
+	if (!fwlog_buff) {
+		rc = -ENOMEM;
+		goto ras_job_error;
+	}
+
+	rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
+	offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
+	pending_wlen = ras_req->read_size;
+	dest = fwlog_buff;
+
+	list_for_each_entry_safe(dmabuf, next,
+				 &ras_fwlog->fwlog_buff_list, list) {
+
+		if (dmabuf->buffer_tag < rd_index)
+			continue;
+
+		/* Align read to buffer size */
+		if (offset) {
+			boundary = ((dmabuf->buffer_tag + 1) *
+				    LPFC_RAS_MAX_ENTRY_SIZE);
+
+			align_len = (boundary - offset);
+			write_len = min_t(u32, align_len,
+					  LPFC_RAS_MAX_ENTRY_SIZE);
+		} else {
+			write_len = min_t(u32, pending_wlen,
+					  LPFC_RAS_MAX_ENTRY_SIZE);
+			align_len = 0;
+			boundary = 0;
+		}
+		src = dmabuf->virt + offset;
+		memcpy(dest, src, write_len);
+
+		pending_wlen -= write_len;
+		if (!pending_wlen)
+			break;
+
+		dest += write_len;
+		offset = (offset + write_len) % LPFC_RAS_MAX_ENTRY_SIZE;
+	}
+
+	bsg_reply->reply_payload_rcv_len =
+		sg_copy_from_buffer(job->reply_payload.sg_list,
+				    job->reply_payload.sg_cnt,
+				    fwlog_buff, ras_req->read_size);
+
+	vfree(fwlog_buff);
+
+ras_job_error:
+	bsg_reply->result = rc;
+	bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+	return rc;
+}
+
+
+/**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
  **/
@@ -5355,6 +5677,18 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
 	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
 		rc = lpfc_forced_link_speed(job);
 		break;
+	case LPFC_BSG_VENDOR_RAS_GET_LWPD:
+		rc = lpfc_bsg_get_ras_lwpd(job);
+		break;
+	case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
+		rc = lpfc_bsg_get_ras_fwlog(job);
+		break;
+	case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
+		rc = lpfc_bsg_get_ras_config(job);
+		break;
+	case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
+		rc = lpfc_bsg_set_ras_config(job);
+		break;
 	default:
 		rc = -EINVAL;
 		bsg_reply->reply_payload_rcv_len = 0;
@@ -5368,7 +5702,7 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
 
 /**
  * lpfc_bsg_request - handle a bsg request from the FC transport
- * @job: fc_bsg_job to handle
+ * @job: bsg_job to handle
  **/
 int
 lpfc_bsg_request(struct bsg_job *job)
@@ -5402,7 +5736,7 @@ lpfc_bsg_request(struct bsg_job *job)
 
 /**
  * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
- * @job: fc_bsg_job that has timed out
+ * @job: bsg_job that has timed out
  *
  * This function just aborts the job's IOCB. The aborted IOCB will return to
  * the waiting function which will handle passing the error back to userspace
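
The read loop in the new lpfc_bsg_get_ras_fwlog() treats the firmware log
as a linear byte stream laid across 64k DMA buffers tagged 0..N-1, so a
byte offset splits into a buffer index and an intra-buffer offset (sketch
of the index arithmetic used above):

	rd_index = rd_offset / LPFC_RAS_MAX_ENTRY_SIZE;	/* which 64k buffer */
	offset   = rd_offset % LPFC_RAS_MAX_ENTRY_SIZE;	/* where inside it */

Each iteration then copies at most one buffer's worth and advances offset
modulo the entry size, so a read that starts mid-buffer realigns to a
buffer boundary after the first copy.
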
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 32347c87e3b4..820323f1139b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -38,6 +38,10 @@
 #define LPFC_BSG_VENDOR_DIAG_MODE_END		10
 #define LPFC_BSG_VENDOR_LINK_DIAG_TEST		11
 #define LPFC_BSG_VENDOR_FORCED_LINK_SPEED	14
+#define LPFC_BSG_VENDOR_RAS_GET_LWPD		16
+#define LPFC_BSG_VENDOR_RAS_GET_FWLOG		17
+#define LPFC_BSG_VENDOR_RAS_GET_CONFIG		18
+#define LPFC_BSG_VENDOR_RAS_SET_CONFIG		19
 
 struct set_ct_event {
 	uint32_t command;
@@ -296,6 +300,38 @@ struct forced_link_speed_support_reply {
 	uint8_t supported;
 };
 
+struct lpfc_bsg_ras_req {
+	uint32_t command;
+};
+
+struct lpfc_bsg_get_fwlog_req {
+	uint32_t command;
+	uint32_t read_size;
+	uint32_t read_offset;
+};
+
+struct lpfc_bsg_get_ras_lwpd {
+	uint32_t offset;
+	uint32_t wrap_count;
+};
+
+struct lpfc_bsg_set_ras_config_req {
+	uint32_t command;
+	uint8_t action;
+#define LPFC_RASACTION_STOP_LOGGING	0x00
+#define LPFC_RASACTION_START_LOGGING	0x01
+	uint8_t log_level;
+};
+
+struct lpfc_bsg_get_ras_config_reply {
+	uint8_t state;
+#define LPFC_RASLOG_STATE_STOPPED	0x00
+#define LPFC_RASLOG_STATE_RUNNING	0x01
+	uint8_t log_level;
+	uint32_t log_buff_sz;
+};
+
+
 /* driver only */
 #define SLI_CONFIG_NOT_HANDLED		0
 #define SLI_CONFIG_HANDLED		1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bea24bc4410a..e01136507780 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -545,6 +545,13 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
 int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
 void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
 
+/* RAS Interface */
+void lpfc_sli4_ras_init(struct lpfc_hba *phba);
+void lpfc_sli4_ras_setup(struct lpfc_hba *phba);
+int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level,
+			     uint32_t fwlog_enable);
+int lpfc_check_fwlog_support(struct lpfc_hba *phba);
+
 /* NVME interfaces. */
 void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
 			struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1cbdc892ff95..789ad1502534 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -445,14 +445,14 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
 	struct lpfc_vport *vport_curr;
 	unsigned long flags;
 
-	spin_lock_irqsave(&phba->hbalock, flags);
+	spin_lock_irqsave(&phba->port_list_lock, flags);
 	list_for_each_entry(vport_curr, &phba->port_list, listentry) {
 		if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
-			spin_unlock_irqrestore(&phba->hbalock, flags);
+			spin_unlock_irqrestore(&phba->port_list_lock, flags);
 			return vport_curr;
 		}
 	}
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock_irqrestore(&phba->port_list_lock, flags);
 	return NULL;
 }
 
@@ -471,11 +471,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
471 "Parse GID_FTrsp: did:x%x flg:x%x x%x", 471 "Parse GID_FTrsp: did:x%x flg:x%x x%x",
472 Did, ndlp->nlp_flag, vport->fc_flag); 472 Did, ndlp->nlp_flag, vport->fc_flag);
473 473
474 /* Don't assume the rport is always the previous
475 * FC4 type.
476 */
477 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
478
479 /* By default, the driver expects to support FCP FC4 */ 474 /* By default, the driver expects to support FCP FC4 */
480 if (fc4_type == FC_TYPE_FCP) 475 if (fc4_type == FC_TYPE_FCP)
481 ndlp->nlp_fc4_type |= NLP_FC4_FCP; 476 ndlp->nlp_fc4_type |= NLP_FC4_FCP;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index aec5b10a8c85..0c8005bb0f53 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -550,7 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	struct lpfc_nodelist *ndlp;
 	unsigned char *statep;
 	struct nvme_fc_local_port *localport;
-	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvme_fc_remote_port *nrport = NULL;
 	struct lpfc_nvme_rport *rport;
 
@@ -654,7 +653,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
654 "\nOutstanding IO x%x\n", outio); 653 "\nOutstanding IO x%x\n", outio);
655 654
656 if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) { 655 if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
657 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
658 len += snprintf(buf + len, size - len, 656 len += snprintf(buf + len, size - len,
659 "\nNVME Targetport Entry ...\n"); 657 "\nNVME Targetport Entry ...\n");
660 658
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4dda969e947c..f1c1faa74b46 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7673,8 +7673,11 @@ void
 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
 {
 	struct lpfc_vport *vport;
+
+	spin_lock_irq(&phba->port_list_lock);
 	list_for_each_entry(vport, &phba->port_list, listentry)
 		lpfc_els_flush_cmd(vport);
+	spin_unlock_irq(&phba->port_list_lock);
 
 	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index eb71877f12f8..f4deb862efc6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4193,7 +4193,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
 	if (new_state == NLP_STE_MAPPED_NODE ||
 	    new_state == NLP_STE_UNMAPPED_NODE) {
-		if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+		if (ndlp->nlp_fc4_type ||
 		    ndlp->nlp_DID == Fabric_DID ||
 		    ndlp->nlp_DID == NameServer_DID ||
 		    ndlp->nlp_DID == FDMI_DID) {
@@ -5428,12 +5428,10 @@ static void
 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	LIST_HEAD(completions);
-	struct lpfc_sli *psli;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *iocb, *next_iocb;
 	struct lpfc_sli_ring *pring;
 
-	psli = &phba->sli;
 	pring = lpfc_phba_elsring(phba);
 	if (unlikely(!pring))
 		return;
@@ -5938,14 +5936,14 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
 		}
 	}
 
-	spin_lock_irqsave(&phba->hbalock, flags);
+	spin_lock_irqsave(&phba->port_list_lock, flags);
 	list_for_each_entry(vport, &phba->port_list, listentry) {
 		if (vport->vpi == i) {
-			spin_unlock_irqrestore(&phba->hbalock, flags);
+			spin_unlock_irqrestore(&phba->port_list_lock, flags);
 			return vport;
 		}
 	}
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock_irqrestore(&phba->port_list_lock, flags);
 	return NULL;
 }
 
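
The hbalock-to-port_list_lock conversions here and in lpfc_ct.c,
lpfc_els.c and lpfc_init.c give the vport list its own dedicated spinlock,
so walking or mutating port_list no longer contends with everything else
serialized by the coarse hbalock; the usage is the ordinary list-guard
pattern (sketch, from the hunks):

	spin_lock_irqsave(&phba->port_list_lock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		/* inspect or match the vport under the lock */
	}
	spin_unlock_irqrestore(&phba->port_list_lock, flags);
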
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 083f8c8706e5..bbd0a57e953f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -186,6 +186,7 @@ struct lpfc_sli_intf {
 #define LPFC_CTL_PDEV_CTL_FRL_ALL	0x00
 #define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE	0x10
 #define LPFC_CTL_PDEV_CTL_FRL_NIC	0x20
+#define LPFC_CTL_PDEV_CTL_DDL_RAS	0x1000000
 
 #define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
 
@@ -964,6 +965,7 @@ struct mbox_header {
 /* Subsystem Definitions */
 #define LPFC_MBOX_SUBSYSTEM_NA		0x0
 #define LPFC_MBOX_SUBSYSTEM_COMMON	0x1
+#define LPFC_MBOX_SUBSYSTEM_LOWLEVEL	0xB
 #define LPFC_MBOX_SUBSYSTEM_FCOE	0xC
 
 /* Device Specific Definitions */
969/* Device Specific Definitions */ 971/* Device Specific Definitions */
@@ -1030,6 +1032,9 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE		0x22
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK	0x23
 
+/* Low level Opcodes */
+#define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION		0x37
+
 /* Mailbox command structures */
 struct eq_context {
 	uint32_t word0;
@@ -1162,6 +1167,45 @@ struct lpfc_mbx_nop {
 	uint32_t context[2];
 };
 
+
+
+struct lpfc_mbx_set_ras_fwlog {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_fwlog_enable_SHIFT		0
+#define lpfc_fwlog_enable_MASK		0x00000001
+#define lpfc_fwlog_enable_WORD		word4
+#define lpfc_fwlog_loglvl_SHIFT		8
+#define lpfc_fwlog_loglvl_MASK		0x0000000F
+#define lpfc_fwlog_loglvl_WORD		word4
+#define lpfc_fwlog_ra_SHIFT		15
+#define lpfc_fwlog_ra_WORD		0x00000008
+#define lpfc_fwlog_buffcnt_SHIFT	16
+#define lpfc_fwlog_buffcnt_MASK		0x000000FF
+#define lpfc_fwlog_buffcnt_WORD		word4
+#define lpfc_fwlog_buffsz_SHIFT		24
+#define lpfc_fwlog_buffsz_MASK		0x000000FF
+#define lpfc_fwlog_buffsz_WORD		word4
+			uint32_t word5;
+#define lpfc_fwlog_acqe_SHIFT		0
+#define lpfc_fwlog_acqe_MASK		0x0000FFFF
+#define lpfc_fwlog_acqe_WORD		word5
+#define lpfc_fwlog_cqid_SHIFT		16
+#define lpfc_fwlog_cqid_MASK		0x0000FFFF
+#define lpfc_fwlog_cqid_WORD		word5
+#define LPFC_MAX_FWLOG_PAGE		16
+			struct dma_address lwpd;
+			struct dma_address buff_fwlog[LPFC_MAX_FWLOG_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+
 struct cq_context {
 	uint32_t word0;
 #define lpfc_cq_context_event_SHIFT	31
@@ -3868,6 +3912,7 @@ struct lpfc_mqe {
 		struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
 		struct lpfc_mbx_set_host_data set_host_data;
 		struct lpfc_mbx_nop nop;
+		struct lpfc_mbx_set_ras_fwlog ras_fwlog;
 	} un;
 };
 
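
The _SHIFT/_MASK/_WORD triplets in lpfc_mbx_set_ras_fwlog follow the
driver's usual bitfield-accessor convention, where the generic bf_set()/
bf_get() macros locate a field inside the named word; a sketch of how a
request might be filled in (usage inferred from the macro naming, not
shown in this hunk):

	/* set the enable bit, log level and buffer count in word4 */
	bf_set(lpfc_fwlog_enable, &mbx->u.request, 1);
	bf_set(lpfc_fwlog_loglvl, &mbx->u.request, fwlog_level);
	bf_set(lpfc_fwlog_buffcnt, &mbx->u.request, fwlog_entry_count);
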
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0503237b8145..20fa6785a0e2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3956,7 +3956,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		shost->dma_boundary =
 			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
-		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+		shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
 	}
 
 	/*
@@ -3988,9 +3988,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	if (error)
 		goto out_put_shost;
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(&phba->port_list_lock);
 	list_add_tail(&vport->listentry, &phba->port_list);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->port_list_lock);
 	return vport;
 
 out_put_shost:
@@ -4016,9 +4016,9 @@ destroy_port(struct lpfc_vport *vport)
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irq(&phba->port_list_lock);
 	list_del_init(&vport->listentry);
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irq(&phba->port_list_lock);
 
 	lpfc_cleanup(vport);
 	return;
@@ -5621,7 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
 	/* Initialize ndlp management spinlock */
 	spin_lock_init(&phba->ndlp_lock);
 
+	/* Initialize port_list spinlock */
+	spin_lock_init(&phba->port_list_lock);
 	INIT_LIST_HEAD(&phba->port_list);
+
 	INIT_LIST_HEAD(&phba->work_list);
 	init_waitqueue_head(&phba->wait_4_mlo_m_q);
 
@@ -5919,8 +5922,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
 	 */
 	max_buf_size = (2 * SLI4_PAGE_SIZE);
-	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
-		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
 
 	/*
 	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5942,9 +5943,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
 		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
 
-		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
-			phba->cfg_sg_seg_cnt =
-				LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+		/*
+		 * If supporting DIF, reduce the seg count for scsi to
+		 * allow room for the DIF sges.
+		 */
+		if (phba->cfg_enable_bg &&
+		    phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+			phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+		else
+			phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
 	} else {
 		/*
 		 * The scsi_buf for a regular I/O holds the FCP cmnd,
@@ -5958,6 +5966,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5958 5966
5959 /* Total SGEs for scsi_sg_list */ 5967 /* Total SGEs for scsi_sg_list */
5960 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; 5968 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
5969 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
5961 5970
5962 /* 5971 /*
5963 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only 5972 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
@@ -5965,10 +5974,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5965 */ 5974 */
5966 } 5975 }
5967 5976
5977 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
5978 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
5979 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
5980 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
5981 "6300 Reducing NVME sg segment "
5982 "cnt to %d\n",
5983 LPFC_MAX_NVME_SEG_CNT);
5984 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
5985 } else
5986 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
5987 }
5988
5968 /* Initialize the host templates with the updated values. */ 5989 /* Initialize the host templates with the updated values. */
5969 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5990 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
5970 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5991 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
5971 lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; 5992 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
5972 5993
5973 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 5994 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5974 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 5995 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5977,9 +5998,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5977 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 5998 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5978 5999
5979 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 6000 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5980 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", 6001 "9087 sg_seg_cnt:%d dmabuf_size:%d "
6002 "total:%d scsi:%d nvme:%d\n",
5981 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 6003 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5982 phba->cfg_total_seg_cnt); 6004 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6005 phba->cfg_nvme_seg_cnt);
5983 6006
5984 /* Initialize buffer queue management fields */ 6007 /* Initialize buffer queue management fields */
5985 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6008 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
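
The hunks above split the single cfg_sg_seg_cnt into per-protocol limits: SCSI gets a lower cap when BlockGuard (DIF) is enabled, to leave room for the DIF SGEs, and NVME is clamped to LPFC_MAX_NVME_SEG_CNT. An equivalent, more compact sketch of the derivation — min_t() is used here only for illustration; the patch spells out the branches and logs the NVME reduction:

    phba->cfg_scsi_seg_cnt = phba->cfg_enable_bg ?
            min_t(u32, phba->cfg_sg_seg_cnt, LPFC_MAX_BG_SLI4_SEG_CNT_DIF) :
            phba->cfg_sg_seg_cnt;

    if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
            phba->cfg_nvme_seg_cnt = min_t(u32, phba->cfg_sg_seg_cnt,
                                           LPFC_MAX_NVME_SEG_CNT);
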
@@ -6205,6 +6228,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6205 if (phba->cfg_fof) 6228 if (phba->cfg_fof)
6206 fof_vectors = 1; 6229 fof_vectors = 1;
6207 6230
6231 /* Verify RAS support on adapter */
6232 lpfc_sli4_ras_init(phba);
6233
6208 /* Verify all the SLI4 queues */ 6234 /* Verify all the SLI4 queues */
6209 rc = lpfc_sli4_queue_verify(phba); 6235 rc = lpfc_sli4_queue_verify(phba);
6210 if (rc) 6236 if (rc)
@@ -7967,7 +7993,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
7967 else 7993 else
7968 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7994 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7969 "3028 GET_FUNCTION_CONFIG: failed to find " 7995 "3028 GET_FUNCTION_CONFIG: failed to find "
7970 "Resrouce Descriptor:x%x\n", 7996 "Resource Descriptor:x%x\n",
7971 LPFC_RSRC_DESC_TYPE_FCFCOE); 7997 LPFC_RSRC_DESC_TYPE_FCFCOE);
7972 7998
7973read_cfg_out: 7999read_cfg_out:
@@ -10492,6 +10518,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
10492 /* Stop kthread signal shall trigger work_done one more time */ 10518 /* Stop kthread signal shall trigger work_done one more time */
10493 kthread_stop(phba->worker_thread); 10519 kthread_stop(phba->worker_thread);
10494 10520
10521 /* Disable FW logging to host memory */
10522 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
10523 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
10524
10525 /* Free RAS DMA memory */
10526 if (phba->ras_fwlog.ras_enabled == true)
10527 lpfc_sli4_ras_dma_free(phba);
10528
10495 /* Unset the queues shared with the hardware then release all 10529 /* Unset the queues shared with the hardware then release all
10496 * allocated resources. 10530 * allocated resources.
10497 */ 10531 */
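
Ordering note for the unset path above: the write to LPFC_CTL_PDEV_CTL_OFFSET tells the firmware to stop DMA-ing log entries before the buffers it writes into are freed; freeing first would leave the device writing into released memory. The "== true" comparison on a bool is redundant; the same teardown reads more naturally as:

    writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
           phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
    if (phba->ras_fwlog.ras_enabled)
            lpfc_sli4_ras_dma_free(phba);
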
@@ -10737,6 +10771,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10737 phba->mds_diags_support = 1; 10771 phba->mds_diags_support = 1;
10738 else 10772 else
10739 phba->mds_diags_support = 0; 10773 phba->mds_diags_support = 0;
10774
10740 return 0; 10775 return 0;
10741} 10776}
10742 10777
@@ -10965,9 +11000,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
10965 kfree(phba->vpi_ids); 11000 kfree(phba->vpi_ids);
10966 11001
10967 lpfc_stop_hba_timers(phba); 11002 lpfc_stop_hba_timers(phba);
10968 spin_lock_irq(&phba->hbalock); 11003 spin_lock_irq(&phba->port_list_lock);
10969 list_del_init(&vport->listentry); 11004 list_del_init(&vport->listentry);
10970 spin_unlock_irq(&phba->hbalock); 11005 spin_unlock_irq(&phba->port_list_lock);
10971 11006
10972 lpfc_debugfs_terminate(vport); 11007 lpfc_debugfs_terminate(vport);
10973 11008
@@ -11694,6 +11729,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
11694 11729
11695 /* Check if there are static vports to be created. */ 11730 /* Check if there are static vports to be created. */
11696 lpfc_create_static_vport(phba); 11731 lpfc_create_static_vport(phba);
11732
11733 /* Enable RAS FW log support */
11734 lpfc_sli4_ras_setup(phba);
11735
11697 return 0; 11736 return 0;
11698 11737
11699out_disable_intr: 11738out_disable_intr:
@@ -11773,9 +11812,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
11773 lpfc_sli4_hba_unset(phba); 11812 lpfc_sli4_hba_unset(phba);
11774 11813
11775 lpfc_stop_hba_timers(phba); 11814 lpfc_stop_hba_timers(phba);
11776 spin_lock_irq(&phba->hbalock); 11815 spin_lock_irq(&phba->port_list_lock);
11777 list_del_init(&vport->listentry); 11816 list_del_init(&vport->listentry);
11778 spin_unlock_irq(&phba->hbalock); 11817 spin_unlock_irq(&phba->port_list_lock);
11779 11818
11780 /* Perform scsi free before driver resource_unset since scsi 11819 /* Perform scsi free before driver resource_unset since scsi
11781 * buffers are released to their corresponding pools here. 11820 * buffers are released to their corresponding pools here.
@@ -12420,6 +12459,30 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
12420} 12459}
12421 12460
12422/** 12461/**
12462 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
12463 * @phba: pointer to lpfc hba data structure.
12464 *
12465 * This routine checks whether the adapter supports RAS FW logging and
12466 * whether FW logging should be enabled on this PCI function.
12467 **/
12468void
12469lpfc_sli4_ras_init(struct lpfc_hba *phba)
12470{
12471 switch (phba->pcidev->device) {
12472 case PCI_DEVICE_ID_LANCER_G6_FC:
12473 case PCI_DEVICE_ID_LANCER_G7_FC:
12474 phba->ras_fwlog.ras_hwsupport = true;
12475 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn))
12476 phba->ras_fwlog.ras_enabled = true;
12477 else
12478 phba->ras_fwlog.ras_enabled = false;
12479 break;
12480 default:
12481 phba->ras_fwlog.ras_hwsupport = false;
12482 }
12483}
12484
12485/**
12423 * lpfc_fof_queue_setup - Set up all the fof queues 12486 * lpfc_fof_queue_setup - Set up all the fof queues
12424 * @phba: pointer to lpfc hba data structure. 12487 * @phba: pointer to lpfc hba data structure.
12425 * 12488 *
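
lpfc_sli4_ras_init() above gates RAS FW logging on two conditions: hardware support (Lancer G6/G7 device IDs) and the cfg_ras_fwlog_func module parameter selecting one PCI function of the adapter. The decision collapses to two booleans; a sketch using only names from the patch:

    bool hw_ok = phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
                 phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC;

    phba->ras_fwlog.ras_hwsupport = hw_ok;
    phba->ras_fwlog.ras_enabled = hw_ok &&
            phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn);
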
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd9bce9d9974..269808e8480f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -2318,6 +2318,7 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
2318 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2318 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2319 spin_lock_irq(shost->host_lock); 2319 spin_lock_irq(shost->host_lock);
2320 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 2320 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2321 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
2321 spin_unlock_irq(shost->host_lock); 2322 spin_unlock_irq(shost->host_lock);
2322 lpfc_disc_set_adisc(vport, ndlp); 2323 lpfc_disc_set_adisc(vport, ndlp);
2323 2324
@@ -2395,6 +2396,7 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
2395 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 2396 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2396 spin_lock_irq(shost->host_lock); 2397 spin_lock_irq(shost->host_lock);
2397 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 2398 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2399 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
2398 spin_unlock_irq(shost->host_lock); 2400 spin_unlock_irq(shost->host_lock);
2399 lpfc_disc_set_adisc(vport, ndlp); 2401 lpfc_disc_set_adisc(vport, ndlp);
2400 return ndlp->nlp_state; 2402 return ndlp->nlp_state;
@@ -2652,6 +2654,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2652 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2654 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2653 spin_lock_irq(shost->host_lock); 2655 spin_lock_irq(shost->host_lock);
2654 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 2656 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2657 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
2655 spin_unlock_irq(shost->host_lock); 2658 spin_unlock_irq(shost->host_lock);
2656 return ndlp->nlp_state; 2659 return ndlp->nlp_state;
2657} 2660}
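
The same one-line addition lands in all three recovery handlers (unmap, mapped, npr): the node's cached FC4 types are cleared under host_lock, presumably so FCP/NVME support is re-negotiated at PRLI time when the node comes back rather than trusted from before the outage. The shared pattern:

    spin_lock_irq(shost->host_lock);
    ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
    ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); /* rediscover FC4 types */
    spin_unlock_irq(shost->host_lock);
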
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 918ae18ef8a8..ba831def9301 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
282 vport = lport->vport; 282 vport = lport->vport;
283 283
284 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 284 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
285 "6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n", 285 "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
286 lport, qidx, handle); 286 lport, qidx, handle);
287 kfree(handle); 287 kfree(handle);
288} 288}
@@ -2235,13 +2235,11 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
2235 struct sli4_sge *sgl; 2235 struct sli4_sge *sgl;
2236 dma_addr_t pdma_phys_sgl; 2236 dma_addr_t pdma_phys_sgl;
2237 uint16_t iotag, lxri = 0; 2237 uint16_t iotag, lxri = 0;
2238 int bcnt, num_posted, sgl_size; 2238 int bcnt, num_posted;
2239 LIST_HEAD(prep_nblist); 2239 LIST_HEAD(prep_nblist);
2240 LIST_HEAD(post_nblist); 2240 LIST_HEAD(post_nblist);
2241 LIST_HEAD(nvme_nblist); 2241 LIST_HEAD(nvme_nblist);
2242 2242
2243 sgl_size = phba->cfg_sg_dma_buf_size;
2244
2245 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 2243 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
2246 lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL); 2244 lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
2247 if (!lpfc_ncmd) 2245 if (!lpfc_ncmd)
@@ -2462,17 +2460,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2462 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 2460 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2463 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 2461 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2464 2462
2465 /* Limit to LPFC_MAX_NVME_SEG_CNT. 2463 /* We need to tell the transport layer + 1 because it takes page
2466 * For now need + 1 to get around NVME transport logic. 2464 * alignment into account. When space for the SGL is allocated we
2465 * allocate + 3, one for cmd, one for rsp and one for this alignment
2467 */ 2466 */
2468 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
2469 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
2470 "6300 Reducing sg segment cnt to %d\n",
2471 LPFC_MAX_NVME_SEG_CNT);
2472 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
2473 } else {
2474 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
2475 }
2476 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 2467 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2477 lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; 2468 lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
2478 2469
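
Both here and in the lpfc_nvmet.c hunk below, the seg-count clamp moves out of the localport/targetport create paths into lpfc_sli4_driver_resource_setup(), so cfg_nvme_seg_cnt is computed once. The create paths keep only the "+ 1" slot the FC-NVME transport reserves for page alignment, per the new comment:

    /* initiator (lpfc_nvme.c) and target (lpfc_nvmet.c) use the same value */
    lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
    lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
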
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b766afe10d3d..6245f442d784 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1339,15 +1339,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1339 idx = 0; 1339 idx = 0;
1340 } 1340 }
1341 1341
1342 infop = phba->sli4_hba.nvmet_ctx_info; 1342 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1343 for (j = 0; j < phba->cfg_nvmet_mrq; j++) { 1343 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1344 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 1344 infop = lpfc_get_ctx_list(phba, i, j);
1345 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 1345 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1346 "6408 TOTAL NVMET ctx for CPU %d " 1346 "6408 TOTAL NVMET ctx for CPU %d "
1347 "MRQ %d: cnt %d nextcpu %p\n", 1347 "MRQ %d: cnt %d nextcpu %p\n",
1348 i, j, infop->nvmet_ctx_list_cnt, 1348 i, j, infop->nvmet_ctx_list_cnt,
1349 infop->nvmet_ctx_next_cpu); 1349 infop->nvmet_ctx_next_cpu);
1350 infop++;
1351 } 1350 }
1352 } 1351 }
1353 return 0; 1352 return 0;
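
The loop above previously advanced a flat infop pointer, which baked one specific array layout into the call site (and walked it MRQ-major while printing CPU-major indices). Indexing through lpfc_get_ctx_list() keeps the (cpu, mrq) lookup explicit. The accessor's assumed shape, sketched from how the patch uses it — treat the exact definition as illustrative:

    /* Index a flat per-(cpu, mrq) table without exposing its layout
     * at call sites; hypothetical reconstruction of the helper.
     */
    #define lpfc_get_ctx_list(phba, cpu, mrq) \
            ((phba)->sli4_hba.nvmet_ctx_info + \
             ((cpu) * (phba)->cfg_nvmet_mrq) + (mrq))
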
@@ -1373,17 +1372,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1373 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 1372 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1374 pinfo.port_id = vport->fc_myDID; 1373 pinfo.port_id = vport->fc_myDID;
1375 1374
1376 /* Limit to LPFC_MAX_NVME_SEG_CNT. 1375 /* We need to tell the transport layer + 1 because it takes page
1377 * For now need + 1 to get around NVME transport logic. 1376 * alignment into account. When space for the SGL is allocated we
1377 * allocate + 3, one for cmd, one for rsp and one for this alignment
1378 */ 1378 */
1379 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
1380 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1381 "6400 Reducing sg segment cnt to %d\n",
1382 LPFC_MAX_NVME_SEG_CNT);
1383 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
1384 } else {
1385 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
1386 }
1387 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 1379 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1388 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; 1380 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
1389 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; 1381 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 5c7858e735c9..4fa6703a9ec9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -202,8 +202,8 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
202static void 202static void
203lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 203lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
204{ 204{
205 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 205 struct lpfc_rport_data *rdata;
206 struct lpfc_nodelist *pnode = rdata->pnode; 206 struct lpfc_nodelist *pnode;
207 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 207 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
208 unsigned long flags; 208 unsigned long flags;
209 struct Scsi_Host *shost = cmd->device->host; 209 struct Scsi_Host *shost = cmd->device->host;
@@ -211,17 +211,19 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
211 unsigned long latency; 211 unsigned long latency;
212 int i; 212 int i;
213 213
214 if (cmd->result) 214 if (!vport->stat_data_enabled ||
215 vport->stat_data_blocked ||
216 (cmd->result))
215 return; 217 return;
216 218
217 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time); 219 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
220 rdata = lpfc_cmd->rdata;
221 pnode = rdata->pnode;
218 222
219 spin_lock_irqsave(shost->host_lock, flags); 223 spin_lock_irqsave(shost->host_lock, flags);
220 if (!vport->stat_data_enabled || 224 if (!pnode ||
221 vport->stat_data_blocked || 225 !pnode->lat_data ||
222 !pnode || 226 (phba->bucket_type == LPFC_NO_BUCKET)) {
223 !pnode->lat_data ||
224 (phba->bucket_type == LPFC_NO_BUCKET)) {
225 spin_unlock_irqrestore(shost->host_lock, flags); 227 spin_unlock_irqrestore(shost->host_lock, flags);
226 return; 228 return;
227 } 229 }
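
Two fixes in one hunk: the cheap vport flag tests move ahead of the host_lock acquisition (no lock is needed to read them), and lpfc_cmd->rdata is no longer dereferenced at declaration time, before any validity check has run. The resulting shape:

    if (!vport->stat_data_enabled || vport->stat_data_blocked || cmd->result)
            return;                         /* no lock taken for these */

    rdata = lpfc_cmd->rdata;                /* deref only past the early out */
    pnode = rdata->pnode;

    spin_lock_irqsave(shost->host_lock, flags);
    if (!pnode || !pnode->lat_data || phba->bucket_type == LPFC_NO_BUCKET) {
            spin_unlock_irqrestore(shost->host_lock, flags);
            return;
    }
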
@@ -1050,7 +1052,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1050 if (!found) 1052 if (!found)
1051 return NULL; 1053 return NULL;
1052 1054
1053 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { 1055 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1054 atomic_inc(&ndlp->cmd_pending); 1056 atomic_inc(&ndlp->cmd_pending);
1055 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 1057 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
1056 } 1058 }
@@ -4158,9 +4160,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4158 } 4160 }
4159 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4161 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4160 4162
4161 spin_lock_irqsave(&phba->hbalock, flags); 4163 /* If pCmd was set to NULL from abort path, do not call scsi_done */
4162 lpfc_cmd->pCmd = NULL; 4164 if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
4163 spin_unlock_irqrestore(&phba->hbalock, flags); 4165 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4166 "0711 FCP cmd already NULL, sid: 0x%06x, "
4167 "did: 0x%06x, oxid: 0x%04x\n",
4168 vport->fc_myDID,
4169 (pnode) ? pnode->nlp_DID : 0,
4170 phba->sli_rev == LPFC_SLI_REV4 ?
4171 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
4172 return;
4173 }
4164 4174
4165 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4175 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4166 cmd->scsi_done(cmd); 4176 cmd->scsi_done(cmd);
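
The hbalock round-trip that existed only to NULL out pCmd is replaced by an atomic exchange: whichever of the normal completion path and the abort path swaps pCmd to NULL first owns the scsi_done() upcall; the loser logs and backs off. Reduced to its core, the hand-off is:

    /* xchg() returns the previous value atomically; exactly one caller
     * observes the non-NULL command and completes it.
     */
    if (xchg(&lpfc_cmd->pCmd, NULL) == NULL)
            return;         /* already claimed by the abort path */

    cmd->scsi_done(cmd);    /* cmd was loaded from pCmd earlier, while valid */
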
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9830bdb6e072..783a1540cfbe 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -392,11 +392,7 @@ lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
392 struct lpfc_register doorbell; 392 struct lpfc_register doorbell;
393 393
394 doorbell.word0 = 0; 394 doorbell.word0 = 0;
395 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 395 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
396 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
397 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
398 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
399 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
400 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 396 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
401} 397}
402 398
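
Judging by the new field name, the if_type 6 EQ doorbell carries the full queue id in a single field, so the queue-type and eqci bits and the hi/lo id split of the older doorbell format simply aren't written. What remains of the clear-interrupt path:

    doorbell.word0 = 0;
    bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
    writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
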
@@ -3797,6 +3793,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3797 struct hbq_dmabuf *dmabuf; 3793 struct hbq_dmabuf *dmabuf;
3798 struct lpfc_cq_event *cq_event; 3794 struct lpfc_cq_event *cq_event;
3799 unsigned long iflag; 3795 unsigned long iflag;
3796 int count = 0;
3800 3797
3801 spin_lock_irqsave(&phba->hbalock, iflag); 3798 spin_lock_irqsave(&phba->hbalock, iflag);
3802 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3799 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
@@ -3818,16 +3815,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3818 if (irspiocbq) 3815 if (irspiocbq)
3819 lpfc_sli_sp_handle_rspiocb(phba, pring, 3816 lpfc_sli_sp_handle_rspiocb(phba, pring,
3820 irspiocbq); 3817 irspiocbq);
3818 count++;
3821 break; 3819 break;
3822 case CQE_CODE_RECEIVE: 3820 case CQE_CODE_RECEIVE:
3823 case CQE_CODE_RECEIVE_V1: 3821 case CQE_CODE_RECEIVE_V1:
3824 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3822 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3825 cq_event); 3823 cq_event);
3826 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3824 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3825 count++;
3827 break; 3826 break;
3828 default: 3827 default:
3829 break; 3828 break;
3830 } 3829 }
3830
3831 /* Limit the number of events to 64 to avoid soft lockups */
3832 if (count == 64)
3833 break;
3831 } 3834 }
3832} 3835}
3833 3836
@@ -6146,6 +6149,271 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6146} 6149}
6147 6150
6148/** 6151/**
6152 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6153 * @phba: Pointer to HBA context object.
6154 *
6155 * This function is called to free memory allocated for RAS FW logging
6156 * support in the driver.
6157 **/
6158void
6159lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6160{
6161 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6162 struct lpfc_dmabuf *dmabuf, *next;
6163
6164 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6165 list_for_each_entry_safe(dmabuf, next,
6166 &ras_fwlog->fwlog_buff_list,
6167 list) {
6168 list_del(&dmabuf->list);
6169 dma_free_coherent(&phba->pcidev->dev,
6170 LPFC_RAS_MAX_ENTRY_SIZE,
6171 dmabuf->virt, dmabuf->phys);
6172 kfree(dmabuf);
6173 }
6174 }
6175
6176 if (ras_fwlog->lwpd.virt) {
6177 dma_free_coherent(&phba->pcidev->dev,
6178 sizeof(uint32_t) * 2,
6179 ras_fwlog->lwpd.virt,
6180 ras_fwlog->lwpd.phys);
6181 ras_fwlog->lwpd.virt = NULL;
6182 }
6183
6184 ras_fwlog->ras_active = false;
6185}
6186
6187/**
 6188 * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support
6189 * @phba: Pointer to HBA context object.
6190 * @fwlog_buff_count: Count of buffers to be created.
6191 *
 6192 * This routine allocates DMA memory for the Log Write Position Data
 6193 * [LWPD] and for the log buffers that are posted to the adapter for
 6194 * FW log updates. The buffer count is derived from the module param
 6195 * ras_fwlog_buffsize; each buffer posted to the FW is 64K.
6196 **/
6197
6198static int
6199lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6200 uint32_t fwlog_buff_count)
6201{
6202 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6203 struct lpfc_dmabuf *dmabuf;
6204 int rc = 0, i = 0;
6205
6206 /* Initialize List */
6207 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6208
6209 /* Allocate memory for the LWPD */
6210 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6211 sizeof(uint32_t) * 2,
6212 &ras_fwlog->lwpd.phys,
6213 GFP_KERNEL);
6214 if (!ras_fwlog->lwpd.virt) {
6215 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6216 "6185 LWPD Memory Alloc Failed\n");
6217
6218 return -ENOMEM;
6219 }
6220
6221 ras_fwlog->fw_buffcount = fwlog_buff_count;
6222 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6223 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6224 GFP_KERNEL);
6225 if (!dmabuf) {
6226 rc = -ENOMEM;
6227 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6228 "6186 Memory Alloc failed FW logging");
6229 goto free_mem;
6230 }
6231
6232 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6233 LPFC_RAS_MAX_ENTRY_SIZE,
6234 &dmabuf->phys,
6235 GFP_KERNEL);
6236 if (!dmabuf->virt) {
6237 kfree(dmabuf);
6238 rc = -ENOMEM;
6239 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6240 "6187 DMA Alloc Failed FW logging");
6241 goto free_mem;
6242 }
6243 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6244 dmabuf->buffer_tag = i;
6245 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6246 }
6247
6248free_mem:
6249 if (rc)
6250 lpfc_sli4_ras_dma_free(phba);
6251
6252 return rc;
6253}
6254
6255/**
6256 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6257 * @phba: pointer to lpfc hba data structure.
6258 * @pmboxq: pointer to the driver internal queue element for mailbox command.
6259 *
6260 * Completion handler for driver's RAS MBX command to the device.
6261 **/
6262static void
6263lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6264{
6265 MAILBOX_t *mb;
6266 union lpfc_sli4_cfg_shdr *shdr;
6267 uint32_t shdr_status, shdr_add_status;
6268 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6269
6270 mb = &pmb->u.mb;
6271
6272 shdr = (union lpfc_sli4_cfg_shdr *)
6273 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6274 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6275 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6276
6277 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6278 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
6279 "6188 FW LOG mailbox "
6280 "completed with status x%x add_status x%x,"
6281 " mbx status x%x\n",
6282 shdr_status, shdr_add_status, mb->mbxStatus);
6283 goto disable_ras;
6284 }
6285
6286 ras_fwlog->ras_active = true;
6287 mempool_free(pmb, phba->mbox_mem_pool);
6288
6289 return;
6290
6291disable_ras:
6292 /* Free RAS DMA memory */
6293 lpfc_sli4_ras_dma_free(phba);
6294 mempool_free(pmb, phba->mbox_mem_pool);
6295}
6296
6297/**
6298 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6299 * @phba: pointer to lpfc hba data structure.
6300 * @fwlog_level: Logging verbosity level.
6301 * @fwlog_enable: Enable/Disable logging.
6302 *
6303 * Initialize memory and post mailbox command to enable FW logging in host
6304 * memory.
6305 **/
6306int
6307lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6308 uint32_t fwlog_level,
6309 uint32_t fwlog_enable)
6310{
6311 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6312 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6313 struct lpfc_dmabuf *dmabuf;
6314 LPFC_MBOXQ_t *mbox;
6315 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6316 int rc = 0;
6317
6318 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6319 phba->cfg_ras_fwlog_buffsize);
6320 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6321
6322 /*
6323 * If re-enabling FW logging support use earlier allocated
6324 * DMA buffers while posting MBX command.
6325 **/
6326 if (!ras_fwlog->lwpd.virt) {
6327 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6328 if (rc) {
6329 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6330 "6189 RAS FW Log Support Not Enabled");
6331 return rc;
6332 }
6333 }
6334
6335 /* Setup Mailbox command */
6336 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6337 if (!mbox) {
6338 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6339 "6190 RAS MBX Alloc Failed");
6340 rc = -ENOMEM;
6341 goto mem_free;
6342 }
6343
6344 ras_fwlog->fw_loglevel = fwlog_level;
6345 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6346 sizeof(struct lpfc_sli4_cfg_mhdr));
6347
6348 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6349 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6350 len, LPFC_SLI4_MBX_EMBED);
6351
6352 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6353 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6354 fwlog_enable);
6355 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6356 ras_fwlog->fw_loglevel);
6357 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6358 ras_fwlog->fw_buffcount);
6359 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6360 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6361
6362 /* Update DMA buffer address */
6363 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6364 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6365
6366 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6367 putPaddrLow(dmabuf->phys);
6368
6369 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6370 putPaddrHigh(dmabuf->phys);
6371 }
6372
 6373 /* Update LWPD address */
6374 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6375 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6376
6377 mbox->vport = phba->pport;
6378 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6379
6380 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6381
6382 if (rc == MBX_NOT_FINISHED) {
6383 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6384 "6191 RAS Mailbox failed. "
6385 "status %d mbxStatus : x%x", rc,
6386 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6387 mempool_free(mbox, phba->mbox_mem_pool);
6388 rc = -EIO;
6389 goto mem_free;
6390 } else
6391 rc = 0;
6392mem_free:
6393 if (rc)
6394 lpfc_sli4_ras_dma_free(phba);
6395
6396 return rc;
6397}
6398
6399/**
6400 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6401 * @phba: Pointer to HBA context object.
6402 *
6403 * Check if RAS is supported on the adapter and initialize it.
6404 **/
6405void
6406lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6407{
6408 /* Check RAS FW Log needs to be enabled or not */
6409 if (lpfc_check_fwlog_support(phba))
6410 return;
6411
6412 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6413 LPFC_RAS_ENABLE_LOGGING);
6414}
6415
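
Taken together, the five routines above form the RAS fwlog lifecycle; a usage sketch matching the call sites added elsewhere in this patch. Re-enabling reuses the existing DMA buffers, because lpfc_sli4_ras_fwlog_init() skips allocation while lwpd.virt is still set:

    lpfc_sli4_ras_init(phba);       /* at setup: detect hw + function support */
    lpfc_sli4_ras_setup(phba);      /* at probe: init + post SET_DIAG_LOG_OPTION */

    /* runtime: firmware appends to the 64K buffers; the LWPD tracks
     * its write position */

    lpfc_sli4_ras_dma_free(phba);   /* at unset, after disabling logging in FW */
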
6416/**
6149 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 6417 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6150 * @phba: Pointer to HBA context object. 6418 * @phba: Pointer to HBA context object.
6151 * 6419 *
@@ -10266,8 +10534,12 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10266 LPFC_MBOXQ_t *pmb; 10534 LPFC_MBOXQ_t *pmb;
10267 unsigned long iflag; 10535 unsigned long iflag;
10268 10536
10537 /* Disable softirqs, including timers from obtaining phba->hbalock */
10538 local_bh_disable();
10539
10269 /* Flush all the mailbox commands in the mbox system */ 10540 /* Flush all the mailbox commands in the mbox system */
10270 spin_lock_irqsave(&phba->hbalock, iflag); 10541 spin_lock_irqsave(&phba->hbalock, iflag);
10542
10271 /* The pending mailbox command queue */ 10543 /* The pending mailbox command queue */
10272 list_splice_init(&phba->sli.mboxq, &completions); 10544 list_splice_init(&phba->sli.mboxq, &completions);
10273 /* The outstanding active mailbox command */ 10545 /* The outstanding active mailbox command */
@@ -10280,6 +10552,9 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10280 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10552 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10281 spin_unlock_irqrestore(&phba->hbalock, iflag); 10553 spin_unlock_irqrestore(&phba->hbalock, iflag);
10282 10554
10555 /* Enable softirqs again, done with phba->hbalock */
10556 local_bh_enable();
10557
10283 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10558 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10284 while (!list_empty(&completions)) { 10559 while (!list_empty(&completions)) {
10285 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10560 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
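
Per the patch's own comments, the local_bh_disable()/local_bh_enable() pairs added here and in the shutdown hunks below keep softirq-context timers from grabbing phba->hbalock on this CPU across the whole multi-step sequence, including the windows where the lock is briefly dropped. The bracketing pattern:

    local_bh_disable();                     /* timers can't run on this CPU */

    spin_lock_irqsave(&phba->hbalock, iflag);
    /* splice mboxq, mbox_active and mboxq_cmpl onto a private list */
    spin_unlock_irqrestore(&phba->hbalock, iflag);

    local_bh_enable();                      /* done with hbalock */

    /* complete the flushed commands with MBX_NOT_FINISHED, lock-free */
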
@@ -10419,6 +10694,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
10419 10694
10420 lpfc_hba_down_prep(phba); 10695 lpfc_hba_down_prep(phba);
10421 10696
10697 /* Disable softirqs, including timers from obtaining phba->hbalock */
10698 local_bh_disable();
10699
10422 lpfc_fabric_abort_hba(phba); 10700 lpfc_fabric_abort_hba(phba);
10423 10701
10424 spin_lock_irqsave(&phba->hbalock, flags); 10702 spin_lock_irqsave(&phba->hbalock, flags);
@@ -10472,6 +10750,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
10472 kfree(buf_ptr); 10750 kfree(buf_ptr);
10473 } 10751 }
10474 10752
10753 /* Enable softirqs again, done with phba->hbalock */
10754 local_bh_enable();
10755
10475 /* Return any active mbox cmds */ 10756 /* Return any active mbox cmds */
10476 del_timer_sync(&psli->mbox_tmo); 10757 del_timer_sync(&psli->mbox_tmo);
10477 10758
@@ -11775,6 +12056,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
11775 } 12056 }
11776 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 12057 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
11777 12058
12059 /* Disable softirqs, including timers from obtaining phba->hbalock */
12060 local_bh_disable();
12061
11778 spin_lock_irq(&phba->hbalock); 12062 spin_lock_irq(&phba->hbalock);
11779 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 12063 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11780 12064
@@ -11788,6 +12072,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
11788 1000) + jiffies; 12072 1000) + jiffies;
11789 spin_unlock_irq(&phba->hbalock); 12073 spin_unlock_irq(&phba->hbalock);
11790 12074
12075 /* Enable softirqs again, done with phba->hbalock */
12076 local_bh_enable();
12077
11791 while (phba->sli.mbox_active) { 12078 while (phba->sli.mbox_active) {
11792 /* Check active mailbox complete status every 2ms */ 12079 /* Check active mailbox complete status every 2ms */
11793 msleep(2); 12080 msleep(2);
@@ -11797,9 +12084,13 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
11797 */ 12084 */
11798 break; 12085 break;
11799 } 12086 }
11800 } else 12087 } else {
11801 spin_unlock_irq(&phba->hbalock); 12088 spin_unlock_irq(&phba->hbalock);
11802 12089
12090 /* Enable softirqs again, done with phba->hbalock */
12091 local_bh_enable();
12092 }
12093
11803 lpfc_sli_mbox_sys_flush(phba); 12094 lpfc_sli_mbox_sys_flush(phba);
11804} 12095}
11805 12096
@@ -13136,7 +13427,6 @@ static bool
13136lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 13427lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13137{ 13428{
13138 bool workposted = false; 13429 bool workposted = false;
13139 struct fc_frame_header *fc_hdr;
13140 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 13430 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13141 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 13431 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13142 struct lpfc_nvmet_tgtport *tgtp; 13432 struct lpfc_nvmet_tgtport *tgtp;
@@ -13173,9 +13463,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13173 hrq->RQ_buf_posted--; 13463 hrq->RQ_buf_posted--;
13174 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 13464 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13175 13465
13176 /* If a NVME LS event (type 0x28), treat it as Fast path */
13177 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13178
13179 /* save off the frame for the word thread to process */ 13466 /* save off the frame for the word thread to process */
13180 list_add_tail(&dma_buf->cq_event.list, 13467 list_add_tail(&dma_buf->cq_event.list,
13181 &phba->sli4_hba.sp_queue_event); 13468 &phba->sli4_hba.sp_queue_event);
@@ -14558,13 +14845,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14558 int rc, length, status = 0; 14845 int rc, length, status = 0;
14559 uint32_t shdr_status, shdr_add_status; 14846 uint32_t shdr_status, shdr_add_status;
14560 union lpfc_sli4_cfg_shdr *shdr; 14847 union lpfc_sli4_cfg_shdr *shdr;
14561 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14562 14848
14563 /* sanity check on queue memory */ 14849 /* sanity check on queue memory */
14564 if (!cq || !eq) 14850 if (!cq || !eq)
14565 return -ENODEV; 14851 return -ENODEV;
14566 if (!phba->sli4_hba.pc_sli4_params.supported)
14567 hw_page_size = cq->page_size;
14568 14852
14569 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14853 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14570 if (!mbox) 14854 if (!mbox)
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 399c0015c546..e76c380e1a84 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -886,3 +886,4 @@ int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
886int lpfc_sli4_post_status_check(struct lpfc_hba *); 886int lpfc_sli4_post_status_check(struct lpfc_hba *);
887uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *); 887uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
888uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *); 888uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
889void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 501249509af4..5a0d512ff497 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
20 * included with this package. * 20 * included with this package. *
21 *******************************************************************/ 21 *******************************************************************/
22 22
23#define LPFC_DRIVER_VERSION "12.0.0.6" 23#define LPFC_DRIVER_VERSION "12.0.0.7"
24#define LPFC_DRIVER_NAME "lpfc" 24#define LPFC_DRIVER_NAME "lpfc"
25 25
26/* Used for SLI 2/3 */ 26/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1ff0f7de9105..c340e0e47473 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -207,7 +207,7 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
207 struct lpfc_vport *vport; 207 struct lpfc_vport *vport;
208 unsigned long flags; 208 unsigned long flags;
209 209
210 spin_lock_irqsave(&phba->hbalock, flags); 210 spin_lock_irqsave(&phba->port_list_lock, flags);
211 list_for_each_entry(vport, &phba->port_list, listentry) { 211 list_for_each_entry(vport, &phba->port_list, listentry) {
212 if (vport == new_vport) 212 if (vport == new_vport)
213 continue; 213 continue;
@@ -215,11 +215,11 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
215 if (memcmp(&vport->fc_sparam.portName, 215 if (memcmp(&vport->fc_sparam.portName,
216 &new_vport->fc_sparam.portName, 216 &new_vport->fc_sparam.portName,
217 sizeof(struct lpfc_name)) == 0) { 217 sizeof(struct lpfc_name)) == 0) {
218 spin_unlock_irqrestore(&phba->hbalock, flags); 218 spin_unlock_irqrestore(&phba->port_list_lock, flags);
219 return 0; 219 return 0;
220 } 220 }
221 } 221 }
222 spin_unlock_irqrestore(&phba->hbalock, flags); 222 spin_unlock_irqrestore(&phba->port_list_lock, flags);
223 return 1; 223 return 1;
224} 224}
225 225
@@ -825,9 +825,9 @@ skip_logo:
825 825
826 lpfc_free_vpi(phba, vport->vpi); 826 lpfc_free_vpi(phba, vport->vpi);
827 vport->work_port_events = 0; 827 vport->work_port_events = 0;
828 spin_lock_irq(&phba->hbalock); 828 spin_lock_irq(&phba->port_list_lock);
829 list_del_init(&vport->listentry); 829 list_del_init(&vport->listentry);
830 spin_unlock_irq(&phba->hbalock); 830 spin_unlock_irq(&phba->port_list_lock);
831 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 831 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
832 "1828 Vport Deleted.\n"); 832 "1828 Vport Deleted.\n");
833 scsi_host_put(shost); 833 scsi_host_put(shost);
@@ -844,7 +844,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
844 GFP_KERNEL); 844 GFP_KERNEL);
845 if (vports == NULL) 845 if (vports == NULL)
846 return NULL; 846 return NULL;
847 spin_lock_irq(&phba->hbalock); 847 spin_lock_irq(&phba->port_list_lock);
848 list_for_each_entry(port_iterator, &phba->port_list, listentry) { 848 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
849 if (port_iterator->load_flag & FC_UNLOADING) 849 if (port_iterator->load_flag & FC_UNLOADING)
850 continue; 850 continue;
@@ -856,7 +856,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
856 } 856 }
857 vports[index++] = port_iterator; 857 vports[index++] = port_iterator;
858 } 858 }
859 spin_unlock_irq(&phba->hbalock); 859 spin_unlock_irq(&phba->port_list_lock);
860 return vports; 860 return vports;
861} 861}
862 862
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index eb551f3cc471..764d320bb2ca 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -52,14 +52,12 @@ struct mac_esp_priv {
52 struct esp *esp; 52 struct esp *esp;
53 void __iomem *pdma_regs; 53 void __iomem *pdma_regs;
54 void __iomem *pdma_io; 54 void __iomem *pdma_io;
55 int error;
56}; 55};
57static struct esp *esp_chips[2]; 56static struct esp *esp_chips[2];
58static DEFINE_SPINLOCK(esp_chips_lock); 57static DEFINE_SPINLOCK(esp_chips_lock);
59 58
60#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ 59#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
61 platform_get_drvdata((struct platform_device *) \ 60 dev_get_drvdata((esp)->dev))
62 (esp->dev)))
63 61
64static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg) 62static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
65{ 63{
@@ -71,38 +69,6 @@ static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
71 return nubus_readb(esp->regs + reg * 16); 69 return nubus_readb(esp->regs + reg * 16);
72} 70}
73 71
74/* For pseudo DMA and PIO we need the virtual address
75 * so this address mapping is the identity mapping.
76 */
77
78static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
79 size_t sz, int dir)
80{
81 return (dma_addr_t)buf;
82}
83
84static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
85 int num_sg, int dir)
86{
87 int i;
88
89 for (i = 0; i < num_sg; i++)
90 sg[i].dma_address = (u32)sg_virt(&sg[i]);
91 return num_sg;
92}
93
94static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
95 size_t sz, int dir)
96{
97 /* Nothing to do. */
98}
99
100static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
101 int num_sg, int dir)
102{
103 /* Nothing to do. */
104}
105
106static void mac_esp_reset_dma(struct esp *esp) 72static void mac_esp_reset_dma(struct esp *esp)
107{ 73{
108 /* Nothing to do. */ 74 /* Nothing to do. */
@@ -120,12 +86,11 @@ static void mac_esp_dma_invalidate(struct esp *esp)
120 86
121static int mac_esp_dma_error(struct esp *esp) 87static int mac_esp_dma_error(struct esp *esp)
122{ 88{
123 return MAC_ESP_GET_PRIV(esp)->error; 89 return esp->send_cmd_error;
124} 90}
125 91
126static inline int mac_esp_wait_for_empty_fifo(struct esp *esp) 92static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
127{ 93{
128 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
129 int i = 500000; 94 int i = 500000;
130 95
131 do { 96 do {
@@ -140,7 +105,7 @@ static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
140 105
141 printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n", 106 printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
142 esp_read8(ESP_STATUS)); 107 esp_read8(ESP_STATUS));
143 mep->error = 1; 108 esp->send_cmd_error = 1;
144 return 1; 109 return 1;
145} 110}
146 111
@@ -166,7 +131,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
166 131
167 printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n", 132 printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
168 esp_read8(ESP_STATUS)); 133 esp_read8(ESP_STATUS));
169 mep->error = 1; 134 esp->send_cmd_error = 1;
170 return 1; 135 return 1;
171} 136}
172 137
@@ -233,7 +198,7 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
233{ 198{
234 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 199 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
235 200
236 mep->error = 0; 201 esp->send_cmd_error = 0;
237 202
238 if (!write) 203 if (!write)
239 scsi_esp_cmd(esp, ESP_CMD_FLUSH); 204 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -271,164 +236,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
271 } while (esp_count); 236 } while (esp_count);
272} 237}
273 238
274/*
275 * Programmed IO routines follow.
276 */
277
278static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
279{
280 int i = 500000;
281
282 do {
283 unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
284
285 if (fbytes)
286 return fbytes;
287
288 udelay(2);
289 } while (--i);
290
291 printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
292 esp_read8(ESP_STATUS));
293 return 0;
294}
295
296static inline int mac_esp_wait_for_intr(struct esp *esp)
297{
298 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
299 int i = 500000;
300
301 do {
302 esp->sreg = esp_read8(ESP_STATUS);
303 if (esp->sreg & ESP_STAT_INTR)
304 return 0;
305
306 udelay(2);
307 } while (--i);
308
309 printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
310 mep->error = 1;
311 return 1;
312}
313
314#define MAC_ESP_PIO_LOOP(operands, reg1) \
315 asm volatile ( \
316 "1: moveb " operands " \n" \
317 " subqw #1,%1 \n" \
318 " jbne 1b \n" \
319 : "+a" (addr), "+r" (reg1) \
320 : "a" (fifo))
321
322#define MAC_ESP_PIO_FILL(operands, reg1) \
323 asm volatile ( \
324 " moveb " operands " \n" \
325 " moveb " operands " \n" \
326 " moveb " operands " \n" \
327 " moveb " operands " \n" \
328 " moveb " operands " \n" \
329 " moveb " operands " \n" \
330 " moveb " operands " \n" \
331 " moveb " operands " \n" \
332 " moveb " operands " \n" \
333 " moveb " operands " \n" \
334 " moveb " operands " \n" \
335 " moveb " operands " \n" \
336 " moveb " operands " \n" \
337 " moveb " operands " \n" \
338 " moveb " operands " \n" \
339 " moveb " operands " \n" \
340 " subqw #8,%1 \n" \
341 " subqw #8,%1 \n" \
342 : "+a" (addr), "+r" (reg1) \
343 : "a" (fifo))
344
345#define MAC_ESP_FIFO_SIZE 16
346
347static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
348 u32 dma_count, int write, u8 cmd)
349{
350 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
351 u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
352 u8 phase = esp->sreg & ESP_STAT_PMASK;
353
354 cmd &= ~ESP_CMD_DMA;
355 mep->error = 0;
356
357 if (write) {
358 u8 *dst = (u8 *)addr;
359 u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
360
361 scsi_esp_cmd(esp, cmd);
362
363 while (1) {
364 if (!mac_esp_wait_for_fifo(esp))
365 break;
366
367 *dst++ = esp_read8(ESP_FDATA);
368 --esp_count;
369
370 if (!esp_count)
371 break;
372
373 if (mac_esp_wait_for_intr(esp))
374 break;
375
376 if ((esp->sreg & ESP_STAT_PMASK) != phase)
377 break;
378
379 esp->ireg = esp_read8(ESP_INTRPT);
380 if (esp->ireg & mask) {
381 mep->error = 1;
382 break;
383 }
384
385 if (phase == ESP_MIP)
386 scsi_esp_cmd(esp, ESP_CMD_MOK);
387
388 scsi_esp_cmd(esp, ESP_CMD_TI);
389 }
390 } else {
391 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
392
393 if (esp_count >= MAC_ESP_FIFO_SIZE)
394 MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
395 else
396 MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
397
398 scsi_esp_cmd(esp, cmd);
399
400 while (esp_count) {
401 unsigned int n;
402
403 if (mac_esp_wait_for_intr(esp))
404 break;
405
406 if ((esp->sreg & ESP_STAT_PMASK) != phase)
407 break;
408
409 esp->ireg = esp_read8(ESP_INTRPT);
410 if (esp->ireg & ~ESP_INTR_BSERV) {
411 mep->error = 1;
412 break;
413 }
414
415 n = MAC_ESP_FIFO_SIZE -
416 (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
417 if (n > esp_count)
418 n = esp_count;
419
420 if (n == MAC_ESP_FIFO_SIZE) {
421 MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
422 } else {
423 esp_count -= n;
424 MAC_ESP_PIO_LOOP("%0@+,%2@", n);
425 }
426
427 scsi_esp_cmd(esp, ESP_CMD_TI);
428 }
429 }
430}
431
432static int mac_esp_irq_pending(struct esp *esp) 239static int mac_esp_irq_pending(struct esp *esp)
433{ 240{
434 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR) 241 if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
@@ -470,10 +277,6 @@ static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
470static struct esp_driver_ops mac_esp_ops = { 277static struct esp_driver_ops mac_esp_ops = {
471 .esp_write8 = mac_esp_write8, 278 .esp_write8 = mac_esp_write8,
472 .esp_read8 = mac_esp_read8, 279 .esp_read8 = mac_esp_read8,
473 .map_single = mac_esp_map_single,
474 .map_sg = mac_esp_map_sg,
475 .unmap_single = mac_esp_unmap_single,
476 .unmap_sg = mac_esp_unmap_sg,
477 .irq_pending = mac_esp_irq_pending, 280 .irq_pending = mac_esp_irq_pending,
478 .dma_length_limit = mac_esp_dma_length_limit, 281 .dma_length_limit = mac_esp_dma_length_limit,
479 .reset_dma = mac_esp_reset_dma, 282 .reset_dma = mac_esp_reset_dma,
@@ -508,7 +311,7 @@ static int esp_mac_probe(struct platform_device *dev)
508 esp = shost_priv(host); 311 esp = shost_priv(host);
509 312
510 esp->host = host; 313 esp->host = host;
511 esp->dev = dev; 314 esp->dev = &dev->dev;
512 315
513 esp->command_block = kzalloc(16, GFP_KERNEL); 316 esp->command_block = kzalloc(16, GFP_KERNEL);
514 if (!esp->command_block) 317 if (!esp->command_block)
@@ -551,14 +354,16 @@ static int esp_mac_probe(struct platform_device *dev)
551 mep->pdma_regs = NULL; 354 mep->pdma_regs = NULL;
552 break; 355 break;
553 } 356 }
357 esp->fifo_reg = esp->regs + ESP_FDATA * 16;
554 358
555 esp->ops = &mac_esp_ops; 359 esp->ops = &mac_esp_ops;
360 esp->flags = ESP_FLAG_NO_DMA_MAP;
556 if (mep->pdma_io == NULL) { 361 if (mep->pdma_io == NULL) {
557 printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id); 362 printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
558 esp_write8(0, ESP_TCLOW); 363 esp_write8(0, ESP_TCLOW);
559 esp_write8(0, ESP_TCMED); 364 esp_write8(0, ESP_TCMED);
560 esp->flags = ESP_FLAG_DISABLE_SYNC; 365 esp->flags |= ESP_FLAG_DISABLE_SYNC;
561 mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd; 366 mac_esp_ops.send_dma_cmd = esp_send_pio_cmd;
562 } else { 367 } else {
563 printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id); 368 printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
564 } 369 }
@@ -577,7 +382,7 @@ static int esp_mac_probe(struct platform_device *dev)
577 esp_chips[dev->id] = esp; 382 esp_chips[dev->id] = esp;
578 spin_unlock(&esp_chips_lock); 383 spin_unlock(&esp_chips_lock);
579 384
580 err = scsi_esp_register(esp, &dev->dev); 385 err = scsi_esp_register(esp);
581 if (err) 386 if (err)
582 goto fail_free_irq; 387 goto fail_free_irq;
583 388
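
This mac_esp conversion leans on three esp_scsi core changes visible from the call sites: the core can now skip DMA mapping entirely (ESP_FLAG_NO_DMA_MAP replaces the four identity map/unmap callbacks), the PIO loop moved into the core as esp_send_pio_cmd() with esp->fifo_reg telling it where the FIFO lives, and the per-driver error flag became esp->send_cmd_error. The driver-side wiring, condensed from the hunks above:

    esp->dev = &dev->dev;                   /* a struct device now, not the pdev */
    esp->fifo_reg = esp->regs + ESP_FDATA * 16;
    esp->flags = ESP_FLAG_NO_DMA_MAP;       /* core skips dma_map_*; PDMA/PIO only */

    if (!mep->pdma_io) {                    /* no pseudo-DMA: fall back to PIO */
            esp->flags |= ESP_FLAG_DISABLE_SYNC;
            mac_esp_ops.send_dma_cmd = esp_send_pio_cmd; /* shared core helper */
    }

    err = scsi_esp_register(esp);           /* struct device argument dropped */
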
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 530358cdcb39..3b7abe5ca7f5 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -202,13 +202,6 @@ module_param_named(debug_level, mraid_debug_level, int, 0);
202MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)"); 202MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
203 203
204/* 204/*
205 * ### global data ###
206 */
207static uint8_t megaraid_mbox_version[8] =
208 { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
209
210
211/*
212 * PCI table for all supported controllers. 205 * PCI table for all supported controllers.
213 */ 206 */
214static struct pci_device_id pci_id_table_g[] = { 207static struct pci_device_id pci_id_table_g[] = {
@@ -457,10 +450,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
457 450
458 // Setup the default DMA mask. This would be changed later on 451 // Setup the default DMA mask. This would be changed later on
459 // depending on hardware capabilities 452 // depending on hardware capabilities
460 if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) { 453 if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) {
461
462 con_log(CL_ANN, (KERN_WARNING 454 con_log(CL_ANN, (KERN_WARNING
463 "megaraid: pci_set_dma_mask failed:%d\n", __LINE__)); 455 "megaraid: dma_set_mask failed:%d\n", __LINE__));
464 456
465 goto out_free_adapter; 457 goto out_free_adapter;
466 } 458 }
@@ -484,7 +476,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
484 // Start the mailbox based controller 476 // Start the mailbox based controller
485 if (megaraid_init_mbox(adapter) != 0) { 477 if (megaraid_init_mbox(adapter) != 0) {
486 con_log(CL_ANN, (KERN_WARNING 478 con_log(CL_ANN, (KERN_WARNING
487 "megaraid: maibox adapter did not initialize\n")); 479 "megaraid: mailbox adapter did not initialize\n"));
488 480
489 goto out_free_adapter; 481 goto out_free_adapter;
490 } 482 }
@@ -878,11 +870,12 @@ megaraid_init_mbox(adapter_t *adapter)
878 adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) || 870 adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
879 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL && 871 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
880 adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) { 872 adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
881 if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) { 873 if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(64))) {
882 con_log(CL_ANN, (KERN_WARNING 874 con_log(CL_ANN, (KERN_WARNING
883 "megaraid: DMA mask for 64-bit failed\n")); 875 "megaraid: DMA mask for 64-bit failed\n"));
884 876
885 if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) { 877 if (dma_set_mask(&adapter->pdev->dev,
878 DMA_BIT_MASK(32))) {
886 con_log(CL_ANN, (KERN_WARNING 879 con_log(CL_ANN, (KERN_WARNING
887 "megaraid: 32-bit DMA mask failed\n")); 880 "megaraid: 32-bit DMA mask failed\n"));
888 goto out_free_sysfs_res; 881 goto out_free_sysfs_res;
@@ -950,7 +943,7 @@ megaraid_fini_mbox(adapter_t *adapter)
950 * megaraid_alloc_cmd_packets - allocate shared mailbox 943 * megaraid_alloc_cmd_packets - allocate shared mailbox
951 * @adapter : soft state of the raid controller 944 * @adapter : soft state of the raid controller
952 * 945 *
953 * Allocate and align the shared mailbox. This maibox is used to issue 946 * Allocate and align the shared mailbox. This mailbox is used to issue
954 * all the commands. For IO based controllers, the mailbox is also registered 947 * all the commands. For IO based controllers, the mailbox is also registered
955 * with the FW. Allocate memory for all commands as well. 948 * with the FW. Allocate memory for all commands as well.
956 * This is our big allocator. 949 * This is our big allocator.
@@ -975,9 +968,9 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
975 * Allocate the common 16-byte aligned memory for the handshake 968 * Allocate the common 16-byte aligned memory for the handshake
976 * mailbox. 969 * mailbox.
977 */ 970 */
978 raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev, 971 raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev,
979 sizeof(mbox64_t), 972 sizeof(mbox64_t), &raid_dev->una_mbox64_dma,
980 &raid_dev->una_mbox64_dma); 973 GFP_KERNEL);
981 974
982 if (!raid_dev->una_mbox64) { 975 if (!raid_dev->una_mbox64) {
983 con_log(CL_ANN, (KERN_WARNING 976 con_log(CL_ANN, (KERN_WARNING
@@ -1003,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1003 align; 996 align;
1004 997
1005 // Allocate memory for commands issued internally 998 // Allocate memory for commands issued internally
1006 adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE, 999 adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
1007 &adapter->ibuf_dma_h); 1000 &adapter->ibuf_dma_h, GFP_KERNEL);
1008 if (!adapter->ibuf) { 1001 if (!adapter->ibuf) {
1009 1002
1010 con_log(CL_ANN, (KERN_WARNING 1003 con_log(CL_ANN, (KERN_WARNING
@@ -1082,7 +1075,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1082 1075
1083 scb->scp = NULL; 1076 scb->scp = NULL;
1084 scb->state = SCB_FREE; 1077 scb->state = SCB_FREE;
1085 scb->dma_direction = PCI_DMA_NONE; 1078 scb->dma_direction = DMA_NONE;
1086 scb->dma_type = MRAID_DMA_NONE; 1079 scb->dma_type = MRAID_DMA_NONE;
1087 scb->dev_channel = -1; 1080 scb->dev_channel = -1;
1088 scb->dev_target = -1; 1081 scb->dev_target = -1;
@@ -1098,10 +1091,10 @@ out_teardown_dma_pools:
1098out_free_scb_list: 1091out_free_scb_list:
1099 kfree(adapter->kscb_list); 1092 kfree(adapter->kscb_list);
1100out_free_ibuf: 1093out_free_ibuf:
1101 pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf, 1094 dma_free_coherent(&pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
1102 adapter->ibuf_dma_h); 1095 adapter->ibuf_dma_h);
1103out_free_common_mbox: 1096out_free_common_mbox:
1104 pci_free_consistent(adapter->pdev, sizeof(mbox64_t), 1097 dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
1105 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); 1098 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1106 1099
1107 return -1; 1100 return -1;
@@ -1123,10 +1116,10 @@ megaraid_free_cmd_packets(adapter_t *adapter)
1123 1116
1124 kfree(adapter->kscb_list); 1117 kfree(adapter->kscb_list);
1125 1118
1126 pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE, 1119 dma_free_coherent(&adapter->pdev->dev, MBOX_IBUF_SIZE,
1127 (void *)adapter->ibuf, adapter->ibuf_dma_h); 1120 (void *)adapter->ibuf, adapter->ibuf_dma_h);
1128 1121
1129 pci_free_consistent(adapter->pdev, sizeof(mbox64_t), 1122 dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
1130 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); 1123 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1131 return; 1124 return;
1132} 1125}
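
The megaraid_mbox changes are a mechanical move off the deprecated pci_* DMA wrappers onto the generic DMA API. The correspondence used throughout — note dma_zalloc_coherent() was the current zeroing allocator at this point; it was later folded into dma_alloc_coherent():

    /* pci_set_dma_mask(pdev, mask)     -> dma_set_mask(&pdev->dev, mask)
     * pci_zalloc_consistent(pdev, ...) -> dma_zalloc_coherent(&pdev->dev, ..., GFP_KERNEL)
     * pci_free_consistent(pdev, ...)   -> dma_free_coherent(&pdev->dev, ...)
     * PCI_DMA_TODEVICE / _FROMDEVICE   -> DMA_TO_DEVICE / DMA_FROM_DEVICE
     */
    buf = dma_zalloc_coherent(&pdev->dev, len, &dma_h, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    /* use the buffer, then release it */
    dma_free_coherent(&pdev->dev, len, buf, dma_h);
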
@@ -1428,12 +1421,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
1428 1421
1429 adapter->outstanding_cmds++; 1422 adapter->outstanding_cmds++;
1430 1423
1431 if (scb->dma_direction == PCI_DMA_TODEVICE)
1432 pci_dma_sync_sg_for_device(adapter->pdev,
1433 scsi_sglist(scb->scp),
1434 scsi_sg_count(scb->scp),
1435 PCI_DMA_TODEVICE);
1436
1437 mbox->busy = 1; // Set busy 1424 mbox->busy = 1; // Set busy
1438 mbox->poll = 0; 1425 mbox->poll = 0;
1439 mbox->ack = 0; 1426 mbox->ack = 0;
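
The rationale for dropping pci_dma_sync_sg_for_device() here (and megaraid_mbox_sync_scb() below): dma_map_sg() hands the buffer to the device already synced, and a for-device sync is only required if the CPU writes the buffer after mapping, which this driver does not do; scsi_dma_unmap() covers the for-CPU side on completion. Ownership in miniature:

    n = dma_map_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE); /* device-owned, synced */
    if (!n)
            return -EIO;
    /* hardware may DMA from sgl now; no extra sync required */
    dma_unmap_sg(&pdev->dev, sgl, n, DMA_TO_DEVICE);       /* back to the CPU */
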
@@ -2181,31 +2168,6 @@ megaraid_isr(int irq, void *devp)
2181 2168
2182 2169
2183/** 2170/**
2184 * megaraid_mbox_sync_scb - sync kernel buffers
2185 * @adapter : controller's soft state
2186 * @scb : pointer to the resource packet
2187 *
2188 * DMA sync if required.
2189 */
2190static void
2191megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
2192{
2193 mbox_ccb_t *ccb;
2194
2195 ccb = (mbox_ccb_t *)scb->ccb;
2196
2197 if (scb->dma_direction == PCI_DMA_FROMDEVICE)
2198 pci_dma_sync_sg_for_cpu(adapter->pdev,
2199 scsi_sglist(scb->scp),
2200 scsi_sg_count(scb->scp),
2201 PCI_DMA_FROMDEVICE);
2202
2203 scsi_dma_unmap(scb->scp);
2204 return;
2205}
2206
2207
2208/**
2209 * megaraid_mbox_dpc - the tasklet to complete the commands from completed list 2171 * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
2210 * @devp : pointer to HBA soft state 2172 * @devp : pointer to HBA soft state
2211 * 2173 *
@@ -2403,9 +2365,7 @@ megaraid_mbox_dpc(unsigned long devp)
2403 megaraid_mbox_display_scb(adapter, scb); 2365 megaraid_mbox_display_scb(adapter, scb);
2404 } 2366 }
2405 2367
2406 // Free our internal resources and call the mid-layer callback 2368 scsi_dma_unmap(scp);
2407 // routine
2408 megaraid_mbox_sync_scb(adapter, scb);
2409 2369
2410 // remove from local clist 2370 // remove from local clist
2411 list_del_init(&scb->list); 2371 list_del_init(&scb->list);
@@ -2577,7 +2537,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2577 uint8_t raw_mbox[sizeof(mbox_t)]; 2537 uint8_t raw_mbox[sizeof(mbox_t)];
2578 int rval; 2538 int rval;
2579 int recovery_window; 2539 int recovery_window;
2580 int recovering;
2581 int i; 2540 int i;
2582 uioc_t *kioc; 2541 uioc_t *kioc;
2583 2542
@@ -2591,7 +2550,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2591 return FAILED; 2550 return FAILED;
2592 } 2551 }
2593 2552
2594
2595 // Under exceptional conditions, FW can take up to 3 minutes to 2553 // Under exceptional conditions, FW can take up to 3 minutes to
2596 // complete command processing. Wait for additional 2 minutes for the 2554 // complete command processing. Wait for additional 2 minutes for the
2597 // pending commands counter to go down to 0. If it doesn't, let the 2555 // pending commands counter to go down to 0. If it doesn't, let the
@@ -2640,8 +2598,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2640 2598
2641 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; 2599 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
2642 2600
2643 recovering = adapter->outstanding_cmds;
2644
2645 for (i = 0; i < recovery_window; i++) { 2601 for (i = 0; i < recovery_window; i++) {
2646 2602
2647 megaraid_ack_sequence(adapter); 2603 megaraid_ack_sequence(adapter);
@@ -2725,13 +2681,10 @@ static int
2725mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[]) 2681mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
2726{ 2682{
2727 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 2683 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
2728 mbox64_t *mbox64;
2729 mbox_t *mbox; 2684 mbox_t *mbox;
2730 uint8_t status; 2685 uint8_t status;
2731 int i; 2686 int i;
2732 2687
2733
2734 mbox64 = raid_dev->mbox64;
2735 mbox = raid_dev->mbox; 2688 mbox = raid_dev->mbox;
2736 2689
2737 /* 2690 /*
@@ -2948,9 +2901,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
2948 * Issue an ENQUIRY3 command to find out certain adapter parameters, 2901 * Issue an ENQUIRY3 command to find out certain adapter parameters,
2949 * e.g., max channels, max commands etc. 2902 * e.g., max channels, max commands etc.
2950 */ 2903 */
2951 pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t), 2904 pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
2952 &pinfo_dma_h); 2905 &pinfo_dma_h, GFP_KERNEL);
2953
2954 if (pinfo == NULL) { 2906 if (pinfo == NULL) {
2955 con_log(CL_ANN, (KERN_WARNING 2907 con_log(CL_ANN, (KERN_WARNING
2956 "megaraid: out of memory, %s %d\n", __func__, 2908 "megaraid: out of memory, %s %d\n", __func__,
@@ -2971,7 +2923,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
2971 2923
2972 con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n")); 2924 con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
2973 2925
2974 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), 2926 dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
2975 pinfo, pinfo_dma_h); 2927 pinfo, pinfo_dma_h);
2976 2928
2977 return -1; 2929 return -1;
@@ -3002,7 +2954,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
3002 con_log(CL_ANN, (KERN_WARNING 2954 con_log(CL_ANN, (KERN_WARNING
3003 "megaraid: product info failed\n")); 2955 "megaraid: product info failed\n"));
3004 2956
3005 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), 2957 dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
3006 pinfo, pinfo_dma_h); 2958 pinfo, pinfo_dma_h);
3007 2959
3008 return -1; 2960 return -1;
@@ -3038,7 +2990,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
3038 "megaraid: fw version:[%s] bios version:[%s]\n", 2990 "megaraid: fw version:[%s] bios version:[%s]\n",
3039 adapter->fw_version, adapter->bios_version)); 2991 adapter->fw_version, adapter->bios_version));
3040 2992
3041 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo, 2993 dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo,
3042 pinfo_dma_h); 2994 pinfo_dma_h);
3043 2995
3044 return 0; 2996 return 0;
@@ -3135,7 +3087,6 @@ megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
3135static int 3087static int
3136megaraid_mbox_support_random_del(adapter_t *adapter) 3088megaraid_mbox_support_random_del(adapter_t *adapter)
3137{ 3089{
3138 mbox_t *mbox;
3139 uint8_t raw_mbox[sizeof(mbox_t)]; 3090 uint8_t raw_mbox[sizeof(mbox_t)];
3140 int rval; 3091 int rval;
3141 3092
@@ -3157,8 +3108,6 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
3157 return 0; 3108 return 0;
3158 } 3109 }
3159 3110
3160 mbox = (mbox_t *)raw_mbox;
3161
3162 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); 3111 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3163 3112
3164 raw_mbox[0] = FC_DEL_LOGDRV; 3113 raw_mbox[0] = FC_DEL_LOGDRV;
@@ -3263,12 +3212,8 @@ megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
3263static void 3212static void
3264megaraid_mbox_flush_cache(adapter_t *adapter) 3213megaraid_mbox_flush_cache(adapter_t *adapter)
3265{ 3214{
3266 mbox_t *mbox;
3267 uint8_t raw_mbox[sizeof(mbox_t)]; 3215 uint8_t raw_mbox[sizeof(mbox_t)];
3268 3216
3269
3270 mbox = (mbox_t *)raw_mbox;
3271
3272 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); 3217 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3273 3218
3274 raw_mbox[0] = FLUSH_ADAPTER; 3219 raw_mbox[0] = FLUSH_ADAPTER;
@@ -3299,7 +3244,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
3299 mbox_t *mbox; 3244 mbox_t *mbox;
3300 uint8_t raw_mbox[sizeof(mbox_t)]; 3245 uint8_t raw_mbox[sizeof(mbox_t)];
3301 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 3246 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3302 mbox64_t *mbox64;
3303 int status = 0; 3247 int status = 0;
3304 int i; 3248 int i;
3305 uint32_t dword; 3249 uint32_t dword;
@@ -3310,7 +3254,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
3310 3254
3311 raw_mbox[0] = 0xFF; 3255 raw_mbox[0] = 0xFF;
3312 3256
3313 mbox64 = raid_dev->mbox64;
3314 mbox = raid_dev->mbox; 3257 mbox = raid_dev->mbox;
3315 3258
3316 /* Wait until mailbox is free */ 3259 /* Wait until mailbox is free */
@@ -3515,7 +3458,7 @@ megaraid_cmm_register(adapter_t *adapter)
3515 3458
3516 scb->scp = NULL; 3459 scb->scp = NULL;
3517 scb->state = SCB_FREE; 3460 scb->state = SCB_FREE;
3518 scb->dma_direction = PCI_DMA_NONE; 3461 scb->dma_direction = DMA_NONE;
3519 scb->dma_type = MRAID_DMA_NONE; 3462 scb->dma_type = MRAID_DMA_NONE;
3520 scb->dev_channel = -1; 3463 scb->dev_channel = -1;
3521 scb->dev_target = -1; 3464 scb->dev_target = -1;
@@ -3653,7 +3596,7 @@ megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
3653 3596
3654 scb->state = SCB_ACTIVE; 3597 scb->state = SCB_ACTIVE;
3655 scb->dma_type = MRAID_DMA_NONE; 3598 scb->dma_type = MRAID_DMA_NONE;
3656 scb->dma_direction = PCI_DMA_NONE; 3599 scb->dma_direction = DMA_NONE;
3657 3600
3658 ccb = (mbox_ccb_t *)scb->ccb; 3601 ccb = (mbox_ccb_t *)scb->ccb;
3659 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; 3602 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
@@ -3794,10 +3737,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3794static int 3737static int
3795gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo) 3738gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
3796{ 3739{
3797 uint8_t dmajor;
3798
3799 dmajor = megaraid_mbox_version[0];
3800
3801 hinfo->pci_vendor_id = adapter->pdev->vendor; 3740 hinfo->pci_vendor_id = adapter->pdev->vendor;
3802 hinfo->pci_device_id = adapter->pdev->device; 3741 hinfo->pci_device_id = adapter->pdev->device;
3803 hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor; 3742 hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
@@ -3843,8 +3782,8 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
3843 3782
3844 raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL); 3783 raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
3845 3784
3846 raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev, 3785 raid_dev->sysfs_buffer = dma_alloc_coherent(&adapter->pdev->dev,
3847 PAGE_SIZE, &raid_dev->sysfs_buffer_dma); 3786 PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL);
3848 3787
3849 if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 || 3788 if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
3850 !raid_dev->sysfs_buffer) { 3789 !raid_dev->sysfs_buffer) {
@@ -3881,7 +3820,7 @@ megaraid_sysfs_free_resources(adapter_t *adapter)
3881 kfree(raid_dev->sysfs_mbox64); 3820 kfree(raid_dev->sysfs_mbox64);
3882 3821
3883 if (raid_dev->sysfs_buffer) { 3822 if (raid_dev->sysfs_buffer) {
3884 pci_free_consistent(adapter->pdev, PAGE_SIZE, 3823 dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE,
3885 raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma); 3824 raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
3886 } 3825 }
3887} 3826}
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index c1d86d961a92..e075aeb4012f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -117,7 +117,7 @@
117 * @raw_mbox : raw mailbox pointer 117 * @raw_mbox : raw mailbox pointer
118 * @mbox : mailbox 118 * @mbox : mailbox
119 * @mbox64 : extended mailbox 119 * @mbox64 : extended mailbox
120 * @mbox_dma_h : maibox dma address 120 * @mbox_dma_h : mailbox dma address
121 * @sgl64 : 64-bit scatter-gather list 121 * @sgl64 : 64-bit scatter-gather list
122 * @sgl32 : 32-bit scatter-gather list 122 * @sgl32 : 32-bit scatter-gather list
123 * @sgl_dma_h : dma handle for the scatter-gather list 123 * @sgl_dma_h : dma handle for the scatter-gather list
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9aa9590c5373..9b90c716f06d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1330,11 +1330,11 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1330 device_id = MEGASAS_DEV_INDEX(scp); 1330 device_id = MEGASAS_DEV_INDEX(scp);
1331 pthru = (struct megasas_pthru_frame *)cmd->frame; 1331 pthru = (struct megasas_pthru_frame *)cmd->frame;
1332 1332
1333 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1333 if (scp->sc_data_direction == DMA_TO_DEVICE)
1334 flags = MFI_FRAME_DIR_WRITE; 1334 flags = MFI_FRAME_DIR_WRITE;
1335 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1335 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1336 flags = MFI_FRAME_DIR_READ; 1336 flags = MFI_FRAME_DIR_READ;
1337 else if (scp->sc_data_direction == PCI_DMA_NONE) 1337 else if (scp->sc_data_direction == DMA_NONE)
1338 flags = MFI_FRAME_DIR_NONE; 1338 flags = MFI_FRAME_DIR_NONE;
1339 1339
1340 if (instance->flag_ieee == 1) { 1340 if (instance->flag_ieee == 1) {
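
scp->sc_data_direction is an enum dma_data_direction, so comparing it against PCI_DMA_TODEVICE only ever worked because the PCI_DMA_* names are compat aliases for the generic constants; the patch switches the comparisons to the canonical names with no behavior change. A sketch of the resulting mapping, mirroring the flag selection above (MFI_FRAME_DIR_* come from the driver's megaraid_sas.h; the helper itself is hypothetical):

    #include <linux/types.h>
    #include <linux/dma-direction.h>
    #include "megaraid_sas.h"       /* MFI_FRAME_DIR_* */

    /* Sketch: translate the generic DMA direction into MFI frame flags. */
    static u16 demo_dir_to_flags(enum dma_data_direction dir)
    {
            switch (dir) {
            case DMA_TO_DEVICE:             /* was PCI_DMA_TODEVICE */
                    return MFI_FRAME_DIR_WRITE;
            case DMA_FROM_DEVICE:           /* was PCI_DMA_FROMDEVICE */
                    return MFI_FRAME_DIR_READ;
            case DMA_NONE:                  /* was PCI_DMA_NONE */
            default:
                    return MFI_FRAME_DIR_NONE;
            }
    }
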
@@ -1428,9 +1428,9 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1428 device_id = MEGASAS_DEV_INDEX(scp); 1428 device_id = MEGASAS_DEV_INDEX(scp);
1429 ldio = (struct megasas_io_frame *)cmd->frame; 1429 ldio = (struct megasas_io_frame *)cmd->frame;
1430 1430
1431 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1431 if (scp->sc_data_direction == DMA_TO_DEVICE)
1432 flags = MFI_FRAME_DIR_WRITE; 1432 flags = MFI_FRAME_DIR_WRITE;
1433 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1433 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1434 flags = MFI_FRAME_DIR_READ; 1434 flags = MFI_FRAME_DIR_READ;
1435 1435
1436 if (instance->flag_ieee == 1) { 1436 if (instance->flag_ieee == 1) {
@@ -2240,9 +2240,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2240 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2240 sizeof(struct MR_LD_VF_AFFILIATION_111));
2241 else { 2241 else {
2242 new_affiliation_111 = 2242 new_affiliation_111 =
2243 pci_zalloc_consistent(instance->pdev, 2243 dma_zalloc_coherent(&instance->pdev->dev,
2244 sizeof(struct MR_LD_VF_AFFILIATION_111), 2244 sizeof(struct MR_LD_VF_AFFILIATION_111),
2245 &new_affiliation_111_h); 2245 &new_affiliation_111_h, GFP_KERNEL);
2246 if (!new_affiliation_111) { 2246 if (!new_affiliation_111) {
2247 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2247 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2248 "memory for new affiliation for scsi%d\n", 2248 "memory for new affiliation for scsi%d\n",
@@ -2302,7 +2302,7 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2302 } 2302 }
2303out: 2303out:
2304 if (new_affiliation_111) { 2304 if (new_affiliation_111) {
2305 pci_free_consistent(instance->pdev, 2305 dma_free_coherent(&instance->pdev->dev,
2306 sizeof(struct MR_LD_VF_AFFILIATION_111), 2306 sizeof(struct MR_LD_VF_AFFILIATION_111),
2307 new_affiliation_111, 2307 new_affiliation_111,
2308 new_affiliation_111_h); 2308 new_affiliation_111_h);
@@ -2347,10 +2347,10 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2347 sizeof(struct MR_LD_VF_AFFILIATION)); 2347 sizeof(struct MR_LD_VF_AFFILIATION));
2348 else { 2348 else {
2349 new_affiliation = 2349 new_affiliation =
2350 pci_zalloc_consistent(instance->pdev, 2350 dma_zalloc_coherent(&instance->pdev->dev,
2351 (MAX_LOGICAL_DRIVES + 1) * 2351 (MAX_LOGICAL_DRIVES + 1) *
2352 sizeof(struct MR_LD_VF_AFFILIATION), 2352 sizeof(struct MR_LD_VF_AFFILIATION),
2353 &new_affiliation_h); 2353 &new_affiliation_h, GFP_KERNEL);
2354 if (!new_affiliation) { 2354 if (!new_affiliation) {
2355 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2355 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2356 "memory for new affiliation for scsi%d\n", 2356 "memory for new affiliation for scsi%d\n",
@@ -2470,7 +2470,7 @@ out:
2470 } 2470 }
2471 2471
2472 if (new_affiliation) 2472 if (new_affiliation)
2473 pci_free_consistent(instance->pdev, 2473 dma_free_coherent(&instance->pdev->dev,
2474 (MAX_LOGICAL_DRIVES + 1) * 2474 (MAX_LOGICAL_DRIVES + 1) *
2475 sizeof(struct MR_LD_VF_AFFILIATION), 2475 sizeof(struct MR_LD_VF_AFFILIATION),
2476 new_affiliation, new_affiliation_h); 2476 new_affiliation, new_affiliation_h);
@@ -2513,9 +2513,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2513 2513
2514 if (initial) { 2514 if (initial) {
2515 instance->hb_host_mem = 2515 instance->hb_host_mem =
2516 pci_zalloc_consistent(instance->pdev, 2516 dma_zalloc_coherent(&instance->pdev->dev,
2517 sizeof(struct MR_CTRL_HB_HOST_MEM), 2517 sizeof(struct MR_CTRL_HB_HOST_MEM),
2518 &instance->hb_host_mem_h); 2518 &instance->hb_host_mem_h, GFP_KERNEL);
2519 if (!instance->hb_host_mem) { 2519 if (!instance->hb_host_mem) {
2520 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2520 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2521 " memory for heartbeat host memory for scsi%d\n", 2521 " memory for heartbeat host memory for scsi%d\n",
@@ -4995,9 +4995,8 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
4995 context_sz = sizeof(u32); 4995 context_sz = sizeof(u32);
4996 reply_q_sz = context_sz * (instance->max_fw_cmds + 1); 4996 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4997 4997
4998 instance->reply_queue = pci_alloc_consistent(instance->pdev, 4998 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
4999 reply_q_sz, 4999 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5000 &instance->reply_queue_h);
5001 5000
5002 if (!instance->reply_queue) { 5001 if (!instance->reply_queue) {
5003 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n"); 5002 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
@@ -5029,7 +5028,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
5029 5028
5030fail_fw_init: 5029fail_fw_init:
5031 5030
5032 pci_free_consistent(instance->pdev, reply_q_sz, 5031 dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5033 instance->reply_queue, instance->reply_queue_h); 5032 instance->reply_queue, instance->reply_queue_h);
5034fail_reply_queue: 5033fail_reply_queue:
5035 megasas_free_cmds(instance); 5034 megasas_free_cmds(instance);
@@ -5533,7 +5532,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5533 5532
5534 else { 5533 else {
5535 if (instance->crash_dump_buf) 5534 if (instance->crash_dump_buf)
5536 pci_free_consistent(instance->pdev, 5535 dma_free_coherent(&instance->pdev->dev,
5537 CRASH_DMA_BUF_SIZE, 5536 CRASH_DMA_BUF_SIZE,
5538 instance->crash_dump_buf, 5537 instance->crash_dump_buf,
5539 instance->crash_dump_h); 5538 instance->crash_dump_h);
@@ -5616,7 +5615,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
5616 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); 5615 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5617 5616
5618 if (instance->reply_queue) 5617 if (instance->reply_queue)
5619 pci_free_consistent(instance->pdev, reply_q_sz, 5618 dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5620 instance->reply_queue, instance->reply_queue_h); 5619 instance->reply_queue, instance->reply_queue_h);
5621 5620
5622 megasas_free_cmds(instance); 5621 megasas_free_cmds(instance);
@@ -5655,10 +5654,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
5655 } 5654 }
5656 5655
5657 dcmd = &cmd->frame->dcmd; 5656 dcmd = &cmd->frame->dcmd;
5658 el_info = pci_zalloc_consistent(instance->pdev, 5657 el_info = dma_zalloc_coherent(&instance->pdev->dev,
5659 sizeof(struct megasas_evt_log_info), 5658 sizeof(struct megasas_evt_log_info), &el_info_h,
5660 &el_info_h); 5659 GFP_KERNEL);
5661
5662 if (!el_info) { 5660 if (!el_info) {
5663 megasas_return_cmd(instance, cmd); 5661 megasas_return_cmd(instance, cmd);
5664 return -ENOMEM; 5662 return -ENOMEM;
@@ -5695,8 +5693,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
5695 eli->boot_seq_num = el_info->boot_seq_num; 5693 eli->boot_seq_num = el_info->boot_seq_num;
5696 5694
5697dcmd_failed: 5695dcmd_failed:
5698 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 5696 dma_free_coherent(&instance->pdev->dev,
5699 el_info, el_info_h); 5697 sizeof(struct megasas_evt_log_info),
5698 el_info, el_info_h);
5700 5699
5701 megasas_return_cmd(instance, cmd); 5700 megasas_return_cmd(instance, cmd);
5702 5701
@@ -6134,10 +6133,10 @@ static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6134 6133
6135static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) 6134static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6136{ 6135{
6137 instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32), 6136 instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6138 &instance->producer_h); 6137 sizeof(u32), &instance->producer_h, GFP_KERNEL);
6139 instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32), 6138 instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6140 &instance->consumer_h); 6139 sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6141 6140
6142 if (!instance->producer || !instance->consumer) { 6141 if (!instance->producer || !instance->consumer) {
6143 dev_err(&instance->pdev->dev, 6142 dev_err(&instance->pdev->dev,
@@ -6199,11 +6198,11 @@ static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6199 kfree(instance->reply_map); 6198 kfree(instance->reply_map);
6200 if (instance->adapter_type == MFI_SERIES) { 6199 if (instance->adapter_type == MFI_SERIES) {
6201 if (instance->producer) 6200 if (instance->producer)
6202 pci_free_consistent(instance->pdev, sizeof(u32), 6201 dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6203 instance->producer, 6202 instance->producer,
6204 instance->producer_h); 6203 instance->producer_h);
6205 if (instance->consumer) 6204 if (instance->consumer)
6206 pci_free_consistent(instance->pdev, sizeof(u32), 6205 dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6207 instance->consumer, 6206 instance->consumer,
6208 instance->consumer_h); 6207 instance->consumer_h);
6209 } else { 6208 } else {
@@ -6224,10 +6223,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6224 struct pci_dev *pdev = instance->pdev; 6223 struct pci_dev *pdev = instance->pdev;
6225 struct fusion_context *fusion = instance->ctrl_context; 6224 struct fusion_context *fusion = instance->ctrl_context;
6226 6225
6227 instance->evt_detail = 6226 instance->evt_detail = dma_alloc_coherent(&pdev->dev,
6228 pci_alloc_consistent(pdev, 6227 sizeof(struct megasas_evt_detail),
6229 sizeof(struct megasas_evt_detail), 6228 &instance->evt_detail_h, GFP_KERNEL);
6230 &instance->evt_detail_h);
6231 6229
6232 if (!instance->evt_detail) { 6230 if (!instance->evt_detail) {
6233 dev_err(&instance->pdev->dev, 6231 dev_err(&instance->pdev->dev,
@@ -6250,9 +6248,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6250 } 6248 }
6251 6249
6252 instance->pd_list_buf = 6250 instance->pd_list_buf =
6253 pci_alloc_consistent(pdev, 6251 dma_alloc_coherent(&pdev->dev,
6254 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 6252 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6255 &instance->pd_list_buf_h); 6253 &instance->pd_list_buf_h, GFP_KERNEL);
6256 6254
6257 if (!instance->pd_list_buf) { 6255 if (!instance->pd_list_buf) {
6258 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n"); 6256 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
@@ -6260,9 +6258,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6260 } 6258 }
6261 6259
6262 instance->ctrl_info_buf = 6260 instance->ctrl_info_buf =
6263 pci_alloc_consistent(pdev, 6261 dma_alloc_coherent(&pdev->dev,
6264 sizeof(struct megasas_ctrl_info), 6262 sizeof(struct megasas_ctrl_info),
6265 &instance->ctrl_info_buf_h); 6263 &instance->ctrl_info_buf_h, GFP_KERNEL);
6266 6264
6267 if (!instance->ctrl_info_buf) { 6265 if (!instance->ctrl_info_buf) {
6268 dev_err(&pdev->dev, 6266 dev_err(&pdev->dev,
@@ -6271,9 +6269,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6271 } 6269 }
6272 6270
6273 instance->ld_list_buf = 6271 instance->ld_list_buf =
6274 pci_alloc_consistent(pdev, 6272 dma_alloc_coherent(&pdev->dev,
6275 sizeof(struct MR_LD_LIST), 6273 sizeof(struct MR_LD_LIST),
6276 &instance->ld_list_buf_h); 6274 &instance->ld_list_buf_h, GFP_KERNEL);
6277 6275
6278 if (!instance->ld_list_buf) { 6276 if (!instance->ld_list_buf) {
6279 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n"); 6277 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
@@ -6281,9 +6279,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6281 } 6279 }
6282 6280
6283 instance->ld_targetid_list_buf = 6281 instance->ld_targetid_list_buf =
6284 pci_alloc_consistent(pdev, 6282 dma_alloc_coherent(&pdev->dev,
6285 sizeof(struct MR_LD_TARGETID_LIST), 6283 sizeof(struct MR_LD_TARGETID_LIST),
6286 &instance->ld_targetid_list_buf_h); 6284 &instance->ld_targetid_list_buf_h, GFP_KERNEL);
6287 6285
6288 if (!instance->ld_targetid_list_buf) { 6286 if (!instance->ld_targetid_list_buf) {
6289 dev_err(&pdev->dev, 6287 dev_err(&pdev->dev,
@@ -6293,21 +6291,20 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6293 6291
6294 if (!reset_devices) { 6292 if (!reset_devices) {
6295 instance->system_info_buf = 6293 instance->system_info_buf =
6296 pci_alloc_consistent(pdev, 6294 dma_alloc_coherent(&pdev->dev,
6297 sizeof(struct MR_DRV_SYSTEM_INFO), 6295 sizeof(struct MR_DRV_SYSTEM_INFO),
6298 &instance->system_info_h); 6296 &instance->system_info_h, GFP_KERNEL);
6299 instance->pd_info = 6297 instance->pd_info =
6300 pci_alloc_consistent(pdev, 6298 dma_alloc_coherent(&pdev->dev,
6301 sizeof(struct MR_PD_INFO), 6299 sizeof(struct MR_PD_INFO),
6302 &instance->pd_info_h); 6300 &instance->pd_info_h, GFP_KERNEL);
6303 instance->tgt_prop = 6301 instance->tgt_prop =
6304 pci_alloc_consistent(pdev, 6302 dma_alloc_coherent(&pdev->dev,
6305 sizeof(struct MR_TARGET_PROPERTIES), 6303 sizeof(struct MR_TARGET_PROPERTIES),
6306 &instance->tgt_prop_h); 6304 &instance->tgt_prop_h, GFP_KERNEL);
6307 instance->crash_dump_buf = 6305 instance->crash_dump_buf =
6308 pci_alloc_consistent(pdev, 6306 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
6309 CRASH_DMA_BUF_SIZE, 6307 &instance->crash_dump_h, GFP_KERNEL);
6310 &instance->crash_dump_h);
6311 6308
6312 if (!instance->system_info_buf) 6309 if (!instance->system_info_buf)
6313 dev_err(&instance->pdev->dev, 6310 dev_err(&instance->pdev->dev,
@@ -6343,7 +6340,7 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
6343 struct fusion_context *fusion = instance->ctrl_context; 6340 struct fusion_context *fusion = instance->ctrl_context;
6344 6341
6345 if (instance->evt_detail) 6342 if (instance->evt_detail)
6346 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6343 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
6347 instance->evt_detail, 6344 instance->evt_detail,
6348 instance->evt_detail_h); 6345 instance->evt_detail_h);
6349 6346
@@ -6354,41 +6351,41 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
6354 fusion->ioc_init_request_phys); 6351 fusion->ioc_init_request_phys);
6355 6352
6356 if (instance->pd_list_buf) 6353 if (instance->pd_list_buf)
6357 pci_free_consistent(pdev, 6354 dma_free_coherent(&pdev->dev,
6358 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 6355 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6359 instance->pd_list_buf, 6356 instance->pd_list_buf,
6360 instance->pd_list_buf_h); 6357 instance->pd_list_buf_h);
6361 6358
6362 if (instance->ld_list_buf) 6359 if (instance->ld_list_buf)
6363 pci_free_consistent(pdev, sizeof(struct MR_LD_LIST), 6360 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
6364 instance->ld_list_buf, 6361 instance->ld_list_buf,
6365 instance->ld_list_buf_h); 6362 instance->ld_list_buf_h);
6366 6363
6367 if (instance->ld_targetid_list_buf) 6364 if (instance->ld_targetid_list_buf)
6368 pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST), 6365 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
6369 instance->ld_targetid_list_buf, 6366 instance->ld_targetid_list_buf,
6370 instance->ld_targetid_list_buf_h); 6367 instance->ld_targetid_list_buf_h);
6371 6368
6372 if (instance->ctrl_info_buf) 6369 if (instance->ctrl_info_buf)
6373 pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info), 6370 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
6374 instance->ctrl_info_buf, 6371 instance->ctrl_info_buf,
6375 instance->ctrl_info_buf_h); 6372 instance->ctrl_info_buf_h);
6376 6373
6377 if (instance->system_info_buf) 6374 if (instance->system_info_buf)
6378 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO), 6375 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
6379 instance->system_info_buf, 6376 instance->system_info_buf,
6380 instance->system_info_h); 6377 instance->system_info_h);
6381 6378
6382 if (instance->pd_info) 6379 if (instance->pd_info)
6383 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), 6380 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
6384 instance->pd_info, instance->pd_info_h); 6381 instance->pd_info, instance->pd_info_h);
6385 6382
6386 if (instance->tgt_prop) 6383 if (instance->tgt_prop)
6387 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), 6384 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
6388 instance->tgt_prop, instance->tgt_prop_h); 6385 instance->tgt_prop, instance->tgt_prop_h);
6389 6386
6390 if (instance->crash_dump_buf) 6387 if (instance->crash_dump_buf)
6391 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE, 6388 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
6392 instance->crash_dump_buf, 6389 instance->crash_dump_buf,
6393 instance->crash_dump_h); 6390 instance->crash_dump_h);
6394} 6391}
@@ -6516,17 +6513,20 @@ static int megasas_probe_one(struct pci_dev *pdev,
6516 if (instance->requestorId) { 6513 if (instance->requestorId) {
6517 if (instance->PlasmaFW111) { 6514 if (instance->PlasmaFW111) {
6518 instance->vf_affiliation_111 = 6515 instance->vf_affiliation_111 =
6519 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111), 6516 dma_alloc_coherent(&pdev->dev,
6520 &instance->vf_affiliation_111_h); 6517 sizeof(struct MR_LD_VF_AFFILIATION_111),
6518 &instance->vf_affiliation_111_h,
6519 GFP_KERNEL);
6521 if (!instance->vf_affiliation_111) 6520 if (!instance->vf_affiliation_111)
6522 dev_warn(&pdev->dev, "Can't allocate " 6521 dev_warn(&pdev->dev, "Can't allocate "
6523 "memory for VF affiliation buffer\n"); 6522 "memory for VF affiliation buffer\n");
6524 } else { 6523 } else {
6525 instance->vf_affiliation = 6524 instance->vf_affiliation =
6526 pci_alloc_consistent(pdev, 6525 dma_alloc_coherent(&pdev->dev,
6527 (MAX_LOGICAL_DRIVES + 1) * 6526 (MAX_LOGICAL_DRIVES + 1) *
6528 sizeof(struct MR_LD_VF_AFFILIATION), 6527 sizeof(struct MR_LD_VF_AFFILIATION),
6529 &instance->vf_affiliation_h); 6528 &instance->vf_affiliation_h,
6529 GFP_KERNEL);
6530 if (!instance->vf_affiliation) 6530 if (!instance->vf_affiliation)
6531 dev_warn(&pdev->dev, "Can't allocate " 6531 dev_warn(&pdev->dev, "Can't allocate "
6532 "memory for VF affiliation buffer\n"); 6532 "memory for VF affiliation buffer\n");
@@ -6994,19 +6994,19 @@ skip_firing_dcmds:
6994 } 6994 }
6995 6995
6996 if (instance->vf_affiliation) 6996 if (instance->vf_affiliation)
6997 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * 6997 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
6998 sizeof(struct MR_LD_VF_AFFILIATION), 6998 sizeof(struct MR_LD_VF_AFFILIATION),
6999 instance->vf_affiliation, 6999 instance->vf_affiliation,
7000 instance->vf_affiliation_h); 7000 instance->vf_affiliation_h);
7001 7001
7002 if (instance->vf_affiliation_111) 7002 if (instance->vf_affiliation_111)
7003 pci_free_consistent(pdev, 7003 dma_free_coherent(&pdev->dev,
7004 sizeof(struct MR_LD_VF_AFFILIATION_111), 7004 sizeof(struct MR_LD_VF_AFFILIATION_111),
7005 instance->vf_affiliation_111, 7005 instance->vf_affiliation_111,
7006 instance->vf_affiliation_111_h); 7006 instance->vf_affiliation_111_h);
7007 7007
7008 if (instance->hb_host_mem) 7008 if (instance->hb_host_mem)
7009 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM), 7009 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7010 instance->hb_host_mem, 7010 instance->hb_host_mem,
7011 instance->hb_host_mem_h); 7011 instance->hb_host_mem_h);
7012 7012
@@ -7254,7 +7254,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7254 7254
7255 /* 7255 /*
7256 * We don't change the dma_coherent_mask, so 7256 * We don't change the dma_coherent_mask, so
7257 * pci_alloc_consistent only returns 32bit addresses 7257 * dma_alloc_coherent only returns 32bit addresses
7258 */ 7258 */
7259 if (instance->consistent_mask_64bit) { 7259 if (instance->consistent_mask_64bit) {
7260 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle); 7260 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
@@ -7523,6 +7523,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7523 get_user(user_sense_off, &cioc->sense_off)) 7523 get_user(user_sense_off, &cioc->sense_off))
7524 return -EFAULT; 7524 return -EFAULT;
7525 7525
7526 if (local_sense_off != user_sense_off)
7527 return -EINVAL;
7528
7526 if (local_sense_len) { 7529 if (local_sense_len) {
7527 void __user **sense_ioc_ptr = 7530 void __user **sense_ioc_ptr =
7528 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off); 7531 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
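
The sense_off hunk is the one behavioral change in this file: the compat ioctl fetches sense_off twice, once from the translated 64-bit frame (ioc) and once from the raw 32-bit user frame (cioc), and later uses the value as an offset when computing a pointer into the frame. If the two fetches could disagree, userspace could steer that pointer write. The added check rejects the mismatch outright; restated as a generic double-fetch guard:

            u32 local_sense_off, user_sense_off;

            if (get_user(local_sense_off, &ioc->sense_off) ||
                get_user(user_sense_off, &cioc->sense_off))
                    return -EFAULT;

            /* Both views must agree before sense_off is used to locate
             * the sense pointer; -EINVAL closes the inconsistency window. */
            if (local_sense_off != user_sense_off)
                    return -EINVAL;
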
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c7f95bace353..f74b5ea24f0f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -684,8 +684,8 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
684 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * 684 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
685 MAX_MSIX_QUEUES_FUSION; 685 MAX_MSIX_QUEUES_FUSION;
686 686
687 fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size, 687 fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev,
688 &fusion->rdpq_phys); 688 array_size, &fusion->rdpq_phys, GFP_KERNEL);
689 if (!fusion->rdpq_virt) { 689 if (!fusion->rdpq_virt) {
690 dev_err(&instance->pdev->dev, 690 dev_err(&instance->pdev->dev,
691 "Failed from %s %d\n", __func__, __LINE__); 691 "Failed from %s %d\n", __func__, __LINE__);
@@ -813,7 +813,7 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
813 dma_pool_destroy(fusion->reply_frames_desc_pool_align); 813 dma_pool_destroy(fusion->reply_frames_desc_pool_align);
814 814
815 if (fusion->rdpq_virt) 815 if (fusion->rdpq_virt)
816 pci_free_consistent(instance->pdev, 816 dma_free_coherent(&instance->pdev->dev,
817 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION, 817 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
818 fusion->rdpq_virt, fusion->rdpq_phys); 818 fusion->rdpq_virt, fusion->rdpq_phys);
819} 819}
@@ -2209,7 +2209,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
2209 cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD; 2209 cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
2210 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN; 2210 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
2211 2211
2212 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 2212 if (scp->sc_data_direction == DMA_FROM_DEVICE)
2213 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32; 2213 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
2214 else 2214 else
2215 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32; 2215 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
@@ -2238,7 +2238,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
2238 cdb[31] = (u8)(num_blocks & 0xff); 2238 cdb[31] = (u8)(num_blocks & 0xff);
2239 2239
2240 /* set SCSI IO EEDPFlags */ 2240 /* set SCSI IO EEDPFlags */
2241 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { 2241 if (scp->sc_data_direction == DMA_FROM_DEVICE) {
2242 io_request->EEDPFlags = cpu_to_le16( 2242 io_request->EEDPFlags = cpu_to_le16(
2243 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 2243 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2244 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 2244 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
@@ -2621,7 +2621,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2621 scsi_buff_len = scsi_bufflen(scp); 2621 scsi_buff_len = scsi_bufflen(scp);
2622 io_request->DataLength = cpu_to_le32(scsi_buff_len); 2622 io_request->DataLength = cpu_to_le32(scsi_buff_len);
2623 2623
2624 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 2624 if (scp->sc_data_direction == DMA_FROM_DEVICE)
2625 io_info.isRead = 1; 2625 io_info.isRead = 1;
2626 2626
2627 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 2627 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -3088,9 +3088,9 @@ megasas_build_io_fusion(struct megasas_instance *instance,
3088 3088
3089 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); 3089 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
3090 3090
3091 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 3091 if (scp->sc_data_direction == DMA_TO_DEVICE)
3092 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE); 3092 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
3093 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 3093 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
3094 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ); 3094 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
3095 3095
3096 io_request->SGLOffset0 = 3096 io_request->SGLOffset0 =
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 82e01dbe90af..ec6940f2fcb3 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1915,8 +1915,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1915 /* We use the PCI APIs for now until the generic one gets fixed 1915 /* We use the PCI APIs for now until the generic one gets fixed
1916 * enough or until we get some macio-specific versions 1916 * enough or until we get some macio-specific versions
1917 */ 1917 */
1918 dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev), 1918 dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev,
1919 ms->dma_cmd_size, &dma_cmd_bus); 1919 ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL);
1920 if (dma_cmd_space == NULL) { 1920 if (dma_cmd_space == NULL) {
1921 printk(KERN_ERR "mesh: can't allocate DMA table\n"); 1921 printk(KERN_ERR "mesh: can't allocate DMA table\n");
1922 goto out_unmap; 1922 goto out_unmap;
@@ -1974,7 +1974,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1974 */ 1974 */
1975 mesh_shutdown(mdev); 1975 mesh_shutdown(mdev);
1976 set_mesh_power(ms, 0); 1976 set_mesh_power(ms, 0);
1977 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, 1977 dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
1978 ms->dma_cmd_space, ms->dma_cmd_bus); 1978 ms->dma_cmd_space, ms->dma_cmd_bus);
1979 out_unmap: 1979 out_unmap:
1980 iounmap(ms->dma); 1980 iounmap(ms->dma);
@@ -2007,7 +2007,7 @@ static int mesh_remove(struct macio_dev *mdev)
2007 iounmap(ms->dma); 2007 iounmap(ms->dma);
2008 2008
2009 /* Free DMA commands memory */ 2009 /* Free DMA commands memory */
2010 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, 2010 dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
2011 ms->dma_cmd_space, ms->dma_cmd_bus); 2011 ms->dma_cmd_space, ms->dma_cmd_bus);
2012 2012
2013 /* Release memory resources */ 2013 /* Release memory resources */
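
mesh is the odd one out: as a macio driver it has no struct pci_dev of its own at this layer, so the conversion keeps routing DMA through the underlying PCI device, now via its embedded struct device rather than the pci_* wrappers. The pattern, as a fragment from the probe path (mdev is the macio device passed to probe):

            struct device *dmadev = &macio_get_pci_dev(mdev)->dev;

            dma_cmd_space = dma_zalloc_coherent(dmadev, ms->dma_cmd_size,
                                                &dma_cmd_bus, GFP_KERNEL);
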
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59d7844ee022..2500377d0723 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -122,8 +122,8 @@ mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
122 if (!(status & MPT3_CMD_RESET)) 122 if (!(status & MPT3_CMD_RESET))
123 issue_reset = 1; 123 issue_reset = 1;
124 124
125 pr_err(MPT3SAS_FMT "Command %s\n", ioc->name, 125 ioc_err(ioc, "Command %s\n",
126 ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout")); 126 issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
127 _debug_dump_mf(mpi_request, sz); 127 _debug_dump_mf(mpi_request, sz);
128 128
129 return issue_reset; 129 return issue_reset;
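
From here down, the mpt3sas portion is dominated by a logging cleanup: every pr_<level>(MPT3SAS_FMT "...", ioc->name, ...) becomes ioc_<level>(ioc, "...", ...), folding the adapter-name prefix into the macro and shrinking the argument lists. The helpers are presumably defined along these lines in mpt3sas_base.h (a sketch, not quoted from this patch):

    #define ioc_err(ioc, fmt, ...) \
            pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
    #define ioc_warn(ioc, fmt, ...) \
            pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
    #define ioc_info(ioc, fmt, ...) \
            pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)

    /* Before: pr_err(MPT3SAS_FMT "Command %s\n", ioc->name, reason);
     * After:  ioc_err(ioc, "Command %s\n", reason); */

Note the one message that stays on bare pr_err(), in _base_request_irq() further down: it prefixes reply_q->name rather than ioc->name, so the macro does not apply there.
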
@@ -336,9 +336,7 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
336 return ct->chain_buffer; 336 return ct->chain_buffer;
337 } 337 }
338 } 338 }
339 pr_info(MPT3SAS_FMT 339 ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
340 "Provided chain_buffer_dma address is not in the lookup list\n",
341 ioc->name);
342 return NULL; 340 return NULL;
343} 341}
344 342
@@ -394,7 +392,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
394 /* Get scsi_cmd using smid */ 392 /* Get scsi_cmd using smid */
395 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); 393 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
396 if (scmd == NULL) { 394 if (scmd == NULL) {
397 pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name); 395 ioc_err(ioc, "scmd is NULL\n");
398 return; 396 return;
399 } 397 }
400 398
@@ -532,11 +530,11 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
532 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg; 530 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
533 struct pci_dev *pdev; 531 struct pci_dev *pdev;
534 532
535 if ((ioc == NULL)) 533 if (!ioc)
536 return -1; 534 return -1;
537 535
538 pdev = ioc->pdev; 536 pdev = ioc->pdev;
539 if ((pdev == NULL)) 537 if (!pdev)
540 return -1; 538 return -1;
541 pci_stop_and_remove_bus_device_locked(pdev); 539 pci_stop_and_remove_bus_device_locked(pdev);
542 return 0; 540 return 0;
@@ -566,8 +564,7 @@ _base_fault_reset_work(struct work_struct *work)
566 564
567 doorbell = mpt3sas_base_get_iocstate(ioc, 0); 565 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
568 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) { 566 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
569 pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n", 567 ioc_err(ioc, "SAS host is non-operational !!!!\n");
570 ioc->name);
571 568
572 /* It may be possible that EEH recovery can resolve some of 569 /* It may be possible that EEH recovery can resolve some of
573 * pci bus failure issues rather removing the dead ioc function 570 * pci bus failure issues rather removing the dead ioc function
@@ -600,13 +597,11 @@ _base_fault_reset_work(struct work_struct *work)
600 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc, 597 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
601 "%s_dead_ioc_%d", ioc->driver_name, ioc->id); 598 "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
602 if (IS_ERR(p)) 599 if (IS_ERR(p))
603 pr_err(MPT3SAS_FMT 600 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
604 "%s: Running mpt3sas_dead_ioc thread failed !!!!\n", 601 __func__);
605 ioc->name, __func__);
606 else 602 else
607 pr_err(MPT3SAS_FMT 603 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
608 "%s: Running mpt3sas_dead_ioc thread success !!!!\n", 604 __func__);
609 ioc->name, __func__);
610 return; /* don't rearm timer */ 605 return; /* don't rearm timer */
611 } 606 }
612 607
@@ -614,8 +609,8 @@ _base_fault_reset_work(struct work_struct *work)
614 609
615 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) { 610 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
616 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 611 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
617 pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name, 612 ioc_warn(ioc, "%s: hard reset: %s\n",
618 __func__, (rc == 0) ? "success" : "failed"); 613 __func__, rc == 0 ? "success" : "failed");
619 doorbell = mpt3sas_base_get_iocstate(ioc, 0); 614 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
620 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) 615 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
621 mpt3sas_base_fault_info(ioc, doorbell & 616 mpt3sas_base_fault_info(ioc, doorbell &
@@ -657,8 +652,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
657 ioc->fault_reset_work_q = 652 ioc->fault_reset_work_q =
658 create_singlethread_workqueue(ioc->fault_reset_work_q_name); 653 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
659 if (!ioc->fault_reset_work_q) { 654 if (!ioc->fault_reset_work_q) {
660 pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n", 655 ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
661 ioc->name, __func__, __LINE__);
662 return; 656 return;
663 } 657 }
664 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 658 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
@@ -700,8 +694,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
700void 694void
701mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) 695mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
702{ 696{
703 pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n", 697 ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
704 ioc->name, fault_code);
705} 698}
706 699
707/** 700/**
@@ -728,8 +721,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
728 mpt3sas_base_fault_info(ioc , doorbell); 721 mpt3sas_base_fault_info(ioc , doorbell);
729 else { 722 else {
730 writel(0xC0FFEE00, &ioc->chip->Doorbell); 723 writel(0xC0FFEE00, &ioc->chip->Doorbell);
731 pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n", 724 ioc_err(ioc, "Firmware is halted due to command timeout\n");
732 ioc->name);
733 } 725 }
734 726
735 if (ioc->fwfault_debug == 2) 727 if (ioc->fwfault_debug == 2)
@@ -956,8 +948,8 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
956 break; 948 break;
957 } 949 }
958 950
959 pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n", 951 ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
960 ioc->name, desc, ioc_status, request_hdr, func_str); 952 desc, ioc_status, request_hdr, func_str);
961 953
962 _debug_dump_mf(request_hdr, frame_sz/4); 954 _debug_dump_mf(request_hdr, frame_sz/4);
963} 955}
@@ -1003,9 +995,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1003 { 995 {
1004 Mpi2EventDataSasDiscovery_t *event_data = 996 Mpi2EventDataSasDiscovery_t *event_data =
1005 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData; 997 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
1006 pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name, 998 ioc_info(ioc, "Discovery: (%s)",
1007 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? 999 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
1008 "start" : "stop"); 1000 "start" : "stop");
1009 if (event_data->DiscoveryStatus) 1001 if (event_data->DiscoveryStatus)
1010 pr_cont(" discovery_status(0x%08x)", 1002 pr_cont(" discovery_status(0x%08x)",
1011 le32_to_cpu(event_data->DiscoveryStatus)); 1003 le32_to_cpu(event_data->DiscoveryStatus));
@@ -1059,14 +1051,13 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1059 { 1051 {
1060 Mpi26EventDataPCIeEnumeration_t *event_data = 1052 Mpi26EventDataPCIeEnumeration_t *event_data =
1061 (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData; 1053 (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
1062 pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name, 1054 ioc_info(ioc, "PCIE Enumeration: (%s)",
1063 (event_data->ReasonCode == 1055 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
1064 MPI26_EVENT_PCIE_ENUM_RC_STARTED) ? 1056 "start" : "stop");
1065 "start" : "stop");
1066 if (event_data->EnumerationStatus) 1057 if (event_data->EnumerationStatus)
1067 pr_info("enumeration_status(0x%08x)", 1058 pr_cont("enumeration_status(0x%08x)",
1068 le32_to_cpu(event_data->EnumerationStatus)); 1059 le32_to_cpu(event_data->EnumerationStatus));
1069 pr_info("\n"); 1060 pr_cont("\n");
1070 return; 1061 return;
1071 } 1062 }
1072 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 1063 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
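
The pr_cont() changes just above are a small correctness fix rather than churn: the event print opens a line with ioc_info() and then appends to it, but pr_info() starts a new log record (with its own level and prefix) instead of extending the current one, so the status fragment and the closing newline landed on separate lines. pr_cont() is the intended way to continue the record opened by the preceding printk. A sketch (start and status are illustrative placeholders):

            ioc_info(ioc, "PCIE Enumeration: (%s)", start ? "start" : "stop");
            if (status)
                    pr_cont("enumeration_status(0x%08x)", status);  /* appends */
            pr_cont("\n");                                          /* closes the line */
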
@@ -1077,7 +1068,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1077 if (!desc) 1068 if (!desc)
1078 return; 1069 return;
1079 1070
1080 pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc); 1071 ioc_info(ioc, "%s\n", desc);
1081} 1072}
1082 1073
1083/** 1074/**
@@ -1128,11 +1119,9 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
1128 break; 1119 break;
1129 } 1120 }
1130 1121
1131 pr_warn(MPT3SAS_FMT 1122 ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
1132 "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n", 1123 log_info,
1133 ioc->name, log_info, 1124 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
1134 originator_str, sas_loginfo.dw.code,
1135 sas_loginfo.dw.subcode);
1136} 1125}
1137 1126
1138/** 1127/**
@@ -1152,8 +1141,8 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1152 1141
1153 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 1142 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1154 if (unlikely(!mpi_reply)) { 1143 if (unlikely(!mpi_reply)) {
1155 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", 1144 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
1156 ioc->name, __FILE__, __LINE__, __func__); 1145 __FILE__, __LINE__, __func__);
1157 return; 1146 return;
1158 } 1147 }
1159 ioc_status = le16_to_cpu(mpi_reply->IOCStatus); 1148 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
@@ -1249,9 +1238,9 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1249 delayed_event_ack->EventContext = mpi_reply->EventContext; 1238 delayed_event_ack->EventContext = mpi_reply->EventContext;
1250 list_add_tail(&delayed_event_ack->list, 1239 list_add_tail(&delayed_event_ack->list,
1251 &ioc->delayed_event_ack_list); 1240 &ioc->delayed_event_ack_list);
1252 dewtprintk(ioc, pr_info(MPT3SAS_FMT 1241 dewtprintk(ioc,
1253 "DELAYED: EVENT ACK: event (0x%04x)\n", 1242 ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
1254 ioc->name, le16_to_cpu(mpi_reply->Event))); 1243 le16_to_cpu(mpi_reply->Event)));
1255 goto out; 1244 goto out;
1256 } 1245 }
1257 1246
@@ -2270,7 +2259,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2270 sges_left = scsi_dma_map(scmd); 2259 sges_left = scsi_dma_map(scmd);
2271 if (sges_left < 0) { 2260 if (sges_left < 0) {
2272 sdev_printk(KERN_ERR, scmd->device, 2261 sdev_printk(KERN_ERR, scmd->device,
2273 "pci_map_sg failed: request for %d bytes!\n", 2262 "scsi_dma_map failed: request for %d bytes!\n",
2274 scsi_bufflen(scmd)); 2263 scsi_bufflen(scmd));
2275 return -ENOMEM; 2264 return -ENOMEM;
2276 } 2265 }
@@ -2418,7 +2407,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2418 sges_left = scsi_dma_map(scmd); 2407 sges_left = scsi_dma_map(scmd);
2419 if (sges_left < 0) { 2408 if (sges_left < 0) {
2420 sdev_printk(KERN_ERR, scmd->device, 2409 sdev_printk(KERN_ERR, scmd->device,
2421 "pci_map_sg failed: request for %d bytes!\n", 2410 "scsi_dma_map failed: request for %d bytes!\n",
2422 scsi_bufflen(scmd)); 2411 scsi_bufflen(scmd));
2423 return -ENOMEM; 2412 return -ENOMEM;
2424 } 2413 }
@@ -2563,44 +2552,41 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2563static int 2552static int
2564_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) 2553_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2565{ 2554{
2555 u64 required_mask, coherent_mask;
2566 struct sysinfo s; 2556 struct sysinfo s;
2567 u64 consistent_dma_mask;
2568 2557
2569 if (ioc->is_mcpu_endpoint) 2558 if (ioc->is_mcpu_endpoint)
2570 goto try_32bit; 2559 goto try_32bit;
2571 2560
2561 required_mask = dma_get_required_mask(&pdev->dev);
2562 if (sizeof(dma_addr_t) == 4 || required_mask == 32)
2563 goto try_32bit;
2564
2572 if (ioc->dma_mask) 2565 if (ioc->dma_mask)
2573 consistent_dma_mask = DMA_BIT_MASK(64); 2566 coherent_mask = DMA_BIT_MASK(64);
2574 else 2567 else
2575 consistent_dma_mask = DMA_BIT_MASK(32); 2568 coherent_mask = DMA_BIT_MASK(32);
2576 2569
2577 if (sizeof(dma_addr_t) > 4) { 2570 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
2578 const uint64_t required_mask = 2571 dma_set_coherent_mask(&pdev->dev, coherent_mask))
2579 dma_get_required_mask(&pdev->dev); 2572 goto try_32bit;
2580 if ((required_mask > DMA_BIT_MASK(32)) && 2573
2581 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 2574 ioc->base_add_sg_single = &_base_add_sg_single_64;
2582 !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) { 2575 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2583 ioc->base_add_sg_single = &_base_add_sg_single_64; 2576 ioc->dma_mask = 64;
2584 ioc->sge_size = sizeof(Mpi2SGESimple64_t); 2577 goto out;
2585 ioc->dma_mask = 64;
2586 goto out;
2587 }
2588 }
2589 2578
2590 try_32bit: 2579 try_32bit:
2591 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 2580 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
2592 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2593 ioc->base_add_sg_single = &_base_add_sg_single_32;
2594 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2595 ioc->dma_mask = 32;
2596 } else
2597 return -ENODEV; 2581 return -ENODEV;
2598 2582
2583 ioc->base_add_sg_single = &_base_add_sg_single_32;
2584 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2585 ioc->dma_mask = 32;
2599 out: 2586 out:
2600 si_meminfo(&s); 2587 si_meminfo(&s);
2601 pr_info(MPT3SAS_FMT 2588 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2602 "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", 2589 ioc->dma_mask, convert_to_kb(s.totalram));
2603 ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
2604 2590
2605 return 0; 2591 return 0;
2606} 2592}
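
_base_config_dma_addressing() is the one spot in mpt3sas where the DMA conversion is a real rewrite rather than a substitution: the nested pci_set_dma_mask()/pci_set_consistent_dma_mask() attempts become a straight-line "try 64-bit, fall back to 32-bit" flow. A condensed sketch of the new shape (SGE setup and the is_mcpu_endpoint early-out are trimmed; the required_mask == 32 comparison is reproduced as in the patch):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int demo_config_dma(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
    {
            u64 coherent_mask = ioc->dma_mask ? DMA_BIT_MASK(64)
                                              : DMA_BIT_MASK(32);

            if (sizeof(dma_addr_t) == 4 ||
                dma_get_required_mask(&pdev->dev) == 32)
                    goto try_32bit;

            if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
                dma_set_coherent_mask(&pdev->dev, coherent_mask))
                    goto try_32bit;

            ioc->dma_mask = 64;
            return 0;

    try_32bit:
            /* One call sets both the streaming and the coherent mask. */
            if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                    return -ENODEV;         /* no usable DMA addressing */

            ioc->dma_mask = 32;
            return 0;
    }
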
@@ -2639,8 +2625,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2639 2625
2640 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); 2626 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2641 if (!base) { 2627 if (!base) {
2642 dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n", 2628 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2643 ioc->name));
2644 return -EINVAL; 2629 return -EINVAL;
2645 } 2630 }
2646 2631
@@ -2658,9 +2643,8 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2658 pci_read_config_word(ioc->pdev, base + 2, &message_control); 2643 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2659 ioc->msix_vector_count = (message_control & 0x3FF) + 1; 2644 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2660 } 2645 }
2661 dinitprintk(ioc, pr_info(MPT3SAS_FMT 2646 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2662 "msix is supported, vector_count(%d)\n", 2647 ioc->msix_vector_count));
2663 ioc->name, ioc->msix_vector_count));
2664 return 0; 2648 return 0;
2665} 2649}
2666 2650
@@ -2702,8 +2686,8 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2702 2686
2703 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); 2687 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2704 if (!reply_q) { 2688 if (!reply_q) {
2705 pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n", 2689 ioc_err(ioc, "unable to allocate memory %zu!\n",
2706 ioc->name, (int)sizeof(struct adapter_reply_queue)); 2690 sizeof(struct adapter_reply_queue));
2707 return -ENOMEM; 2691 return -ENOMEM;
2708 } 2692 }
2709 reply_q->ioc = ioc; 2693 reply_q->ioc = ioc;
@@ -2719,7 +2703,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2719 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt, 2703 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2720 IRQF_SHARED, reply_q->name, reply_q); 2704 IRQF_SHARED, reply_q->name, reply_q);
2721 if (r) { 2705 if (r) {
2722 pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n", 2706 pr_err("%s: unable to allocate interrupt %d!\n",
2723 reply_q->name, pci_irq_vector(pdev, index)); 2707 reply_q->name, pci_irq_vector(pdev, index));
2724 kfree(reply_q); 2708 kfree(reply_q);
2725 return -EBUSY; 2709 return -EBUSY;
@@ -2761,8 +2745,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2761 const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev, 2745 const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
2762 reply_q->msix_index); 2746 reply_q->msix_index);
2763 if (!mask) { 2747 if (!mask) {
2764 pr_warn(MPT3SAS_FMT "no affinity for msi %x\n", 2748 ioc_warn(ioc, "no affinity for msi %x\n",
2765 ioc->name, reply_q->msix_index); 2749 reply_q->msix_index);
2766 continue; 2750 continue;
2767 } 2751 }
2768 2752
@@ -2833,9 +2817,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2833 ioc->reply_queue_count = min_t(int, ioc->cpu_count, 2817 ioc->reply_queue_count = min_t(int, ioc->cpu_count,
2834 ioc->msix_vector_count); 2818 ioc->msix_vector_count);
2835 2819
2836 printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores" 2820 ioc_info(ioc, "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
2837 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, 2821 ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
2838 ioc->cpu_count, max_msix_vectors);
2839 2822
2840 if (!ioc->rdpq_array_enable && max_msix_vectors == -1) 2823 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
2841 local_max_msix_vectors = (reset_devices) ? 1 : 8; 2824 local_max_msix_vectors = (reset_devices) ? 1 : 8;
@@ -2857,9 +2840,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
 				  irq_flags);
 	if (r < 0) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-			"pci_alloc_irq_vectors failed (r=%d) !!!\n",
-			ioc->name, r));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
+				     r));
 		goto try_ioapic;
 	}
 
@@ -2882,9 +2865,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	ioc->reply_queue_count = 1;
 	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
 	if (r < 0) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-			"pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
-			ioc->name, r));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
+				     r));
 	} else
 		r = _base_request_irq(ioc, 0);
 
@@ -2900,8 +2883,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
 {
 	struct pci_dev *pdev = ioc->pdev;
 
-	dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
-		ioc->name, __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	_base_free_irq(ioc);
 	_base_disable_msix(ioc);
@@ -2939,13 +2921,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	phys_addr_t chip_phys = 0;
 	struct adapter_reply_queue *reply_q;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
-		ioc->name, __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
 	if (pci_enable_device_mem(pdev)) {
-		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
-			ioc->name);
+		ioc_warn(ioc, "pci_enable_device_mem: failed\n");
 		ioc->bars = 0;
 		return -ENODEV;
 	}
@@ -2953,8 +2933,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 
 	if (pci_request_selected_regions(pdev, ioc->bars,
 	    ioc->driver_name)) {
-		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
-			ioc->name);
+		ioc_warn(ioc, "pci_request_selected_regions: failed\n");
 		ioc->bars = 0;
 		r = -ENODEV;
 		goto out_fail;
@@ -2967,8 +2946,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 
 
 	if (_base_config_dma_addressing(ioc, pdev) != 0) {
-		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
-			ioc->name, pci_name(pdev));
+		ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
 		r = -ENODEV;
 		goto out_fail;
 	}
@@ -2991,8 +2969,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->chip == NULL) {
-		pr_err(MPT3SAS_FMT "unable to map adapter memory! "
-			" or resource not found\n", ioc->name);
+		ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
 		r = -EINVAL;
 		goto out_fail;
 	}
@@ -3026,9 +3003,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 			ioc->combined_reply_index_count,
 			sizeof(resource_size_t *), GFP_KERNEL);
 		if (!ioc->replyPostRegisterIndex) {
-			dfailprintk(ioc, printk(MPT3SAS_FMT
-			    "allocation for reply Post Register Index failed!!!\n",
-			    ioc->name));
+			dfailprintk(ioc,
+				    ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
 			r = -ENOMEM;
 			goto out_fail;
 		}
@@ -3053,15 +3029,15 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
-		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
-		    reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
-		    "IO-APIC enabled"),
+		pr_info("%s: %s enabled: IRQ %d\n",
+			reply_q->name,
+			ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
 			pci_irq_vector(ioc->pdev, reply_q->msix_index));
 
-	pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
-	    ioc->name, &chip_phys, ioc->chip, memap_sz);
-	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
-	    ioc->name, (unsigned long long)pio_chip, pio_sz);
+	ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
+		 &chip_phys, ioc->chip, memap_sz);
+	ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
+		 (unsigned long long)pio_chip, pio_sz);
 
 	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
 	pci_save_state(pdev);
@@ -3176,8 +3152,7 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	if (list_empty(&ioc->internal_free_list)) {
 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-		pr_err(MPT3SAS_FMT "%s: smid not available\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: smid not available\n", __func__);
 		return 0;
 	}
 
@@ -3545,89 +3520,85 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
 		case MPI2_MFGPAGE_DEVID_SAS2008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_INTEL_RMS2LL080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_RMS2LL080_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS2LL040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_RMS2LL040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_SSD910_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_SSD910_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-					"Intel(R) Controller: Subsystem ID: 0x%X\n",
-					ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_INTEL_RS25GB008_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_RS25GB008_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25JB080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_RMS25JB080_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25JB040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_RMS25JB040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25KB080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_RMS25KB080_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25KB040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_RMS25KB040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25LB040_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_RMS25LB040_BRANDING);
 				break;
 			case MPT2SAS_INTEL_RMS25LB080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_INTEL_RMS25LB080_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-					"Intel(R) Controller: Subsystem ID: 0x%X\n",
-					ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 		case MPI25_MFGPAGE_DEVID_SAS3008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_INTEL_RMS3JC080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT3SAS_INTEL_RMS3JC080_BRANDING);
 				break;
 
 			case MPT3SAS_INTEL_RS3GC008_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT3SAS_INTEL_RS3GC008_BRANDING);
 				break;
 			case MPT3SAS_INTEL_RS3FC044_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT3SAS_INTEL_RS3FC044_BRANDING);
 				break;
 			case MPT3SAS_INTEL_RS3UC080_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT3SAS_INTEL_RS3UC080_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-					"Intel(R) Controller: Subsystem ID: 0x%X\n",
-					ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		default:
-			pr_info(MPT3SAS_FMT
-				"Intel(R) Controller: Subsystem ID: 0x%X\n",
-				ioc->name, ioc->pdev->subsystem_device);
+			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 		break;
@@ -3636,57 +3607,54 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
 		case MPI2_MFGPAGE_DEVID_SAS2008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
 				break;
 			case MPT2SAS_DELL_PERC_H200_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_DELL_PERC_H200_BRANDING);
 				break;
 			case MPT2SAS_DELL_6GBPS_SAS_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-					"Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
-					ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		case MPI25_MFGPAGE_DEVID_SAS3008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_DELL_12G_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT3SAS_DELL_12G_HBA_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-					"Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
-					ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		default:
-			pr_info(MPT3SAS_FMT
-				"Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
-				ioc->pdev->subsystem_device);
+			ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 		break;
@@ -3695,46 +3663,42 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
 		case MPI25_MFGPAGE_DEVID_SAS3008:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
 				break;
 			case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
 				break;
 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-					"Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
-					ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		case MPI25_MFGPAGE_DEVID_SAS3108_1:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
 				break;
 			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-					MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
-					);
+				ioc_info(ioc, "%s\n",
+					 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-					"Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
-					ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 			break;
 		default:
-			pr_info(MPT3SAS_FMT
-				"Cisco SAS HBA: Subsystem ID: 0x%X\n",
-				ioc->name, ioc->pdev->subsystem_device);
+			ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 		break;
@@ -3743,43 +3707,40 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
 		case MPI2_MFGPAGE_DEVID_SAS2004:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-					"HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
-					ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
 			switch (ioc->pdev->subsystem_device) {
 			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
 				break;
 			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
 				break;
 			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
 				break;
 			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
-				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				ioc_info(ioc, "%s\n",
 					 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
 				break;
 			default:
-				pr_info(MPT3SAS_FMT
-					"HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
-					ioc->name, ioc->pdev->subsystem_device);
+				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+					 ioc->pdev->subsystem_device);
 				break;
 			}
 		default:
-			pr_info(MPT3SAS_FMT
-				"HP SAS HBA: Subsystem ID: 0x%X\n",
-				ioc->name, ioc->pdev->subsystem_device);
+			ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
+				 ioc->pdev->subsystem_device);
 			break;
 		}
 	default:
@@ -3806,28 +3767,25 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
 	u16 smid, ioc_status;
 	size_t data_length;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	data_length = sizeof(Mpi2FWImageHeader_t);
-	fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
-	    &fwpkg_data_dma);
+	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+			&fwpkg_data_dma, GFP_KERNEL);
 	if (!fwpkg_data) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -ENOMEM;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		r = -EAGAIN;
 		goto out;
 	}
@@ -3846,11 +3804,9 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
 	/* Wait for 15 seconds */
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    FW_IMG_HDR_READ_TIMEOUT*HZ);
-	pr_info(MPT3SAS_FMT "%s: complete\n",
-	    ioc->name, __func__);
+	ioc_info(ioc, "%s: complete\n", __func__);
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi25FWUploadRequest_t)/4);
 		r = -ETIME;
@@ -3864,13 +3820,11 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 		FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
 		if (FWImgHdr->PackageVersion.Word) {
-			pr_info(MPT3SAS_FMT "FW Package Version"
-			    "(%02d.%02d.%02d.%02d)\n",
-			    ioc->name,
-			    FWImgHdr->PackageVersion.Struct.Major,
-			    FWImgHdr->PackageVersion.Struct.Minor,
-			    FWImgHdr->PackageVersion.Struct.Unit,
-			    FWImgHdr->PackageVersion.Struct.Dev);
+			ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
+				 FWImgHdr->PackageVersion.Struct.Major,
+				 FWImgHdr->PackageVersion.Struct.Minor,
+				 FWImgHdr->PackageVersion.Struct.Unit,
+				 FWImgHdr->PackageVersion.Struct.Dev);
 		}
 	} else {
 		_debug_dump_mf(&mpi_reply,
@@ -3881,7 +3835,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 out:
 	if (fwpkg_data)
-		pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
+		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
 				    fwpkg_data_dma);
 	return r;
 }
@@ -3900,18 +3854,17 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
 
 	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
 	strncpy(desc, ioc->manu_pg0.ChipName, 16);
-	pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
-	    "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
-	    ioc->name, desc,
-	    (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
-	    (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
-	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
-	    ioc->facts.FWVersion.Word & 0x000000FF,
-	    ioc->pdev->revision,
-	    (bios_version & 0xFF000000) >> 24,
-	    (bios_version & 0x00FF0000) >> 16,
-	    (bios_version & 0x0000FF00) >> 8,
-	    bios_version & 0x000000FF);
+	ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+		 desc,
+		 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+		 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+		 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+		 ioc->facts.FWVersion.Word & 0x000000FF,
+		 ioc->pdev->revision,
+		 (bios_version & 0xFF000000) >> 24,
+		 (bios_version & 0x00FF0000) >> 16,
+		 (bios_version & 0x0000FF00) >> 8,
+		 bios_version & 0x000000FF);
 
 	_base_display_OEMs_branding(ioc);
 
@@ -3920,82 +3873,81 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
 		i++;
 	}
 
-	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+	ioc_info(ioc, "Protocol=(");
 
 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
-		pr_info("Initiator");
+		pr_cont("Initiator");
 		i++;
 	}
 
 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
-		pr_info("%sTarget", i ? "," : "");
+		pr_cont("%sTarget", i ? "," : "");
 		i++;
 	}
 
 	i = 0;
-	pr_info("), ");
-	pr_info("Capabilities=(");
+	pr_cont("), Capabilities=(");
 
 	if (!ioc->hide_ir_msg) {
 		if (ioc->facts.IOCCapabilities &
 		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
-			pr_info("Raid");
+			pr_cont("Raid");
 			i++;
 		}
 	}
 
 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
-		pr_info("%sTLR", i ? "," : "");
+		pr_cont("%sTLR", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
-		pr_info("%sMulticast", i ? "," : "");
+		pr_cont("%sMulticast", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
-		pr_info("%sBIDI Target", i ? "," : "");
+		pr_cont("%sBIDI Target", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
-		pr_info("%sEEDP", i ? "," : "");
+		pr_cont("%sEEDP", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
-		pr_info("%sSnapshot Buffer", i ? "," : "");
+		pr_cont("%sSnapshot Buffer", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
-		pr_info("%sDiag Trace Buffer", i ? "," : "");
+		pr_cont("%sDiag Trace Buffer", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
-		pr_info("%sDiag Extended Buffer", i ? "," : "");
+		pr_cont("%sDiag Extended Buffer", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->facts.IOCCapabilities &
 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
-		pr_info("%sTask Set Full", i ? "," : "");
+		pr_cont("%sTask Set Full", i ? "," : "");
 		i++;
 	}
 
 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
 	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
-		pr_info("%sNCQ", i ? "," : "");
+		pr_cont("%sNCQ", i ? "," : "");
 		i++;
 	}
 
-	pr_info(")\n");
+	pr_cont(")\n");
 }
 
 /**
@@ -4028,21 +3980,21 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg1) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
 	    sas_iounit_pg1, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -4074,11 +4026,11 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
 		else
 			dmd_new =
 			    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
-		pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
-			ioc->name, dmd_orignal, dmd_new);
-		pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
-			ioc->name, io_missing_delay_original,
+		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
+			 dmd_orignal, dmd_new);
+		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
+			 io_missing_delay_original,
 			 io_missing_delay);
 		ioc->device_missing_delay = dmd_new;
 		ioc->io_missing_delay = io_missing_delay;
 	}
@@ -4189,33 +4141,32 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	struct chain_tracker *ct;
 	struct reply_post_struct *rps;
 
-	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->request) {
-		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
 		    ioc->request, ioc->request_dma);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"request_pool(0x%p): free\n",
-			ioc->name, ioc->request));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "request_pool(0x%p): free\n",
+				     ioc->request));
 		ioc->request = NULL;
 	}
 
 	if (ioc->sense) {
 		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
 		dma_pool_destroy(ioc->sense_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"sense_pool(0x%p): free\n",
-			ioc->name, ioc->sense));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "sense_pool(0x%p): free\n",
+				     ioc->sense));
 		ioc->sense = NULL;
 	}
 
 	if (ioc->reply) {
 		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
 		dma_pool_destroy(ioc->reply_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"reply_pool(0x%p): free\n",
-			ioc->name, ioc->reply));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "reply_pool(0x%p): free\n",
+				     ioc->reply));
 		ioc->reply = NULL;
 	}
 
@@ -4223,9 +4174,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
 		    ioc->reply_free_dma);
 		dma_pool_destroy(ioc->reply_free_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"reply_free_pool(0x%p): free\n",
-			ioc->name, ioc->reply_free));
+		dexitprintk(ioc,
+			    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
+				     ioc->reply_free));
 		ioc->reply_free = NULL;
 	}
 
@@ -4237,9 +4188,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 					ioc->reply_post_free_dma_pool,
 					rps->reply_post_free,
 					rps->reply_post_free_dma);
-				dexitprintk(ioc, pr_info(MPT3SAS_FMT
-					"reply_post_free_pool(0x%p): free\n",
-					ioc->name, rps->reply_post_free));
+				dexitprintk(ioc,
+					    ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
+						     rps->reply_post_free));
 				rps->reply_post_free = NULL;
 			}
 		} while (ioc->rdpq_array_enable &&
@@ -4267,10 +4218,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (ioc->config_page) {
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-			"config_page(0x%p): free\n", ioc->name,
-			ioc->config_page));
-		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+		dexitprintk(ioc,
+			    ioc_info(ioc, "config_page(0x%p): free\n",
+				     ioc->config_page));
+		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
 		    ioc->config_page, ioc->config_page_dma);
 	}
 
@@ -4338,8 +4289,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	int i, j;
 	struct chain_tracker *ct;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 
 	retry_sz = 0;
@@ -4368,10 +4318,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
 		sg_tablesize = min_t(unsigned short, sg_tablesize,
 		    SG_MAX_SEGMENTS);
-		pr_warn(MPT3SAS_FMT
-		    "sg_tablesize(%u) is bigger than kernel "
-		    "defined SG_CHUNK_SIZE(%u)\n", ioc->name,
-		    sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
+		ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
+			 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
 	}
 	ioc->shost->sg_tablesize = sg_tablesize;
 }
@@ -4381,9 +4329,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
 		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
 		    INTERNAL_SCSIIO_CMDS_COUNT)) {
-			pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
-			    Credits, it has just %d number of credits\n",
-			    ioc->name, facts->RequestCredit);
+			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
+				facts->RequestCredit);
 			return -ENOMEM;
 		}
 		ioc->internal_depth = 10;
@@ -4482,11 +4429,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
 	}
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
-	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
-	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
-	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
-	    ioc->chains_needed_per_io));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
+			     ioc->max_sges_in_main_message,
+			     ioc->max_sges_in_chain_message,
+			     ioc->shost->sg_tablesize,
+			     ioc->chains_needed_per_io));
 
 	/* reply post queue, 16 byte align */
 	reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -4501,48 +4449,40 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		sizeof(struct reply_post_struct), GFP_KERNEL);
 
 	if (!ioc->reply_post) {
-		pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
-			ioc->name);
+		ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
 		goto out;
 	}
 	ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
 	    &ioc->pdev->dev, sz, 16, 0);
 	if (!ioc->reply_post_free_dma_pool) {
-		pr_err(MPT3SAS_FMT
-		    "reply_post_free pool: dma_pool_create failed\n",
-		    ioc->name);
+		ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
 		goto out;
 	}
 	i = 0;
 	do {
 		ioc->reply_post[i].reply_post_free =
-		    dma_pool_alloc(ioc->reply_post_free_dma_pool,
+		    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
 		    GFP_KERNEL,
 		    &ioc->reply_post[i].reply_post_free_dma);
 		if (!ioc->reply_post[i].reply_post_free) {
-			pr_err(MPT3SAS_FMT
-			    "reply_post_free pool: dma_pool_alloc failed\n",
-			    ioc->name);
+			ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
 			goto out;
 		}
-		memset(ioc->reply_post[i].reply_post_free, 0, sz);
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		    "reply post free pool (0x%p): depth(%d),"
-		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
-		    ioc->reply_post[i].reply_post_free,
-		    ioc->reply_post_queue_depth, 8, sz/1024));
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
-		    (unsigned long long)
-		    ioc->reply_post[i].reply_post_free_dma));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+				     ioc->reply_post[i].reply_post_free,
+				     ioc->reply_post_queue_depth,
+				     8, sz / 1024));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
+				     (u64)ioc->reply_post[i].reply_post_free_dma));
 		total_sz += sz;
 	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
 
 	if (ioc->dma_mask == 64) {
 		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
-			pr_warn(MPT3SAS_FMT
-			    "no suitable consistent DMA mask for %s\n",
-			    ioc->name, pci_name(ioc->pdev));
+			ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
+				 pci_name(ioc->pdev));
 			goto out;
 		}
 	}
@@ -4554,9 +4494,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	 * with some internal commands that could be outstanding
 	 */
 	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"scsi host: can_queue depth (%d)\n",
-		ioc->name, ioc->shost->can_queue));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
+			     ioc->shost->can_queue));
 
 
 	/* contiguous pool for request and chains, 16 byte align, one extra "
@@ -4572,12 +4512,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	sz += (ioc->internal_depth * ioc->request_sz);
 
 	ioc->request_dma_sz = sz;
-	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
+			&ioc->request_dma, GFP_KERNEL);
 	if (!ioc->request) {
-		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
-		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
-		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
-		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
+			ioc->hba_queue_depth, ioc->chains_needed_per_io,
+			ioc->request_sz, sz / 1024);
 		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
 			goto out;
 		retry_sz = 64;
@@ -4587,10 +4527,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	if (retry_sz)
-		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
-		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
-		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
-		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+		ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
+			ioc->hba_queue_depth, ioc->chains_needed_per_io,
+			ioc->request_sz, sz / 1024);
 
 	/* hi-priority queue */
 	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
@@ -4604,24 +4543,26 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
 	    ioc->request_sz);
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-	    "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-	    ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
-	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+			     ioc->request, ioc->hba_queue_depth,
+			     ioc->request_sz,
+			     (ioc->hba_queue_depth * ioc->request_sz) / 1024));
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
-	    ioc->name, (unsigned long long) ioc->request_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "request pool: dma(0x%llx)\n",
+			     (unsigned long long)ioc->request_dma));
 	total_sz += sz;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
-	    ioc->name, ioc->request, ioc->scsiio_depth));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
+			     ioc->request, ioc->scsiio_depth));
 
 	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
 	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
 	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
 	if (!ioc->chain_lookup) {
-		pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
-		    "failed\n", ioc->name);
+		ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
 		goto out;
 	}
 
@@ -4629,8 +4570,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	for (i = 0; i < ioc->scsiio_depth; i++) {
 		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
 		if (!ioc->chain_lookup[i].chains_per_smid) {
-			pr_err(MPT3SAS_FMT "chain_lookup: "
-			    " kzalloc failed\n", ioc->name);
+			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
 			goto out;
 		}
 	}
@@ -4639,29 +4579,27 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
 	    sizeof(struct request_tracker), GFP_KERNEL);
 	if (!ioc->hpr_lookup) {
-		pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
 		goto out;
 	}
 	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"hi_priority(0x%p): depth(%d), start smid(%d)\n",
-		ioc->name, ioc->hi_priority,
+	dinitprintk(ioc,
+		    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+			     ioc->hi_priority,
 			     ioc->hi_priority_depth, ioc->hi_priority_smid));
 
 	/* initialize internal queue smid's */
 	ioc->internal_lookup = kcalloc(ioc->internal_depth,
 	    sizeof(struct request_tracker), GFP_KERNEL);
 	if (!ioc->internal_lookup) {
-		pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
 		goto out;
 	}
 	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"internal(0x%p): depth(%d), start smid(%d)\n",
-		ioc->name, ioc->internal,
+	dinitprintk(ioc,
+		    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
+			     ioc->internal,
 			     ioc->internal_depth, ioc->internal_smid));
 	/*
 	 * The number of NVMe page sized blocks needed is:
 	 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
@@ -4685,17 +4623,14 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
 		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
 		if (!ioc->pcie_sg_lookup) {
-			pr_info(MPT3SAS_FMT
-			    "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+			ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
 			goto out;
 		}
 		sz = nvme_blocks_needed * ioc->page_size;
 		ioc->pcie_sgl_dma_pool =
 			dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
 		if (!ioc->pcie_sgl_dma_pool) {
-			pr_info(MPT3SAS_FMT
-			    "PCIe SGL pool: dma_pool_create failed\n",
-			    ioc->name);
+			ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
 			goto out;
 		}
 
@@ -4708,9 +4643,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 				ioc->pcie_sgl_dma_pool, GFP_KERNEL,
 				&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
 			if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
-				pr_info(MPT3SAS_FMT
-				    "PCIe SGL pool: dma_pool_alloc failed\n",
-				    ioc->name);
+				ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
 				goto out;
 			}
 			for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
@@ -4724,20 +4657,20 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			}
 		}
 
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
-		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
-		    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
-		    "fit in a PRP page(%d)\n", ioc->name,
-		    ioc->chains_per_prp_buffer));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+				     ioc->scsiio_depth, sz,
+				     (sz * ioc->scsiio_depth) / 1024));
+		dinitprintk(ioc,
+			    ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
+				     ioc->chains_per_prp_buffer));
 		total_sz += sz * ioc->scsiio_depth;
 	}
 
 	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
 	    ioc->chain_segment_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
-		pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
-		    ioc->name);
+		ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
 		goto out;
 	}
 	for (i = 0; i < ioc->scsiio_depth; i++) {
@@ -4748,8 +4681,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 				ioc->chain_dma_pool, GFP_KERNEL,
 				&ct->chain_buffer_dma);
 			if (!ct->chain_buffer) {
-				pr_err(MPT3SAS_FMT "chain_lookup: "
-				    " pci_pool_alloc failed\n", ioc->name);
+				ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
 				_base_release_memory_pools(ioc);
 				goto out;
 			}
@@ -4757,25 +4689,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			total_sz += ioc->chain_segment_sz;
 		}
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
-		ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
-		((ioc->chain_depth * ioc->chain_segment_sz))/1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+			     ioc->chain_depth, ioc->chain_segment_sz,
+			     (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
 
 	/* sense buffers, 4 byte align */
 	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
 	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
 	    4, 0);
 	if (!ioc->sense_dma_pool) {
-		pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
-		    ioc->name);
+		ioc_err(ioc, "sense pool: dma_pool_create failed\n");
 		goto out;
 	}
 	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
 	    &ioc->sense_dma);
 	if (!ioc->sense) {
-		pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
 		goto out;
 	}
 	/* sense buffer requires to be in same 4 gb region.
@@ -4797,24 +4727,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		    dma_pool_create("sense pool", &ioc->pdev->dev, sz,
 				    roundup_pow_of_two(sz), 0);
 		if (!ioc->sense_dma_pool) {
-			pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
-			    ioc->name);
+			ioc_err(ioc, "sense pool: pci_pool_create failed\n");
 			goto out;
 		}
 		ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
 		    &ioc->sense_dma);
 		if (!ioc->sense) {
-			pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
-			    ioc->name);
+			ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
 			goto out;
 		}
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
-	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
-	    SCSI_SENSE_BUFFERSIZE, sz/1024));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
-	    ioc->name, (unsigned long long)ioc->sense_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+			     ioc->sense, ioc->scsiio_depth,
+			     SCSI_SENSE_BUFFERSIZE, sz / 1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "sense_dma(0x%llx)\n",
+			     (unsigned long long)ioc->sense_dma));
 	total_sz += sz;
 
 	/* reply pool, 4 byte align */
@@ -4822,25 +4751,24 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
 	    4, 0);
 	if (!ioc->reply_dma_pool) {
-		pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
-		    ioc->name);
+		ioc_err(ioc, "reply pool: dma_pool_create failed\n");
 		goto out;
 	}
 	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
 	    &ioc->reply_dma);
 	if (!ioc->reply) {
-		pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
 		goto out;
 	}
 	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
 	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-	    "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-	    ioc->name, ioc->reply,
-	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
-	    ioc->name, (unsigned long long)ioc->reply_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+			     ioc->reply, ioc->reply_free_queue_depth,
+			     ioc->reply_sz, sz / 1024));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "reply_dma(0x%llx)\n",
+			     (unsigned long long)ioc->reply_dma));
 	total_sz += sz;
 
 	/* reply free queue, 16 byte align */
@@ -4848,24 +4776,22 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4848 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool", 4776 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
4849 &ioc->pdev->dev, sz, 16, 0); 4777 &ioc->pdev->dev, sz, 16, 0);
4850 if (!ioc->reply_free_dma_pool) { 4778 if (!ioc->reply_free_dma_pool) {
4851 pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n", 4779 ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
4852 ioc->name);
4853 goto out; 4780 goto out;
4854 } 4781 }
4855 ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL, 4782 ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
4856 &ioc->reply_free_dma); 4783 &ioc->reply_free_dma);
4857 if (!ioc->reply_free) { 4784 if (!ioc->reply_free) {
4858 pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n", 4785 ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
4859 ioc->name);
4860 goto out; 4786 goto out;
4861 } 4787 }
4862 memset(ioc->reply_free, 0, sz); 4788 dinitprintk(ioc,
4863 dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \ 4789 ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4864 "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name, 4790 ioc->reply_free, ioc->reply_free_queue_depth,
4865 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); 4791 4, sz / 1024));
4866 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4792 dinitprintk(ioc,
4867 "reply_free_dma (0x%llx)\n", 4793 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
4868 ioc->name, (unsigned long long)ioc->reply_free_dma)); 4794 (unsigned long long)ioc->reply_free_dma));
4869 total_sz += sz; 4795 total_sz += sz;
4870 4796
4871 if (ioc->rdpq_array_enable) { 4797 if (ioc->rdpq_array_enable) {
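A note on the dma_pool_zalloc() hunk above: dma_pool_zalloc() is dma_pool_alloc() plus zeroing, which is why the explicit memset(ioc->reply_free, 0, sz) disappears. A minimal sketch of the pattern, with 'pool' and 'dma' as illustrative placeholders rather than the driver's names:

	/* one call allocates and zeroes; replaces alloc + memset */
	dma_addr_t dma;
	void *buf = dma_pool_zalloc(pool, GFP_KERNEL, &dma);

	if (!buf)
		return -ENOMEM;	/* unwind exactly as the hunk above does */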
@@ -4876,8 +4802,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
 		if (!ioc->reply_post_free_array_dma_pool) {
 			dinitprintk(ioc,
-			    pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
-			    "dma_pool_create failed\n", ioc->name));
+				    ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
 			goto out;
 		}
 		ioc->reply_post_free_array =
@@ -4885,34 +4810,31 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		    GFP_KERNEL, &ioc->reply_post_free_array_dma);
 		if (!ioc->reply_post_free_array) {
 			dinitprintk(ioc,
-			    pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
-			    "dma_pool_alloc failed\n", ioc->name));
+				    ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
 			goto out;
 		}
 	}
 	ioc->config_page_sz = 512;
-	ioc->config_page = pci_alloc_consistent(ioc->pdev,
-	    ioc->config_page_sz, &ioc->config_page_dma);
+	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
+	    ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
 	if (!ioc->config_page) {
-		pr_err(MPT3SAS_FMT
-		    "config page: dma_pool_alloc failed\n",
-		    ioc->name);
+		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
 		goto out;
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-	    "config page(0x%p): size(%d)\n",
-	    ioc->name, ioc->config_page, ioc->config_page_sz));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
-	    ioc->name, (unsigned long long)ioc->config_page_dma));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "config page(0x%p): size(%d)\n",
+			     ioc->config_page, ioc->config_page_sz));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "config_page_dma(0x%llx)\n",
+			     (unsigned long long)ioc->config_page_dma));
 	total_sz += ioc->config_page_sz;
 
-	pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
-	    ioc->name, total_sz/1024);
-	pr_info(MPT3SAS_FMT
-	    "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
-	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
-	pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
-	    ioc->name, ioc->shost->sg_tablesize);
+	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
+		 total_sz / 1024);
+	ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+		 ioc->shost->can_queue, facts->RequestCredit);
+	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
+		 ioc->shost->sg_tablesize);
 	return 0;
 
  out:
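The config_page hunk above is one of the "convert to the generic DMA API" changes in this series. The old PCI wrapper hid the allocation flags (historically it expanded to dma_alloc_coherent() with GFP_ATOMIC), while the generic call spells them out; GFP_KERNEL looks safe here since the pool setup runs from attach context, though that is an inference, not something the patch states. A hedged before/after sketch, with 'pdev' standing in for ioc->pdev:

	/* legacy wrapper: device as struct pci_dev, gfp flag implicit */
	page = pci_alloc_consistent(pdev, sz, &page_dma);

	/* generic DMA API: device as struct device, gfp flag explicit */
	page = dma_alloc_coherent(&pdev->dev, sz, &page_dma, GFP_KERNEL);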
@@ -4990,9 +4912,9 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
 	do {
 		int_status = readl(&ioc->chip->HostInterruptStatus);
 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-			    "%s: successful count(%d), timeout(%d)\n",
-			    ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		}
 
@@ -5000,9 +4922,8 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
 		count++;
 	} while (--cntdn);
 
-	pr_err(MPT3SAS_FMT
-	    "%s: failed due to timeout count(%d), int_status(%x)!\n",
-	    ioc->name, __func__, count, int_status);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+		__func__, count, int_status);
 	return -EFAULT;
 }
 
@@ -5017,9 +4938,9 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
 	do {
 		int_status = readl(&ioc->chip->HostInterruptStatus);
 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-			    "%s: successful count(%d), timeout(%d)\n",
-			    ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		}
 
@@ -5027,9 +4948,8 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
 		count++;
 	} while (--cntdn);
 
-	pr_err(MPT3SAS_FMT
-	    "%s: failed due to timeout count(%d), int_status(%x)!\n",
-	    ioc->name, __func__, count, int_status);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+		__func__, count, int_status);
 	return -EFAULT;
 
 }
@@ -5056,9 +4976,9 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
 	do {
 		int_status = readl(&ioc->chip->HostInterruptStatus);
 		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-			    "%s: successful count(%d), timeout(%d)\n",
-			    ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
 			doorbell = readl(&ioc->chip->Doorbell);
@@ -5075,9 +4995,8 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
 	} while (--cntdn);
 
  out:
-	pr_err(MPT3SAS_FMT
-	    "%s: failed due to timeout count(%d), int_status(%x)!\n",
-	    ioc->name, __func__, count, int_status);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+		__func__, count, int_status);
 	return -EFAULT;
 }
 
@@ -5099,9 +5018,9 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
 	do {
 		doorbell_reg = readl(&ioc->chip->Doorbell);
 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
-			dhsprintk(ioc, pr_info(MPT3SAS_FMT
-			    "%s: successful count(%d), timeout(%d)\n",
-			    ioc->name, __func__, count, timeout));
+			dhsprintk(ioc,
+				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+					   __func__, count, timeout));
 			return 0;
 		}
 
@@ -5109,9 +5028,8 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
 		count++;
 	} while (--cntdn);
 
-	pr_err(MPT3SAS_FMT
-	    "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
-	    ioc->name, __func__, count, doorbell_reg);
+	ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+		__func__, count, doorbell_reg);
 	return -EFAULT;
 }
 
@@ -5130,8 +5048,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
 	int r = 0;
 
 	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
-		pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
 		return -EFAULT;
 	}
 
@@ -5139,7 +5056,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
 	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
 		return -EFAULT;
 
-	pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+	ioc_info(ioc, "sending message unit reset !!\n");
 
 	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
 	    &ioc->chip->Doorbell);
@@ -5149,15 +5066,14 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
 	}
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
 	if (ioc_state) {
-		pr_err(MPT3SAS_FMT
-		    "%s: failed going to ready state (ioc_state=0x%x)\n",
-		    ioc->name, __func__, ioc_state);
+		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+			__func__, ioc_state);
 		r = -EFAULT;
 		goto out;
 	}
  out:
-	pr_info(MPT3SAS_FMT "message unit reset: %s\n",
-	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+	ioc_info(ioc, "message unit reset: %s\n",
+		 r == 0 ? "SUCCESS" : "FAILED");
 	return r;
 }
 
@@ -5183,9 +5099,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
 	/* make sure doorbell is not in use */
 	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
-		pr_err(MPT3SAS_FMT
-		    "doorbell is in use (line=%d)\n",
-		    ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
 		return -EFAULT;
 	}
 
@@ -5200,17 +5114,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	    &ioc->chip->Doorbell);
 
 	if ((_base_spin_on_doorbell_int(ioc, 5))) {
-		pr_err(MPT3SAS_FMT
-		    "doorbell handshake int failed (line=%d)\n",
-		    ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 	writel(0, &ioc->chip->HostInterruptStatus);
 
 	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
-		pr_err(MPT3SAS_FMT
-		    "doorbell handshake ack failed (line=%d)\n",
-		    ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 
@@ -5222,17 +5134,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	}
 
 	if (failed) {
-		pr_err(MPT3SAS_FMT
-		    "doorbell handshake sending request failed (line=%d)\n",
-		    ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 
 	/* now wait for the reply */
 	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
-		pr_err(MPT3SAS_FMT
-		    "doorbell handshake int failed (line=%d)\n",
-		    ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 
@@ -5241,9 +5151,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 	if ((_base_wait_for_doorbell_int(ioc, 5))) {
-		pr_err(MPT3SAS_FMT
-		    "doorbell handshake int failed (line=%d)\n",
-		    ioc->name, __LINE__);
+		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+			__LINE__);
 		return -EFAULT;
 	}
 	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
@@ -5252,9 +5161,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
 	for (i = 2; i < default_reply->MsgLength * 2; i++) {
 		if ((_base_wait_for_doorbell_int(ioc, 5))) {
-			pr_err(MPT3SAS_FMT
-			    "doorbell handshake int failed (line=%d)\n",
-			    ioc->name, __LINE__);
+			ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+				__LINE__);
 			return -EFAULT;
 		}
 		if (i >= reply_bytes/2) /* overflow case */
@@ -5267,8 +5175,9 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
 	_base_wait_for_doorbell_int(ioc, 5);
 	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
-		dhsprintk(ioc, pr_info(MPT3SAS_FMT
-		    "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+		dhsprintk(ioc,
+			  ioc_info(ioc, "doorbell is in use (line=%d)\n",
+				   __LINE__));
 	}
 	writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5308,14 +5217,12 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 	void *request;
 	u16 wait_state_count;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mutex_lock(&ioc->base_cmds.mutex);
 
 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -5324,23 +5231,20 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
 		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
+			ioc_err(ioc, "%s: failed due to ioc not operational\n",
+				__func__);
 			rc = -EFAULT;
 			goto out;
 		}
 		ssleep(1);
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-		    "%s: waiting for operational state(count=%d)\n",
-		    ioc->name, __func__, wait_state_count);
+		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+			 __func__, wait_state_count);
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -5408,14 +5312,12 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 	void *request;
 	u16 wait_state_count;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mutex_lock(&ioc->base_cmds.mutex);
 
 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -5424,24 +5326,20 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
 		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
+			ioc_err(ioc, "%s: failed due to ioc not operational\n",
+				__func__);
 			rc = -EFAULT;
 			goto out;
 		}
 		ssleep(1);
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-		    "%s: waiting for operational state(count=%d)\n",
-		    ioc->name,
-		    __func__, wait_state_count);
+		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+			 __func__, wait_state_count);
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -5495,8 +5393,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
 	struct mpt3sas_port_facts *pfacts;
 	int mpi_reply_sz, mpi_request_sz, r;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
 	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
@@ -5507,8 +5404,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
 	if (r != 0) {
-		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-		    ioc->name, __func__, r);
+		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
 		return r;
 	}
 
@@ -5536,26 +5432,26 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
 	u32 ioc_state;
 	int rc;
 
-	dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->pci_error_recovery) {
-		dfailprintk(ioc, printk(MPT3SAS_FMT
-		    "%s: host in pci error recovery\n", ioc->name, __func__));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "%s: host in pci error recovery\n",
+				     __func__));
 		return -EFAULT;
 	}
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-	dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
-	    ioc->name, __func__, ioc_state));
+	dhsprintk(ioc,
+		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+			   __func__, ioc_state));
 
 	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
 	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
 		return 0;
 
 	if (ioc_state & MPI2_DOORBELL_USED) {
-		dhsprintk(ioc, printk(MPT3SAS_FMT
-		    "unexpected doorbell active!\n", ioc->name));
+		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
 		goto issue_diag_reset;
 	}
 
@@ -5567,9 +5463,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
 
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
 	if (ioc_state) {
-		dfailprintk(ioc, printk(MPT3SAS_FMT
-		    "%s: failed going to ready state (ioc_state=0x%x)\n",
-		    ioc->name, __func__, ioc_state));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+				     __func__, ioc_state));
 		return -EFAULT;
 	}
 
@@ -5592,14 +5488,13 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
 	struct mpt3sas_facts *facts;
 	int mpi_reply_sz, mpi_request_sz, r;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	r = _base_wait_for_iocstate(ioc, 10);
 	if (r) {
-		dfailprintk(ioc, printk(MPT3SAS_FMT
-		    "%s: failed getting to correct state\n",
-		    ioc->name, __func__));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "%s: failed getting to correct state\n",
+				     __func__));
 		return r;
 	}
 	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
@@ -5610,8 +5505,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
 	if (r != 0) {
-		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-		    ioc->name, __func__, r);
+		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
 		return r;
 	}
 
@@ -5663,20 +5557,20 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
 	 */
 	ioc->page_size = 1 << facts->CurrentHostPageSize;
 	if (ioc->page_size == 1) {
-		pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
-		    "default host page size to 4k\n", ioc->name);
+		ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
 		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
 	}
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
-	    ioc->name, facts->CurrentHostPageSize));
-
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-	    "hba queue depth(%d), max chains per io(%d)\n",
-	    ioc->name, facts->RequestCredit,
-	    facts->MaxChainDepth));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-	    "request frame size(%d), reply frame size(%d)\n", ioc->name,
-	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
+			     facts->CurrentHostPageSize));
+
+	dinitprintk(ioc,
+		    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
+			     facts->RequestCredit, facts->MaxChainDepth));
+	dinitprintk(ioc,
+		    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
+			     facts->IOCRequestFrameSize * 4,
+			     facts->ReplyFrameSize * 4));
 	return 0;
 }
 
@@ -5696,8 +5590,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
 	u16 ioc_status;
 	u32 reply_post_free_array_sz = 0;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
@@ -5763,15 +5656,14 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
 	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
 
 	if (r != 0) {
-		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-		    ioc->name, __func__, r);
+		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
 		return r;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
 	    mpi_reply.IOCLogInfo) {
-		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+		ioc_err(ioc, "%s: failed\n", __func__);
 		r = -EIO;
 	}
 
@@ -5842,18 +5734,16 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
 	u16 smid;
 	u16 ioc_status;
 
-	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+	ioc_info(ioc, "sending port enable !!\n");
 
 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return -EAGAIN;
 	}
 
@@ -5867,8 +5757,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
 	mpt3sas_base_put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
 	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2PortEnableRequest_t)/4);
 		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
@@ -5881,16 +5770,15 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
 	mpi_reply = ioc->port_enable_cmds.reply;
 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
-		    ioc->name, __func__, ioc_status);
+		ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
+			__func__, ioc_status);
 		r = -EFAULT;
 		goto out;
 	}
 
  out:
 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
-	pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
-	    "SUCCESS" : "FAILED"));
+	ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
 	return r;
 }
 
@@ -5906,18 +5794,16 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
 	Mpi2PortEnableRequest_t *mpi_request;
 	u16 smid;
 
-	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+	ioc_info(ioc, "sending port enable !!\n");
 
 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return -EAGAIN;
 	}
 
@@ -6020,19 +5906,16 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
 	int r = 0;
 	int i;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
-		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: internal command already in use\n", __func__);
 		return -EAGAIN;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		return -EAGAIN;
 	}
 	ioc->base_cmds.status = MPT3_CMD_PENDING;
@@ -6049,8 +5932,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
 	mpt3sas_base_put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2EventNotificationRequest_t)/4);
 		if (ioc->base_cmds.status & MPT3_CMD_RESET)
@@ -6058,8 +5940,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
 		else
 			r = -ETIME;
 	} else
-		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
-		    ioc->name, __func__));
+		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 	return r;
 }
@@ -6115,18 +5996,16 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 	u32 count;
 	u32 hcb_size;
 
-	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+	ioc_info(ioc, "sending diag reset !!\n");
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
-	    ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
 
 	count = 0;
 	do {
 		/* Write magic sequence to WriteSequence register
 		 * Loop until in diagnostic mode
 		 */
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-		    "write magic sequence\n", ioc->name));
+		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
 		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
 		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
 		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
@@ -6142,16 +6021,15 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 			goto out;
 
 		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-		    "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
-		    ioc->name, count, host_diagnostic));
+		drsprintk(ioc,
+			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+				   count, host_diagnostic));
 
 	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
 
 	hcb_size = readl(&ioc->chip->HCBSize);
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
-	    ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
 	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
 	     &ioc->chip->HostDiagnostic);
 
@@ -6174,43 +6052,38 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 
 	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
 
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-		    "restart the adapter assuming the HCB Address points to good F/W\n",
-		    ioc->name));
+		drsprintk(ioc,
+			  ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
 		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
 		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
 		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
 
-		drsprintk(ioc, pr_info(MPT3SAS_FMT
-		    "re-enable the HCDW\n", ioc->name));
+		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
 		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
 		    &ioc->chip->HCBSize);
 	}
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
-	    ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
 	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
 	    &ioc->chip->HostDiagnostic);
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT
-	    "disable writes to the diagnostic register\n", ioc->name));
+	drsprintk(ioc,
+		  ioc_info(ioc, "disable writes to the diagnostic register\n"));
 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
 
-	drsprintk(ioc, pr_info(MPT3SAS_FMT
-	    "Wait for FW to go to the READY state\n", ioc->name));
+	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
 	if (ioc_state) {
-		pr_err(MPT3SAS_FMT
-		    "%s: failed going to ready state (ioc_state=0x%x)\n",
-		    ioc->name, __func__, ioc_state);
+		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+			__func__, ioc_state);
 		goto out;
 	}
 
-	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+	ioc_info(ioc, "diag reset: SUCCESS\n");
 	return 0;
 
  out:
-	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+	ioc_err(ioc, "diag reset: FAILED\n");
 	return -EFAULT;
 }
 
@@ -6228,15 +6101,15 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
 	int rc;
 	int count;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	if (ioc->pci_error_recovery)
 		return 0;
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
-	    ioc->name, __func__, ioc_state));
+	dhsprintk(ioc,
+		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+			   __func__, ioc_state));
 
 	/* if in RESET state, it should move to READY state shortly */
 	count = 0;
@@ -6244,9 +6117,8 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
 	while ((ioc_state & MPI2_IOC_STATE_MASK) !=
 	    MPI2_IOC_STATE_READY) {
 		if (count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed going to ready state (ioc_state=0x%x)\n",
-			    ioc->name, __func__, ioc_state);
+			ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+				__func__, ioc_state);
 			return -EFAULT;
 		}
 		ssleep(1);
@@ -6258,9 +6130,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
 		return 0;
 
 	if (ioc_state & MPI2_DOORBELL_USED) {
-		dhsprintk(ioc, pr_info(MPT3SAS_FMT
-		    "unexpected doorbell active!\n",
-		    ioc->name));
+		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
 		goto issue_diag_reset;
 	}
 
@@ -6304,8 +6174,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
 	struct adapter_reply_queue *reply_q;
 	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	/* clean the delayed target reset list */
 	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
@@ -6465,8 +6334,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
 void
 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
 {
-	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	/* synchronizing freeing resource with pci_access_mutex lock */
 	mutex_lock(&ioc->pci_access_mutex);
@@ -6494,8 +6362,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	int r, i;
 	int cpu_id, last_cpu_id = 0;
 
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	/* setup cpu_msix_table */
 	ioc->cpu_count = num_online_cpus();
@@ -6505,9 +6372,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
 	ioc->reply_queue_count = 1;
 	if (!ioc->cpu_msix_table) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-		    "allocation for cpu_msix_table failed!!!\n",
-		    ioc->name));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
 		r = -ENOMEM;
 		goto out_free_resources;
 	}
@@ -6516,9 +6382,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
 	    sizeof(resource_size_t *), GFP_KERNEL);
 	if (!ioc->reply_post_host_index) {
-		dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
-		    "for reply_post_host_index failed!!!\n",
-		    ioc->name));
+		dfailprintk(ioc,
+			    ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
 		r = -ENOMEM;
 		goto out_free_resources;
 	}
@@ -6747,8 +6612,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 void
 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
 {
-	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 	mpt3sas_base_stop_watchdog(ioc);
 	mpt3sas_base_free_resources(ioc);
@@ -6781,8 +6645,7 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
 	mpt3sas_scsih_pre_reset_handler(ioc);
 	mpt3sas_ctl_pre_reset_handler(ioc);
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
 }
 
 /**
@@ -6793,8 +6656,7 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
 	mpt3sas_scsih_after_reset_handler(ioc);
 	mpt3sas_ctl_after_reset_handler(ioc);
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
 	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
 		ioc->transport_cmds.status |= MPT3_CMD_RESET;
 		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -6835,8 +6697,7 @@ static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
 {
 	mpt3sas_scsih_reset_done_handler(ioc);
 	mpt3sas_ctl_reset_done_handler(ioc);
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
 }
 
 /**
@@ -6883,12 +6744,10 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 	u32 ioc_state;
 	u8 is_fault = 0, is_trigger = 0;
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	if (ioc->pci_error_recovery) {
-		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
 		r = 0;
 		goto out_unlocked;
 	}
@@ -6942,8 +6801,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 	_base_reset_done_handler(ioc);
 
  out:
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
-	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+	dtmprintk(ioc,
+		  ioc_info(ioc, "%s: %s\n",
+			   __func__, r == 0 ? "SUCCESS" : "FAILED"));
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 	ioc->shost_recovery = 0;
@@ -6959,7 +6819,6 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 		mpt3sas_trigger_master(ioc,
 		    MASTER_TRIGGER_ADAPTER_RESET);
 	}
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
 	return r;
 }
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 96dc15e90bd8..8f1d6b071b39 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -158,7 +158,14 @@ struct mpt3sas_nvme_cmd {
 /*
  * logging format
  */
-#define MPT3SAS_FMT "%s: "
+#define ioc_err(ioc, fmt, ...) \
+	pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+	pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+	pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+	pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
 
 /*
  * WarpDrive Specific Log codes
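These four wrappers are the whole mechanism behind the call-site churn above: each folds the old MPT3SAS_FMT "%s: " prefix and the (ioc)->name argument into one macro, so callers stop threading ioc->name by hand. A representative before/after pair, lifted from the hunks in this patch:

	/* before: prefix macro plus hand-passed ioc->name */
	pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, __func__);

	/* after: the wrapper supplies the prefix and name itself */
	ioc_err(ioc, "%s: timeout\n", __func__);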
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index d29a2dcc7d0e..02209447f4ef 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -175,20 +175,18 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	if (!desc)
 		return;
 
-	pr_info(MPT3SAS_FMT
-	    "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
-	    ioc->name, calling_function_name, desc,
-	    mpi_request->Header.PageNumber, mpi_request->Action,
-	    le32_to_cpu(mpi_request->PageAddress), smid);
+	ioc_info(ioc, "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+		 calling_function_name, desc,
+		 mpi_request->Header.PageNumber, mpi_request->Action,
+		 le32_to_cpu(mpi_request->PageAddress), smid);
 
 	if (!mpi_reply)
 		return;
 
 	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
-		pr_info(MPT3SAS_FMT
-		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
-		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-		    le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+			 le16_to_cpu(mpi_reply->IOCStatus),
+			 le32_to_cpu(mpi_reply->IOCLogInfo));
 }
 
 /**
@@ -210,9 +208,8 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
 		mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
 		    &mem->page_dma, GFP_KERNEL);
 		if (!mem->page) {
-			pr_err(MPT3SAS_FMT
-			    "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
-			    ioc->name, __func__, mem->sz);
+			ioc_err(ioc, "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+				__func__, mem->sz);
 			r = -ENOMEM;
 		}
 	} else { /* use tmp buffer if less than 512 bytes */
@@ -313,8 +310,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 
 	mutex_lock(&ioc->config_cmds.mutex);
 	if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: config_cmd in use\n", __func__);
 		mutex_unlock(&ioc->config_cmds.mutex);
 		return -EAGAIN;
 	}
@@ -362,34 +358,30 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
362 r = -EFAULT; 358 r = -EFAULT;
363 goto free_mem; 359 goto free_mem;
364 } 360 }
365 pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n", 361 ioc_info(ioc, "%s: attempting retry (%d)\n",
366 ioc->name, __func__, retry_count); 362 __func__, retry_count);
367 } 363 }
368 wait_state_count = 0; 364 wait_state_count = 0;
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
 		if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
+			ioc_err(ioc, "%s: failed due to ioc not operational\n",
+				__func__);
 			ioc->config_cmds.status = MPT3_CMD_NOT_USED;
 			r = -EFAULT;
 			goto free_mem;
 		}
 		ssleep(1);
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-		    "%s: waiting for operational state(count=%d)\n",
-		    ioc->name, __func__, wait_state_count);
+		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+			 __func__, wait_state_count);
 	}
 	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-		    ioc->name, __func__);
+		ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		ioc->config_cmds.status = MPT3_CMD_NOT_USED;
 		r = -EAGAIN;
 		goto free_mem;
@@ -429,12 +421,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	    (mpi_reply->Header.PageType & 0xF)) {
 		_debug_dump_mf(mpi_request, ioc->request_sz/4);
 		_debug_dump_reply(mpi_reply, ioc->request_sz/4);
-		panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
-		    " mpi_reply mismatch: Requested PageType(0x%02x)" \
-		    " Reply PageType(0x%02x)\n", \
-		    ioc->name, __func__,
-		    (mpi_request->Header.PageType & 0xF),
-		    (mpi_reply->Header.PageType & 0xF));
+		panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+		      ioc->name, __func__,
+		      mpi_request->Header.PageType & 0xF,
+		      mpi_reply->Header.PageType & 0xF);
 	}
 
 	if (((mpi_request->Header.PageType & 0xF) ==
@@ -442,19 +432,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	    mpi_request->ExtPageType != mpi_reply->ExtPageType) {
 		_debug_dump_mf(mpi_request, ioc->request_sz/4);
 		_debug_dump_reply(mpi_reply, ioc->request_sz/4);
-		panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
-		    " mpi_reply mismatch: Requested ExtPageType(0x%02x)"
-		    " Reply ExtPageType(0x%02x)\n",
-		    ioc->name, __func__, mpi_request->ExtPageType,
-		    mpi_reply->ExtPageType);
+		panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+		      ioc->name, __func__,
+		      mpi_request->ExtPageType,
+		      mpi_reply->ExtPageType);
 	}
 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
 		    & MPI2_IOCSTATUS_MASK;
 	}
 
 	if (retry_count)
-		pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
-		    ioc->name, __func__, retry_count);
+		ioc_info(ioc, "%s: retry (%d) completed!!\n",
+			 __func__, retry_count);
 
 	if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
 	    config_page && mpi_request->Action ==
@@ -469,14 +458,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 			_debug_dump_reply(mpi_reply, ioc->request_sz/4);
 			_debug_dump_config(p, min_t(u16, mem.sz,
 			    config_page_sz)/4);
-			panic(KERN_WARNING MPT3SAS_FMT
-			    "%s: Firmware BUG:" \
-			    " config page mismatch:"
-			    " Requested PageType(0x%02x)"
-			    " Reply PageType(0x%02x)\n",
-			    ioc->name, __func__,
-			    (mpi_request->Header.PageType & 0xF),
-			    (p[3] & 0xF));
+			panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+			      ioc->name, __func__,
+			      mpi_request->Header.PageType & 0xF,
+			      p[3] & 0xF);
 		}
 
 		if (((mpi_request->Header.PageType & 0xF) ==
@@ -486,13 +471,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 			_debug_dump_reply(mpi_reply, ioc->request_sz/4);
 			_debug_dump_config(p, min_t(u16, mem.sz,
 			    config_page_sz)/4);
-			panic(KERN_WARNING MPT3SAS_FMT
-			    "%s: Firmware BUG:" \
-			    " config page mismatch:"
-			    " Requested ExtPageType(0x%02x)"
-			    " Reply ExtPageType(0x%02x)\n",
-			    ioc->name, __func__,
-			    mpi_request->ExtPageType, p[6]);
+			panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+			      ioc->name, __func__,
+			      mpi_request->ExtPageType, p[6]);
 		}
 	}
 	memcpy(config_page, mem.page, min_t(u16, mem.sz,
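The bulk of the churn above is mechanical: every pr_err()/pr_info()/pr_warn() call that spelled out MPT3SAS_FMT plus ioc->name by hand becomes a single ioc_err()/ioc_info()/ioc_warn() call. As a hedged sketch (the helpers presumably sit in mpt3sas.h next to the old MPT3SAS_FMT definition), the wrappers only need to prepend the adapter name:

	/* sketch: per-adapter printk wrappers; (ioc)->name supplies the prefix */
	#define ioc_err(ioc, fmt, ...) \
		pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
	#define ioc_warn(ioc, fmt, ...) \
		pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
	#define ioc_info(ioc, fmt, ...) \
		pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)

With that shape, ioc_info(ioc, "host reset: %s\n", ...) emits the same "<name>: host reset: ..." line the old pr_info(MPT3SAS_FMT ...) produced, while the format string and its arguments sit in one place.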
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 5e8c059ce2c9..4afa597cbfba 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -185,17 +185,15 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	if (!desc)
 		return;
 
-	pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
-	    ioc->name, calling_function_name, desc, smid);
+	ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);
 
 	if (!mpi_reply)
 		return;
 
 	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
-		pr_info(MPT3SAS_FMT
-		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
-		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-		    le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+			 le16_to_cpu(mpi_reply->IOCStatus),
+			 le32_to_cpu(mpi_reply->IOCLogInfo));
 
 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 	    mpi_request->Function ==
@@ -208,38 +206,32 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 		sas_device = mpt3sas_get_sdev_by_handle(ioc,
 		    le16_to_cpu(scsi_reply->DevHandle));
 		if (sas_device) {
-			pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
-			    ioc->name, (unsigned long long)
-			    sas_device->sas_address, sas_device->phy);
-			pr_warn(MPT3SAS_FMT
-			    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
-			    ioc->name, (unsigned long long)
-			    sas_device->enclosure_logical_id, sas_device->slot);
+			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+				 (u64)sas_device->sas_address,
+				 sas_device->phy);
+			ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+				 (u64)sas_device->enclosure_logical_id,
+				 sas_device->slot);
 			sas_device_put(sas_device);
 		}
 		if (!sas_device) {
 			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
 			    le16_to_cpu(scsi_reply->DevHandle));
 			if (pcie_device) {
-				pr_warn(MPT3SAS_FMT
-				    "\tWWID(0x%016llx), port(%d)\n", ioc->name,
-				    (unsigned long long)pcie_device->wwid,
-				    pcie_device->port_num);
+				ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
+					 (unsigned long long)pcie_device->wwid,
+					 pcie_device->port_num);
 				if (pcie_device->enclosure_handle != 0)
-					pr_warn(MPT3SAS_FMT
-					    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
-					    ioc->name, (unsigned long long)
-					    pcie_device->enclosure_logical_id,
-					    pcie_device->slot);
+					ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+						 (u64)pcie_device->enclosure_logical_id,
+						 pcie_device->slot);
 				pcie_device_put(pcie_device);
 			}
 		}
 		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
-			pr_info(MPT3SAS_FMT
-			    "\tscsi_state(0x%02x), scsi_status"
-			    "(0x%02x)\n", ioc->name,
-			    scsi_reply->SCSIState,
-			    scsi_reply->SCSIStatus);
+			ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
+				 scsi_reply->SCSIState,
+				 scsi_reply->SCSIStatus);
 	}
 }
 
@@ -466,8 +458,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 	int i;
 	u8 issue_reset;
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
 	for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
 		if (!(ioc->diag_buffer_status[i] &
 		    MPT3_DIAG_BUFFER_IS_REGISTERED))
@@ -487,8 +478,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
  */
 void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
 	if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
 		ioc->ctl_cmds.status |= MPT3_CMD_RESET;
 		mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -506,8 +496,7 @@ void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
 {
 	int i;
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
 
 	for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
 		if (!(ioc->diag_buffer_status[i] &
@@ -612,10 +601,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
 	}
 
 	if (!found) {
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: handle(0x%04x), lun(%d), no active mid!!\n",
-		    ioc->name,
-		    desc, le16_to_cpu(tm_request->DevHandle), lun));
+		dctlprintk(ioc,
+			   ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+				    desc, le16_to_cpu(tm_request->DevHandle),
+				    lun));
 		tm_reply = ioc->ctl_cmds.reply;
 		tm_reply->DevHandle = tm_request->DevHandle;
 		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -631,10 +620,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
 		return 1;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
-	    desc, le16_to_cpu(tm_request->DevHandle), lun,
-	    le16_to_cpu(tm_request->TaskMID)));
+	dctlprintk(ioc,
+		   ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
+			    desc, le16_to_cpu(tm_request->DevHandle), lun,
+			    le16_to_cpu(tm_request->TaskMID)));
 	return 0;
 }
 
@@ -672,8 +661,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	issue_reset = 0;
 
 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
 		ret = -EAGAIN;
 		goto out;
 	}
@@ -682,28 +670,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
 		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-			    "%s: failed due to ioc not operational\n",
-			    ioc->name, __func__);
+			ioc_err(ioc, "%s: failed due to ioc not operational\n",
+				__func__);
 			ret = -EFAULT;
 			goto out;
 		}
 		ssleep(1);
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-		    "%s: waiting for operational state(count=%d)\n",
-		    ioc->name,
-		    __func__, wait_state_count);
+		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+			 __func__, wait_state_count);
 	}
 	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-		    ioc->name, __func__);
+		ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
 	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
 	if (!mpi_request) {
-		pr_err(MPT3SAS_FMT
-		    "%s: failed obtaining a memory for mpi_request\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n",
+			__func__);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -726,8 +709,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
 		smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
 		if (!smid) {
-			pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-			    ioc->name, __func__);
+			ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 			ret = -EAGAIN;
 			goto out;
 		}
@@ -762,8 +744,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 
 	/* obtain dma-able memory for data transfer */
 	if (data_out_sz) /* WRITE */ {
-		data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
-		    &data_out_dma);
+		data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+				&data_out_dma, GFP_KERNEL);
 		if (!data_out) {
 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
 			    __LINE__, __func__);
@@ -782,8 +764,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	}
 
 	if (data_in_sz) /* READ */ {
-		data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
-		    &data_in_dma);
+		data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+				&data_in_dma, GFP_KERNEL);
 		if (!data_in) {
 			pr_err("failure at %s:%d/%s()!\n", __FILE__,
 			    __LINE__, __func__);
@@ -823,9 +805,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
 		    data_out_dma, data_out_sz, data_in_dma, data_in_sz);
 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-			dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
-			    "ioctl failed due to device removal in progress\n",
-			    ioc->name, device_handle));
+			dtmprintk(ioc,
+				  ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+					   device_handle));
 			mpt3sas_base_free_smid(ioc, smid);
 			ret = -EINVAL;
 			goto out;
@@ -843,9 +825,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
 		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-			dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			    "handle(0x%04x) :ioctl failed due to device removal in progress\n",
-			    ioc->name, device_handle));
+			dtmprintk(ioc,
+				  ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+					   device_handle));
 			mpt3sas_base_free_smid(ioc, smid);
 			ret = -EINVAL;
 			goto out;
@@ -863,10 +845,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		Mpi2SCSITaskManagementRequest_t *tm_request =
 		    (Mpi2SCSITaskManagementRequest_t *)request;
 
-		dtmprintk(ioc, pr_info(MPT3SAS_FMT
-		    "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
-		    ioc->name,
-		    le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+		dtmprintk(ioc,
+			  ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+				   le16_to_cpu(tm_request->DevHandle),
+				   tm_request->TaskType));
 		ioc->got_task_abort_from_ioctl = 1;
 		if (tm_request->TaskType ==
 		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
@@ -881,9 +863,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 			ioc->got_task_abort_from_ioctl = 0;
 
 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-			dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			    "handle(0x%04x) :ioctl failed due to device removal in progress\n",
-			    ioc->name, device_handle));
+			dtmprintk(ioc,
+				  ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+					   device_handle));
 			mpt3sas_base_free_smid(ioc, smid);
 			ret = -EINVAL;
 			goto out;
@@ -929,9 +911,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	case MPI2_FUNCTION_SATA_PASSTHROUGH:
 	{
 		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-			dtmprintk(ioc, pr_info(MPT3SAS_FMT
-			    "handle(0x%04x) :ioctl failed due to device removal in progress\n",
-			    ioc->name, device_handle));
+			dtmprintk(ioc,
+				  ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+					   device_handle));
 			mpt3sas_base_free_smid(ioc, smid);
 			ret = -EINVAL;
 			goto out;
@@ -1017,12 +999,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		Mpi2SCSITaskManagementReply_t *tm_reply =
 		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;
 
-		pr_info(MPT3SAS_FMT "TASK_MGMT: " \
-		    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
-		    "TerminationCount(0x%08x)\n", ioc->name,
-		    le16_to_cpu(tm_reply->IOCStatus),
-		    le32_to_cpu(tm_reply->IOCLogInfo),
-		    le32_to_cpu(tm_reply->TerminationCount));
+		ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
+			 le16_to_cpu(tm_reply->IOCStatus),
+			 le32_to_cpu(tm_reply->IOCLogInfo),
+			 le32_to_cpu(tm_reply->TerminationCount));
 	}
 
 	/* copy out xdata to user */
@@ -1054,9 +1034,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
 	    MPI2_FUNCTION_NVME_ENCAPSULATED)) {
 		if (karg.sense_data_ptr == NULL) {
-			pr_info(MPT3SAS_FMT "Response buffer provided"
-			    " by application is NULL; Response data will"
-			    " not be returned.\n", ioc->name);
+			ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
 			goto out;
 		}
 		sz_arg = (mpi_request->Function ==
@@ -1079,9 +1057,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	    mpi_request->Function ==
 	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
 	    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
-		pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
-		    ioc->name,
-		    le16_to_cpu(mpi_request->FunctionDependent1));
+		ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
+			 le16_to_cpu(mpi_request->FunctionDependent1));
 		mpt3sas_halt_firmware(ioc);
 		pcie_device = mpt3sas_get_pdev_by_handle(ioc,
 		    le16_to_cpu(mpi_request->FunctionDependent1));
@@ -1106,11 +1083,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 
 	/* free memory associated with sg buffers */
 	if (data_in)
-		pci_free_consistent(ioc->pdev, data_in_sz, data_in,
-		    data_in_dma);
+		dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
+				data_in_dma);
 
 	if (data_out)
-		pci_free_consistent(ioc->pdev, data_out_sz, data_out,
-		    data_out_dma);
+		dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
+				data_out_dma);
 
 	kfree(mpi_request);
@@ -1128,8 +1105,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt3_ioctl_iocinfo karg;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+				 __func__));
 
 	memset(&karg, 0 , sizeof(karg));
 	if (ioc->pfacts)
@@ -1188,8 +1165,8 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+				 __func__));
 
 	karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
 	memcpy(karg.event_types, ioc->event_type,
@@ -1219,8 +1196,8 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+				 __func__));
 
 	memcpy(ioc->event_type, karg.event_types,
 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
@@ -1259,8 +1236,8 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+				 __func__));
 
 	number_bytes = karg.hdr.max_data_size -
 	    sizeof(struct mpt3_ioctl_header);
@@ -1306,12 +1283,11 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 	    ioc->is_driver_loading)
 		return -EAGAIN;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+				 __func__));
 
 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-	pr_info(MPT3SAS_FMT "host reset: %s\n",
-	    ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+	ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
 	return 0;
 }
 
@@ -1440,8 +1416,8 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	rc = _ctl_btdh_search_sas_device(ioc, &karg);
 	if (!rc)
@@ -1512,53 +1488,46 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 	u32 ioc_state;
 	u8 issue_reset = 0;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-		pr_err(MPT3SAS_FMT
-		    "%s: failed due to ioc not operational\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed due to ioc not operational\n",
+			__func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	buffer_type = diag_register->buffer_type;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if (ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) {
-		pr_err(MPT3SAS_FMT
-		    "%s: already has a registered buffer for buffer_type(0x%02x)\n",
-		    ioc->name, __func__,
-		    buffer_type);
+		ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 
 	if (diag_register->requested_buffer_size % 4) {
-		pr_err(MPT3SAS_FMT
-		    "%s: the requested_buffer_size is not 4 byte aligned\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
+			__func__);
 		return -EINVAL;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1580,9 +1549,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 	if (request_data) {
 		request_data_dma = ioc->diag_buffer_dma[buffer_type];
 		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
-			pci_free_consistent(ioc->pdev,
-			    ioc->diag_buffer_sz[buffer_type],
-			    request_data, request_data_dma);
+			dma_free_coherent(&ioc->pdev->dev,
+					ioc->diag_buffer_sz[buffer_type],
+					request_data, request_data_dma);
 			request_data = NULL;
 		}
 	}
@@ -1590,12 +1559,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 	if (request_data == NULL) {
 		ioc->diag_buffer_sz[buffer_type] = 0;
 		ioc->diag_buffer_dma[buffer_type] = 0;
-		request_data = pci_alloc_consistent(
-			ioc->pdev, request_data_sz, &request_data_dma);
+		request_data = dma_alloc_coherent(&ioc->pdev->dev,
+				request_data_sz, &request_data_dma, GFP_KERNEL);
 		if (request_data == NULL) {
-			pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
-			    " for diag buffers, requested size(%d)\n",
-			    ioc->name, __func__, request_data_sz);
+			ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
+				__func__, request_data_sz);
 			mpt3sas_base_free_smid(ioc, smid);
 			return -ENOMEM;
 		}
@@ -1612,11 +1580,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 	mpi_request->VF_ID = 0; /* TODO */
 	mpi_request->VP_ID = 0;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
-	    ioc->name, __func__, request_data,
-	    (unsigned long long)request_data_dma,
-	    le32_to_cpu(mpi_request->BufferLength)));
+	dctlprintk(ioc,
+		   ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+			    __func__, request_data,
+			    (unsigned long long)request_data_dma,
+			    le32_to_cpu(mpi_request->BufferLength)));
 
 	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
 		mpi_request->ProductSpecific[i] =
@@ -1637,8 +1605,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 
 	/* process the completed Reply Message Frame */
 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
-		pr_err(MPT3SAS_FMT "%s: no reply message\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: no reply message\n", __func__);
 		rc = -EFAULT;
 		goto out;
 	}
@@ -1649,13 +1616,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 		ioc->diag_buffer_status[buffer_type] |=
 		    MPT3_DIAG_BUFFER_IS_REGISTERED;
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
-		    ioc->name, __func__));
+		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
 	} else {
-		pr_info(MPT3SAS_FMT
-		    "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
-		    ioc->name, __func__,
-		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+			 __func__,
+			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -1666,7 +1631,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
  out:
 
 	if (rc && request_data)
-		pci_free_consistent(ioc->pdev, request_data_sz,
-		    request_data, request_data_dma);
+		dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+				request_data, request_data_dma);
 
 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
@@ -1689,8 +1654,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
 	memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
 
 	if (bits_to_register & 1) {
-		pr_info(MPT3SAS_FMT "registering trace buffer support\n",
-		    ioc->name);
+		ioc_info(ioc, "registering trace buffer support\n");
 		ioc->diag_trigger_master.MasterData =
 		    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
@@ -1701,8 +1665,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
 	}
 
 	if (bits_to_register & 2) {
-		pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
-		    ioc->name);
+		ioc_info(ioc, "registering snapshot buffer support\n");
 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
 		/* register for 2MB buffers */
 		diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1711,8 +1674,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
 	}
 
 	if (bits_to_register & 4) {
-		pr_info(MPT3SAS_FMT "registering extended buffer support\n",
-		    ioc->name);
+		ioc_info(ioc, "registering extended buffer support\n");
 		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
 		/* register for 2MB buffers */
 		diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1768,51 +1730,46 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	buffer_type = karg.unique_id & 0x000000ff;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-		    "%s: buffer_type(0x%02x) is not registered\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-		pr_err(MPT3SAS_FMT
-		    "%s: buffer_type(0x%02x) has not been released\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 
 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
-		pr_err(MPT3SAS_FMT
-		    "%s: unique_id(0x%08x) is not registered\n",
-		    ioc->name, __func__, karg.unique_id);
+		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+			__func__, karg.unique_id);
 		return -EINVAL;
 	}
 
 	request_data = ioc->diag_buffer[buffer_type];
 	if (!request_data) {
-		pr_err(MPT3SAS_FMT
-		    "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -ENOMEM;
 	}
 
 	request_data_sz = ioc->diag_buffer_sz[buffer_type];
 	request_data_dma = ioc->diag_buffer_dma[buffer_type];
-	pci_free_consistent(ioc->pdev, request_data_sz,
-	    request_data, request_data_dma);
+	dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+			request_data, request_data_dma);
 	ioc->diag_buffer[buffer_type] = NULL;
 	ioc->diag_buffer_status[buffer_type] = 0;
 	return 0;
@@ -1841,41 +1798,37 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	karg.application_flags = 0;
 	buffer_type = karg.buffer_type;
 
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-		    "%s: buffer_type(0x%02x) is not registered\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 
 	if (karg.unique_id & 0xffffff00) {
 		if (karg.unique_id != ioc->unique_id[buffer_type]) {
-			pr_err(MPT3SAS_FMT
-			    "%s: unique_id(0x%08x) is not registered\n",
-			    ioc->name, __func__, karg.unique_id);
+			ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+				__func__, karg.unique_id);
 			return -EINVAL;
 		}
 	}
 
 	request_data = ioc->diag_buffer[buffer_type];
 	if (!request_data) {
-		pr_err(MPT3SAS_FMT
-		    "%s: doesn't have buffer for buffer_type(0x%02x)\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -ENOMEM;
 	}
 
@@ -1897,9 +1850,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 	karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
 
 	if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
-		pr_err(MPT3SAS_FMT
-		    "%s: unable to write mpt3_diag_query data @ %p\n",
-		    ioc->name, __func__, arg);
+		ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
+			__func__, arg);
 		return -EFAULT;
 	}
 	return 0;
@@ -1923,8 +1875,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 	u32 ioc_state;
 	int rc;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	rc = 0;
 	*issue_reset = 0;
@@ -1935,24 +1887,22 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 		    MPT3_DIAG_BUFFER_IS_REGISTERED)
 			ioc->diag_buffer_status[buffer_type] |=
 			    MPT3_DIAG_BUFFER_IS_RELEASED;
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: skipping due to FAULT state\n", ioc->name,
-		    __func__));
+		dctlprintk(ioc,
+			   ioc_info(ioc, "%s: skipping due to FAULT state\n",
+				    __func__));
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1982,8 +1932,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 
 	/* process the completed Reply Message Frame */
 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
-		pr_err(MPT3SAS_FMT "%s: no reply message\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: no reply message\n", __func__);
 		rc = -EFAULT;
 		goto out;
 	}
@@ -1994,13 +1943,11 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 		ioc->diag_buffer_status[buffer_type] |=
 		    MPT3_DIAG_BUFFER_IS_RELEASED;
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
-		    ioc->name, __func__));
+		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
 	} else {
-		pr_info(MPT3SAS_FMT
-		    "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
-		    ioc->name, __func__,
-		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+			 __func__,
+			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -2033,47 +1980,41 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	buffer_type = karg.unique_id & 0x000000ff;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-		    "%s: buffer_type(0x%02x) is not registered\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+			__func__, buffer_type);
 		return -EINVAL;
 	}
 
 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
-		pr_err(MPT3SAS_FMT
-		    "%s: unique_id(0x%08x) is not registered\n",
-		    ioc->name, __func__, karg.unique_id);
+		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+			__func__, karg.unique_id);
 		return -EINVAL;
 	}
 
 	if (ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_RELEASED) {
-		pr_err(MPT3SAS_FMT
-		    "%s: buffer_type(0x%02x) is already released\n",
-		    ioc->name, __func__,
-		    buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
+			__func__, buffer_type);
 		return 0;
 	}
 
 	request_data = ioc->diag_buffer[buffer_type];
 
 	if (!request_data) {
-		pr_err(MPT3SAS_FMT
-		    "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -ENOMEM;
 	}
 
@@ -2084,9 +2025,8 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		    MPT3_DIAG_BUFFER_IS_RELEASED;
 		ioc->diag_buffer_status[buffer_type] &=
 		    ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
-		pr_err(MPT3SAS_FMT
-		    "%s: buffer_type(0x%02x) was released due to host reset\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
+			__func__, buffer_type);
 		return 0;
 	}
 
@@ -2124,38 +2064,34 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		return -EFAULT;
 	}
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-	    __func__));
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",
+				 __func__));
 
 	buffer_type = karg.unique_id & 0x000000ff;
 	if (!_ctl_diag_capability(ioc, buffer_type)) {
-		pr_err(MPT3SAS_FMT
-		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -EPERM;
 	}
 
 	if (karg.unique_id != ioc->unique_id[buffer_type]) {
-		pr_err(MPT3SAS_FMT
-		    "%s: unique_id(0x%08x) is not registered\n",
-		    ioc->name, __func__, karg.unique_id);
+		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+			__func__, karg.unique_id);
 		return -EINVAL;
 	}
 
 	request_data = ioc->diag_buffer[buffer_type];
 	if (!request_data) {
-		pr_err(MPT3SAS_FMT
-		    "%s: doesn't have buffer for buffer_type(0x%02x)\n",
-		    ioc->name, __func__, buffer_type);
+		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+			__func__, buffer_type);
 		return -ENOMEM;
 	}
 
 	request_size = ioc->diag_buffer_sz[buffer_type];
 
 	if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
-		pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
-		    "or bytes_to_read are not 4 byte aligned\n", ioc->name,
-		    __func__);
+		ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
+			__func__);
 		return -EINVAL;
 	}
 
@@ -2163,10 +2099,10 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		return -EINVAL;
 
 	diag_data = (void *)(request_data + karg.starting_offset);
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
-	    ioc->name, __func__,
-	    diag_data, karg.starting_offset, karg.bytes_to_read));
+	dctlprintk(ioc,
+		   ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+			    __func__, diag_data, karg.starting_offset,
+			    karg.bytes_to_read));
 
 	/* Truncate data on requests that are too large */
 	if ((diag_data + karg.bytes_to_read < diag_data) ||
@@ -2177,39 +2113,36 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 
 	if (copy_to_user((void __user *)uarg->diagnostic_data,
 	    diag_data, copy_size)) {
-		pr_err(MPT3SAS_FMT
-		    "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
-		    ioc->name, __func__, diag_data);
+		ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+			__func__, diag_data);
 		return -EFAULT;
 	}
 
 	if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
 		return 0;
 
-	dctlprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: Reregister buffer_type(0x%02x)\n",
-	    ioc->name, __func__, buffer_type));
+	dctlprintk(ioc,
+		   ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
+			    __func__, buffer_type));
 	if ((ioc->diag_buffer_status[buffer_type] &
 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: buffer_type(0x%02x) is still registered\n",
-		    ioc->name, __func__, buffer_type));
+		dctlprintk(ioc,
+			   ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
+				    __func__, buffer_type));
 		return 0;
 	}
 	/* Get a free request frame and save the message context.
 	*/
 
 	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -2247,8 +2180,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 
 	/* process the completed Reply Message Frame */
 	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
-		pr_err(MPT3SAS_FMT "%s: no reply message\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: no reply message\n", __func__);
 		rc = -EFAULT;
 		goto out;
 	}
@@ -2259,13 +2191,11 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 		ioc->diag_buffer_status[buffer_type] |=
 		    MPT3_DIAG_BUFFER_IS_REGISTERED;
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
-		    ioc->name, __func__));
+		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
 	} else {
-		pr_info(MPT3SAS_FMT
-		    "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
-		    ioc->name, __func__,
-		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+			 __func__, ioc_status,
+			 le32_to_cpu(mpi_reply->IOCLogInfo));
 		rc = -EFAULT;
 	}
 
@@ -2450,8 +2380,9 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
 		ret = _ctl_diag_read_buffer(ioc, arg);
 		break;
 	default:
-		dctlprintk(ioc, pr_info(MPT3SAS_FMT
-		    "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+		dctlprintk(ioc,
+			   ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
+				    cmd));
 		break;
 	}
 
@@ -2840,8 +2771,8 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
 		return -EINVAL;
 
 	ioc->logging_level = val;
-	pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
-	    ioc->logging_level);
+	ioc_info(ioc, "logging_level=%08xh\n",
+		 ioc->logging_level);
 	return strlen(buf);
 }
 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
@@ -2877,8 +2808,8 @@ _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
 		return -EINVAL;
 
 	ioc->fwfault_debug = val;
-	pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
-	    ioc->fwfault_debug);
+	ioc_info(ioc, "fwfault_debug=%d\n",
+		 ioc->fwfault_debug);
 	return strlen(buf);
 }
 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
@@ -2958,8 +2889,8 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
 	ssize_t rc = 0;
 
 	if (!ioc->is_warpdrive) {
-		pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
-		    " warpdrive\n", ioc->name, __func__);
+		ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
+			__func__);
 		goto out;
 	}
 	/* pci_access_mutex lock acquired by sysfs show path */
@@ -2973,30 +2904,28 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
 	sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
 	io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
 	if (!io_unit_pg3) {
-		pr_err(MPT3SAS_FMT "%s: failed allocating memory "
-		    "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
+		ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
+			__func__, sz);
 		goto out;
 	}
 
 	if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
 	    0) {
-		pr_err(MPT3SAS_FMT
-		    "%s: failed reading iounit_pg3\n", ioc->name,
-		    __func__);
+		ioc_err(ioc, "%s: failed reading iounit_pg3\n",
+			__func__);
 		goto out;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
-		    "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
+		ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
+			__func__, ioc_status);
 		goto out;
 	}
 
 	if (io_unit_pg3->GPIOCount < 25) {
-		pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
-		    "25 entries, detected (%d) entries\n", ioc->name, __func__,
-		    io_unit_pg3->GPIOCount);
+		ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
+			__func__, io_unit_pg3->GPIOCount);
 		goto out;
 	}
 
@@ -3039,17 +2968,15 @@ _ctl_host_trace_buffer_size_show(struct device *cdev,
 	struct DIAG_BUFFER_START *request_data;
 
 	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
-		pr_err(MPT3SAS_FMT
-		    "%s: host_trace_buffer is not registered\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+			__func__);
 		return 0;
 	}
 
 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-		    "%s: host_trace_buffer is not registered\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+			__func__);
 		return 0;
 	}
 
@@ -3089,17 +3016,15 @@ _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
 	u32 size;
 
 	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
-		pr_err(MPT3SAS_FMT
-		    "%s: host_trace_buffer is not registered\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+			__func__);
 		return 0;
 	}
 
 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
 	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-		pr_err(MPT3SAS_FMT
-		    "%s: host_trace_buffer is not registered\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+			__func__);
 		return 0;
 	}
 
@@ -3188,8 +3113,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
3188 MPT3_DIAG_BUFFER_IS_RELEASED) == 0)) 3113 MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
3189 goto out; 3114 goto out;
3190 memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); 3115 memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
3191 pr_info(MPT3SAS_FMT "posting host trace buffers\n", 3116 ioc_info(ioc, "posting host trace buffers\n");
3192 ioc->name);
3193 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; 3117 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
3194 diag_register.requested_buffer_size = (1024 * 1024); 3118 diag_register.requested_buffer_size = (1024 * 1024);
3195 diag_register.unique_id = 0x7075900; 3119 diag_register.unique_id = 0x7075900;
@@ -3205,8 +3129,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
3205 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3129 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3206 MPT3_DIAG_BUFFER_IS_RELEASED)) 3130 MPT3_DIAG_BUFFER_IS_RELEASED))
3207 goto out; 3131 goto out;
3208 pr_info(MPT3SAS_FMT "releasing host trace buffer\n", 3132 ioc_info(ioc, "releasing host trace buffer\n");
3209 ioc->name);
3210 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, 3133 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
3211 &issue_reset); 3134 &issue_reset);
3212 } 3135 }
@@ -3658,8 +3581,10 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
3658 if ((ioc->diag_buffer_status[i] & 3581 if ((ioc->diag_buffer_status[i] &
3659 MPT3_DIAG_BUFFER_IS_RELEASED)) 3582 MPT3_DIAG_BUFFER_IS_RELEASED))
3660 continue; 3583 continue;
3661 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], 3584 dma_free_coherent(&ioc->pdev->dev,
3662 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); 3585 ioc->diag_buffer_sz[i],
3586 ioc->diag_buffer[i],
3587 ioc->diag_buffer_dma[i]);
3663 ioc->diag_buffer[i] = NULL; 3588 ioc->diag_buffer[i] = NULL;
3664 ioc->diag_buffer_status[i] = 0; 3589 ioc->diag_buffer_status[i] = 0;
3665 } 3590 }
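The last mpt3sas_ctl.c hunk belongs to the tree-wide move to the generic DMA API called out in the merge summary. pci_free_consistent() has long been a thin compatibility shim over dma_free_coherent(), so the substitution is mechanical; a sketch of the assumed wrapper, modeled on pci-dma-compat.h:

	/* Assumed legacy wrapper: arguments map one-to-one onto the
	 * generic call, with the pci_dev replaced by its embedded
	 * struct device. */
	static inline void
	pci_free_consistent(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle)
	{
		dma_free_coherent(hwdev ? &hwdev->dev : NULL,
				  size, vaddr, dma_handle);
	}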
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 86eaa893adfc..03c52847ed07 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -418,8 +418,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
418 418
419 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 419 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
420 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 420 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
421 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, 421 ioc_err(ioc, "failure at %s:%d/%s()!\n",
422 __FILE__, __LINE__, __func__); 422 __FILE__, __LINE__, __func__);
423 return -ENXIO; 423 return -ENXIO;
424 } 424 }
425 425
@@ -442,10 +442,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
442 return -ENXIO; 442 return -ENXIO;
443 443
444 /* else error case */ 444 /* else error case */
445 pr_err(MPT3SAS_FMT 445 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
446 "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", 446 handle, ioc_status, __FILE__, __LINE__, __func__);
447 ioc->name, handle, ioc_status,
448 __FILE__, __LINE__, __func__);
449 return -EIO; 447 return -EIO;
450} 448}
451 449
@@ -508,10 +506,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
508 (ioc->bios_pg2.ReqBootDeviceForm & 506 (ioc->bios_pg2.ReqBootDeviceForm &
509 MPI2_BIOSPAGE2_FORM_MASK), 507 MPI2_BIOSPAGE2_FORM_MASK),
510 &ioc->bios_pg2.RequestedBootDevice)) { 508 &ioc->bios_pg2.RequestedBootDevice)) {
511 dinitprintk(ioc, pr_info(MPT3SAS_FMT 509 dinitprintk(ioc,
512 "%s: req_boot_device(0x%016llx)\n", 510 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
513 ioc->name, __func__, 511 __func__, (u64)sas_address));
514 (unsigned long long)sas_address));
515 ioc->req_boot_device.device = device; 512 ioc->req_boot_device.device = device;
516 ioc->req_boot_device.channel = channel; 513 ioc->req_boot_device.channel = channel;
517 } 514 }
@@ -523,10 +520,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
523 (ioc->bios_pg2.ReqAltBootDeviceForm & 520 (ioc->bios_pg2.ReqAltBootDeviceForm &
524 MPI2_BIOSPAGE2_FORM_MASK), 521 MPI2_BIOSPAGE2_FORM_MASK),
525 &ioc->bios_pg2.RequestedAltBootDevice)) { 522 &ioc->bios_pg2.RequestedAltBootDevice)) {
526 dinitprintk(ioc, pr_info(MPT3SAS_FMT 523 dinitprintk(ioc,
527 "%s: req_alt_boot_device(0x%016llx)\n", 524 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
528 ioc->name, __func__, 525 __func__, (u64)sas_address));
529 (unsigned long long)sas_address));
530 ioc->req_alt_boot_device.device = device; 526 ioc->req_alt_boot_device.device = device;
531 ioc->req_alt_boot_device.channel = channel; 527 ioc->req_alt_boot_device.channel = channel;
532 } 528 }
@@ -538,10 +534,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
538 (ioc->bios_pg2.CurrentBootDeviceForm & 534 (ioc->bios_pg2.CurrentBootDeviceForm &
539 MPI2_BIOSPAGE2_FORM_MASK), 535 MPI2_BIOSPAGE2_FORM_MASK),
540 &ioc->bios_pg2.CurrentBootDevice)) { 536 &ioc->bios_pg2.CurrentBootDevice)) {
541 dinitprintk(ioc, pr_info(MPT3SAS_FMT 537 dinitprintk(ioc,
542 "%s: current_boot_device(0x%016llx)\n", 538 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
543 ioc->name, __func__, 539 __func__, (u64)sas_address));
544 (unsigned long long)sas_address));
545 ioc->current_boot_device.device = device; 540 ioc->current_boot_device.device = device;
546 ioc->current_boot_device.channel = channel; 541 ioc->current_boot_device.channel = channel;
547 } 542 }
@@ -752,19 +747,16 @@ _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
752 sas_device->chassis_slot); 747 sas_device->chassis_slot);
753 } else { 748 } else {
754 if (sas_device->enclosure_handle != 0) 749 if (sas_device->enclosure_handle != 0)
755 pr_info(MPT3SAS_FMT 750 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
756 "enclosure logical id(0x%016llx), slot(%d) \n", 751 (u64)sas_device->enclosure_logical_id,
757 ioc->name, (unsigned long long) 752 sas_device->slot);
758 sas_device->enclosure_logical_id,
759 sas_device->slot);
760 if (sas_device->connector_name[0] != '\0') 753 if (sas_device->connector_name[0] != '\0')
761 pr_info(MPT3SAS_FMT 754 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
762 "enclosure level(0x%04x), connector name( %s)\n", 755 sas_device->enclosure_level,
763 ioc->name, sas_device->enclosure_level, 756 sas_device->connector_name);
764 sas_device->connector_name);
765 if (sas_device->is_chassis_slot_valid) 757 if (sas_device->is_chassis_slot_valid)
766 pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n", 758 ioc_info(ioc, "chassis slot(0x%04x)\n",
767 ioc->name, sas_device->chassis_slot); 759 sas_device->chassis_slot);
768 } 760 }
769} 761}
770 762
@@ -784,10 +776,8 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
784 776
785 if (!sas_device) 777 if (!sas_device)
786 return; 778 return;
787 pr_info(MPT3SAS_FMT 779 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
788 "removing handle(0x%04x), sas_addr(0x%016llx)\n", 780 sas_device->handle, (u64)sas_device->sas_address);
789 ioc->name, sas_device->handle,
790 (unsigned long long) sas_device->sas_address);
791 781
792 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); 782 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
793 783
@@ -872,10 +862,10 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
872{ 862{
873 unsigned long flags; 863 unsigned long flags;
874 864
875 dewtprintk(ioc, pr_info(MPT3SAS_FMT 865 dewtprintk(ioc,
876 "%s: handle(0x%04x), sas_addr(0x%016llx)\n", 866 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
877 ioc->name, __func__, sas_device->handle, 867 __func__, sas_device->handle,
878 (unsigned long long)sas_device->sas_address)); 868 (u64)sas_device->sas_address));
879 869
880 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 870 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
881 NULL, NULL)); 871 NULL, NULL));
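The (unsigned long long) -> (u64) cast swaps in this and the surrounding hunks are safe shorthand: the in-kernel u64 is unsigned long long on every architecture (per the asm-generic int-ll64.h definitions), so the %016llx specifiers still match their arguments. Illustratively:

	/* Equivalent printk arguments; sas_address is a u64 field. */
	ioc_info(ioc, "sas_addr(0x%016llx)\n",
		 (unsigned long long)sas_device->sas_address);
	ioc_info(ioc, "sas_addr(0x%016llx)\n",
		 (u64)sas_device->sas_address);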
@@ -923,10 +913,10 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
923{ 913{
924 unsigned long flags; 914 unsigned long flags;
925 915
926 dewtprintk(ioc, pr_info(MPT3SAS_FMT 916 dewtprintk(ioc,
927 "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, 917 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
928 __func__, sas_device->handle, 918 __func__, sas_device->handle,
929 (unsigned long long)sas_device->sas_address)); 919 (u64)sas_device->sas_address));
930 920
931 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 921 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
932 NULL, NULL)); 922 NULL, NULL));
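Where a message sits inside dewtprintk()/dtmprintk()-style wrappers, as in the two hunks above, the ioc_info() call runs only when the matching bit is set in ioc->logging_level, so the conversion keeps the wrapper and merely reflows its argument. A sketch of the assumed wrapper form, following the d*printk pattern in mpt3sas_debug.h:

	/* Assumed conditional debug wrapper: CMD is compiled in but
	 * only executes when the logging bit is enabled at runtime. */
	#define dewtprintk(IOC, CMD)					\
	do {								\
		if ((IOC)->logging_level & MPT_DEBUG_EXIT_WORK_TASK)	\
			CMD;						\
	} while (0)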
@@ -1073,21 +1063,16 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1073 1063
1074 if (!pcie_device) 1064 if (!pcie_device)
1075 return; 1065 return;
1076 pr_info(MPT3SAS_FMT 1066 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1077 "removing handle(0x%04x), wwid(0x%016llx)\n", 1067 pcie_device->handle, (u64)pcie_device->wwid);
1078 ioc->name, pcie_device->handle,
1079 (unsigned long long) pcie_device->wwid);
1080 if (pcie_device->enclosure_handle != 0) 1068 if (pcie_device->enclosure_handle != 0)
1081 pr_info(MPT3SAS_FMT 1069 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1082 "removing enclosure logical id(0x%016llx), slot(%d)\n", 1070 (u64)pcie_device->enclosure_logical_id,
1083 ioc->name, 1071 pcie_device->slot);
1084 (unsigned long long)pcie_device->enclosure_logical_id,
1085 pcie_device->slot);
1086 if (pcie_device->connector_name[0] != '\0') 1072 if (pcie_device->connector_name[0] != '\0')
1087 pr_info(MPT3SAS_FMT 1073 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1088 "removing enclosure level(0x%04x), connector name( %s)\n", 1074 pcie_device->enclosure_level,
1089 ioc->name, pcie_device->enclosure_level, 1075 pcie_device->connector_name);
1090 pcie_device->connector_name);
1091 1076
1092 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1077 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1093 if (!list_empty(&pcie_device->list)) { 1078 if (!list_empty(&pcie_device->list)) {
@@ -1146,20 +1131,21 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1146{ 1131{
1147 unsigned long flags; 1132 unsigned long flags;
1148 1133
1149 dewtprintk(ioc, pr_info(MPT3SAS_FMT 1134 dewtprintk(ioc,
1150 "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, 1135 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1151 pcie_device->handle, (unsigned long long)pcie_device->wwid)); 1136 __func__,
1137 pcie_device->handle, (u64)pcie_device->wwid));
1152 if (pcie_device->enclosure_handle != 0) 1138 if (pcie_device->enclosure_handle != 0)
1153 dewtprintk(ioc, pr_info(MPT3SAS_FMT 1139 dewtprintk(ioc,
1154 "%s: enclosure logical id(0x%016llx), slot( %d)\n", 1140 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1155 ioc->name, __func__, 1141 __func__,
1156 (unsigned long long)pcie_device->enclosure_logical_id, 1142 (u64)pcie_device->enclosure_logical_id,
1157 pcie_device->slot)); 1143 pcie_device->slot));
1158 if (pcie_device->connector_name[0] != '\0') 1144 if (pcie_device->connector_name[0] != '\0')
1159 dewtprintk(ioc, pr_info(MPT3SAS_FMT 1145 dewtprintk(ioc,
1160 "%s: enclosure level(0x%04x), connector name( %s)\n", 1146 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1161 ioc->name, __func__, pcie_device->enclosure_level, 1147 __func__, pcie_device->enclosure_level,
1162 pcie_device->connector_name)); 1148 pcie_device->connector_name));
1163 1149
1164 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1150 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1165 pcie_device_get(pcie_device); 1151 pcie_device_get(pcie_device);
@@ -1191,20 +1177,21 @@ _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1191{ 1177{
1192 unsigned long flags; 1178 unsigned long flags;
1193 1179
1194 dewtprintk(ioc, pr_info(MPT3SAS_FMT 1180 dewtprintk(ioc,
1195 "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, 1181 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1196 pcie_device->handle, (unsigned long long)pcie_device->wwid)); 1182 __func__,
1183 pcie_device->handle, (u64)pcie_device->wwid));
1197 if (pcie_device->enclosure_handle != 0) 1184 if (pcie_device->enclosure_handle != 0)
1198 dewtprintk(ioc, pr_info(MPT3SAS_FMT 1185 dewtprintk(ioc,
1199 "%s: enclosure logical id(0x%016llx), slot( %d)\n", 1186 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1200 ioc->name, __func__, 1187 __func__,
1201 (unsigned long long)pcie_device->enclosure_logical_id, 1188 (u64)pcie_device->enclosure_logical_id,
1202 pcie_device->slot)); 1189 pcie_device->slot));
1203 if (pcie_device->connector_name[0] != '\0') 1190 if (pcie_device->connector_name[0] != '\0')
1204 dewtprintk(ioc, pr_info(MPT3SAS_FMT 1191 dewtprintk(ioc,
1205 "%s: enclosure level(0x%04x), connector name( %s)\n", 1192 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1206 ioc->name, __func__, pcie_device->enclosure_level, 1193 __func__, pcie_device->enclosure_level,
1207 pcie_device->connector_name)); 1194 pcie_device->connector_name));
1208 1195
1209 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1196 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1210 pcie_device_get(pcie_device); 1197 pcie_device_get(pcie_device);
@@ -1304,9 +1291,10 @@ _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1304{ 1291{
1305 unsigned long flags; 1292 unsigned long flags;
1306 1293
1307 dewtprintk(ioc, pr_info(MPT3SAS_FMT 1294 dewtprintk(ioc,
1308 "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, 1295 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1309 raid_device->handle, (unsigned long long)raid_device->wwid)); 1296 __func__,
1297 raid_device->handle, (u64)raid_device->wwid));
1310 1298
1311 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1299 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1312 list_add_tail(&raid_device->list, &ioc->raid_device_list); 1300 list_add_tail(&raid_device->list, &ioc->raid_device_list);
@@ -1857,16 +1845,16 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1857 1845
1858 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 1846 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1859 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 1847 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1860 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 1848 ioc_err(ioc, "failure at %s:%d/%s()!\n",
1861 ioc->name, __FILE__, __LINE__, __func__); 1849 __FILE__, __LINE__, __func__);
1862 return; 1850 return;
1863 } 1851 }
1864 1852
1865 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 1853 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1866 MPI2_IOCSTATUS_MASK; 1854 MPI2_IOCSTATUS_MASK;
1867 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 1855 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1868 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 1856 ioc_err(ioc, "failure at %s:%d/%s()!\n",
1869 ioc->name, __FILE__, __LINE__, __func__); 1857 __FILE__, __LINE__, __func__);
1870 return; 1858 return;
1871 } 1859 }
1872 1860
@@ -1952,8 +1940,8 @@ scsih_get_resync(struct device *dev)
1952 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 1940 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1953 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 1941 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1954 sizeof(Mpi2RaidVolPage0_t))) { 1942 sizeof(Mpi2RaidVolPage0_t))) {
1955 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 1943 ioc_err(ioc, "failure at %s:%d/%s()!\n",
1956 ioc->name, __FILE__, __LINE__, __func__); 1944 __FILE__, __LINE__, __func__);
1957 percent_complete = 0; 1945 percent_complete = 0;
1958 goto out; 1946 goto out;
1959 } 1947 }
@@ -2006,8 +1994,8 @@ scsih_get_state(struct device *dev)
2006 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 1994 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2007 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 1995 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2008 sizeof(Mpi2RaidVolPage0_t))) { 1996 sizeof(Mpi2RaidVolPage0_t))) {
2009 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 1997 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2010 ioc->name, __FILE__, __LINE__, __func__); 1998 __FILE__, __LINE__, __func__);
2011 goto out; 1999 goto out;
2012 } 2000 }
2013 2001
@@ -2103,9 +2091,9 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2103 2091
2104 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, 2092 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2105 &num_pds)) || !num_pds) { 2093 &num_pds)) || !num_pds) {
2106 dfailprintk(ioc, pr_warn(MPT3SAS_FMT 2094 dfailprintk(ioc,
2107 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, 2095 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2108 __func__)); 2096 __FILE__, __LINE__, __func__));
2109 return 1; 2097 return 1;
2110 } 2098 }
2111 2099
@@ -2114,17 +2102,17 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2114 sizeof(Mpi2RaidVol0PhysDisk_t)); 2102 sizeof(Mpi2RaidVol0PhysDisk_t));
2115 vol_pg0 = kzalloc(sz, GFP_KERNEL); 2103 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2116 if (!vol_pg0) { 2104 if (!vol_pg0) {
2117 dfailprintk(ioc, pr_warn(MPT3SAS_FMT 2105 dfailprintk(ioc,
2118 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, 2106 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2119 __func__)); 2107 __FILE__, __LINE__, __func__));
2120 return 1; 2108 return 1;
2121 } 2109 }
2122 2110
2123 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, 2111 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2124 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { 2112 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2125 dfailprintk(ioc, pr_warn(MPT3SAS_FMT 2113 dfailprintk(ioc,
2126 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, 2114 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2127 __func__)); 2115 __FILE__, __LINE__, __func__));
2128 kfree(vol_pg0); 2116 kfree(vol_pg0);
2129 return 1; 2117 return 1;
2130 } 2118 }
@@ -2215,16 +2203,16 @@ scsih_slave_configure(struct scsi_device *sdev)
2215 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 2203 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2216 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2204 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2217 if (!raid_device) { 2205 if (!raid_device) {
2218 dfailprintk(ioc, pr_warn(MPT3SAS_FMT 2206 dfailprintk(ioc,
2219 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, 2207 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2220 __LINE__, __func__)); 2208 __FILE__, __LINE__, __func__));
2221 return 1; 2209 return 1;
2222 } 2210 }
2223 2211
2224 if (_scsih_get_volume_capabilities(ioc, raid_device)) { 2212 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2225 dfailprintk(ioc, pr_warn(MPT3SAS_FMT 2213 dfailprintk(ioc,
2226 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, 2214 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2227 __LINE__, __func__)); 2215 __FILE__, __LINE__, __func__));
2228 return 1; 2216 return 1;
2229 } 2217 }
2230 2218
@@ -2308,16 +2296,16 @@ scsih_slave_configure(struct scsi_device *sdev)
2308 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { 2296 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2309 if (mpt3sas_config_get_volume_handle(ioc, handle, 2297 if (mpt3sas_config_get_volume_handle(ioc, handle,
2310 &volume_handle)) { 2298 &volume_handle)) {
2311 dfailprintk(ioc, pr_warn(MPT3SAS_FMT 2299 dfailprintk(ioc,
2312 "failure at %s:%d/%s()!\n", ioc->name, 2300 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2313 __FILE__, __LINE__, __func__)); 2301 __FILE__, __LINE__, __func__));
2314 return 1; 2302 return 1;
2315 } 2303 }
2316 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, 2304 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2317 volume_handle, &volume_wwid)) { 2305 volume_handle, &volume_wwid)) {
2318 dfailprintk(ioc, pr_warn(MPT3SAS_FMT 2306 dfailprintk(ioc,
2319 "failure at %s:%d/%s()!\n", ioc->name, 2307 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2320 __FILE__, __LINE__, __func__)); 2308 __FILE__, __LINE__, __func__));
2321 return 1; 2309 return 1;
2322 } 2310 }
2323 } 2311 }
@@ -2329,9 +2317,9 @@ scsih_slave_configure(struct scsi_device *sdev)
2329 sas_device_priv_data->sas_target->sas_address); 2317 sas_device_priv_data->sas_target->sas_address);
2330 if (!pcie_device) { 2318 if (!pcie_device) {
2331 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2319 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2332 dfailprintk(ioc, pr_warn(MPT3SAS_FMT 2320 dfailprintk(ioc,
2333 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, 2321 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2334 __LINE__, __func__)); 2322 __FILE__, __LINE__, __func__));
2335 return 1; 2323 return 1;
2336 } 2324 }
2337 2325
@@ -2377,9 +2365,9 @@ scsih_slave_configure(struct scsi_device *sdev)
2377 sas_device_priv_data->sas_target->sas_address); 2365 sas_device_priv_data->sas_target->sas_address);
2378 if (!sas_device) { 2366 if (!sas_device) {
2379 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2367 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2380 dfailprintk(ioc, pr_warn(MPT3SAS_FMT 2368 dfailprintk(ioc,
2381 "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, 2369 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2382 __func__)); 2370 __FILE__, __LINE__, __func__));
2383 return 1; 2371 return 1;
2384 } 2372 }
2385 2373
@@ -2515,8 +2503,7 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2515 desc = "unknown"; 2503 desc = "unknown";
2516 break; 2504 break;
2517 } 2505 }
2518 pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n", 2506 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2519 ioc->name, response_code, desc);
2520} 2507}
2521 2508
2522/** 2509/**
@@ -2640,22 +2627,19 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2640 lockdep_assert_held(&ioc->tm_cmds.mutex); 2627 lockdep_assert_held(&ioc->tm_cmds.mutex);
2641 2628
2642 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) { 2629 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2643 pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n", 2630 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2644 __func__, ioc->name);
2645 return FAILED; 2631 return FAILED;
2646 } 2632 }
2647 2633
2648 if (ioc->shost_recovery || ioc->remove_host || 2634 if (ioc->shost_recovery || ioc->remove_host ||
2649 ioc->pci_error_recovery) { 2635 ioc->pci_error_recovery) {
2650 pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", 2636 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2651 __func__, ioc->name);
2652 return FAILED; 2637 return FAILED;
2653 } 2638 }
2654 2639
2655 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 2640 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2656 if (ioc_state & MPI2_DOORBELL_USED) { 2641 if (ioc_state & MPI2_DOORBELL_USED) {
2657 dhsprintk(ioc, pr_info(MPT3SAS_FMT 2642 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2658 "unexpected doorbell active!\n", ioc->name));
2659 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 2643 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2660 return (!rc) ? SUCCESS : FAILED; 2644 return (!rc) ? SUCCESS : FAILED;
2661 } 2645 }
@@ -2669,14 +2653,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2669 2653
2670 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx); 2654 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2671 if (!smid) { 2655 if (!smid) {
2672 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 2656 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2673 ioc->name, __func__);
2674 return FAILED; 2657 return FAILED;
2675 } 2658 }
2676 2659
2677 dtmprintk(ioc, pr_info(MPT3SAS_FMT 2660 dtmprintk(ioc,
2678 "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n", 2661 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2679 ioc->name, handle, type, smid_task, timeout, tr_method)); 2662 handle, type, smid_task, timeout, tr_method));
2680 ioc->tm_cmds.status = MPT3_CMD_PENDING; 2663 ioc->tm_cmds.status = MPT3_CMD_PENDING;
2681 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2664 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2682 ioc->tm_cmds.smid = smid; 2665 ioc->tm_cmds.smid = smid;
@@ -2709,11 +2692,11 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2709 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { 2692 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2710 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); 2693 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2711 mpi_reply = ioc->tm_cmds.reply; 2694 mpi_reply = ioc->tm_cmds.reply;
2712 dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \ 2695 dtmprintk(ioc,
2713 "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n", 2696 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2714 ioc->name, le16_to_cpu(mpi_reply->IOCStatus), 2697 le16_to_cpu(mpi_reply->IOCStatus),
2715 le32_to_cpu(mpi_reply->IOCLogInfo), 2698 le32_to_cpu(mpi_reply->IOCLogInfo),
2716 le32_to_cpu(mpi_reply->TerminationCount))); 2699 le32_to_cpu(mpi_reply->TerminationCount)));
2717 if (ioc->logging_level & MPT_DEBUG_TM) { 2700 if (ioc->logging_level & MPT_DEBUG_TM) {
2718 _scsih_response_code(ioc, mpi_reply->ResponseCode); 2701 _scsih_response_code(ioc, mpi_reply->ResponseCode);
2719 if (mpi_reply->IOCStatus) 2702 if (mpi_reply->IOCStatus)
@@ -3060,13 +3043,11 @@ scsih_host_reset(struct scsi_cmnd *scmd)
3060 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 3043 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3061 int r, retval; 3044 int r, retval;
3062 3045
3063 pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n", 3046 ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
3064 ioc->name, scmd);
3065 scsi_print_command(scmd); 3047 scsi_print_command(scmd);
3066 3048
3067 if (ioc->is_driver_loading || ioc->remove_host) { 3049 if (ioc->is_driver_loading || ioc->remove_host) {
3068 pr_info(MPT3SAS_FMT "Blocking the host reset\n", 3050 ioc_info(ioc, "Blocking the host reset\n");
3069 ioc->name);
3070 r = FAILED; 3051 r = FAILED;
3071 goto out; 3052 goto out;
3072 } 3053 }
@@ -3074,8 +3055,8 @@ scsih_host_reset(struct scsi_cmnd *scmd)
3074 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 3055 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3075 r = (retval < 0) ? FAILED : SUCCESS; 3056 r = (retval < 0) ? FAILED : SUCCESS;
3076out: 3057out:
3077 pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n", 3058 ioc_info(ioc, "host reset: %s scmd(%p)\n",
3078 ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 3059 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3079 3060
3080 return r; 3061 return r;
3081} 3062}
@@ -3567,18 +3548,16 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3567 u8 tr_method = 0; 3548 u8 tr_method = 0;
3568 3549
3569 if (ioc->pci_error_recovery) { 3550 if (ioc->pci_error_recovery) {
3570 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3551 dewtprintk(ioc,
3571 "%s: host in pci error recovery: handle(0x%04x)\n", 3552 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3572 __func__, ioc->name, 3553 __func__, handle));
3573 handle));
3574 return; 3554 return;
3575 } 3555 }
3576 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 3556 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3577 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 3557 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3578 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3558 dewtprintk(ioc,
3579 "%s: host is not operational: handle(0x%04x)\n", 3559 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3580 __func__, ioc->name, 3560 __func__, handle));
3581 handle));
3582 return; 3561 return;
3583 } 3562 }
3584 3563
@@ -3614,39 +3593,31 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3614 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 3593 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3615 } 3594 }
3616 if (sas_target_priv_data) { 3595 if (sas_target_priv_data) {
3617 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3596 dewtprintk(ioc,
3618 "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", 3597 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3619 ioc->name, handle, 3598 handle, (u64)sas_address));
3620 (unsigned long long)sas_address));
3621 if (sas_device) { 3599 if (sas_device) {
3622 if (sas_device->enclosure_handle != 0) 3600 if (sas_device->enclosure_handle != 0)
3623 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3601 dewtprintk(ioc,
3624 "setting delete flag:enclosure logical " 3602 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3625 "id(0x%016llx), slot(%d)\n", ioc->name, 3603 (u64)sas_device->enclosure_logical_id,
3626 (unsigned long long) 3604 sas_device->slot));
3627 sas_device->enclosure_logical_id,
3628 sas_device->slot));
3629 if (sas_device->connector_name[0] != '\0') 3605 if (sas_device->connector_name[0] != '\0')
3630 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3606 dewtprintk(ioc,
3631 "setting delete flag: enclosure " 3607 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3632 "level(0x%04x), connector name( %s)\n", 3608 sas_device->enclosure_level,
3633 ioc->name, sas_device->enclosure_level, 3609 sas_device->connector_name));
3634 sas_device->connector_name));
3635 } else if (pcie_device) { 3610 } else if (pcie_device) {
3636 if (pcie_device->enclosure_handle != 0) 3611 if (pcie_device->enclosure_handle != 0)
3637 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3612 dewtprintk(ioc,
3638 "setting delete flag: logical " 3613 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3639 "id(0x%016llx), slot(%d)\n", ioc->name, 3614 (u64)pcie_device->enclosure_logical_id,
3640 (unsigned long long) 3615 pcie_device->slot));
3641 pcie_device->enclosure_logical_id,
3642 pcie_device->slot));
3643 if (pcie_device->connector_name[0] != '\0') 3616 if (pcie_device->connector_name[0] != '\0')
3644 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3617 dewtprintk(ioc,
3645 "setting delete flag:, enclosure " 3618 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
3646 "level(0x%04x), " 3619 pcie_device->enclosure_level,
3647 "connector name( %s)\n", ioc->name, 3620 pcie_device->connector_name));
3648 pcie_device->enclosure_level,
3649 pcie_device->connector_name));
3650 } 3621 }
3651 _scsih_ublock_io_device(ioc, sas_address); 3622 _scsih_ublock_io_device(ioc, sas_address);
3652 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; 3623 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
@@ -3660,16 +3631,15 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3660 INIT_LIST_HEAD(&delayed_tr->list); 3631 INIT_LIST_HEAD(&delayed_tr->list);
3661 delayed_tr->handle = handle; 3632 delayed_tr->handle = handle;
3662 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); 3633 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3663 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3634 dewtprintk(ioc,
3664 "DELAYED:tr:handle(0x%04x), (open)\n", 3635 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3665 ioc->name, handle)); 3636 handle));
3666 goto out; 3637 goto out;
3667 } 3638 }
3668 3639
3669 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3640 dewtprintk(ioc,
3670 "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 3641 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3671 ioc->name, handle, smid, 3642 handle, smid, ioc->tm_tr_cb_idx));
3672 ioc->tm_tr_cb_idx));
3673 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 3643 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3674 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); 3644 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3675 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 3645 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3717,39 +3687,39 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3717 struct _sc_list *delayed_sc; 3687 struct _sc_list *delayed_sc;
3718 3688
3719 if (ioc->pci_error_recovery) { 3689 if (ioc->pci_error_recovery) {
3720 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3690 dewtprintk(ioc,
3721 "%s: host in pci error recovery\n", __func__, 3691 ioc_info(ioc, "%s: host in pci error recovery\n",
3722 ioc->name)); 3692 __func__));
3723 return 1; 3693 return 1;
3724 } 3694 }
3725 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 3695 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3726 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 3696 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3727 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3697 dewtprintk(ioc,
3728 "%s: host is not operational\n", __func__, ioc->name)); 3698 ioc_info(ioc, "%s: host is not operational\n",
3699 __func__));
3729 return 1; 3700 return 1;
3730 } 3701 }
3731 if (unlikely(!mpi_reply)) { 3702 if (unlikely(!mpi_reply)) {
3732 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", 3703 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3733 ioc->name, __FILE__, __LINE__, __func__); 3704 __FILE__, __LINE__, __func__);
3734 return 1; 3705 return 1;
3735 } 3706 }
3736 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); 3707 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3737 handle = le16_to_cpu(mpi_request_tm->DevHandle); 3708 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3738 if (handle != le16_to_cpu(mpi_reply->DevHandle)) { 3709 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3739 dewtprintk(ioc, pr_err(MPT3SAS_FMT 3710 dewtprintk(ioc,
3740 "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", 3711 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3741 ioc->name, handle, 3712 handle,
3742 le16_to_cpu(mpi_reply->DevHandle), smid)); 3713 le16_to_cpu(mpi_reply->DevHandle), smid));
3743 return 0; 3714 return 0;
3744 } 3715 }
3745 3716
3746 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); 3717 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3747 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3718 dewtprintk(ioc,
3748 "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), " 3719 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3749 "loginfo(0x%08x), completed(%d)\n", ioc->name, 3720 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3750 handle, smid, le16_to_cpu(mpi_reply->IOCStatus), 3721 le32_to_cpu(mpi_reply->IOCLogInfo),
3751 le32_to_cpu(mpi_reply->IOCLogInfo), 3722 le32_to_cpu(mpi_reply->TerminationCount)));
3752 le32_to_cpu(mpi_reply->TerminationCount)));
3753 3723
3754 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); 3724 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
3755 if (!smid_sas_ctrl) { 3725 if (!smid_sas_ctrl) {
@@ -3759,16 +3729,15 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3759 INIT_LIST_HEAD(&delayed_sc->list); 3729 INIT_LIST_HEAD(&delayed_sc->list);
3760 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle); 3730 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3761 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list); 3731 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3762 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3732 dewtprintk(ioc,
3763 "DELAYED:sc:handle(0x%04x), (open)\n", 3733 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
3764 ioc->name, handle)); 3734 handle));
3765 return _scsih_check_for_pending_tm(ioc, smid); 3735 return _scsih_check_for_pending_tm(ioc, smid);
3766 } 3736 }
3767 3737
3768 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3738 dewtprintk(ioc,
3769 "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 3739 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3770 ioc->name, handle, smid_sas_ctrl, 3740 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
3771 ioc->tm_sas_control_cb_idx));
3772 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl); 3741 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
3773 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); 3742 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3774 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 3743 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -3803,20 +3772,19 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3803 mpt3sas_base_get_reply_virt_addr(ioc, reply); 3772 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3804 3773
3805 if (likely(mpi_reply)) { 3774 if (likely(mpi_reply)) {
3806 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3775 dewtprintk(ioc,
3807 "sc_complete:handle(0x%04x), (open) " 3776 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3808 "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", 3777 le16_to_cpu(mpi_reply->DevHandle), smid,
3809 ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid, 3778 le16_to_cpu(mpi_reply->IOCStatus),
3810 le16_to_cpu(mpi_reply->IOCStatus), 3779 le32_to_cpu(mpi_reply->IOCLogInfo)));
3811 le32_to_cpu(mpi_reply->IOCLogInfo)));
3812 if (le16_to_cpu(mpi_reply->IOCStatus) == 3780 if (le16_to_cpu(mpi_reply->IOCStatus) ==
3813 MPI2_IOCSTATUS_SUCCESS) { 3781 MPI2_IOCSTATUS_SUCCESS) {
3814 clear_bit(le16_to_cpu(mpi_reply->DevHandle), 3782 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3815 ioc->device_remove_in_progress); 3783 ioc->device_remove_in_progress);
3816 } 3784 }
3817 } else { 3785 } else {
3818 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", 3786 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3819 ioc->name, __FILE__, __LINE__, __func__); 3787 __FILE__, __LINE__, __func__);
3820 } 3788 }
3821 return mpt3sas_check_for_pending_internal_cmds(ioc, smid); 3789 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
3822} 3790}
@@ -3839,9 +3807,9 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3839 struct _tr_list *delayed_tr; 3807 struct _tr_list *delayed_tr;
3840 3808
3841 if (ioc->pci_error_recovery) { 3809 if (ioc->pci_error_recovery) {
3842 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3810 dewtprintk(ioc,
3843 "%s: host reset in progress!\n", 3811 ioc_info(ioc, "%s: host reset in progress!\n",
3844 __func__, ioc->name)); 3812 __func__));
3845 return; 3813 return;
3846 } 3814 }
3847 3815
@@ -3853,16 +3821,15 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3853 INIT_LIST_HEAD(&delayed_tr->list); 3821 INIT_LIST_HEAD(&delayed_tr->list);
3854 delayed_tr->handle = handle; 3822 delayed_tr->handle = handle;
3855 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); 3823 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
3856 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3824 dewtprintk(ioc,
3857 "DELAYED:tr:handle(0x%04x), (open)\n", 3825 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3858 ioc->name, handle)); 3826 handle));
3859 return; 3827 return;
3860 } 3828 }
3861 3829
3862 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3830 dewtprintk(ioc,
3863 "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 3831 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3864 ioc->name, handle, smid, 3832 handle, smid, ioc->tm_tr_volume_cb_idx));
3865 ioc->tm_tr_volume_cb_idx));
3866 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 3833 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3867 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); 3834 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3868 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 3835 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3892,33 +3859,32 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3892 mpt3sas_base_get_reply_virt_addr(ioc, reply); 3859 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3893 3860
3894 if (ioc->shost_recovery || ioc->pci_error_recovery) { 3861 if (ioc->shost_recovery || ioc->pci_error_recovery) {
3895 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3862 dewtprintk(ioc,
3896 "%s: host reset in progress!\n", 3863 ioc_info(ioc, "%s: host reset in progress!\n",
3897 __func__, ioc->name)); 3864 __func__));
3898 return 1; 3865 return 1;
3899 } 3866 }
3900 if (unlikely(!mpi_reply)) { 3867 if (unlikely(!mpi_reply)) {
3901 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", 3868 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3902 ioc->name, __FILE__, __LINE__, __func__); 3869 __FILE__, __LINE__, __func__);
3903 return 1; 3870 return 1;
3904 } 3871 }
3905 3872
3906 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); 3873 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3907 handle = le16_to_cpu(mpi_request_tm->DevHandle); 3874 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3908 if (handle != le16_to_cpu(mpi_reply->DevHandle)) { 3875 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3909 dewtprintk(ioc, pr_err(MPT3SAS_FMT 3876 dewtprintk(ioc,
3910 "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", 3877 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3911 ioc->name, handle, 3878 handle, le16_to_cpu(mpi_reply->DevHandle),
3912 le16_to_cpu(mpi_reply->DevHandle), smid)); 3879 smid));
3913 return 0; 3880 return 0;
3914 } 3881 }
3915 3882
3916 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3883 dewtprintk(ioc,
3917 "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), " 3884 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3918 "loginfo(0x%08x), completed(%d)\n", ioc->name, 3885 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3919 handle, smid, le16_to_cpu(mpi_reply->IOCStatus), 3886 le32_to_cpu(mpi_reply->IOCLogInfo),
3920 le32_to_cpu(mpi_reply->IOCLogInfo), 3887 le32_to_cpu(mpi_reply->TerminationCount)));
3921 le32_to_cpu(mpi_reply->TerminationCount)));
3922 3888
3923 return _scsih_check_for_pending_tm(ioc, smid); 3889 return _scsih_check_for_pending_tm(ioc, smid);
3924} 3890}
@@ -3948,10 +3914,9 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
3948 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx; 3914 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
3949 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3915 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3950 3916
3951 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3917 dewtprintk(ioc,
3952 "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n", 3918 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
3953 ioc->name, le16_to_cpu(event), smid, 3919 le16_to_cpu(event), smid, ioc->base_cb_idx));
3954 ioc->base_cb_idx));
3955 ack_request = mpt3sas_base_get_msg_frame(ioc, smid); 3920 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
3956 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); 3921 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
3957 ack_request->Function = MPI2_FUNCTION_EVENT_ACK; 3922 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
@@ -3981,21 +3946,21 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
3981 unsigned long flags; 3946 unsigned long flags;
3982 3947
3983 if (ioc->remove_host) { 3948 if (ioc->remove_host) {
3984 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3949 dewtprintk(ioc,
3985 "%s: host has been removed\n", 3950 ioc_info(ioc, "%s: host has been removed\n",
3986 __func__, ioc->name)); 3951 __func__));
3987 return; 3952 return;
3988 } else if (ioc->pci_error_recovery) { 3953 } else if (ioc->pci_error_recovery) {
3989 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3954 dewtprintk(ioc,
3990 "%s: host in pci error recovery\n", 3955 ioc_info(ioc, "%s: host in pci error recovery\n",
3991 __func__, ioc->name)); 3956 __func__));
3992 return; 3957 return;
3993 } 3958 }
3994 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 3959 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3995 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 3960 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3996 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3961 dewtprintk(ioc,
3997 "%s: host is not operational\n", 3962 ioc_info(ioc, "%s: host is not operational\n",
3998 __func__, ioc->name)); 3963 __func__));
3999 return; 3964 return;
4000 } 3965 }
4001 3966
@@ -4007,10 +3972,9 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4007 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; 3972 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4008 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 3973 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4009 3974
4010 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3975 dewtprintk(ioc,
4011 "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 3976 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4012 ioc->name, handle, smid, 3977 handle, smid, ioc->tm_sas_control_cb_idx));
4013 ioc->tm_sas_control_cb_idx));
4014 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 3978 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4015 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); 3979 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4016 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 3980 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -4171,8 +4135,8 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4171 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { 4135 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4172 if (le16_to_cpu(local_event_data->ExpanderDevHandle) == 4136 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4173 expander_handle) { 4137 expander_handle) {
4174 dewtprintk(ioc, pr_info(MPT3SAS_FMT 4138 dewtprintk(ioc,
4175 "setting ignoring flag\n", ioc->name)); 4139 ioc_info(ioc, "setting ignoring flag\n"));
4176 fw_event->ignore = 1; 4140 fw_event->ignore = 1;
4177 } 4141 }
4178 } 4142 }
@@ -4243,9 +4207,8 @@ _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4243 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { 4207 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4244 if (le16_to_cpu(local_event_data->SwitchDevHandle) == 4208 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4245 switch_handle) { 4209 switch_handle) {
4246 dewtprintk(ioc, pr_info(MPT3SAS_FMT 4210 dewtprintk(ioc,
4247 "setting ignoring flag for switch event\n", 4211 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4248 ioc->name));
4249 fw_event->ignore = 1; 4212 fw_event->ignore = 1;
4250 } 4213 }
4251 } 4214 }
@@ -4274,10 +4237,9 @@ _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4274 sas_target_priv_data = 4237 sas_target_priv_data =
4275 raid_device->starget->hostdata; 4238 raid_device->starget->hostdata;
4276 sas_target_priv_data->deleted = 1; 4239 sas_target_priv_data->deleted = 1;
4277 dewtprintk(ioc, pr_info(MPT3SAS_FMT 4240 dewtprintk(ioc,
4278 "setting delete flag: handle(0x%04x), " 4241 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4279 "wwid(0x%016llx)\n", ioc->name, handle, 4242 handle, (u64)raid_device->wwid));
4280 (unsigned long long) raid_device->wwid));
4281 } 4243 }
4282 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 4244 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4283} 4245}
@@ -4379,9 +4341,9 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4379 INIT_LIST_HEAD(&delayed_tr->list); 4341 INIT_LIST_HEAD(&delayed_tr->list);
4380 delayed_tr->handle = handle; 4342 delayed_tr->handle = handle;
4381 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); 4343 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4382 dewtprintk(ioc, pr_info(MPT3SAS_FMT 4344 dewtprintk(ioc,
4383 "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name, 4345 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4384 handle)); 4346 handle));
4385 } else 4347 } else
4386 _scsih_tm_tr_send(ioc, handle); 4348 _scsih_tm_tr_send(ioc, handle);
4387 } 4349 }
@@ -4424,15 +4386,14 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4424 Mpi2EventDataTemperature_t *event_data) 4386 Mpi2EventDataTemperature_t *event_data)
4425{ 4387{
4426 if (ioc->temp_sensors_count >= event_data->SensorNum) { 4388 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4427 pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s" 4389 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4428 " exceeded for Sensor: %d !!!\n", ioc->name, 4390 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4429 ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ", 4391 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4430 ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ", 4392 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4431 ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ", 4393 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4432 ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ", 4394 event_data->SensorNum);
4433 event_data->SensorNum); 4395 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4434 pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n", 4396 event_data->CurrentTemperature);
4435 ioc->name, event_data->CurrentTemperature);
4436 } 4397 }
4437} 4398}
4438 4399
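The temperature-threshold hunk above also drops the explicit equality tests on the status bits; for a single-bit mask the two spellings select the same condition, so the shorter form changes no behavior. With a hypothetical status value:

	u16 status = 0x6;	/* hypothetical: bits 1 and 2 set */
	/* Both expressions are true exactly when bit 1 is set. */
	int old_form = ((status & 0x2) == 2);	/* -> 1 */
	int new_form = (status & 0x2) != 0;	/* -> 1 */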
@@ -4480,8 +4441,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4480 scmd->result = DID_RESET << 16; 4441 scmd->result = DID_RESET << 16;
4481 scmd->scsi_done(scmd); 4442 scmd->scsi_done(scmd);
4482 } 4443 }
4483 dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n", 4444 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4484 ioc->name, count));
4485} 4445}
4486 4446
4487/** 4447/**
@@ -4680,8 +4640,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4680 4640
4681 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 4641 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
4682 if (!smid) { 4642 if (!smid) {
4683 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 4643 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4684 ioc->name, __func__);
4685 _scsih_set_satl_pending(scmd, false); 4644 _scsih_set_satl_pending(scmd, false);
4686 goto out; 4645 goto out;
4687 } 4646 }
@@ -4919,37 +4878,28 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4919 scsi_print_command(scmd); 4878 scsi_print_command(scmd);
4920 4879
4921 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { 4880 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
4922 pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name, 4881 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
4923 device_str, (unsigned long long)priv_target->sas_address); 4882 device_str, (u64)priv_target->sas_address);
4924 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { 4883 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
4925 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target); 4884 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
4926 if (pcie_device) { 4885 if (pcie_device) {
4927 pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n", 4886 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
4928 ioc->name, 4887 (u64)pcie_device->wwid, pcie_device->port_num);
4929 (unsigned long long)pcie_device->wwid,
4930 pcie_device->port_num);
4931 if (pcie_device->enclosure_handle != 0) 4888 if (pcie_device->enclosure_handle != 0)
4932 pr_info(MPT3SAS_FMT 4889 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
4933 "\tenclosure logical id(0x%016llx), " 4890 (u64)pcie_device->enclosure_logical_id,
4934 "slot(%d)\n", ioc->name, 4891 pcie_device->slot);
4935 (unsigned long long)
4936 pcie_device->enclosure_logical_id,
4937 pcie_device->slot);
4938 if (pcie_device->connector_name[0]) 4892 if (pcie_device->connector_name[0])
4939 pr_info(MPT3SAS_FMT 4893 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
4940 "\tenclosure level(0x%04x)," 4894 pcie_device->enclosure_level,
4941 "connector name( %s)\n", 4895 pcie_device->connector_name);
4942 ioc->name, pcie_device->enclosure_level,
4943 pcie_device->connector_name);
4944 pcie_device_put(pcie_device); 4896 pcie_device_put(pcie_device);
4945 } 4897 }
4946 } else { 4898 } else {
4947 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target); 4899 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
4948 if (sas_device) { 4900 if (sas_device) {
4949 pr_warn(MPT3SAS_FMT 4901 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
4950 "\tsas_address(0x%016llx), phy(%d)\n", 4902 (u64)sas_device->sas_address, sas_device->phy);
4951 ioc->name, (unsigned long long)
4952 sas_device->sas_address, sas_device->phy);
4953 4903
4954 _scsih_display_enclosure_chassis_info(ioc, sas_device, 4904 _scsih_display_enclosure_chassis_info(ioc, sas_device,
4955 NULL, NULL); 4905 NULL, NULL);
@@ -4958,30 +4908,23 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4958 } 4908 }
4959 } 4909 }
4960 4910
4961 pr_warn(MPT3SAS_FMT 4911 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
4962 "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", 4912 le16_to_cpu(mpi_reply->DevHandle),
4963 ioc->name, le16_to_cpu(mpi_reply->DevHandle), 4913 desc_ioc_state, ioc_status, smid);
4964 desc_ioc_state, ioc_status, smid); 4914 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
4965 pr_warn(MPT3SAS_FMT 4915 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
4966 "\trequest_len(%d), underflow(%d), resid(%d)\n", 4916 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
4967 ioc->name, scsi_bufflen(scmd), scmd->underflow, 4917 le16_to_cpu(mpi_reply->TaskTag),
4968 scsi_get_resid(scmd)); 4918 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
4969 pr_warn(MPT3SAS_FMT 4919 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
4970 "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", 4920 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
4971 ioc->name, le16_to_cpu(mpi_reply->TaskTag),
4972 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
4973 pr_warn(MPT3SAS_FMT
4974 "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
4975 ioc->name, desc_scsi_status,
4976 scsi_status, desc_scsi_state, scsi_state);
4977 4921
4978 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { 4922 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
4979 struct sense_info data; 4923 struct sense_info data;
4980 _scsih_normalize_sense(scmd->sense_buffer, &data); 4924 _scsih_normalize_sense(scmd->sense_buffer, &data);
4981 pr_warn(MPT3SAS_FMT 4925 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
4982 "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", 4926 data.skey, data.asc, data.ascq,
4983 ioc->name, data.skey, 4927 le32_to_cpu(mpi_reply->SenseCount));
4984 data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
4985 } 4928 }
4986 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { 4929 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
4987 response_info = le32_to_cpu(mpi_reply->ResponseInfo); 4930 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
@@ -5016,17 +4959,17 @@ _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
 	    &mpi_request)) != 0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
-		    __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	sas_device->pfa_led_on = 1;
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
-		    ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
-		    le32_to_cpu(mpi_reply.IOCLogInfo)));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+				    le16_to_cpu(mpi_reply.IOCStatus),
+				    le32_to_cpu(mpi_reply.IOCLogInfo)));
 		goto out;
 	}
 out:
@@ -5056,16 +4999,16 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
 	    &mpi_request)) != 0) {
-		printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
-		    __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
-		dewtprintk(ioc, printk(MPT3SAS_FMT
-		    "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
-		    ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
-		    le32_to_cpu(mpi_reply.IOCLogInfo)));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+				    le16_to_cpu(mpi_reply.IOCStatus),
+				    le32_to_cpu(mpi_reply.IOCLogInfo)));
 		return;
 	}
 }
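Several of these hunks reflow dewtprintk() calls. dewtprintk() is the driver's conditional debug wrapper: it evaluates its second argument only when "event work task" debugging is enabled, so the converted ioc_info() costs nothing in normal operation. A sketch of the gating pattern, assuming the macro and the MPT_DEBUG_EVENT_WORK_TASK logging flag are as used elsewhere in this driver (the actual macro lives in mpt3sas_debug.h):

	/* Sketch: CMD is evaluated only when the corresponding
	 * logging_level bit is set on the adapter. */
	#define dewtprintk(IOC, CMD)					\
	do {								\
		if ((IOC)->logging_level & MPT_DEBUG_EVENT_WORK_TASK)	\
			CMD;						\
	} while (0)

dtmprintk(), seen further down, follows the same shape, keyed on the task-management debug bit.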
@@ -5133,8 +5076,8 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	    sizeof(Mpi2EventDataSasDeviceStatusChange_t);
 	event_reply = kzalloc(sz, GFP_KERNEL);
 	if (!event_reply) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -5424,16 +5367,16 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
 	u16 attached_handle;
 	u8 link_rate;
 
-	dtmprintk(ioc, pr_info(MPT3SAS_FMT
-	    "updating handles for sas_host(0x%016llx)\n",
-	    ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+	dtmprintk(ioc,
+		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
+			   (u64)ioc->sas_hba.sas_address));
 
 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
@@ -5483,15 +5426,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
 
 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
 	if (!num_phys) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 	ioc->sas_hba.phy = kcalloc(num_phys,
 	    sizeof(struct _sas_phy), GFP_KERNEL);
 	if (!ioc->sas_hba.phy) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc->sas_hba.num_phys = num_phys;
@@ -5501,21 +5444,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
 	    sizeof(Mpi2SasIOUnit0PhyData_t));
 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
 	    sas_iounit_pg0, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -5524,21 +5467,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg1) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
 	    sas_iounit_pg1, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 
@@ -5557,15 +5500,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
 		    i))) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			goto out;
 		}
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			goto out;
 		}
 
@@ -5579,18 +5522,17 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
 	}
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		goto out;
 	}
 	ioc->sas_hba.enclosure_handle =
 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
-	pr_info(MPT3SAS_FMT
-	    "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
-	    ioc->name, ioc->sas_hba.handle,
-	    (unsigned long long) ioc->sas_hba.sas_address,
-	    ioc->sas_hba.num_phys) ;
+	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+		 ioc->sas_hba.handle,
+		 (u64)ioc->sas_hba.sas_address,
+		 ioc->sas_hba.num_phys);
 
 	if (ioc->sas_hba.enclosure_handle) {
 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -5639,16 +5581,16 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 
 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
@@ -5656,8 +5598,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
 	    != 0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 	if (sas_address_parent != ioc->sas_hba.sas_address) {
@@ -5684,8 +5626,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	sas_expander = kzalloc(sizeof(struct _sas_node),
 	    GFP_KERNEL);
 	if (!sas_expander) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
@@ -5694,18 +5636,17 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	sas_expander->sas_address_parent = sas_address_parent;
 	sas_expander->sas_address = sas_address;
 
-	pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
-	    " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
-	    handle, parent_handle, (unsigned long long)
-	    sas_expander->sas_address, sas_expander->num_phys);
+	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+		 handle, parent_handle,
+		 (u64)sas_expander->sas_address, sas_expander->num_phys);
 
 	if (!sas_expander->num_phys)
 		goto out_fail;
 	sas_expander->phy = kcalloc(sas_expander->num_phys,
 	    sizeof(struct _sas_phy), GFP_KERNEL);
 	if (!sas_expander->phy) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -1;
 		goto out_fail;
 	}
@@ -5714,8 +5655,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
 	    sas_address_parent);
 	if (!mpt3sas_port) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -1;
 		goto out_fail;
 	}
@@ -5724,8 +5665,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
 		    &expander_pg1, i, handle))) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			rc = -1;
 			goto out_fail;
 		}
@@ -5735,8 +5676,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 		if ((mpt3sas_transport_add_expander_phy(ioc,
 		    &sas_expander->phy[i], expander_pg1,
 		    sas_expander->parent_dev))) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			rc = -1;
 			goto out_fail;
 		}
@@ -5883,9 +5824,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
 	if (!rc)
 		return 0;
 
-	pr_err(MPT3SAS_FMT
-	    "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
-	    ioc->name, desc, (unsigned long long)sas_address, handle);
+	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+		desc, (u64)sas_address, handle);
 	return rc;
 }
 
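Alongside the wrapper conversion, these hunks shorten "(unsigned long long)" casts to "(u64)". In the kernel this is an equivalence rather than a behavior change: u64 is typedef'd to unsigned long long on all architectures (the int-ll64 convention), so it still matches the %llx/%016llx specifiers. A minimal sketch (the address value is a made-up example):

	/* Sketch: (u64) and (unsigned long long) name the same type in
	 * kernel code, so either cast satisfies %016llx. */
	u64 sas_address = 0x5000c50012345678ULL;	/* hypothetical value */
	pr_info("sas_address(0x%016llx)\n", (u64)sas_address);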
@@ -5979,9 +5919,8 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
 	/* check if device is present */
 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
-		pr_err(MPT3SAS_FMT
-		    "device is not present handle(0x%04x), flags!!!\n",
-		    ioc->name, handle);
+		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
+			handle);
 		goto out_unlock;
 	}
 
@@ -6028,16 +5967,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 
@@ -6051,8 +5990,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 	/* check if device is present */
 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
-		pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
-		    ioc->name, handle);
+		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+			handle);
 		return -1;
 	}
 
@@ -6074,16 +6013,15 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
 		if (enclosure_dev == NULL)
-			pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
-			    "doesn't match with enclosure device!\n",
-			    ioc->name, sas_device_pg0.EnclosureHandle);
+			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+				 sas_device_pg0.EnclosureHandle);
 	}
 
 	sas_device = kzalloc(sizeof(struct _sas_device),
 	    GFP_KERNEL);
 	if (!sas_device) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 0;
 	}
 
@@ -6092,8 +6030,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 	if (_scsih_get_sas_address(ioc,
 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
 	    &sas_device->sas_address_parent) != 0)
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 	sas_device->enclosure_handle =
 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
 	if (sas_device->enclosure_handle != 0)
@@ -6158,11 +6096,10 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
 		sas_device->pfa_led_on = 0;
 	}
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
-	    ioc->name, __func__,
-	    sas_device->handle, (unsigned long long)
-	    sas_device->sas_address));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+			    __func__,
+			    sas_device->handle, (u64)sas_device->sas_address));
 
 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 	    NULL, NULL));
@@ -6180,18 +6117,15 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
 	    sas_device->sas_address,
 	    sas_device->sas_address_parent);
 
-	pr_info(MPT3SAS_FMT
-	    "removing handle(0x%04x), sas_addr(0x%016llx)\n",
-	    ioc->name, sas_device->handle,
-	    (unsigned long long) sas_device->sas_address);
+	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+		 sas_device->handle, (u64)sas_device->sas_address);
 
 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
-	    ioc->name, __func__,
-	    sas_device->handle, (unsigned long long)
-	    sas_device->sas_address));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+			    __func__,
+			    sas_device->handle, (u64)sas_device->sas_address));
 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
 	    NULL, NULL));
 }
@@ -6231,8 +6165,7 @@ _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 		status_str = "unknown status";
 		break;
 	}
-	pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
-	    ioc->name, status_str);
+	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
 	    "start_phy(%02d), count(%d)\n",
 	    le16_to_cpu(event_data->ExpanderDevHandle),
@@ -6309,8 +6242,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 	_scsih_sas_host_refresh(ioc);
 
 	if (fw_event->ignore) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "ignoring expander event\n", ioc->name));
+		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
 		return 0;
 	}
 
@@ -6339,8 +6271,8 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 	/* handle siblings events */
 	for (i = 0; i < event_data->NumEntries; i++) {
 		if (fw_event->ignore) {
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "ignoring expander event\n", ioc->name));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "ignoring expander event\n"));
 			return 0;
 		}
 		if (ioc->remove_host || ioc->pci_error_recovery)
@@ -6464,15 +6396,14 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 		reason_str = "unknown reason";
 		break;
 	}
-	pr_info(MPT3SAS_FMT "device status change: (%s)\n"
-	    "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
-	    ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
-	    (unsigned long long)le64_to_cpu(event_data->SASAddress),
-	    le16_to_cpu(event_data->TaskTag));
+	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+		 reason_str, le16_to_cpu(event_data->DevHandle),
+		 (u64)le64_to_cpu(event_data->SASAddress),
+		 le16_to_cpu(event_data->TaskTag));
 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
-		pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
-		    event_data->ASC, event_data->ASCQ);
-	pr_info("\n");
+		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
+			event_data->ASC, event_data->ASCQ);
+	pr_cont("\n");
 }
 
 /**
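The pr_info() to pr_cont() changes in this hunk matter for output correctness, not just style: once the first fragment has been emitted through ioc_info(), the ASC/ASCQ fragment and the closing newline must continue the current log record rather than start a new one carrying its own prefix. A sketch of the intended continuation pattern, using the same names as the hunk above:

	/* Sketch: one logical log line assembled from an ioc_info()
	 * followed by pr_cont() continuation fragments. */
	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
		 reason_str, le16_to_cpu(event_data->DevHandle),
		 (u64)le64_to_cpu(event_data->SASAddress),
		 le16_to_cpu(event_data->TaskTag));
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	pr_cont("\n");

With the old code, the SMART-data fragment went through pr_info(MPT3SAS_FMT ...), which restarted the line with the adapter-name prefix in the middle of the message.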
@@ -6605,20 +6536,16 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
 		desc = "nvme failure status";
 		break;
 	default:
-		pr_err(MPT3SAS_FMT
-		    " NVMe discovery error(0x%02x): wwid(0x%016llx),"
-		    "handle(0x%04x)\n", ioc->name, access_status,
-		    (unsigned long long)wwid, handle);
+		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
+			access_status, (u64)wwid, handle);
 		return rc;
 	}
 
 	if (!rc)
 		return rc;
 
-	pr_info(MPT3SAS_FMT
-	    "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
-	    ioc->name, desc,
-	    (unsigned long long)wwid, handle);
+	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
+		 desc, (u64)wwid, handle);
 	return rc;
 }
@@ -6634,22 +6561,22 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
 {
 	struct MPT3SAS_TARGET *sas_target_priv_data;
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-	    pcie_device->handle, (unsigned long long)
-	    pcie_device->wwid));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
+			    __func__,
+			    pcie_device->handle, (u64)pcie_device->wwid));
 	if (pcie_device->enclosure_handle != 0)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
-		    ioc->name, __func__,
-		    (unsigned long long)pcie_device->enclosure_logical_id,
-		    pcie_device->slot));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+				    __func__,
+				    (u64)pcie_device->enclosure_logical_id,
+				    pcie_device->slot));
 	if (pcie_device->connector_name[0] != '\0')
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
-		    ioc->name, __func__,
-		    pcie_device->enclosure_level,
-		    pcie_device->connector_name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
+				    __func__,
+				    pcie_device->enclosure_level,
+				    pcie_device->connector_name));
 
 	if (pcie_device->starget && pcie_device->starget->hostdata) {
 		sas_target_priv_data = pcie_device->starget->hostdata;
@@ -6658,39 +6585,35 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
 	}
 
-	pr_info(MPT3SAS_FMT
-	    "removing handle(0x%04x), wwid (0x%016llx)\n",
-	    ioc->name, pcie_device->handle,
-	    (unsigned long long) pcie_device->wwid);
+	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+		 pcie_device->handle, (u64)pcie_device->wwid);
 	if (pcie_device->enclosure_handle != 0)
-		pr_info(MPT3SAS_FMT
-		    "removing : enclosure logical id(0x%016llx), slot(%d)\n",
-		    ioc->name,
-		    (unsigned long long)pcie_device->enclosure_logical_id,
-		    pcie_device->slot);
+		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+			 (u64)pcie_device->enclosure_logical_id,
+			 pcie_device->slot);
 	if (pcie_device->connector_name[0] != '\0')
-		pr_info(MPT3SAS_FMT
-		    "removing: enclosure level(0x%04x), connector name( %s)\n",
-		    ioc->name, pcie_device->enclosure_level,
-		    pcie_device->connector_name);
+		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
+			 pcie_device->enclosure_level,
+			 pcie_device->connector_name);
 
 	if (pcie_device->starget)
 		scsi_remove_target(&pcie_device->starget->dev);
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-	    pcie_device->handle, (unsigned long long)
-	    pcie_device->wwid));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
+			    __func__,
+			    pcie_device->handle, (u64)pcie_device->wwid));
 	if (pcie_device->enclosure_handle != 0)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
-		    ioc->name, __func__,
-		    (unsigned long long)pcie_device->enclosure_logical_id,
-		    pcie_device->slot));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+				    __func__,
+				    (u64)pcie_device->enclosure_logical_id,
+				    pcie_device->slot));
 	if (pcie_device->connector_name[0] != '\0')
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
-		    ioc->name, __func__, pcie_device->enclosure_level,
-		    pcie_device->connector_name));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
+				    __func__,
+				    pcie_device->enclosure_level,
+				    pcie_device->connector_name));
 
 	kfree(pcie_device->serial_number);
 }
@@ -6760,9 +6683,8 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	/* check if device is present */
 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
-		pr_info(MPT3SAS_FMT
-		    "device is not present handle(0x%04x), flags!!!\n",
-		    ioc->name, handle);
+		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
+			 handle);
 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
 		pcie_device_put(pcie_device);
 		return;
@@ -6806,16 +6728,15 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 
 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 0;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 0;
 	}
 
@@ -6825,9 +6746,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	/* check if device is present */
 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
-		pr_err(MPT3SAS_FMT
-		    "device is not present handle(0x04%x)!!!\n",
-		    ioc->name, handle);
+		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+			handle);
 		return 0;
 	}
 
@@ -6848,8 +6768,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 
 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
 	if (!pcie_device) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return 0;
 	}
 
@@ -6890,16 +6810,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	/* TODO -- Add device name once FW supports it */
 	if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
 	    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		kfree(pcie_device);
 		return 0;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		kfree(pcie_device);
 		return 0;
 	}
@@ -6956,8 +6876,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 		status_str = "unknown status";
 		break;
 	}
-	pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
-	    ioc->name, status_str);
+	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
 	    "start_port(%02d), count(%d)\n",
 	    le16_to_cpu(event_data->SwitchDevHandle),
@@ -7030,16 +6949,15 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 		return;
 
 	if (fw_event->ignore) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
-		    ioc->name));
+		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
 		return;
 	}
 
 	/* handle siblings events */
 	for (i = 0; i < event_data->NumEntries; i++) {
 		if (fw_event->ignore) {
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "ignoring switch event\n", ioc->name));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "ignoring switch event\n"));
 			return;
 		}
 		if (ioc->remove_host || ioc->pci_error_recovery)
@@ -7084,9 +7002,9 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 		if (!test_bit(handle, ioc->pend_os_device_add))
 			break;
 
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "handle(0x%04x) device not found: convert "
-		    "event to a device add\n", ioc->name, handle));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
+				    handle));
 		event_data->PortEntry[i].PortStatus &= 0xF0;
 		event_data->PortEntry[i].PortStatus |=
 		    MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
@@ -7169,15 +7087,15 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 		break;
 	}
 
-	pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
-	    "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
-	    ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
-	    (unsigned long long)le64_to_cpu(event_data->WWID),
-	    le16_to_cpu(event_data->TaskTag));
+	ioc_info(ioc, "PCIE device status change: (%s)\n"
+		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+		 reason_str, le16_to_cpu(event_data->DevHandle),
+		 (u64)le64_to_cpu(event_data->WWID),
+		 le16_to_cpu(event_data->TaskTag));
 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
-		pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
-		    event_data->ASC, event_data->ASCQ);
-	pr_info("\n");
+		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
+			event_data->ASC, event_data->ASCQ);
+	pr_cont("\n");
 }
 
 /**
@@ -7255,12 +7173,12 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 		break;
 	}
 
-	pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
-	    "\thandle(0x%04x), enclosure logical id(0x%016llx)"
-	    " number slots(%d)\n", ioc->name, reason_str,
-	    le16_to_cpu(event_data->EnclosureHandle),
-	    (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
-	    le16_to_cpu(event_data->StartSlot));
+	ioc_info(ioc, "enclosure status change: (%s)\n"
+		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
+		 reason_str,
+		 le16_to_cpu(event_data->EnclosureHandle),
+		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
+		 le16_to_cpu(event_data->StartSlot));
 }
 
 /**
@@ -7298,9 +7216,8 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
 			kzalloc(sizeof(struct _enclosure_node),
 			    GFP_KERNEL);
 		if (!enclosure_dev) {
-			pr_info(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name,
-			    __FILE__, __LINE__, __func__);
+			ioc_info(ioc, "failure at %s:%d/%s()!\n",
+				 __FILE__, __LINE__, __func__);
 			return;
 		}
 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -7358,10 +7275,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 	u8 task_abort_retries;
 
 	mutex_lock(&ioc->tm_cmds.mutex);
-	pr_info(MPT3SAS_FMT
-	    "%s: enter: phy number(%d), width(%d)\n",
-	    ioc->name, __func__, event_data->PhyNum,
-	    event_data->PortWidth);
+	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
+		 __func__, event_data->PhyNum, event_data->PortWidth);
 
 	_scsih_block_io_all_device(ioc);
 
@@ -7371,12 +7286,12 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 
 	/* sanity checks for retrying this loop */
 	if (max_retries++ == 5) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
-		    ioc->name, __func__));
+		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
 		goto out;
 	} else if (max_retries > 1)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
-		    ioc->name, __func__, max_retries - 1));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: %d retry\n",
+				    __func__, max_retries - 1));
 
 	termination_count = 0;
 	query_count = 0;
@@ -7443,9 +7358,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 		task_abort_retries = 0;
  tm_retry:
 		if (task_abort_retries++ == 60) {
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "%s: ABORT_TASK: giving up\n", ioc->name,
-			    __func__));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
+					    __func__));
 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 			goto broadcast_aen_retry;
 		}
@@ -7474,9 +7389,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 	}
 
 	if (ioc->broadcast_aen_pending) {
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: loop back due to pending AEN\n",
-		    ioc->name, __func__));
+		dewtprintk(ioc,
+			   ioc_info(ioc,
+				    "%s: loop back due to pending AEN\n",
+				    __func__));
 		ioc->broadcast_aen_pending = 0;
 		goto broadcast_aen_retry;
 	}
@@ -7485,9 +7401,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  out_no_lock:
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s - exit, query_count = %d termination_count = %d\n",
-	    ioc->name, __func__, query_count, termination_count));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
+			    __func__, query_count, termination_count));
 
 	ioc->broadcast_aen_busy = 0;
 	if (!ioc->shost_recovery)
@@ -7509,13 +7425,13 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
 	    (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
 
 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
-		pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
-		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
-		    "start" : "stop");
+		ioc_info(ioc, "discovery event: (%s)",
+			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+			 "start" : "stop");
 		if (event_data->DiscoveryStatus)
-			pr_info("discovery_status(0x%08x)",
-			    le32_to_cpu(event_data->DiscoveryStatus));
-		pr_info("\n");
+			pr_cont("discovery_status(0x%08x)",
+				le32_to_cpu(event_data->DiscoveryStatus));
+		pr_cont("\n");
 	}
 
 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
@@ -7545,20 +7461,16 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
 
 	switch (event_data->ReasonCode) {
 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
-		pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
-		    "(handle:0x%04x, sas_address:0x%016llx,"
-		    "physical_port:0x%02x) has failed",
-		    ioc->name, le16_to_cpu(event_data->DevHandle),
-		    (unsigned long long)le64_to_cpu(event_data->SASAddress),
-		    event_data->PhysicalPort);
+		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
+			 le16_to_cpu(event_data->DevHandle),
+			 (u64)le64_to_cpu(event_data->SASAddress),
+			 event_data->PhysicalPort);
 		break;
 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
-		pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
-		    "(handle:0x%04x, sas_address:0x%016llx,"
-		    "physical_port:0x%02x) has timed out",
-		    ioc->name, le16_to_cpu(event_data->DevHandle),
-		    (unsigned long long)le64_to_cpu(event_data->SASAddress),
-		    event_data->PhysicalPort);
+		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
+			 le16_to_cpu(event_data->DevHandle),
+			 (u64)le64_to_cpu(event_data->SASAddress),
+			 event_data->PhysicalPort);
 		break;
 	default:
 		break;
@@ -7581,11 +7493,10 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
 		return;
 
-	pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
-	    ioc->name,
-	    (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
-	    "started" : "completed",
-	    event_data->Flags);
+	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
+		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+		 "started" : "completed",
+		 event_data->Flags);
 	if (event_data->EnumerationStatus)
 		pr_cont("enumeration_status(0x%08x)",
 		    le32_to_cpu(event_data->EnumerationStatus));
@@ -7617,8 +7528,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 	mutex_lock(&ioc->scsih_cmds.mutex);
 
 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -7626,8 +7536,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-		    ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
 		rc = -EAGAIN;
 		goto out;
@@ -7641,9 +7550,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
 	mpi_request->PhysDiskNum = phys_disk_num;
 
-	dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
-	    "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
-	    handle, phys_disk_num));
+	dewtprintk(ioc,
+		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
+			    handle, phys_disk_num));
 
 	init_completion(&ioc->scsih_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
@@ -7668,15 +7577,13 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 			log_info = 0;
 		ioc_status &= MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "IR RAID_ACTION: failed: ioc_status(0x%04x), "
-			    "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
-			    log_info));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
+					    ioc_status, log_info));
 			rc = -EFAULT;
 		} else
-			dewtprintk(ioc, pr_info(MPT3SAS_FMT
-			    "IR RAID_ACTION: completed successfully\n",
-			    ioc->name));
+			dewtprintk(ioc,
+				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
 	}
 
  out:
@@ -7721,9 +7628,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
 
 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
 	if (!wwid) {
-		pr_err(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n", ioc->name,
-		    __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
@@ -7736,9 +7642,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
 
 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
 	if (!raid_device) {
-		pr_err(MPT3SAS_FMT
-		    "failure at %s:%d/%s()!\n", ioc->name,
-		    __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
@@ -7781,9 +7686,8 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 		sas_target_priv_data = starget->hostdata;
 		sas_target_priv_data->deleted = 1;
 	}
-	pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
-	    ioc->name, raid_device->handle,
-	    (unsigned long long) raid_device->wwid);
+	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+		 raid_device->handle, (u64)raid_device->wwid);
 	list_del(&raid_device->list);
 	kfree(raid_device);
 }
@@ -7925,16 +7829,16 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
 
 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-		    ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return;
 	}
 
@@ -7964,10 +7868,10 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 
 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
 
-	pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
-	    ioc->name, (le32_to_cpu(event_data->Flags) &
-	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
-	    "foreign" : "native", event_data->NumElements);
+	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
+		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
+		 "foreign" : "native",
+		 event_data->NumElements);
 	for (i = 0; i < event_data->NumElements; i++, element++) {
 		switch (element->ReasonCode) {
 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
@@ -8123,10 +8027,11 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
 	handle = le16_to_cpu(event_data->VolDevHandle);
 	state = le32_to_cpu(event_data->NewValue);
 	if (!ioc->hide_ir_msg)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
-		    ioc->name, __func__, handle,
-		    le32_to_cpu(event_data->PreviousValue), state));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+				    __func__, handle,
+				    le32_to_cpu(event_data->PreviousValue),
+				    state));
 	switch (state) {
 	case MPI2_RAID_VOL_STATE_MISSING:
 	case MPI2_RAID_VOL_STATE_FAILED:
@@ -8146,17 +8051,15 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
 
 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
 		if (!wwid) {
-			pr_err(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name,
-			    __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			break;
 		}
 
 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
 		if (!raid_device) {
-			pr_err(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name,
-			    __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			break;
 		}
 
@@ -8207,10 +8110,11 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
 	state = le32_to_cpu(event_data->NewValue);
 
 	if (!ioc->hide_ir_msg)
-		dewtprintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
-		    ioc->name, __func__, handle,
-		    le32_to_cpu(event_data->PreviousValue), state));
+		dewtprintk(ioc,
+			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+				    __func__, handle,
+				    le32_to_cpu(event_data->PreviousValue),
+				    state));
 
 	switch (state) {
 	case MPI2_RAID_PD_STATE_ONLINE:
@@ -8231,16 +8135,16 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
 		    handle))) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			return;
 		}
 
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-			pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			    ioc->name, __FILE__, __LINE__, __func__);
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
+				__FILE__, __LINE__, __func__);
 			return;
 		}
 
@@ -8294,11 +8198,10 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
 	if (!reason_str)
 		return;
 
-	pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
-	    "\thandle(0x%04x), percent complete(%d)\n",
-	    ioc->name, reason_str,
-	    le16_to_cpu(event_data->VolDevHandle),
-	    event_data->PercentComplete);
+	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
+		 reason_str,
+		 le16_to_cpu(event_data->VolDevHandle),
+		 event_data->PercentComplete);
 }
 
 /**
@@ -8379,9 +8282,8 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
 			    le16_to_cpu(sas_device_pg0->EnclosureHandle));
 		if (enclosure_dev == NULL)
-			pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
-			    "doesn't match with enclosure device!\n",
-			    ioc->name, sas_device_pg0->EnclosureHandle);
+			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+				 sas_device_pg0->EnclosureHandle);
 	}
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
@@ -8475,8 +8377,7 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
 		enclosure_dev =
 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
 		if (!enclosure_dev) {
-			pr_err(MPT3SAS_FMT
-			    "failure at %s:%d/%s()!\n", ioc->name,
+			ioc_err(ioc, "failure at %s:%d/%s()!\n",
 			    __FILE__, __LINE__, __func__);
 			return;
 		}
@@ -8513,7 +8414,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
 	u16 handle;
 	u32 device_info;
 
-	pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+	ioc_info(ioc, "search for end-devices: start\n");
 
 	if (list_empty(&ioc->sas_device_list))
 		goto out;
@@ -8534,8 +8435,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
 	}
 
  out:
-	pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
-	    ioc->name);
+	ioc_info(ioc, "search for end-devices: complete\n");
 }
 
 /**
@@ -8628,7 +8528,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
 	u16 handle;
 	u32 device_info;
 
-	pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+	ioc_info(ioc, "search for end-devices: start\n");
 
 	if (list_empty(&ioc->pcie_device_list))
 		goto out;
@@ -8640,10 +8540,9 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_info(MPT3SAS_FMT "\tbreak from %s: "
-		    "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
+		ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
+			 __func__, ioc_status,
8645 __func__, ioc_status, 8545 le32_to_cpu(mpi_reply.IOCLogInfo));
8646 le32_to_cpu(mpi_reply.IOCLogInfo));
8647 break; 8546 break;
8648 } 8547 }
8649 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 8548 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -8653,8 +8552,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8653 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); 8552 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8654 } 8553 }
8655out: 8554out:
8656 pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n", 8555 ioc_info(ioc, "search for PCIe end-devices: complete\n");
8657 ioc->name);
8658} 8556}
8659 8557
8660/** 8558/**
@@ -8735,8 +8633,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8735 if (!ioc->ir_firmware) 8633 if (!ioc->ir_firmware)
8736 return; 8634 return;
8737 8635
8738 pr_info(MPT3SAS_FMT "search for raid volumes: start\n", 8636 ioc_info(ioc, "search for raid volumes: start\n");
8739 ioc->name);
8740 8637
8741 if (list_empty(&ioc->raid_device_list)) 8638 if (list_empty(&ioc->raid_device_list))
8742 goto out; 8639 goto out;
@@ -8779,8 +8676,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8779 } 8676 }
8780 } 8677 }
8781 out: 8678 out:
8782 pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n", 8679 ioc_info(ioc, "search for responding raid volumes: complete\n");
8783 ioc->name);
8784} 8680}
8785 8681
8786/** 8682/**
@@ -8852,7 +8748,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8852 u64 sas_address; 8748 u64 sas_address;
8853 u16 handle; 8749 u16 handle;
8854 8750
8855 pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name); 8751 ioc_info(ioc, "search for expanders: start\n");
8856 8752
8857 if (list_empty(&ioc->sas_expander_list)) 8753 if (list_empty(&ioc->sas_expander_list))
8858 goto out; 8754 goto out;
@@ -8875,7 +8771,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8875 } 8771 }
8876 8772
8877 out: 8773 out:
8878 pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name); 8774 ioc_info(ioc, "search for expanders: complete\n");
8879} 8775}
8880 8776
8881/** 8777/**
@@ -8893,12 +8789,10 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8893 unsigned long flags; 8789 unsigned long flags;
8894 LIST_HEAD(head); 8790 LIST_HEAD(head);
8895 8791
8896 pr_info(MPT3SAS_FMT "removing unresponding devices: start\n", 8792 ioc_info(ioc, "removing unresponding devices: start\n");
8897 ioc->name);
8898 8793
8899 /* removing unresponding end devices */ 8794 /* removing unresponding end devices */
8900 pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n", 8795 ioc_info(ioc, "removing unresponding devices: end-devices\n");
8901 ioc->name);
8902 /* 8796 /*
8903 * Iterate, pulling off devices marked as non-responding. We become the 8797 * Iterate, pulling off devices marked as non-responding. We become the
8904 * owner for the reference the list had on any object we prune. 8798 * owner for the reference the list had on any object we prune.
@@ -8922,9 +8816,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8922 sas_device_put(sas_device); 8816 sas_device_put(sas_device);
8923 } 8817 }
8924 8818
8925 pr_info(MPT3SAS_FMT 8819 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
8926 " Removing unresponding devices: pcie end-devices\n"
8927 , ioc->name);
8928 INIT_LIST_HEAD(&head); 8820 INIT_LIST_HEAD(&head);
8929 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 8821 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8930 list_for_each_entry_safe(pcie_device, pcie_device_next, 8822 list_for_each_entry_safe(pcie_device, pcie_device_next,
@@ -8944,8 +8836,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8944 8836
8945 /* removing unresponding volumes */ 8837 /* removing unresponding volumes */
8946 if (ioc->ir_firmware) { 8838 if (ioc->ir_firmware) {
8947 pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n", 8839 ioc_info(ioc, "removing unresponding devices: volumes\n");
8948 ioc->name);
8949 list_for_each_entry_safe(raid_device, raid_device_next, 8840 list_for_each_entry_safe(raid_device, raid_device_next,
8950 &ioc->raid_device_list, list) { 8841 &ioc->raid_device_list, list) {
8951 if (!raid_device->responding) 8842 if (!raid_device->responding)
@@ -8957,8 +8848,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8957 } 8848 }
8958 8849
8959 /* removing unresponding expanders */ 8850 /* removing unresponding expanders */
8960 pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n", 8851 ioc_info(ioc, "removing unresponding devices: expanders\n");
8961 ioc->name);
8962 spin_lock_irqsave(&ioc->sas_node_lock, flags); 8852 spin_lock_irqsave(&ioc->sas_node_lock, flags);
8963 INIT_LIST_HEAD(&tmp_list); 8853 INIT_LIST_HEAD(&tmp_list);
8964 list_for_each_entry_safe(sas_expander, sas_expander_next, 8854 list_for_each_entry_safe(sas_expander, sas_expander_next,
@@ -8974,8 +8864,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8974 _scsih_expander_node_remove(ioc, sas_expander); 8864 _scsih_expander_node_remove(ioc, sas_expander);
8975 } 8865 }
8976 8866
8977 pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n", 8867 ioc_info(ioc, "removing unresponding devices: complete\n");
8978 ioc->name);
8979 8868
8980 /* unblock devices */ 8869 /* unblock devices */
8981 _scsih_ublock_io_all_device(ioc); 8870 _scsih_ublock_io_all_device(ioc);
@@ -8992,8 +8881,8 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
8992 for (i = 0 ; i < sas_expander->num_phys ; i++) { 8881 for (i = 0 ; i < sas_expander->num_phys ; i++) {
8993 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, 8882 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
8994 &expander_pg1, i, handle))) { 8883 &expander_pg1, i, handle))) {
8995 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 8884 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8996 ioc->name, __FILE__, __LINE__, __func__); 8885 __FILE__, __LINE__, __func__);
8997 return; 8886 return;
8998 } 8887 }
8999 8888
@@ -9029,11 +8918,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9029 u8 retry_count; 8918 u8 retry_count;
9030 unsigned long flags; 8919 unsigned long flags;
9031 8920
9032 pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name); 8921 ioc_info(ioc, "scan devices: start\n");
9033 8922
9034 _scsih_sas_host_refresh(ioc); 8923 _scsih_sas_host_refresh(ioc);
9035 8924
9036 pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name); 8925 ioc_info(ioc, "\tscan devices: expanders start\n");
9037 8926
9038 /* expanders */ 8927 /* expanders */
9039 handle = 0xFFFF; 8928 handle = 0xFFFF;
@@ -9042,10 +8931,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9042 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8931 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9043 MPI2_IOCSTATUS_MASK; 8932 MPI2_IOCSTATUS_MASK;
9044 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8933 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9045 pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \ 8934 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9046 "ioc_status(0x%04x), loginfo(0x%08x)\n", 8935 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9047 ioc->name, ioc_status,
9048 le32_to_cpu(mpi_reply.IOCLogInfo));
9049 break; 8936 break;
9050 } 8937 }
9051 handle = le16_to_cpu(expander_pg0.DevHandle); 8938 handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -9057,25 +8944,22 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9057 _scsih_refresh_expander_links(ioc, expander_device, 8944 _scsih_refresh_expander_links(ioc, expander_device,
9058 handle); 8945 handle);
9059 else { 8946 else {
9060 pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \ 8947 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9061 "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, 8948 handle,
9062 handle, (unsigned long long) 8949 (u64)le64_to_cpu(expander_pg0.SASAddress));
9063 le64_to_cpu(expander_pg0.SASAddress));
9064 _scsih_expander_add(ioc, handle); 8950 _scsih_expander_add(ioc, handle);
9065 pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \ 8951 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9066 "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, 8952 handle,
9067 handle, (unsigned long long) 8953 (u64)le64_to_cpu(expander_pg0.SASAddress));
9068 le64_to_cpu(expander_pg0.SASAddress));
9069 } 8954 }
9070 } 8955 }
9071 8956
9072 pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n", 8957 ioc_info(ioc, "\tscan devices: expanders complete\n");
9073 ioc->name);
9074 8958
9075 if (!ioc->ir_firmware) 8959 if (!ioc->ir_firmware)
9076 goto skip_to_sas; 8960 goto skip_to_sas;
9077 8961
9078 pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name); 8962 ioc_info(ioc, "\tscan devices: phys disk start\n");
9079 8963
9080 /* phys disk */ 8964 /* phys disk */
9081 phys_disk_num = 0xFF; 8965 phys_disk_num = 0xFF;
@@ -9085,10 +8969,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9085 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8969 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9086 MPI2_IOCSTATUS_MASK; 8970 MPI2_IOCSTATUS_MASK;
9087 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8971 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9088 pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\ 8972 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9089 "ioc_status(0x%04x), loginfo(0x%08x)\n", 8973 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9090 ioc->name, ioc_status,
9091 le32_to_cpu(mpi_reply.IOCLogInfo));
9092 break; 8974 break;
9093 } 8975 }
9094 phys_disk_num = pd_pg0.PhysDiskNum; 8976 phys_disk_num = pd_pg0.PhysDiskNum;
@@ -9105,19 +8987,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9105 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8987 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9106 MPI2_IOCSTATUS_MASK; 8988 MPI2_IOCSTATUS_MASK;
9107 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8989 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9108 pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \ 8990 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9109 "ioc_status(0x%04x), loginfo(0x%08x)\n", 8991 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9110 ioc->name, ioc_status,
9111 le32_to_cpu(mpi_reply.IOCLogInfo));
9112 break; 8992 break;
9113 } 8993 }
9114 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 8994 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9115 if (!_scsih_get_sas_address(ioc, parent_handle, 8995 if (!_scsih_get_sas_address(ioc, parent_handle,
9116 &sas_address)) { 8996 &sas_address)) {
9117 pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \ 8997 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9118 " handle (0x%04x), sas_addr(0x%016llx)\n", 8998 handle,
9119 ioc->name, handle, (unsigned long long) 8999 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9120 le64_to_cpu(sas_device_pg0.SASAddress));
9121 mpt3sas_transport_update_links(ioc, sas_address, 9000 mpt3sas_transport_update_links(ioc, sas_address,
9122 handle, sas_device_pg0.PhyNum, 9001 handle, sas_device_pg0.PhyNum,
9123 MPI2_SAS_NEG_LINK_RATE_1_5); 9002 MPI2_SAS_NEG_LINK_RATE_1_5);
@@ -9131,17 +9010,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9131 1)) { 9010 1)) {
9132 ssleep(1); 9011 ssleep(1);
9133 } 9012 }
9134 pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \ 9013 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9135 " handle (0x%04x), sas_addr(0x%016llx)\n", 9014 handle,
9136 ioc->name, handle, (unsigned long long) 9015 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9137 le64_to_cpu(sas_device_pg0.SASAddress));
9138 } 9016 }
9139 } 9017 }
9140 9018
9141 pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n", 9019 ioc_info(ioc, "\tscan devices: phys disk complete\n");
9142 ioc->name);
9143 9020
9144 pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name); 9021 ioc_info(ioc, "\tscan devices: volumes start\n");
9145 9022
9146 /* volumes */ 9023 /* volumes */
9147 handle = 0xFFFF; 9024 handle = 0xFFFF;
@@ -9150,10 +9027,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9150 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9027 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9151 MPI2_IOCSTATUS_MASK; 9028 MPI2_IOCSTATUS_MASK;
9152 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9029 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9153 pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \ 9030 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9154 "ioc_status(0x%04x), loginfo(0x%08x)\n", 9031 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9155 ioc->name, ioc_status,
9156 le32_to_cpu(mpi_reply.IOCLogInfo));
9157 break; 9032 break;
9158 } 9033 }
9159 handle = le16_to_cpu(volume_pg1.DevHandle); 9034 handle = le16_to_cpu(volume_pg1.DevHandle);
@@ -9170,10 +9045,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9170 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9045 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9171 MPI2_IOCSTATUS_MASK; 9046 MPI2_IOCSTATUS_MASK;
9172 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9047 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9173 pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \ 9048 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9174 "ioc_status(0x%04x), loginfo(0x%08x)\n", 9049 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9175 ioc->name, ioc_status,
9176 le32_to_cpu(mpi_reply.IOCLogInfo));
9177 break; 9050 break;
9178 } 9051 }
9179 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || 9052 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
@@ -9182,23 +9055,19 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9182 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); 9055 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9183 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; 9056 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9184 element.VolDevHandle = volume_pg1.DevHandle; 9057 element.VolDevHandle = volume_pg1.DevHandle;
9185 pr_info(MPT3SAS_FMT 9058 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9186 "\tBEFORE adding volume: handle (0x%04x)\n", 9059 volume_pg1.DevHandle);
9187 ioc->name, volume_pg1.DevHandle);
9188 _scsih_sas_volume_add(ioc, &element); 9060 _scsih_sas_volume_add(ioc, &element);
9189 pr_info(MPT3SAS_FMT 9061 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9190 "\tAFTER adding volume: handle (0x%04x)\n", 9062 volume_pg1.DevHandle);
9191 ioc->name, volume_pg1.DevHandle);
9192 } 9063 }
9193 } 9064 }
9194 9065
9195 pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n", 9066 ioc_info(ioc, "\tscan devices: volumes complete\n");
9196 ioc->name);
9197 9067
9198 skip_to_sas: 9068 skip_to_sas:
9199 9069
9200 pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n", 9070 ioc_info(ioc, "\tscan devices: end devices start\n");
9201 ioc->name);
9202 9071
9203 /* sas devices */ 9072 /* sas devices */
9204 handle = 0xFFFF; 9073 handle = 0xFFFF;
@@ -9208,10 +9077,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9208 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9077 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9209 MPI2_IOCSTATUS_MASK; 9078 MPI2_IOCSTATUS_MASK;
9210 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9079 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9211 pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\ 9080 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9212 " ioc_status(0x%04x), loginfo(0x%08x)\n", 9081 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9213 ioc->name, ioc_status,
9214 le32_to_cpu(mpi_reply.IOCLogInfo));
9215 break; 9082 break;
9216 } 9083 }
9217 handle = le16_to_cpu(sas_device_pg0.DevHandle); 9084 handle = le16_to_cpu(sas_device_pg0.DevHandle);
@@ -9226,10 +9093,9 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9226 } 9093 }
9227 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 9094 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9228 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { 9095 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9229 pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \ 9096 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9230 "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, 9097 handle,
9231 handle, (unsigned long long) 9098 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9232 le64_to_cpu(sas_device_pg0.SASAddress));
9233 mpt3sas_transport_update_links(ioc, sas_address, handle, 9099 mpt3sas_transport_update_links(ioc, sas_address, handle,
9234 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); 9100 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9235 retry_count = 0; 9101 retry_count = 0;
@@ -9241,16 +9107,13 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9241 0)) { 9107 0)) {
9242 ssleep(1); 9108 ssleep(1);
9243 } 9109 }
9244 pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \ 9110 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9245 "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, 9111 handle,
9246 handle, (unsigned long long) 9112 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9247 le64_to_cpu(sas_device_pg0.SASAddress));
9248 } 9113 }
9249 } 9114 }
9250 pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n", 9115 ioc_info(ioc, "\tscan devices: end devices complete\n");
9251 ioc->name); 9116 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9252 pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
9253 ioc->name);
9254 9117
9255 /* pcie devices */ 9118 /* pcie devices */
9256 handle = 0xFFFF; 9119 handle = 0xFFFF;
@@ -9260,10 +9123,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9260 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) 9123 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9261 & MPI2_IOCSTATUS_MASK; 9124 & MPI2_IOCSTATUS_MASK;
9262 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9125 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9263 pr_info(MPT3SAS_FMT "\tbreak from pcie end device" 9126 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9264 " scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 9127 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9265 ioc->name, ioc_status,
9266 le32_to_cpu(mpi_reply.IOCLogInfo));
9267 break; 9128 break;
9268 } 9129 }
9269 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 9130 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -9280,14 +9141,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9280 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle); 9141 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9281 _scsih_pcie_add_device(ioc, handle); 9142 _scsih_pcie_add_device(ioc, handle);
9282 9143
9283 pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: " 9144 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9284 "handle (0x%04x), wwid(0x%016llx)\n", ioc->name, 9145 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9285 handle,
9286 (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
9287 } 9146 }
9288 pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n", 9147 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
9289 ioc->name); 9148 ioc_info(ioc, "scan devices: complete\n");
9290 pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
9291} 9149}
9292 9150
9293/** 9151/**
@@ -9298,8 +9156,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9298 */ 9156 */
9299void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) 9157void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9300{ 9158{
9301 dtmprintk(ioc, pr_info(MPT3SAS_FMT 9159 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9302 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
9303} 9160}
9304 9161
9305/** 9162/**
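[Editor's note: the dtmprintk()/dewtprintk()/dtransportprintk() wrappers seen in these reset and event paths are conditional debug printers — the wrapped statement runs only when the matching debug bit is set in ioc->logging_level. A hedged sketch of the pattern (illustrative; the exact flag names and definitions live in mpt3sas_debug.h):

	/* Sketch: execute CMD only when task-management debugging
	 * is enabled for this adapter. */
	#define dtmprintk(IOC, CMD)				\
	do {							\
		if ((IOC)->logging_level & MPT_DEBUG_TM)	\
			CMD;					\
	} while (0)

This is why the conversion preserves the dtmprintk(ioc, ioc_info(...)) nesting: the ioc_info() call is the CMD argument and must stay inside the wrapper to remain conditional.]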
@@ -9311,8 +9168,7 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9311void 9168void
9312mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) 9169mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9313{ 9170{
9314 dtmprintk(ioc, pr_info(MPT3SAS_FMT 9171 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
9315 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
9316 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { 9172 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9317 ioc->scsih_cmds.status |= MPT3_CMD_RESET; 9173 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9318 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); 9174 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9340,8 +9196,7 @@ mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9340void 9196void
9341mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) 9197mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9342{ 9198{
9343 dtmprintk(ioc, pr_info(MPT3SAS_FMT 9199 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
9344 "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
9345 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && 9200 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9346 !ioc->sas_hba.num_phys)) { 9201 !ioc->sas_hba.num_phys)) {
9347 _scsih_prep_device_scan(ioc); 9202 _scsih_prep_device_scan(ioc);
@@ -9396,9 +9251,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9396 if (missing_delay[0] != -1 && missing_delay[1] != -1) 9251 if (missing_delay[0] != -1 && missing_delay[1] != -1)
9397 mpt3sas_base_update_missing_delay(ioc, missing_delay[0], 9252 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9398 missing_delay[1]); 9253 missing_delay[1]);
9399 dewtprintk(ioc, pr_info(MPT3SAS_FMT 9254 dewtprintk(ioc,
9400 "port enable: complete from worker thread\n", 9255 ioc_info(ioc, "port enable: complete from worker thread\n"));
9401 ioc->name));
9402 break; 9256 break;
9403 case MPT3SAS_TURN_ON_PFA_LED: 9257 case MPT3SAS_TURN_ON_PFA_LED:
9404 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); 9258 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
@@ -9496,8 +9350,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9496 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 9350 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9497 9351
9498 if (unlikely(!mpi_reply)) { 9352 if (unlikely(!mpi_reply)) {
9499 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", 9353 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9500 ioc->name, __FILE__, __LINE__, __func__); 9354 __FILE__, __LINE__, __func__);
9501 return 1; 9355 return 1;
9502 } 9356 }
9503 9357
@@ -9564,30 +9418,16 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9564 9418
9565 switch (le32_to_cpu(*log_code)) { 9419 switch (le32_to_cpu(*log_code)) {
9566 case MPT2_WARPDRIVE_LC_SSDT: 9420 case MPT2_WARPDRIVE_LC_SSDT:
9567 pr_warn(MPT3SAS_FMT "WarpDrive Warning: " 9421 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9568 "IO Throttling has occurred in the WarpDrive "
9569 "subsystem. Check WarpDrive documentation for "
9570 "additional details.\n", ioc->name);
9571 break; 9422 break;
9572 case MPT2_WARPDRIVE_LC_SSDLW: 9423 case MPT2_WARPDRIVE_LC_SSDLW:
9573 pr_warn(MPT3SAS_FMT "WarpDrive Warning: " 9424 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9574 "Program/Erase Cycles for the WarpDrive subsystem "
9575 "in degraded range. Check WarpDrive documentation "
9576 "for additional details.\n", ioc->name);
9577 break; 9425 break;
9578 case MPT2_WARPDRIVE_LC_SSDLF: 9426 case MPT2_WARPDRIVE_LC_SSDLF:
9579 pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: " 9427 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9580 "There are no Program/Erase Cycles for the "
9581 "WarpDrive subsystem. The storage device will be "
9582 "in read-only mode. Check WarpDrive documentation "
9583 "for additional details.\n", ioc->name);
9584 break; 9428 break;
9585 case MPT2_WARPDRIVE_LC_BRMF: 9429 case MPT2_WARPDRIVE_LC_BRMF:
9586 pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: " 9430 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9587 "The Backup Rail Monitor has failed on the "
9588 "WarpDrive subsystem. Check WarpDrive "
9589 "documentation for additional details.\n",
9590 ioc->name);
9591 break; 9431 break;
9592 } 9432 }
9593 9433
@@ -9613,9 +9453,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9613 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; 9453 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9614 switch (ActiveCableEventData->ReasonCode) { 9454 switch (ActiveCableEventData->ReasonCode) {
9615 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER: 9455 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9616 pr_notice(MPT3SAS_FMT 9456 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9617 "Currently an active cable with ReceptacleID %d\n", 9457 ActiveCableEventData->ReceptacleID);
9618 ioc->name, ActiveCableEventData->ReceptacleID);
9619 pr_notice("cannot be powered and devices connected\n"); 9458 pr_notice("cannot be powered and devices connected\n");
9620 pr_notice("to this active cable will not be seen\n"); 9459 pr_notice("to this active cable will not be seen\n");
9621 pr_notice("This active cable requires %d mW of power\n", 9460 pr_notice("This active cable requires %d mW of power\n",
@@ -9623,9 +9462,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9623 break; 9462 break;
9624 9463
9625 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED: 9464 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9626 pr_notice(MPT3SAS_FMT 9465 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9627 "Currently a cable with ReceptacleID %d\n", 9466 ActiveCableEventData->ReceptacleID);
9628 ioc->name, ActiveCableEventData->ReceptacleID);
9629 pr_notice( 9467 pr_notice(
9630 "is not running at optimal speed(12 Gb/s rate)\n"); 9468 "is not running at optimal speed(12 Gb/s rate)\n");
9631 break; 9469 break;
@@ -9640,8 +9478,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9640 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; 9478 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9641 fw_event = alloc_fw_event_work(sz); 9479 fw_event = alloc_fw_event_work(sz);
9642 if (!fw_event) { 9480 if (!fw_event) {
9643 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 9481 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9644 ioc->name, __FILE__, __LINE__, __func__); 9482 __FILE__, __LINE__, __func__);
9645 return 1; 9483 return 1;
9646 } 9484 }
9647 9485
@@ -9690,11 +9528,9 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9690 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, 9528 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9691 sas_expander->sas_address_parent); 9529 sas_expander->sas_address_parent);
9692 9530
9693 pr_info(MPT3SAS_FMT 9531 ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9694 "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n", 9532 sas_expander->handle, (unsigned long long)
9695 ioc->name, 9533 sas_expander->sas_address);
9696 sas_expander->handle, (unsigned long long)
9697 sas_expander->sas_address);
9698 9534
9699 spin_lock_irqsave(&ioc->sas_node_lock, flags); 9535 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9700 list_del(&sas_expander->list); 9536 list_del(&sas_expander->list);
@@ -9729,16 +9565,14 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9729 mutex_lock(&ioc->scsih_cmds.mutex); 9565 mutex_lock(&ioc->scsih_cmds.mutex);
9730 9566
9731 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { 9567 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9732 pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n", 9568 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9733 ioc->name, __func__);
9734 goto out; 9569 goto out;
9735 } 9570 }
9736 ioc->scsih_cmds.status = MPT3_CMD_PENDING; 9571 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9737 9572
9738 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); 9573 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9739 if (!smid) { 9574 if (!smid) {
9740 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 9575 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
9741 ioc->name, __func__);
9742 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; 9576 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9743 goto out; 9577 goto out;
9744 } 9578 }
@@ -9751,24 +9585,22 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9751 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; 9585 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
9752 9586
9753 if (!ioc->hide_ir_msg) 9587 if (!ioc->hide_ir_msg)
9754 pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name); 9588 ioc_info(ioc, "IR shutdown (sending)\n");
9755 init_completion(&ioc->scsih_cmds.done); 9589 init_completion(&ioc->scsih_cmds.done);
9756 mpt3sas_base_put_smid_default(ioc, smid); 9590 mpt3sas_base_put_smid_default(ioc, smid);
9757 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); 9591 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
9758 9592
9759 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { 9593 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9760 pr_err(MPT3SAS_FMT "%s: timeout\n", 9594 ioc_err(ioc, "%s: timeout\n", __func__);
9761 ioc->name, __func__);
9762 goto out; 9595 goto out;
9763 } 9596 }
9764 9597
9765 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { 9598 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9766 mpi_reply = ioc->scsih_cmds.reply; 9599 mpi_reply = ioc->scsih_cmds.reply;
9767 if (!ioc->hide_ir_msg) 9600 if (!ioc->hide_ir_msg)
9768 pr_info(MPT3SAS_FMT "IR shutdown " 9601 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
9769 "(complete): ioc_status(0x%04x), loginfo(0x%08x)\n", 9602 le16_to_cpu(mpi_reply->IOCStatus),
9770 ioc->name, le16_to_cpu(mpi_reply->IOCStatus), 9603 le32_to_cpu(mpi_reply->IOCLogInfo));
9771 le32_to_cpu(mpi_reply->IOCLogInfo));
9772 } 9604 }
9773 9605
9774 out: 9606 out:
@@ -9817,9 +9649,8 @@ static void scsih_remove(struct pci_dev *pdev)
9817 sas_target_priv_data->deleted = 1; 9649 sas_target_priv_data->deleted = 1;
9818 scsi_remove_target(&raid_device->starget->dev); 9650 scsi_remove_target(&raid_device->starget->dev);
9819 } 9651 }
9820 pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n", 9652 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9821 ioc->name, raid_device->handle, 9653 raid_device->handle, (u64)raid_device->wwid);
9822 (unsigned long long) raid_device->wwid);
9823 _scsih_raid_device_remove(ioc, raid_device); 9654 _scsih_raid_device_remove(ioc, raid_device);
9824 } 9655 }
9825 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list, 9656 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
@@ -10230,7 +10061,7 @@ scsih_scan_start(struct Scsi_Host *shost)
10230 rc = mpt3sas_port_enable(ioc); 10061 rc = mpt3sas_port_enable(ioc);
10231 10062
10232 if (rc != 0) 10063 if (rc != 0)
10233 pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name); 10064 ioc_info(ioc, "port enable: FAILED\n");
10234} 10065}
10235 10066
10236/** 10067/**
@@ -10255,9 +10086,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10255 10086
10256 if (time >= (300 * HZ)) { 10087 if (time >= (300 * HZ)) {
10257 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 10088 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10258 pr_info(MPT3SAS_FMT 10089 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10259 "port enable: FAILED with timeout (timeout=300s)\n",
10260 ioc->name);
10261 ioc->is_driver_loading = 0; 10090 ioc->is_driver_loading = 0;
10262 return 1; 10091 return 1;
10263 } 10092 }
@@ -10266,16 +10095,15 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10266 return 0; 10095 return 0;
10267 10096
10268 if (ioc->start_scan_failed) { 10097 if (ioc->start_scan_failed) {
10269 pr_info(MPT3SAS_FMT 10098 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10270 "port enable: FAILED with (ioc_status=0x%08x)\n", 10099 ioc->start_scan_failed);
10271 ioc->name, ioc->start_scan_failed);
10272 ioc->is_driver_loading = 0; 10100 ioc->is_driver_loading = 0;
10273 ioc->wait_for_discovery_to_complete = 0; 10101 ioc->wait_for_discovery_to_complete = 0;
10274 ioc->remove_host = 1; 10102 ioc->remove_host = 1;
10275 return 1; 10103 return 1;
10276 } 10104 }
10277 10105
10278 pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name); 10106 ioc_info(ioc, "port enable: SUCCESS\n");
10279 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 10107 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10280 10108
10281 if (ioc->wait_for_discovery_to_complete) { 10109 if (ioc->wait_for_discovery_to_complete) {
@@ -10586,28 +10414,22 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10586 if (ioc->is_mcpu_endpoint) { 10414 if (ioc->is_mcpu_endpoint) {
10587 /* mCPU MPI support 64K max IO */ 10415 /* mCPU MPI support 64K max IO */
10588 shost->max_sectors = 128; 10416 shost->max_sectors = 128;
10589 pr_info(MPT3SAS_FMT 10417 ioc_info(ioc, "The max_sectors value is set to %d\n",
10590 "The max_sectors value is set to %d\n", 10418 shost->max_sectors);
10591 ioc->name, shost->max_sectors);
10592 } else { 10419 } else {
10593 if (max_sectors != 0xFFFF) { 10420 if (max_sectors != 0xFFFF) {
10594 if (max_sectors < 64) { 10421 if (max_sectors < 64) {
10595 shost->max_sectors = 64; 10422 shost->max_sectors = 64;
10596 pr_warn(MPT3SAS_FMT "Invalid value %d passed " \ 10423 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
10597 "for max_sectors, range is 64 to 32767. " \ 10424 max_sectors);
10598 "Assigning value of 64.\n", \
10599 ioc->name, max_sectors);
10600 } else if (max_sectors > 32767) { 10425 } else if (max_sectors > 32767) {
10601 shost->max_sectors = 32767; 10426 shost->max_sectors = 32767;
10602 pr_warn(MPT3SAS_FMT "Invalid value %d passed " \ 10427 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
10603 "for max_sectors, range is 64 to 32767." \ 10428 max_sectors);
10604 "Assigning default value of 32767.\n", \
10605 ioc->name, max_sectors);
10606 } else { 10429 } else {
10607 shost->max_sectors = max_sectors & 0xFFFE; 10430 shost->max_sectors = max_sectors & 0xFFFE;
10608 pr_info(MPT3SAS_FMT 10431 ioc_info(ioc, "The max_sectors value is set to %d\n",
10609 "The max_sectors value is set to %d\n", 10432 shost->max_sectors);
10610 ioc->name, shost->max_sectors);
10611 } 10433 }
10612 } 10434 }
10613 } 10435 }
@@ -10627,16 +10449,16 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10627 ioc->firmware_event_thread = alloc_ordered_workqueue( 10449 ioc->firmware_event_thread = alloc_ordered_workqueue(
10628 ioc->firmware_event_name, 0); 10450 ioc->firmware_event_name, 0);
10629 if (!ioc->firmware_event_thread) { 10451 if (!ioc->firmware_event_thread) {
10630 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 10452 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10631 ioc->name, __FILE__, __LINE__, __func__); 10453 __FILE__, __LINE__, __func__);
10632 rv = -ENODEV; 10454 rv = -ENODEV;
10633 goto out_thread_fail; 10455 goto out_thread_fail;
10634 } 10456 }
10635 10457
10636 ioc->is_driver_loading = 1; 10458 ioc->is_driver_loading = 1;
10637 if ((mpt3sas_base_attach(ioc))) { 10459 if ((mpt3sas_base_attach(ioc))) {
10638 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 10460 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10639 ioc->name, __FILE__, __LINE__, __func__); 10461 __FILE__, __LINE__, __func__);
10640 rv = -ENODEV; 10462 rv = -ENODEV;
10641 goto out_attach_fail; 10463 goto out_attach_fail;
10642 } 10464 }
@@ -10657,8 +10479,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10657 10479
10658 rv = scsi_add_host(shost, &pdev->dev); 10480 rv = scsi_add_host(shost, &pdev->dev);
10659 if (rv) { 10481 if (rv) {
10660 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 10482 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10661 ioc->name, __FILE__, __LINE__, __func__); 10483 __FILE__, __LINE__, __func__);
10662 goto out_add_shost_fail; 10484 goto out_add_shost_fail;
10663 } 10485 }
10664 10486
@@ -10695,9 +10517,8 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
10695 flush_scheduled_work(); 10517 flush_scheduled_work();
10696 scsi_block_requests(shost); 10518 scsi_block_requests(shost);
10697 device_state = pci_choose_state(pdev, state); 10519 device_state = pci_choose_state(pdev, state);
10698 pr_info(MPT3SAS_FMT 10520 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
10699 "pdev=0x%p, slot=%s, entering operating state [D%d]\n", 10521 pdev, pci_name(pdev), device_state);
10700 ioc->name, pdev, pci_name(pdev), device_state);
10701 10522
10702 pci_save_state(pdev); 10523 pci_save_state(pdev);
10703 mpt3sas_base_free_resources(ioc); 10524 mpt3sas_base_free_resources(ioc);
@@ -10719,9 +10540,8 @@ scsih_resume(struct pci_dev *pdev)
10719 pci_power_t device_state = pdev->current_state; 10540 pci_power_t device_state = pdev->current_state;
10720 int r; 10541 int r;
10721 10542
10722 pr_info(MPT3SAS_FMT 10543 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
10723 "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 10544 pdev, pci_name(pdev), device_state);
10724 ioc->name, pdev, pci_name(pdev), device_state);
10725 10545
10726 pci_set_power_state(pdev, PCI_D0); 10546 pci_set_power_state(pdev, PCI_D0);
10727 pci_enable_wake(pdev, PCI_D0, 0); 10547 pci_enable_wake(pdev, PCI_D0, 0);
@@ -10753,8 +10573,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10753 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10573 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10754 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 10574 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10755 10575
10756 pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n", 10576 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
10757 ioc->name, state);
10758 10577
10759 switch (state) { 10578 switch (state) {
10760 case pci_channel_io_normal: 10579 case pci_channel_io_normal:
@@ -10791,8 +10610,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
10791 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 10610 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10792 int rc; 10611 int rc;
10793 10612
10794 pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n", 10613 ioc_info(ioc, "PCI error: slot reset callback!!\n");
10795 ioc->name);
10796 10614
10797 ioc->pci_error_recovery = 0; 10615 ioc->pci_error_recovery = 0;
10798 ioc->pdev = pdev; 10616 ioc->pdev = pdev;
@@ -10803,8 +10621,8 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
10803 10621
10804 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 10622 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
10805 10623
10806 pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name, 10624 ioc_warn(ioc, "hard reset: %s\n",
10807 (rc == 0) ? "success" : "failed"); 10625 (rc == 0) ? "success" : "failed");
10808 10626
10809 if (!rc) 10627 if (!rc)
10810 return PCI_ERS_RESULT_RECOVERED; 10628 return PCI_ERS_RESULT_RECOVERED;
@@ -10826,7 +10644,7 @@ scsih_pci_resume(struct pci_dev *pdev)
10826 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10644 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10827 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 10645 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10828 10646
10829 pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name); 10647 ioc_info(ioc, "PCI error: resume callback!!\n");
10830 10648
10831 mpt3sas_base_start_watchdog(ioc); 10649 mpt3sas_base_start_watchdog(ioc);
10832 scsi_unblock_requests(ioc->shost); 10650 scsi_unblock_requests(ioc->shost);
@@ -10842,8 +10660,7 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
10842 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10660 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10843 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 10661 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10844 10662
10845 pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n", 10663 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
10846 ioc->name);
10847 10664
10848 /* TODO - dump whatever for debugging purposes */ 10665 /* TODO - dump whatever for debugging purposes */
10849 10666
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index f8cc2677c1cd..6a8a3c09b4b1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -146,25 +146,22 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
146 u32 ioc_status; 146 u32 ioc_status;
147 147
148 if (ioc->shost_recovery || ioc->pci_error_recovery) { 148 if (ioc->shost_recovery || ioc->pci_error_recovery) {
149 pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", 149 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
150 __func__, ioc->name);
151 return -EFAULT; 150 return -EFAULT;
152 } 151 }
153 152
154 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 153 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
155 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 154 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
156 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 155 ioc_err(ioc, "failure at %s:%d/%s()!\n",
157 ioc->name, __FILE__, __LINE__, __func__); 156 __FILE__, __LINE__, __func__);
158 return -ENXIO; 157 return -ENXIO;
159 } 158 }
160 159
161 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 160 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
162 MPI2_IOCSTATUS_MASK; 161 MPI2_IOCSTATUS_MASK;
163 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 162 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
164 pr_err(MPT3SAS_FMT 163 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x) failure at %s:%d/%s()!\n",
165 "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n", 164 handle, ioc_status, __FILE__, __LINE__, __func__);
166 ioc->name, handle, ioc_status,
167 __FILE__, __LINE__, __func__);
168 return -EIO; 165 return -EIO;
169 } 166 }
170 167
@@ -310,16 +307,14 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
310 u16 wait_state_count; 307 u16 wait_state_count;
311 308
312 if (ioc->shost_recovery || ioc->pci_error_recovery) { 309 if (ioc->shost_recovery || ioc->pci_error_recovery) {
313 pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", 310 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
314 __func__, ioc->name);
315 return -EFAULT; 311 return -EFAULT;
316 } 312 }
317 313
318 mutex_lock(&ioc->transport_cmds.mutex); 314 mutex_lock(&ioc->transport_cmds.mutex);
319 315
320 if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { 316 if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
321 pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", 317 ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
322 ioc->name, __func__);
323 rc = -EAGAIN; 318 rc = -EAGAIN;
324 goto out; 319 goto out;
325 } 320 }
@@ -329,26 +324,22 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
329 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 324 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
330 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 325 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
331 if (wait_state_count++ == 10) { 326 if (wait_state_count++ == 10) {
332 pr_err(MPT3SAS_FMT 327 ioc_err(ioc, "%s: failed due to ioc not operational\n",
333 "%s: failed due to ioc not operational\n", 328 __func__);
334 ioc->name, __func__);
335 rc = -EFAULT; 329 rc = -EFAULT;
336 goto out; 330 goto out;
337 } 331 }
338 ssleep(1); 332 ssleep(1);
339 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 333 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
340 pr_info(MPT3SAS_FMT 334 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
341 "%s: waiting for operational state(count=%d)\n", 335 __func__, wait_state_count);
342 ioc->name, __func__, wait_state_count);
343 } 336 }
344 if (wait_state_count) 337 if (wait_state_count)
345 pr_info(MPT3SAS_FMT "%s: ioc is operational\n", 338 ioc_info(ioc, "%s: ioc is operational\n", __func__);
346 ioc->name, __func__);
347 339
348 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); 340 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
349 if (!smid) { 341 if (!smid) {
350 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 342 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
351 ioc->name, __func__);
352 rc = -EAGAIN; 343 rc = -EAGAIN;
353 goto out; 344 goto out;
354 } 345 }
@@ -359,9 +350,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
359 350
360 data_out_sz = sizeof(struct rep_manu_request); 351 data_out_sz = sizeof(struct rep_manu_request);
361 data_in_sz = sizeof(struct rep_manu_reply); 352 data_in_sz = sizeof(struct rep_manu_reply);
362 data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz, 353 data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
363 &data_out_dma); 354 &data_out_dma, GFP_KERNEL);
364
365 if (!data_out) { 355 if (!data_out) {
366 pr_err("failure at %s:%d/%s()!\n", __FILE__, 356 pr_err("failure at %s:%d/%s()!\n", __FILE__,
367 __LINE__, __func__); 357 __LINE__, __func__);
@@ -388,16 +378,15 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
388 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 378 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
389 data_in_sz); 379 data_in_sz);
390 380
391 dtransportprintk(ioc, pr_info(MPT3SAS_FMT 381 dtransportprintk(ioc,
392 "report_manufacture - send to sas_addr(0x%016llx)\n", 382 ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n",
393 ioc->name, (unsigned long long)sas_address)); 383 (u64)sas_address));
394 init_completion(&ioc->transport_cmds.done); 384 init_completion(&ioc->transport_cmds.done);
395 mpt3sas_base_put_smid_default(ioc, smid); 385 mpt3sas_base_put_smid_default(ioc, smid);
396 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ); 386 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
397 387
398 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { 388 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
399 pr_err(MPT3SAS_FMT "%s: timeout\n", 389 ioc_err(ioc, "%s: timeout\n", __func__);
400 ioc->name, __func__);
401 _debug_dump_mf(mpi_request, 390 _debug_dump_mf(mpi_request,
402 sizeof(Mpi2SmpPassthroughRequest_t)/4); 391 sizeof(Mpi2SmpPassthroughRequest_t)/4);
403 if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) 392 if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -405,17 +394,16 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
405 goto issue_host_reset; 394 goto issue_host_reset;
406 } 395 }
407 396
408 dtransportprintk(ioc, pr_info(MPT3SAS_FMT 397 dtransportprintk(ioc, ioc_info(ioc, "report_manufacture - complete\n"));
409 "report_manufacture - complete\n", ioc->name));
410 398
411 if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { 399 if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
412 u8 *tmp; 400 u8 *tmp;
413 401
414 mpi_reply = ioc->transport_cmds.reply; 402 mpi_reply = ioc->transport_cmds.reply;
415 403
416 dtransportprintk(ioc, pr_info(MPT3SAS_FMT 404 dtransportprintk(ioc,
417 "report_manufacture - reply data transfer size(%d)\n", 405 ioc_info(ioc, "report_manufacture - reply data transfer size(%d)\n",
418 ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength))); 406 le16_to_cpu(mpi_reply->ResponseDataLength)));
419 407
420 if (le16_to_cpu(mpi_reply->ResponseDataLength) != 408 if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
421 sizeof(struct rep_manu_reply)) 409 sizeof(struct rep_manu_reply))
@@ -439,8 +427,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
439 manufacture_reply->component_revision_id; 427 manufacture_reply->component_revision_id;
440 } 428 }
441 } else 429 } else
442 dtransportprintk(ioc, pr_info(MPT3SAS_FMT 430 dtransportprintk(ioc,
443 "report_manufacture - no reply\n", ioc->name)); 431 ioc_info(ioc, "report_manufacture - no reply\n"));
444 432
445 issue_host_reset: 433 issue_host_reset:
446 if (issue_reset) 434 if (issue_reset)
@@ -448,7 +436,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
448 out: 436 out:
449 ioc->transport_cmds.status = MPT3_CMD_NOT_USED; 437 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
450 if (data_out) 438 if (data_out)
451 pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz, 439 dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
452 data_out, data_out_dma); 440 data_out, data_out_dma);
453 441
454 mutex_unlock(&ioc->transport_cmds.mutex); 442 mutex_unlock(&ioc->transport_cmds.mutex);
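[Editor's note: besides the printk rework, this hunk also carries one of the generic DMA API conversions — pci_alloc_consistent(pdev, sz, &dma) becomes dma_alloc_coherent(&pdev->dev, sz, &dma, GFP_KERNEL), and pci_free_consistent() becomes dma_free_coherent(). A minimal before/after sketch (illustrative variable names, not taken from the patch):

	void *buf;
	dma_addr_t dma;

	/* legacy PCI DMA compat API: GFP_ATOMIC was implied */
	buf = pci_alloc_consistent(pdev, sz, &dma);
	pci_free_consistent(pdev, sz, buf, dma);

	/* generic DMA API equivalent used by this patch */
	buf = dma_alloc_coherent(&pdev->dev, sz, &dma, GFP_KERNEL);
	dma_free_coherent(&pdev->dev, sz, buf, dma);

The move from the implied GFP_ATOMIC to an explicit GFP_KERNEL is safe in this function, which may already sleep — it calls ssleep() while waiting for the IOC to reach the operational state.]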
@@ -643,8 +631,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
643 mpt3sas_port = kzalloc(sizeof(struct _sas_port), 631 mpt3sas_port = kzalloc(sizeof(struct _sas_port),
644 GFP_KERNEL); 632 GFP_KERNEL);
645 if (!mpt3sas_port) { 633 if (!mpt3sas_port) {
646 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 634 ioc_err(ioc, "failure at %s:%d/%s()!\n",
647 ioc->name, __FILE__, __LINE__, __func__); 635 __FILE__, __LINE__, __func__);
648 return NULL; 636 return NULL;
649 } 637 }
650 638
@@ -655,22 +643,21 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
655 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 643 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
656 644
657 if (!sas_node) { 645 if (!sas_node) {
658 pr_err(MPT3SAS_FMT 646 ioc_err(ioc, "%s: Could not find parent sas_address(0x%016llx)!\n",
659 "%s: Could not find parent sas_address(0x%016llx)!\n", 647 __func__, (u64)sas_address);
660 ioc->name, __func__, (unsigned long long)sas_address);
661 goto out_fail; 648 goto out_fail;
662 } 649 }
663 650
664 if ((_transport_set_identify(ioc, handle, 651 if ((_transport_set_identify(ioc, handle,
665 &mpt3sas_port->remote_identify))) { 652 &mpt3sas_port->remote_identify))) {
666 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 653 ioc_err(ioc, "failure at %s:%d/%s()!\n",
667 ioc->name, __FILE__, __LINE__, __func__); 654 __FILE__, __LINE__, __func__);
668 goto out_fail; 655 goto out_fail;
669 } 656 }
670 657
671 if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) { 658 if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
672 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 659 ioc_err(ioc, "failure at %s:%d/%s()!\n",
673 ioc->name, __FILE__, __LINE__, __func__); 660 __FILE__, __LINE__, __func__);
674 goto out_fail; 661 goto out_fail;
675 } 662 }
676 663
@@ -687,20 +674,20 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
687 } 674 }
688 675
689 if (!mpt3sas_port->num_phys) { 676 if (!mpt3sas_port->num_phys) {
690 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 677 ioc_err(ioc, "failure at %s:%d/%s()!\n",
691 ioc->name, __FILE__, __LINE__, __func__); 678 __FILE__, __LINE__, __func__);
692 goto out_fail; 679 goto out_fail;
693 } 680 }
694 681
695 if (!sas_node->parent_dev) { 682 if (!sas_node->parent_dev) {
696 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 683 ioc_err(ioc, "failure at %s:%d/%s()!\n",
697 ioc->name, __FILE__, __LINE__, __func__); 684 __FILE__, __LINE__, __func__);
698 goto out_fail; 685 goto out_fail;
699 } 686 }
700 port = sas_port_alloc_num(sas_node->parent_dev); 687 port = sas_port_alloc_num(sas_node->parent_dev);
701 if ((sas_port_add(port))) { 688 if ((sas_port_add(port))) {
702 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 689 ioc_err(ioc, "failure at %s:%d/%s()!\n",
703 ioc->name, __FILE__, __LINE__, __func__); 690 __FILE__, __LINE__, __func__);
704 goto out_fail; 691 goto out_fail;
705 } 692 }
706 693
@@ -729,17 +716,17 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
 		    mpt3sas_port->remote_identify.sas_address);
 		if (!sas_device) {
-			dfailprintk(ioc, printk(MPT3SAS_FMT
-				"failure at %s:%d/%s()!\n",
-				ioc->name, __FILE__, __LINE__, __func__));
+			dfailprintk(ioc,
+				    ioc_info(ioc, "failure at %s:%d/%s()!\n",
+					     __FILE__, __LINE__, __func__));
 			goto out_fail;
 		}
 		sas_device->pend_sas_rphy_add = 1;
 	}
 
 	if ((sas_rphy_add(rphy))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 	}
 
 	if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -861,14 +848,14 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
 	INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
 	phy = sas_phy_alloc(parent_dev, phy_index);
 	if (!phy) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 	if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
 	    &mpt3sas_phy->identify))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		sas_phy_free(phy);
 		return -1;
 	}
@@ -890,8 +877,8 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
 		phy_pg0.ProgrammedLinkRate >> 4);
 
 	if ((sas_phy_add(phy))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		sas_phy_free(phy);
 		return -1;
 	}
@@ -929,14 +916,14 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
 	INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
 	phy = sas_phy_alloc(parent_dev, phy_index);
 	if (!phy) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -1;
 	}
 	if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
 	    &mpt3sas_phy->identify))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		sas_phy_free(phy);
 		return -1;
 	}
@@ -960,8 +947,8 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
 		expander_pg1.ProgrammedLinkRate >> 4);
 
 	if ((sas_phy_add(phy))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		sas_phy_free(phy);
 		return -1;
 	}
@@ -1098,16 +1085,14 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 	u16 wait_state_count;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
-		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-			__func__, ioc->name);
+		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
 		return -EFAULT;
 	}
 
 	mutex_lock(&ioc->transport_cmds.mutex);
 
 	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1117,26 +1102,22 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
 		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-				"%s: failed due to ioc not operational\n",
-				ioc->name, __func__);
+			ioc_err(ioc, "%s: failed due to ioc not operational\n",
+				__func__);
 			rc = -EFAULT;
 			goto out;
 		}
 		ssleep(1);
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
+		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+			 __func__, wait_state_count);
 	}
 	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-			ioc->name, __func__);
+		ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1146,7 +1127,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 
 	sz = sizeof(struct phy_error_log_request) +
 	    sizeof(struct phy_error_log_reply);
-	data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+	data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+			GFP_KERNEL);
 	if (!data_out) {
 		pr_err("failure at %s:%d/%s()!\n", __FILE__,
 		    __LINE__, __func__);
@@ -1179,17 +1161,16 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 	    data_out_dma + sizeof(struct phy_error_log_request),
 	    sizeof(struct phy_error_log_reply));
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
-		ioc->name, (unsigned long long)phy->identify.sas_address,
-		phy->number));
+	dtransportprintk(ioc,
+			 ioc_info(ioc, "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+				  (u64)phy->identify.sas_address,
+				  phy->number));
 	init_completion(&ioc->transport_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 			sizeof(Mpi2SmpPassthroughRequest_t)/4);
 		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1197,16 +1178,15 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 		goto issue_host_reset;
 	}
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"phy_error_log - complete\n", ioc->name));
+	dtransportprintk(ioc, ioc_info(ioc, "phy_error_log - complete\n"));
 
 	if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
 
 		mpi_reply = ioc->transport_cmds.reply;
 
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-			"phy_error_log - reply data transfer size(%d)\n",
-			ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_error_log - reply data transfer size(%d)\n",
+					  le16_to_cpu(mpi_reply->ResponseDataLength)));
 
 		if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
 		    sizeof(struct phy_error_log_reply))
@@ -1215,9 +1195,9 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 		phy_error_log_reply = data_out +
 		    sizeof(struct phy_error_log_request);
 
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-			"phy_error_log - function_result(%d)\n",
-			ioc->name, phy_error_log_reply->function_result));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_error_log - function_result(%d)\n",
+					  phy_error_log_reply->function_result));
 
 		phy->invalid_dword_count =
 		    be32_to_cpu(phy_error_log_reply->invalid_dword);
@@ -1229,8 +1209,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 		    be32_to_cpu(phy_error_log_reply->phy_reset_problem);
 		rc = 0;
 	} else
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-			"phy_error_log - no reply\n", ioc->name));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_error_log - no reply\n"));
 
  issue_host_reset:
 	if (issue_reset)
@@ -1238,7 +1218,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	if (data_out)
-		pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+		dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma);
 
 	mutex_unlock(&ioc->transport_cmds.mutex);
 	return rc;
@@ -1273,17 +1253,16 @@ _transport_get_linkerrors(struct sas_phy *phy)
 	/* get hba phy error logs */
 	if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
 	    phy->number))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -ENXIO;
 	}
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
-		pr_info(MPT3SAS_FMT
-			"phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
-			ioc->name, phy->number,
-			le16_to_cpu(mpi_reply.IOCStatus),
-			le32_to_cpu(mpi_reply.IOCLogInfo));
+		ioc_info(ioc, "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+			 phy->number,
+			 le16_to_cpu(mpi_reply.IOCStatus),
+			 le32_to_cpu(mpi_reply.IOCLogInfo));
 
 	phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
 	phy->running_disparity_error_count =
@@ -1411,16 +1390,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 	u16 wait_state_count;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
-		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-			__func__, ioc->name);
+		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
 		return -EFAULT;
 	}
 
 	mutex_lock(&ioc->transport_cmds.mutex);
 
 	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1430,26 +1407,22 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
 		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-				"%s: failed due to ioc not operational\n",
-				ioc->name, __func__);
+			ioc_err(ioc, "%s: failed due to ioc not operational\n",
+				__func__);
 			rc = -EFAULT;
 			goto out;
 		}
 		ssleep(1);
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
+		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+			 __func__, wait_state_count);
 	}
 	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-			ioc->name, __func__);
+		ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1459,7 +1432,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 
 	sz = sizeof(struct phy_control_request) +
 	    sizeof(struct phy_control_reply);
-	data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+	data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+			GFP_KERNEL);
 	if (!data_out) {
 		pr_err("failure at %s:%d/%s()!\n", __FILE__,
 		    __LINE__, __func__);
@@ -1497,17 +1471,16 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 	    data_out_dma + sizeof(struct phy_control_request),
 	    sizeof(struct phy_control_reply));
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
-		ioc->name, (unsigned long long)phy->identify.sas_address,
-		phy->number, phy_operation));
+	dtransportprintk(ioc,
+			 ioc_info(ioc, "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+				  (u64)phy->identify.sas_address,
+				  phy->number, phy_operation));
 	init_completion(&ioc->transport_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s: timeout\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 			sizeof(Mpi2SmpPassthroughRequest_t)/4);
 		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1515,16 +1488,15 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 		goto issue_host_reset;
 	}
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"phy_control - complete\n", ioc->name));
+	dtransportprintk(ioc, ioc_info(ioc, "phy_control - complete\n"));
 
 	if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
 
 		mpi_reply = ioc->transport_cmds.reply;
 
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-			"phy_control - reply data transfer size(%d)\n",
-			ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_control - reply data transfer size(%d)\n",
+					  le16_to_cpu(mpi_reply->ResponseDataLength)));
 
 		if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
 		    sizeof(struct phy_control_reply))
@@ -1533,14 +1505,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 		phy_control_reply = data_out +
 		    sizeof(struct phy_control_request);
 
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-			"phy_control - function_result(%d)\n",
-			ioc->name, phy_control_reply->function_result));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_control - function_result(%d)\n",
+					  phy_control_reply->function_result));
 
 		rc = 0;
 	} else
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-			"phy_control - no reply\n", ioc->name));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "phy_control - no reply\n"));
 
  issue_host_reset:
 	if (issue_reset)
@@ -1548,7 +1520,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	if (data_out)
-		pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+		dma_free_coherent(&ioc->pdev->dev, sz, data_out,
+				data_out_dma);
 
 	mutex_unlock(&ioc->transport_cmds.mutex);
 	return rc;
@@ -1591,16 +1564,15 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
 	mpi_request.PhyNum = phy->number;
 
 	if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		return -ENXIO;
 	}
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
-		pr_info(MPT3SAS_FMT
-			"phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
-			ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
-			le32_to_cpu(mpi_reply.IOCLogInfo));
+		ioc_info(ioc, "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+			 phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+			 le32_to_cpu(mpi_reply.IOCLogInfo));
 
 	return 0;
 }
@@ -1647,23 +1619,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
 	    sizeof(Mpi2SasIOUnit0PhyData_t));
 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg0) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENOMEM;
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
 	    sas_iounit_pg0, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENXIO;
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -EIO;
 		goto out;
 	}
@@ -1672,10 +1644,8 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
 	for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
 		if (sas_iounit_pg0->PhyData[i].PortFlags &
 		    MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
-			pr_err(MPT3SAS_FMT "discovery is active on " \
-			    "port = %d, phy = %d: unable to enable/disable "
-			    "phys, try again later!\n", ioc->name,
-			    sas_iounit_pg0->PhyData[i].Port, i);
+			ioc_err(ioc, "discovery is active on port = %d, phy = %d: unable to enable/disable phys, try again later!\n",
+				sas_iounit_pg0->PhyData[i].Port, i);
 			discovery_active = 1;
 		}
 	}
@@ -1690,23 +1660,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg1) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENOMEM;
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
 	    sas_iounit_pg1, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENXIO;
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -EIO;
 		goto out;
 	}
@@ -1798,23 +1768,23 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
 	if (!sas_iounit_pg1) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENOMEM;
 		goto out;
 	}
 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
 	    sas_iounit_pg1, sz))) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENXIO;
 		goto out;
 	}
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 	    MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -EIO;
 		goto out;
 	}
@@ -1833,8 +1803,8 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
 
 	if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
 	    sz)) {
-		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-			ioc->name, __FILE__, __LINE__, __func__);
+		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+			__FILE__, __LINE__, __func__);
 		rc = -ENXIO;
 		goto out;
 	}
@@ -1922,8 +1892,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 	unsigned int reslen = 0;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
-		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-			__func__, ioc->name);
+		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
 		rc = -EFAULT;
 		goto job_done;
 	}
@@ -1933,8 +1902,8 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 		goto job_done;
 
 	if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-		pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
-		    __func__);
+		ioc_err(ioc, "%s: transport_cmds in use\n",
+			__func__);
 		rc = -EAGAIN;
 		goto out;
 	}
@@ -1959,26 +1928,22 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
 		if (wait_state_count++ == 10) {
-			pr_err(MPT3SAS_FMT
-				"%s: failed due to ioc not operational\n",
-				ioc->name, __func__);
+			ioc_err(ioc, "%s: failed due to ioc not operational\n",
+				__func__);
 			rc = -EFAULT;
 			goto unmap_in;
 		}
 		ssleep(1);
 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-		pr_info(MPT3SAS_FMT
-			"%s: waiting for operational state(count=%d)\n",
-			ioc->name, __func__, wait_state_count);
+		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+			 __func__, wait_state_count);
 	}
 	if (wait_state_count)
-		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-			ioc->name, __func__);
+		ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
 	smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
 	if (!smid) {
-		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-			ioc->name, __func__);
+		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
 		rc = -EAGAIN;
 		goto unmap_in;
 	}
@@ -1999,16 +1964,15 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 	ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in,
 	    dma_len_in - 4);
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s - sending smp request\n", ioc->name, __func__));
+	dtransportprintk(ioc,
+			 ioc_info(ioc, "%s: sending smp request\n", __func__));
 
 	init_completion(&ioc->transport_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-		pr_err(MPT3SAS_FMT "%s : timeout\n",
-			__func__, ioc->name);
+		ioc_err(ioc, "%s: timeout\n", __func__);
 		_debug_dump_mf(mpi_request,
 			sizeof(Mpi2SmpPassthroughRequest_t)/4);
 		if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) {
@@ -2018,12 +1982,11 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 		}
 	}
 
-	dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-		"%s - complete\n", ioc->name, __func__));
+	dtransportprintk(ioc, ioc_info(ioc, "%s - complete\n", __func__));
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) {
-		dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-			"%s - no reply\n", ioc->name, __func__));
+		dtransportprintk(ioc,
+				 ioc_info(ioc, "%s: no reply\n", __func__));
 		rc = -ENXIO;
 		goto unmap_in;
 	}
@@ -2031,9 +1994,9 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 	mpi_reply = ioc->transport_cmds.reply;
 
 	dtransportprintk(ioc,
-		pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n",
-		ioc->name, __func__,
-		le16_to_cpu(mpi_reply->ResponseDataLength)));
+		ioc_info(ioc, "%s: reply data transfer size(%d)\n",
+			 __func__,
+			 le16_to_cpu(mpi_reply->ResponseDataLength)));
 
 	memcpy(job->reply, mpi_reply, sizeof(*mpi_reply));
 	job->reply_len = sizeof(*mpi_reply);
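The hunks above all follow the same substitution: the open-coded MPT3SAS_FMT/ioc->name pairing (which several call sites got wrong, passing __func__ and ioc->name in the reverse order) gives way to per-IOC logging helpers. A minimal sketch of how such helpers can be defined, assuming a pr_err/pr_warn/pr_info backend as in mpt3sas.h:

/* Per-adapter logging: prefix every message with the IOC name once,
 * so callers no longer thread ioc->name through each format string. */
#define ioc_err(ioc, fmt, ...) \
	pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
#define ioc_warn(ioc, fmt, ...) \
	pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
#define ioc_info(ioc, fmt, ...) \
	pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)

Every call site shrinks by one argument, and the format strings end up on a single line, which keeps them greppable.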
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index cae7c1eaef34..6ac453fd5937 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -72,8 +72,7 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
 	u16 sz, event_data_sz;
 	unsigned long flags;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
-	    ioc->name, __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
 	    sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
@@ -85,23 +84,23 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
 	mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
 	memcpy(&mpi_reply->EventData, event_data,
 	    sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: add to driver event log\n",
-	    ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: add to driver event log\n",
+				    __func__));
 	mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
 	kfree(mpi_reply);
  out:
 
 	/* clearing the diag_trigger_active flag */
 	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: clearing diag_trigger_active flag\n",
-	    ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: clearing diag_trigger_active flag\n",
+				    __func__));
 	ioc->diag_trigger_active = 0;
 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -115,22 +114,22 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
 {
 	u8 issue_reset = 0;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
-	    ioc->name, __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
 	/* release the diag buffer trace */
 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
 	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-		dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: release trace diag buffer\n", ioc->name, __func__));
+		dTriggerDiagPrintk(ioc,
+				   ioc_info(ioc, "%s: release trace diag buffer\n",
+					    __func__));
 		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
 		    &issue_reset);
 	}
 
 	_mpt3sas_raise_sigio(ioc, event_data);
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -168,9 +167,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
 
  by_pass_checks:
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: enter - trigger_bitmask = 0x%08x\n",
-	    ioc->name, __func__, trigger_bitmask));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: enter - trigger_bitmask = 0x%08x\n",
+				    __func__, trigger_bitmask));
 
 	/* don't send trigger if an trigger is currently active */
 	if (ioc->diag_trigger_active) {
@@ -182,9 +181,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
 	if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
 		found_match = 1;
 		ioc->diag_trigger_active = 1;
-		dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: setting diag_trigger_active flag\n",
-		    ioc->name, __func__));
+		dTriggerDiagPrintk(ioc,
+				   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+					    __func__));
 	}
 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 
@@ -202,8 +201,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
 	mpt3sas_send_trigger_data_event(ioc, &event_data);
 
  out:
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -239,9 +238,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
 		return;
 	}
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
-	    ioc->name, __func__, event, log_entry_qualifier));
+	dTriggerDiagPrintk(ioc,
			   ioc_info(ioc, "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
				    __func__, event, log_entry_qualifier));
 
 	/* don't send trigger if an trigger is currently active */
 	if (ioc->diag_trigger_active) {
@@ -263,26 +262,26 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
 		}
 		found_match = 1;
 		ioc->diag_trigger_active = 1;
-		dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-		    "%s: setting diag_trigger_active flag\n",
-		    ioc->name, __func__));
+		dTriggerDiagPrintk(ioc,
+				   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+					    __func__));
 	}
 	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 
 	if (!found_match)
 		goto out;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: setting diag_trigger_active flag\n",
-	    ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+				    __func__));
 	memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
 	event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
 	event_data.u.event.EventValue = event;
 	event_data.u.event.LogEntryQualifier = log_entry_qualifier;
 	mpt3sas_send_trigger_data_event(ioc, &event_data);
  out:
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -319,9 +318,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
 		return;
 	}
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
-	    ioc->name, __func__, sense_key, asc, ascq));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+				    __func__, sense_key, asc, ascq));
 
 	/* don't send trigger if an trigger is currently active */
 	if (ioc->diag_trigger_active) {
@@ -347,9 +346,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
 	if (!found_match)
 		goto out;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: setting diag_trigger_active flag\n",
-	    ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+				    __func__));
 	memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
 	event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
 	event_data.u.scsi.SenseKey = sense_key;
@@ -357,8 +356,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
 	event_data.u.scsi.ASCQ = ascq;
 	mpt3sas_send_trigger_data_event(ioc, &event_data);
  out:
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
 
 /**
@@ -393,9 +392,9 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
 		return;
 	}
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
-	    ioc->name, __func__, ioc_status, loginfo));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+				    __func__, ioc_status, loginfo));
 
 	/* don't send trigger if an trigger is currently active */
 	if (ioc->diag_trigger_active) {
@@ -420,15 +419,15 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
 	if (!found_match)
 		goto out;
 
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-	    "%s: setting diag_trigger_active flag\n",
-	    ioc->name, __func__));
+	dTriggerDiagPrintk(ioc,
+			   ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+				    __func__));
 	memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
 	event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
 	event_data.u.mpi.IOCStatus = ioc_status;
 	event_data.u.mpi.IocLogInfo = loginfo;
 	mpt3sas_send_trigger_data_event(ioc, &event_data);
  out:
-	dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-	    __func__));
+	dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+					 __func__));
 }
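The dTriggerDiagPrintk() wrapper used throughout this file gates the wrapped statement on the adapter's logging level, so the ioc_info() conversion only changes what runs inside the gate. A sketch of that gating pattern, assuming a logging_level bitmask and an MPT_DEBUG_TRIGGER_DIAG bit as in mpt3sas_debug.h (exact definition paraphrased, not quoted):

/* Run CMD only when trigger-diag logging is enabled for this IOC. */
#define dTriggerDiagPrintk(IOC, CMD)				\
do {								\
	if ((IOC)->logging_level & MPT_DEBUG_TRIGGER_DIAG)	\
		CMD;						\
} while (0)

Because CMD is evaluated lazily, the ioc_info() arguments cost nothing when trigger-diag logging is off.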
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index b4927f2b7677..cc07ba41f507 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -127,20 +127,17 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
 		return;
 
 	if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "globally as drives are exposed\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as drives are exposed\n");
 		return;
 	}
 	if (mpt3sas_get_num_volumes(ioc) > 1) {
 		_warpdrive_disable_ddio(ioc);
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "globally as number of drives > 1\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as number of drives > 1\n");
 		return;
 	}
 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
 	    &num_pds)) || !num_pds) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "Failure in computing number of drives\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in computing number of drives\n");
 		return;
 	}
 
@@ -148,15 +145,13 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
 	    sizeof(Mpi2RaidVol0PhysDisk_t));
 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
 	if (!vol_pg0) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "Memory allocation failure for RVPG0\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled Memory allocation failure for RVPG0\n");
 		return;
 	}
 
 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "Failure in retrieving RVPG0\n", ioc->name);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in retrieving RVPG0\n");
 		kfree(vol_pg0);
 		return;
 	}
@@ -166,10 +161,8 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
 	 * assumed for WARPDRIVE, disable direct I/O
 	 */
 	if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
-		pr_warn(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "for the drive with handle(0x%04x): num_mem=%d, "
-		    "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
-		    num_pds, MPT_MAX_WARPDRIVE_PDS);
+		ioc_warn(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): num_mem=%d, max_mem_allowed=%d\n",
+			 raid_device->handle, num_pds, MPT_MAX_WARPDRIVE_PDS);
 		kfree(vol_pg0);
 		return;
 	}
@@ -179,22 +172,18 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
 		    vol_pg0->PhysDisk[count].PhysDiskNum) ||
 		    le16_to_cpu(pd_pg0.DevHandle) ==
 		    MPT3SAS_INVALID_DEVICE_HANDLE) {
-			pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
-			    "disabled for the drive with handle(0x%04x) member"
-			    "handle retrieval failed for member number=%d\n",
-			    ioc->name, raid_device->handle,
-			    vol_pg0->PhysDisk[count].PhysDiskNum);
+			ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle retrieval failed for member number=%d\n",
+				 raid_device->handle,
+				 vol_pg0->PhysDisk[count].PhysDiskNum);
 			goto out_error;
 		}
 		/* Disable direct I/O if member drive lba exceeds 4 bytes */
 		dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
 		if (dev_max_lba >> 32) {
-			pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
-			    "disabled for the drive with handle(0x%04x) member"
-			    " handle (0x%04x) unsupported max lba 0x%016llx\n",
-			    ioc->name, raid_device->handle,
-			    le16_to_cpu(pd_pg0.DevHandle),
-			    (unsigned long long)dev_max_lba);
+			ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle (0x%04x) unsupported max lba 0x%016llx\n",
+				 raid_device->handle,
+				 le16_to_cpu(pd_pg0.DevHandle),
+				 (u64)dev_max_lba);
 			goto out_error;
 		}
 
@@ -206,41 +195,36 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
 	 * not RAID0
 	 */
 	if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "for the drive with handle(0x%04x): type=%d, "
-		    "s_sz=%uK, blk_size=%u\n", ioc->name,
-		    raid_device->handle, raid_device->volume_type,
-		    (le32_to_cpu(vol_pg0->StripeSize) *
-		    le16_to_cpu(vol_pg0->BlockSize)) / 1024,
-		    le16_to_cpu(vol_pg0->BlockSize));
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): type=%d, s_sz=%uK, blk_size=%u\n",
+			 raid_device->handle, raid_device->volume_type,
+			 (le32_to_cpu(vol_pg0->StripeSize) *
+			  le16_to_cpu(vol_pg0->BlockSize)) / 1024,
+			 le16_to_cpu(vol_pg0->BlockSize));
 		goto out_error;
 	}
 
 	stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
 	stripe_exp = find_first_bit(&stripe_sz, 32);
 	if (stripe_exp == 32) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
-		    ioc->name, raid_device->handle,
-		    (le32_to_cpu(vol_pg0->StripeSize) *
-		    le16_to_cpu(vol_pg0->BlockSize)) / 1024);
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+			 raid_device->handle,
+			 (le32_to_cpu(vol_pg0->StripeSize) *
+			  le16_to_cpu(vol_pg0->BlockSize)) / 1024);
 		goto out_error;
 	}
 	raid_device->stripe_exponent = stripe_exp;
 	block_sz = le16_to_cpu(vol_pg0->BlockSize);
 	block_exp = find_first_bit(&block_sz, 16);
 	if (block_exp == 16) {
-		pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-		    "for the drive with handle(0x%04x) invalid block sz %u\n",
-		    ioc->name, raid_device->handle,
-		    le16_to_cpu(vol_pg0->BlockSize));
+		ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid block sz %u\n",
+			 raid_device->handle, le16_to_cpu(vol_pg0->BlockSize));
 		goto out_error;
 	}
 	raid_device->block_exponent = block_exp;
 	raid_device->direct_io_enabled = 1;
 
-	pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is Enabled for the drive"
-	    " with handle(0x%04x)\n", ioc->name, raid_device->handle);
+	ioc_info(ioc, "WarpDrive : Direct IO is Enabled for the drive with handle(0x%04x)\n",
+		 raid_device->handle);
 	/*
 	 * WARPDRIVE: Though the following fields are not used for direct IO,
 	 * stored for future purpose:
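The stripe_exp/block_exp checks above rely on StripeSize and BlockSize being powers of two: find_first_bit() returns the index of the lowest set bit, which for a power of two is its base-2 logarithm, and returns the bitmap length (32 or 16 here) when the value is zero. An illustrative fragment, not driver code, using a hypothetical 64K stripe:

	unsigned long stripe_sz = 64 * 1024;	/* hypothetical stripe size */
	unsigned int stripe_exp = find_first_bit(&stripe_sz, 32);

	if (stripe_exp == 32)
		return;		/* zero stripe size: leave direct I/O off */
	/* otherwise (lba >> stripe_exp) locates a stripe with a shift
	 * instead of a division on the direct I/O fast path */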
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 8c91637cd598..3ac34373746c 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -403,29 +403,14 @@ static int pci_go_64(struct pci_dev *pdev)
 {
 	int rc;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-			if (rc) {
-				dev_printk(KERN_ERR, &pdev->dev,
-					   "64-bit DMA enable failed\n");
-				return rc;
-			}
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (rc) {
 			dev_printk(KERN_ERR, &pdev->dev,
 				   "32-bit DMA enable failed\n");
 			return rc;
 		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "32-bit consistent DMA enable failed\n");
-			return rc;
-		}
 	}
 
 	return rc;
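The rewritten pci_go_64() is the canonical form of probe-time DMA mask setup: dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, so the old pci_set_dma_mask()/pci_set_consistent_dma_mask() ladder collapses to a 64-bit attempt with a 32-bit fallback. The same shape works for any PCI driver; a sketch with a hypothetical mydev_setup_dma():

static int mydev_setup_dma(struct pci_dev *pdev)
{
	/* Prefer 64-bit DMA addressing, fall back to 32-bit. */
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_err(&pdev->dev, "no usable DMA configuration\n");
	return rc;
}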
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index cff43bd9f675..3df1428df317 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -336,13 +336,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
 	 * DMA-map SMP request, response buffers
 	 */
 	sg_req = &task->smp_task.smp_req;
-	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+	elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE);
 	if (!elem)
 		return -ENOMEM;
 	req_len = sg_dma_len(sg_req);
 
 	sg_resp = &task->smp_task.smp_resp;
-	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE);
 	if (!elem) {
 		rc = -ENOMEM;
 		goto err_out;
@@ -416,10 +416,10 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
 
 err_out_2:
 	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
-		     PCI_DMA_FROMDEVICE);
+		     DMA_FROM_DEVICE);
 err_out:
 	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
-		     PCI_DMA_TODEVICE);
+		     DMA_TO_DEVICE);
 	return rc;
 }
 
@@ -904,9 +904,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
 	switch (task->task_proto) {
 	case SAS_PROTOCOL_SMP:
 		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
-			     PCI_DMA_FROMDEVICE);
+			     DMA_FROM_DEVICE);
 		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
-			     PCI_DMA_TODEVICE);
+			     DMA_TO_DEVICE);
 		break;
 
 	case SAS_PROTOCOL_SATA:
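The PCI_DMA_* constants are thin aliases of enum dma_data_direction, which is why these substitutions are purely mechanical:

	/* legacy PCI DMA API	->	generic DMA API	     */
	/* PCI_DMA_BIDIRECTIONAL ->	DMA_BIDIRECTIONAL    */
	/* PCI_DMA_TODEVICE	->	DMA_TO_DEVICE	     */
	/* PCI_DMA_FROMDEVICE	->	DMA_FROM_DEVICE	     */
	/* PCI_DMA_NONE		->	DMA_NONE	     */

	/* Sketch of the required pairing: unmap with the same device,
	 * scatterlist, original nents and direction as the map call. */
	int n = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!n)
		return -ENOMEM;
	/* ... hardware consumes the buffer ... */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);

The hunks above preserve exactly that pairing while swapping the constant names.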
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index b3cd9a6b1d30..2458974d1af6 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,8 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 
 	case RESOURCE_UNCACHED_MEMORY:
 		size = round_up(size, 8);
-		res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
-						       &res->bus_addr);
+		res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
+				&res->bus_addr, GFP_KERNEL);
 		if (!res->virt_addr) {
 			dev_err(&mhba->pdev->dev,
 				"unable to allocate consistent mem,"
@@ -175,7 +175,7 @@ static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
 	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
 		switch (res->type) {
 		case RESOURCE_UNCACHED_MEMORY:
-			pci_free_consistent(mhba->pdev, res->size,
+			dma_free_coherent(&mhba->pdev->dev, res->size,
 					    res->virt_addr, res->bus_addr);
 			break;
 		case RESOURCE_CACHED_MEMORY:
@@ -211,14 +211,14 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 	dma_addr_t busaddr;
 
 	sg = scsi_sglist(scmd);
-	*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
-			       (int) scmd->sc_data_direction);
+	*sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+			       scmd->sc_data_direction);
 	if (*sg_count > mhba->max_sge) {
 		dev_err(&mhba->pdev->dev,
 			"sg count[0x%x] is bigger than max sg[0x%x].\n",
 			*sg_count, mhba->max_sge);
-		pci_unmap_sg(mhba->pdev, sg, sgnum,
-			     (int) scmd->sc_data_direction);
+		dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+			     scmd->sc_data_direction);
 		return -1;
 	}
 	for (i = 0; i < *sg_count; i++) {
@@ -246,7 +246,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
246 if (size == 0) 246 if (size == 0)
247 return 0; 247 return 0;
248 248
249 virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr); 249 virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
250 GFP_KERNEL);
250 if (!virt_addr) 251 if (!virt_addr)
251 return -1; 252 return -1;
252 253
@@ -274,8 +275,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
274 } 275 }
275 INIT_LIST_HEAD(&cmd->queue_pointer); 276 INIT_LIST_HEAD(&cmd->queue_pointer);
276 277
277 cmd->frame = pci_alloc_consistent(mhba->pdev, 278 cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
278 mhba->ib_max_size, &cmd->frame_phys); 279 &cmd->frame_phys, GFP_KERNEL);
279 if (!cmd->frame) { 280 if (!cmd->frame) {
280 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" 281 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
281 " frame,size = %d.\n", mhba->ib_max_size); 282 " frame,size = %d.\n", mhba->ib_max_size);
@@ -287,7 +288,7 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
287 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { 288 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
288 dev_err(&mhba->pdev->dev, "failed to allocate memory" 289 dev_err(&mhba->pdev->dev, "failed to allocate memory"
289 " for internal frame\n"); 290 " for internal frame\n");
290 pci_free_consistent(mhba->pdev, mhba->ib_max_size, 291 dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
291 cmd->frame, cmd->frame_phys); 292 cmd->frame, cmd->frame_phys);
292 kfree(cmd); 293 kfree(cmd);
293 return NULL; 294 return NULL;
@@ -313,10 +314,10 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
313 phy_addr = (dma_addr_t) m_sg->baseaddr_l | 314 phy_addr = (dma_addr_t) m_sg->baseaddr_l |
314 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16); 315 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
315 316
316 pci_free_consistent(mhba->pdev, size, cmd->data_buf, 317 dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
317 phy_addr); 318 phy_addr);
318 } 319 }
319 pci_free_consistent(mhba->pdev, mhba->ib_max_size, 320 dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
320 cmd->frame, cmd->frame_phys); 321 cmd->frame, cmd->frame_phys);
321 kfree(cmd); 322 kfree(cmd);
322 } 323 }
@@ -663,16 +664,17 @@ static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
663 } 664 }
664} 665}
665 666
666static unsigned int mvumi_pci_set_master(struct pci_dev *pdev) 667static int mvumi_pci_set_master(struct pci_dev *pdev)
667{ 668{
668 unsigned int ret = 0; 669 int ret = 0;
670
669 pci_set_master(pdev); 671 pci_set_master(pdev);
670 672
671 if (IS_DMA64) { 673 if (IS_DMA64) {
672 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 674 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
673 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 675 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
674 } else 676 } else
675 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 677 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
676 678
677 return ret; 679 return ret;
678} 680}
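[ The helper above preserves the old 64-to-32-bit fallback semantics on
  top of dma_set_mask().  Where the streaming and coherent masks must be
  set together, dma_set_mask_and_coherent() is the usual single call; a
  sketch of the equivalent fallback, same pdev:

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
]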
@@ -771,7 +773,7 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
771 mvumi_free_cmds(mhba); 773 mvumi_free_cmds(mhba);
772 mvumi_release_mem_resource(mhba); 774 mvumi_release_mem_resource(mhba);
773 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); 775 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
774 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE, 776 dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
775 mhba->handshake_page, mhba->handshake_page_phys); 777 mhba->handshake_page, mhba->handshake_page_phys);
776 kfree(mhba->regs); 778 kfree(mhba->regs);
777 pci_release_regions(mhba->pdev); 779 pci_release_regions(mhba->pdev);
@@ -1339,9 +1341,9 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1339 } 1341 }
1340 1342
1341 if (scsi_bufflen(scmd)) 1343 if (scsi_bufflen(scmd))
1342 pci_unmap_sg(mhba->pdev, scsi_sglist(scmd), 1344 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
1343 scsi_sg_count(scmd), 1345 scsi_sg_count(scmd),
1344 (int) scmd->sc_data_direction); 1346 scmd->sc_data_direction);
1345 cmd->scmd->scsi_done(scmd); 1347 cmd->scmd->scsi_done(scmd);
1346 mvumi_return_cmd(mhba, cmd); 1348 mvumi_return_cmd(mhba, cmd);
1347} 1349}
@@ -2148,9 +2150,9 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2148 scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16); 2150 scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2149 scmd->SCp.ptr = NULL; 2151 scmd->SCp.ptr = NULL;
2150 if (scsi_bufflen(scmd)) { 2152 if (scsi_bufflen(scmd)) {
2151 pci_unmap_sg(mhba->pdev, scsi_sglist(scmd), 2153 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
2152 scsi_sg_count(scmd), 2154 scsi_sg_count(scmd),
2153 (int)scmd->sc_data_direction); 2155 scmd->sc_data_direction);
2154 } 2156 }
2155 mvumi_return_cmd(mhba, cmd); 2157 mvumi_return_cmd(mhba, cmd);
2156 spin_unlock_irqrestore(mhba->shost->host_lock, flags); 2158 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
@@ -2362,8 +2364,8 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
2362 ret = -ENOMEM; 2364 ret = -ENOMEM;
2363 goto fail_alloc_mem; 2365 goto fail_alloc_mem;
2364 } 2366 }
2365 mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE, 2367 mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
2366 &mhba->handshake_page_phys); 2368 HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
2367 if (!mhba->handshake_page) { 2369 if (!mhba->handshake_page) {
2368 dev_err(&mhba->pdev->dev, 2370 dev_err(&mhba->pdev->dev,
2369 "failed to allocate memory for handshake\n"); 2371 "failed to allocate memory for handshake\n");
@@ -2383,7 +2385,7 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
2383 2385
2384fail_ready_state: 2386fail_ready_state:
2385 mvumi_release_mem_resource(mhba); 2387 mvumi_release_mem_resource(mhba);
2386 pci_free_consistent(mhba->pdev, HSP_MAX_SIZE, 2388 dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
2387 mhba->handshake_page, mhba->handshake_page_phys); 2389 mhba->handshake_page, mhba->handshake_page_phys);
2388fail_alloc_page: 2390fail_alloc_page:
2389 kfree(mhba->regs); 2391 kfree(mhba->regs);
@@ -2480,20 +2482,9 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2480 if (ret) 2482 if (ret)
2481 return ret; 2483 return ret;
2482 2484
2483 pci_set_master(pdev); 2485 ret = mvumi_pci_set_master(pdev);
2484 2486 if (ret)
2485 if (IS_DMA64) { 2487 goto fail_set_dma_mask;
2486 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2487 if (ret) {
2488 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2489 if (ret)
2490 goto fail_set_dma_mask;
2491 }
2492 } else {
2493 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2494 if (ret)
2495 goto fail_set_dma_mask;
2496 }
2497 2488
2498 host = scsi_host_alloc(&mvumi_template, sizeof(*mhba)); 2489 host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2499 if (!host) { 2490 if (!host) {
@@ -2627,19 +2618,11 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2627 dev_err(&pdev->dev, "enable device failed\n"); 2618 dev_err(&pdev->dev, "enable device failed\n");
2628 return ret; 2619 return ret;
2629 } 2620 }
2630 pci_set_master(pdev); 2621
2631 if (IS_DMA64) { 2622 ret = mvumi_pci_set_master(pdev);
2632 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2623 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2633 if (ret) { 2624 if (ret)
2634 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2625 goto fail;
2635 if (ret)
2636 goto fail;
2637 }
2638 } else {
2639 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2640 if (ret)
2641 goto fail;
2642 }
2643 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME); 2626 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2644 if (ret) 2627 if (ret)
2645 goto fail; 2628 goto fail;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
new file mode 100644
index 000000000000..aeb282f617c5
--- /dev/null
+++ b/drivers/scsi/myrb.c
@@ -0,0 +1,3656 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4 *
5 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
6 *
7 * Based on the original DAC960 driver,
8 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/pci.h>
18#include <linux/raid_class.h>
19#include <asm/unaligned.h>
20#include <scsi/scsi.h>
21#include <scsi/scsi_host.h>
22#include <scsi/scsi_device.h>
23#include <scsi/scsi_cmnd.h>
24#include <scsi/scsi_tcq.h>
25#include "myrb.h"
26
27static struct raid_template *myrb_raid_template;
28
29static void myrb_monitor(struct work_struct *work);
30static inline void myrb_translate_devstate(void *DeviceState);
31
32static inline int myrb_logical_channel(struct Scsi_Host *shost)
33{
34 return shost->max_channel - 1;
35}
36
37static struct myrb_devstate_name_entry {
38 enum myrb_devstate state;
39 const char *name;
40} myrb_devstate_name_list[] = {
41 { MYRB_DEVICE_DEAD, "Dead" },
42 { MYRB_DEVICE_WO, "WriteOnly" },
43 { MYRB_DEVICE_ONLINE, "Online" },
44 { MYRB_DEVICE_CRITICAL, "Critical" },
45 { MYRB_DEVICE_STANDBY, "Standby" },
46 { MYRB_DEVICE_OFFLINE, "Offline" },
47};
48
49static const char *myrb_devstate_name(enum myrb_devstate state)
50{
51 struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
52 int i;
53
54 for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
55 if (entry[i].state == state)
56 return entry[i].name;
57 }
58 return "Unknown";
59}
60
61static struct myrb_raidlevel_name_entry {
62 enum myrb_raidlevel level;
63 const char *name;
64} myrb_raidlevel_name_list[] = {
65 { MYRB_RAID_LEVEL0, "RAID0" },
66 { MYRB_RAID_LEVEL1, "RAID1" },
67 { MYRB_RAID_LEVEL3, "RAID3" },
68 { MYRB_RAID_LEVEL5, "RAID5" },
69 { MYRB_RAID_LEVEL6, "RAID6" },
70 { MYRB_RAID_JBOD, "JBOD" },
71};
72
73static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
74{
75 struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
76 int i;
77
78 for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
79 if (entry[i].level == level)
80 return entry[i].name;
81 }
82 return NULL;
83}
84
85/**
86 * myrb_create_mempools - allocates auxiliary data structures
87 *
88 * Return: true on success, false otherwise.
89 */
90static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
91{
92 size_t elem_size, elem_align;
93
94 elem_align = sizeof(struct myrb_sge);
95 elem_size = cb->host->sg_tablesize * elem_align;
96 cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
97 elem_size, elem_align, 0);
98 if (cb->sg_pool == NULL) {
99 shost_printk(KERN_ERR, cb->host,
100 "Failed to allocate SG pool\n");
101 return false;
102 }
103
104 cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
105 sizeof(struct myrb_dcdb),
106 sizeof(unsigned int), 0);
107 if (!cb->dcdb_pool) {
108 dma_pool_destroy(cb->sg_pool);
109 cb->sg_pool = NULL;
110 shost_printk(KERN_ERR, cb->host,
111 "Failed to allocate DCDB pool\n");
112 return false;
113 }
114
115 snprintf(cb->work_q_name, sizeof(cb->work_q_name),
116 "myrb_wq_%d", cb->host->host_no);
117 cb->work_q = create_singlethread_workqueue(cb->work_q_name);
118 if (!cb->work_q) {
119 dma_pool_destroy(cb->dcdb_pool);
120 cb->dcdb_pool = NULL;
121 dma_pool_destroy(cb->sg_pool);
122 cb->sg_pool = NULL;
123 shost_printk(KERN_ERR, cb->host,
124 "Failed to create workqueue\n");
125 return false;
126 }
127
128 /*
129 * Initialize the Monitoring Timer.
130 */
131 INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
132 queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
133
134 return true;
135}
136
137/**
138 * myrb_destroy_mempools - tears down the memory pools for the controller
139 */
140static void myrb_destroy_mempools(struct myrb_hba *cb)
141{
142 cancel_delayed_work_sync(&cb->monitor_work);
143 destroy_workqueue(cb->work_q);
144
145 dma_pool_destroy(cb->sg_pool);
146 dma_pool_destroy(cb->dcdb_pool);
147}
148
149/**
150 * myrb_reset_cmd - reset command block
151 */
152static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
153{
154 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
155
156 memset(mbox, 0, sizeof(union myrb_cmd_mbox));
157 cmd_blk->status = 0;
158}
159
160/**
161 * myrb_qcmd - queues command block for execution
162 */
163static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
164{
165 void __iomem *base = cb->io_base;
166 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
167 union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
168
169 cb->write_cmd_mbox(next_mbox, mbox);
170 if (cb->prev_cmd_mbox1->words[0] == 0 ||
171 cb->prev_cmd_mbox2->words[0] == 0)
172 cb->get_cmd_mbox(base);
173 cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
174 cb->prev_cmd_mbox1 = next_mbox;
175 if (++next_mbox > cb->last_cmd_mbox)
176 next_mbox = cb->first_cmd_mbox;
177 cb->next_cmd_mbox = next_mbox;
178}
179
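/*
 * Sketch of the ring logic above: next_cmd_mbox wraps from last_cmd_mbox
 * back to first_cmd_mbox, and the two most recently written slots are
 * remembered in prev_cmd_mbox1/prev_cmd_mbox2.  A cleared words[0] marks
 * a slot the controller has already consumed, in which case the
 * get_cmd_mbox callback is invoked to prompt the controller to rescan
 * the memory mailbox array for the new command.
 */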
180/**
181 * myrb_exec_cmd - executes command block and waits for completion.
182 *
183 * Return: command status
184 */
185static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
186 struct myrb_cmdblk *cmd_blk)
187{
188 DECLARE_COMPLETION_ONSTACK(cmpl);
189 unsigned long flags;
190
191 cmd_blk->completion = &cmpl;
192
193 spin_lock_irqsave(&cb->queue_lock, flags);
194 cb->qcmd(cb, cmd_blk);
195 spin_unlock_irqrestore(&cb->queue_lock, flags);
196
197 WARN_ON(in_interrupt());
198 wait_for_completion(&cmpl);
199 return cmd_blk->status;
200}
201
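/*
 * Usage sketch, as the type 3 helpers below demonstrate: fill in
 * cmd_blk->mbox under the owning mutex, then
 *
 *	status = myrb_exec_cmd(cb, cmd_blk);
 *
 * queues the block and sleeps on the on-stack completion until the
 * interrupt handler posts the controller status.
 */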
202/**
203 * myrb_exec_type3 - executes a type 3 command and waits for completion.
204 *
205 * Return: command status
206 */
207static unsigned short myrb_exec_type3(struct myrb_hba *cb,
208 enum myrb_cmd_opcode op, dma_addr_t addr)
209{
210 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
211 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
212 unsigned short status;
213
214 mutex_lock(&cb->dcmd_mutex);
215 myrb_reset_cmd(cmd_blk);
216 mbox->type3.id = MYRB_DCMD_TAG;
217 mbox->type3.opcode = op;
218 mbox->type3.addr = addr;
219 status = myrb_exec_cmd(cb, cmd_blk);
220 mutex_unlock(&cb->dcmd_mutex);
221 return status;
222}
223
224/**
225 * myrb_exec_type3D - executes a type 3D command and waits for completion.
226 *
227 * Return: command status
228 */
229static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
230 enum myrb_cmd_opcode op, struct scsi_device *sdev,
231 struct myrb_pdev_state *pdev_info)
232{
233 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
234 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
235 unsigned short status;
236 dma_addr_t pdev_info_addr;
237
238 pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
239 sizeof(struct myrb_pdev_state),
240 DMA_FROM_DEVICE);
241 if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
242 return MYRB_STATUS_SUBSYS_FAILED;
243
244 mutex_lock(&cb->dcmd_mutex);
245 myrb_reset_cmd(cmd_blk);
246 mbox->type3D.id = MYRB_DCMD_TAG;
247 mbox->type3D.opcode = op;
248 mbox->type3D.channel = sdev->channel;
249 mbox->type3D.target = sdev->id;
250 mbox->type3D.addr = pdev_info_addr;
251 status = myrb_exec_cmd(cb, cmd_blk);
252 mutex_unlock(&cb->dcmd_mutex);
253 dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
254 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
255 if (status == MYRB_STATUS_SUCCESS &&
256 mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
257 myrb_translate_devstate(pdev_info);
258
259 return status;
260}
261
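/*
 * The function above is also the template for one-off DMA buffers in
 * this driver: dma_map_single(), a dma_mapping_error() check, command
 * execution, then dma_unmap_single() with matching size and direction.
 * A hypothetical caller (opcode and field names assumed from myrb.h):
 *
 *	struct myrb_pdev_state st;
 *
 *	if (myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, sdev, &st) ==
 *	    MYRB_STATUS_SUCCESS)
 *		sdev_printk(KERN_INFO, sdev, "drive state %s\n",
 *			    myrb_devstate_name(st.state));
 */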
262static char *myrb_event_msg[] = {
263 "killed because write recovery failed",
264 "killed because of SCSI bus reset failure",
265 "killed because of double check condition",
266 "killed because it was removed",
267 "killed because of gross error on SCSI chip",
268 "killed because of bad tag returned from drive",
269 "killed because of timeout on SCSI command",
270 "killed because of reset SCSI command issued from system",
271 "killed because busy or parity error count exceeded limit",
272 "killed because of 'kill drive' command from system",
273 "killed because of selection timeout",
274 "killed due to SCSI phase sequence error",
275 "killed due to unknown status",
276};
277
278/**
279 * myrb_get_event - get event log from HBA
280 * @cb: pointer to the hba structure
281 * @event: number of the event
282 *
 283 * Executes a type 3E command and logs the event message.
284 */
285static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
286{
287 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
288 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
289 struct myrb_log_entry *ev_buf;
290 dma_addr_t ev_addr;
291 unsigned short status;
292
293 ev_buf = dma_alloc_coherent(&cb->pdev->dev,
294 sizeof(struct myrb_log_entry),
295 &ev_addr, GFP_KERNEL);
296 if (!ev_buf)
297 return;
298
299 myrb_reset_cmd(cmd_blk);
300 mbox->type3E.id = MYRB_MCMD_TAG;
301 mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
302 mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
303 mbox->type3E.opqual = 1;
304 mbox->type3E.ev_seq = event;
305 mbox->type3E.addr = ev_addr;
306 status = myrb_exec_cmd(cb, cmd_blk);
307 if (status != MYRB_STATUS_SUCCESS)
308 shost_printk(KERN_INFO, cb->host,
309 "Failed to get event log %d, status %04x\n",
310 event, status);
311
312 else if (ev_buf->seq_num == event) {
313 struct scsi_sense_hdr sshdr;
314
315 memset(&sshdr, 0, sizeof(sshdr));
316 scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
317
318 if (sshdr.sense_key == VENDOR_SPECIFIC &&
319 sshdr.asc == 0x80 &&
320 sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
321 shost_printk(KERN_CRIT, cb->host,
322 "Physical drive %d:%d: %s\n",
323 ev_buf->channel, ev_buf->target,
324 myrb_event_msg[sshdr.ascq]);
325 else
326 shost_printk(KERN_CRIT, cb->host,
327 "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
328 ev_buf->channel, ev_buf->target,
329 sshdr.sense_key, sshdr.asc, sshdr.ascq);
330 }
331
332 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
333 ev_buf, ev_addr);
334}
335
336/**
337 * myrb_get_errtable - retrieves the error table from the controller
338 *
339 * Executes a type 3 command and logs the error table from the controller.
340 */
341static void myrb_get_errtable(struct myrb_hba *cb)
342{
343 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
344 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
345 unsigned short status;
346 struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
347
348 memcpy(&old_table, cb->err_table, sizeof(old_table));
349
350 myrb_reset_cmd(cmd_blk);
351 mbox->type3.id = MYRB_MCMD_TAG;
352 mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
353 mbox->type3.addr = cb->err_table_addr;
354 status = myrb_exec_cmd(cb, cmd_blk);
355 if (status == MYRB_STATUS_SUCCESS) {
356 struct myrb_error_entry *table = cb->err_table;
357 struct myrb_error_entry *new, *old;
358 size_t err_table_offset;
359 struct scsi_device *sdev;
360
361 shost_for_each_device(sdev, cb->host) {
362 if (sdev->channel >= myrb_logical_channel(cb->host))
363 continue;
364 err_table_offset = sdev->channel * MYRB_MAX_TARGETS
365 + sdev->id;
366 new = table + err_table_offset;
367 old = &old_table[err_table_offset];
368 if (new->parity_err == old->parity_err &&
369 new->soft_err == old->soft_err &&
370 new->hard_err == old->hard_err &&
371 new->misc_err == old->misc_err)
372 continue;
373 sdev_printk(KERN_CRIT, sdev,
374 "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
375 new->parity_err, new->soft_err,
376 new->hard_err, new->misc_err);
377 }
378 }
379}
380
381/**
382 * myrb_get_ldev_info - retrieves the logical device table from the controller
383 *
384 * Executes a type 3 command and updates the logical device table.
385 *
386 * Return: command status
387 */
388static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
389{
390 unsigned short status;
391 int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
392 struct Scsi_Host *shost = cb->host;
393
394 status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
395 cb->ldev_info_addr);
396 if (status != MYRB_STATUS_SUCCESS)
397 return status;
398
399 for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
400 struct myrb_ldev_info *old = NULL;
401 struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
402 struct scsi_device *sdev;
403
404 sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
405 ldev_num, 0);
406 if (!sdev) {
407 if (new->state == MYRB_DEVICE_OFFLINE)
408 continue;
409 shost_printk(KERN_INFO, shost,
410 "Adding Logical Drive %d in state %s\n",
411 ldev_num, myrb_devstate_name(new->state));
412 scsi_add_device(shost, myrb_logical_channel(shost),
413 ldev_num, 0);
414 continue;
415 }
416 old = sdev->hostdata;
417 if (new->state != old->state)
418 shost_printk(KERN_INFO, shost,
419 "Logical Drive %d is now %s\n",
420 ldev_num, myrb_devstate_name(new->state));
421 if (new->wb_enabled != old->wb_enabled)
422 sdev_printk(KERN_INFO, sdev,
423 "Logical Drive is now WRITE %s\n",
424 (new->wb_enabled ? "BACK" : "THRU"));
425 memcpy(old, new, sizeof(*new));
426 scsi_device_put(sdev);
427 }
428 return status;
429}
430
431/**
432 * myrb_get_rbld_progress - get rebuild progress information
433 *
434 * Executes a type 3 command and returns the rebuild progress
435 * information.
436 *
437 * Return: command status
438 */
439static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
440 struct myrb_rbld_progress *rbld)
441{
442 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
443 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
444 struct myrb_rbld_progress *rbld_buf;
445 dma_addr_t rbld_addr;
446 unsigned short status;
447
448 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
449 sizeof(struct myrb_rbld_progress),
450 &rbld_addr, GFP_KERNEL);
451 if (!rbld_buf)
452 return MYRB_STATUS_RBLD_NOT_CHECKED;
453
454 myrb_reset_cmd(cmd_blk);
455 mbox->type3.id = MYRB_MCMD_TAG;
456 mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
457 mbox->type3.addr = rbld_addr;
458 status = myrb_exec_cmd(cb, cmd_blk);
459 if (rbld)
460 memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
461 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
462 rbld_buf, rbld_addr);
463 return status;
464}
465
466/**
467 * myrb_update_rbld_progress - updates the rebuild status
468 *
469 * Updates the rebuild status for the attached logical devices.
470 *
471 */
472static void myrb_update_rbld_progress(struct myrb_hba *cb)
473{
474 struct myrb_rbld_progress rbld_buf;
475 unsigned short status;
476
477 status = myrb_get_rbld_progress(cb, &rbld_buf);
478 if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
479 cb->last_rbld_status == MYRB_STATUS_SUCCESS)
480 status = MYRB_STATUS_RBLD_SUCCESS;
481 if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
482 unsigned int blocks_done =
483 rbld_buf.ldev_size - rbld_buf.blocks_left;
484 struct scsi_device *sdev;
485
486 sdev = scsi_device_lookup(cb->host,
487 myrb_logical_channel(cb->host),
488 rbld_buf.ldev_num, 0);
489 if (!sdev)
490 return;
491
492 switch (status) {
493 case MYRB_STATUS_SUCCESS:
494 sdev_printk(KERN_INFO, sdev,
495 "Rebuild in Progress, %d%% completed\n",
496 (100 * (blocks_done >> 7))
497 / (rbld_buf.ldev_size >> 7));
498 break;
499 case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
500 sdev_printk(KERN_INFO, sdev,
501 "Rebuild Failed due to Logical Drive Failure\n");
502 break;
503 case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
504 sdev_printk(KERN_INFO, sdev,
505 "Rebuild Failed due to Bad Blocks on Other Drives\n");
506 break;
507 case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
508 sdev_printk(KERN_INFO, sdev,
509 "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
510 break;
511 case MYRB_STATUS_RBLD_SUCCESS:
512 sdev_printk(KERN_INFO, sdev,
513 "Rebuild Completed Successfully\n");
514 break;
515 case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
516 sdev_printk(KERN_INFO, sdev,
517 "Rebuild Successfully Terminated\n");
518 break;
519 default:
520 break;
521 }
522 scsi_device_put(sdev);
523 }
524 cb->last_rbld_status = status;
525}
526
527/**
 528 * myrb_get_cc_progress - retrieve the consistency check progress
 529 *
 530 * Executes a type 3 command and reports the rebuild / consistency
 531 * check progress.
532 */
533static void myrb_get_cc_progress(struct myrb_hba *cb)
534{
535 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
536 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
537 struct myrb_rbld_progress *rbld_buf;
538 dma_addr_t rbld_addr;
539 unsigned short status;
540
541 rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
542 sizeof(struct myrb_rbld_progress),
543 &rbld_addr, GFP_KERNEL);
544 if (!rbld_buf) {
545 cb->need_cc_status = true;
546 return;
547 }
548 myrb_reset_cmd(cmd_blk);
549 mbox->type3.id = MYRB_MCMD_TAG;
550 mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
551 mbox->type3.addr = rbld_addr;
552 status = myrb_exec_cmd(cb, cmd_blk);
553 if (status == MYRB_STATUS_SUCCESS) {
554 unsigned int ldev_num = rbld_buf->ldev_num;
555 unsigned int ldev_size = rbld_buf->ldev_size;
556 unsigned int blocks_done =
557 ldev_size - rbld_buf->blocks_left;
558 struct scsi_device *sdev;
559
560 sdev = scsi_device_lookup(cb->host,
561 myrb_logical_channel(cb->host),
562 ldev_num, 0);
563 if (sdev) {
564 sdev_printk(KERN_INFO, sdev,
565 "Consistency Check in Progress: %d%% completed\n",
566 (100 * (blocks_done >> 7))
567 / (ldev_size >> 7));
568 scsi_device_put(sdev);
569 }
570 }
571 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
572 rbld_buf, rbld_addr);
573}
574
575/**
576 * myrb_bgi_control - updates background initialisation status
577 *
578 * Executes a type 3B command and updates the background initialisation status
579 */
580static void myrb_bgi_control(struct myrb_hba *cb)
581{
582 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
583 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
584 struct myrb_bgi_status *bgi, *last_bgi;
585 dma_addr_t bgi_addr;
586 struct scsi_device *sdev = NULL;
587 unsigned short status;
588
589 bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
590 &bgi_addr, GFP_KERNEL);
591 if (!bgi) {
592 shost_printk(KERN_ERR, cb->host,
593 "Failed to allocate bgi memory\n");
594 return;
595 }
596 myrb_reset_cmd(cmd_blk);
597 mbox->type3B.id = MYRB_DCMD_TAG;
598 mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
599 mbox->type3B.optype = 0x20;
600 mbox->type3B.addr = bgi_addr;
601 status = myrb_exec_cmd(cb, cmd_blk);
602 last_bgi = &cb->bgi_status;
603 sdev = scsi_device_lookup(cb->host,
604 myrb_logical_channel(cb->host),
605 bgi->ldev_num, 0);
606 switch (status) {
607 case MYRB_STATUS_SUCCESS:
608 switch (bgi->status) {
609 case MYRB_BGI_INVALID:
610 break;
611 case MYRB_BGI_STARTED:
612 if (!sdev)
613 break;
614 sdev_printk(KERN_INFO, sdev,
615 "Background Initialization Started\n");
616 break;
617 case MYRB_BGI_INPROGRESS:
618 if (!sdev)
619 break;
620 if (bgi->blocks_done == last_bgi->blocks_done &&
621 bgi->ldev_num == last_bgi->ldev_num)
622 break;
623 sdev_printk(KERN_INFO, sdev,
624 "Background Initialization in Progress: %d%% completed\n",
625 (100 * (bgi->blocks_done >> 7))
626 / (bgi->ldev_size >> 7));
627 break;
628 case MYRB_BGI_SUSPENDED:
629 if (!sdev)
630 break;
631 sdev_printk(KERN_INFO, sdev,
632 "Background Initialization Suspended\n");
633 break;
634 case MYRB_BGI_CANCELLED:
635 if (!sdev)
636 break;
637 sdev_printk(KERN_INFO, sdev,
638 "Background Initialization Cancelled\n");
639 break;
640 }
641 memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
642 break;
643 case MYRB_STATUS_BGI_SUCCESS:
644 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
645 sdev_printk(KERN_INFO, sdev,
646 "Background Initialization Completed Successfully\n");
647 cb->bgi_status.status = MYRB_BGI_INVALID;
648 break;
649 case MYRB_STATUS_BGI_ABORTED:
650 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
651 sdev_printk(KERN_INFO, sdev,
652 "Background Initialization Aborted\n");
653 /* Fallthrough */
654 case MYRB_STATUS_NO_BGI_INPROGRESS:
655 cb->bgi_status.status = MYRB_BGI_INVALID;
656 break;
657 }
658 if (sdev)
659 scsi_device_put(sdev);
660 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
661 bgi, bgi_addr);
662}
663
664/**
665 * myrb_hba_enquiry - updates the controller status
666 *
667 * Executes a DAC_V1_Enquiry command and updates the controller status.
668 *
669 * Return: command status
670 */
671static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
672{
673 struct myrb_enquiry old, *new;
674 unsigned short status;
675
676 memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
677
678 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
679 if (status != MYRB_STATUS_SUCCESS)
680 return status;
681
682 new = cb->enquiry;
683 if (new->ldev_count > old.ldev_count) {
684 int ldev_num = old.ldev_count - 1;
685
686 while (++ldev_num < new->ldev_count)
687 shost_printk(KERN_CRIT, cb->host,
688 "Logical Drive %d Now Exists\n",
689 ldev_num);
690 }
691 if (new->ldev_count < old.ldev_count) {
692 int ldev_num = new->ldev_count - 1;
693
694 while (++ldev_num < old.ldev_count)
695 shost_printk(KERN_CRIT, cb->host,
696 "Logical Drive %d No Longer Exists\n",
697 ldev_num);
698 }
699 if (new->status.deferred != old.status.deferred)
700 shost_printk(KERN_CRIT, cb->host,
701 "Deferred Write Error Flag is now %s\n",
702 (new->status.deferred ? "TRUE" : "FALSE"));
703 if (new->ev_seq != old.ev_seq) {
704 cb->new_ev_seq = new->ev_seq;
705 cb->need_err_info = true;
706 shost_printk(KERN_INFO, cb->host,
707 "Event log %d/%d (%d/%d) available\n",
708 cb->old_ev_seq, cb->new_ev_seq,
709 old.ev_seq, new->ev_seq);
710 }
711 if ((new->ldev_critical > 0 &&
712 new->ldev_critical != old.ldev_critical) ||
713 (new->ldev_offline > 0 &&
714 new->ldev_offline != old.ldev_offline) ||
715 (new->ldev_count != old.ldev_count)) {
716 shost_printk(KERN_INFO, cb->host,
717 "Logical drive count changed (%d/%d/%d)\n",
718 new->ldev_critical,
719 new->ldev_offline,
720 new->ldev_count);
721 cb->need_ldev_info = true;
722 }
723 if (new->pdev_dead > 0 ||
724 new->pdev_dead != old.pdev_dead ||
725 time_after_eq(jiffies, cb->secondary_monitor_time
726 + MYRB_SECONDARY_MONITOR_INTERVAL)) {
727 cb->need_bgi_status = cb->bgi_status_supported;
728 cb->secondary_monitor_time = jiffies;
729 }
730 if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
731 new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
732 old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
733 old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
734 cb->need_rbld = true;
735 cb->rbld_first = (new->ldev_critical < old.ldev_critical);
736 }
737 if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
738 switch (new->rbld) {
739 case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
740 shost_printk(KERN_INFO, cb->host,
741 "Consistency Check Completed Successfully\n");
742 break;
743 case MYRB_STDBY_RBLD_IN_PROGRESS:
744 case MYRB_BG_RBLD_IN_PROGRESS:
745 break;
746 case MYRB_BG_CHECK_IN_PROGRESS:
747 cb->need_cc_status = true;
748 break;
749 case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
750 shost_printk(KERN_INFO, cb->host,
751 "Consistency Check Completed with Error\n");
752 break;
753 case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
754 shost_printk(KERN_INFO, cb->host,
755 "Consistency Check Failed - Physical Device Failed\n");
756 break;
757 case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
758 shost_printk(KERN_INFO, cb->host,
759 "Consistency Check Failed - Logical Drive Failed\n");
760 break;
761 case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
762 shost_printk(KERN_INFO, cb->host,
763 "Consistency Check Failed - Other Causes\n");
764 break;
765 case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
766 shost_printk(KERN_INFO, cb->host,
767 "Consistency Check Successfully Terminated\n");
768 break;
769 }
770 else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
771 cb->need_cc_status = true;
772
773 return MYRB_STATUS_SUCCESS;
774}
775
776/**
777 * myrb_set_pdev_state - sets the device state for a physical device
778 *
779 * Return: command status
780 */
781static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
782 struct scsi_device *sdev, enum myrb_devstate state)
783{
784 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
785 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
786 unsigned short status;
787
788 mutex_lock(&cb->dcmd_mutex);
789 mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
790 mbox->type3D.id = MYRB_DCMD_TAG;
791 mbox->type3D.channel = sdev->channel;
792 mbox->type3D.target = sdev->id;
793 mbox->type3D.state = state & 0x1F;
794 status = myrb_exec_cmd(cb, cmd_blk);
795 mutex_unlock(&cb->dcmd_mutex);
796
797 return status;
798}
799
800/**
801 * myrb_enable_mmio - enables the Memory Mailbox Interface
802 *
803 * PD and P controller types have no memory mailbox, but still need the
804 * other dma mapped memory.
805 *
806 * Return: true on success, false otherwise.
807 */
808static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
809{
810 void __iomem *base = cb->io_base;
811 struct pci_dev *pdev = cb->pdev;
812 size_t err_table_size;
813 size_t ldev_info_size;
814 union myrb_cmd_mbox *cmd_mbox_mem;
815 struct myrb_stat_mbox *stat_mbox_mem;
816 union myrb_cmd_mbox mbox;
817 unsigned short status;
818
819 memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
820
821 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
822 dev_err(&pdev->dev, "DMA mask out of range\n");
823 return false;
824 }
825
826 cb->enquiry = dma_alloc_coherent(&pdev->dev,
827 sizeof(struct myrb_enquiry),
828 &cb->enquiry_addr, GFP_KERNEL);
829 if (!cb->enquiry)
830 return false;
831
832 err_table_size = sizeof(struct myrb_error_entry) *
833 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
834 cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
835 &cb->err_table_addr, GFP_KERNEL);
836 if (!cb->err_table)
837 return false;
838
839 ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
840 cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
841 &cb->ldev_info_addr, GFP_KERNEL);
842 if (!cb->ldev_info_buf)
843 return false;
844
845 /*
846 * Skip mailbox initialisation for PD and P Controllers
847 */
848 if (!mmio_init_fn)
849 return true;
850
851 /* These are the base addresses for the command memory mailbox array */
852 cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
853 cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
854 cb->cmd_mbox_size,
855 &cb->cmd_mbox_addr,
856 GFP_KERNEL);
857 if (!cb->first_cmd_mbox)
858 return false;
859
860 cmd_mbox_mem = cb->first_cmd_mbox;
861 cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
862 cb->last_cmd_mbox = cmd_mbox_mem;
863 cb->next_cmd_mbox = cb->first_cmd_mbox;
864 cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
865 cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
866
867 /* These are the base addresses for the status memory mailbox array */
868 cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
869 sizeof(struct myrb_stat_mbox);
870 cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
871 cb->stat_mbox_size,
872 &cb->stat_mbox_addr,
873 GFP_KERNEL);
874 if (!cb->first_stat_mbox)
875 return false;
876
877 stat_mbox_mem = cb->first_stat_mbox;
878 stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
879 cb->last_stat_mbox = stat_mbox_mem;
880 cb->next_stat_mbox = cb->first_stat_mbox;
881
882 /* Enable the Memory Mailbox Interface. */
883 cb->dual_mode_interface = true;
884 mbox.typeX.opcode = 0x2B;
885 mbox.typeX.id = 0;
886 mbox.typeX.opcode2 = 0x14;
887 mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
888 mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
889
890 status = mmio_init_fn(pdev, base, &mbox);
891 if (status != MYRB_STATUS_SUCCESS) {
892 cb->dual_mode_interface = false;
893 mbox.typeX.opcode2 = 0x10;
894 status = mmio_init_fn(pdev, base, &mbox);
895 if (status != MYRB_STATUS_SUCCESS) {
896 dev_err(&pdev->dev,
897 "Failed to enable mailbox, statux %02X\n",
898 status);
899 return false;
900 }
901 }
902 return true;
903}
904
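/*
 * Note the two-stage enable above: opcode2 0x14 first requests the
 * dual-mode mailbox interface and, if the firmware rejects it, the
 * code retries with opcode2 0x10 for single mode, clearing
 * dual_mode_interface to match.
 */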
905/**
906 * myrb_get_hba_config - reads the configuration information
907 *
908 * Reads the configuration information from the controller and
909 * initializes the controller structure.
910 *
911 * Return: 0 on success, errno otherwise
912 */
913static int myrb_get_hba_config(struct myrb_hba *cb)
914{
915 struct myrb_enquiry2 *enquiry2;
916 dma_addr_t enquiry2_addr;
917 struct myrb_config2 *config2;
918 dma_addr_t config2_addr;
919 struct Scsi_Host *shost = cb->host;
920 struct pci_dev *pdev = cb->pdev;
921 int pchan_max = 0, pchan_cur = 0;
922 unsigned short status;
923 int ret = -ENODEV, memsize = 0;
924
925 enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
926 &enquiry2_addr, GFP_KERNEL);
927 if (!enquiry2) {
928 shost_printk(KERN_ERR, cb->host,
929 "Failed to allocate V1 enquiry2 memory\n");
930 return -ENOMEM;
931 }
932 config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
933 &config2_addr, GFP_KERNEL);
934 if (!config2) {
935 shost_printk(KERN_ERR, cb->host,
936 "Failed to allocate V1 config2 memory\n");
937 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
938 enquiry2, enquiry2_addr);
939 return -ENOMEM;
940 }
941 mutex_lock(&cb->dma_mutex);
942 status = myrb_hba_enquiry(cb);
943 mutex_unlock(&cb->dma_mutex);
944 if (status != MYRB_STATUS_SUCCESS) {
945 shost_printk(KERN_WARNING, cb->host,
946 "Failed it issue V1 Enquiry\n");
947 goto out_free;
948 }
949
950 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
951 if (status != MYRB_STATUS_SUCCESS) {
952 shost_printk(KERN_WARNING, cb->host,
953 "Failed to issue V1 Enquiry2\n");
954 goto out_free;
955 }
956
957 status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
958 if (status != MYRB_STATUS_SUCCESS) {
959 shost_printk(KERN_WARNING, cb->host,
960 "Failed to issue ReadConfig2\n");
961 goto out_free;
962 }
963
964 status = myrb_get_ldev_info(cb);
965 if (status != MYRB_STATUS_SUCCESS) {
966 shost_printk(KERN_WARNING, cb->host,
967 "Failed to get logical drive information\n");
968 goto out_free;
969 }
970
971 /*
972 * Initialize the Controller Model Name and Full Model Name fields.
973 */
974 switch (enquiry2->hw.sub_model) {
975 case DAC960_V1_P_PD_PU:
976 if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
977 strcpy(cb->model_name, "DAC960PU");
978 else
979 strcpy(cb->model_name, "DAC960PD");
980 break;
981 case DAC960_V1_PL:
982 strcpy(cb->model_name, "DAC960PL");
983 break;
984 case DAC960_V1_PG:
985 strcpy(cb->model_name, "DAC960PG");
986 break;
987 case DAC960_V1_PJ:
988 strcpy(cb->model_name, "DAC960PJ");
989 break;
990 case DAC960_V1_PR:
991 strcpy(cb->model_name, "DAC960PR");
992 break;
993 case DAC960_V1_PT:
994 strcpy(cb->model_name, "DAC960PT");
995 break;
996 case DAC960_V1_PTL0:
997 strcpy(cb->model_name, "DAC960PTL0");
998 break;
999 case DAC960_V1_PRL:
1000 strcpy(cb->model_name, "DAC960PRL");
1001 break;
1002 case DAC960_V1_PTL1:
1003 strcpy(cb->model_name, "DAC960PTL1");
1004 break;
1005 case DAC960_V1_1164P:
1006 strcpy(cb->model_name, "eXtremeRAID 1100");
1007 break;
1008 default:
1009 shost_printk(KERN_WARNING, cb->host,
1010 "Unknown Model %X\n",
1011 enquiry2->hw.sub_model);
1012 goto out;
1013 }
1014 /*
1015 * Initialize the Controller Firmware Version field and verify that it
1016 * is a supported firmware version.
1017 * The supported firmware versions are:
1018 *
1019 * DAC1164P 5.06 and above
1020 * DAC960PTL/PRL/PJ/PG 4.06 and above
1021 * DAC960PU/PD/PL 3.51 and above
1022 * DAC960PU/PD/PL/P 2.73 and above
1023 */
1024#if defined(CONFIG_ALPHA)
1025 /*
1026 * DEC Alpha machines were often equipped with DAC960 cards that were
1027 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1028 * the last custom FW revision to be released by DEC for these older
1029 * controllers, appears to work quite well with this driver.
1030 *
1031 * Cards tested successfully were several versions each of the PD and
1032 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1033 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1034 * back of the board, of:
1035 *
1036 * KZPSC: D040347 (1-channel) or D040348 (2-channel)
1037 * or D040349 (3-channel)
1038 * KZPAC: D040395 (1-channel) or D040396 (2-channel)
1039 * or D040397 (3-channel)
1040 */
1041# define FIRMWARE_27X "2.70"
1042#else
1043# define FIRMWARE_27X "2.73"
1044#endif
1045
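/*
 * Worked example (values assumed): fw.major 2, minor 73, type '0',
 * turn 1 is formatted below as "2.73-0-01", which compares greater
 * than or equal to FIRMWARE_27X ("2.73") under strcmp() and so passes
 * the version check.
 */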
1046 if (enquiry2->fw.major_version == 0) {
1047 enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1048 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1049 enquiry2->fw.firmware_type = '0';
1050 enquiry2->fw.turn_id = 0;
1051 }
1052 sprintf(cb->fw_version, "%d.%02d-%c-%02d",
1053 enquiry2->fw.major_version,
1054 enquiry2->fw.minor_version,
1055 enquiry2->fw.firmware_type,
1056 enquiry2->fw.turn_id);
1057 if (!((enquiry2->fw.major_version == 5 &&
1058 enquiry2->fw.minor_version >= 6) ||
1059 (enquiry2->fw.major_version == 4 &&
1060 enquiry2->fw.minor_version >= 6) ||
1061 (enquiry2->fw.major_version == 3 &&
1062 enquiry2->fw.minor_version >= 51) ||
1063 (enquiry2->fw.major_version == 2 &&
1064 strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1065 shost_printk(KERN_WARNING, cb->host,
1066 "Firmware Version '%s' unsupported\n",
1067 cb->fw_version);
1068 goto out;
1069 }
1070 /*
1071 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1072 * Enclosure Management Enabled fields.
1073 */
1074 switch (enquiry2->hw.model) {
1075 case MYRB_5_CHANNEL_BOARD:
1076 pchan_max = 5;
1077 break;
1078 case MYRB_3_CHANNEL_BOARD:
1079 case MYRB_3_CHANNEL_ASIC_DAC:
1080 pchan_max = 3;
1081 break;
1082 case MYRB_2_CHANNEL_BOARD:
1083 pchan_max = 2;
1084 break;
1085 default:
1086 pchan_max = enquiry2->cfg_chan;
1087 break;
1088 }
1089 pchan_cur = enquiry2->cur_chan;
1090 if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1091 cb->bus_width = 32;
1092 else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1093 cb->bus_width = 16;
1094 else
1095 cb->bus_width = 8;
1096 cb->ldev_block_size = enquiry2->ldev_block_size;
1097 shost->max_channel = pchan_cur;
1098 shost->max_id = enquiry2->max_targets;
1099 memsize = enquiry2->mem_size >> 20;
1100 cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1101 /*
1102 * Initialize the Controller Queue Depth, Driver Queue Depth,
1103 * Logical Drive Count, Maximum Blocks per Command, Controller
1104 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1105 * The Driver Queue Depth must be at most one less than the
1106 * Controller Queue Depth to allow for an automatic drive
1107 * rebuild operation.
1108 */
1109 shost->can_queue = cb->enquiry->max_tcq;
1110 if (shost->can_queue < 3)
1111 shost->can_queue = enquiry2->max_cmds;
1112 if (shost->can_queue < 3)
1113 /* Play safe and disable TCQ */
1114 shost->can_queue = 1;
1115
1116 if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1117 shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1118 shost->max_sectors = enquiry2->max_sectors;
1119 shost->sg_tablesize = enquiry2->max_sge;
1120 if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1121 shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1122 /*
1123 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1124 */
1125 cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1126 >> (10 - MYRB_BLKSIZE_BITS);
1127 cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1128 >> (10 - MYRB_BLKSIZE_BITS);
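	/*
	 * Worked example (values assumed): with 512-byte blocks
	 * (MYRB_BLKSIZE_BITS == 9) the shift is by one, so
	 * blocks_per_stripe = 128 and block_factor = 1 give
	 * 128 * 1 >> 1 == 64, i.e. a 64KB stripe.
	 */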
1129 /* Assume 255/63 translation */
1130 cb->ldev_geom_heads = 255;
1131 cb->ldev_geom_sectors = 63;
1132 if (config2->drive_geometry) {
1133 cb->ldev_geom_heads = 128;
1134 cb->ldev_geom_sectors = 32;
1135 }
1136
1137 /*
1138 * Initialize the Background Initialization Status.
1139 */
1140 if ((cb->fw_version[0] == '4' &&
1141 strcmp(cb->fw_version, "4.08") >= 0) ||
1142 (cb->fw_version[0] == '5' &&
1143 strcmp(cb->fw_version, "5.08") >= 0)) {
1144 cb->bgi_status_supported = true;
1145 myrb_bgi_control(cb);
1146 }
1147 cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1148 ret = 0;
1149
1150out:
1151 shost_printk(KERN_INFO, cb->host,
1152 "Configuring %s PCI RAID Controller\n", cb->model_name);
1153 shost_printk(KERN_INFO, cb->host,
1154 " Firmware Version: %s, Memory Size: %dMB\n",
1155 cb->fw_version, memsize);
1156 if (cb->io_addr == 0)
1157 shost_printk(KERN_INFO, cb->host,
1158 " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1159 (unsigned long)cb->pci_addr, cb->irq);
1160 else
1161 shost_printk(KERN_INFO, cb->host,
1162 " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1163 (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1164 cb->irq);
1165 shost_printk(KERN_INFO, cb->host,
1166 " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1167 cb->host->can_queue, cb->host->max_sectors);
1168 shost_printk(KERN_INFO, cb->host,
1169 " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1170 cb->host->can_queue, cb->host->sg_tablesize,
1171 MYRB_SCATTER_GATHER_LIMIT);
1172 shost_printk(KERN_INFO, cb->host,
1173 " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1174 cb->stripe_size, cb->segment_size,
1175 cb->ldev_geom_heads, cb->ldev_geom_sectors,
1176 cb->safte_enabled ?
1177 " SAF-TE Enclosure Management Enabled" : "");
1178 shost_printk(KERN_INFO, cb->host,
1179 " Physical: %d/%d channels %d/%d/%d devices\n",
1180 pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1181 cb->host->max_id);
1182
1183 shost_printk(KERN_INFO, cb->host,
1184 " Logical: 1/1 channels, %d/%d disks\n",
1185 cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1186
1187out_free:
1188 dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1189 enquiry2, enquiry2_addr);
1190 dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1191 config2, config2_addr);
1192
1193 return ret;
1194}
1195
1196/**
1197 * myrb_unmap - unmaps controller structures
1198 */
1199static void myrb_unmap(struct myrb_hba *cb)
1200{
1201 if (cb->ldev_info_buf) {
1202 size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1203 MYRB_MAX_LDEVS;
1204 dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1205 cb->ldev_info_buf, cb->ldev_info_addr);
1206 cb->ldev_info_buf = NULL;
1207 }
1208 if (cb->err_table) {
1209 size_t err_table_size = sizeof(struct myrb_error_entry) *
1210 MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1211 dma_free_coherent(&cb->pdev->dev, err_table_size,
1212 cb->err_table, cb->err_table_addr);
1213 cb->err_table = NULL;
1214 }
1215 if (cb->enquiry) {
1216 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1217 cb->enquiry, cb->enquiry_addr);
1218 cb->enquiry = NULL;
1219 }
1220 if (cb->first_stat_mbox) {
1221 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1222 cb->first_stat_mbox, cb->stat_mbox_addr);
1223 cb->first_stat_mbox = NULL;
1224 }
1225 if (cb->first_cmd_mbox) {
1226 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1227 cb->first_cmd_mbox, cb->cmd_mbox_addr);
1228 cb->first_cmd_mbox = NULL;
1229 }
1230}
1231
1232/**
1233 * myrb_cleanup - cleanup controller structures
1234 */
1235static void myrb_cleanup(struct myrb_hba *cb)
1236{
1237 struct pci_dev *pdev = cb->pdev;
1238
1239 /* Free the memory mailbox, status, and related structures */
1240 myrb_unmap(cb);
1241
1242 if (cb->mmio_base) {
1243 cb->disable_intr(cb->io_base);
1244 iounmap(cb->mmio_base);
1245 }
1246 if (cb->irq)
1247 free_irq(cb->irq, cb);
1248 if (cb->io_addr)
1249 release_region(cb->io_addr, 0x80);
1250 pci_set_drvdata(pdev, NULL);
1251 pci_disable_device(pdev);
1252 scsi_host_put(cb->host);
1253}
1254
1255static int myrb_host_reset(struct scsi_cmnd *scmd)
1256{
1257 struct Scsi_Host *shost = scmd->device->host;
1258 struct myrb_hba *cb = shost_priv(shost);
1259
1260 cb->reset(cb->io_base);
1261 return SUCCESS;
1262}
1263
1264static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1265 struct scsi_cmnd *scmd)
1266{
1267 struct myrb_hba *cb = shost_priv(shost);
1268 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1269 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1270 struct myrb_dcdb *dcdb;
1271 dma_addr_t dcdb_addr;
1272 struct scsi_device *sdev = scmd->device;
1273 struct scatterlist *sgl;
1274 unsigned long flags;
1275 int nsge;
1276
1277 myrb_reset_cmd(cmd_blk);
1278 dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1279 if (!dcdb)
1280 return SCSI_MLQUEUE_HOST_BUSY;
1281 nsge = scsi_dma_map(scmd);
1282 if (nsge > 1) {
1283 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1284 scmd->result = (DID_ERROR << 16);
1285 scmd->scsi_done(scmd);
1286 return 0;
1287 }
1288
1289 mbox->type3.opcode = MYRB_CMD_DCDB;
1290 mbox->type3.id = scmd->request->tag + 3;
1291 mbox->type3.addr = dcdb_addr;
1292 dcdb->channel = sdev->channel;
1293 dcdb->target = sdev->id;
1294 switch (scmd->sc_data_direction) {
1295 case DMA_NONE:
1296 dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1297 break;
1298 case DMA_TO_DEVICE:
1299 dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1300 break;
1301 case DMA_FROM_DEVICE:
1302 dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1303 break;
1304 default:
1305 dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1306 break;
1307 }
1308 dcdb->early_status = false;
1309 if (scmd->request->timeout <= 10)
1310 dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1311 else if (scmd->request->timeout <= 60)
1312 dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1313 else if (scmd->request->timeout <= 600)
1314 dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1315 else
1316 dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1317 dcdb->no_autosense = false;
1318 dcdb->allow_disconnect = true;
1319 sgl = scsi_sglist(scmd);
1320 dcdb->dma_addr = sg_dma_address(sgl);
1321 if (sg_dma_len(sgl) > USHRT_MAX) {
1322 dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1323 dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1324 } else {
1325 dcdb->xfer_len_lo = sg_dma_len(sgl);
1326 dcdb->xfer_len_hi4 = 0;
1327 }
1328 dcdb->cdb_len = scmd->cmd_len;
1329 dcdb->sense_len = sizeof(dcdb->sense);
1330 memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1331
1332 spin_lock_irqsave(&cb->queue_lock, flags);
1333 cb->qcmd(cb, cmd_blk);
1334 spin_unlock_irqrestore(&cb->queue_lock, flags);
1335 return 0;
1336}
1337
1338static void myrb_inquiry(struct myrb_hba *cb,
1339 struct scsi_cmnd *scmd)
1340{
1341 unsigned char inq[36] = {
1342 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1343 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1344 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1345 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346 0x20, 0x20, 0x20, 0x20,
1347 };
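	/*
	 * Canned INQUIRY response: bytes 2/3 advertise SCSI-3 with
	 * response data format 2, bytes 8-15 hold the vendor id
	 * "MYLEX   ", and the product id and revision are patched in
	 * from model_name and fw_version below.
	 */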
1348
1349 if (cb->bus_width > 16)
1350 inq[7] |= 1 << 6;
1351 if (cb->bus_width > 8)
1352 inq[7] |= 1 << 5;
1353 memcpy(&inq[16], cb->model_name, 16);
1354 memcpy(&inq[32], cb->fw_version, 1);
1355 memcpy(&inq[33], &cb->fw_version[2], 2);
1356 memcpy(&inq[35], &cb->fw_version[7], 1);
1357
1358 scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1359}
1360
1361static void
1362myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1363 struct myrb_ldev_info *ldev_info)
1364{
1365 unsigned char modes[32], *mode_pg;
1366 bool dbd;
1367 size_t mode_len;
1368
1369 dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1370 if (dbd) {
1371 mode_len = 24;
1372 mode_pg = &modes[4];
1373 } else {
1374 mode_len = 32;
1375 mode_pg = &modes[12];
1376 }
1377 memset(modes, 0, sizeof(modes));
1378 modes[0] = mode_len - 1;
1379 if (!dbd) {
1380 unsigned char *block_desc = &modes[4];
1381
1382 modes[3] = 8;
1383 put_unaligned_be32(ldev_info->size, &block_desc[0]);
1384 put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1385 }
1386 mode_pg[0] = 0x08;
1387 mode_pg[1] = 0x12;
1388 if (ldev_info->wb_enabled)
1389 mode_pg[2] |= 0x04;
1390 if (cb->segment_size) {
1391 mode_pg[2] |= 0x08;
1392 put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1393 }
1394
1395 scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1396}
1397
1398static void myrb_request_sense(struct myrb_hba *cb,
1399 struct scsi_cmnd *scmd)
1400{
1401 scsi_build_sense_buffer(0, scmd->sense_buffer,
1402 NO_SENSE, 0, 0);
1403 scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
1404 SCSI_SENSE_BUFFERSIZE);
1405}
1406
1407static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1408 struct myrb_ldev_info *ldev_info)
1409{
1410 unsigned char data[8];
1411
1412 dev_dbg(&scmd->device->sdev_gendev,
1413 "Capacity %u, blocksize %u\n",
1414 ldev_info->size, cb->ldev_block_size);
1415 put_unaligned_be32(ldev_info->size - 1, &data[0]);
1416 put_unaligned_be32(cb->ldev_block_size, &data[4]);
1417 scsi_sg_copy_from_buffer(scmd, data, 8);
1418}
1419
1420static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1421 struct scsi_cmnd *scmd)
1422{
1423 struct myrb_hba *cb = shost_priv(shost);
1424 struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1425 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1426 struct myrb_ldev_info *ldev_info;
1427 struct scsi_device *sdev = scmd->device;
1428 struct scatterlist *sgl;
1429 unsigned long flags;
1430 u64 lba;
1431 u32 block_cnt;
1432 int nsge;
1433
1434 ldev_info = sdev->hostdata;
1435 if (!ldev_info || (ldev_info->state != MYRB_DEVICE_ONLINE &&
1436 ldev_info->state != MYRB_DEVICE_WO)) {
1437 dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1438 sdev->id, ldev_info ? ldev_info->state : 0xff);
1439 scmd->result = (DID_BAD_TARGET << 16);
1440 scmd->scsi_done(scmd);
1441 return 0;
1442 }
1443 switch (scmd->cmnd[0]) {
1444 case TEST_UNIT_READY:
1445 scmd->result = (DID_OK << 16);
1446 scmd->scsi_done(scmd);
1447 return 0;
1448 case INQUIRY:
1449 if (scmd->cmnd[1] & 1) {
1450 /* Illegal request, invalid field in CDB */
1451 scsi_build_sense_buffer(0, scmd->sense_buffer,
1452 ILLEGAL_REQUEST, 0x24, 0);
1453 scmd->result = (DRIVER_SENSE << 24) |
1454 SAM_STAT_CHECK_CONDITION;
1455 } else {
1456 myrb_inquiry(cb, scmd);
1457 scmd->result = (DID_OK << 16);
1458 }
1459 scmd->scsi_done(scmd);
1460 return 0;
1461 case SYNCHRONIZE_CACHE:
1462 scmd->result = (DID_OK << 16);
1463 scmd->scsi_done(scmd);
1464 return 0;
1465 case MODE_SENSE:
1466 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1467 (scmd->cmnd[2] & 0x3F) != 0x08) {
1468 /* Illegal request, invalid field in CDB */
1469 scsi_build_sense_buffer(0, scmd->sense_buffer,
1470 ILLEGAL_REQUEST, 0x24, 0);
1471 scmd->result = (DRIVER_SENSE << 24) |
1472 SAM_STAT_CHECK_CONDITION;
1473 } else {
1474 myrb_mode_sense(cb, scmd, ldev_info);
1475 scmd->result = (DID_OK << 16);
1476 }
1477 scmd->scsi_done(scmd);
1478 return 0;
1479 case READ_CAPACITY:
1480 if ((scmd->cmnd[1] & 1) ||
1481 (scmd->cmnd[8] & 1)) {
1482 /* Illegal request, invalid field in CDB */
1483 scsi_build_sense_buffer(0, scmd->sense_buffer,
1484 ILLEGAL_REQUEST, 0x24, 0);
1485 scmd->result = (DRIVER_SENSE << 24) |
1486 SAM_STAT_CHECK_CONDITION;
1487 scmd->scsi_done(scmd);
1488 return 0;
1489 }
1490 lba = get_unaligned_be32(&scmd->cmnd[2]);
1491 if (lba) {
1492 /* Illegal request, invalid field in CDB */
1493 scsi_build_sense_buffer(0, scmd->sense_buffer,
1494 ILLEGAL_REQUEST, 0x24, 0);
1495 scmd->result = (DRIVER_SENSE << 24) |
1496 SAM_STAT_CHECK_CONDITION;
1497 scmd->scsi_done(scmd);
1498 return 0;
1499 }
1500 myrb_read_capacity(cb, scmd, ldev_info);
1501 scmd->scsi_done(scmd);
1502 return 0;
1503 case REQUEST_SENSE:
1504 myrb_request_sense(cb, scmd);
1505 scmd->result = (DID_OK << 16);
 scmd->scsi_done(scmd);
1506 return 0;
1507 case SEND_DIAGNOSTIC:
1508 if (scmd->cmnd[1] != 0x04) {
1509 /* Illegal request, invalid field in CDB */
1510 scsi_build_sense_buffer(0, scmd->sense_buffer,
1511 ILLEGAL_REQUEST, 0x24, 0);
1512 scmd->result = (DRIVER_SENSE << 24) |
1513 SAM_STAT_CHECK_CONDITION;
1514 } else {
1515 /* Assume good status */
1516 scmd->result = (DID_OK << 16);
1517 }
1518 scmd->scsi_done(scmd);
1519 return 0;
1520 case READ_6:
1521 if (ldev_info->state == MYRB_DEVICE_WO) {
1522 /* Data protect, attempt to read invalid data */
1523 scsi_build_sense_buffer(0, scmd->sense_buffer,
1524 DATA_PROTECT, 0x21, 0x06);
1525 scmd->result = (DRIVER_SENSE << 24) |
1526 SAM_STAT_CHECK_CONDITION;
1527 scmd->scsi_done(scmd);
1528 return 0;
1529 }
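 /* fall through */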
1530 case WRITE_6:
1531 lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1532 (scmd->cmnd[2] << 8) |
1533 scmd->cmnd[3]);
1534 block_cnt = scmd->cmnd[4];
1535 break;
1536 case READ_10:
1537 if (ldev_info->state == MYRB_DEVICE_WO) {
1538 /* Data protect, attempt to read invalid data */
1539 scsi_build_sense_buffer(0, scmd->sense_buffer,
1540 DATA_PROTECT, 0x21, 0x06);
1541 scmd->result = (DRIVER_SENSE << 24) |
1542 SAM_STAT_CHECK_CONDITION;
1543 scmd->scsi_done(scmd);
1544 return 0;
1545 }
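 /* fall through */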
1546 case WRITE_10:
1547 case VERIFY: /* 0x2F */
1548 case WRITE_VERIFY: /* 0x2E */
1549 lba = get_unaligned_be32(&scmd->cmnd[2]);
1550 block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1551 break;
1552 case READ_12:
1553 if (ldev_info->state == MYRB_DEVICE_WO) {
1554 /* Data protect, attempt to read invalid data */
1555 scsi_build_sense_buffer(0, scmd->sense_buffer,
1556 DATA_PROTECT, 0x21, 0x06);
1557 scmd->result = (DRIVER_SENSE << 24) |
1558 SAM_STAT_CHECK_CONDITION;
1559 scmd->scsi_done(scmd);
1560 return 0;
1561 }
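		/* fall through - READ_12 shares the CDB decode with WRITE_12 */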
1562 case WRITE_12:
1563 case VERIFY_12: /* 0xAF */
1564 case WRITE_VERIFY_12: /* 0xAE */
1565 lba = get_unaligned_be32(&scmd->cmnd[2]);
1566 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1567 break;
1568 default:
1569 /* Illegal request, invalid opcode */
1570 scsi_build_sense_buffer(0, scmd->sense_buffer,
1571 ILLEGAL_REQUEST, 0x20, 0);
1572 scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1573 scmd->scsi_done(scmd);
1574 return 0;
1575 }
1576
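	/*
	 * Command ids below 3 are reserved for the driver's internal
	 * DCMD/MCMD commands, so SCSI commands are identified by the
	 * block-layer tag + 3; the interrupt handlers subtract the
	 * same offset before calling scsi_host_find_tag().
	 */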
1577 myrb_reset_cmd(cmd_blk);
1578 mbox->type5.id = scmd->request->tag + 3;
1579 if (scmd->sc_data_direction == DMA_NONE)
1580 goto submit;
1581 	nsge = scsi_dma_map(scmd);
	if (nsge < 0)
		return SCSI_MLQUEUE_HOST_BUSY;
1582 	if (nsge == 1) {
1583 sgl = scsi_sglist(scmd);
1584 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1585 mbox->type5.opcode = MYRB_CMD_READ;
1586 else
1587 mbox->type5.opcode = MYRB_CMD_WRITE;
1588
1589 mbox->type5.ld.xfer_len = block_cnt;
1590 mbox->type5.ld.ldev_num = sdev->id;
1591 mbox->type5.lba = lba;
1592 mbox->type5.addr = (u32)sg_dma_address(sgl);
1593 } else {
1594 struct myrb_sge *hw_sgl;
1595 dma_addr_t hw_sgl_addr;
1596 int i;
1597
1598 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1599 		if (!hw_sgl) {
			scsi_dma_unmap(scmd);
1600 			return SCSI_MLQUEUE_HOST_BUSY;
		}
1601
1602 cmd_blk->sgl = hw_sgl;
1603 cmd_blk->sgl_addr = hw_sgl_addr;
1604
1605 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1606 mbox->type5.opcode = MYRB_CMD_READ_SG;
1607 else
1608 mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1609
1610 mbox->type5.ld.xfer_len = block_cnt;
1611 mbox->type5.ld.ldev_num = sdev->id;
1612 mbox->type5.lba = lba;
1613 mbox->type5.addr = hw_sgl_addr;
1614 mbox->type5.sg_count = nsge;
1615
1616 scsi_for_each_sg(scmd, sgl, nsge, i) {
1617 hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1618 hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1619 hw_sgl++;
1620 }
1621 }
1622submit:
1623 spin_lock_irqsave(&cb->queue_lock, flags);
1624 cb->qcmd(cb, cmd_blk);
1625 spin_unlock_irqrestore(&cb->queue_lock, flags);
1626
1627 return 0;
1628}
1629
1630static int myrb_queuecommand(struct Scsi_Host *shost,
1631 struct scsi_cmnd *scmd)
1632{
1633 struct scsi_device *sdev = scmd->device;
1634
1635 if (sdev->channel > myrb_logical_channel(shost)) {
1636 scmd->result = (DID_BAD_TARGET << 16);
1637 scmd->scsi_done(scmd);
1638 return 0;
1639 }
1640 if (sdev->channel == myrb_logical_channel(shost))
1641 return myrb_ldev_queuecommand(shost, scmd);
1642
1643 return myrb_pthru_queuecommand(shost, scmd);
1644}
1645
1646static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1647{
1648 struct myrb_hba *cb = shost_priv(sdev->host);
1649 struct myrb_ldev_info *ldev_info;
1650 unsigned short ldev_num = sdev->id;
1651 enum raid_level level;
1652
1653 	if (!cb->ldev_info_buf)
1654 		return -ENXIO;
1655 	ldev_info = cb->ldev_info_buf + ldev_num;
1656
1657 sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1658 if (!sdev->hostdata)
1659 return -ENOMEM;
1660 dev_dbg(&sdev->sdev_gendev,
1661 "slave alloc ldev %d state %x\n",
1662 ldev_num, ldev_info->state);
1663 memcpy(sdev->hostdata, ldev_info,
1664 sizeof(*ldev_info));
1665 switch (ldev_info->raid_level) {
1666 case MYRB_RAID_LEVEL0:
1667 level = RAID_LEVEL_LINEAR;
1668 break;
1669 case MYRB_RAID_LEVEL1:
1670 level = RAID_LEVEL_1;
1671 break;
1672 case MYRB_RAID_LEVEL3:
1673 level = RAID_LEVEL_3;
1674 break;
1675 case MYRB_RAID_LEVEL5:
1676 level = RAID_LEVEL_5;
1677 break;
1678 case MYRB_RAID_LEVEL6:
1679 level = RAID_LEVEL_6;
1680 break;
1681 case MYRB_RAID_JBOD:
1682 level = RAID_LEVEL_JBOD;
1683 break;
1684 default:
1685 level = RAID_LEVEL_UNKNOWN;
1686 break;
1687 }
1688 raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1689 return 0;
1690}
1691
1692static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1693{
1694 struct myrb_hba *cb = shost_priv(sdev->host);
1695 struct myrb_pdev_state *pdev_info;
1696 unsigned short status;
1697
1698 if (sdev->id > MYRB_MAX_TARGETS)
1699 return -ENXIO;
1700
1701 pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1702 if (!pdev_info)
1703 return -ENOMEM;
1704
1705 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1706 sdev, pdev_info);
1707 if (status != MYRB_STATUS_SUCCESS) {
1708 dev_dbg(&sdev->sdev_gendev,
1709 "Failed to get device state, status %x\n",
1710 status);
1711 kfree(pdev_info);
1712 return -ENXIO;
1713 }
1714 if (!pdev_info->present) {
1715 dev_dbg(&sdev->sdev_gendev,
1716 "device not present, skip\n");
1717 kfree(pdev_info);
1718 return -ENXIO;
1719 }
1720 dev_dbg(&sdev->sdev_gendev,
1721 "slave alloc pdev %d:%d state %x\n",
1722 sdev->channel, sdev->id, pdev_info->state);
1723 sdev->hostdata = pdev_info;
1724
1725 return 0;
1726}
1727
1728static int myrb_slave_alloc(struct scsi_device *sdev)
1729{
1730 if (sdev->channel > myrb_logical_channel(sdev->host))
1731 return -ENXIO;
1732
1733 if (sdev->lun > 0)
1734 return -ENXIO;
1735
1736 if (sdev->channel == myrb_logical_channel(sdev->host))
1737 return myrb_ldev_slave_alloc(sdev);
1738
1739 return myrb_pdev_slave_alloc(sdev);
1740}
1741
1742static int myrb_slave_configure(struct scsi_device *sdev)
1743{
1744 struct myrb_ldev_info *ldev_info;
1745
1746 if (sdev->channel > myrb_logical_channel(sdev->host))
1747 return -ENXIO;
1748
1749 if (sdev->channel < myrb_logical_channel(sdev->host)) {
1750 sdev->no_uld_attach = 1;
1751 return 0;
1752 }
1753 if (sdev->lun != 0)
1754 return -ENXIO;
1755
1756 ldev_info = sdev->hostdata;
1757 if (!ldev_info)
1758 return -ENXIO;
1759 if (ldev_info->state != MYRB_DEVICE_ONLINE)
1760 sdev_printk(KERN_INFO, sdev,
1761 "Logical drive is %s\n",
1762 myrb_devstate_name(ldev_info->state));
1763
1764 sdev->tagged_supported = 1;
1765 return 0;
1766}
1767
1768static void myrb_slave_destroy(struct scsi_device *sdev)
1769{
1770 kfree(sdev->hostdata);
1771}
1772
1773static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1774 sector_t capacity, int geom[])
1775{
1776 struct myrb_hba *cb = shost_priv(sdev->host);
1777
1778 geom[0] = cb->ldev_geom_heads;
1779 geom[1] = cb->ldev_geom_sectors;
1780 	/* sector_div() reduces capacity to the quotient, returning the remainder */
	sector_div(capacity, geom[0] * geom[1]);
	geom[2] = (int)capacity;
1781
1782 return 0;
1783}
1784
1785static ssize_t raid_state_show(struct device *dev,
1786 struct device_attribute *attr, char *buf)
1787{
1788 struct scsi_device *sdev = to_scsi_device(dev);
1789 struct myrb_hba *cb = shost_priv(sdev->host);
1790 int ret;
1791
1792 if (!sdev->hostdata)
1793 return snprintf(buf, 16, "Unknown\n");
1794
1795 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1796 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1797 const char *name;
1798
1799 name = myrb_devstate_name(ldev_info->state);
1800 if (name)
1801 ret = snprintf(buf, 32, "%s\n", name);
1802 else
1803 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1804 ldev_info->state);
1805 } else {
1806 struct myrb_pdev_state *pdev_info = sdev->hostdata;
1807 unsigned short status;
1808 const char *name;
1809
1810 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1811 sdev, pdev_info);
1812 if (status != MYRB_STATUS_SUCCESS)
1813 sdev_printk(KERN_INFO, sdev,
1814 "Failed to get device state, status %x\n",
1815 status);
1816
1817 if (!pdev_info->present)
1818 name = "Removed";
1819 else
1820 name = myrb_devstate_name(pdev_info->state);
1821 if (name)
1822 ret = snprintf(buf, 32, "%s\n", name);
1823 else
1824 ret = snprintf(buf, 32, "Invalid (%02X)\n",
1825 pdev_info->state);
1826 }
1827 return ret;
1828}
1829
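/*
 * sysfs interface: writing "kill"/"offline", "online" or "standby" to
 * raid_state maps to MYRB_DEVICE_DEAD, MYRB_DEVICE_ONLINE or
 * MYRB_DEVICE_STANDBY for the addressed physical device.
 */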
1830static ssize_t raid_state_store(struct device *dev,
1831 struct device_attribute *attr, const char *buf, size_t count)
1832{
1833 struct scsi_device *sdev = to_scsi_device(dev);
1834 struct myrb_hba *cb = shost_priv(sdev->host);
1835 struct myrb_pdev_state *pdev_info;
1836 enum myrb_devstate new_state;
1837 unsigned short status;
1838
1839 if (!strncmp(buf, "kill", 4) ||
1840 !strncmp(buf, "offline", 7))
1841 new_state = MYRB_DEVICE_DEAD;
1842 else if (!strncmp(buf, "online", 6))
1843 new_state = MYRB_DEVICE_ONLINE;
1844 else if (!strncmp(buf, "standby", 7))
1845 new_state = MYRB_DEVICE_STANDBY;
1846 else
1847 return -EINVAL;
1848
1849 pdev_info = sdev->hostdata;
1850 if (!pdev_info) {
1851 sdev_printk(KERN_INFO, sdev,
1852 "Failed - no physical device information\n");
1853 return -ENXIO;
1854 }
1855 if (!pdev_info->present) {
1856 sdev_printk(KERN_INFO, sdev,
1857 "Failed - device not present\n");
1858 return -ENXIO;
1859 }
1860
1861 if (pdev_info->state == new_state)
1862 return count;
1863
1864 status = myrb_set_pdev_state(cb, sdev, new_state);
1865 switch (status) {
1866 case MYRB_STATUS_SUCCESS:
1867 break;
1868 case MYRB_STATUS_START_DEVICE_FAILED:
1869 sdev_printk(KERN_INFO, sdev,
1870 "Failed - Unable to Start Device\n");
1871 count = -EAGAIN;
1872 break;
1873 case MYRB_STATUS_NO_DEVICE:
1874 sdev_printk(KERN_INFO, sdev,
1875 "Failed - No Device at Address\n");
1876 count = -ENODEV;
1877 break;
1878 case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1879 sdev_printk(KERN_INFO, sdev,
1880 "Failed - Invalid Channel or Target or Modifier\n");
1881 count = -EINVAL;
1882 break;
1883 case MYRB_STATUS_CHANNEL_BUSY:
1884 sdev_printk(KERN_INFO, sdev,
1885 "Failed - Channel Busy\n");
1886 count = -EBUSY;
1887 break;
1888 default:
1889 sdev_printk(KERN_INFO, sdev,
1890 "Failed - Unexpected Status %04X\n", status);
1891 count = -EIO;
1892 break;
1893 }
1894 return count;
1895}
1896static DEVICE_ATTR_RW(raid_state);
1897
1898static ssize_t raid_level_show(struct device *dev,
1899 struct device_attribute *attr, char *buf)
1900{
1901 struct scsi_device *sdev = to_scsi_device(dev);
1902
1903 if (sdev->channel == myrb_logical_channel(sdev->host)) {
1904 struct myrb_ldev_info *ldev_info = sdev->hostdata;
1905 const char *name;
1906
1907 if (!ldev_info)
1908 return -ENXIO;
1909
1910 name = myrb_raidlevel_name(ldev_info->raid_level);
1911 if (!name)
1912 return snprintf(buf, 32, "Invalid (%02X)\n",
1913 ldev_info->state);
1914 return snprintf(buf, 32, "%s\n", name);
1915 }
1916 return snprintf(buf, 32, "Physical Drive\n");
1917}
1918static DEVICE_ATTR_RO(raid_level);
1919
1920static ssize_t rebuild_show(struct device *dev,
1921 struct device_attribute *attr, char *buf)
1922{
1923 struct scsi_device *sdev = to_scsi_device(dev);
1924 struct myrb_hba *cb = shost_priv(sdev->host);
1925 struct myrb_rbld_progress rbld_buf;
1926 	unsigned short status;
1927
1928 if (sdev->channel < myrb_logical_channel(sdev->host))
1929 return snprintf(buf, 32, "physical device - not rebuilding\n");
1930
1931 status = myrb_get_rbld_progress(cb, &rbld_buf);
1932
1933 if (rbld_buf.ldev_num != sdev->id ||
1934 status != MYRB_STATUS_SUCCESS)
1935 return snprintf(buf, 32, "not rebuilding\n");
1936
1937 return snprintf(buf, 32, "rebuilding block %u of %u\n",
1938 rbld_buf.ldev_size - rbld_buf.blocks_left,
1939 rbld_buf.ldev_size);
1940}
1941
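/*
 * sysfs interface: writing a non-zero value starts an asynchronous
 * rebuild of this physical device; writing 0 cancels a rebuild in
 * progress by resetting the rebuild rate via MYRB_CMD_REBUILD_CONTROL.
 */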
1942static ssize_t rebuild_store(struct device *dev,
1943 struct device_attribute *attr, const char *buf, size_t count)
1944{
1945 struct scsi_device *sdev = to_scsi_device(dev);
1946 struct myrb_hba *cb = shost_priv(sdev->host);
1947 struct myrb_cmdblk *cmd_blk;
1948 union myrb_cmd_mbox *mbox;
1949 unsigned short status;
1950 int rc, start;
1951 const char *msg;
1952
1953 rc = kstrtoint(buf, 0, &start);
1954 if (rc)
1955 return rc;
1956
1957 if (sdev->channel >= myrb_logical_channel(sdev->host))
1958 return -ENXIO;
1959
1960 status = myrb_get_rbld_progress(cb, NULL);
1961 if (start) {
1962 if (status == MYRB_STATUS_SUCCESS) {
1963 sdev_printk(KERN_INFO, sdev,
1964 "Rebuild Not Initiated; already in progress\n");
1965 return -EALREADY;
1966 }
1967 mutex_lock(&cb->dcmd_mutex);
1968 cmd_blk = &cb->dcmd_blk;
1969 myrb_reset_cmd(cmd_blk);
1970 mbox = &cmd_blk->mbox;
1971 mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1972 mbox->type3D.id = MYRB_DCMD_TAG;
1973 mbox->type3D.channel = sdev->channel;
1974 mbox->type3D.target = sdev->id;
1975 status = myrb_exec_cmd(cb, cmd_blk);
1976 mutex_unlock(&cb->dcmd_mutex);
1977 } else {
1978 struct pci_dev *pdev = cb->pdev;
1979 unsigned char *rate;
1980 dma_addr_t rate_addr;
1981
1982 if (status != MYRB_STATUS_SUCCESS) {
1983 sdev_printk(KERN_INFO, sdev,
1984 "Rebuild Not Cancelled; not in progress\n");
1985 return 0;
1986 }
1987
1988 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1989 &rate_addr, GFP_KERNEL);
1990 if (rate == NULL) {
1991 sdev_printk(KERN_INFO, sdev,
1992 "Cancellation of Rebuild Failed - Out of Memory\n");
1993 return -ENOMEM;
1994 }
1995 mutex_lock(&cb->dcmd_mutex);
1996 cmd_blk = &cb->dcmd_blk;
1997 myrb_reset_cmd(cmd_blk);
1998 mbox = &cmd_blk->mbox;
1999 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2000 mbox->type3R.id = MYRB_DCMD_TAG;
2001 mbox->type3R.rbld_rate = 0xFF;
2002 mbox->type3R.addr = rate_addr;
2003 status = myrb_exec_cmd(cb, cmd_blk);
2004 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2005 mutex_unlock(&cb->dcmd_mutex);
2006 }
2007 if (status == MYRB_STATUS_SUCCESS) {
2008 sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
2009 start ? "Initiated" : "Cancelled");
2010 return count;
2011 }
2012 if (!start) {
2013 sdev_printk(KERN_INFO, sdev,
2014 "Rebuild Not Cancelled, status 0x%x\n",
2015 status);
2016 return -EIO;
2017 }
2018
2019 switch (status) {
2020 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2021 msg = "Attempt to Rebuild Online or Unresponsive Drive";
2022 break;
2023 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2024 msg = "New Disk Failed During Rebuild";
2025 break;
2026 case MYRB_STATUS_INVALID_ADDRESS:
2027 msg = "Invalid Device Address";
2028 break;
2029 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2030 msg = "Already in Progress";
2031 break;
2032 default:
2033 msg = NULL;
2034 break;
2035 }
2036 if (msg)
2037 sdev_printk(KERN_INFO, sdev,
2038 "Rebuild Failed - %s\n", msg);
2039 else
2040 sdev_printk(KERN_INFO, sdev,
2041 "Rebuild Failed, status 0x%x\n", status);
2042
2043 return -EIO;
2044}
2045static DEVICE_ATTR_RW(rebuild);
2046
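/*
 * sysfs interface: writing a non-zero value starts an asynchronous
 * consistency check (with automatic restoration) on this logical
 * drive; writing 0 cancels a check in progress.
 */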
2047static ssize_t consistency_check_store(struct device *dev,
2048 struct device_attribute *attr, const char *buf, size_t count)
2049{
2050 struct scsi_device *sdev = to_scsi_device(dev);
2051 struct myrb_hba *cb = shost_priv(sdev->host);
2052 struct myrb_rbld_progress rbld_buf;
2053 struct myrb_cmdblk *cmd_blk;
2054 union myrb_cmd_mbox *mbox;
2056 unsigned short status;
2057 int rc, start;
2058 const char *msg;
2059
2060 rc = kstrtoint(buf, 0, &start);
2061 if (rc)
2062 return rc;
2063
2064 if (sdev->channel < myrb_logical_channel(sdev->host))
2065 return -ENXIO;
2066
2067 status = myrb_get_rbld_progress(cb, &rbld_buf);
2068 if (start) {
2069 if (status == MYRB_STATUS_SUCCESS) {
2070 sdev_printk(KERN_INFO, sdev,
2071 "Check Consistency Not Initiated; already in progress\n");
2072 return -EALREADY;
2073 }
2074 mutex_lock(&cb->dcmd_mutex);
2075 cmd_blk = &cb->dcmd_blk;
2076 myrb_reset_cmd(cmd_blk);
2077 mbox = &cmd_blk->mbox;
2078 mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2079 mbox->type3C.id = MYRB_DCMD_TAG;
2080 mbox->type3C.ldev_num = sdev->id;
2081 mbox->type3C.auto_restore = true;
2082
2083 status = myrb_exec_cmd(cb, cmd_blk);
2084 mutex_unlock(&cb->dcmd_mutex);
2085 } else {
2086 struct pci_dev *pdev = cb->pdev;
2087 unsigned char *rate;
2088 dma_addr_t rate_addr;
2089
2090 		if (status != MYRB_STATUS_SUCCESS ||
		    rbld_buf.ldev_num != sdev->id) {
2091 sdev_printk(KERN_INFO, sdev,
2092 "Check Consistency Not Cancelled; not in progress\n");
2093 return 0;
2094 }
2095 rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2096 &rate_addr, GFP_KERNEL);
2097 if (rate == NULL) {
2098 sdev_printk(KERN_INFO, sdev,
2099 "Cancellation of Check Consistency Failed - Out of Memory\n");
2100 return -ENOMEM;
2101 }
2102 mutex_lock(&cb->dcmd_mutex);
2103 cmd_blk = &cb->dcmd_blk;
2104 myrb_reset_cmd(cmd_blk);
2105 mbox = &cmd_blk->mbox;
2106 mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2107 mbox->type3R.id = MYRB_DCMD_TAG;
2108 mbox->type3R.rbld_rate = 0xFF;
2109 mbox->type3R.addr = rate_addr;
2110 status = myrb_exec_cmd(cb, cmd_blk);
2111 dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2112 mutex_unlock(&cb->dcmd_mutex);
2113 }
2114 if (status == MYRB_STATUS_SUCCESS) {
2115 sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2116 start ? "Initiated" : "Cancelled");
2117 return count;
2118 }
2119 if (!start) {
2120 sdev_printk(KERN_INFO, sdev,
2121 "Check Consistency Not Cancelled, status 0x%x\n",
2122 status);
2123 return -EIO;
2124 }
2125
2126 switch (status) {
2127 case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2128 msg = "Dependent Physical Device is DEAD";
2129 break;
2130 case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2131 msg = "New Disk Failed During Rebuild";
2132 break;
2133 case MYRB_STATUS_INVALID_ADDRESS:
2134 msg = "Invalid or Nonredundant Logical Drive";
2135 break;
2136 case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2137 msg = "Already in Progress";
2138 break;
2139 default:
2140 msg = NULL;
2141 break;
2142 }
2143 if (msg)
2144 sdev_printk(KERN_INFO, sdev,
2145 "Check Consistency Failed - %s\n", msg);
2146 else
2147 sdev_printk(KERN_INFO, sdev,
2148 "Check Consistency Failed, status 0x%x\n", status);
2149
2150 return -EIO;
2151}
2152
2153static ssize_t consistency_check_show(struct device *dev,
2154 struct device_attribute *attr, char *buf)
2155{
2156 return rebuild_show(dev, attr, buf);
2157}
2158static DEVICE_ATTR_RW(consistency_check);
2159
2160static ssize_t ctlr_num_show(struct device *dev,
2161 struct device_attribute *attr, char *buf)
2162{
2163 struct Scsi_Host *shost = class_to_shost(dev);
2164 struct myrb_hba *cb = shost_priv(shost);
2165
2166 return snprintf(buf, 20, "%d\n", cb->ctlr_num);
2167}
2168static DEVICE_ATTR_RO(ctlr_num);
2169
2170static ssize_t firmware_show(struct device *dev,
2171 struct device_attribute *attr, char *buf)
2172{
2173 struct Scsi_Host *shost = class_to_shost(dev);
2174 struct myrb_hba *cb = shost_priv(shost);
2175
2176 return snprintf(buf, 16, "%s\n", cb->fw_version);
2177}
2178static DEVICE_ATTR_RO(firmware);
2179
2180static ssize_t model_show(struct device *dev,
2181 struct device_attribute *attr, char *buf)
2182{
2183 struct Scsi_Host *shost = class_to_shost(dev);
2184 struct myrb_hba *cb = shost_priv(shost);
2185
2186 return snprintf(buf, 16, "%s\n", cb->model_name);
2187}
2188static DEVICE_ATTR_RO(model);
2189
2190static ssize_t flush_cache_store(struct device *dev,
2191 struct device_attribute *attr, const char *buf, size_t count)
2192{
2193 struct Scsi_Host *shost = class_to_shost(dev);
2194 struct myrb_hba *cb = shost_priv(shost);
2195 unsigned short status;
2196
2197 status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2198 if (status == MYRB_STATUS_SUCCESS) {
2199 shost_printk(KERN_INFO, shost,
2200 "Cache Flush Completed\n");
2201 return count;
2202 }
2203 shost_printk(KERN_INFO, shost,
2204 "Cache Flush Failed, status %x\n", status);
2205 return -EIO;
2206}
2207static DEVICE_ATTR_WO(flush_cache);
2208
2209static struct device_attribute *myrb_sdev_attrs[] = {
2210 &dev_attr_rebuild,
2211 &dev_attr_consistency_check,
2212 &dev_attr_raid_state,
2213 &dev_attr_raid_level,
2214 NULL,
2215};
2216
2217static struct device_attribute *myrb_shost_attrs[] = {
2218 &dev_attr_ctlr_num,
2219 &dev_attr_model,
2220 &dev_attr_firmware,
2221 &dev_attr_flush_cache,
2222 NULL,
2223};
2224
2225struct scsi_host_template myrb_template = {
2226 .module = THIS_MODULE,
2227 .name = "DAC960",
2228 .proc_name = "myrb",
2229 .queuecommand = myrb_queuecommand,
2230 .eh_host_reset_handler = myrb_host_reset,
2231 .slave_alloc = myrb_slave_alloc,
2232 .slave_configure = myrb_slave_configure,
2233 .slave_destroy = myrb_slave_destroy,
2234 .bios_param = myrb_biosparam,
2235 .cmd_size = sizeof(struct myrb_cmdblk),
2236 .shost_attrs = myrb_shost_attrs,
2237 .sdev_attrs = myrb_sdev_attrs,
2238 .this_id = -1,
2239};
2240
2241/**
2242 * myrb_is_raid - return boolean indicating device is raid volume
2243 * @dev: the device struct object
2244 */
2245static int myrb_is_raid(struct device *dev)
2246{
2247 struct scsi_device *sdev = to_scsi_device(dev);
2248
2249 return sdev->channel == myrb_logical_channel(sdev->host);
2250}
2251
2252/**
2253 * myrb_get_resync - get raid volume resync percent complete
2254 * @dev: the device struct object
2255 */
2256static void myrb_get_resync(struct device *dev)
2257{
2258 struct scsi_device *sdev = to_scsi_device(dev);
2259 struct myrb_hba *cb = shost_priv(sdev->host);
2260 struct myrb_rbld_progress rbld_buf;
2261 unsigned int percent_complete = 0;
2262 unsigned short status;
2263 unsigned int ldev_size = 0, remaining = 0;
2264
2265 if (sdev->channel < myrb_logical_channel(sdev->host))
2266 return;
2267 status = myrb_get_rbld_progress(cb, &rbld_buf);
2268 if (status == MYRB_STATUS_SUCCESS) {
2269 if (rbld_buf.ldev_num == sdev->id) {
2270 ldev_size = rbld_buf.ldev_size;
2271 remaining = rbld_buf.blocks_left;
2272 }
2273 }
2274 if (remaining && ldev_size)
2275 percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2276 raid_set_resync(myrb_raid_template, dev, percent_complete);
2277}
2278
2279/**
2280 * myrb_get_state - get raid volume status
2281 * @dev: the device struct object
2282 */
2283static void myrb_get_state(struct device *dev)
2284{
2285 struct scsi_device *sdev = to_scsi_device(dev);
2286 struct myrb_hba *cb = shost_priv(sdev->host);
2287 struct myrb_ldev_info *ldev_info = sdev->hostdata;
2288 enum raid_state state = RAID_STATE_UNKNOWN;
2289 unsigned short status;
2290
2291 if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2292 state = RAID_STATE_UNKNOWN;
2293 else {
2294 status = myrb_get_rbld_progress(cb, NULL);
2295 if (status == MYRB_STATUS_SUCCESS)
2296 state = RAID_STATE_RESYNCING;
2297 else {
2298 switch (ldev_info->state) {
2299 case MYRB_DEVICE_ONLINE:
2300 state = RAID_STATE_ACTIVE;
2301 break;
2302 case MYRB_DEVICE_WO:
2303 case MYRB_DEVICE_CRITICAL:
2304 state = RAID_STATE_DEGRADED;
2305 break;
2306 default:
2307 state = RAID_STATE_OFFLINE;
2308 }
2309 }
2310 }
2311 raid_set_state(myrb_raid_template, dev, state);
2312}
2313
2314struct raid_function_template myrb_raid_functions = {
2315 .cookie = &myrb_template,
2316 .is_raid = myrb_is_raid,
2317 .get_resync = myrb_get_resync,
2318 .get_state = myrb_get_state,
2319};
2320
2321static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2322 struct scsi_cmnd *scmd)
2323{
2324 unsigned short status;
2325
2326 if (!cmd_blk)
2327 return;
2328
2329 scsi_dma_unmap(scmd);
2330
2331 if (cmd_blk->dcdb) {
2332 memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2333 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2334 cmd_blk->dcdb_addr);
2335 cmd_blk->dcdb = NULL;
2336 }
2337 if (cmd_blk->sgl) {
2338 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2339 cmd_blk->sgl = NULL;
2340 cmd_blk->sgl_addr = 0;
2341 }
2342 status = cmd_blk->status;
2343 switch (status) {
2344 case MYRB_STATUS_SUCCESS:
2345 case MYRB_STATUS_DEVICE_BUSY:
2346 scmd->result = (DID_OK << 16) | status;
2347 break;
2348 case MYRB_STATUS_BAD_DATA:
2349 dev_dbg(&scmd->device->sdev_gendev,
2350 "Bad Data Encountered\n");
2351 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2352 /* Unrecovered read error */
2353 scsi_build_sense_buffer(0, scmd->sense_buffer,
2354 MEDIUM_ERROR, 0x11, 0);
2355 else
2356 /* Write error */
2357 scsi_build_sense_buffer(0, scmd->sense_buffer,
2358 MEDIUM_ERROR, 0x0C, 0);
2359 scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2360 break;
2361 case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2362 scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2363 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2364 /* Unrecovered read error, auto-reallocation failed */
2365 scsi_build_sense_buffer(0, scmd->sense_buffer,
2366 MEDIUM_ERROR, 0x11, 0x04);
2367 else
2368 /* Write error, auto-reallocation failed */
2369 scsi_build_sense_buffer(0, scmd->sense_buffer,
2370 MEDIUM_ERROR, 0x0C, 0x02);
2371 scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2372 break;
2373 case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2374 dev_dbg(&scmd->device->sdev_gendev,
2375 			"Logical Drive Nonexistent or Offline\n");
2376 scmd->result = (DID_BAD_TARGET << 16);
2377 break;
2378 case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2379 dev_dbg(&scmd->device->sdev_gendev,
2380 			"Attempt to Access Beyond End of Logical Drive\n");
2381 		/* Logical block address out of range */
2382 		scsi_build_sense_buffer(0, scmd->sense_buffer,
2383 				NOT_READY, 0x21, 0);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2384 		break;
2385 case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2386 dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2387 scmd->result = (DID_BAD_TARGET << 16);
2388 break;
2389 default:
2390 scmd_printk(KERN_ERR, scmd,
2391 			"Unexpected Error Status %04X\n", status);
2392 scmd->result = (DID_ERROR << 16);
2393 break;
2394 }
2395 scmd->scsi_done(scmd);
2396}
2397
2398static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2399{
2400 if (!cmd_blk)
2401 return;
2402
2403 if (cmd_blk->completion) {
2404 complete(cmd_blk->completion);
2405 cmd_blk->completion = NULL;
2406 }
2407}
2408
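/*
 * Background monitor: drains the controller event log and refreshes
 * error-table, rebuild, logical-drive, consistency-check and
 * background-initialisation state one item per invocation,
 * rescheduling itself with a short delay while work is outstanding.
 */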
2409static void myrb_monitor(struct work_struct *work)
2410{
2411 struct myrb_hba *cb = container_of(work,
2412 struct myrb_hba, monitor_work.work);
2413 struct Scsi_Host *shost = cb->host;
2414 unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2415
2416 dev_dbg(&shost->shost_gendev, "monitor tick\n");
2417
2418 if (cb->new_ev_seq > cb->old_ev_seq) {
2419 int event = cb->old_ev_seq;
2420
2421 dev_dbg(&shost->shost_gendev,
2422 "get event log no %d/%d\n",
2423 cb->new_ev_seq, event);
2424 myrb_get_event(cb, event);
2425 cb->old_ev_seq = event + 1;
2426 interval = 10;
2427 } else if (cb->need_err_info) {
2428 cb->need_err_info = false;
2429 dev_dbg(&shost->shost_gendev, "get error table\n");
2430 myrb_get_errtable(cb);
2431 interval = 10;
2432 } else if (cb->need_rbld && cb->rbld_first) {
2433 cb->need_rbld = false;
2434 dev_dbg(&shost->shost_gendev,
2435 "get rebuild progress\n");
2436 myrb_update_rbld_progress(cb);
2437 interval = 10;
2438 } else if (cb->need_ldev_info) {
2439 cb->need_ldev_info = false;
2440 dev_dbg(&shost->shost_gendev,
2441 "get logical drive info\n");
2442 myrb_get_ldev_info(cb);
2443 interval = 10;
2444 } else if (cb->need_rbld) {
2445 cb->need_rbld = false;
2446 dev_dbg(&shost->shost_gendev,
2447 "get rebuild progress\n");
2448 myrb_update_rbld_progress(cb);
2449 interval = 10;
2450 } else if (cb->need_cc_status) {
2451 cb->need_cc_status = false;
2452 dev_dbg(&shost->shost_gendev,
2453 "get consistency check progress\n");
2454 myrb_get_cc_progress(cb);
2455 interval = 10;
2456 } else if (cb->need_bgi_status) {
2457 cb->need_bgi_status = false;
2458 dev_dbg(&shost->shost_gendev, "get background init status\n");
2459 myrb_bgi_control(cb);
2460 interval = 10;
2461 } else {
2462 dev_dbg(&shost->shost_gendev, "new enquiry\n");
2463 mutex_lock(&cb->dma_mutex);
2464 myrb_hba_enquiry(cb);
2465 mutex_unlock(&cb->dma_mutex);
2466 if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2467 cb->need_err_info || cb->need_rbld ||
2468 cb->need_ldev_info || cb->need_cc_status ||
2469 cb->need_bgi_status) {
2470 dev_dbg(&shost->shost_gendev,
2471 "reschedule monitor\n");
2472 interval = 0;
2473 }
2474 }
2475 if (interval > 1)
2476 cb->primary_monitor_time = jiffies;
2477 queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2478}
2479
2480/**
2481 * myrb_err_status - reports controller BIOS messages
 * @cb: myrb_hba instance the message applies to
 * @error: error status byte read back from the controller
 * @parm0: first error parameter byte
 * @parm1: second error parameter byte
2482 *
2483 * Controller BIOS messages are passed through the Error Status Register
2484 * when the driver performs the BIOS handshaking.
2485 *
2486 * Return: true for fatal errors and false otherwise.
2487 */
2488bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2489 unsigned char parm0, unsigned char parm1)
2490{
2491 struct pci_dev *pdev = cb->pdev;
2492
2493 switch (error) {
2494 case 0x00:
2495 dev_info(&pdev->dev,
2496 "Physical Device %d:%d Not Responding\n",
2497 parm1, parm0);
2498 break;
2499 case 0x08:
2500 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2501 break;
2502 case 0x30:
2503 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2504 break;
2505 case 0x60:
2506 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2507 break;
2508 case 0x70:
2509 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2510 break;
2511 case 0x90:
2512 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2513 parm1, parm0);
2514 break;
2515 case 0xA0:
2516 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2517 break;
2518 case 0xB0:
2519 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2520 break;
2521 case 0xD0:
2522 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2523 break;
2524 case 0xF0:
2525 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2526 return true;
2527 default:
2528 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2529 error);
2530 return true;
2531 }
2532 return false;
2533}
2534
2535/*
2536 * Hardware-specific functions
2537 */
2538
2539/*
2540 * DAC960 LA Series Controllers
2541 */
2542
2543static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2544{
2545 writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2546}
2547
2548static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2549{
2550 writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2551}
2552
2553static inline void DAC960_LA_gen_intr(void __iomem *base)
2554{
2555 writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
2556}
2557
2558static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2559{
2560 writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2561}
2562
2563static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2564{
2565 writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2566}
2567
2568static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2569{
2570 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2571
2572 return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2573}
2574
2575static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2576{
2577 unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2578
2579 return !(idb & DAC960_LA_IDB_INIT_DONE);
2580}
2581
2582static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2583{
2584 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2585}
2586
2587static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
2588{
2589 writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2590}
2591
2592static inline void DAC960_LA_ack_intr(void __iomem *base)
2593{
2594 writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2595 base + DAC960_LA_ODB_OFFSET);
2596}
2597
2598static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2599{
2600 unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2601
2602 return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2603}
2604
2605static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
2606{
2607 unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2608
2609 return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
2610}
2611
2612static inline void DAC960_LA_enable_intr(void __iomem *base)
2613{
2614 	unsigned char imask = 0xFF;
2615
2616 	imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
2617 	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2618}
2619
2620static inline void DAC960_LA_disable_intr(void __iomem *base)
2621{
2622 	unsigned char imask = 0xFF;
2623
2624 	imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
2625 	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2626}
2627
2628static inline bool DAC960_LA_intr_enabled(void __iomem *base)
2629{
2630 unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
2631
2632 return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
2633}
2634
2635static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2636 union myrb_cmd_mbox *mbox)
2637{
2638 mem_mbox->words[1] = mbox->words[1];
2639 mem_mbox->words[2] = mbox->words[2];
2640 mem_mbox->words[3] = mbox->words[3];
2641 /* Memory barrier to prevent reordering */
2642 wmb();
2643 mem_mbox->words[0] = mbox->words[0];
2644 /* Memory barrier to force PCI access */
2645 mb();
2646}
2647
2648static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2649 union myrb_cmd_mbox *mbox)
2650{
2651 writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2652 writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2653 writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2654 writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2655}
2656
2657static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
2658{
2659 return readb(base + DAC960_LA_STSID_OFFSET);
2660}
2661
2662static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2663{
2664 return readw(base + DAC960_LA_STS_OFFSET);
2665}
2666
2667static inline bool
2668DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2669 unsigned char *param0, unsigned char *param1)
2670{
2671 unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2672
2673 if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2674 return false;
2675 errsts &= ~DAC960_LA_ERRSTS_PENDING;
2676
2677 *error = errsts;
2678 *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2679 *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2680 writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2681 return true;
2682}
2683
2684static inline unsigned short
2685DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2686 union myrb_cmd_mbox *mbox)
2687{
2688 unsigned short status;
2689 int timeout = 0;
2690
2691 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2692 if (!DAC960_LA_hw_mbox_is_full(base))
2693 break;
2694 udelay(10);
2695 timeout++;
2696 }
2697 if (DAC960_LA_hw_mbox_is_full(base)) {
2698 dev_err(&pdev->dev,
2699 "Timeout waiting for empty mailbox\n");
2700 return MYRB_STATUS_SUBSYS_TIMEOUT;
2701 }
2702 DAC960_LA_write_hw_mbox(base, mbox);
2703 DAC960_LA_hw_mbox_new_cmd(base);
2704 timeout = 0;
2705 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2706 if (DAC960_LA_hw_mbox_status_available(base))
2707 break;
2708 udelay(10);
2709 timeout++;
2710 }
2711 if (!DAC960_LA_hw_mbox_status_available(base)) {
2712 dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2713 return MYRB_STATUS_SUBSYS_TIMEOUT;
2714 }
2715 status = DAC960_LA_read_status(base);
2716 DAC960_LA_ack_hw_mbox_intr(base);
2717 DAC960_LA_ack_hw_mbox_status(base);
2718
2719 return status;
2720}
2721
2722static int DAC960_LA_hw_init(struct pci_dev *pdev,
2723 struct myrb_hba *cb, void __iomem *base)
2724{
2725 int timeout = 0;
2726 unsigned char error, parm0, parm1;
2727
2728 DAC960_LA_disable_intr(base);
2729 DAC960_LA_ack_hw_mbox_status(base);
2730 udelay(1000);
2731 timeout = 0;
2732 while (DAC960_LA_init_in_progress(base) &&
2733 timeout < MYRB_MAILBOX_TIMEOUT) {
2734 if (DAC960_LA_read_error_status(base, &error,
2735 &parm0, &parm1) &&
2736 myrb_err_status(cb, error, parm0, parm1))
2737 return -ENODEV;
2738 udelay(10);
2739 timeout++;
2740 }
2741 if (timeout == MYRB_MAILBOX_TIMEOUT) {
2742 dev_err(&pdev->dev,
2743 "Timeout waiting for Controller Initialisation\n");
2744 return -ETIMEDOUT;
2745 }
2746 if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2747 dev_err(&pdev->dev,
2748 "Unable to Enable Memory Mailbox Interface\n");
2749 DAC960_LA_reset_ctrl(base);
2750 return -ENODEV;
2751 }
2752 DAC960_LA_enable_intr(base);
2753 cb->qcmd = myrb_qcmd;
2754 cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2755 if (cb->dual_mode_interface)
2756 cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2757 else
2758 cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2759 cb->disable_intr = DAC960_LA_disable_intr;
2760 cb->reset = DAC960_LA_reset_ctrl;
2761
2762 return 0;
2763}
2764
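/*
 * Completions are posted into a ring of status mailboxes in host
 * memory; the handler consumes entries until it finds one with
 * 'valid' clear, wrapping from last_stat_mbox back to first_stat_mbox.
 */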
2765static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2766{
2767 struct myrb_hba *cb = arg;
2768 void __iomem *base = cb->io_base;
2769 struct myrb_stat_mbox *next_stat_mbox;
2770 unsigned long flags;
2771
2772 spin_lock_irqsave(&cb->queue_lock, flags);
2773 DAC960_LA_ack_intr(base);
2774 next_stat_mbox = cb->next_stat_mbox;
2775 while (next_stat_mbox->valid) {
2776 unsigned char id = next_stat_mbox->id;
2777 struct scsi_cmnd *scmd = NULL;
2778 struct myrb_cmdblk *cmd_blk = NULL;
2779
2780 if (id == MYRB_DCMD_TAG)
2781 cmd_blk = &cb->dcmd_blk;
2782 else if (id == MYRB_MCMD_TAG)
2783 cmd_blk = &cb->mcmd_blk;
2784 else {
2785 scmd = scsi_host_find_tag(cb->host, id - 3);
2786 if (scmd)
2787 cmd_blk = scsi_cmd_priv(scmd);
2788 }
2789 if (cmd_blk)
2790 cmd_blk->status = next_stat_mbox->status;
2791 else
2792 dev_err(&cb->pdev->dev,
2793 "Unhandled command completion %d\n", id);
2794
2795 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2796 if (++next_stat_mbox > cb->last_stat_mbox)
2797 next_stat_mbox = cb->first_stat_mbox;
2798
2799 if (cmd_blk) {
2800 if (id < 3)
2801 myrb_handle_cmdblk(cb, cmd_blk);
2802 else
2803 myrb_handle_scsi(cb, cmd_blk, scmd);
2804 }
2805 }
2806 cb->next_stat_mbox = next_stat_mbox;
2807 spin_unlock_irqrestore(&cb->queue_lock, flags);
2808 return IRQ_HANDLED;
2809}
2810
2811struct myrb_privdata DAC960_LA_privdata = {
2812 .hw_init = DAC960_LA_hw_init,
2813 .irq_handler = DAC960_LA_intr_handler,
2814 .mmio_size = DAC960_LA_mmio_size,
2815};
2816
2817/*
2818 * DAC960 PG Series Controllers
2819 */
2820static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2821{
2822 writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2823}
2824
2825static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2826{
2827 writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2828}
2829
2830static inline void DAC960_PG_gen_intr(void __iomem *base)
2831{
2832 writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
2833}
2834
2835static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2836{
2837 writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2838}
2839
2840static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2841{
2842 writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2843}
2844
2845static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2846{
2847 unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2848
2849 return idb & DAC960_PG_IDB_HWMBOX_FULL;
2850}
2851
2852static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2853{
2854 unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2855
2856 return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2857}
2858
2859static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2860{
2861 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2862}
2863
2864static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
2865{
2866 writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2867}
2868
2869static inline void DAC960_PG_ack_intr(void __iomem *base)
2870{
2871 writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2872 base + DAC960_PG_ODB_OFFSET);
2873}
2874
2875static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2876{
2877 unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2878
2879 return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2880}
2881
2882static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
2883{
2884 unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2885
2886 return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
2887}
2888
2889static inline void DAC960_PG_enable_intr(void __iomem *base)
2890{
2891 unsigned int imask = (unsigned int)-1;
2892
2893 imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2894 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2895}
2896
2897static inline void DAC960_PG_disable_intr(void __iomem *base)
2898{
2899 unsigned int imask = (unsigned int)-1;
2900
2901 writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2902}
2903
2904static inline bool DAC960_PG_intr_enabled(void __iomem *base)
2905{
2906 unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
2907
2908 return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
2909}
2910
2911static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2912 union myrb_cmd_mbox *mbox)
2913{
2914 mem_mbox->words[1] = mbox->words[1];
2915 mem_mbox->words[2] = mbox->words[2];
2916 mem_mbox->words[3] = mbox->words[3];
2917 /* Memory barrier to prevent reordering */
2918 wmb();
2919 mem_mbox->words[0] = mbox->words[0];
2920 /* Memory barrier to force PCI access */
2921 mb();
2922}
2923
2924static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2925 union myrb_cmd_mbox *mbox)
2926{
2927 writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2928 writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2929 writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2930 writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2931}
2932
2933static inline unsigned char
2934DAC960_PG_read_status_cmd_ident(void __iomem *base)
2935{
2936 return readb(base + DAC960_PG_STSID_OFFSET);
2937}
2938
2939static inline unsigned short
2940DAC960_PG_read_status(void __iomem *base)
2941{
2942 return readw(base + DAC960_PG_STS_OFFSET);
2943}
2944
2945static inline bool
2946DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2947 unsigned char *param0, unsigned char *param1)
2948{
2949 unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2950
2951 if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2952 return false;
2953 errsts &= ~DAC960_PG_ERRSTS_PENDING;
2954 *error = errsts;
2955 *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2956 *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2957 writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2958 return true;
2959}
2960
2961static inline unsigned short
2962DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2963 union myrb_cmd_mbox *mbox)
2964{
2965 unsigned short status;
2966 int timeout = 0;
2967
2968 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2969 if (!DAC960_PG_hw_mbox_is_full(base))
2970 break;
2971 udelay(10);
2972 timeout++;
2973 }
2974 if (DAC960_PG_hw_mbox_is_full(base)) {
2975 dev_err(&pdev->dev,
2976 "Timeout waiting for empty mailbox\n");
2977 return MYRB_STATUS_SUBSYS_TIMEOUT;
2978 }
2979 DAC960_PG_write_hw_mbox(base, mbox);
2980 DAC960_PG_hw_mbox_new_cmd(base);
2981
2982 timeout = 0;
2983 while (timeout < MYRB_MAILBOX_TIMEOUT) {
2984 if (DAC960_PG_hw_mbox_status_available(base))
2985 break;
2986 udelay(10);
2987 timeout++;
2988 }
2989 if (!DAC960_PG_hw_mbox_status_available(base)) {
2990 dev_err(&pdev->dev,
2991 "Timeout waiting for mailbox status\n");
2992 return MYRB_STATUS_SUBSYS_TIMEOUT;
2993 }
2994 status = DAC960_PG_read_status(base);
2995 DAC960_PG_ack_hw_mbox_intr(base);
2996 DAC960_PG_ack_hw_mbox_status(base);
2997
2998 return status;
2999}
3000
3001static int DAC960_PG_hw_init(struct pci_dev *pdev,
3002 struct myrb_hba *cb, void __iomem *base)
3003{
3004 int timeout = 0;
3005 unsigned char error, parm0, parm1;
3006
3007 DAC960_PG_disable_intr(base);
3008 DAC960_PG_ack_hw_mbox_status(base);
3009 udelay(1000);
3010 while (DAC960_PG_init_in_progress(base) &&
3011 timeout < MYRB_MAILBOX_TIMEOUT) {
3012 if (DAC960_PG_read_error_status(base, &error,
3013 &parm0, &parm1) &&
3014 myrb_err_status(cb, error, parm0, parm1))
3015 return -EIO;
3016 udelay(10);
3017 timeout++;
3018 }
3019 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3020 dev_err(&pdev->dev,
3021 "Timeout waiting for Controller Initialisation\n");
3022 return -ETIMEDOUT;
3023 }
3024 if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
3025 dev_err(&pdev->dev,
3026 "Unable to Enable Memory Mailbox Interface\n");
3027 DAC960_PG_reset_ctrl(base);
3028 return -ENODEV;
3029 }
3030 DAC960_PG_enable_intr(base);
3031 cb->qcmd = myrb_qcmd;
3032 cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
3033 if (cb->dual_mode_interface)
3034 cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
3035 else
3036 cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
3037 cb->disable_intr = DAC960_PG_disable_intr;
3038 cb->reset = DAC960_PG_reset_ctrl;
3039
3040 return 0;
3041}
3042
3043static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
3044{
3045 struct myrb_hba *cb = arg;
3046 void __iomem *base = cb->io_base;
3047 struct myrb_stat_mbox *next_stat_mbox;
3048 unsigned long flags;
3049
3050 spin_lock_irqsave(&cb->queue_lock, flags);
3051 DAC960_PG_ack_intr(base);
3052 next_stat_mbox = cb->next_stat_mbox;
3053 while (next_stat_mbox->valid) {
3054 unsigned char id = next_stat_mbox->id;
3055 struct scsi_cmnd *scmd = NULL;
3056 struct myrb_cmdblk *cmd_blk = NULL;
3057
3058 if (id == MYRB_DCMD_TAG)
3059 cmd_blk = &cb->dcmd_blk;
3060 else if (id == MYRB_MCMD_TAG)
3061 cmd_blk = &cb->mcmd_blk;
3062 else {
3063 scmd = scsi_host_find_tag(cb->host, id - 3);
3064 if (scmd)
3065 cmd_blk = scsi_cmd_priv(scmd);
3066 }
3067 if (cmd_blk)
3068 cmd_blk->status = next_stat_mbox->status;
3069 else
3070 dev_err(&cb->pdev->dev,
3071 "Unhandled command completion %d\n", id);
3072
3073 memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
3074 if (++next_stat_mbox > cb->last_stat_mbox)
3075 next_stat_mbox = cb->first_stat_mbox;
3076
3077 if (id < 3)
3078 myrb_handle_cmdblk(cb, cmd_blk);
3079 else
3080 myrb_handle_scsi(cb, cmd_blk, scmd);
3081 }
3082 cb->next_stat_mbox = next_stat_mbox;
3083 spin_unlock_irqrestore(&cb->queue_lock, flags);
3084 return IRQ_HANDLED;
3085}
3086
3087struct myrb_privdata DAC960_PG_privdata = {
3088 .hw_init = DAC960_PG_hw_init,
3089 .irq_handler = DAC960_PG_intr_handler,
3090 .mmio_size = DAC960_PG_mmio_size,
3091};
3092
3093
3094/*
3095 * DAC960 PD Series Controllers
3096 */
3097
3098static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3099{
3100 writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3101}
3102
3103static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3104{
3105 writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3106}
3107
3108static inline void DAC960_PD_gen_intr(void __iomem *base)
3109{
3110 writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
3111}
3112
3113static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3114{
3115 writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3116}
3117
3118static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3119{
3120 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3121
3122 return idb & DAC960_PD_IDB_HWMBOX_FULL;
3123}
3124
3125static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3126{
3127 unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3128
3129 return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3130}
3131
3132static inline void DAC960_PD_ack_intr(void __iomem *base)
3133{
3134 writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3135}
3136
3137static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3138{
3139 unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3140
3141 return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3142}
3143
3144static inline void DAC960_PD_enable_intr(void __iomem *base)
3145{
3146 writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3147}
3148
3149static inline void DAC960_PD_disable_intr(void __iomem *base)
3150{
3151 writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3152}
3153
3154static inline bool DAC960_PD_intr_enabled(void __iomem *base)
3155{
3156 unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
3157
3158 return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
3159}
3160
3161static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3162 union myrb_cmd_mbox *mbox)
3163{
3164 writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3165 writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3166 writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3167 writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3168}
3169
3170static inline unsigned char
3171DAC960_PD_read_status_cmd_ident(void __iomem *base)
3172{
3173 return readb(base + DAC960_PD_STSID_OFFSET);
3174}
3175
3176static inline unsigned short
3177DAC960_PD_read_status(void __iomem *base)
3178{
3179 return readw(base + DAC960_PD_STS_OFFSET);
3180}
3181
3182static inline bool
3183DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3184 unsigned char *param0, unsigned char *param1)
3185{
3186 unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3187
3188 if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3189 return false;
3190 errsts &= ~DAC960_PD_ERRSTS_PENDING;
3191 *error = errsts;
3192 *param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3193 *param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3194 writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3195 return true;
3196}
3197
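/*
 * The PD series has no memory mailbox (myrb_enable_mmio() is called
 * with a NULL mailbox init function), so commands are written straight
 * into the hardware mailbox, spinning briefly while it is still full.
 */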
3198static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3199{
3200 void __iomem *base = cb->io_base;
3201 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3202
3203 while (DAC960_PD_hw_mbox_is_full(base))
3204 udelay(1);
3205 DAC960_PD_write_cmd_mbox(base, mbox);
3206 DAC960_PD_hw_mbox_new_cmd(base);
3207}
3208
3209static int DAC960_PD_hw_init(struct pci_dev *pdev,
3210 struct myrb_hba *cb, void __iomem *base)
3211{
3212 int timeout = 0;
3213 unsigned char error, parm0, parm1;
3214
3215 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3216 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3217 (unsigned long)cb->io_addr);
3218 return -EBUSY;
3219 }
3220 DAC960_PD_disable_intr(base);
3221 DAC960_PD_ack_hw_mbox_status(base);
3222 udelay(1000);
3223 while (DAC960_PD_init_in_progress(base) &&
3224 timeout < MYRB_MAILBOX_TIMEOUT) {
3225 if (DAC960_PD_read_error_status(base, &error,
3226 &parm0, &parm1) &&
3227 myrb_err_status(cb, error, parm0, parm1))
3228 return -EIO;
3229 udelay(10);
3230 timeout++;
3231 }
3232 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3233 dev_err(&pdev->dev,
3234 "Timeout waiting for Controller Initialisation\n");
3235 return -ETIMEDOUT;
3236 }
3237 if (!myrb_enable_mmio(cb, NULL)) {
3238 dev_err(&pdev->dev,
3239 "Unable to Enable Memory Mailbox Interface\n");
3240 DAC960_PD_reset_ctrl(base);
3241 return -ENODEV;
3242 }
3243 DAC960_PD_enable_intr(base);
3244 cb->qcmd = DAC960_PD_qcmd;
3245 cb->disable_intr = DAC960_PD_disable_intr;
3246 cb->reset = DAC960_PD_reset_ctrl;
3247
3248 return 0;
3249}
3250
3251static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3252{
3253 struct myrb_hba *cb = arg;
3254 void __iomem *base = cb->io_base;
3255 unsigned long flags;
3256
3257 spin_lock_irqsave(&cb->queue_lock, flags);
3258 while (DAC960_PD_hw_mbox_status_available(base)) {
3259 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3260 struct scsi_cmnd *scmd = NULL;
3261 struct myrb_cmdblk *cmd_blk = NULL;
3262
3263 if (id == MYRB_DCMD_TAG)
3264 cmd_blk = &cb->dcmd_blk;
3265 else if (id == MYRB_MCMD_TAG)
3266 cmd_blk = &cb->mcmd_blk;
3267 else {
3268 scmd = scsi_host_find_tag(cb->host, id - 3);
3269 if (scmd)
3270 cmd_blk = scsi_cmd_priv(scmd);
3271 }
3272 if (cmd_blk)
3273 cmd_blk->status = DAC960_PD_read_status(base);
3274 else
3275 dev_err(&cb->pdev->dev,
3276 "Unhandled command completion %d\n", id);
3277
3278 DAC960_PD_ack_intr(base);
3279 DAC960_PD_ack_hw_mbox_status(base);
3280
3281 if (id < 3)
3282 myrb_handle_cmdblk(cb, cmd_blk);
3283 else
3284 myrb_handle_scsi(cb, cmd_blk, scmd);
3285 }
3286 spin_unlock_irqrestore(&cb->queue_lock, flags);
3287 return IRQ_HANDLED;
3288}
3289
3290struct myrb_privdata DAC960_PD_privdata = {
3291 .hw_init = DAC960_PD_hw_init,
3292 .irq_handler = DAC960_PD_intr_handler,
3293 .mmio_size = DAC960_PD_mmio_size,
3294};
3295
3296
3297/*
3298 * DAC960 P Series Controllers
3299 *
3300 * Similar to the DAC960 PD Series Controllers, but some commands have
3301 * to be translated.
3302 */
3303
3304static inline void myrb_translate_enquiry(void *enq)
3305{
3306 memcpy(enq + 132, enq + 36, 64);
3307 memset(enq + 36, 0, 96);
3308}
3309
3310static inline void myrb_translate_devstate(void *state)
3311{
3312 memcpy(state + 2, state + 3, 1);
3313 memmove(state + 4, state + 5, 2);
3314 memmove(state + 6, state + 8, 4);
3315}
3316
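/*
 * Judging by the shuffles below, the old P series read/write mailbox
 * layout keeps the logical drive number in byte 7 and the two bits
 * the newer format stores there in the top of byte 3, while the newer
 * format carries the logical drive number in bits 3-7 of byte 3.
 */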
3317static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3318{
3319 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3320 int ldev_num = mbox->type5.ld.ldev_num;
3321
3322 mbox->bytes[3] &= 0x7;
3323 mbox->bytes[3] |= mbox->bytes[7] << 6;
3324 mbox->bytes[7] = ldev_num;
3325}
3326
3327static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3328{
3329 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3330 int ldev_num = mbox->bytes[7];
3331
3332 mbox->bytes[7] = mbox->bytes[3] >> 6;
3333 mbox->bytes[3] &= 0x7;
3334 mbox->bytes[3] |= ldev_num << 3;
3335}
3336
3337static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3338{
3339 void __iomem *base = cb->io_base;
3340 union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3341
3342 switch (mbox->common.opcode) {
3343 case MYRB_CMD_ENQUIRY:
3344 mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3345 break;
3346 case MYRB_CMD_GET_DEVICE_STATE:
3347 mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3348 break;
3349 case MYRB_CMD_READ:
3350 mbox->common.opcode = MYRB_CMD_READ_OLD;
3351 myrb_translate_to_rw_command(cmd_blk);
3352 break;
3353 case MYRB_CMD_WRITE:
3354 mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3355 myrb_translate_to_rw_command(cmd_blk);
3356 break;
3357 case MYRB_CMD_READ_SG:
3358 mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3359 myrb_translate_to_rw_command(cmd_blk);
3360 break;
3361 case MYRB_CMD_WRITE_SG:
3362 mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3363 myrb_translate_to_rw_command(cmd_blk);
3364 break;
3365 default:
3366 break;
3367 }
3368 while (DAC960_PD_hw_mbox_is_full(base))
3369 udelay(1);
3370 DAC960_PD_write_cmd_mbox(base, mbox);
3371 DAC960_PD_hw_mbox_new_cmd(base);
3372}
3373
3374
3375static int DAC960_P_hw_init(struct pci_dev *pdev,
3376 struct myrb_hba *cb, void __iomem *base)
3377{
3378 int timeout = 0;
3379 unsigned char error, parm0, parm1;
3380
3381 if (!request_region(cb->io_addr, 0x80, "myrb")) {
3382 dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3383 (unsigned long)cb->io_addr);
3384 return -EBUSY;
3385 }
3386 DAC960_PD_disable_intr(base);
3387 DAC960_PD_ack_hw_mbox_status(base);
3388 udelay(1000);
3389 while (DAC960_PD_init_in_progress(base) &&
3390 timeout < MYRB_MAILBOX_TIMEOUT) {
3391 if (DAC960_PD_read_error_status(base, &error,
3392 &parm0, &parm1) &&
3393 myrb_err_status(cb, error, parm0, parm1))
3394 return -EAGAIN;
3395 udelay(10);
3396 timeout++;
3397 }
3398 if (timeout == MYRB_MAILBOX_TIMEOUT) {
3399 dev_err(&pdev->dev,
3400 "Timeout waiting for Controller Initialisation\n");
3401 return -ETIMEDOUT;
3402 }
3403 if (!myrb_enable_mmio(cb, NULL)) {
3404 dev_err(&pdev->dev,
3405 "Unable to allocate DMA mapped memory\n");
3406 DAC960_PD_reset_ctrl(base);
3407 return -ETIMEDOUT;
3408 }
3409 DAC960_PD_enable_intr(base);
3410 cb->qcmd = DAC960_P_qcmd;
3411 cb->disable_intr = DAC960_PD_disable_intr;
3412 cb->reset = DAC960_PD_reset_ctrl;
3413
3414 return 0;
3415}
3416
3417static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3418{
3419 struct myrb_hba *cb = arg;
3420 void __iomem *base = cb->io_base;
3421 unsigned long flags;
3422
3423 spin_lock_irqsave(&cb->queue_lock, flags);
3424 while (DAC960_PD_hw_mbox_status_available(base)) {
3425 unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3426 struct scsi_cmnd *scmd = NULL;
3427 struct myrb_cmdblk *cmd_blk = NULL;
3428 union myrb_cmd_mbox *mbox;
3429 enum myrb_cmd_opcode op;
3430
3431
3432 if (id == MYRB_DCMD_TAG)
3433 cmd_blk = &cb->dcmd_blk;
3434 else if (id == MYRB_MCMD_TAG)
3435 cmd_blk = &cb->mcmd_blk;
3436 else {
3437 scmd = scsi_host_find_tag(cb->host, id - 3);
3438 if (scmd)
3439 cmd_blk = scsi_cmd_priv(scmd);
3440 }
3441 if (cmd_blk)
3442 cmd_blk->status = DAC960_PD_read_status(base);
3443 else
3444 dev_err(&cb->pdev->dev,
3445 "Unhandled command completion %d\n", id);
3446
3447 DAC960_PD_ack_intr(base);
3448 DAC960_PD_ack_hw_mbox_status(base);
3449
3450 if (!cmd_blk)
3451 continue;
3452
3453 mbox = &cmd_blk->mbox;
3454 op = mbox->common.opcode;
3455 switch (op) {
3456 case MYRB_CMD_ENQUIRY_OLD:
3457 mbox->common.opcode = MYRB_CMD_ENQUIRY;
3458 myrb_translate_enquiry(cb->enquiry);
3459 break;
3460 case MYRB_CMD_READ_OLD:
3461 mbox->common.opcode = MYRB_CMD_READ;
3462 myrb_translate_from_rw_command(cmd_blk);
3463 break;
3464 case MYRB_CMD_WRITE_OLD:
3465 mbox->common.opcode = MYRB_CMD_WRITE;
3466 myrb_translate_from_rw_command(cmd_blk);
3467 break;
3468 case MYRB_CMD_READ_SG_OLD:
3469 mbox->common.opcode = MYRB_CMD_READ_SG;
3470 myrb_translate_from_rw_command(cmd_blk);
3471 break;
3472 case MYRB_CMD_WRITE_SG_OLD:
3473 mbox->common.opcode = MYRB_CMD_WRITE_SG;
3474 myrb_translate_from_rw_command(cmd_blk);
3475 break;
3476 default:
3477 break;
3478 }
3479 if (id < 3)
3480 myrb_handle_cmdblk(cb, cmd_blk);
3481 else
3482 myrb_handle_scsi(cb, cmd_blk, scmd);
3483 }
3484 spin_unlock_irqrestore(&cb->queue_lock, flags);
3485 return IRQ_HANDLED;
3486}
3487
3488struct myrb_privdata DAC960_P_privdata = {
3489 .hw_init = DAC960_P_hw_init,
3490 .irq_handler = DAC960_P_intr_handler,
3491 .mmio_size = DAC960_PD_mmio_size,
3492};
3493
3494static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3495 const struct pci_device_id *entry)
3496{
3497 struct myrb_privdata *privdata =
3498 (struct myrb_privdata *)entry->driver_data;
3499 irq_handler_t irq_handler = privdata->irq_handler;
3500 unsigned int mmio_size = privdata->mmio_size;
3501 struct Scsi_Host *shost;
3502 struct myrb_hba *cb = NULL;
3503
3504 shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3505 if (!shost) {
3506 dev_err(&pdev->dev, "Unable to allocate Controller\n");
3507 return NULL;
3508 }
3509 shost->max_cmd_len = 12;
3510 shost->max_lun = 256;
3511 cb = shost_priv(shost);
3512 mutex_init(&cb->dcmd_mutex);
3513 mutex_init(&cb->dma_mutex);
3514 cb->pdev = pdev;
3515
3516 if (pci_enable_device(pdev))
3517 goto failure;
3518
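	/*
	 * Editor's note: the PD and P models expose their registers in
	 * I/O port space (BAR 0) alongside a separate memory window
	 * (BAR 1); all other models use a single memory BAR.
	 */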
3519 if (privdata->hw_init == DAC960_PD_hw_init ||
3520 privdata->hw_init == DAC960_P_hw_init) {
3521 cb->io_addr = pci_resource_start(pdev, 0);
3522 cb->pci_addr = pci_resource_start(pdev, 1);
3523 } else
3524 cb->pci_addr = pci_resource_start(pdev, 0);
3525
3526 pci_set_drvdata(pdev, cb);
3527 spin_lock_init(&cb->queue_lock);
3528 if (mmio_size < PAGE_SIZE)
3529 mmio_size = PAGE_SIZE;
3530 cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
3531 if (cb->mmio_base == NULL) {
3532 dev_err(&pdev->dev,
3533 "Unable to map Controller Register Window\n");
3534 goto failure;
3535 }
3536
3537 cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3538 if (privdata->hw_init(pdev, cb, cb->io_base))
3539 goto failure;
3540
3541 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3542 dev_err(&pdev->dev,
3543 "Unable to acquire IRQ Channel %d\n", pdev->irq);
3544 goto failure;
3545 }
3546 cb->irq = pdev->irq;
3547 return cb;
3548
3549failure:
3550 dev_err(&pdev->dev,
3551 "Failed to initialize Controller\n");
3552 myrb_cleanup(cb);
3553 return NULL;
3554}
3555
3556static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3557{
3558 struct myrb_hba *cb;
3559 int ret;
3560
3561 cb = myrb_detect(dev, entry);
3562 if (!cb)
3563 return -ENODEV;
3564
3565 ret = myrb_get_hba_config(cb);
3566 if (ret < 0) {
3567 myrb_cleanup(cb);
3568 return ret;
3569 }
3570
3571 if (!myrb_create_mempools(dev, cb)) {
3572 ret = -ENOMEM;
3573 goto failed;
3574 }
3575
3576 ret = scsi_add_host(cb->host, &dev->dev);
3577 if (ret) {
3578 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3579 myrb_destroy_mempools(cb);
3580 goto failed;
3581 }
3582 scsi_scan_host(cb->host);
3583 return 0;
3584failed:
3585 myrb_cleanup(cb);
3586 return ret;
3587}
3588
3590static void myrb_remove(struct pci_dev *pdev)
3591{
3592 struct myrb_hba *cb = pci_get_drvdata(pdev);
3593
3594	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
3595 myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3596 myrb_cleanup(cb);
3597 myrb_destroy_mempools(cb);
3598}
3599
3601static const struct pci_device_id myrb_id_table[] = {
3602 {
3603 PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3604 PCI_DEVICE_ID_DEC_21285,
3605 PCI_VENDOR_ID_MYLEX,
3606 PCI_DEVICE_ID_MYLEX_DAC960_LA),
3607 .driver_data = (unsigned long) &DAC960_LA_privdata,
3608 },
3609 {
3610 PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3611 },
3612 {
3613 PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3614 },
3615 {
3616 PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3617 },
3618 {0, },
3619};
3620
3621MODULE_DEVICE_TABLE(pci, myrb_id_table);
3622
3623static struct pci_driver myrb_pci_driver = {
3624 .name = "myrb",
3625 .id_table = myrb_id_table,
3626 .probe = myrb_probe,
3627 .remove = myrb_remove,
3628};
3629
3630static int __init myrb_init_module(void)
3631{
3632 int ret;
3633
3634 myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3635 if (!myrb_raid_template)
3636 return -ENODEV;
3637
3638 ret = pci_register_driver(&myrb_pci_driver);
3639 if (ret)
3640 raid_class_release(myrb_raid_template);
3641
3642 return ret;
3643}
3644
3645static void __exit myrb_cleanup_module(void)
3646{
3647 pci_unregister_driver(&myrb_pci_driver);
3648 raid_class_release(myrb_raid_template);
3649}
3650
3651module_init(myrb_init_module);
3652module_exit(myrb_cleanup_module);
3653
3654MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3655MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3656MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
new file mode 100644
index 000000000000..9289c19fcb2f
--- /dev/null
+++ b/drivers/scsi/myrb.h
@@ -0,0 +1,958 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4 *
5 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
6 *
7 * Based on the original DAC960 driver,
8 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
9 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
10 *
11 */
12
13#ifndef MYRB_H
14#define MYRB_H
15
16#define MYRB_MAX_LDEVS 32
17#define MYRB_MAX_CHANNELS 3
18#define MYRB_MAX_TARGETS 16
19#define MYRB_MAX_PHYSICAL_DEVICES 45
20#define MYRB_SCATTER_GATHER_LIMIT 32
21#define MYRB_CMD_MBOX_COUNT 256
22#define MYRB_STAT_MBOX_COUNT 1024
23
24#define MYRB_BLKSIZE_BITS 9
25#define MYRB_MAILBOX_TIMEOUT 1000000
26
27#define MYRB_DCMD_TAG 1
28#define MYRB_MCMD_TAG 2
29
30#define MYRB_PRIMARY_MONITOR_INTERVAL (10 * HZ)
31#define MYRB_SECONDARY_MONITOR_INTERVAL (60 * HZ)
32
33/*
34 * DAC960 V1 Firmware Command Opcodes.
35 */
36enum myrb_cmd_opcode {
37 /* I/O Commands */
38 MYRB_CMD_READ_EXTENDED = 0x33,
39 MYRB_CMD_WRITE_EXTENDED = 0x34,
40 MYRB_CMD_READAHEAD_EXTENDED = 0x35,
41 MYRB_CMD_READ_EXTENDED_SG = 0xB3,
42 MYRB_CMD_WRITE_EXTENDED_SG = 0xB4,
43 MYRB_CMD_READ = 0x36,
44 MYRB_CMD_READ_SG = 0xB6,
45 MYRB_CMD_WRITE = 0x37,
46 MYRB_CMD_WRITE_SG = 0xB7,
47 MYRB_CMD_DCDB = 0x04,
48 MYRB_CMD_DCDB_SG = 0x84,
49 MYRB_CMD_FLUSH = 0x0A,
50 /* Controller Status Related Commands */
51 MYRB_CMD_ENQUIRY = 0x53,
52 MYRB_CMD_ENQUIRY2 = 0x1C,
53 MYRB_CMD_GET_LDRV_ELEMENT = 0x55,
54 MYRB_CMD_GET_LDEV_INFO = 0x19,
55 MYRB_CMD_IOPORTREAD = 0x39,
56 MYRB_CMD_IOPORTWRITE = 0x3A,
57 MYRB_CMD_GET_SD_STATS = 0x3E,
58 MYRB_CMD_GET_PD_STATS = 0x3F,
59 MYRB_CMD_EVENT_LOG_OPERATION = 0x72,
60 /* Device Related Commands */
61 MYRB_CMD_START_DEVICE = 0x10,
62 MYRB_CMD_GET_DEVICE_STATE = 0x50,
63 MYRB_CMD_STOP_CHANNEL = 0x13,
64 MYRB_CMD_START_CHANNEL = 0x12,
65 MYRB_CMD_RESET_CHANNEL = 0x1A,
66 /* Commands Associated with Data Consistency and Errors */
67 MYRB_CMD_REBUILD = 0x09,
68 MYRB_CMD_REBUILD_ASYNC = 0x16,
69 MYRB_CMD_CHECK_CONSISTENCY = 0x0F,
70 MYRB_CMD_CHECK_CONSISTENCY_ASYNC = 0x1E,
71 MYRB_CMD_REBUILD_STAT = 0x0C,
72 MYRB_CMD_GET_REBUILD_PROGRESS = 0x27,
73 MYRB_CMD_REBUILD_CONTROL = 0x1F,
74 MYRB_CMD_READ_BADBLOCK_TABLE = 0x0B,
75 MYRB_CMD_READ_BADDATA_TABLE = 0x25,
76 MYRB_CMD_CLEAR_BADDATA_TABLE = 0x26,
77 MYRB_CMD_GET_ERROR_TABLE = 0x17,
78 MYRB_CMD_ADD_CAPACITY_ASYNC = 0x2A,
79 MYRB_CMD_BGI_CONTROL = 0x2B,
80 /* Configuration Related Commands */
81 MYRB_CMD_READ_CONFIG2 = 0x3D,
82 MYRB_CMD_WRITE_CONFIG2 = 0x3C,
83 MYRB_CMD_READ_CONFIG_ONDISK = 0x4A,
84 MYRB_CMD_WRITE_CONFIG_ONDISK = 0x4B,
85 MYRB_CMD_READ_CONFIG = 0x4E,
86 MYRB_CMD_READ_BACKUP_CONFIG = 0x4D,
87 MYRB_CMD_WRITE_CONFIG = 0x4F,
88 MYRB_CMD_ADD_CONFIG = 0x4C,
89 MYRB_CMD_READ_CONFIG_LABEL = 0x48,
90 MYRB_CMD_WRITE_CONFIG_LABEL = 0x49,
91 /* Firmware Upgrade Related Commands */
92 MYRB_CMD_LOAD_IMAGE = 0x20,
93 MYRB_CMD_STORE_IMAGE = 0x21,
94 MYRB_CMD_PROGRAM_IMAGE = 0x22,
95 /* Diagnostic Commands */
96 MYRB_CMD_SET_DIAGNOSTIC_MODE = 0x31,
97 MYRB_CMD_RUN_DIAGNOSTIC = 0x32,
98 /* Subsystem Service Commands */
99 MYRB_CMD_GET_SUBSYS_DATA = 0x70,
100 MYRB_CMD_SET_SUBSYS_PARAM = 0x71,
101 /* Version 2.xx Firmware Commands */
102 MYRB_CMD_ENQUIRY_OLD = 0x05,
103 MYRB_CMD_GET_DEVICE_STATE_OLD = 0x14,
104 MYRB_CMD_READ_OLD = 0x02,
105 MYRB_CMD_WRITE_OLD = 0x03,
106 MYRB_CMD_READ_SG_OLD = 0x82,
107 MYRB_CMD_WRITE_SG_OLD = 0x83
108} __packed;
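
/*
 * Editor's note (illustrative, not part of the original patch): the
 * __packed attribute makes the compiler use the smallest integer type
 * that can hold all enumerators, here a single byte, so the opcode can
 * overlay Byte 0 of the 13-byte command mailbox. A build-time check
 * could assert this:
 *
 *	BUILD_BUG_ON(sizeof(enum myrb_cmd_opcode) != 1);
 */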
109
110/*
111 * DAC960 V1 Firmware Command Status Codes.
112 */
113#define MYRB_STATUS_SUCCESS 0x0000 /* Common */
114#define MYRB_STATUS_CHECK_CONDITION 0x0002 /* Common */
115#define MYRB_STATUS_NO_DEVICE 0x0102 /* Common */
116#define MYRB_STATUS_INVALID_ADDRESS 0x0105 /* Common */
117#define MYRB_STATUS_INVALID_PARAM 0x0105 /* Common */
118#define MYRB_STATUS_IRRECOVERABLE_DATA_ERROR 0x0001 /* I/O */
119#define MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE 0x0002 /* I/O */
120#define MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV 0x0105 /* I/O */
121#define MYRB_STATUS_BAD_DATA 0x010C /* I/O */
122#define MYRB_STATUS_DEVICE_BUSY 0x0008 /* DCDB */
123#define MYRB_STATUS_DEVICE_NONRESPONSIVE 0x000E /* DCDB */
124#define MYRB_STATUS_COMMAND_TERMINATED 0x000F /* DCDB */
125#define MYRB_STATUS_START_DEVICE_FAILED 0x0002 /* Device */
126#define MYRB_STATUS_INVALID_CHANNEL_OR_TARGET 0x0105 /* Device */
127#define MYRB_STATUS_CHANNEL_BUSY 0x0106 /* Device */
128#define MYRB_STATUS_OUT_OF_MEMORY 0x0107 /* Device */
129#define MYRB_STATUS_CHANNEL_NOT_STOPPED 0x0002 /* Device */
130#define MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE 0x0002 /* Consistency */
131#define MYRB_STATUS_RBLD_BADBLOCKS 0x0003 /* Consistency */
132#define MYRB_STATUS_RBLD_NEW_DISK_FAILED 0x0004 /* Consistency */
133#define MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS 0x0106 /* Consistency */
134#define MYRB_STATUS_DEPENDENT_DISK_DEAD 0x0002 /* Consistency */
135#define MYRB_STATUS_INCONSISTENT_BLOCKS 0x0003 /* Consistency */
136#define MYRB_STATUS_INVALID_OR_NONREDUNDANT_LDRV 0x0105 /* Consistency */
137#define MYRB_STATUS_NO_RBLD_OR_CHECK_INPROGRESS 0x0105 /* Consistency */
138#define MYRB_STATUS_RBLD_IN_PROGRESS_DATA_VALID 0x0000 /* Consistency */
139#define MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE 0x0002 /* Consistency */
140#define MYRB_STATUS_RBLD_FAILED_BADBLOCKS 0x0003 /* Consistency */
141#define MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED 0x0004 /* Consistency */
142#define MYRB_STATUS_RBLD_SUCCESS 0x0100 /* Consistency */
143#define MYRB_STATUS_RBLD_SUCCESS_TERMINATED 0x0107 /* Consistency */
144#define MYRB_STATUS_RBLD_NOT_CHECKED 0x0108 /* Consistency */
145#define MYRB_STATUS_BGI_SUCCESS 0x0100 /* Consistency */
146#define MYRB_STATUS_BGI_ABORTED 0x0005 /* Consistency */
147#define MYRB_STATUS_NO_BGI_INPROGRESS 0x0105 /* Consistency */
148#define MYRB_STATUS_ADD_CAPACITY_INPROGRESS 0x0004 /* Consistency */
149#define MYRB_STATUS_ADD_CAPACITY_FAILED_OR_SUSPENDED 0x00F4 /* Consistency */
150#define MYRB_STATUS_CONFIG2_CSUM_ERROR 0x0002 /* Configuration */
151#define MYRB_STATUS_CONFIGURATION_SUSPENDED 0x0106 /* Configuration */
152#define MYRB_STATUS_FAILED_TO_CONFIGURE_NVRAM 0x0105 /* Configuration */
153#define MYRB_STATUS_CONFIGURATION_NOT_SAVED 0x0106 /* Configuration */
154#define MYRB_STATUS_SUBSYS_NOTINSTALLED 0x0001 /* Subsystem */
155#define MYRB_STATUS_SUBSYS_FAILED 0x0002 /* Subsystem */
156#define MYRB_STATUS_SUBSYS_BUSY 0x0106 /* Subsystem */
157#define MYRB_STATUS_SUBSYS_TIMEOUT 0x0108 /* Subsystem */
158
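/*
 * Editor's note: several of the raw values above are shared between
 * command classes (0x0002, for instance, is "check condition",
 * "start device failed" or "attempt to rebuild online drive" depending
 * on the command), so a status is only meaningful together with the
 * opcode that produced it. An illustrative, hypothetical decoder:
 *
 *	static const char *example_decode_0002(enum myrb_cmd_opcode op)
 *	{
 *		switch (op) {
 *		case MYRB_CMD_DCDB:
 *			return "check condition";
 *		case MYRB_CMD_START_DEVICE:
 *			return "start device failed";
 *		case MYRB_CMD_REBUILD:
 *			return "attempt to rebuild online drive";
 *		default:
 *			return "unknown";
 *		}
 *	}
 */
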
159/*
160 * DAC960 V1 Firmware Enquiry Command reply structure.
161 */
162struct myrb_enquiry {
163 unsigned char ldev_count; /* Byte 0 */
164 unsigned int rsvd1:24; /* Bytes 1-3 */
165 unsigned int ldev_sizes[32]; /* Bytes 4-131 */
166 unsigned short flash_age; /* Bytes 132-133 */
167 struct {
168 unsigned char deferred:1; /* Byte 134 Bit 0 */
169 unsigned char low_bat:1; /* Byte 134 Bit 1 */
170 unsigned char rsvd2:6; /* Byte 134 Bits 2-7 */
171 } status;
172 unsigned char rsvd3:8; /* Byte 135 */
173 unsigned char fw_minor_version; /* Byte 136 */
174 unsigned char fw_major_version; /* Byte 137 */
175 enum {
176 MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS = 0x00,
177 MYRB_STDBY_RBLD_IN_PROGRESS = 0x01,
178 MYRB_BG_RBLD_IN_PROGRESS = 0x02,
179 MYRB_BG_CHECK_IN_PROGRESS = 0x03,
180 MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR = 0xFF,
181 MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED = 0xF0,
182 MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED = 0xF1,
183 MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER = 0xF2,
184 MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED = 0xF3
185 } __packed rbld; /* Byte 138 */
186 unsigned char max_tcq; /* Byte 139 */
187 unsigned char ldev_offline; /* Byte 140 */
188 unsigned char rsvd4:8; /* Byte 141 */
189 unsigned short ev_seq; /* Bytes 142-143 */
190 unsigned char ldev_critical; /* Byte 144 */
191 unsigned int rsvd5:24; /* Bytes 145-147 */
192 unsigned char pdev_dead; /* Byte 148 */
193 unsigned char rsvd6:8; /* Byte 149 */
194 unsigned char rbld_count; /* Byte 150 */
195 struct {
196 unsigned char rsvd7:3; /* Byte 151 Bits 0-2 */
197 unsigned char bbu_present:1; /* Byte 151 Bit 3 */
198 unsigned char rsvd8:4; /* Byte 151 Bits 4-7 */
199 } misc;
200 struct {
201 unsigned char target;
202 unsigned char channel;
203	} dead_drives[21];			/* Bytes 152-193 */
204	unsigned char rsvd9[62];		/* Bytes 194-255 */
205} __packed;
206
207/*
208 * DAC960 V1 Firmware Enquiry2 Command reply structure.
209 */
210struct myrb_enquiry2 {
211 struct {
212 enum {
213 DAC960_V1_P_PD_PU = 0x01,
214 DAC960_V1_PL = 0x02,
215 DAC960_V1_PG = 0x10,
216 DAC960_V1_PJ = 0x11,
217 DAC960_V1_PR = 0x12,
218 DAC960_V1_PT = 0x13,
219 DAC960_V1_PTL0 = 0x14,
220 DAC960_V1_PRL = 0x15,
221 DAC960_V1_PTL1 = 0x16,
222 DAC960_V1_1164P = 0x20
223 } __packed sub_model; /* Byte 0 */
224 unsigned char actual_channels; /* Byte 1 */
225 enum {
226 MYRB_5_CHANNEL_BOARD = 0x01,
227 MYRB_3_CHANNEL_BOARD = 0x02,
228 MYRB_2_CHANNEL_BOARD = 0x03,
229 MYRB_3_CHANNEL_ASIC_DAC = 0x04
230 } __packed model; /* Byte 2 */
231 enum {
232 MYRB_EISA_CONTROLLER = 0x01,
233 MYRB_MCA_CONTROLLER = 0x02,
234 MYRB_PCI_CONTROLLER = 0x03,
235 MYRB_SCSI_TO_SCSI = 0x08
236 } __packed controller; /* Byte 3 */
237 } hw; /* Bytes 0-3 */
238 /* MajorVersion.MinorVersion-FirmwareType-TurnID */
239 struct {
240 unsigned char major_version; /* Byte 4 */
241 unsigned char minor_version; /* Byte 5 */
242 unsigned char turn_id; /* Byte 6 */
243 char firmware_type; /* Byte 7 */
244 } fw; /* Bytes 4-7 */
245	unsigned int rsvd1;			/* Bytes 8-11 */
246 unsigned char cfg_chan; /* Byte 12 */
247 unsigned char cur_chan; /* Byte 13 */
248 unsigned char max_targets; /* Byte 14 */
249 unsigned char max_tcq; /* Byte 15 */
250 unsigned char max_ldev; /* Byte 16 */
251 unsigned char max_arms; /* Byte 17 */
252 unsigned char max_spans; /* Byte 18 */
253 unsigned char rsvd2; /* Byte 19 */
254 unsigned int rsvd3; /* Bytes 20-23 */
255 unsigned int mem_size; /* Bytes 24-27 */
256 unsigned int cache_size; /* Bytes 28-31 */
257 unsigned int flash_size; /* Bytes 32-35 */
258 unsigned int nvram_size; /* Bytes 36-39 */
259 struct {
260 enum {
261 MYRB_RAM_TYPE_DRAM = 0x0,
262 MYRB_RAM_TYPE_EDO = 0x1,
263 MYRB_RAM_TYPE_SDRAM = 0x2,
264 MYRB_RAM_TYPE_Last = 0x7
265 } __packed ram:3; /* Byte 40 Bits 0-2 */
266 enum {
267 MYRB_ERR_CORR_None = 0x0,
268 MYRB_ERR_CORR_Parity = 0x1,
269 MYRB_ERR_CORR_ECC = 0x2,
270 MYRB_ERR_CORR_Last = 0x7
271 } __packed ec:3; /* Byte 40 Bits 3-5 */
272 unsigned char fast_page:1; /* Byte 40 Bit 6 */
273 unsigned char low_power:1; /* Byte 40 Bit 7 */
274		unsigned char rsvd4;		/* Byte 41 */
275 } mem_type;
276 unsigned short clock_speed; /* Bytes 42-43 */
277 unsigned short mem_speed; /* Bytes 44-45 */
278 unsigned short hw_speed; /* Bytes 46-47 */
279 unsigned char rsvd5[12]; /* Bytes 48-59 */
280 unsigned short max_cmds; /* Bytes 60-61 */
281 unsigned short max_sge; /* Bytes 62-63 */
282 unsigned short max_drv_cmds; /* Bytes 64-65 */
283 unsigned short max_io_desc; /* Bytes 66-67 */
284 unsigned short max_sectors; /* Bytes 68-69 */
285 unsigned char latency; /* Byte 70 */
286 unsigned char rsvd6; /* Byte 71 */
287 unsigned char scsi_tmo; /* Byte 72 */
288 unsigned char rsvd7; /* Byte 73 */
289 unsigned short min_freelines; /* Bytes 74-75 */
290 unsigned char rsvd8[8]; /* Bytes 76-83 */
291 unsigned char rbld_rate_const; /* Byte 84 */
292	unsigned char rsvd9[11];		/* Bytes 85-95 */
293 unsigned short pdrv_block_size; /* Bytes 96-97 */
294 unsigned short ldev_block_size; /* Bytes 98-99 */
295 unsigned short max_blocks_per_cmd; /* Bytes 100-101 */
296 unsigned short block_factor; /* Bytes 102-103 */
297 unsigned short cacheline_size; /* Bytes 104-105 */
298 struct {
299 enum {
300 MYRB_WIDTH_NARROW_8BIT = 0x0,
301 MYRB_WIDTH_WIDE_16BIT = 0x1,
302 MYRB_WIDTH_WIDE_32BIT = 0x2
303 } __packed bus_width:2; /* Byte 106 Bits 0-1 */
304 enum {
305 MYRB_SCSI_SPEED_FAST = 0x0,
306 MYRB_SCSI_SPEED_ULTRA = 0x1,
307 MYRB_SCSI_SPEED_ULTRA2 = 0x2
308 } __packed bus_speed:2; /* Byte 106 Bits 2-3 */
309 unsigned char differential:1; /* Byte 106 Bit 4 */
310 unsigned char rsvd10:3; /* Byte 106 Bits 5-7 */
311 } scsi_cap;
312	unsigned char rsvd11[5];		/* Bytes 107-111 */
313 unsigned short fw_build; /* Bytes 112-113 */
314 enum {
315 MYRB_FAULT_AEMI = 0x01,
316 MYRB_FAULT_OEM1 = 0x02,
317 MYRB_FAULT_OEM2 = 0x04,
318 MYRB_FAULT_OEM3 = 0x08,
319 MYRB_FAULT_CONNER = 0x10,
320 MYRB_FAULT_SAFTE = 0x20
321 } __packed fault_mgmt; /* Byte 114 */
322 unsigned char rsvd12; /* Byte 115 */
323 struct {
324 unsigned int clustering:1; /* Byte 116 Bit 0 */
325 unsigned int online_RAID_expansion:1; /* Byte 116 Bit 1 */
326 unsigned int readahead:1; /* Byte 116 Bit 2 */
327 unsigned int bgi:1; /* Byte 116 Bit 3 */
328 unsigned int rsvd13:28; /* Bytes 116-119 */
329 } fw_features;
330 unsigned char rsvd14[8]; /* Bytes 120-127 */
331} __packed;
332
333/*
334 * DAC960 V1 Firmware Logical Drive State type.
335 */
336enum myrb_devstate {
337 MYRB_DEVICE_DEAD = 0x00,
338 MYRB_DEVICE_WO = 0x02,
339 MYRB_DEVICE_ONLINE = 0x03,
340 MYRB_DEVICE_CRITICAL = 0x04,
341 MYRB_DEVICE_STANDBY = 0x10,
342 MYRB_DEVICE_OFFLINE = 0xFF
343} __packed;
344
345/*
346 * DAC960 V1 RAID Levels
347 */
348enum myrb_raidlevel {
349 MYRB_RAID_LEVEL0 = 0x0, /* RAID 0 */
350 MYRB_RAID_LEVEL1 = 0x1, /* RAID 1 */
351 MYRB_RAID_LEVEL3 = 0x3, /* RAID 3 */
352 MYRB_RAID_LEVEL5 = 0x5, /* RAID 5 */
353 MYRB_RAID_LEVEL6 = 0x6, /* RAID 6 */
354 MYRB_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
355} __packed;
356
357/*
358 * DAC960 V1 Firmware Logical Drive Information structure.
359 */
360struct myrb_ldev_info {
361 unsigned int size; /* Bytes 0-3 */
362 enum myrb_devstate state; /* Byte 4 */
363 unsigned int raid_level:7; /* Byte 5 Bits 0-6 */
364 unsigned int wb_enabled:1; /* Byte 5 Bit 7 */
365 unsigned int rsvd:16; /* Bytes 6-7 */
366};
367
368/*
369 * DAC960 V1 Firmware Perform Event Log Operation Types.
370 */
371#define DAC960_V1_GetEventLogEntry 0x00
372
373/*
374 * DAC960 V1 Firmware Get Event Log Entry Command reply structure.
375 */
376struct myrb_log_entry {
377 unsigned char msg_type; /* Byte 0 */
378 unsigned char msg_len; /* Byte 1 */
379 unsigned char target:5; /* Byte 2 Bits 0-4 */
380 unsigned char channel:3; /* Byte 2 Bits 5-7 */
381 unsigned char lun:6; /* Byte 3 Bits 0-5 */
382 unsigned char rsvd1:2; /* Byte 3 Bits 6-7 */
383 unsigned short seq_num; /* Bytes 4-5 */
384 unsigned char sense[26]; /* Bytes 6-31 */
385};
386
387/*
388 * DAC960 V1 Firmware Get Device State Command reply structure.
389 * The structure is padded by 2 bytes for compatibility with Version 2.xx
390 * Firmware.
391 */
392struct myrb_pdev_state {
393 unsigned int present:1; /* Byte 0 Bit 0 */
394 unsigned int :7; /* Byte 0 Bits 1-7 */
395 enum {
396 MYRB_TYPE_OTHER = 0x0,
397 MYRB_TYPE_DISK = 0x1,
398 MYRB_TYPE_TAPE = 0x2,
399 MYRB_TYPE_CDROM_OR_WORM = 0x3
400 } __packed devtype:2; /* Byte 1 Bits 0-1 */
401 unsigned int rsvd1:1; /* Byte 1 Bit 2 */
402 unsigned int fast20:1; /* Byte 1 Bit 3 */
403 unsigned int sync:1; /* Byte 1 Bit 4 */
404 unsigned int fast:1; /* Byte 1 Bit 5 */
405 unsigned int wide:1; /* Byte 1 Bit 6 */
406 unsigned int tcq_supported:1; /* Byte 1 Bit 7 */
407 enum myrb_devstate state; /* Byte 2 */
408 unsigned int rsvd2:8; /* Byte 3 */
409 unsigned int sync_multiplier; /* Byte 4 */
410 unsigned int sync_offset:5; /* Byte 5 Bits 0-4 */
411 unsigned int rsvd3:3; /* Byte 5 Bits 5-7 */
412 unsigned int size; /* Bytes 6-9 */
413 unsigned int rsvd4:16; /* Bytes 10-11 */
414} __packed;
415
416/*
417 * DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
418 */
419struct myrb_rbld_progress {
420 unsigned int ldev_num; /* Bytes 0-3 */
421 unsigned int ldev_size; /* Bytes 4-7 */
422 unsigned int blocks_left; /* Bytes 8-11 */
423};
424
425/*
426 * DAC960 V1 Firmware Background Initialization Status Command reply structure.
427 */
428struct myrb_bgi_status {
429 unsigned int ldev_size; /* Bytes 0-3 */
430 unsigned int blocks_done; /* Bytes 4-7 */
431 unsigned char rsvd1[12]; /* Bytes 8-19 */
432 unsigned int ldev_num; /* Bytes 20-23 */
433 unsigned char raid_level; /* Byte 24 */
434 enum {
435 MYRB_BGI_INVALID = 0x00,
436 MYRB_BGI_STARTED = 0x02,
437 MYRB_BGI_INPROGRESS = 0x04,
438 MYRB_BGI_SUSPENDED = 0x05,
439 MYRB_BGI_CANCELLED = 0x06
440 } __packed status; /* Byte 25 */
441 unsigned char rsvd2[6]; /* Bytes 26-31 */
442};
443
444/*
445 * DAC960 V1 Firmware Error Table Entry structure.
446 */
447struct myrb_error_entry {
448 unsigned char parity_err; /* Byte 0 */
449 unsigned char soft_err; /* Byte 1 */
450 unsigned char hard_err; /* Byte 2 */
451 unsigned char misc_err; /* Byte 3 */
452};
453
454/*
455 * DAC960 V1 Firmware Read Config2 Command reply structure.
456 */
457struct myrb_config2 {
458 unsigned rsvd1:1; /* Byte 0 Bit 0 */
459 unsigned active_negation:1; /* Byte 0 Bit 1 */
460 unsigned rsvd2:5; /* Byte 0 Bits 2-6 */
461 unsigned no_rescan_on_reset_during_scan:1; /* Byte 0 Bit 7 */
462 unsigned StorageWorks_support:1; /* Byte 1 Bit 0 */
463 unsigned HewlettPackard_support:1; /* Byte 1 Bit 1 */
464 unsigned no_disconnect_on_first_command:1; /* Byte 1 Bit 2 */
465 unsigned rsvd3:2; /* Byte 1 Bits 3-4 */
466 unsigned AEMI_ARM:1; /* Byte 1 Bit 5 */
467 unsigned AEMI_OFM:1; /* Byte 1 Bit 6 */
468 unsigned rsvd4:1; /* Byte 1 Bit 7 */
469 enum {
470 MYRB_OEMID_MYLEX = 0x00,
471 MYRB_OEMID_IBM = 0x08,
472 MYRB_OEMID_HP = 0x0A,
473 MYRB_OEMID_DEC = 0x0C,
474 MYRB_OEMID_SIEMENS = 0x10,
475 MYRB_OEMID_INTEL = 0x12
476 } __packed OEMID; /* Byte 2 */
477 unsigned char oem_model_number; /* Byte 3 */
478 unsigned char physical_sector; /* Byte 4 */
479 unsigned char logical_sector; /* Byte 5 */
480 unsigned char block_factor; /* Byte 6 */
481 unsigned readahead_enabled:1; /* Byte 7 Bit 0 */
482 unsigned low_BIOS_delay:1; /* Byte 7 Bit 1 */
483 unsigned rsvd5:2; /* Byte 7 Bits 2-3 */
484 unsigned restrict_reassign_to_one_sector:1; /* Byte 7 Bit 4 */
485 unsigned rsvd6:1; /* Byte 7 Bit 5 */
486 unsigned FUA_during_write_recovery:1; /* Byte 7 Bit 6 */
487 unsigned enable_LeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */
488 unsigned char default_rebuild_rate; /* Byte 8 */
489 unsigned char rsvd7; /* Byte 9 */
490 unsigned char blocks_per_cacheline; /* Byte 10 */
491 unsigned char blocks_per_stripe; /* Byte 11 */
492 struct {
493 enum {
494 MYRB_SPEED_ASYNC = 0x0,
495 MYRB_SPEED_SYNC_8MHz = 0x1,
496 MYRB_SPEED_SYNC_5MHz = 0x2,
497 MYRB_SPEED_SYNC_10_OR_20MHz = 0x3
498		} __packed speed:2;	/* Byte 12 Bits 0-1 */
499		unsigned force_8bit:1;	/* Byte 12 Bit 2 */
500		unsigned disable_fast20:1;	/* Byte 12 Bit 3 */
501		unsigned rsvd8:3;	/* Byte 12 Bits 4-6 */
502		unsigned enable_tcq:1;	/* Byte 12 Bit 7 */
503 } __packed channelparam[6]; /* Bytes 12-17 */
504 unsigned char SCSIInitiatorID; /* Byte 18 */
505 unsigned char rsvd9; /* Byte 19 */
506 enum {
507 MYRB_STARTUP_CONTROLLER_SPINUP = 0x00,
508 MYRB_STARTUP_POWERON_SPINUP = 0x01
509 } __packed startup; /* Byte 20 */
510 unsigned char simultaneous_device_spinup_count; /* Byte 21 */
511 unsigned char seconds_delay_between_spinups; /* Byte 22 */
512 unsigned char rsvd10[29]; /* Bytes 23-51 */
513 unsigned BIOS_disabled:1; /* Byte 52 Bit 0 */
514 unsigned CDROM_boot_enabled:1; /* Byte 52 Bit 1 */
515 unsigned rsvd11:3; /* Byte 52 Bits 2-4 */
516 enum {
517 MYRB_GEOM_128_32 = 0x0,
518 MYRB_GEOM_255_63 = 0x1,
519 MYRB_GEOM_RESERVED1 = 0x2,
520 MYRB_GEOM_RESERVED2 = 0x3
521 } __packed drive_geometry:2; /* Byte 52 Bits 5-6 */
522 unsigned rsvd12:1; /* Byte 52 Bit 7 */
523 unsigned char rsvd13[9]; /* Bytes 53-61 */
524 unsigned short csum; /* Bytes 62-63 */
525};
526
527/*
528 * DAC960 V1 Firmware DCDB request structure.
529 */
530struct myrb_dcdb {
531 unsigned target:4; /* Byte 0 Bits 0-3 */
532 unsigned channel:4; /* Byte 0 Bits 4-7 */
533 enum {
534 MYRB_DCDB_XFER_NONE = 0,
535 MYRB_DCDB_XFER_DEVICE_TO_SYSTEM = 1,
536 MYRB_DCDB_XFER_SYSTEM_TO_DEVICE = 2,
537 MYRB_DCDB_XFER_ILLEGAL = 3
538 } __packed data_xfer:2; /* Byte 1 Bits 0-1 */
539 unsigned early_status:1; /* Byte 1 Bit 2 */
540 unsigned rsvd1:1; /* Byte 1 Bit 3 */
541 enum {
542 MYRB_DCDB_TMO_24_HRS = 0,
543 MYRB_DCDB_TMO_10_SECS = 1,
544 MYRB_DCDB_TMO_60_SECS = 2,
545 MYRB_DCDB_TMO_10_MINS = 3
546 } __packed timeout:2; /* Byte 1 Bits 4-5 */
547 unsigned no_autosense:1; /* Byte 1 Bit 6 */
548 unsigned allow_disconnect:1; /* Byte 1 Bit 7 */
549 unsigned short xfer_len_lo; /* Bytes 2-3 */
550 u32 dma_addr; /* Bytes 4-7 */
551 unsigned char cdb_len:4; /* Byte 8 Bits 0-3 */
552 unsigned char xfer_len_hi4:4; /* Byte 8 Bits 4-7 */
553 unsigned char sense_len; /* Byte 9 */
554 unsigned char cdb[12]; /* Bytes 10-21 */
555 unsigned char sense[64]; /* Bytes 22-85 */
556 unsigned char status; /* Byte 86 */
557 unsigned char rsvd2; /* Byte 87 */
558};
559
560/*
561 * DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address
562 * 32 Bit Byte Count structure.
563 */
564struct myrb_sge {
565 u32 sge_addr; /* Bytes 0-3 */
566 u32 sge_count; /* Bytes 4-7 */
567};
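
/*
 * Editor's note, an illustrative sketch (not part of the original
 * patch): filling a two-entry list of the elements above for a type5
 * scatter/gather read, using the mailbox union defined further below.
 * The helper name and parameters are hypothetical; 'sgl' is assumed to
 * come from the driver's DMA pool, with 'sgl_addr' as its bus address.
 *
 *	static void example_fill_read_sg(union myrb_cmd_mbox *mbox,
 *					 struct myrb_sge *sgl,
 *					 dma_addr_t sgl_addr,
 *					 dma_addr_t buf1, dma_addr_t buf2,
 *					 u32 lba, unsigned char ldev_num)
 *	{
 *		sgl[0].sge_addr = buf1;
 *		sgl[0].sge_count = 512;		// one 512-byte block
 *		sgl[1].sge_addr = buf2;
 *		sgl[1].sge_count = 512;
 *
 *		mbox->type5.opcode = MYRB_CMD_READ_SG;
 *		mbox->type5.id = MYRB_DCMD_TAG;
 *		mbox->type5.ld.xfer_len = 2;	// blocks to transfer
 *		mbox->type5.ld.ldev_num = ldev_num;
 *		mbox->type5.lba = lba;
 *		mbox->type5.addr = sgl_addr;	// bus address of the list
 *		mbox->type5.sg_count = 2;
 *		mbox->type5.sg_type = MYRB_SGL_ADDR32_COUNT32;
 *	}
 */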
568
569/*
570 * 13 Byte DAC960 V1 Firmware Command Mailbox structure.
571 * Bytes 13-15 are not used. The structure is padded to 16 bytes for
572 * efficient access.
573 */
574union myrb_cmd_mbox {
575 unsigned int words[4]; /* Words 0-3 */
576 unsigned char bytes[16]; /* Bytes 0-15 */
577 struct {
578 enum myrb_cmd_opcode opcode; /* Byte 0 */
579 unsigned char id; /* Byte 1 */
580 unsigned char rsvd[14]; /* Bytes 2-15 */
581 } __packed common;
582 struct {
583 enum myrb_cmd_opcode opcode; /* Byte 0 */
584 unsigned char id; /* Byte 1 */
585 unsigned char rsvd1[6]; /* Bytes 2-7 */
586 u32 addr; /* Bytes 8-11 */
587 unsigned char rsvd2[4]; /* Bytes 12-15 */
588 } __packed type3;
589 struct {
590 enum myrb_cmd_opcode opcode; /* Byte 0 */
591 unsigned char id; /* Byte 1 */
592 unsigned char optype; /* Byte 2 */
593 unsigned char rsvd1[5]; /* Bytes 3-7 */
594 u32 addr; /* Bytes 8-11 */
595 unsigned char rsvd2[4]; /* Bytes 12-15 */
596 } __packed type3B;
597 struct {
598 enum myrb_cmd_opcode opcode; /* Byte 0 */
599 unsigned char id; /* Byte 1 */
600 unsigned char rsvd1[5]; /* Bytes 2-6 */
601 unsigned char ldev_num:6; /* Byte 7 Bits 0-6 */
602 unsigned char auto_restore:1; /* Byte 7 Bit 7 */
603 unsigned char rsvd2[8]; /* Bytes 8-15 */
604 } __packed type3C;
605 struct {
606 enum myrb_cmd_opcode opcode; /* Byte 0 */
607 unsigned char id; /* Byte 1 */
608 unsigned char channel; /* Byte 2 */
609 unsigned char target; /* Byte 3 */
610 enum myrb_devstate state; /* Byte 4 */
611 unsigned char rsvd1[3]; /* Bytes 5-7 */
612 u32 addr; /* Bytes 8-11 */
613 unsigned char rsvd2[4]; /* Bytes 12-15 */
614 } __packed type3D;
615 struct {
616 enum myrb_cmd_opcode opcode; /* Byte 0 */
617 unsigned char id; /* Byte 1 */
618 unsigned char optype; /* Byte 2 */
619 unsigned char opqual; /* Byte 3 */
620 unsigned short ev_seq; /* Bytes 4-5 */
621 unsigned char rsvd1[2]; /* Bytes 6-7 */
622 u32 addr; /* Bytes 8-11 */
623 unsigned char rsvd2[4]; /* Bytes 12-15 */
624 } __packed type3E;
625 struct {
626 enum myrb_cmd_opcode opcode; /* Byte 0 */
627 unsigned char id; /* Byte 1 */
628 unsigned char rsvd1[2]; /* Bytes 2-3 */
629 unsigned char rbld_rate; /* Byte 4 */
630 unsigned char rsvd2[3]; /* Bytes 5-7 */
631 u32 addr; /* Bytes 8-11 */
632 unsigned char rsvd3[4]; /* Bytes 12-15 */
633 } __packed type3R;
634 struct {
635 enum myrb_cmd_opcode opcode; /* Byte 0 */
636 unsigned char id; /* Byte 1 */
637 unsigned short xfer_len; /* Bytes 2-3 */
638 unsigned int lba; /* Bytes 4-7 */
639 u32 addr; /* Bytes 8-11 */
640 unsigned char ldev_num; /* Byte 12 */
641 unsigned char rsvd[3]; /* Bytes 13-15 */
642 } __packed type4;
643 struct {
644 enum myrb_cmd_opcode opcode; /* Byte 0 */
645 unsigned char id; /* Byte 1 */
646 struct {
647 unsigned short xfer_len:11; /* Bytes 2-3 */
648 unsigned char ldev_num:5; /* Byte 3 Bits 3-7 */
649 } __packed ld;
650 unsigned int lba; /* Bytes 4-7 */
651 u32 addr; /* Bytes 8-11 */
652 unsigned char sg_count:6; /* Byte 12 Bits 0-5 */
653 enum {
654 MYRB_SGL_ADDR32_COUNT32 = 0x0,
655 MYRB_SGL_ADDR32_COUNT16 = 0x1,
656 MYRB_SGL_COUNT32_ADDR32 = 0x2,
657 MYRB_SGL_COUNT16_ADDR32 = 0x3
658 } __packed sg_type:2; /* Byte 12 Bits 6-7 */
659 unsigned char rsvd[3]; /* Bytes 13-15 */
660 } __packed type5;
661 struct {
662 enum myrb_cmd_opcode opcode; /* Byte 0 */
663 unsigned char id; /* Byte 1 */
664 unsigned char opcode2; /* Byte 2 */
665 unsigned char rsvd1:8; /* Byte 3 */
666 u32 cmd_mbox_addr; /* Bytes 4-7 */
667 u32 stat_mbox_addr; /* Bytes 8-11 */
668 unsigned char rsvd2[4]; /* Bytes 12-15 */
669 } __packed typeX;
670};
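
/*
 * Editor's note, an illustrative sketch (not part of the original
 * patch): a type3 command needs only an opcode, a tag and the bus
 * address of its reply buffer; zeroing the whole union also clears
 * the unused pad Bytes 13-15. The helper name is hypothetical.
 *
 *	static void example_fill_type3(union myrb_cmd_mbox *mbox,
 *				       enum myrb_cmd_opcode op, u32 addr)
 *	{
 *		memset(mbox, 0, sizeof(*mbox));
 *		mbox->type3.opcode = op;	// e.g. MYRB_CMD_ENQUIRY
 *		mbox->type3.id = MYRB_DCMD_TAG;
 *		mbox->type3.addr = addr;	// DMA address of the reply
 *	}
 */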
671
672/*
673 * DAC960 V1 Firmware Controller Status Mailbox structure.
674 */
675struct myrb_stat_mbox {
676 unsigned char id; /* Byte 0 */
677 unsigned char rsvd:7; /* Byte 1 Bits 0-6 */
678 unsigned char valid:1; /* Byte 1 Bit 7 */
679 unsigned short status; /* Bytes 2-3 */
680};
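
/*
 * Editor's note, an illustrative sketch (not part of the original
 * patch): a consumer of the status memory mailbox walks the ring while
 * the controller-set 'valid' bit is on, clears it to hand the slot
 * back, and wraps at the end of the array. The ring pointers are the
 * first/last/next_stat_mbox fields of struct myrb_hba below.
 *
 *	while (cb->next_stat_mbox->valid) {
 *		unsigned char id = cb->next_stat_mbox->id;
 *		unsigned short status = cb->next_stat_mbox->status;
 *
 *		cb->next_stat_mbox->valid = 0;
 *		if (++cb->next_stat_mbox > cb->last_stat_mbox)
 *			cb->next_stat_mbox = cb->first_stat_mbox;
 *		// ... complete the command identified by 'id' ...
 *	}
 */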
681
682struct myrb_cmdblk {
683 union myrb_cmd_mbox mbox;
684 unsigned short status;
685 struct completion *completion;
686 struct myrb_dcdb *dcdb;
687 dma_addr_t dcdb_addr;
688 struct myrb_sge *sgl;
689 dma_addr_t sgl_addr;
690};
691
692struct myrb_hba {
693 unsigned int ldev_block_size;
694 unsigned char ldev_geom_heads;
695 unsigned char ldev_geom_sectors;
696 unsigned char bus_width;
697 unsigned short stripe_size;
698 unsigned short segment_size;
699 unsigned short new_ev_seq;
700 unsigned short old_ev_seq;
701 bool dual_mode_interface;
702 bool bgi_status_supported;
703 bool safte_enabled;
704 bool need_ldev_info;
705 bool need_err_info;
706 bool need_rbld;
707 bool need_cc_status;
708 bool need_bgi_status;
709 bool rbld_first;
710
711 struct pci_dev *pdev;
712 struct Scsi_Host *host;
713
714 struct workqueue_struct *work_q;
715 char work_q_name[20];
716 struct delayed_work monitor_work;
717 unsigned long primary_monitor_time;
718 unsigned long secondary_monitor_time;
719
720 struct dma_pool *sg_pool;
721 struct dma_pool *dcdb_pool;
722
723 spinlock_t queue_lock;
724
725 void (*qcmd)(struct myrb_hba *cs, struct myrb_cmdblk *cmd_blk);
726 void (*write_cmd_mbox)(union myrb_cmd_mbox *next_mbox,
727 union myrb_cmd_mbox *cmd_mbox);
728 void (*get_cmd_mbox)(void __iomem *base);
729 void (*disable_intr)(void __iomem *base);
730 void (*reset)(void __iomem *base);
731
732 unsigned int ctlr_num;
733 unsigned char model_name[20];
734 unsigned char fw_version[12];
735
736 unsigned int irq;
737 phys_addr_t io_addr;
738 phys_addr_t pci_addr;
739 void __iomem *io_base;
740 void __iomem *mmio_base;
741
742 size_t cmd_mbox_size;
743 dma_addr_t cmd_mbox_addr;
744 union myrb_cmd_mbox *first_cmd_mbox;
745 union myrb_cmd_mbox *last_cmd_mbox;
746 union myrb_cmd_mbox *next_cmd_mbox;
747 union myrb_cmd_mbox *prev_cmd_mbox1;
748 union myrb_cmd_mbox *prev_cmd_mbox2;
749
750 size_t stat_mbox_size;
751 dma_addr_t stat_mbox_addr;
752 struct myrb_stat_mbox *first_stat_mbox;
753 struct myrb_stat_mbox *last_stat_mbox;
754 struct myrb_stat_mbox *next_stat_mbox;
755
756 struct myrb_cmdblk dcmd_blk;
757 struct myrb_cmdblk mcmd_blk;
758 struct mutex dcmd_mutex;
759
760 struct myrb_enquiry *enquiry;
761 dma_addr_t enquiry_addr;
762
763 struct myrb_error_entry *err_table;
764 dma_addr_t err_table_addr;
765
766 unsigned short last_rbld_status;
767
768 struct myrb_ldev_info *ldev_info_buf;
769 dma_addr_t ldev_info_addr;
770
771 struct myrb_bgi_status bgi_status;
772
773 struct mutex dma_mutex;
774};
775
776/*
777 * DAC960 LA Series Controller Interface Register Offsets.
778 */
779#define DAC960_LA_mmio_size 0x80
780
781enum DAC960_LA_reg_offset {
782 DAC960_LA_IRQMASK_OFFSET = 0x34,
783 DAC960_LA_CMDOP_OFFSET = 0x50,
784 DAC960_LA_CMDID_OFFSET = 0x51,
785 DAC960_LA_MBOX2_OFFSET = 0x52,
786 DAC960_LA_MBOX3_OFFSET = 0x53,
787 DAC960_LA_MBOX4_OFFSET = 0x54,
788 DAC960_LA_MBOX5_OFFSET = 0x55,
789 DAC960_LA_MBOX6_OFFSET = 0x56,
790 DAC960_LA_MBOX7_OFFSET = 0x57,
791 DAC960_LA_MBOX8_OFFSET = 0x58,
792 DAC960_LA_MBOX9_OFFSET = 0x59,
793 DAC960_LA_MBOX10_OFFSET = 0x5A,
794 DAC960_LA_MBOX11_OFFSET = 0x5B,
795 DAC960_LA_MBOX12_OFFSET = 0x5C,
796 DAC960_LA_STSID_OFFSET = 0x5D,
797 DAC960_LA_STS_OFFSET = 0x5E,
798 DAC960_LA_IDB_OFFSET = 0x60,
799 DAC960_LA_ODB_OFFSET = 0x61,
800 DAC960_LA_ERRSTS_OFFSET = 0x63,
801};
802
803/*
804 * DAC960 LA Series Inbound Door Bell Register.
805 */
806#define DAC960_LA_IDB_HWMBOX_NEW_CMD 0x01
807#define DAC960_LA_IDB_HWMBOX_ACK_STS 0x02
808#define DAC960_LA_IDB_GEN_IRQ 0x04
809#define DAC960_LA_IDB_CTRL_RESET 0x08
810#define DAC960_LA_IDB_MMBOX_NEW_CMD 0x10
811
812#define DAC960_LA_IDB_HWMBOX_EMPTY 0x01
813#define DAC960_LA_IDB_INIT_DONE 0x02
814
815/*
816 * DAC960 LA Series Outbound Door Bell Register.
817 */
818#define DAC960_LA_ODB_HWMBOX_ACK_IRQ 0x01
819#define DAC960_LA_ODB_MMBOX_ACK_IRQ 0x02
820#define DAC960_LA_ODB_HWMBOX_STS_AVAIL 0x01
821#define DAC960_LA_ODB_MMBOX_STS_AVAIL 0x02
822
823/*
824 * DAC960 LA Series Interrupt Mask Register.
825 */
826#define DAC960_LA_IRQMASK_DISABLE_IRQ 0x04
827
828/*
829 * DAC960 LA Series Error Status Register.
830 */
831#define DAC960_LA_ERRSTS_PENDING 0x02
832
833/*
834 * DAC960 PG Series Controller Interface Register Offsets.
835 */
836#define DAC960_PG_mmio_size 0x2000
837
838enum DAC960_PG_reg_offset {
839 DAC960_PG_IDB_OFFSET = 0x0020,
840 DAC960_PG_ODB_OFFSET = 0x002C,
841 DAC960_PG_IRQMASK_OFFSET = 0x0034,
842 DAC960_PG_CMDOP_OFFSET = 0x1000,
843 DAC960_PG_CMDID_OFFSET = 0x1001,
844 DAC960_PG_MBOX2_OFFSET = 0x1002,
845 DAC960_PG_MBOX3_OFFSET = 0x1003,
846 DAC960_PG_MBOX4_OFFSET = 0x1004,
847 DAC960_PG_MBOX5_OFFSET = 0x1005,
848 DAC960_PG_MBOX6_OFFSET = 0x1006,
849 DAC960_PG_MBOX7_OFFSET = 0x1007,
850 DAC960_PG_MBOX8_OFFSET = 0x1008,
851 DAC960_PG_MBOX9_OFFSET = 0x1009,
852 DAC960_PG_MBOX10_OFFSET = 0x100A,
853 DAC960_PG_MBOX11_OFFSET = 0x100B,
854 DAC960_PG_MBOX12_OFFSET = 0x100C,
855 DAC960_PG_STSID_OFFSET = 0x1018,
856 DAC960_PG_STS_OFFSET = 0x101A,
857 DAC960_PG_ERRSTS_OFFSET = 0x103F,
858};
859
860/*
861 * DAC960 PG Series Inbound Door Bell Register.
862 */
863#define DAC960_PG_IDB_HWMBOX_NEW_CMD 0x01
864#define DAC960_PG_IDB_HWMBOX_ACK_STS 0x02
865#define DAC960_PG_IDB_GEN_IRQ 0x04
866#define DAC960_PG_IDB_CTRL_RESET 0x08
867#define DAC960_PG_IDB_MMBOX_NEW_CMD 0x10
868
869#define DAC960_PG_IDB_HWMBOX_FULL 0x01
870#define DAC960_PG_IDB_INIT_IN_PROGRESS 0x02
871
872/*
873 * DAC960 PG Series Outbound Door Bell Register.
874 */
875#define DAC960_PG_ODB_HWMBOX_ACK_IRQ 0x01
876#define DAC960_PG_ODB_MMBOX_ACK_IRQ 0x02
877#define DAC960_PG_ODB_HWMBOX_STS_AVAIL 0x01
878#define DAC960_PG_ODB_MMBOX_STS_AVAIL 0x02
879
880/*
881 * DAC960 PG Series Interrupt Mask Register.
882 */
883#define DAC960_PG_IRQMASK_MSI_MASK1 0x03
884#define DAC960_PG_IRQMASK_DISABLE_IRQ 0x04
885#define DAC960_PG_IRQMASK_MSI_MASK2 0xF8
886
887/*
888 * DAC960 PG Series Error Status Register.
889 */
890#define DAC960_PG_ERRSTS_PENDING 0x04
891
892/*
893 * DAC960 PD Series Controller Interface Register Offsets.
894 */
895#define DAC960_PD_mmio_size 0x80
896
897enum DAC960_PD_reg_offset {
898 DAC960_PD_CMDOP_OFFSET = 0x00,
899 DAC960_PD_CMDID_OFFSET = 0x01,
900 DAC960_PD_MBOX2_OFFSET = 0x02,
901 DAC960_PD_MBOX3_OFFSET = 0x03,
902 DAC960_PD_MBOX4_OFFSET = 0x04,
903 DAC960_PD_MBOX5_OFFSET = 0x05,
904 DAC960_PD_MBOX6_OFFSET = 0x06,
905 DAC960_PD_MBOX7_OFFSET = 0x07,
906 DAC960_PD_MBOX8_OFFSET = 0x08,
907 DAC960_PD_MBOX9_OFFSET = 0x09,
908 DAC960_PD_MBOX10_OFFSET = 0x0A,
909 DAC960_PD_MBOX11_OFFSET = 0x0B,
910 DAC960_PD_MBOX12_OFFSET = 0x0C,
911 DAC960_PD_STSID_OFFSET = 0x0D,
912 DAC960_PD_STS_OFFSET = 0x0E,
913 DAC960_PD_ERRSTS_OFFSET = 0x3F,
914 DAC960_PD_IDB_OFFSET = 0x40,
915 DAC960_PD_ODB_OFFSET = 0x41,
916 DAC960_PD_IRQEN_OFFSET = 0x43,
917};
918
919/*
920 * DAC960 PD Series Inbound Door Bell Register.
921 */
922#define DAC960_PD_IDB_HWMBOX_NEW_CMD 0x01
923#define DAC960_PD_IDB_HWMBOX_ACK_STS 0x02
924#define DAC960_PD_IDB_GEN_IRQ 0x04
925#define DAC960_PD_IDB_CTRL_RESET 0x08
926
927#define DAC960_PD_IDB_HWMBOX_FULL 0x01
928#define DAC960_PD_IDB_INIT_IN_PROGRESS 0x02
929
930/*
931 * DAC960 PD Series Outbound Door Bell Register.
932 */
933#define DAC960_PD_ODB_HWMBOX_ACK_IRQ 0x01
934#define DAC960_PD_ODB_HWMBOX_STS_AVAIL 0x01
935
936/*
937 * DAC960 PD Series Interrupt Enable Register.
938 */
939#define DAC960_PD_IRQMASK_ENABLE_IRQ 0x01
940
941/*
942 * DAC960 PD Series Error Status Register.
943 */
944#define DAC960_PD_ERRSTS_PENDING 0x04
945
946typedef int (*myrb_hw_init_t)(struct pci_dev *pdev,
947 struct myrb_hba *cb, void __iomem *base);
948typedef unsigned short (*mbox_mmio_init_t)(struct pci_dev *pdev,
949 void __iomem *base,
950 union myrb_cmd_mbox *mbox);
951
952struct myrb_privdata {
953 myrb_hw_init_t hw_init;
954 irq_handler_t irq_handler;
955 unsigned int mmio_size;
956};
957
958#endif /* MYRB_H */
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
new file mode 100644
index 000000000000..0264a2e2bc19
--- /dev/null
+++ b/drivers/scsi/myrs.c
@@ -0,0 +1,3268 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4 *
5 * This driver supports the newer, SCSI-based firmware interface only.
6 *
7 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
8 *
9 * Based on the original DAC960 driver, which has
10 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
11 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/delay.h>
17#include <linux/interrupt.h>
18#include <linux/pci.h>
19#include <linux/raid_class.h>
20#include <asm/unaligned.h>
21#include <scsi/scsi.h>
22#include <scsi/scsi_host.h>
23#include <scsi/scsi_device.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_tcq.h>
26#include "myrs.h"
27
28static struct raid_template *myrs_raid_template;
29
30static struct myrs_devstate_name_entry {
31 enum myrs_devstate state;
32 char *name;
33} myrs_devstate_name_list[] = {
34 { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
35 { MYRS_DEVICE_ONLINE, "Online" },
36 { MYRS_DEVICE_REBUILD, "Rebuild" },
37 { MYRS_DEVICE_MISSING, "Missing" },
38 { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
39 { MYRS_DEVICE_OFFLINE, "Offline" },
40 { MYRS_DEVICE_CRITICAL, "Critical" },
41 { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
42 { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
43 { MYRS_DEVICE_STANDBY, "Standby" },
44 { MYRS_DEVICE_INVALID_STATE, "Invalid" },
45};
46
47static char *myrs_devstate_name(enum myrs_devstate state)
48{
49 struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
50 int i;
51
52 for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
53 if (entry[i].state == state)
54 return entry[i].name;
55 }
56 return NULL;
57}
58
59static struct myrs_raid_level_name_entry {
60 enum myrs_raid_level level;
61 char *name;
62} myrs_raid_level_name_list[] = {
63 { MYRS_RAID_LEVEL0, "RAID0" },
64 { MYRS_RAID_LEVEL1, "RAID1" },
65 { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
66 { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
67 { MYRS_RAID_LEVEL6, "RAID6" },
68 { MYRS_RAID_JBOD, "JBOD" },
69 { MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
70 { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
71 { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
72 { MYRS_RAID_SPAN, "Mylex SPAN" },
73 { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
74 { MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
75 { MYRS_RAID_PHYSICAL, "Physical device" },
76};
77
78static char *myrs_raid_level_name(enum myrs_raid_level level)
79{
80 struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
81 int i;
82
83 for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
84 if (entry[i].level == level)
85 return entry[i].name;
86 }
87 return NULL;
88}
89
90/**
91 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
92 */
93static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
94{
95 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
96
97 memset(mbox, 0, sizeof(union myrs_cmd_mbox));
98 cmd_blk->status = 0;
99}
100
101/**
102 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
103 */
104static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
105{
106 void __iomem *base = cs->io_base;
107 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
108 union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;
109
110 cs->write_cmd_mbox(next_mbox, mbox);
111
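	/*
	 * Editor's note: cs->get_cmd_mbox() rings the memory-mailbox
	 * doorbell; judging from the check below it is only needed when
	 * the controller may have gone idle, which a cleared first word
	 * in one of the two previously posted slots indicates.
	 */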
112 if (cs->prev_cmd_mbox1->words[0] == 0 ||
113 cs->prev_cmd_mbox2->words[0] == 0)
114 cs->get_cmd_mbox(base);
115
116 cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
117 cs->prev_cmd_mbox1 = next_mbox;
118
119 if (++next_mbox > cs->last_cmd_mbox)
120 next_mbox = cs->first_cmd_mbox;
121
122 cs->next_cmd_mbox = next_mbox;
123}
124
125/**
126 * myrs_exec_cmd - executes V2 Command and waits for completion.
127 */
128static void myrs_exec_cmd(struct myrs_hba *cs,
129 struct myrs_cmdblk *cmd_blk)
130{
131 DECLARE_COMPLETION_ONSTACK(complete);
132 unsigned long flags;
133
134 cmd_blk->complete = &complete;
135 spin_lock_irqsave(&cs->queue_lock, flags);
136 myrs_qcmd(cs, cmd_blk);
137 spin_unlock_irqrestore(&cs->queue_lock, flags);
138
139 WARN_ON(in_interrupt());
140 wait_for_completion(&complete);
141}
142
143/**
144 * myrs_report_progress - prints progress message
145 */
146static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
147 unsigned char *msg, unsigned long blocks,
148 unsigned long size)
149{
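	/*
	 * Editor's note: shifting both operands right by 7 (128 blocks)
	 * keeps the 100 * blocks product within int range for typical
	 * drive sizes before the percentage division.
	 */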
150 shost_printk(KERN_INFO, cs->host,
151 "Logical Drive %d: %s in Progress: %d%% completed\n",
152 ldev_num, msg,
153 (100 * (int)(blocks >> 7)) / (int)(size >> 7));
154}
155
156/**
157 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
158 */
159static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
160{
161 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
162 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
163 dma_addr_t ctlr_info_addr;
164 union myrs_sgl *sgl;
165 unsigned char status;
166 struct myrs_ctlr_info old;
167
168 memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
169 ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
170 sizeof(struct myrs_ctlr_info),
171 DMA_FROM_DEVICE);
172 if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
173 return MYRS_STATUS_FAILED;
174
175 mutex_lock(&cs->dcmd_mutex);
176 myrs_reset_cmd(cmd_blk);
177 mbox->ctlr_info.id = MYRS_DCMD_TAG;
178 mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
179 mbox->ctlr_info.control.dma_ctrl_to_host = true;
180 mbox->ctlr_info.control.no_autosense = true;
181 mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
182 mbox->ctlr_info.ctlr_num = 0;
183 mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
184 sgl = &mbox->ctlr_info.dma_addr;
185 sgl->sge[0].sge_addr = ctlr_info_addr;
186 sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
187 dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
188 myrs_exec_cmd(cs, cmd_blk);
189 status = cmd_blk->status;
190 mutex_unlock(&cs->dcmd_mutex);
191 dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
192 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
193 if (status == MYRS_STATUS_SUCCESS) {
194 if (cs->ctlr_info->bg_init_active +
195 cs->ctlr_info->ldev_init_active +
196 cs->ctlr_info->pdev_init_active +
197 cs->ctlr_info->cc_active +
198 cs->ctlr_info->rbld_active +
199 cs->ctlr_info->exp_active != 0)
200 cs->needs_update = true;
201 if (cs->ctlr_info->ldev_present != old.ldev_present ||
202 cs->ctlr_info->ldev_critical != old.ldev_critical ||
203 cs->ctlr_info->ldev_offline != old.ldev_offline)
204 shost_printk(KERN_INFO, cs->host,
205 "Logical drive count changes (%d/%d/%d)\n",
206 cs->ctlr_info->ldev_critical,
207 cs->ctlr_info->ldev_offline,
208 cs->ctlr_info->ldev_present);
209 }
210
211 return status;
212}
213
214/**
215 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
216 */
217static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
218 unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
219{
220 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
221 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
222 dma_addr_t ldev_info_addr;
223 struct myrs_ldev_info ldev_info_orig;
224 union myrs_sgl *sgl;
225 unsigned char status;
226
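	/*
	 * Editor's note: a snapshot of the previous state is kept so the
	 * status-change and progress messages below can compare old and
	 * new values after the refresh.
	 */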
227 memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
228 ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
229 sizeof(struct myrs_ldev_info),
230 DMA_FROM_DEVICE);
231 if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
232 return MYRS_STATUS_FAILED;
233
234 mutex_lock(&cs->dcmd_mutex);
235 myrs_reset_cmd(cmd_blk);
236 mbox->ldev_info.id = MYRS_DCMD_TAG;
237 mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
238 mbox->ldev_info.control.dma_ctrl_to_host = true;
239 mbox->ldev_info.control.no_autosense = true;
240 mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
241 mbox->ldev_info.ldev.ldev_num = ldev_num;
242 mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
243 sgl = &mbox->ldev_info.dma_addr;
244 sgl->sge[0].sge_addr = ldev_info_addr;
245 sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
246 dev_dbg(&cs->host->shost_gendev,
247 "Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
248 myrs_exec_cmd(cs, cmd_blk);
249 status = cmd_blk->status;
250 mutex_unlock(&cs->dcmd_mutex);
251 dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
252 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
253 if (status == MYRS_STATUS_SUCCESS) {
254 unsigned short ldev_num = ldev_info->ldev_num;
255 struct myrs_ldev_info *new = ldev_info;
256 struct myrs_ldev_info *old = &ldev_info_orig;
257 unsigned long ldev_size = new->cfg_devsize;
258
259 if (new->dev_state != old->dev_state) {
260 const char *name;
261
262 name = myrs_devstate_name(new->dev_state);
263 shost_printk(KERN_INFO, cs->host,
264 "Logical Drive %d is now %s\n",
265 ldev_num, name ? name : "Invalid");
266 }
267 if ((new->soft_errs != old->soft_errs) ||
268 (new->cmds_failed != old->cmds_failed) ||
269 (new->deferred_write_errs != old->deferred_write_errs))
270 shost_printk(KERN_INFO, cs->host,
271 "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
272 ldev_num, new->soft_errs,
273 new->cmds_failed,
274 new->deferred_write_errs);
275 if (new->bg_init_active)
276 myrs_report_progress(cs, ldev_num,
277 "Background Initialization",
278 new->bg_init_lba, ldev_size);
279 else if (new->fg_init_active)
280 myrs_report_progress(cs, ldev_num,
281 "Foreground Initialization",
282 new->fg_init_lba, ldev_size);
283 else if (new->migration_active)
284 myrs_report_progress(cs, ldev_num,
285 "Data Migration",
286 new->migration_lba, ldev_size);
287 else if (new->patrol_active)
288 myrs_report_progress(cs, ldev_num,
289 "Patrol Operation",
290 new->patrol_lba, ldev_size);
291 if (old->bg_init_active && !new->bg_init_active)
292 shost_printk(KERN_INFO, cs->host,
293 "Logical Drive %d: Background Initialization %s\n",
294 ldev_num,
295 (new->ldev_control.ldev_init_done ?
296 "Completed" : "Failed"));
297 }
298 return status;
299}
300
301/**
302 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
303 */
304static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
305 unsigned char channel, unsigned char target, unsigned char lun,
306 struct myrs_pdev_info *pdev_info)
307{
308 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
309 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
310 dma_addr_t pdev_info_addr;
311 union myrs_sgl *sgl;
312 unsigned char status;
313
314 pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
315 sizeof(struct myrs_pdev_info),
316 DMA_FROM_DEVICE);
317 if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
318 return MYRS_STATUS_FAILED;
319
320 mutex_lock(&cs->dcmd_mutex);
321 myrs_reset_cmd(cmd_blk);
322 mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
323 mbox->pdev_info.id = MYRS_DCMD_TAG;
324 mbox->pdev_info.control.dma_ctrl_to_host = true;
325 mbox->pdev_info.control.no_autosense = true;
326 mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
327 mbox->pdev_info.pdev.lun = lun;
328 mbox->pdev_info.pdev.target = target;
329 mbox->pdev_info.pdev.channel = channel;
330 mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
331 sgl = &mbox->pdev_info.dma_addr;
332 sgl->sge[0].sge_addr = pdev_info_addr;
333 sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
334 dev_dbg(&cs->host->shost_gendev,
335 "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
336 channel, target, lun);
337 myrs_exec_cmd(cs, cmd_blk);
338 status = cmd_blk->status;
339 mutex_unlock(&cs->dcmd_mutex);
340 dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
341 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
342 return status;
343}
344
345/**
346 * myrs_dev_op - executes a "Device Operation" Command
347 */
348static unsigned char myrs_dev_op(struct myrs_hba *cs,
349 enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
350{
351 struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
352 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
353 unsigned char status;
354
355 mutex_lock(&cs->dcmd_mutex);
356 myrs_reset_cmd(cmd_blk);
357 mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
358 mbox->dev_op.id = MYRS_DCMD_TAG;
359 mbox->dev_op.control.dma_ctrl_to_host = true;
360 mbox->dev_op.control.no_autosense = true;
361 mbox->dev_op.ioctl_opcode = opcode;
362 mbox->dev_op.opdev = opdev;
363 myrs_exec_cmd(cs, cmd_blk);
364 status = cmd_blk->status;
365 mutex_unlock(&cs->dcmd_mutex);
366 return status;
367}
368
369/**
370 * myrs_translate_pdev - translates a Physical Device Channel and
371 * TargetID into a Logical Device.
372 */
373static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
374 unsigned char channel, unsigned char target, unsigned char lun,
375 struct myrs_devmap *devmap)
376{
377 struct pci_dev *pdev = cs->pdev;
378 dma_addr_t devmap_addr;
379 struct myrs_cmdblk *cmd_blk;
380 union myrs_cmd_mbox *mbox;
381 union myrs_sgl *sgl;
382 unsigned char status;
383
384 memset(devmap, 0x0, sizeof(struct myrs_devmap));
385 devmap_addr = dma_map_single(&pdev->dev, devmap,
386 sizeof(struct myrs_devmap),
387 DMA_FROM_DEVICE);
388 if (dma_mapping_error(&pdev->dev, devmap_addr))
389 return MYRS_STATUS_FAILED;
390
391 mutex_lock(&cs->dcmd_mutex);
392 cmd_blk = &cs->dcmd_blk;
393 mbox = &cmd_blk->mbox;
394 mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
395 mbox->pdev_info.control.dma_ctrl_to_host = true;
396 mbox->pdev_info.control.no_autosense = true;
397 mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
398 mbox->pdev_info.pdev.target = target;
399 mbox->pdev_info.pdev.channel = channel;
400 mbox->pdev_info.pdev.lun = lun;
401 mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
402 sgl = &mbox->pdev_info.dma_addr;
403 sgl->sge[0].sge_addr = devmap_addr;
404 sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
405
406 myrs_exec_cmd(cs, cmd_blk);
407 status = cmd_blk->status;
408 mutex_unlock(&cs->dcmd_mutex);
409 dma_unmap_single(&pdev->dev, devmap_addr,
410 sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
411 return status;
412}
413
414/**
415 * myrs_get_event - executes a Get Event Command
416 */
417static unsigned char myrs_get_event(struct myrs_hba *cs,
418 unsigned int event_num, struct myrs_event *event_buf)
419{
420 struct pci_dev *pdev = cs->pdev;
421 dma_addr_t event_addr;
422 struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
423 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
424 union myrs_sgl *sgl;
425 unsigned char status;
426
427 event_addr = dma_map_single(&pdev->dev, event_buf,
428 sizeof(struct myrs_event), DMA_FROM_DEVICE);
429 if (dma_mapping_error(&pdev->dev, event_addr))
430 return MYRS_STATUS_FAILED;
431
432 mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
433 mbox->get_event.dma_size = sizeof(struct myrs_event);
434 mbox->get_event.evnum_upper = event_num >> 16;
435 mbox->get_event.ctlr_num = 0;
436 mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
437 mbox->get_event.evnum_lower = event_num & 0xFFFF;
438 sgl = &mbox->get_event.dma_addr;
439 sgl->sge[0].sge_addr = event_addr;
440 sgl->sge[0].sge_count = mbox->get_event.dma_size;
441 myrs_exec_cmd(cs, cmd_blk);
442 status = cmd_blk->status;
443 dma_unmap_single(&pdev->dev, event_addr,
444 sizeof(struct myrs_event), DMA_FROM_DEVICE);
445
446 return status;
447}
448
449/*
450 * myrs_get_fwstatus - executes a Get Health Status Command
451 */
452static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
453{
454 struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
455 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
456 union myrs_sgl *sgl;
457	unsigned char status;
458
459 myrs_reset_cmd(cmd_blk);
460 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
461 mbox->common.id = MYRS_MCMD_TAG;
462 mbox->common.control.dma_ctrl_to_host = true;
463 mbox->common.control.no_autosense = true;
464 mbox->common.dma_size = sizeof(struct myrs_fwstat);
465 mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
466 sgl = &mbox->common.dma_addr;
467 sgl->sge[0].sge_addr = cs->fwstat_addr;
468	sgl->sge[0].sge_count = mbox->common.dma_size;
469 dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
470 myrs_exec_cmd(cs, cmd_blk);
471 status = cmd_blk->status;
472
473 return status;
474}
475
476/**
477 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
478 */
479static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
480 enable_mbox_t enable_mbox_fn)
481{
482 void __iomem *base = cs->io_base;
483 struct pci_dev *pdev = cs->pdev;
484 union myrs_cmd_mbox *cmd_mbox;
485 struct myrs_stat_mbox *stat_mbox;
486 union myrs_cmd_mbox *mbox;
487 dma_addr_t mbox_addr;
488 unsigned char status = MYRS_STATUS_FAILED;
489
490	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
491	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
492		dev_err(&pdev->dev, "DMA mask out of range\n");
493		return false;
494	}
495
496	/* Temporary DMA buffer, used only within the scope of this function */
497 mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
498 &mbox_addr, GFP_KERNEL);
499	if (!mbox)
500 return false;
501
502 /* These are the base addresses for the command memory mailbox array */
503 cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
504 cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
505 &cs->cmd_mbox_addr, GFP_KERNEL);
506	if (!cmd_mbox) {
507 dev_err(&pdev->dev, "Failed to map command mailbox\n");
508 goto out_free;
509 }
510 cs->first_cmd_mbox = cmd_mbox;
511 cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
512 cs->last_cmd_mbox = cmd_mbox;
513 cs->next_cmd_mbox = cs->first_cmd_mbox;
514 cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
515 cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
516
517 /* These are the base addresses for the status memory mailbox array */
518 cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
519 stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
520 &cs->stat_mbox_addr, GFP_KERNEL);
521	if (!stat_mbox) {
522 dev_err(&pdev->dev, "Failed to map status mailbox\n");
523 goto out_free;
524 }
525
526 cs->first_stat_mbox = stat_mbox;
527 stat_mbox += MYRS_MAX_STAT_MBOX - 1;
528 cs->last_stat_mbox = stat_mbox;
529 cs->next_stat_mbox = cs->first_stat_mbox;
530
531 cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
532 sizeof(struct myrs_fwstat),
533 &cs->fwstat_addr, GFP_KERNEL);
534	if (!cs->fwstat_buf) {
535 dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
536 cs->fwstat_buf = NULL;
537 goto out_free;
538 }
539 cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
540 GFP_KERNEL | GFP_DMA);
541 if (!cs->ctlr_info)
542 goto out_free;
543
544 cs->event_buf = kzalloc(sizeof(struct myrs_event),
545 GFP_KERNEL | GFP_DMA);
546 if (!cs->event_buf)
547 goto out_free;
548
549 /* Enable the Memory Mailbox Interface. */
550 memset(mbox, 0, sizeof(union myrs_cmd_mbox));
551 mbox->set_mbox.id = 1;
552 mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
553 mbox->set_mbox.control.no_autosense = true;
554 mbox->set_mbox.first_cmd_mbox_size_kb =
555 (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
556 mbox->set_mbox.first_stat_mbox_size_kb =
557 (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
558 mbox->set_mbox.second_cmd_mbox_size_kb = 0;
559 mbox->set_mbox.second_stat_mbox_size_kb = 0;
560 mbox->set_mbox.sense_len = 0;
561 mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
562 mbox->set_mbox.fwstat_buf_size_kb = 1;
563 mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
564 mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
565 mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
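	/*
	 * Hand the SET MEMORY MAILBOX request to the hardware-specific
	 * handshake routine; on success the controller starts consuming
	 * commands from the memory mailbox arrays set up above.
	 */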
566 status = enable_mbox_fn(base, mbox_addr);
567
568out_free:
569 dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
570 mbox, mbox_addr);
571 if (status != MYRS_STATUS_SUCCESS)
572 dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
573 status);
574 return (status == MYRS_STATUS_SUCCESS);
575}
576
/**
 * myrs_get_config - reads the Configuration Information
 * @cs: adapter state
 */
580static int myrs_get_config(struct myrs_hba *cs)
581{
582 struct myrs_ctlr_info *info = cs->ctlr_info;
583 struct Scsi_Host *shost = cs->host;
584 unsigned char status;
585 unsigned char model[20];
586 unsigned char fw_version[12];
587 int i, model_len;
588
589 /* Get data into dma-able area, then copy into permanent location */
590 mutex_lock(&cs->cinfo_mutex);
591 status = myrs_get_ctlr_info(cs);
592 mutex_unlock(&cs->cinfo_mutex);
593 if (status != MYRS_STATUS_SUCCESS) {
594 shost_printk(KERN_ERR, shost,
595 "Failed to get controller information\n");
596 return -ENODEV;
597 }
598
599 /* Initialize the Controller Model Name and Full Model Name fields. */
600 model_len = sizeof(info->ctlr_name);
601 if (model_len > sizeof(model)-1)
602 model_len = sizeof(model)-1;
603 memcpy(model, info->ctlr_name, model_len);
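	/* Trim trailing blanks and NUL padding from the firmware-supplied name */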
604 model_len--;
605 while (model[model_len] == ' ' || model[model_len] == '\0')
606 model_len--;
607 model[++model_len] = '\0';
608 strcpy(cs->model_name, "DAC960 ");
609 strcat(cs->model_name, model);
610 /* Initialize the Controller Firmware Version field. */
611 sprintf(fw_version, "%d.%02d-%02d",
612 info->fw_major_version, info->fw_minor_version,
613 info->fw_turn_number);
614 if (info->fw_major_version == 6 &&
615 info->fw_minor_version == 0 &&
616 info->fw_turn_number < 1) {
617 shost_printk(KERN_WARNING, shost,
618 "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
619 "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
620 "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
621 fw_version);
622 return -ENODEV;
623 }
624 /* Initialize the Controller Channels and Targets. */
625 shost->max_channel = info->physchan_present + info->virtchan_present;
626 shost->max_id = info->max_targets[0];
627 for (i = 1; i < 16; i++) {
628 if (!info->max_targets[i])
629 continue;
630 if (shost->max_id < info->max_targets[i])
631 shost->max_id = info->max_targets[i];
632 }
633
634 /*
635 * Initialize the Controller Queue Depth, Driver Queue Depth,
636 * Logical Drive Count, Maximum Blocks per Command, Controller
637 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
638 * The Driver Queue Depth must be at most three less than
639 * the Controller Queue Depth; tag '1' is reserved for
640 * direct commands, and tag '2' for monitoring commands.
641 */
642 shost->can_queue = info->max_tcq - 3;
643 if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
644 shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
645 shost->max_sectors = info->max_transfer_size;
646 shost->sg_tablesize = info->max_sge;
647 if (shost->sg_tablesize > MYRS_SG_LIMIT)
648 shost->sg_tablesize = MYRS_SG_LIMIT;
649
650 shost_printk(KERN_INFO, shost,
651 "Configuring %s PCI RAID Controller\n", model);
652 shost_printk(KERN_INFO, shost,
653 " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
654 fw_version, info->physchan_present, info->mem_size_mb);
655
656 shost_printk(KERN_INFO, shost,
657 " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
658 shost->can_queue, shost->max_sectors);
659
660 shost_printk(KERN_INFO, shost,
661 " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
662 shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
663 for (i = 0; i < info->physchan_max; i++) {
664 if (!info->max_targets[i])
665 continue;
666 shost_printk(KERN_INFO, shost,
667 " Device Channel %d: max %d devices\n",
668 i, info->max_targets[i]);
669 }
670 shost_printk(KERN_INFO, shost,
671 " Physical: %d/%d channels, %d disks, %d devices\n",
672 info->physchan_present, info->physchan_max,
673 info->pdisk_present, info->pdev_present);
674
675 shost_printk(KERN_INFO, shost,
676 " Logical: %d/%d channels, %d disks\n",
677 info->virtchan_present, info->virtchan_max,
678 info->ldev_present);
679 return 0;
680}
681
/*
 * Controller Event codes and messages, used by myrs_log_event() below.
 * The first character of each message encodes the event class: 'P'
 * physical device, 'L'/'M' logical drive, 'S' sense data, 'E' enclosure,
 * 'C' controller; the message text proper starts at offset 2.
 */
685static struct {
686 int ev_code;
687 unsigned char *ev_msg;
688} myrs_ev_list[] = {
689 /* Physical Device Events (0x0000 - 0x007F) */
690 { 0x0001, "P Online" },
691 { 0x0002, "P Standby" },
692 { 0x0005, "P Automatic Rebuild Started" },
693 { 0x0006, "P Manual Rebuild Started" },
694 { 0x0007, "P Rebuild Completed" },
695 { 0x0008, "P Rebuild Cancelled" },
696 { 0x0009, "P Rebuild Failed for Unknown Reasons" },
697 { 0x000A, "P Rebuild Failed due to New Physical Device" },
698 { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
699 { 0x000C, "S Offline" },
700 { 0x000D, "P Found" },
701 { 0x000E, "P Removed" },
702 { 0x000F, "P Unconfigured" },
703 { 0x0010, "P Expand Capacity Started" },
704 { 0x0011, "P Expand Capacity Completed" },
705 { 0x0012, "P Expand Capacity Failed" },
706 { 0x0013, "P Command Timed Out" },
707 { 0x0014, "P Command Aborted" },
708 { 0x0015, "P Command Retried" },
709 { 0x0016, "P Parity Error" },
710 { 0x0017, "P Soft Error" },
711 { 0x0018, "P Miscellaneous Error" },
712 { 0x0019, "P Reset" },
713 { 0x001A, "P Active Spare Found" },
714 { 0x001B, "P Warm Spare Found" },
715 { 0x001C, "S Sense Data Received" },
716 { 0x001D, "P Initialization Started" },
717 { 0x001E, "P Initialization Completed" },
718 { 0x001F, "P Initialization Failed" },
719 { 0x0020, "P Initialization Cancelled" },
720 { 0x0021, "P Failed because Write Recovery Failed" },
721 { 0x0022, "P Failed because SCSI Bus Reset Failed" },
722 { 0x0023, "P Failed because of Double Check Condition" },
723 { 0x0024, "P Failed because Device Cannot Be Accessed" },
724 { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
725 { 0x0026, "P Failed because of Bad Tag from Device" },
726 { 0x0027, "P Failed because of Command Timeout" },
727 { 0x0028, "P Failed because of System Reset" },
728 { 0x0029, "P Failed because of Busy Status or Parity Error" },
729 { 0x002A, "P Failed because Host Set Device to Failed State" },
730 { 0x002B, "P Failed because of Selection Timeout" },
731 { 0x002C, "P Failed because of SCSI Bus Phase Error" },
732 { 0x002D, "P Failed because Device Returned Unknown Status" },
733 { 0x002E, "P Failed because Device Not Ready" },
734 { 0x002F, "P Failed because Device Not Found at Startup" },
735 { 0x0030, "P Failed because COD Write Operation Failed" },
736 { 0x0031, "P Failed because BDT Write Operation Failed" },
737 { 0x0039, "P Missing at Startup" },
738 { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
739 { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
740 { 0x003D, "P Standby Rebuild Started" },
741 /* Logical Device Events (0x0080 - 0x00FF) */
742 { 0x0080, "M Consistency Check Started" },
743 { 0x0081, "M Consistency Check Completed" },
744 { 0x0082, "M Consistency Check Cancelled" },
745 { 0x0083, "M Consistency Check Completed With Errors" },
746 { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
747 { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
748 { 0x0086, "L Offline" },
749 { 0x0087, "L Critical" },
750 { 0x0088, "L Online" },
751 { 0x0089, "M Automatic Rebuild Started" },
752 { 0x008A, "M Manual Rebuild Started" },
753 { 0x008B, "M Rebuild Completed" },
754 { 0x008C, "M Rebuild Cancelled" },
755 { 0x008D, "M Rebuild Failed for Unknown Reasons" },
756 { 0x008E, "M Rebuild Failed due to New Physical Device" },
757 { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
758 { 0x0090, "M Initialization Started" },
759 { 0x0091, "M Initialization Completed" },
760 { 0x0092, "M Initialization Cancelled" },
761 { 0x0093, "M Initialization Failed" },
762 { 0x0094, "L Found" },
763 { 0x0095, "L Deleted" },
764 { 0x0096, "M Expand Capacity Started" },
765 { 0x0097, "M Expand Capacity Completed" },
766 { 0x0098, "M Expand Capacity Failed" },
767 { 0x0099, "L Bad Block Found" },
768 { 0x009A, "L Size Changed" },
769 { 0x009B, "L Type Changed" },
770 { 0x009C, "L Bad Data Block Found" },
771 { 0x009E, "L Read of Data Block in BDT" },
772 { 0x009F, "L Write Back Data for Disk Block Lost" },
773 { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
774 { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
775 { 0x00A2, "L Standby Rebuild Started" },
776 /* Fault Management Events (0x0100 - 0x017F) */
777 { 0x0140, "E Fan %d Failed" },
778 { 0x0141, "E Fan %d OK" },
779 { 0x0142, "E Fan %d Not Present" },
780 { 0x0143, "E Power Supply %d Failed" },
781 { 0x0144, "E Power Supply %d OK" },
782 { 0x0145, "E Power Supply %d Not Present" },
783 { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
784 { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
785 { 0x0148, "E Temperature Sensor %d Temperature Normal" },
786 { 0x0149, "E Temperature Sensor %d Not Present" },
787 { 0x014A, "E Enclosure Management Unit %d Access Critical" },
788 { 0x014B, "E Enclosure Management Unit %d Access OK" },
789 { 0x014C, "E Enclosure Management Unit %d Access Offline" },
790 /* Controller Events (0x0180 - 0x01FF) */
791 { 0x0181, "C Cache Write Back Error" },
792 { 0x0188, "C Battery Backup Unit Found" },
793 { 0x0189, "C Battery Backup Unit Charge Level Low" },
794 { 0x018A, "C Battery Backup Unit Charge Level OK" },
795 { 0x0193, "C Installation Aborted" },
796 { 0x0195, "C Battery Backup Unit Physically Removed" },
797 { 0x0196, "C Memory Error During Warm Boot" },
798 { 0x019E, "C Memory Soft ECC Error Corrected" },
799 { 0x019F, "C Memory Hard ECC Error Corrected" },
800 { 0x01A2, "C Battery Backup Unit Failed" },
801 { 0x01AB, "C Mirror Race Recovery Failed" },
802 { 0x01AC, "C Mirror Race on Critical Drive" },
803 /* Controller Internal Processor Events */
804 { 0x0380, "C Internal Controller Hung" },
805 { 0x0381, "C Internal Controller Firmware Breakpoint" },
806 { 0x0390, "C Internal Controller i960 Processor Specific Error" },
807 { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
808 { 0, "" }
809};
810
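/**
 * myrs_log_event - prints a Controller Event message
 * @cs: adapter state
 * @ev: controller event to decode and log
 */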
811static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
812{
813 unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
814 int ev_idx = 0, ev_code;
815 unsigned char ev_type, *ev_msg;
816 struct Scsi_Host *shost = cs->host;
817 struct scsi_device *sdev;
	struct scsi_sense_hdr sshdr = { };
	unsigned char sense_info[4] = { };
	unsigned char cmd_specific[4] = { };
821
	/*
	 * Only event 0x1C carries sense data; for all other events the
	 * zero-initialized sshdr/sense buffers above are used as-is.
	 */
	if (ev->ev_code == 0x1C &&
	    scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
		memcpy(sense_info, &ev->sense_data[3], 4);
		memcpy(cmd_specific, &ev->sense_data[7], 4);
	}
832 if (sshdr.sense_key == VENDOR_SPECIFIC &&
833 (sshdr.asc == 0x80 || sshdr.asc == 0x81))
834 ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
835 while (true) {
836 ev_code = myrs_ev_list[ev_idx].ev_code;
837 if (ev_code == ev->ev_code || ev_code == 0)
838 break;
839 ev_idx++;
840 }
841 ev_type = myrs_ev_list[ev_idx].ev_msg[0];
842 ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
843 if (ev_code == 0) {
844 shost_printk(KERN_WARNING, shost,
845 "Unknown Controller Event Code %04X\n",
846 ev->ev_code);
847 return;
848 }
849 switch (ev_type) {
	case 'P':
		sdev = scsi_device_lookup(shost, ev->channel,
					  ev->target, 0);
		/* scsi_device_lookup() may fail and takes a reference */
		if (!sdev)
			break;
		sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
			    ev->ev_seq, ev_msg);
		if (sdev->hostdata &&
		    sdev->channel < cs->ctlr_info->physchan_present) {
857 struct myrs_pdev_info *pdev_info = sdev->hostdata;
858
859 switch (ev->ev_code) {
860 case 0x0001:
861 case 0x0007:
862 pdev_info->dev_state = MYRS_DEVICE_ONLINE;
863 break;
864 case 0x0002:
865 pdev_info->dev_state = MYRS_DEVICE_STANDBY;
866 break;
867 case 0x000C:
868 pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
869 break;
870 case 0x000E:
871 pdev_info->dev_state = MYRS_DEVICE_MISSING;
872 break;
873 case 0x000F:
874 pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
875 break;
876 }
		}
		scsi_device_put(sdev);
		break;
879 case 'L':
880 shost_printk(KERN_INFO, shost,
881 "event %d: Logical Drive %d %s\n",
882 ev->ev_seq, ev->lun, ev_msg);
883 cs->needs_update = true;
884 break;
885 case 'M':
886 shost_printk(KERN_INFO, shost,
887 "event %d: Logical Drive %d %s\n",
888 ev->ev_seq, ev->lun, ev_msg);
889 cs->needs_update = true;
890 break;
891 case 'S':
892 if (sshdr.sense_key == NO_SENSE ||
893 (sshdr.sense_key == NOT_READY &&
894 sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
895 sshdr.ascq == 0x02)))
896 break;
897 shost_printk(KERN_INFO, shost,
898 "event %d: Physical Device %d:%d %s\n",
899 ev->ev_seq, ev->channel, ev->target, ev_msg);
900 shost_printk(KERN_INFO, shost,
901 "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
902 ev->channel, ev->target,
903 sshdr.sense_key, sshdr.asc, sshdr.ascq);
904 shost_printk(KERN_INFO, shost,
905 "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
906 ev->channel, ev->target,
907 sense_info[0], sense_info[1],
908 sense_info[2], sense_info[3],
909 cmd_specific[0], cmd_specific[1],
910 cmd_specific[2], cmd_specific[3]);
911 break;
912 case 'E':
913 if (cs->disable_enc_msg)
914 break;
915 sprintf(msg_buf, ev_msg, ev->lun);
916 shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
917 ev->ev_seq, ev->target, msg_buf);
918 break;
919 case 'C':
920 shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
921 ev->ev_seq, ev_msg);
922 break;
923 default:
924 shost_printk(KERN_INFO, shost,
925 "event %d: Unknown Event Code %04X\n",
926 ev->ev_seq, ev->ev_code);
927 break;
928 }
929}
930
931/*
932 * SCSI sysfs interface functions
933 */
934static ssize_t raid_state_show(struct device *dev,
935 struct device_attribute *attr, char *buf)
936{
937 struct scsi_device *sdev = to_scsi_device(dev);
938 struct myrs_hba *cs = shost_priv(sdev->host);
939 int ret;
940
941 if (!sdev->hostdata)
942 return snprintf(buf, 16, "Unknown\n");
943
944 if (sdev->channel >= cs->ctlr_info->physchan_present) {
945 struct myrs_ldev_info *ldev_info = sdev->hostdata;
946 const char *name;
947
948 name = myrs_devstate_name(ldev_info->dev_state);
949 if (name)
950 ret = snprintf(buf, 32, "%s\n", name);
951 else
952 ret = snprintf(buf, 32, "Invalid (%02X)\n",
953 ldev_info->dev_state);
954 } else {
955 struct myrs_pdev_info *pdev_info;
956 const char *name;
957
958 pdev_info = sdev->hostdata;
959 name = myrs_devstate_name(pdev_info->dev_state);
960 if (name)
961 ret = snprintf(buf, 32, "%s\n", name);
962 else
963 ret = snprintf(buf, 32, "Invalid (%02X)\n",
964 pdev_info->dev_state);
965 }
966 return ret;
967}
968
969static ssize_t raid_state_store(struct device *dev,
970 struct device_attribute *attr, const char *buf, size_t count)
971{
972 struct scsi_device *sdev = to_scsi_device(dev);
973 struct myrs_hba *cs = shost_priv(sdev->host);
974 struct myrs_cmdblk *cmd_blk;
975 union myrs_cmd_mbox *mbox;
976 enum myrs_devstate new_state;
977 unsigned short ldev_num;
978 unsigned char status;
979
980 if (!strncmp(buf, "offline", 7) ||
981 !strncmp(buf, "kill", 4))
982 new_state = MYRS_DEVICE_OFFLINE;
983 else if (!strncmp(buf, "online", 6))
984 new_state = MYRS_DEVICE_ONLINE;
985 else if (!strncmp(buf, "standby", 7))
986 new_state = MYRS_DEVICE_STANDBY;
987 else
988 return -EINVAL;
989
990 if (sdev->channel < cs->ctlr_info->physchan_present) {
991 struct myrs_pdev_info *pdev_info = sdev->hostdata;
992 struct myrs_devmap *pdev_devmap =
993 (struct myrs_devmap *)&pdev_info->rsvd13;
994
995 if (pdev_info->dev_state == new_state) {
996 sdev_printk(KERN_INFO, sdev,
997 "Device already in %s\n",
998 myrs_devstate_name(new_state));
999 return count;
1000 }
1001 status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
1002 sdev->lun, pdev_devmap);
1003 if (status != MYRS_STATUS_SUCCESS)
1004 return -ENXIO;
1005 ldev_num = pdev_devmap->ldev_num;
1006 } else {
1007 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1008
1009 if (ldev_info->dev_state == new_state) {
1010 sdev_printk(KERN_INFO, sdev,
1011 "Device already in %s\n",
1012 myrs_devstate_name(new_state));
1013 return count;
1014 }
1015 ldev_num = ldev_info->ldev_num;
1016 }
1017 mutex_lock(&cs->dcmd_mutex);
1018 cmd_blk = &cs->dcmd_blk;
1019 myrs_reset_cmd(cmd_blk);
1020 mbox = &cmd_blk->mbox;
1021 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1022 mbox->common.id = MYRS_DCMD_TAG;
1023 mbox->common.control.dma_ctrl_to_host = true;
1024 mbox->common.control.no_autosense = true;
1025 mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
1026 mbox->set_devstate.state = new_state;
1027 mbox->set_devstate.ldev.ldev_num = ldev_num;
1028 myrs_exec_cmd(cs, cmd_blk);
1029 status = cmd_blk->status;
1030 mutex_unlock(&cs->dcmd_mutex);
1031 if (status == MYRS_STATUS_SUCCESS) {
1032 if (sdev->channel < cs->ctlr_info->physchan_present) {
1033 struct myrs_pdev_info *pdev_info = sdev->hostdata;
1034
1035 pdev_info->dev_state = new_state;
1036 } else {
1037 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1038
1039 ldev_info->dev_state = new_state;
1040 }
1041 sdev_printk(KERN_INFO, sdev,
1042 "Set device state to %s\n",
1043 myrs_devstate_name(new_state));
1044 return count;
1045 }
1046 sdev_printk(KERN_INFO, sdev,
1047 "Failed to set device state to %s, status 0x%02x\n",
1048 myrs_devstate_name(new_state), status);
1049 return -EINVAL;
1050}
1051static DEVICE_ATTR_RW(raid_state);
1052
1053static ssize_t raid_level_show(struct device *dev,
1054 struct device_attribute *attr, char *buf)
1055{
1056 struct scsi_device *sdev = to_scsi_device(dev);
1057 struct myrs_hba *cs = shost_priv(sdev->host);
1058 const char *name = NULL;
1059
1060 if (!sdev->hostdata)
1061 return snprintf(buf, 16, "Unknown\n");
1062
1063 if (sdev->channel >= cs->ctlr_info->physchan_present) {
1064 struct myrs_ldev_info *ldev_info;
1065
1066 ldev_info = sdev->hostdata;
1067 name = myrs_raid_level_name(ldev_info->raid_level);
1068 if (!name)
1069 return snprintf(buf, 32, "Invalid (%02X)\n",
1070 ldev_info->dev_state);
1071
1072 } else
1073 name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
1074
1075 return snprintf(buf, 32, "%s\n", name);
1076}
1077static DEVICE_ATTR_RO(raid_level);
1078
1079static ssize_t rebuild_show(struct device *dev,
1080 struct device_attribute *attr, char *buf)
1081{
1082 struct scsi_device *sdev = to_scsi_device(dev);
1083 struct myrs_hba *cs = shost_priv(sdev->host);
1084 struct myrs_ldev_info *ldev_info;
1085 unsigned short ldev_num;
1086 unsigned char status;
1087
1088 if (sdev->channel < cs->ctlr_info->physchan_present)
1089 return snprintf(buf, 32, "physical device - not rebuilding\n");
1090
1091 ldev_info = sdev->hostdata;
1092 ldev_num = ldev_info->ldev_num;
1093 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1094 if (status != MYRS_STATUS_SUCCESS) {
1095 sdev_printk(KERN_INFO, sdev,
1096 "Failed to get device information, status 0x%02x\n",
1097 status);
1098 return -EIO;
1099 }
1100 if (ldev_info->rbld_active) {
1101 return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
1102 (size_t)ldev_info->rbld_lba,
1103 (size_t)ldev_info->cfg_devsize);
1104 } else
1105 return snprintf(buf, 32, "not rebuilding\n");
1106}
1107
1108static ssize_t rebuild_store(struct device *dev,
1109 struct device_attribute *attr, const char *buf, size_t count)
1110{
1111 struct scsi_device *sdev = to_scsi_device(dev);
1112 struct myrs_hba *cs = shost_priv(sdev->host);
1113 struct myrs_ldev_info *ldev_info;
1114 struct myrs_cmdblk *cmd_blk;
1115 union myrs_cmd_mbox *mbox;
1116 unsigned short ldev_num;
1117 unsigned char status;
1118 int rebuild, ret;
1119
1120 if (sdev->channel < cs->ctlr_info->physchan_present)
1121 return -EINVAL;
1122
1123 ldev_info = sdev->hostdata;
1124 if (!ldev_info)
1125 return -ENXIO;
1126 ldev_num = ldev_info->ldev_num;
1127
1128 ret = kstrtoint(buf, 0, &rebuild);
1129 if (ret)
1130 return ret;
1131
1132 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1133 if (status != MYRS_STATUS_SUCCESS) {
1134 sdev_printk(KERN_INFO, sdev,
1135 "Failed to get device information, status 0x%02x\n",
1136 status);
1137 return -EIO;
1138 }
1139
1140 if (rebuild && ldev_info->rbld_active) {
1141 sdev_printk(KERN_INFO, sdev,
1142 "Rebuild Not Initiated; already in progress\n");
1143 return -EALREADY;
1144 }
1145 if (!rebuild && !ldev_info->rbld_active) {
1146 sdev_printk(KERN_INFO, sdev,
1147 "Rebuild Not Cancelled; no rebuild in progress\n");
1148 return count;
1149 }
1150
1151 mutex_lock(&cs->dcmd_mutex);
1152 cmd_blk = &cs->dcmd_blk;
1153 myrs_reset_cmd(cmd_blk);
1154 mbox = &cmd_blk->mbox;
1155 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1156 mbox->common.id = MYRS_DCMD_TAG;
1157 mbox->common.control.dma_ctrl_to_host = true;
1158 mbox->common.control.no_autosense = true;
1159 if (rebuild) {
1160 mbox->ldev_info.ldev.ldev_num = ldev_num;
1161 mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
1162 } else {
1163 mbox->ldev_info.ldev.ldev_num = ldev_num;
1164 mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
1165 }
1166 myrs_exec_cmd(cs, cmd_blk);
1167 status = cmd_blk->status;
1168 mutex_unlock(&cs->dcmd_mutex);
1169 if (status) {
1170 sdev_printk(KERN_INFO, sdev,
1171 "Rebuild Not %s, status 0x%02x\n",
1172 rebuild ? "Initiated" : "Cancelled", status);
1173 ret = -EIO;
1174 } else {
1175 sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
1176 rebuild ? "Initiated" : "Cancelled");
1177 ret = count;
1178 }
1179
1180 return ret;
1181}
1182static DEVICE_ATTR_RW(rebuild);
1183
1184static ssize_t consistency_check_show(struct device *dev,
1185 struct device_attribute *attr, char *buf)
1186{
1187 struct scsi_device *sdev = to_scsi_device(dev);
1188 struct myrs_hba *cs = shost_priv(sdev->host);
1189 struct myrs_ldev_info *ldev_info;
1190 unsigned short ldev_num;
1191 unsigned char status;
1192
1193 if (sdev->channel < cs->ctlr_info->physchan_present)
1194 return snprintf(buf, 32, "physical device - not checking\n");
1195
1196 ldev_info = sdev->hostdata;
1197 if (!ldev_info)
1198 return -ENXIO;
1199 ldev_num = ldev_info->ldev_num;
	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
1201 if (ldev_info->cc_active)
1202 return snprintf(buf, 32, "checking block %zu of %zu\n",
1203 (size_t)ldev_info->cc_lba,
1204 (size_t)ldev_info->cfg_devsize);
1205 else
1206 return snprintf(buf, 32, "not checking\n");
1207}
1208
1209static ssize_t consistency_check_store(struct device *dev,
1210 struct device_attribute *attr, const char *buf, size_t count)
1211{
1212 struct scsi_device *sdev = to_scsi_device(dev);
1213 struct myrs_hba *cs = shost_priv(sdev->host);
1214 struct myrs_ldev_info *ldev_info;
1215 struct myrs_cmdblk *cmd_blk;
1216 union myrs_cmd_mbox *mbox;
1217 unsigned short ldev_num;
1218 unsigned char status;
1219 int check, ret;
1220
1221 if (sdev->channel < cs->ctlr_info->physchan_present)
1222 return -EINVAL;
1223
1224 ldev_info = sdev->hostdata;
1225 if (!ldev_info)
1226 return -ENXIO;
1227 ldev_num = ldev_info->ldev_num;
1228
1229 ret = kstrtoint(buf, 0, &check);
1230 if (ret)
1231 return ret;
1232
1233 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1234 if (status != MYRS_STATUS_SUCCESS) {
1235 sdev_printk(KERN_INFO, sdev,
1236 "Failed to get device information, status 0x%02x\n",
1237 status);
1238 return -EIO;
1239 }
1240 if (check && ldev_info->cc_active) {
1241 sdev_printk(KERN_INFO, sdev,
1242 "Consistency Check Not Initiated; "
1243 "already in progress\n");
1244 return -EALREADY;
1245 }
1246 if (!check && !ldev_info->cc_active) {
1247 sdev_printk(KERN_INFO, sdev,
1248 "Consistency Check Not Cancelled; "
1249 "check not in progress\n");
1250 return count;
1251 }
1252
1253 mutex_lock(&cs->dcmd_mutex);
1254 cmd_blk = &cs->dcmd_blk;
1255 myrs_reset_cmd(cmd_blk);
1256 mbox = &cmd_blk->mbox;
1257 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1258 mbox->common.id = MYRS_DCMD_TAG;
1259 mbox->common.control.dma_ctrl_to_host = true;
1260 mbox->common.control.no_autosense = true;
1261 if (check) {
1262 mbox->cc.ldev.ldev_num = ldev_num;
1263 mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
1264 mbox->cc.restore_consistency = true;
1265 mbox->cc.initialized_area_only = false;
1266 } else {
1267 mbox->cc.ldev.ldev_num = ldev_num;
1268 mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
1269 }
1270 myrs_exec_cmd(cs, cmd_blk);
1271 status = cmd_blk->status;
1272 mutex_unlock(&cs->dcmd_mutex);
1273 if (status != MYRS_STATUS_SUCCESS) {
1274 sdev_printk(KERN_INFO, sdev,
1275 "Consistency Check Not %s, status 0x%02x\n",
1276 check ? "Initiated" : "Cancelled", status);
1277 ret = -EIO;
1278 } else {
1279 sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
1280 check ? "Initiated" : "Cancelled");
1281 ret = count;
1282 }
1283
1284 return ret;
1285}
1286static DEVICE_ATTR_RW(consistency_check);
1287
1288static struct device_attribute *myrs_sdev_attrs[] = {
1289 &dev_attr_consistency_check,
1290 &dev_attr_rebuild,
1291 &dev_attr_raid_state,
1292 &dev_attr_raid_level,
1293 NULL,
1294};
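
/*
 * The per-device attributes above appear in each SCSI device's sysfs
 * directory; e.g., assuming host 0, channel 2, id 0, lun 0:
 *
 *   cat /sys/bus/scsi/devices/0:2:0:0/raid_state
 *   echo online > /sys/bus/scsi/devices/0:2:0:0/raid_state
 */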
1295
1296static ssize_t serial_show(struct device *dev,
1297 struct device_attribute *attr, char *buf)
1298{
1299 struct Scsi_Host *shost = class_to_shost(dev);
1300 struct myrs_hba *cs = shost_priv(shost);
1301 char serial[17];
1302
1303 memcpy(serial, cs->ctlr_info->serial_number, 16);
1304 serial[16] = '\0';
	/* 16 serial characters plus '\n' and NUL need more than 16 bytes */
	return snprintf(buf, 20, "%s\n", serial);
1306}
1307static DEVICE_ATTR_RO(serial);
1308
1309static ssize_t ctlr_num_show(struct device *dev,
1310 struct device_attribute *attr, char *buf)
1311{
1312 struct Scsi_Host *shost = class_to_shost(dev);
1313 struct myrs_hba *cs = shost_priv(shost);
1314
1315 return snprintf(buf, 20, "%d\n", cs->host->host_no);
1316}
1317static DEVICE_ATTR_RO(ctlr_num);
1318
1319static struct myrs_cpu_type_tbl {
1320 enum myrs_cpu_type type;
1321 char *name;
1322} myrs_cpu_type_names[] = {
1323 { MYRS_CPUTYPE_i960CA, "i960CA" },
1324 { MYRS_CPUTYPE_i960RD, "i960RD" },
1325 { MYRS_CPUTYPE_i960RN, "i960RN" },
1326 { MYRS_CPUTYPE_i960RP, "i960RP" },
1327 { MYRS_CPUTYPE_NorthBay, "NorthBay" },
1328 { MYRS_CPUTYPE_StrongArm, "StrongARM" },
1329 { MYRS_CPUTYPE_i960RM, "i960RM" },
1330};
1331
1332static ssize_t processor_show(struct device *dev,
1333 struct device_attribute *attr, char *buf)
1334{
1335 struct Scsi_Host *shost = class_to_shost(dev);
1336 struct myrs_hba *cs = shost_priv(shost);
1337 struct myrs_cpu_type_tbl *tbl;
1338 const char *first_processor = NULL;
1339 const char *second_processor = NULL;
1340 struct myrs_ctlr_info *info = cs->ctlr_info;
1341 ssize_t ret;
1342 int i;
1343
1344 if (info->cpu[0].cpu_count) {
1345 tbl = myrs_cpu_type_names;
1346 for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
1347 if (tbl[i].type == info->cpu[0].cpu_type) {
1348 first_processor = tbl[i].name;
1349 break;
1350 }
1351 }
1352 }
1353 if (info->cpu[1].cpu_count) {
1354 tbl = myrs_cpu_type_names;
1355 for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
1356 if (tbl[i].type == info->cpu[1].cpu_type) {
1357 second_processor = tbl[i].name;
1358 break;
1359 }
1360 }
1361 }
1362 if (first_processor && second_processor)
1363 ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
1364 "2: %s (%s, %d cpus)\n",
1365 info->cpu[0].cpu_name,
1366 first_processor, info->cpu[0].cpu_count,
1367 info->cpu[1].cpu_name,
1368 second_processor, info->cpu[1].cpu_count);
1369 else if (first_processor && !second_processor)
1370 ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
1371 info->cpu[0].cpu_name,
1372 first_processor, info->cpu[0].cpu_count);
1373 else if (!first_processor && second_processor)
1374 ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
1375 info->cpu[1].cpu_name,
1376 second_processor, info->cpu[1].cpu_count);
1377 else
1378 ret = snprintf(buf, 64, "1: absent\n2: absent\n");
1379
1380 return ret;
1381}
1382static DEVICE_ATTR_RO(processor);
1383
1384static ssize_t model_show(struct device *dev,
1385 struct device_attribute *attr, char *buf)
1386{
1387 struct Scsi_Host *shost = class_to_shost(dev);
1388 struct myrs_hba *cs = shost_priv(shost);
1389
1390 return snprintf(buf, 28, "%s\n", cs->model_name);
1391}
1392static DEVICE_ATTR_RO(model);
1393
1394static ssize_t ctlr_type_show(struct device *dev,
1395 struct device_attribute *attr, char *buf)
1396{
1397 struct Scsi_Host *shost = class_to_shost(dev);
1398 struct myrs_hba *cs = shost_priv(shost);
1399
1400 return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
1401}
1402static DEVICE_ATTR_RO(ctlr_type);
1403
1404static ssize_t cache_size_show(struct device *dev,
1405 struct device_attribute *attr, char *buf)
1406{
1407 struct Scsi_Host *shost = class_to_shost(dev);
1408 struct myrs_hba *cs = shost_priv(shost);
1409
1410 return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
1411}
1412static DEVICE_ATTR_RO(cache_size);
1413
1414static ssize_t firmware_show(struct device *dev,
1415 struct device_attribute *attr, char *buf)
1416{
1417 struct Scsi_Host *shost = class_to_shost(dev);
1418 struct myrs_hba *cs = shost_priv(shost);
1419
1420 return snprintf(buf, 16, "%d.%02d-%02d\n",
1421 cs->ctlr_info->fw_major_version,
1422 cs->ctlr_info->fw_minor_version,
1423 cs->ctlr_info->fw_turn_number);
1424}
1425static DEVICE_ATTR_RO(firmware);
1426
1427static ssize_t discovery_store(struct device *dev,
1428 struct device_attribute *attr, const char *buf, size_t count)
1429{
1430 struct Scsi_Host *shost = class_to_shost(dev);
1431 struct myrs_hba *cs = shost_priv(shost);
1432 struct myrs_cmdblk *cmd_blk;
1433 union myrs_cmd_mbox *mbox;
1434 unsigned char status;
1435
1436 mutex_lock(&cs->dcmd_mutex);
1437 cmd_blk = &cs->dcmd_blk;
1438 myrs_reset_cmd(cmd_blk);
1439 mbox = &cmd_blk->mbox;
1440 mbox->common.opcode = MYRS_CMD_OP_IOCTL;
1441 mbox->common.id = MYRS_DCMD_TAG;
1442 mbox->common.control.dma_ctrl_to_host = true;
1443 mbox->common.control.no_autosense = true;
1444 mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
1445 myrs_exec_cmd(cs, cmd_blk);
1446 status = cmd_blk->status;
1447 mutex_unlock(&cs->dcmd_mutex);
1448 if (status != MYRS_STATUS_SUCCESS) {
1449 shost_printk(KERN_INFO, shost,
1450 "Discovery Not Initiated, status %02X\n",
1451 status);
1452 return -EINVAL;
1453 }
1454 shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
1455 cs->next_evseq = 0;
1456 cs->needs_update = true;
1457 queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
1458 flush_delayed_work(&cs->monitor_work);
1459 shost_printk(KERN_INFO, shost, "Discovery Completed\n");
1460
1461 return count;
1462}
1463static DEVICE_ATTR_WO(discovery);
1464
1465static ssize_t flush_cache_store(struct device *dev,
1466 struct device_attribute *attr, const char *buf, size_t count)
1467{
1468 struct Scsi_Host *shost = class_to_shost(dev);
1469 struct myrs_hba *cs = shost_priv(shost);
1470 unsigned char status;
1471
1472 status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
1473 MYRS_RAID_CONTROLLER);
1474 if (status == MYRS_STATUS_SUCCESS) {
1475 shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
1476 return count;
1477 }
1478 shost_printk(KERN_INFO, shost,
1479 "Cache Flush failed, status 0x%02x\n", status);
1480 return -EIO;
1481}
1482static DEVICE_ATTR_WO(flush_cache);
1483
1484static ssize_t disable_enclosure_messages_show(struct device *dev,
1485 struct device_attribute *attr, char *buf)
1486{
1487 struct Scsi_Host *shost = class_to_shost(dev);
1488 struct myrs_hba *cs = shost_priv(shost);
1489
1490 return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
1491}
1492
1493static ssize_t disable_enclosure_messages_store(struct device *dev,
1494 struct device_attribute *attr, const char *buf, size_t count)
1495{
1496 struct scsi_device *sdev = to_scsi_device(dev);
1497 struct myrs_hba *cs = shost_priv(sdev->host);
1498 int value, ret;
1499
1500 ret = kstrtoint(buf, 0, &value);
1501 if (ret)
1502 return ret;
1503
1504 if (value > 2)
1505 return -EINVAL;
1506
1507 cs->disable_enc_msg = value;
1508 return count;
1509}
1510static DEVICE_ATTR_RW(disable_enclosure_messages);
1511
1512static struct device_attribute *myrs_shost_attrs[] = {
1513 &dev_attr_serial,
1514 &dev_attr_ctlr_num,
1515 &dev_attr_processor,
1516 &dev_attr_model,
1517 &dev_attr_ctlr_type,
1518 &dev_attr_cache_size,
1519 &dev_attr_firmware,
1520 &dev_attr_discovery,
1521 &dev_attr_flush_cache,
1522 &dev_attr_disable_enclosure_messages,
1523 NULL,
1524};
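
/*
 * The host attributes above appear under the controller's scsi_host
 * class directory; e.g., assuming the controller probed as host0:
 *
 *   cat /sys/class/scsi_host/host0/firmware
 *   echo 1 > /sys/class/scsi_host/host0/flush_cache
 */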
1525
1526/*
1527 * SCSI midlayer interface
1528 */
static int myrs_host_reset(struct scsi_cmnd *scmd)
1530{
1531 struct Scsi_Host *shost = scmd->device->host;
1532 struct myrs_hba *cs = shost_priv(shost);
1533
1534 cs->reset(cs->io_base);
1535 return SUCCESS;
1536}
1537
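/*
 * myrs_mode_sense - synthesize MODE SENSE data for a logical volume.
 * Only the Caching mode page (0x08) is emulated; a block descriptor is
 * included unless the DBD bit is set in the CDB.
 */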
1538static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
1539 struct myrs_ldev_info *ldev_info)
1540{
1541 unsigned char modes[32], *mode_pg;
1542 bool dbd;
1543 size_t mode_len;
1544
1545 dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1546 if (dbd) {
1547 mode_len = 24;
1548 mode_pg = &modes[4];
1549 } else {
1550 mode_len = 32;
1551 mode_pg = &modes[12];
1552 }
1553 memset(modes, 0, sizeof(modes));
1554 modes[0] = mode_len - 1;
1555 modes[2] = 0x10; /* Enable FUA */
1556 if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
1557 modes[2] |= 0x80;
1558 if (!dbd) {
1559 unsigned char *block_desc = &modes[4];
1560
1561 modes[3] = 8;
1562 put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
1563 put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
1564 }
1565 mode_pg[0] = 0x08;
1566 mode_pg[1] = 0x12;
1567 if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
1568 mode_pg[2] |= 0x01;
1569 if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
1570 ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
1571 mode_pg[2] |= 0x04;
1572 if (ldev_info->cacheline_size) {
1573 mode_pg[2] |= 0x08;
1574 put_unaligned_be16(1 << ldev_info->cacheline_size,
1575 &mode_pg[14]);
1576 }
1577
1578 scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1579}
1580
1581static int myrs_queuecommand(struct Scsi_Host *shost,
1582 struct scsi_cmnd *scmd)
1583{
1584 struct myrs_hba *cs = shost_priv(shost);
1585 struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1586 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
1587 struct scsi_device *sdev = scmd->device;
1588 union myrs_sgl *hw_sge;
1589 dma_addr_t sense_addr;
1590 struct scatterlist *sgl;
1591 unsigned long flags, timeout;
1592 int nsge;
1593
1594 if (!scmd->device->hostdata) {
1595 scmd->result = (DID_NO_CONNECT << 16);
1596 scmd->scsi_done(scmd);
1597 return 0;
1598 }
1599
1600 switch (scmd->cmnd[0]) {
1601 case REPORT_LUNS:
1602 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
1603 0x20, 0x0);
1604 scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1605 scmd->scsi_done(scmd);
1606 return 0;
1607 case MODE_SENSE:
1608 if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1609 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1610
1611 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1612 (scmd->cmnd[2] & 0x3F) != 0x08) {
1613 /* Illegal request, invalid field in CDB */
1614 scsi_build_sense_buffer(0, scmd->sense_buffer,
1615 ILLEGAL_REQUEST, 0x24, 0);
1616 scmd->result = (DRIVER_SENSE << 24) |
1617 SAM_STAT_CHECK_CONDITION;
1618 } else {
1619 myrs_mode_sense(cs, scmd, ldev_info);
1620 scmd->result = (DID_OK << 16);
1621 }
1622 scmd->scsi_done(scmd);
1623 return 0;
1624 }
1625 break;
1626 }
1627
1628 myrs_reset_cmd(cmd_blk);
1629 cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
1630 &sense_addr);
1631 if (!cmd_blk->sense)
1632 return SCSI_MLQUEUE_HOST_BUSY;
1633 cmd_blk->sense_addr = sense_addr;
1634
1635 timeout = scmd->request->timeout;
1636 if (scmd->cmd_len <= 10) {
1637 if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1638 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1639
1640 mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
1641 mbox->SCSI_10.pdev.lun = ldev_info->lun;
1642 mbox->SCSI_10.pdev.target = ldev_info->target;
1643 mbox->SCSI_10.pdev.channel = ldev_info->channel;
1644 mbox->SCSI_10.pdev.ctlr = 0;
1645 } else {
1646 mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
1647 mbox->SCSI_10.pdev.lun = sdev->lun;
1648 mbox->SCSI_10.pdev.target = sdev->id;
1649 mbox->SCSI_10.pdev.channel = sdev->channel;
1650 }
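		/*
		 * Mailbox ids 1 and 2 are reserved for driver-internal
		 * direct and monitoring commands (see myrs_get_config()),
		 * so ids for midlayer requests start at tag + 3.
		 */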
1651 mbox->SCSI_10.id = scmd->request->tag + 3;
1652 mbox->SCSI_10.control.dma_ctrl_to_host =
1653 (scmd->sc_data_direction == DMA_FROM_DEVICE);
1654 if (scmd->request->cmd_flags & REQ_FUA)
1655 mbox->SCSI_10.control.fua = true;
1656 mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
1657 mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
1658 mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
1659 mbox->SCSI_10.cdb_len = scmd->cmd_len;
1660 if (timeout > 60) {
1661 mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
1662 mbox->SCSI_10.tmo.tmo_val = timeout / 60;
1663 } else {
1664 mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
1665 mbox->SCSI_10.tmo.tmo_val = timeout;
1666 }
1667 memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
1668 hw_sge = &mbox->SCSI_10.dma_addr;
1669 cmd_blk->dcdb = NULL;
1670 } else {
1671 dma_addr_t dcdb_dma;
1672
1673 cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
1674 &dcdb_dma);
1675 if (!cmd_blk->dcdb) {
1676 dma_pool_free(cs->sense_pool, cmd_blk->sense,
1677 cmd_blk->sense_addr);
1678 cmd_blk->sense = NULL;
1679 cmd_blk->sense_addr = 0;
1680 return SCSI_MLQUEUE_HOST_BUSY;
1681 }
1682 cmd_blk->dcdb_dma = dcdb_dma;
1683 if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1684 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1685
1686 mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
1687 mbox->SCSI_255.pdev.lun = ldev_info->lun;
1688 mbox->SCSI_255.pdev.target = ldev_info->target;
1689 mbox->SCSI_255.pdev.channel = ldev_info->channel;
1690 mbox->SCSI_255.pdev.ctlr = 0;
1691 } else {
1692 mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
1693 mbox->SCSI_255.pdev.lun = sdev->lun;
1694 mbox->SCSI_255.pdev.target = sdev->id;
1695 mbox->SCSI_255.pdev.channel = sdev->channel;
1696 }
1697 mbox->SCSI_255.id = scmd->request->tag + 3;
1698 mbox->SCSI_255.control.dma_ctrl_to_host =
1699 (scmd->sc_data_direction == DMA_FROM_DEVICE);
1700 if (scmd->request->cmd_flags & REQ_FUA)
1701 mbox->SCSI_255.control.fua = true;
1702 mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
1703 mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
1704 mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
1705 mbox->SCSI_255.cdb_len = scmd->cmd_len;
1706 mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
1707 if (timeout > 60) {
1708 mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
1709 mbox->SCSI_255.tmo.tmo_val = timeout / 60;
1710 } else {
1711 mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
1712 mbox->SCSI_255.tmo.tmo_val = timeout;
1713 }
1714 memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
1715 hw_sge = &mbox->SCSI_255.dma_addr;
1716 }
1717 if (scmd->sc_data_direction == DMA_NONE)
1718 goto submit;
1719 nsge = scsi_dma_map(scmd);
1720 if (nsge == 1) {
1721 sgl = scsi_sglist(scmd);
1722 hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
1723 hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
1724 } else {
1725 struct myrs_sge *hw_sgl;
1726 dma_addr_t hw_sgl_addr;
1727 int i;
1728
1729 if (nsge > 2) {
1730 hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
1731 &hw_sgl_addr);
1732 if (WARN_ON(!hw_sgl)) {
1733 if (cmd_blk->dcdb) {
1734 dma_pool_free(cs->dcdb_pool,
1735 cmd_blk->dcdb,
1736 cmd_blk->dcdb_dma);
1737 cmd_blk->dcdb = NULL;
1738 cmd_blk->dcdb_dma = 0;
1739 }
1740 dma_pool_free(cs->sense_pool,
1741 cmd_blk->sense,
1742 cmd_blk->sense_addr);
1743 cmd_blk->sense = NULL;
1744 cmd_blk->sense_addr = 0;
1745 return SCSI_MLQUEUE_HOST_BUSY;
1746 }
1747 cmd_blk->sgl = hw_sgl;
1748 cmd_blk->sgl_addr = hw_sgl_addr;
1749 if (scmd->cmd_len <= 10)
1750 mbox->SCSI_10.control.add_sge_mem = true;
1751 else
1752 mbox->SCSI_255.control.add_sge_mem = true;
1753 hw_sge->ext.sge0_len = nsge;
1754 hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
1755 } else
1756 hw_sgl = hw_sge->sge;
1757
1758 scsi_for_each_sg(scmd, sgl, nsge, i) {
1759 if (WARN_ON(!hw_sgl)) {
1760 scsi_dma_unmap(scmd);
1761 scmd->result = (DID_ERROR << 16);
1762 scmd->scsi_done(scmd);
1763 return 0;
1764 }
1765 hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
1766 hw_sgl->sge_count = (u64)sg_dma_len(sgl);
1767 hw_sgl++;
1768 }
1769 }
1770submit:
1771 spin_lock_irqsave(&cs->queue_lock, flags);
1772 myrs_qcmd(cs, cmd_blk);
1773 spin_unlock_irqrestore(&cs->queue_lock, flags);
1774
1775 return 0;
1776}
1777
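/*
 * myrs_translate_ldev - map a virtual-channel SCSI address to a logical
 * device number.  Logical volumes live on the channels above the physical
 * ones, numbered target-first: ldev_num = id + (channel offset * max_id).
 */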
1778static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
1779 struct scsi_device *sdev)
1780{
1781 unsigned short ldev_num;
1782 unsigned int chan_offset =
1783 sdev->channel - cs->ctlr_info->physchan_present;
1784
1785 ldev_num = sdev->id + chan_offset * sdev->host->max_id;
1786
1787 return ldev_num;
1788}
1789
1790static int myrs_slave_alloc(struct scsi_device *sdev)
1791{
1792 struct myrs_hba *cs = shost_priv(sdev->host);
1793 unsigned char status;
1794
1795 if (sdev->channel > sdev->host->max_channel)
1796 return 0;
1797
1798 if (sdev->channel >= cs->ctlr_info->physchan_present) {
1799 struct myrs_ldev_info *ldev_info;
1800 unsigned short ldev_num;
1801
1802 if (sdev->lun > 0)
1803 return -ENXIO;
1804
1805 ldev_num = myrs_translate_ldev(cs, sdev);
1806
1807 ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
1808 if (!ldev_info)
1809 return -ENOMEM;
1810
1811 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1812 if (status != MYRS_STATUS_SUCCESS) {
1813 sdev->hostdata = NULL;
1814 kfree(ldev_info);
1815 } else {
1816 enum raid_level level;
1817
1818 dev_dbg(&sdev->sdev_gendev,
1819 "Logical device mapping %d:%d:%d -> %d\n",
1820 ldev_info->channel, ldev_info->target,
1821 ldev_info->lun, ldev_info->ldev_num);
1822
1823 sdev->hostdata = ldev_info;
1824 switch (ldev_info->raid_level) {
1825 case MYRS_RAID_LEVEL0:
1826 level = RAID_LEVEL_LINEAR;
1827 break;
1828 case MYRS_RAID_LEVEL1:
1829 level = RAID_LEVEL_1;
1830 break;
1831 case MYRS_RAID_LEVEL3:
1832 case MYRS_RAID_LEVEL3F:
1833 case MYRS_RAID_LEVEL3L:
1834 level = RAID_LEVEL_3;
1835 break;
1836 case MYRS_RAID_LEVEL5:
1837 case MYRS_RAID_LEVEL5L:
1838 level = RAID_LEVEL_5;
1839 break;
1840 case MYRS_RAID_LEVEL6:
1841 level = RAID_LEVEL_6;
1842 break;
1843 case MYRS_RAID_LEVELE:
1844 case MYRS_RAID_NEWSPAN:
1845 case MYRS_RAID_SPAN:
1846 level = RAID_LEVEL_LINEAR;
1847 break;
1848 case MYRS_RAID_JBOD:
1849 level = RAID_LEVEL_JBOD;
1850 break;
1851 default:
1852 level = RAID_LEVEL_UNKNOWN;
1853 break;
1854 }
1855 raid_set_level(myrs_raid_template,
1856 &sdev->sdev_gendev, level);
1857 if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
1858 const char *name;
1859
1860 name = myrs_devstate_name(ldev_info->dev_state);
1861 sdev_printk(KERN_DEBUG, sdev,
1862 "logical device in state %s\n",
1863 name ? name : "Invalid");
1864 }
1865 }
1866 } else {
1867 struct myrs_pdev_info *pdev_info;
1868
1869 pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1870 if (!pdev_info)
1871 return -ENOMEM;
1872
1873 status = myrs_get_pdev_info(cs, sdev->channel,
1874 sdev->id, sdev->lun,
1875 pdev_info);
1876 if (status != MYRS_STATUS_SUCCESS) {
1877 sdev->hostdata = NULL;
1878 kfree(pdev_info);
1879 return -ENXIO;
1880 }
1881 sdev->hostdata = pdev_info;
1882 }
1883 return 0;
1884}
1885
1886static int myrs_slave_configure(struct scsi_device *sdev)
1887{
1888 struct myrs_hba *cs = shost_priv(sdev->host);
1889 struct myrs_ldev_info *ldev_info;
1890
1891 if (sdev->channel > sdev->host->max_channel)
1892 return -ENXIO;
1893
1894 if (sdev->channel < cs->ctlr_info->physchan_present) {
1895 /* Skip HBA device */
1896 if (sdev->type == TYPE_RAID)
1897 return -ENXIO;
1898 sdev->no_uld_attach = 1;
1899 return 0;
1900 }
1901 if (sdev->lun != 0)
1902 return -ENXIO;
1903
1904 ldev_info = sdev->hostdata;
1905 if (!ldev_info)
1906 return -ENXIO;
1907 if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
1908 ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
1909 sdev->wce_default_on = 1;
1910 sdev->tagged_supported = 1;
1911 return 0;
1912}
1913
1914static void myrs_slave_destroy(struct scsi_device *sdev)
1915{
1916 kfree(sdev->hostdata);
1917}
1918
1919struct scsi_host_template myrs_template = {
1920 .module = THIS_MODULE,
1921 .name = "DAC960",
1922 .proc_name = "myrs",
1923 .queuecommand = myrs_queuecommand,
1924 .eh_host_reset_handler = myrs_host_reset,
1925 .slave_alloc = myrs_slave_alloc,
1926 .slave_configure = myrs_slave_configure,
1927 .slave_destroy = myrs_slave_destroy,
1928 .cmd_size = sizeof(struct myrs_cmdblk),
1929 .shost_attrs = myrs_shost_attrs,
1930 .sdev_attrs = myrs_sdev_attrs,
1931 .this_id = -1,
1932};
1933
1934static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
1935 const struct pci_device_id *entry)
1936{
1937 struct Scsi_Host *shost;
1938 struct myrs_hba *cs;
1939
1940 shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
1941 if (!shost)
1942 return NULL;
1943
1944 shost->max_cmd_len = 16;
1945 shost->max_lun = 256;
1946 cs = shost_priv(shost);
1947 mutex_init(&cs->dcmd_mutex);
1948 mutex_init(&cs->cinfo_mutex);
1949 cs->host = shost;
1950
1951 return cs;
1952}
1953
1954/*
1955 * RAID template functions
1956 */
1957
1958/**
1959 * myrs_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
1961 */
1962static int
1963myrs_is_raid(struct device *dev)
1964{
1965 struct scsi_device *sdev = to_scsi_device(dev);
1966 struct myrs_hba *cs = shost_priv(sdev->host);
1967
1968 return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
1969}
1970
1971/**
1972 * myrs_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
1974 */
1975static void
1976myrs_get_resync(struct device *dev)
1977{
1978 struct scsi_device *sdev = to_scsi_device(dev);
1979 struct myrs_hba *cs = shost_priv(sdev->host);
1980 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1981 u64 percent_complete = 0;
1982 u8 status;
1983
1984 if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
1985 return;
1986 if (ldev_info->rbld_active) {
1987 unsigned short ldev_num = ldev_info->ldev_num;
1988
		status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
		if (status == MYRS_STATUS_SUCCESS) {
			percent_complete = ldev_info->rbld_lba * 100;
			do_div(percent_complete, ldev_info->cfg_devsize);
		}
1992 }
1993 raid_set_resync(myrs_raid_template, dev, percent_complete);
1994}
1995
1996/**
1997 * myrs_get_state - get raid volume status
 * @dev: the device struct object
1999 */
2000static void
2001myrs_get_state(struct device *dev)
2002{
2003 struct scsi_device *sdev = to_scsi_device(dev);
2004 struct myrs_hba *cs = shost_priv(sdev->host);
2005 struct myrs_ldev_info *ldev_info = sdev->hostdata;
2006 enum raid_state state = RAID_STATE_UNKNOWN;
2007
2008 if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
2009 state = RAID_STATE_UNKNOWN;
2010 else {
2011 switch (ldev_info->dev_state) {
2012 case MYRS_DEVICE_ONLINE:
2013 state = RAID_STATE_ACTIVE;
2014 break;
2015 case MYRS_DEVICE_SUSPECTED_CRITICAL:
2016 case MYRS_DEVICE_CRITICAL:
2017 state = RAID_STATE_DEGRADED;
2018 break;
2019 case MYRS_DEVICE_REBUILD:
2020 state = RAID_STATE_RESYNCING;
2021 break;
2022 case MYRS_DEVICE_UNCONFIGURED:
2023 case MYRS_DEVICE_INVALID_STATE:
2024 state = RAID_STATE_UNKNOWN;
2025 break;
2026 default:
2027 state = RAID_STATE_OFFLINE;
2028 }
2029 }
2030 raid_set_state(myrs_raid_template, dev, state);
2031}
2032
2033struct raid_function_template myrs_raid_functions = {
2034 .cookie = &myrs_template,
2035 .is_raid = myrs_is_raid,
2036 .get_resync = myrs_get_resync,
2037 .get_state = myrs_get_state,
2038};
2039
2040/*
2041 * PCI interface functions
2042 */
2043void myrs_flush_cache(struct myrs_hba *cs)
2044{
2045 myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
2046}
2047
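/*
 * myrs_handle_scsi - complete an I/O command: copy sense data out on
 * failure, release the per-command DMA pool buffers, set the residual
 * and result, and hand the command back to the midlayer.
 */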
2048static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
2049 struct scsi_cmnd *scmd)
2050{
2051 unsigned char status;
2052
2053 if (!cmd_blk)
2054 return;
2055
2056 scsi_dma_unmap(scmd);
2057 status = cmd_blk->status;
2058 if (cmd_blk->sense) {
2059 if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
2060 unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
2061
2062 if (sense_len > cmd_blk->sense_len)
2063 sense_len = cmd_blk->sense_len;
2064 memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
2065 }
2066 dma_pool_free(cs->sense_pool, cmd_blk->sense,
2067 cmd_blk->sense_addr);
2068 cmd_blk->sense = NULL;
2069 cmd_blk->sense_addr = 0;
2070 }
2071 if (cmd_blk->dcdb) {
2072 dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
2073 cmd_blk->dcdb_dma);
2074 cmd_blk->dcdb = NULL;
2075 cmd_blk->dcdb_dma = 0;
2076 }
2077 if (cmd_blk->sgl) {
2078 dma_pool_free(cs->sg_pool, cmd_blk->sgl,
2079 cmd_blk->sgl_addr);
2080 cmd_blk->sgl = NULL;
2081 cmd_blk->sgl_addr = 0;
2082 }
2083 if (cmd_blk->residual)
2084 scsi_set_resid(scmd, cmd_blk->residual);
2085 if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
2086 status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
2087 scmd->result = (DID_BAD_TARGET << 16);
2088 else
2089 scmd->result = (DID_OK << 16) | status;
2090 scmd->scsi_done(scmd);
2091}
2092
2093static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
2094{
2095 if (!cmd_blk)
2096 return;
2097
2098 if (cmd_blk->complete) {
2099 complete(cmd_blk->complete);
2100 cmd_blk->complete = NULL;
2101 }
2102}
2103
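/*
 * myrs_monitor - periodic health check.  Fetches and logs new controller
 * events, refreshes controller and logical device information while
 * background operations are active, and re-arms itself on the work queue.
 */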
2104static void myrs_monitor(struct work_struct *work)
2105{
2106 struct myrs_hba *cs = container_of(work, struct myrs_hba,
2107 monitor_work.work);
2108 struct Scsi_Host *shost = cs->host;
2109 struct myrs_ctlr_info *info = cs->ctlr_info;
2110 unsigned int epoch = cs->fwstat_buf->epoch;
2111 unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
2112 unsigned char status;
2113
2114 dev_dbg(&shost->shost_gendev, "monitor tick\n");
2115
2116 status = myrs_get_fwstatus(cs);
2117
2118 if (cs->needs_update) {
2119 cs->needs_update = false;
2120 mutex_lock(&cs->cinfo_mutex);
2121 status = myrs_get_ctlr_info(cs);
2122 mutex_unlock(&cs->cinfo_mutex);
2123 }
2124 if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
2125 status = myrs_get_event(cs, cs->next_evseq,
2126 cs->event_buf);
2127 if (status == MYRS_STATUS_SUCCESS) {
2128 myrs_log_event(cs, cs->event_buf);
2129 cs->next_evseq++;
2130 interval = 1;
2131 }
2132 }
2133
2134 if (time_after(jiffies, cs->secondary_monitor_time
2135 + MYRS_SECONDARY_MONITOR_INTERVAL))
2136 cs->secondary_monitor_time = jiffies;
2137
2138 if (info->bg_init_active +
2139 info->ldev_init_active +
2140 info->pdev_init_active +
2141 info->cc_active +
2142 info->rbld_active +
2143 info->exp_active != 0) {
2144 struct scsi_device *sdev;
2145
2146 shost_for_each_device(sdev, shost) {
2147 struct myrs_ldev_info *ldev_info;
2148 int ldev_num;
2149
2150 if (sdev->channel < info->physchan_present)
2151 continue;
2152 ldev_info = sdev->hostdata;
2153 if (!ldev_info)
2154 continue;
2155 ldev_num = ldev_info->ldev_num;
2156 myrs_get_ldev_info(cs, ldev_num, ldev_info);
2157 }
2158 cs->needs_update = true;
2159 }
2160 if (epoch == cs->epoch &&
2161 cs->fwstat_buf->next_evseq == cs->next_evseq &&
2162 (cs->needs_update == false ||
2163 time_before(jiffies, cs->primary_monitor_time
2164 + MYRS_PRIMARY_MONITOR_INTERVAL))) {
2165 interval = MYRS_SECONDARY_MONITOR_INTERVAL;
2166 }
2167
2168 if (interval > 1)
2169 cs->primary_monitor_time = jiffies;
2170 queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
2171}
2172
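/*
 * myrs_create_mempools - allocate the per-command DMA pools (scatter/
 * gather lists, sense buffers, DCDBs) and start the monitoring workqueue.
 */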
2173static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
2174{
2175 struct Scsi_Host *shost = cs->host;
2176 size_t elem_size, elem_align;
2177
2178 elem_align = sizeof(struct myrs_sge);
2179 elem_size = shost->sg_tablesize * elem_align;
2180 cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
2181 elem_size, elem_align, 0);
2182 if (cs->sg_pool == NULL) {
2183 shost_printk(KERN_ERR, shost,
2184 "Failed to allocate SG pool\n");
2185 return false;
2186 }
2187
2188 cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
2189 MYRS_SENSE_SIZE, sizeof(int), 0);
2190 if (cs->sense_pool == NULL) {
2191 dma_pool_destroy(cs->sg_pool);
2192 cs->sg_pool = NULL;
2193 shost_printk(KERN_ERR, shost,
2194 "Failed to allocate sense data pool\n");
2195 return false;
2196 }
2197
2198 cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
2199 MYRS_DCDB_SIZE,
2200 sizeof(unsigned char), 0);
2201 if (!cs->dcdb_pool) {
2202 dma_pool_destroy(cs->sg_pool);
2203 cs->sg_pool = NULL;
2204 dma_pool_destroy(cs->sense_pool);
2205 cs->sense_pool = NULL;
2206 shost_printk(KERN_ERR, shost,
2207 "Failed to allocate DCDB pool\n");
2208 return false;
2209 }
2210
2211 snprintf(cs->work_q_name, sizeof(cs->work_q_name),
2212 "myrs_wq_%d", shost->host_no);
2213 cs->work_q = create_singlethread_workqueue(cs->work_q_name);
2214 if (!cs->work_q) {
2215 dma_pool_destroy(cs->dcdb_pool);
2216 cs->dcdb_pool = NULL;
2217 dma_pool_destroy(cs->sg_pool);
2218 cs->sg_pool = NULL;
2219 dma_pool_destroy(cs->sense_pool);
2220 cs->sense_pool = NULL;
2221 shost_printk(KERN_ERR, shost,
2222 "Failed to create workqueue\n");
2223 return false;
2224 }
2225
2226 /* Initialize the Monitoring Timer. */
2227 INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
2228 queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
2229
2230 return true;
2231}
2232
2233static void myrs_destroy_mempools(struct myrs_hba *cs)
2234{
2235 cancel_delayed_work_sync(&cs->monitor_work);
2236 destroy_workqueue(cs->work_q);
2237
2238 dma_pool_destroy(cs->sg_pool);
2239 dma_pool_destroy(cs->dcdb_pool);
2240 dma_pool_destroy(cs->sense_pool);
2241}
2242
2243static void myrs_unmap(struct myrs_hba *cs)
2244{
2245 kfree(cs->event_buf);
2246 kfree(cs->ctlr_info);
2247 if (cs->fwstat_buf) {
2248 dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
2249 cs->fwstat_buf, cs->fwstat_addr);
2250 cs->fwstat_buf = NULL;
2251 }
2252 if (cs->first_stat_mbox) {
2253 dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
2254 cs->first_stat_mbox, cs->stat_mbox_addr);
2255 cs->first_stat_mbox = NULL;
2256 }
2257 if (cs->first_cmd_mbox) {
2258 dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
2259 cs->first_cmd_mbox, cs->cmd_mbox_addr);
2260 cs->first_cmd_mbox = NULL;
2261 }
2262}
2263
2264static void myrs_cleanup(struct myrs_hba *cs)
2265{
2266 struct pci_dev *pdev = cs->pdev;
2267
2268 /* Free the memory mailbox, status, and related structures */
2269 myrs_unmap(cs);
2270
2271 if (cs->mmio_base) {
2272 cs->disable_intr(cs);
2273 iounmap(cs->mmio_base);
2274 }
2275 if (cs->irq)
2276 free_irq(cs->irq, cs);
2277 if (cs->io_addr)
2278 release_region(cs->io_addr, 0x80);
2280 pci_set_drvdata(pdev, NULL);
2281 pci_disable_device(pdev);
2282 scsi_host_put(cs->host);
2283}
2284
2285static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
2286 const struct pci_device_id *entry)
2287{
2288 struct myrs_privdata *privdata =
2289 (struct myrs_privdata *)entry->driver_data;
2290 irq_handler_t irq_handler = privdata->irq_handler;
2291 unsigned int mmio_size = privdata->mmio_size;
2292 struct myrs_hba *cs = NULL;
2293
2294 cs = myrs_alloc_host(pdev, entry);
2295 if (!cs) {
2296 dev_err(&pdev->dev, "Unable to allocate Controller\n");
2297 return NULL;
2298 }
2299 cs->pdev = pdev;
2300
2301 if (pci_enable_device(pdev))
2302 goto Failure;
2303
2304 cs->pci_addr = pci_resource_start(pdev, 0);
2305
2306 pci_set_drvdata(pdev, cs);
2307 spin_lock_init(&cs->queue_lock);
2308 /* Map the Controller Register Window. */
2309 if (mmio_size < PAGE_SIZE)
2310 mmio_size = PAGE_SIZE;
2311 cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
2312 if (cs->mmio_base == NULL) {
2313 dev_err(&pdev->dev,
2314 "Unable to map Controller Register Window\n");
2315 goto Failure;
2316 }
2317
2318 cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
2319 if (privdata->hw_init(pdev, cs, cs->io_base))
2320 goto Failure;
2321
2322 /* Acquire shared access to the IRQ Channel. */
2323 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
2324 dev_err(&pdev->dev,
2325 "Unable to acquire IRQ Channel %d\n", pdev->irq);
2326 goto Failure;
2327 }
2328 cs->irq = pdev->irq;
2329 return cs;
2330
2331Failure:
2332 dev_err(&pdev->dev,
2333 "Failed to initialize Controller\n");
2334 myrs_cleanup(cs);
2335 return NULL;
2336}
2337
/**
 * myrs_err_status - report Controller BIOS messages
 * @cs: adapter state
 * @status: error status byte read during the BIOS handshake
 * @parm0: first auxiliary status parameter
 * @parm1: second auxiliary status parameter
 *
 * Reports Controller BIOS Messages passed through the Error Status
 * Register when the driver performs the BIOS handshaking.  Returns
 * true for fatal errors and false otherwise.
 */
2343
2344static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
2345 unsigned char parm0, unsigned char parm1)
2346{
2347 struct pci_dev *pdev = cs->pdev;
2348
2349 switch (status) {
2350 case 0x00:
2351 dev_info(&pdev->dev,
2352 "Physical Device %d:%d Not Responding\n",
2353 parm1, parm0);
2354 break;
2355 case 0x08:
2356 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2357 break;
2358 case 0x30:
2359 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2360 break;
2361 case 0x60:
2362 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2363 break;
2364 case 0x70:
2365 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2366 break;
2367 case 0x90:
2368 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2369 parm1, parm0);
2370 break;
2371 case 0xA0:
2372 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2373 break;
2374 case 0xB0:
2375 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2376 break;
2377 case 0xD0:
2378 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2379 break;
2380 case 0xF0:
2381 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2382 return true;
2383 default:
2384 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2385 status);
2386 return true;
2387 }
2388 return false;
2389}
2390
2391/*
2392 * Hardware-specific functions
2393 */
2394
2395/*
2396 * DAC960 GEM Series Controllers.
2397 */
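/*
 * A reading of the register convention used below: the GEM doorbell
 * and mask registers are 32 bits wide with the meaningful bits in the
 * top byte, hence the "<< 24" shifts. Writes to a *_READ_OFFSET
 * aperture appear to set bits, and writes to the matching
 * *_CLEAR_OFFSET aperture to clear them, so acknowledging a hardware
 * mailbox status comes down to:
 *
 *	writel(cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24),
 *	       base + DAC960_GEM_IDB_CLEAR_OFFSET);
 */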
2398
2399static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
2400{
2401 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
2402
2403 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2404}
2405
2406static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
2407{
2408 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
2409
2410 writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
2411}
2412
2413static inline void DAC960_GEM_gen_intr(void __iomem *base)
2414{
2415 __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
2416
2417 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2418}
2419
2420static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
2421{
2422 __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
2423
2424 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2425}
2426
2427static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
2428{
2429 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
2430
2431 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2432}
2433
2434static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
2435{
2436 __le32 val;
2437
2438 val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
2439 return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
2440}
2441
2442static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
2443{
2444 __le32 val;
2445
2446 val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
2447 return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
2448}
2449
2450static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
2451{
2452 __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
2453
2454 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2455}
2456
2457static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
2458{
2459 __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
2460
2461 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2462}
2463
2464static inline void DAC960_GEM_ack_intr(void __iomem *base)
2465{
2466 __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
2467 DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
2468
2469 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2470}
2471
2472static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
2473{
2474 __le32 val;
2475
2476 val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
2477 return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
2478}
2479
2480static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
2481{
2482 __le32 val;
2483
2484 val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
2485 return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
2486}
2487
2488static inline void DAC960_GEM_enable_intr(void __iomem *base)
2489{
2490 __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
2491 DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
2492 writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
2493}
2494
2495static inline void DAC960_GEM_disable_intr(void __iomem *base)
2496{
2497 __le32 val = 0;
2498
2499 writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
2500}
2501
2502static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
2503{
2504 __le32 val;
2505
2506 val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
2507 return !((le32_to_cpu(val) >> 24) &
2508 (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
2509 DAC960_GEM_IRQMASK_MMBOX_IRQ));
2510}
2511
2512static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2513 union myrs_cmd_mbox *mbox)
2514{
2515 memcpy(&mem_mbox->words[1], &mbox->words[1],
2516 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2517 /* Barrier to avoid reordering */
2518 wmb();
2519 mem_mbox->words[0] = mbox->words[0];
2520 /* Barrier to force PCI access */
2521 mb();
2522}
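/*
 * The copy order above looks deliberate: words[1..15] are staged
 * first, wmb() makes them visible, and only then is words[0] (command
 * id, opcode, and control flags) stored, presumably so the controller
 * never observes a half-written mailbox once the non-zero first word
 * marks the slot as occupied. The trailing mb() forces the update out
 * before the doorbell is rung.
 */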
2523
2524static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
2525 dma_addr_t cmd_mbox_addr)
2526{
2527 dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
2528}
2529
2530static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
2531{
2532 return readw(base + DAC960_GEM_CMDSTS_OFFSET);
2533}
2534
2535static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
2536{
2537 return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
2538}
2539
2540static inline bool
2541DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
2542 unsigned char *param0, unsigned char *param1)
2543{
2544 __le32 val;
2545
2546 val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
2547 if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
2548 return false;
2549 *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
2550 *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
2551 *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
2552 writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
2553 return true;
2554}
2555
2556static inline unsigned char
2557DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2558{
2559 unsigned char status;
2560
2561 while (DAC960_GEM_hw_mbox_is_full(base))
2562 udelay(1);
2563 DAC960_GEM_write_hw_mbox(base, mbox_addr);
2564 DAC960_GEM_hw_mbox_new_cmd(base);
2565 while (!DAC960_GEM_hw_mbox_status_available(base))
2566 udelay(1);
2567 status = DAC960_GEM_read_cmd_status(base);
2568 DAC960_GEM_ack_hw_mbox_intr(base);
2569 DAC960_GEM_ack_hw_mbox_status(base);
2570
2571 return status;
2572}
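/*
 * A caller-side sketch (illustrative only; "set_mbox_addr" stands in
 * for the DMA address of a prepared set_mbox command): this routine
 * is handed to myrs_enable_mmio_mbox() as the handshake primitive,
 * and it returns a firmware status byte:
 *
 *	unsigned char status;
 *
 *	status = DAC960_GEM_mbox_init(base, set_mbox_addr);
 *	if (status != MYRS_STATUS_SUCCESS)
 *		dev_err(&pdev->dev,
 *			"mailbox init failed, status 0x%02X\n", status);
 */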
2573
2574static int DAC960_GEM_hw_init(struct pci_dev *pdev,
2575 struct myrs_hba *cs, void __iomem *base)
2576{
2577 int timeout = 0;
2578 unsigned char status, parm0, parm1;
2579
2580 DAC960_GEM_disable_intr(base);
2581 DAC960_GEM_ack_hw_mbox_status(base);
2582 udelay(1000);
2583 while (DAC960_GEM_init_in_progress(base) &&
2584 timeout < MYRS_MAILBOX_TIMEOUT) {
2585 if (DAC960_GEM_read_error_status(base, &status,
2586 &parm0, &parm1) &&
2587 myrs_err_status(cs, status, parm0, parm1))
2588 return -EIO;
2589 udelay(10);
2590 timeout++;
2591 }
2592 if (timeout == MYRS_MAILBOX_TIMEOUT) {
2593 dev_err(&pdev->dev,
2594 "Timeout waiting for Controller Initialisation\n");
2595 return -ETIMEDOUT;
2596 }
2597 if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
2598 dev_err(&pdev->dev,
2599 "Unable to Enable Memory Mailbox Interface\n");
2600 DAC960_GEM_reset_ctrl(base);
2601 return -EAGAIN;
2602 }
2603 DAC960_GEM_enable_intr(base);
2604 cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
2605 cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
2606 cs->disable_intr = DAC960_GEM_disable_intr;
2607 cs->reset = DAC960_GEM_reset_ctrl;
2608 return 0;
2609}
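/*
 * Worst case, the polling loop above runs MYRS_MAILBOX_TIMEOUT
 * (1000000, see myrs.h) iterations of udelay(10), i.e. roughly ten
 * seconds of busy-waiting for the controller to finish initialising
 * before the routine gives up with -ETIMEDOUT.
 */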
2610
2611static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
2612{
2613 struct myrs_hba *cs = arg;
2614 void __iomem *base = cs->io_base;
2615 struct myrs_stat_mbox *next_stat_mbox;
2616 unsigned long flags;
2617
2618 spin_lock_irqsave(&cs->queue_lock, flags);
2619 DAC960_GEM_ack_intr(base);
2620 next_stat_mbox = cs->next_stat_mbox;
2621 while (next_stat_mbox->id > 0) {
2622 unsigned short id = next_stat_mbox->id;
2623 struct scsi_cmnd *scmd = NULL;
2624 struct myrs_cmdblk *cmd_blk = NULL;
2625
2626 if (id == MYRS_DCMD_TAG)
2627 cmd_blk = &cs->dcmd_blk;
2628 else if (id == MYRS_MCMD_TAG)
2629 cmd_blk = &cs->mcmd_blk;
2630 else {
2631 scmd = scsi_host_find_tag(cs->host, id - 3);
2632 if (scmd)
2633 cmd_blk = scsi_cmd_priv(scmd);
2634 }
2635 if (cmd_blk) {
2636 cmd_blk->status = next_stat_mbox->status;
2637 cmd_blk->sense_len = next_stat_mbox->sense_len;
2638 cmd_blk->residual = next_stat_mbox->residual;
2639 } else
2640 dev_err(&cs->pdev->dev,
2641 "Unhandled command completion %d\n", id);
2642
2643 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
2644 if (++next_stat_mbox > cs->last_stat_mbox)
2645 next_stat_mbox = cs->first_stat_mbox;
2646
2647 if (cmd_blk) {
2648 if (id < 3)
2649 myrs_handle_cmdblk(cs, cmd_blk);
2650 else
2651 myrs_handle_scsi(cs, cmd_blk, scmd);
2652 }
2653 }
2654 cs->next_stat_mbox = next_stat_mbox;
2655 spin_unlock_irqrestore(&cs->queue_lock, flags);
2656 return IRQ_HANDLED;
2657}
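/*
 * Status mailbox ids decode as follows: 0 marks a free slot and stops
 * the loop, MYRS_DCMD_TAG (1) and MYRS_MCMD_TAG (2) name the driver's
 * two internal command blocks, and any larger id is a block-layer tag
 * offset by 3, recovered with scsi_host_find_tag(cs->host, id - 3).
 * Each consumed slot is zeroed so the controller can reuse it, and
 * the ring wraps at cs->last_stat_mbox.
 */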
2658
2659struct myrs_privdata DAC960_GEM_privdata = {
2660 .hw_init = DAC960_GEM_hw_init,
2661 .irq_handler = DAC960_GEM_intr_handler,
2662 .mmio_size = DAC960_GEM_mmio_size,
2663};
2664
2665/*
2666 * DAC960 BA Series Controllers.
2667 */
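/*
 * The BA (and, below, LP) helpers mirror the GEM set one for one; the
 * difference is that their doorbell, mask, and error status registers
 * are byte-wide, so plain readb()/writeb() accesses replace the
 * shifted 32-bit accesses of the GEM series.
 */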
2668
2669static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
2670{
2671 writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
2672}
2673
2674static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
2675{
2676 writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
2677}
2678
2679static inline void DAC960_BA_gen_intr(void __iomem *base)
2680{
2681 writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
2682}
2683
2684static inline void DAC960_BA_reset_ctrl(void __iomem *base)
2685{
2686 writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
2687}
2688
2689static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
2690{
2691 writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
2692}
2693
2694static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
2695{
2696 u8 val;
2697
2698 val = readb(base + DAC960_BA_IDB_OFFSET);
2699 return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
2700}
2701
2702static inline bool DAC960_BA_init_in_progress(void __iomem *base)
2703{
2704 u8 val;
2705
2706 val = readb(base + DAC960_BA_IDB_OFFSET);
2707 return !(val & DAC960_BA_IDB_INIT_DONE);
2708}
2709
2710static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
2711{
2712 writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
2713}
2714
2715static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
2716{
2717 writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
2718}
2719
2720static inline void DAC960_BA_ack_intr(void __iomem *base)
2721{
2722 writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
2723 base + DAC960_BA_ODB_OFFSET);
2724}
2725
2726static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
2727{
2728 u8 val;
2729
2730 val = readb(base + DAC960_BA_ODB_OFFSET);
2731 return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
2732}
2733
2734static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
2735{
2736 u8 val;
2737
2738 val = readb(base + DAC960_BA_ODB_OFFSET);
2739 return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
2740}
2741
2742static inline void DAC960_BA_enable_intr(void __iomem *base)
2743{
2744 writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
2745}
2746
2747static inline void DAC960_BA_disable_intr(void __iomem *base)
2748{
2749 writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
2750}
2751
2752static inline bool DAC960_BA_intr_enabled(void __iomem *base)
2753{
2754 u8 val;
2755
2756 val = readb(base + DAC960_BA_IRQMASK_OFFSET);
2757 return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
2758}
2759
2760static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2761 union myrs_cmd_mbox *mbox)
2762{
2763 memcpy(&mem_mbox->words[1], &mbox->words[1],
2764 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2765 /* Barrier to avoid reordering */
2766 wmb();
2767 mem_mbox->words[0] = mbox->words[0];
2768 /* Barrier to force PCI access */
2769 mb();
2770}
2771
2772
2773static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
2774 dma_addr_t cmd_mbox_addr)
2775{
2776 dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
2777}
2778
2779static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
2780{
2781 return readw(base + DAC960_BA_CMDSTS_OFFSET);
2782}
2783
2784static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
2785{
2786 return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
2787}
2788
2789static inline bool
2790DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
2791 unsigned char *param0, unsigned char *param1)
2792{
2793 u8 val;
2794
2795 val = readb(base + DAC960_BA_ERRSTS_OFFSET);
2796 if (!(val & DAC960_BA_ERRSTS_PENDING))
2797 return false;
2798 val &= ~DAC960_BA_ERRSTS_PENDING;
2799 *error = val;
2800 *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
2801 *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
2802 writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
2803 return true;
2804}
2805
2806static inline unsigned char
2807DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2808{
2809 unsigned char status;
2810
2811 while (DAC960_BA_hw_mbox_is_full(base))
2812 udelay(1);
2813 DAC960_BA_write_hw_mbox(base, mbox_addr);
2814 DAC960_BA_hw_mbox_new_cmd(base);
2815 while (!DAC960_BA_hw_mbox_status_available(base))
2816 udelay(1);
2817 status = DAC960_BA_read_cmd_status(base);
2818 DAC960_BA_ack_hw_mbox_intr(base);
2819 DAC960_BA_ack_hw_mbox_status(base);
2820
2821 return status;
2822}
2823
2824static int DAC960_BA_hw_init(struct pci_dev *pdev,
2825 struct myrs_hba *cs, void __iomem *base)
2826{
2827 int timeout = 0;
2828 unsigned char status, parm0, parm1;
2829
2830 DAC960_BA_disable_intr(base);
2831 DAC960_BA_ack_hw_mbox_status(base);
2832 udelay(1000);
2833 while (DAC960_BA_init_in_progress(base) &&
2834 timeout < MYRS_MAILBOX_TIMEOUT) {
2835 if (DAC960_BA_read_error_status(base, &status,
2836 &parm0, &parm1) &&
2837 myrs_err_status(cs, status, parm0, parm1))
2838 return -EIO;
2839 udelay(10);
2840 timeout++;
2841 }
2842 if (timeout == MYRS_MAILBOX_TIMEOUT) {
2843 dev_err(&pdev->dev,
2844 "Timeout waiting for Controller Initialisation\n");
2845 return -ETIMEDOUT;
2846 }
2847 if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
2848 dev_err(&pdev->dev,
2849 "Unable to Enable Memory Mailbox Interface\n");
2850 DAC960_BA_reset_ctrl(base);
2851 return -EAGAIN;
2852 }
2853 DAC960_BA_enable_intr(base);
2854 cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
2855 cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
2856 cs->disable_intr = DAC960_BA_disable_intr;
2857 cs->reset = DAC960_BA_reset_ctrl;
2858 return 0;
2859}
2860
2861static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
2862{
2863 struct myrs_hba *cs = arg;
2864 void __iomem *base = cs->io_base;
2865 struct myrs_stat_mbox *next_stat_mbox;
2866 unsigned long flags;
2867
2868 spin_lock_irqsave(&cs->queue_lock, flags);
2869 DAC960_BA_ack_intr(base);
2870 next_stat_mbox = cs->next_stat_mbox;
2871 while (next_stat_mbox->id > 0) {
2872 unsigned short id = next_stat_mbox->id;
2873 struct scsi_cmnd *scmd = NULL;
2874 struct myrs_cmdblk *cmd_blk = NULL;
2875
2876 if (id == MYRS_DCMD_TAG)
2877 cmd_blk = &cs->dcmd_blk;
2878 else if (id == MYRS_MCMD_TAG)
2879 cmd_blk = &cs->mcmd_blk;
2880 else {
2881 scmd = scsi_host_find_tag(cs->host, id - 3);
2882 if (scmd)
2883 cmd_blk = scsi_cmd_priv(scmd);
2884 }
2885 if (cmd_blk) {
2886 cmd_blk->status = next_stat_mbox->status;
2887 cmd_blk->sense_len = next_stat_mbox->sense_len;
2888 cmd_blk->residual = next_stat_mbox->residual;
2889 } else
2890 dev_err(&cs->pdev->dev,
2891 "Unhandled command completion %d\n", id);
2892
2893 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
2894 if (++next_stat_mbox > cs->last_stat_mbox)
2895 next_stat_mbox = cs->first_stat_mbox;
2896
2897 if (cmd_blk) {
2898 if (id < 3)
2899 myrs_handle_cmdblk(cs, cmd_blk);
2900 else
2901 myrs_handle_scsi(cs, cmd_blk, scmd);
2902 }
2903 }
2904 cs->next_stat_mbox = next_stat_mbox;
2905 spin_unlock_irqrestore(&cs->queue_lock, flags);
2906 return IRQ_HANDLED;
2907}
2908
2909struct myrs_privdata DAC960_BA_privdata = {
2910 .hw_init = DAC960_BA_hw_init,
2911 .irq_handler = DAC960_BA_intr_handler,
2912 .mmio_size = DAC960_BA_mmio_size,
2913};
2914
2915/*
2916 * DAC960 LP Series Controllers.
2917 */
2918
2919static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
2920{
2921 writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
2922}
2923
2924static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
2925{
2926 writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
2927}
2928
2929static inline void DAC960_LP_gen_intr(void __iomem *base)
2930{
2931 writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
2932}
2933
2934static inline void DAC960_LP_reset_ctrl(void __iomem *base)
2935{
2936 writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
2937}
2938
2939static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
2940{
2941 writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
2942}
2943
2944static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
2945{
2946 u8 val;
2947
2948 val = readb(base + DAC960_LP_IDB_OFFSET);
2949 return val & DAC960_LP_IDB_HWMBOX_FULL;
2950}
2951
2952static inline bool DAC960_LP_init_in_progress(void __iomem *base)
2953{
2954 u8 val;
2955
2956 val = readb(base + DAC960_LP_IDB_OFFSET);
2957 return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
2958}
2959
2960static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
2961{
2962 writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
2963}
2964
2965static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
2966{
2967 writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
2968}
2969
2970static inline void DAC960_LP_ack_intr(void __iomem *base)
2971{
2972 writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
2973 base + DAC960_LP_ODB_OFFSET);
2974}
2975
2976static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
2977{
2978 u8 val;
2979
2980 val = readb(base + DAC960_LP_ODB_OFFSET);
2981 return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
2982}
2983
2984static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
2985{
2986 u8 val;
2987
2988 val = readb(base + DAC960_LP_ODB_OFFSET);
2989 return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
2990}
2991
2992static inline void DAC960_LP_enable_intr(void __iomem *base)
2993{
2994 writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
2995}
2996
2997static inline void DAC960_LP_disable_intr(void __iomem *base)
2998{
2999 writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
3000}
3001
3002static inline bool DAC960_LP_intr_enabled(void __iomem *base)
3003{
3004 u8 val;
3005
3006 val = readb(base + DAC960_LP_IRQMASK_OFFSET);
3007 return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
3008}
3009
3010static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
3011 union myrs_cmd_mbox *mbox)
3012{
3013 memcpy(&mem_mbox->words[1], &mbox->words[1],
3014 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
3015 /* Barrier to avoid reordering */
3016 wmb();
3017 mem_mbox->words[0] = mbox->words[0];
3018 /* Barrier to force PCI access */
3019 mb();
3020}
3021
3022static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
3023 dma_addr_t cmd_mbox_addr)
3024{
3025 dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
3026}
3027
3028static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
3029{
3030 return readw(base + DAC960_LP_CMDSTS_OFFSET);
3031}
3032
3033static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
3034{
3035 return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
3036}
3037
3038static inline bool
3039DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
3040 unsigned char *param0, unsigned char *param1)
3041{
3042 u8 val;
3043
3044 val = readb(base + DAC960_LP_ERRSTS_OFFSET);
3045 if (!(val & DAC960_LP_ERRSTS_PENDING))
3046 return false;
3047 val &= ~DAC960_LP_ERRSTS_PENDING;
3048 *error = val;
3049 *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
3050 *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
3051 writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
3052 return true;
3053}
3054
3055static inline unsigned char
3056DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
3057{
3058 unsigned char status;
3059
3060 while (DAC960_LP_hw_mbox_is_full(base))
3061 udelay(1);
3062 DAC960_LP_write_hw_mbox(base, mbox_addr);
3063 DAC960_LP_hw_mbox_new_cmd(base);
3064 while (!DAC960_LP_hw_mbox_status_available(base))
3065 udelay(1);
3066 status = DAC960_LP_read_cmd_status(base);
3067 DAC960_LP_ack_hw_mbox_intr(base);
3068 DAC960_LP_ack_hw_mbox_status(base);
3069
3070 return status;
3071}
3072
3073static int DAC960_LP_hw_init(struct pci_dev *pdev,
3074 struct myrs_hba *cs, void __iomem *base)
3075{
3076 int timeout = 0;
3077 unsigned char status, parm0, parm1;
3078
3079 DAC960_LP_disable_intr(base);
3080 DAC960_LP_ack_hw_mbox_status(base);
3081 udelay(1000);
3082 while (DAC960_LP_init_in_progress(base) &&
3083 timeout < MYRS_MAILBOX_TIMEOUT) {
3084 if (DAC960_LP_read_error_status(base, &status,
3085 &parm0, &parm1) &&
3086 myrs_err_status(cs, status, parm0, parm1))
3087 return -EIO;
3088 udelay(10);
3089 timeout++;
3090 }
3091 if (timeout == MYRS_MAILBOX_TIMEOUT) {
3092 dev_err(&pdev->dev,
3093 "Timeout waiting for Controller Initialisation\n");
3094 return -ETIMEDOUT;
3095 }
3096 if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
3097 dev_err(&pdev->dev,
3098 "Unable to Enable Memory Mailbox Interface\n");
3099 DAC960_LP_reset_ctrl(base);
3100 return -ENODEV;
3101 }
3102 DAC960_LP_enable_intr(base);
3103 cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
3104 cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
3105 cs->disable_intr = DAC960_LP_disable_intr;
3106 cs->reset = DAC960_LP_reset_ctrl;
3107
3108 return 0;
3109}
3110
3111static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
3112{
3113 struct myrs_hba *cs = arg;
3114 void __iomem *base = cs->io_base;
3115 struct myrs_stat_mbox *next_stat_mbox;
3116 unsigned long flags;
3117
3118 spin_lock_irqsave(&cs->queue_lock, flags);
3119 DAC960_LP_ack_intr(base);
3120 next_stat_mbox = cs->next_stat_mbox;
3121 while (next_stat_mbox->id > 0) {
3122 unsigned short id = next_stat_mbox->id;
3123 struct scsi_cmnd *scmd = NULL;
3124 struct myrs_cmdblk *cmd_blk = NULL;
3125
3126 if (id == MYRS_DCMD_TAG)
3127 cmd_blk = &cs->dcmd_blk;
3128 else if (id == MYRS_MCMD_TAG)
3129 cmd_blk = &cs->mcmd_blk;
3130 else {
3131 scmd = scsi_host_find_tag(cs->host, id - 3);
3132 if (scmd)
3133 cmd_blk = scsi_cmd_priv(scmd);
3134 }
3135 if (cmd_blk) {
3136 cmd_blk->status = next_stat_mbox->status;
3137 cmd_blk->sense_len = next_stat_mbox->sense_len;
3138 cmd_blk->residual = next_stat_mbox->residual;
3139 } else
3140 dev_err(&cs->pdev->dev,
3141 "Unhandled command completion %d\n", id);
3142
3143 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
3144 if (++next_stat_mbox > cs->last_stat_mbox)
3145 next_stat_mbox = cs->first_stat_mbox;
3146
3147 if (cmd_blk) {
3148 if (id < 3)
3149 myrs_handle_cmdblk(cs, cmd_blk);
3150 else
3151 myrs_handle_scsi(cs, cmd_blk, scmd);
3152 }
3153 }
3154 cs->next_stat_mbox = next_stat_mbox;
3155 spin_unlock_irqrestore(&cs->queue_lock, flags);
3156 return IRQ_HANDLED;
3157}
3158
3159struct myrs_privdata DAC960_LP_privdata = {
3160 .hw_init = DAC960_LP_hw_init,
3161 .irq_handler = DAC960_LP_intr_handler,
3162 .mmio_size = DAC960_LP_mmio_size,
3163};
3164
3165/*
3166 * Module functions
3167 */
3168static int
3169myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3170{
3171 struct myrs_hba *cs;
3172 int ret;
3173
3174 cs = myrs_detect(dev, entry);
3175 if (!cs)
3176 return -ENODEV;
3177
3178 ret = myrs_get_config(cs);
3179 if (ret < 0) {
3180 myrs_cleanup(cs);
3181 return ret;
3182 }
3183
3184 if (!myrs_create_mempools(dev, cs)) {
3185 ret = -ENOMEM;
3186 goto failed;
3187 }
3188
3189 ret = scsi_add_host(cs->host, &dev->dev);
3190 if (ret) {
3191 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3192 myrs_destroy_mempools(cs);
3193 goto failed;
3194 }
3195 scsi_scan_host(cs->host);
3196 return 0;
3197failed:
3198 myrs_cleanup(cs);
3199 return ret;
3200}
3201
3202
3203static void myrs_remove(struct pci_dev *pdev)
3204{
3205 struct myrs_hba *cs = pci_get_drvdata(pdev);
3206
3207 if (cs == NULL)
3208 return;
3209
3210 shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
3211 myrs_flush_cache(cs);
3212 myrs_destroy_mempools(cs);
3213 myrs_cleanup(cs);
3214}
3215
3216
3217static const struct pci_device_id myrs_id_table[] = {
3218 {
3219 PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
3220 PCI_DEVICE_ID_MYLEX_DAC960_GEM,
3221 PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
3222 .driver_data = (unsigned long) &DAC960_GEM_privdata,
3223 },
3224 {
3225 PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
3226 },
3227 {
3228 PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
3229 },
3230 {0, },
3231};
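/*
 * Each entry's .driver_data points at the board-specific
 * myrs_privdata; myrs_detect() casts it back to obtain the hw_init
 * routine, interrupt handler, and MMIO window size for the matched
 * family. PCI_DEVICE_DATA() is shorthand that fills in the
 * vendor/device ids and .driver_data in one initializer.
 */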
3232
3233MODULE_DEVICE_TABLE(pci, myrs_id_table);
3234
3235static struct pci_driver myrs_pci_driver = {
3236 .name = "myrs",
3237 .id_table = myrs_id_table,
3238 .probe = myrs_probe,
3239 .remove = myrs_remove,
3240};
3241
3242static int __init myrs_init_module(void)
3243{
3244 int ret;
3245
3246 myrs_raid_template = raid_class_attach(&myrs_raid_functions);
3247 if (!myrs_raid_template)
3248 return -ENODEV;
3249
3250 ret = pci_register_driver(&myrs_pci_driver);
3251 if (ret)
3252 raid_class_release(myrs_raid_template);
3253
3254 return ret;
3255}
3256
3257static void __exit myrs_cleanup_module(void)
3258{
3259 pci_unregister_driver(&myrs_pci_driver);
3260 raid_class_release(myrs_raid_template);
3261}
3262
3263module_init(myrs_init_module);
3264module_exit(myrs_cleanup_module);
3265
3266MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
3267MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3268MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
new file mode 100644
index 000000000000..e6702ee85e9f
--- /dev/null
+++ b/drivers/scsi/myrs.h
@@ -0,0 +1,1134 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4 *
5 * This driver supports the newer, SCSI-based firmware interface only.
6 *
7 * Copyright 2018 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
8 *
9 * Based on the original DAC960 driver, which has
10 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
11 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
12 */
13
14#ifndef _MYRS_H
15#define _MYRS_H
16
17#define MYRS_MAILBOX_TIMEOUT 1000000
18
19#define MYRS_DCMD_TAG 1
20#define MYRS_MCMD_TAG 2
21
22#define MYRS_LINE_BUFFER_SIZE 128
23
24#define MYRS_PRIMARY_MONITOR_INTERVAL (10 * HZ)
25#define MYRS_SECONDARY_MONITOR_INTERVAL (60 * HZ)
26
27/* Maximum number of Scatter/Gather Segments supported */
28#define MYRS_SG_LIMIT 128
29
30/*
31 * Number of Command and Status Mailboxes used by the
32 * DAC960 V2 Firmware Memory Mailbox Interface.
33 */
34#define MYRS_MAX_CMD_MBOX 512
35#define MYRS_MAX_STAT_MBOX 512
36
37#define MYRS_DCDB_SIZE 16
38#define MYRS_SENSE_SIZE 14
39
40/*
41 * DAC960 V2 Firmware Command Opcodes.
42 */
43enum myrs_cmd_opcode {
44 MYRS_CMD_OP_MEMCOPY = 0x01,
45 MYRS_CMD_OP_SCSI_10_PASSTHRU = 0x02,
46 MYRS_CMD_OP_SCSI_255_PASSTHRU = 0x03,
47 MYRS_CMD_OP_SCSI_10 = 0x04,
48 MYRS_CMD_OP_SCSI_256 = 0x05,
49 MYRS_CMD_OP_IOCTL = 0x20,
50} __packed;
51
52/*
53 * DAC960 V2 Firmware IOCTL Opcodes.
54 */
55enum myrs_ioctl_opcode {
56 MYRS_IOCTL_GET_CTLR_INFO = 0x01,
57 MYRS_IOCTL_GET_LDEV_INFO_VALID = 0x03,
58 MYRS_IOCTL_GET_PDEV_INFO_VALID = 0x05,
59 MYRS_IOCTL_GET_HEALTH_STATUS = 0x11,
60 MYRS_IOCTL_GET_EVENT = 0x15,
61 MYRS_IOCTL_START_DISCOVERY = 0x81,
62 MYRS_IOCTL_SET_DEVICE_STATE = 0x82,
63 MYRS_IOCTL_INIT_PDEV_START = 0x84,
64 MYRS_IOCTL_INIT_PDEV_STOP = 0x85,
65 MYRS_IOCTL_INIT_LDEV_START = 0x86,
66 MYRS_IOCTL_INIT_LDEV_STOP = 0x87,
67 MYRS_IOCTL_RBLD_DEVICE_START = 0x88,
68 MYRS_IOCTL_RBLD_DEVICE_STOP = 0x89,
69 MYRS_IOCTL_MAKE_CONSISTENT_START = 0x8A,
70 MYRS_IOCTL_MAKE_CONSISTENT_STOP = 0x8B,
71 MYRS_IOCTL_CC_START = 0x8C,
72 MYRS_IOCTL_CC_STOP = 0x8D,
73 MYRS_IOCTL_SET_MEM_MBOX = 0x8E,
74 MYRS_IOCTL_RESET_DEVICE = 0x90,
75 MYRS_IOCTL_FLUSH_DEVICE_DATA = 0x91,
76 MYRS_IOCTL_PAUSE_DEVICE = 0x92,
77 MYRS_IOCTL_UNPAUS_EDEVICE = 0x93,
78 MYRS_IOCTL_LOCATE_DEVICE = 0x94,
79 MYRS_IOCTL_CREATE_CONFIGURATION = 0xC0,
80 MYRS_IOCTL_DELETE_LDEV = 0xC1,
81 MYRS_IOCTL_REPLACE_INTERNALDEVICE = 0xC2,
82 MYRS_IOCTL_RENAME_LDEV = 0xC3,
83 MYRS_IOCTL_ADD_CONFIGURATION = 0xC4,
84 MYRS_IOCTL_XLATE_PDEV_TO_LDEV = 0xC5,
85 MYRS_IOCTL_CLEAR_CONFIGURATION = 0xCA,
86} __packed;
87
88/*
89 * DAC960 V2 Firmware Command Status Codes.
90 */
91#define MYRS_STATUS_SUCCESS 0x00
92#define MYRS_STATUS_FAILED 0x02
93#define MYRS_STATUS_DEVICE_BUSY 0x08
94#define MYRS_STATUS_DEVICE_NON_RESPONSIVE 0x0E
95#define MYRS_STATUS_DEVICE_NON_RESPONSIVE2 0x0F
96#define MYRS_STATUS_RESERVATION_CONFLICT 0x18
97
98/*
99 * DAC960 V2 Firmware Memory Type structure.
100 */
101struct myrs_mem_type {
102 enum {
103 MYRS_MEMTYPE_RESERVED = 0x00,
104 MYRS_MEMTYPE_DRAM = 0x01,
105 MYRS_MEMTYPE_EDRAM = 0x02,
106 MYRS_MEMTYPE_EDO = 0x03,
107 MYRS_MEMTYPE_SDRAM = 0x04,
108 MYRS_MEMTYPE_LAST = 0x1F,
109 } __packed mem_type:5; /* Byte 0 Bits 0-4 */
110 unsigned rsvd:1; /* Byte 0 Bit 5 */
111 unsigned mem_parity:1; /* Byte 0 Bit 6 */
112 unsigned mem_ecc:1; /* Byte 0 Bit 7 */
113};
114
115/*
116 * DAC960 V2 Firmware Processor Type structure.
117 */
118enum myrs_cpu_type {
119 MYRS_CPUTYPE_i960CA = 0x01,
120 MYRS_CPUTYPE_i960RD = 0x02,
121 MYRS_CPUTYPE_i960RN = 0x03,
122 MYRS_CPUTYPE_i960RP = 0x04,
123 MYRS_CPUTYPE_NorthBay = 0x05,
124 MYRS_CPUTYPE_StrongArm = 0x06,
125 MYRS_CPUTYPE_i960RM = 0x07,
126} __packed;
127
128/*
129 * DAC960 V2 Firmware Get Controller Info reply structure.
130 */
131struct myrs_ctlr_info {
132 unsigned char rsvd1; /* Byte 0 */
133 enum {
134 MYRS_SCSI_BUS = 0x00,
135 MYRS_Fibre_BUS = 0x01,
136 MYRS_PCI_BUS = 0x03
137 } __packed bus; /* Byte 1 */
138 enum {
139 MYRS_CTLR_DAC960E = 0x01,
140 MYRS_CTLR_DAC960M = 0x08,
141 MYRS_CTLR_DAC960PD = 0x10,
142 MYRS_CTLR_DAC960PL = 0x11,
143 MYRS_CTLR_DAC960PU = 0x12,
144 MYRS_CTLR_DAC960PE = 0x13,
145 MYRS_CTLR_DAC960PG = 0x14,
146 MYRS_CTLR_DAC960PJ = 0x15,
147 MYRS_CTLR_DAC960PTL0 = 0x16,
148 MYRS_CTLR_DAC960PR = 0x17,
149 MYRS_CTLR_DAC960PRL = 0x18,
150 MYRS_CTLR_DAC960PT = 0x19,
151 MYRS_CTLR_DAC1164P = 0x1A,
152 MYRS_CTLR_DAC960PTL1 = 0x1B,
153 MYRS_CTLR_EXR2000P = 0x1C,
154 MYRS_CTLR_EXR3000P = 0x1D,
155 MYRS_CTLR_ACCELERAID352 = 0x1E,
156 MYRS_CTLR_ACCELERAID170 = 0x1F,
157 MYRS_CTLR_ACCELERAID160 = 0x20,
158 MYRS_CTLR_DAC960S = 0x60,
159 MYRS_CTLR_DAC960SU = 0x61,
160 MYRS_CTLR_DAC960SX = 0x62,
161 MYRS_CTLR_DAC960SF = 0x63,
162 MYRS_CTLR_DAC960SS = 0x64,
163 MYRS_CTLR_DAC960FL = 0x65,
164 MYRS_CTLR_DAC960LL = 0x66,
165 MYRS_CTLR_DAC960FF = 0x67,
166 MYRS_CTLR_DAC960HP = 0x68,
167 MYRS_CTLR_RAIDBRICK = 0x69,
168 MYRS_CTLR_METEOR_FL = 0x6A,
169 MYRS_CTLR_METEOR_FF = 0x6B
170 } __packed ctlr_type; /* Byte 2 */
171 unsigned char rsvd2; /* Byte 3 */
172 unsigned short bus_speed_mhz; /* Bytes 4-5 */
173 unsigned char bus_width; /* Byte 6 */
174 unsigned char flash_code; /* Byte 7 */
175 unsigned char ports_present; /* Byte 8 */
176 unsigned char rsvd3[7]; /* Bytes 9-15 */
177 unsigned char bus_name[16]; /* Bytes 16-31 */
178 unsigned char ctlr_name[16]; /* Bytes 32-47 */
179 unsigned char rsvd4[16]; /* Bytes 48-63 */
180 /* Firmware Release Information */
181 unsigned char fw_major_version; /* Byte 64 */
182 unsigned char fw_minor_version; /* Byte 65 */
183 unsigned char fw_turn_number; /* Byte 66 */
184 unsigned char fw_build_number; /* Byte 67 */
185 unsigned char fw_release_day; /* Byte 68 */
186 unsigned char fw_release_month; /* Byte 69 */
187 unsigned char fw_release_year_hi; /* Byte 70 */
188 unsigned char fw_release_year_lo; /* Byte 71 */
189 /* Hardware Release Information */
190 unsigned char hw_rev; /* Byte 72 */
191 unsigned char rsvd5[3]; /* Bytes 73-75 */
192 unsigned char hw_release_day; /* Byte 76 */
193 unsigned char hw_release_month; /* Byte 77 */
194 unsigned char hw_release_year_hi; /* Byte 78 */
195 unsigned char hw_release_year_lo; /* Byte 79 */
196 /* Hardware Manufacturing Information */
197 unsigned char manuf_batch_num; /* Byte 80 */
198 unsigned char rsvd6; /* Byte 81 */
199 unsigned char manuf_plant_num; /* Byte 82 */
200 unsigned char rsvd7; /* Byte 83 */
201 unsigned char hw_manuf_day; /* Byte 84 */
202 unsigned char hw_manuf_month; /* Byte 85 */
203 unsigned char hw_manuf_year_hi; /* Byte 86 */
204 unsigned char hw_manuf_year_lo; /* Byte 87 */
205 unsigned char max_pd_per_xld; /* Byte 88 */
206 unsigned char max_ild_per_xld; /* Byte 89 */
207 unsigned short nvram_size_kb; /* Bytes 90-91 */
208 unsigned char max_xld; /* Byte 92 */
209 unsigned char rsvd8[3]; /* Bytes 93-95 */
210 /* Unique Information per Controller */
211 unsigned char serial_number[16]; /* Bytes 96-111 */
212 unsigned char rsvd9[16]; /* Bytes 112-127 */
213 /* Vendor Information */
214 unsigned char rsvd10[3]; /* Bytes 128-130 */
215 unsigned char oem_code; /* Byte 131 */
216 unsigned char vendor[16]; /* Bytes 132-147 */
217 /* Other Physical/Controller/Operation Information */
218 unsigned char bbu_present:1; /* Byte 148 Bit 0 */
219 unsigned char cluster_mode:1; /* Byte 148 Bit 1 */
220 unsigned char rsvd11:6; /* Byte 148 Bits 2-7 */
221 unsigned char rsvd12[3]; /* Bytes 149-151 */
222 /* Physical Device Scan Information */
223 unsigned char pscan_active:1; /* Byte 152 Bit 0 */
224 unsigned char rsvd13:7; /* Byte 152 Bits 1-7 */
225 unsigned char pscan_chan; /* Byte 153 */
226 unsigned char pscan_target; /* Byte 154 */
227 unsigned char pscan_lun; /* Byte 155 */
228 /* Maximum Command Data Transfer Sizes */
229 unsigned short max_transfer_size; /* Bytes 156-157 */
230 unsigned short max_sge; /* Bytes 158-159 */
231 /* Logical/Physical Device Counts */
232 unsigned short ldev_present; /* Bytes 160-161 */
233 unsigned short ldev_critical; /* Bytes 162-163 */
234 unsigned short ldev_offline; /* Bytes 164-165 */
235 unsigned short pdev_present; /* Bytes 166-167 */
236 unsigned short pdisk_present; /* Bytes 168-169 */
237 unsigned short pdisk_critical; /* Bytes 170-171 */
238 unsigned short pdisk_offline; /* Bytes 172-173 */
239 unsigned short max_tcq; /* Bytes 174-175 */
240 /* Channel and Target ID Information */
241 unsigned char physchan_present; /* Byte 176 */
242 unsigned char virtchan_present; /* Byte 177 */
243 unsigned char physchan_max; /* Byte 178 */
244 unsigned char virtchan_max; /* Byte 179 */
245 unsigned char max_targets[16]; /* Bytes 180-195 */
246 unsigned char rsvd14[12]; /* Bytes 196-207 */
247 /* Memory/Cache Information */
248 unsigned short mem_size_mb; /* Bytes 208-209 */
249 unsigned short cache_size_mb; /* Bytes 210-211 */
250 unsigned int valid_cache_bytes; /* Bytes 212-215 */
251 unsigned int dirty_cache_bytes; /* Bytes 216-219 */
252 unsigned short mem_speed_mhz; /* Bytes 220-221 */
253 unsigned char mem_data_width; /* Byte 222 */
254 struct myrs_mem_type mem_type; /* Byte 223 */
255 unsigned char cache_mem_type_name[16]; /* Bytes 224-239 */
256 /* Execution Memory Information */
257 unsigned short exec_mem_size_mb; /* Bytes 240-241 */
258 unsigned short exec_l2_cache_size_mb; /* Bytes 242-243 */
259 unsigned char rsvd15[8]; /* Bytes 244-251 */
260 unsigned short exec_mem_speed_mhz; /* Bytes 252-253 */
261 unsigned char exec_mem_data_width; /* Byte 254 */
262 struct myrs_mem_type exec_mem_type; /* Byte 255 */
263 unsigned char exec_mem_type_name[16]; /* Bytes 256-271 */
264 /* CPU Type Information */
265 struct { /* Bytes 272-335 */
266 unsigned short cpu_speed_mhz;
267 enum myrs_cpu_type cpu_type;
268 unsigned char cpu_count;
269 unsigned char rsvd16[12];
270 unsigned char cpu_name[16];
271 } __packed cpu[2];
272 /* Debugging/Profiling/Command Time Tracing Information */
273 unsigned short cur_prof_page_num; /* Bytes 336-337 */
274 unsigned short num_prof_waiters; /* Bytes 338-339 */
275 unsigned short cur_trace_page_num; /* Bytes 340-341 */
276 unsigned short num_trace_waiters; /* Bytes 342-343 */
277 unsigned char rsvd18[8]; /* Bytes 344-351 */
278 /* Error Counters on Physical Devices */
279 unsigned short pdev_bus_resets; /* Bytes 352-353 */
280 unsigned short pdev_parity_errors; /* Bytes 354-355 */
281 unsigned short pdev_soft_errors; /* Bytes 356-357 */
282 unsigned short pdev_cmds_failed; /* Bytes 358-359 */
283 unsigned short pdev_misc_errors; /* Bytes 360-361 */
284 unsigned short pdev_cmd_timeouts; /* Bytes 362-363 */
285 unsigned short pdev_sel_timeouts; /* Bytes 364-365 */
286 unsigned short pdev_retries_done; /* Bytes 366-367 */
287 unsigned short pdev_aborts_done; /* Bytes 368-369 */
288 unsigned short pdev_host_aborts_done; /* Bytes 370-371 */
289 unsigned short pdev_predicted_failures; /* Bytes 372-373 */
290 unsigned short pdev_host_cmds_failed; /* Bytes 374-375 */
291 unsigned short pdev_hard_errors; /* Bytes 376-377 */
292 unsigned char rsvd19[6]; /* Bytes 378-383 */
293 /* Error Counters on Logical Devices */
294 unsigned short ldev_soft_errors; /* Bytes 384-385 */
295 unsigned short ldev_cmds_failed; /* Bytes 386-387 */
296 unsigned short ldev_host_aborts_done; /* Bytes 388-389 */
297 unsigned char rsvd20[2]; /* Bytes 390-391 */
298 /* Error Counters on Controller */
299 unsigned short ctlr_mem_errors; /* Bytes 392-393 */
300 unsigned short ctlr_host_aborts_done; /* Bytes 394-395 */
301 unsigned char rsvd21[4]; /* Bytes 396-399 */
302 /* Long Duration Activity Information */
303 unsigned short bg_init_active; /* Bytes 400-401 */
304 unsigned short ldev_init_active; /* Bytes 402-403 */
305 unsigned short pdev_init_active; /* Bytes 404-405 */
306 unsigned short cc_active; /* Bytes 406-407 */
307 unsigned short rbld_active; /* Bytes 408-409 */
308 unsigned short exp_active; /* Bytes 410-411 */
309 unsigned short patrol_active; /* Bytes 412-413 */
310 unsigned char rsvd22[2]; /* Bytes 414-415 */
311 /* Flash ROM Information */
312 unsigned char flash_type; /* Byte 416 */
313 unsigned char rsvd23; /* Byte 417 */
314 unsigned short flash_size_MB; /* Bytes 418-419 */
315 unsigned int flash_limit; /* Bytes 420-423 */
316 unsigned int flash_count; /* Bytes 424-427 */
317 unsigned char rsvd24[4]; /* Bytes 428-431 */
318 unsigned char flash_type_name[16]; /* Bytes 432-447 */
319 /* Firmware Run Time Information */
320 unsigned char rbld_rate; /* Byte 448 */
321 unsigned char bg_init_rate; /* Byte 449 */
322 unsigned char fg_init_rate; /* Byte 450 */
323 unsigned char cc_rate; /* Byte 451 */
324 unsigned char rsvd25[4]; /* Bytes 452-455 */
325 unsigned int max_dp; /* Bytes 456-459 */
326 unsigned int free_dp; /* Bytes 460-463 */
327 unsigned int max_iop; /* Bytes 464-467 */
328 unsigned int free_iop; /* Bytes 468-471 */
329 unsigned short max_combined_len; /* Bytes 472-473 */
330 unsigned short num_cfg_groups; /* Bytes 474-475 */
331 unsigned installation_abort_status:1; /* Byte 476 Bit 0 */
332 unsigned maint_mode_status:1; /* Byte 476 Bit 1 */
333 unsigned rsvd26:6; /* Byte 476 Bits 2-7 */
334 unsigned char rsvd27[35]; /* Bytes 477-511 */
335 unsigned char rsvd28[512]; /* Bytes 512-1023 */
336};
337
338/*
339 * DAC960 V2 Firmware Device State type.
340 */
341enum myrs_devstate {
342 MYRS_DEVICE_UNCONFIGURED = 0x00,
343 MYRS_DEVICE_ONLINE = 0x01,
344 MYRS_DEVICE_REBUILD = 0x03,
345 MYRS_DEVICE_MISSING = 0x04,
346 MYRS_DEVICE_SUSPECTED_CRITICAL = 0x05,
347 MYRS_DEVICE_OFFLINE = 0x08,
348 MYRS_DEVICE_CRITICAL = 0x09,
349 MYRS_DEVICE_SUSPECTED_DEAD = 0x0C,
350 MYRS_DEVICE_COMMANDED_OFFLINE = 0x10,
351 MYRS_DEVICE_STANDBY = 0x21,
352 MYRS_DEVICE_INVALID_STATE = 0xFF,
353} __packed;
354
355/*
356 * DAC960 V2 RAID Levels
357 */
358enum myrs_raid_level {
359 MYRS_RAID_LEVEL0 = 0x0, /* RAID 0 */
360 MYRS_RAID_LEVEL1 = 0x1, /* RAID 1 */
361 MYRS_RAID_LEVEL3 = 0x3, /* RAID 3 right asymmetric parity */
362 MYRS_RAID_LEVEL5 = 0x5, /* RAID 5 right asymmetric parity */
363 MYRS_RAID_LEVEL6 = 0x6, /* RAID 6 (Mylex RAID 6) */
364 MYRS_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
365 MYRS_RAID_NEWSPAN = 0x8, /* New Mylex SPAN */
366 MYRS_RAID_LEVEL3F = 0x9, /* RAID 3 fixed parity */
367 MYRS_RAID_LEVEL3L = 0xb, /* RAID 3 left symmetric parity */
368 MYRS_RAID_SPAN = 0xc, /* current spanning implementation */
369 MYRS_RAID_LEVEL5L = 0xd, /* RAID 5 left symmetric parity */
370 MYRS_RAID_LEVELE = 0xe, /* RAID E (concatenation) */
371 MYRS_RAID_PHYSICAL = 0xf, /* physical device */
372} __packed;
373
374enum myrs_stripe_size {
375 MYRS_STRIPE_SIZE_0 = 0x0, /* no stripe (RAID 1, RAID 7, etc) */
376 MYRS_STRIPE_SIZE_512B = 0x1,
377 MYRS_STRIPE_SIZE_1K = 0x2,
378 MYRS_STRIPE_SIZE_2K = 0x3,
379 MYRS_STRIPE_SIZE_4K = 0x4,
380 MYRS_STRIPE_SIZE_8K = 0x5,
381 MYRS_STRIPE_SIZE_16K = 0x6,
382 MYRS_STRIPE_SIZE_32K = 0x7,
383 MYRS_STRIPE_SIZE_64K = 0x8,
384 MYRS_STRIPE_SIZE_128K = 0x9,
385 MYRS_STRIPE_SIZE_256K = 0xa,
386 MYRS_STRIPE_SIZE_512K = 0xb,
387 MYRS_STRIPE_SIZE_1M = 0xc,
388} __packed;
389
390enum myrs_cacheline_size {
391 MYRS_CACHELINE_ZERO = 0x0, /* caching cannot be enabled */
392 MYRS_CACHELINE_512B = 0x1,
393 MYRS_CACHELINE_1K = 0x2,
394 MYRS_CACHELINE_2K = 0x3,
395 MYRS_CACHELINE_4K = 0x4,
396 MYRS_CACHELINE_8K = 0x5,
397 MYRS_CACHELINE_16K = 0x6,
398 MYRS_CACHELINE_32K = 0x7,
399 MYRS_CACHELINE_64K = 0x8,
400} __packed;
401
402/*
403 * DAC960 V2 Firmware Get Logical Device Info reply structure.
404 */
405struct myrs_ldev_info {
406 unsigned char ctlr; /* Byte 0 */
407 unsigned char channel; /* Byte 1 */
408 unsigned char target; /* Byte 2 */
409 unsigned char lun; /* Byte 3 */
410 enum myrs_devstate dev_state; /* Byte 4 */
411 unsigned char raid_level; /* Byte 5 */
412 enum myrs_stripe_size stripe_size; /* Byte 6 */
413 enum myrs_cacheline_size cacheline_size; /* Byte 7 */
414 struct {
415 enum {
416 MYRS_READCACHE_DISABLED = 0x0,
417 MYRS_READCACHE_ENABLED = 0x1,
418 MYRS_READAHEAD_ENABLED = 0x2,
419 MYRS_INTELLIGENT_READAHEAD_ENABLED = 0x3,
420 MYRS_READCACHE_LAST = 0x7,
421 } __packed rce:3; /* Byte 8 Bits 0-2 */
422 enum {
423 MYRS_WRITECACHE_DISABLED = 0x0,
424 MYRS_LOGICALDEVICE_RO = 0x1,
425 MYRS_WRITECACHE_ENABLED = 0x2,
426 MYRS_INTELLIGENT_WRITECACHE_ENABLED = 0x3,
427 MYRS_WRITECACHE_LAST = 0x7,
428 } __packed wce:3; /* Byte 8 Bits 3-5 */
429 unsigned rsvd1:1; /* Byte 8 Bit 6 */
430 unsigned ldev_init_done:1; /* Byte 8 Bit 7 */
431 } ldev_control; /* Byte 8 */
432 /* Logical Device Operations Status */
433 unsigned char cc_active:1; /* Byte 9 Bit 0 */
434 unsigned char rbld_active:1; /* Byte 9 Bit 1 */
435 unsigned char bg_init_active:1; /* Byte 9 Bit 2 */
436 unsigned char fg_init_active:1; /* Byte 9 Bit 3 */
437 unsigned char migration_active:1; /* Byte 9 Bit 4 */
438 unsigned char patrol_active:1; /* Byte 9 Bit 5 */
439 unsigned char rsvd2:2; /* Byte 9 Bits 6-7 */
440 unsigned char raid5_writeupdate; /* Byte 10 */
441 unsigned char raid5_algo; /* Byte 11 */
442 unsigned short ldev_num; /* Bytes 12-13 */
443 /* BIOS Info */
444 unsigned char bios_disabled:1; /* Byte 14 Bit 0 */
445 unsigned char cdrom_boot:1; /* Byte 14 Bit 1 */
446 unsigned char drv_coercion:1; /* Byte 14 Bit 2 */
447 unsigned char write_same_disabled:1; /* Byte 14 Bit 3 */
448 unsigned char hba_mode:1; /* Byte 14 Bit 4 */
449 enum {
450 MYRS_GEOMETRY_128_32 = 0x0,
451 MYRS_GEOMETRY_255_63 = 0x1,
452 MYRS_GEOMETRY_RSVD1 = 0x2,
453 MYRS_GEOMETRY_RSVD2 = 0x3
454 } __packed drv_geom:2; /* Byte 14 Bits 5-6 */
455 unsigned char super_ra_enabled:1; /* Byte 14 Bit 7 */
456 unsigned char rsvd3; /* Byte 15 */
457 /* Error Counters */
458 unsigned short soft_errs; /* Bytes 16-17 */
459 unsigned short cmds_failed; /* Bytes 18-19 */
460 unsigned short cmds_aborted; /* Bytes 20-21 */
461 unsigned short deferred_write_errs; /* Bytes 22-23 */
462 unsigned int rsvd4; /* Bytes 24-27 */
463 unsigned int rsvd5; /* Bytes 28-31 */
464 /* Device Size Information */
465 unsigned short rsvd6; /* Bytes 32-33 */
466 unsigned short devsize_bytes; /* Bytes 34-35 */
467 unsigned int orig_devsize; /* Bytes 36-39 */
468 unsigned int cfg_devsize; /* Bytes 40-43 */
469 unsigned int rsvd7; /* Bytes 44-47 */
470 unsigned char ldev_name[32]; /* Bytes 48-79 */
471 unsigned char inquiry[36]; /* Bytes 80-115 */
472 unsigned char rsvd8[12]; /* Bytes 116-127 */
473 u64 last_read_lba; /* Bytes 128-135 */
474 u64 last_write_lba; /* Bytes 136-143 */
475 u64 cc_lba; /* Bytes 144-151 */
476 u64 rbld_lba; /* Bytes 152-159 */
477 u64 bg_init_lba; /* Bytes 160-167 */
478 u64 fg_init_lba; /* Bytes 168-175 */
479 u64 migration_lba; /* Bytes 176-183 */
480 u64 patrol_lba; /* Bytes 184-191 */
481 unsigned char rsvd9[64]; /* Bytes 192-255 */
482};
483
484/*
485 * DAC960 V2 Firmware Get Physical Device Info reply structure.
486 */
487struct myrs_pdev_info {
488 unsigned char rsvd1; /* Byte 0 */
489 unsigned char channel; /* Byte 1 */
490 unsigned char target; /* Byte 2 */
491 unsigned char lun; /* Byte 3 */
492 /* Configuration Status Bits */
493 unsigned char pdev_fault_tolerant:1; /* Byte 4 Bit 0 */
494 unsigned char pdev_connected:1; /* Byte 4 Bit 1 */
495 unsigned char pdev_local_to_ctlr:1; /* Byte 4 Bit 2 */
496 unsigned char rsvd2:5; /* Byte 4 Bits 3-7 */
497 /* Multiple Host/Controller Status Bits */
498 unsigned char remote_host_dead:1; /* Byte 5 Bit 0 */
499 unsigned char remove_ctlr_dead:1; /* Byte 5 Bit 1 */
500 unsigned char rsvd3:6; /* Byte 5 Bits 2-7 */
501 enum myrs_devstate dev_state; /* Byte 6 */
502 unsigned char nego_data_width; /* Byte 7 */
503 unsigned short nego_sync_rate; /* Bytes 8-9 */
504 /* Multiported Physical Device Information */
505 unsigned char num_ports; /* Byte 10 */
506 unsigned char drv_access_bitmap; /* Byte 11 */
507 unsigned int rsvd4; /* Bytes 12-15 */
508 unsigned char ip_address[16]; /* Bytes 16-31 */
509 unsigned short max_tags; /* Bytes 32-33 */
510 /* Physical Device Operations Status */
511 unsigned char cc_in_progress:1; /* Byte 34 Bit 0 */
512 unsigned char rbld_in_progress:1; /* Byte 34 Bit 1 */
513 unsigned char makecc_in_progress:1; /* Byte 34 Bit 2 */
514 unsigned char pdevinit_in_progress:1; /* Byte 34 Bit 3 */
515 unsigned char migration_in_progress:1; /* Byte 34 Bit 4 */
516 unsigned char patrol_in_progress:1; /* Byte 34 Bit 5 */
517 unsigned char rsvd5:2; /* Byte 34 Bits 6-7 */
518 unsigned char long_op_status; /* Byte 35 */
519 unsigned char parity_errs; /* Byte 36 */
520 unsigned char soft_errs; /* Byte 37 */
521 unsigned char hard_errs; /* Byte 38 */
522 unsigned char misc_errs; /* Byte 39 */
523 unsigned char cmd_timeouts; /* Byte 40 */
524 unsigned char retries; /* Byte 41 */
525 unsigned char aborts; /* Byte 42 */
526 unsigned char pred_failures; /* Byte 43 */
527 unsigned int rsvd6; /* Bytes 44-47 */
528 unsigned short rsvd7; /* Bytes 48-49 */
529 unsigned short devsize_bytes; /* Bytes 50-51 */
530 unsigned int orig_devsize; /* Bytes 52-55 */
531 unsigned int cfg_devsize; /* Bytes 56-59 */
532 unsigned int rsvd8; /* Bytes 60-63 */
533 unsigned char pdev_name[16]; /* Bytes 64-79 */
534 unsigned char rsvd9[16]; /* Bytes 80-95 */
535 unsigned char rsvd10[32]; /* Bytes 96-127 */
536 unsigned char inquiry[36]; /* Bytes 128-163 */
537 unsigned char rsvd11[20]; /* Bytes 164-183 */
538 unsigned char rsvd12[8]; /* Bytes 184-191 */
539 u64 last_read_lba; /* Bytes 192-199 */
540 u64 last_write_lba; /* Bytes 200-207 */
541 u64 cc_lba; /* Bytes 208-215 */
542 u64 rbld_lba; /* Bytes 216-223 */
543 u64 makecc_lba; /* Bytes 224-231 */
544 u64 devinit_lba; /* Bytes 232-239 */
545 u64 migration_lba; /* Bytes 240-247 */
546 u64 patrol_lba; /* Bytes 248-255 */
547 unsigned char rsvd13[256]; /* Bytes 256-511 */
548};
549
550/*
551 * DAC960 V2 Firmware Health Status Buffer structure.
552 */
553struct myrs_fwstat {
554 unsigned int uptime_usecs; /* Bytes 0-3 */
555 unsigned int uptime_msecs; /* Bytes 4-7 */
556 unsigned int seconds; /* Bytes 8-11 */
557 unsigned char rsvd1[4]; /* Bytes 12-15 */
558 unsigned int epoch; /* Bytes 16-19 */
559 unsigned char rsvd2[4]; /* Bytes 20-23 */
560 unsigned int dbg_msgbuf_idx; /* Bytes 24-27 */
561 unsigned int coded_msgbuf_idx; /* Bytes 28-31 */
562 unsigned int cur_timetrace_page; /* Bytes 32-35 */
563 unsigned int cur_prof_page; /* Bytes 36-39 */
564 unsigned int next_evseq; /* Bytes 40-43 */
565 unsigned char rsvd3[4]; /* Bytes 44-47 */
566 unsigned char rsvd4[16]; /* Bytes 48-63 */
567 unsigned char rsvd5[64]; /* Bytes 64-127 */
568};
569
570/*
571 * DAC960 V2 Firmware Get Event reply structure.
572 */
573struct myrs_event {
574 unsigned int ev_seq; /* Bytes 0-3 */
575 unsigned int ev_time; /* Bytes 4-7 */
576 unsigned int ev_code; /* Bytes 8-11 */
577 unsigned char rsvd1; /* Byte 12 */
578 unsigned char channel; /* Byte 13 */
579 unsigned char target; /* Byte 14 */
580 unsigned char lun; /* Byte 15 */
581 unsigned int rsvd2; /* Bytes 16-19 */
582 unsigned int ev_parm; /* Bytes 20-23 */
583 unsigned char sense_data[40]; /* Bytes 24-63 */
584};
585
586/*
587 * DAC960 V2 Firmware Command Control Bits structure.
588 */
589struct myrs_cmd_ctrl {
590 unsigned char fua:1; /* Byte 0 Bit 0 */
591 unsigned char disable_pgout:1; /* Byte 0 Bit 1 */
592 unsigned char rsvd1:1; /* Byte 0 Bit 2 */
593 unsigned char add_sge_mem:1; /* Byte 0 Bit 3 */
594 unsigned char dma_ctrl_to_host:1; /* Byte 0 Bit 4 */
595 unsigned char rsvd2:1; /* Byte 0 Bit 5 */
596 unsigned char no_autosense:1; /* Byte 0 Bit 6 */
597 unsigned char disc_prohibited:1; /* Byte 0 Bit 7 */
598};
599
600/*
601 * DAC960 V2 Firmware Command Timeout structure.
602 */
603struct myrs_cmd_tmo {
604 unsigned char tmo_val:6; /* Byte 0 Bits 0-5 */
605 enum {
606 MYRS_TMO_SCALE_SECONDS = 0,
607 MYRS_TMO_SCALE_MINUTES = 1,
608 MYRS_TMO_SCALE_HOURS = 2,
609 MYRS_TMO_SCALE_RESERVED = 3
610 } __packed tmo_scale:2; /* Byte 0 Bits 6-7 */
611};
612
613/*
614 * DAC960 V2 Firmware Physical Device structure.
615 */
616struct myrs_pdev {
617 unsigned char lun; /* Byte 0 */
618 unsigned char target; /* Byte 1 */
619 unsigned char channel:3; /* Byte 2 Bits 0-2 */
620 unsigned char ctlr:5; /* Byte 2 Bits 3-7 */
621} __packed;
622
623/*
624 * DAC960 V2 Firmware Logical Device structure.
625 */
626struct myrs_ldev {
627 unsigned short ldev_num; /* Bytes 0-1 */
628 unsigned char rsvd:3; /* Byte 2 Bits 0-2 */
629 unsigned char ctlr:5; /* Byte 2 Bits 3-7 */
630} __packed;
631
632/*
633 * DAC960 V2 Firmware Operation Device type.
634 */
635enum myrs_opdev {
636 MYRS_PHYSICAL_DEVICE = 0x00,
637 MYRS_RAID_DEVICE = 0x01,
638 MYRS_PHYSICAL_CHANNEL = 0x02,
639 MYRS_RAID_CHANNEL = 0x03,
640 MYRS_PHYSICAL_CONTROLLER = 0x04,
641 MYRS_RAID_CONTROLLER = 0x05,
642 MYRS_CONFIGURATION_GROUP = 0x10,
643 MYRS_ENCLOSURE = 0x11,
644} __packed;
645
646/*
647 * DAC960 V2 Firmware Translate Physical To Logical Device structure.
648 */
649struct myrs_devmap {
650 unsigned short ldev_num; /* Bytes 0-1 */
651 unsigned short rsvd; /* Bytes 2-3 */
652 unsigned char prev_boot_ctlr; /* Byte 4 */
653 unsigned char prev_boot_channel; /* Byte 5 */
654 unsigned char prev_boot_target; /* Byte 6 */
655 unsigned char prev_boot_lun; /* Byte 7 */
656};
657
658/*
659 * DAC960 V2 Firmware Scatter/Gather List Entry structure.
660 */
661struct myrs_sge {
662 u64 sge_addr; /* Bytes 0-7 */
663 u64 sge_count; /* Bytes 8-15 */
664};
665
666/*
667 * DAC960 V2 Firmware Data Transfer Memory Address structure.
668 */
669union myrs_sgl {
670 struct myrs_sge sge[2]; /* Bytes 0-31 */
671 struct {
672 unsigned short sge0_len; /* Bytes 0-1 */
673 unsigned short sge1_len; /* Bytes 2-3 */
674 unsigned short sge2_len; /* Bytes 4-5 */
675 unsigned short rsvd; /* Bytes 6-7 */
676 u64 sge0_addr; /* Bytes 8-15 */
677 u64 sge1_addr; /* Bytes 16-23 */
678 u64 sge2_addr; /* Bytes 24-31 */
679 } ext;
680};
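/*
 * Two encodings share these 32 bytes: sge[2] embeds up to two
 * scatter/gather entries directly in the command mailbox, while the
 * ext form (lengths up front, three 64-bit addresses behind) appears
 * to describe larger, externally allocated SGL tables, which is
 * presumably what the driver's sg_pool dma_pool and the MYRS_SG_LIMIT
 * bound above exist for.
 */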
681
682/*
683 * 64 Byte DAC960 V2 Firmware Command Mailbox structure.
684 */
685union myrs_cmd_mbox {
686 unsigned int words[16]; /* Words 0-15 */
687 struct {
688 unsigned short id; /* Bytes 0-1 */
689 enum myrs_cmd_opcode opcode; /* Byte 2 */
690 struct myrs_cmd_ctrl control; /* Byte 3 */
691 u32 dma_size:24; /* Bytes 4-6 */
692 unsigned char dma_num; /* Byte 7 */
693 u64 sense_addr; /* Bytes 8-15 */
694 unsigned int rsvd1:24; /* Bytes 16-18 */
695 struct myrs_cmd_tmo tmo; /* Byte 19 */
696 unsigned char sense_len; /* Byte 20 */
697 enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
698 unsigned char rsvd2[10]; /* Bytes 22-31 */
699 union myrs_sgl dma_addr; /* Bytes 32-63 */
700 } common;
701 struct {
702 unsigned short id; /* Bytes 0-1 */
703 enum myrs_cmd_opcode opcode; /* Byte 2 */
704 struct myrs_cmd_ctrl control; /* Byte 3 */
705 u32 dma_size; /* Bytes 4-7 */
706 u64 sense_addr; /* Bytes 8-15 */
707 struct myrs_pdev pdev; /* Bytes 16-18 */
708 struct myrs_cmd_tmo tmo; /* Byte 19 */
709 unsigned char sense_len; /* Byte 20 */
710 unsigned char cdb_len; /* Byte 21 */
711 unsigned char cdb[10]; /* Bytes 22-31 */
712 union myrs_sgl dma_addr; /* Bytes 32-63 */
713 } SCSI_10;
714 struct {
715 unsigned short id; /* Bytes 0-1 */
716 enum myrs_cmd_opcode opcode; /* Byte 2 */
717 struct myrs_cmd_ctrl control; /* Byte 3 */
718 u32 dma_size; /* Bytes 4-7 */
719 u64 sense_addr; /* Bytes 8-15 */
720 struct myrs_pdev pdev; /* Bytes 16-18 */
721 struct myrs_cmd_tmo tmo; /* Byte 19 */
722 unsigned char sense_len; /* Byte 20 */
723 unsigned char cdb_len; /* Byte 21 */
724 unsigned short rsvd; /* Bytes 22-23 */
725 u64 cdb_addr; /* Bytes 24-31 */
726 union myrs_sgl dma_addr; /* Bytes 32-63 */
727 } SCSI_255;
728 struct {
729 unsigned short id; /* Bytes 0-1 */
730 enum myrs_cmd_opcode opcode; /* Byte 2 */
731 struct myrs_cmd_ctrl control; /* Byte 3 */
732 u32 dma_size:24; /* Bytes 4-6 */
733 unsigned char dma_num; /* Byte 7 */
734 u64 sense_addr; /* Bytes 8-15 */
735 unsigned short rsvd1; /* Bytes 16-17 */
736 unsigned char ctlr_num; /* Byte 18 */
737 struct myrs_cmd_tmo tmo; /* Byte 19 */
738 unsigned char sense_len; /* Byte 20 */
739 enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
740 unsigned char rsvd2[10]; /* Bytes 22-31 */
741 union myrs_sgl dma_addr; /* Bytes 32-63 */
742 } ctlr_info;
743 struct {
744 unsigned short id; /* Bytes 0-1 */
745 enum myrs_cmd_opcode opcode; /* Byte 2 */
746 struct myrs_cmd_ctrl control; /* Byte 3 */
747 u32 dma_size:24; /* Bytes 4-6 */
748 unsigned char dma_num; /* Byte 7 */
749 u64 sense_addr; /* Bytes 8-15 */
750 struct myrs_ldev ldev; /* Bytes 16-18 */
751 struct myrs_cmd_tmo tmo; /* Byte 19 */
752 unsigned char sense_len; /* Byte 20 */
753 enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
754 unsigned char rsvd[10]; /* Bytes 22-31 */
755 union myrs_sgl dma_addr; /* Bytes 32-63 */
756 } ldev_info;
757 struct {
758 unsigned short id; /* Bytes 0-1 */
759 enum myrs_cmd_opcode opcode; /* Byte 2 */
760 struct myrs_cmd_ctrl control; /* Byte 3 */
761 u32 dma_size:24; /* Bytes 4-6 */
762 unsigned char dma_num; /* Byte 7 */
763 u64 sense_addr; /* Bytes 8-15 */
764 struct myrs_pdev pdev; /* Bytes 16-18 */
765 struct myrs_cmd_tmo tmo; /* Byte 19 */
766 unsigned char sense_len; /* Byte 20 */
767 enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
768 unsigned char rsvd[10]; /* Bytes 22-31 */
769 union myrs_sgl dma_addr; /* Bytes 32-63 */
770 } pdev_info;
771 struct {
772 unsigned short id; /* Bytes 0-1 */
773 enum myrs_cmd_opcode opcode; /* Byte 2 */
774 struct myrs_cmd_ctrl control; /* Byte 3 */
775 u32 dma_size:24; /* Bytes 4-6 */
776 unsigned char dma_num; /* Byte 7 */
777 u64 sense_addr; /* Bytes 8-15 */
778 unsigned short evnum_upper; /* Bytes 16-17 */
779 unsigned char ctlr_num; /* Byte 18 */
780 struct myrs_cmd_tmo tmo; /* Byte 19 */
781 unsigned char sense_len; /* Byte 20 */
782 enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
783 unsigned short evnum_lower; /* Bytes 22-23 */
784 unsigned char rsvd[8]; /* Bytes 24-31 */
785 union myrs_sgl dma_addr; /* Bytes 32-63 */
786 } get_event;
787 struct {
788 unsigned short id; /* Bytes 0-1 */
789 enum myrs_cmd_opcode opcode; /* Byte 2 */
790 struct myrs_cmd_ctrl control; /* Byte 3 */
791 u32 dma_size:24; /* Bytes 4-6 */
792 unsigned char dma_num; /* Byte 7 */
793 u64 sense_addr; /* Bytes 8-15 */
794 union {
795 struct myrs_ldev ldev; /* Bytes 16-18 */
796 struct myrs_pdev pdev; /* Bytes 16-18 */
797 };
798 struct myrs_cmd_tmo tmo; /* Byte 19 */
799 unsigned char sense_len; /* Byte 20 */
800 enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
801 enum myrs_devstate state; /* Byte 22 */
802 unsigned char rsvd[9]; /* Bytes 23-31 */
803 union myrs_sgl dma_addr; /* Bytes 32-63 */
804 } set_devstate;
805 struct {
806 unsigned short id; /* Bytes 0-1 */
807 enum myrs_cmd_opcode opcode; /* Byte 2 */
808 struct myrs_cmd_ctrl control; /* Byte 3 */
809 u32 dma_size:24; /* Bytes 4-6 */
810 unsigned char dma_num; /* Byte 7 */
811 u64 sense_addr; /* Bytes 8-15 */
812 struct myrs_ldev ldev; /* Bytes 16-18 */
813 struct myrs_cmd_tmo tmo; /* Byte 19 */
814 unsigned char sense_len; /* Byte 20 */
815 enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
816 unsigned char restore_consistency:1; /* Byte 22 Bit 0 */
817 unsigned char initialized_area_only:1; /* Byte 22 Bit 1 */
818 unsigned char rsvd1:6; /* Byte 22 Bits 2-7 */
819 unsigned char rsvd2[9]; /* Bytes 23-31 */
820 union myrs_sgl dma_addr; /* Bytes 32-63 */
821 } cc;
822 struct {
823 unsigned short id; /* Bytes 0-1 */
824 enum myrs_cmd_opcode opcode; /* Byte 2 */
825 struct myrs_cmd_ctrl control; /* Byte 3 */
826 unsigned char first_cmd_mbox_size_kb; /* Byte 4 */
827 unsigned char first_stat_mbox_size_kb; /* Byte 5 */
828 unsigned char second_cmd_mbox_size_kb; /* Byte 6 */
829 unsigned char second_stat_mbox_size_kb; /* Byte 7 */
830 u64 sense_addr; /* Bytes 8-15 */
831 unsigned int rsvd1:24; /* Bytes 16-18 */
832 struct myrs_cmd_tmo tmo; /* Byte 19 */
833 unsigned char sense_len; /* Byte 20 */
834 enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
835 unsigned char fwstat_buf_size_kb; /* Byte 22 */
836 unsigned char rsvd2; /* Byte 23 */
837 u64 fwstat_buf_addr; /* Bytes 24-31 */
838 u64 first_cmd_mbox_addr; /* Bytes 32-39 */
839 u64 first_stat_mbox_addr; /* Bytes 40-47 */
840 u64 second_cmd_mbox_addr; /* Bytes 48-55 */
841 u64 second_stat_mbox_addr; /* Bytes 56-63 */
842 } set_mbox;
843 struct {
844 unsigned short id; /* Bytes 0-1 */
845 enum myrs_cmd_opcode opcode; /* Byte 2 */
846 struct myrs_cmd_ctrl control; /* Byte 3 */
847 u32 dma_size:24; /* Bytes 4-6 */
848 unsigned char dma_num; /* Byte 7 */
849 u64 sense_addr; /* Bytes 8-15 */
850 struct myrs_pdev pdev; /* Bytes 16-18 */
851 struct myrs_cmd_tmo tmo; /* Byte 19 */
852 unsigned char sense_len; /* Byte 20 */
853 enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
854 enum myrs_opdev opdev; /* Byte 22 */
855 unsigned char rsvd[9]; /* Bytes 23-31 */
856 union myrs_sgl dma_addr; /* Bytes 32-63 */
857 } dev_op;
858};
859
860/*
861 * DAC960 V2 Firmware Controller Status Mailbox structure.
862 */
863struct myrs_stat_mbox {
864 unsigned short id; /* Bytes 0-1 */
865 unsigned char status; /* Byte 2 */
866 unsigned char sense_len; /* Byte 3 */
867 int residual; /* Bytes 4-7 */
868};
869
870struct myrs_cmdblk {
871 union myrs_cmd_mbox mbox;
872 unsigned char status;
873 unsigned char sense_len;
874 int residual;
875 struct completion *complete;
876 struct myrs_sge *sgl;
877 dma_addr_t sgl_addr;
878 unsigned char *dcdb;
879 dma_addr_t dcdb_dma;
880 unsigned char *sense;
881 dma_addr_t sense_addr;
882};
883
884/*
885 * DAC960 Driver Controller structure.
886 */
887struct myrs_hba {
888 void __iomem *io_base;
889 void __iomem *mmio_base;
890 phys_addr_t io_addr;
891 phys_addr_t pci_addr;
892 unsigned int irq;
893
894 unsigned char model_name[28];
895 unsigned char fw_version[12];
896
897 struct Scsi_Host *host;
898 struct pci_dev *pdev;
899
900 unsigned int epoch;
901 unsigned int next_evseq;
902 /* Monitor flags */
903 bool needs_update;
904 bool disable_enc_msg;
905
906 struct workqueue_struct *work_q;
907 char work_q_name[20];
908 struct delayed_work monitor_work;
909 unsigned long primary_monitor_time;
910 unsigned long secondary_monitor_time;
911
912 spinlock_t queue_lock;
913
914 struct dma_pool *sg_pool;
915 struct dma_pool *sense_pool;
916 struct dma_pool *dcdb_pool;
917
918 void (*write_cmd_mbox)(union myrs_cmd_mbox *next_mbox,
919 union myrs_cmd_mbox *cmd_mbox);
920 void (*get_cmd_mbox)(void __iomem *base);
921 void (*disable_intr)(void __iomem *base);
922 void (*reset)(void __iomem *base);
923
924 dma_addr_t cmd_mbox_addr;
925 size_t cmd_mbox_size;
926 union myrs_cmd_mbox *first_cmd_mbox;
927 union myrs_cmd_mbox *last_cmd_mbox;
928 union myrs_cmd_mbox *next_cmd_mbox;
929 union myrs_cmd_mbox *prev_cmd_mbox1;
930 union myrs_cmd_mbox *prev_cmd_mbox2;
931
932 dma_addr_t stat_mbox_addr;
933 size_t stat_mbox_size;
934 struct myrs_stat_mbox *first_stat_mbox;
935 struct myrs_stat_mbox *last_stat_mbox;
936 struct myrs_stat_mbox *next_stat_mbox;
937
938 struct myrs_cmdblk dcmd_blk;
939 struct myrs_cmdblk mcmd_blk;
940 struct mutex dcmd_mutex;
941
942 struct myrs_fwstat *fwstat_buf;
943 dma_addr_t fwstat_addr;
944
945 struct myrs_ctlr_info *ctlr_info;
946 struct mutex cinfo_mutex;
947
948 struct myrs_event *event_buf;
949};
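/*
 * Illustrative sketch, not part of myrs.h: how the status mailbox ring
 * declared above is typically drained. Entries are live while ->id is
 * non-zero; each is copied into its command block, cleared, and the
 * cursor wraps from last_stat_mbox back to first_stat_mbox.
 * myrs_lookup_cmdblk() is a hypothetical stand-in for the driver's
 * id-to-command-block lookup, not a real symbol.
 */
static void myrs_drain_stat_ring(struct myrs_hba *cs)
{
	struct myrs_stat_mbox *next = cs->next_stat_mbox;

	while (next->id > 0) {
		struct myrs_cmdblk *cmd_blk =
			myrs_lookup_cmdblk(cs, next->id); /* hypothetical */

		cmd_blk->status = next->status;
		cmd_blk->sense_len = next->sense_len;
		cmd_blk->residual = next->residual;
		memset(next, 0, sizeof(struct myrs_stat_mbox));
		if (++next > cs->last_stat_mbox)
			next = cs->first_stat_mbox;
	}
	cs->next_stat_mbox = next;
}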
950
951typedef unsigned char (*enable_mbox_t)(void __iomem *base, dma_addr_t addr);
952typedef int (*myrs_hwinit_t)(struct pci_dev *pdev,
953 struct myrs_hba *c, void __iomem *base);
954
955struct myrs_privdata {
956 myrs_hwinit_t hw_init;
957 irq_handler_t irq_handler;
958 unsigned int mmio_size;
959};
960
961/*
962 * DAC960 GEM Series Controller Interface Register Offsets.
963 */
964
965#define DAC960_GEM_mmio_size 0x600
966
967enum DAC960_GEM_reg_offset {
968 DAC960_GEM_IDB_READ_OFFSET = 0x214,
969 DAC960_GEM_IDB_CLEAR_OFFSET = 0x218,
970 DAC960_GEM_ODB_READ_OFFSET = 0x224,
971 DAC960_GEM_ODB_CLEAR_OFFSET = 0x228,
972 DAC960_GEM_IRQSTS_OFFSET = 0x208,
973 DAC960_GEM_IRQMASK_READ_OFFSET = 0x22C,
974 DAC960_GEM_IRQMASK_CLEAR_OFFSET = 0x230,
975 DAC960_GEM_CMDMBX_OFFSET = 0x510,
976 DAC960_GEM_CMDSTS_OFFSET = 0x518,
977 DAC960_GEM_ERRSTS_READ_OFFSET = 0x224,
978 DAC960_GEM_ERRSTS_CLEAR_OFFSET = 0x228,
979};
980
981/*
982 * DAC960 GEM Series Inbound Door Bell Register.
983 */
984#define DAC960_GEM_IDB_HWMBOX_NEW_CMD 0x01
985#define DAC960_GEM_IDB_HWMBOX_ACK_STS 0x02
986#define DAC960_GEM_IDB_GEN_IRQ 0x04
987#define DAC960_GEM_IDB_CTRL_RESET 0x08
988#define DAC960_GEM_IDB_MMBOX_NEW_CMD 0x10
989
990#define DAC960_GEM_IDB_HWMBOX_FULL 0x01
991#define DAC960_GEM_IDB_INIT_IN_PROGRESS 0x02
992
993/*
994 * DAC960 GEM Series Outbound Door Bell Register.
995 */
996#define DAC960_GEM_ODB_HWMBOX_ACK_IRQ 0x01
997#define DAC960_GEM_ODB_MMBOX_ACK_IRQ 0x02
998#define DAC960_GEM_ODB_HWMBOX_STS_AVAIL 0x01
999#define DAC960_GEM_ODB_MMBOX_STS_AVAIL 0x02
1000
1001/*
1002 * DAC960 GEM Series Interrupt Mask Register.
1003 */
1004#define DAC960_GEM_IRQMASK_HWMBOX_IRQ 0x01
1005#define DAC960_GEM_IRQMASK_MMBOX_IRQ 0x02
1006
1007/*
1008 * DAC960 GEM Series Error Status Register.
1009 */
1010#define DAC960_GEM_ERRSTS_PENDING 0x20
1011
1012/*
1013 * dma_addr_writeql is provided to write dma_addr_t types
1014 * to a 64-bit pci address space register. The controller
1015 * will accept having the register written as two 32-bit
1016 * values.
1017 *
1018 * In HIGHMEM kernels, dma_addr_t is a 64-bit value;
1019 * without HIGHMEM, dma_addr_t is a 32-bit value.
1020 *
1021 * The compiler should always fix up the assignment
1022 * to u.wq appropriately, depending upon the size of
1023 * dma_addr_t.
1024 */
1025static inline
1026void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
1027{
1028 union {
1029 u64 wq;
1030 uint wl[2];
1031 } u;
1032
1033 u.wq = addr;
1034
1035 writel(u.wl[0], write_address);
1036 writel(u.wl[1], write_address + 4);
1037}
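/*
 * Illustrative sketch, not part of myrs.h: a GEM-series caller, e.g. an
 * enable_mbox_t implementation, would hand the controller a 64-bit
 * mailbox address through the helper above; the low word lands at
 * DAC960_GEM_CMDMBX_OFFSET and the high word at the following register.
 */
static void myrs_gem_set_cmd_mbox(struct myrs_hba *cs, void __iomem *base)
{
	dma_addr_writeql(cs->cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
}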
1038
1039/*
1040 * DAC960 BA Series Controller Interface Register Offsets.
1041 */
1042
1043#define DAC960_BA_mmio_size 0x80
1044
1045enum DAC960_BA_reg_offset {
1046 DAC960_BA_IRQSTS_OFFSET = 0x30,
1047 DAC960_BA_IRQMASK_OFFSET = 0x34,
1048 DAC960_BA_CMDMBX_OFFSET = 0x50,
1049 DAC960_BA_CMDSTS_OFFSET = 0x58,
1050 DAC960_BA_IDB_OFFSET = 0x60,
1051 DAC960_BA_ODB_OFFSET = 0x61,
1052 DAC960_BA_ERRSTS_OFFSET = 0x63,
1053};
1054
1055/*
1056 * DAC960 BA Series Inbound Door Bell Register.
1057 */
1058#define DAC960_BA_IDB_HWMBOX_NEW_CMD 0x01
1059#define DAC960_BA_IDB_HWMBOX_ACK_STS 0x02
1060#define DAC960_BA_IDB_GEN_IRQ 0x04
1061#define DAC960_BA_IDB_CTRL_RESET 0x08
1062#define DAC960_BA_IDB_MMBOX_NEW_CMD 0x10
1063
1064#define DAC960_BA_IDB_HWMBOX_EMPTY 0x01
1065#define DAC960_BA_IDB_INIT_DONE 0x02
1066
1067/*
1068 * DAC960 BA Series Outbound Door Bell Register.
1069 */
1070#define DAC960_BA_ODB_HWMBOX_ACK_IRQ 0x01
1071#define DAC960_BA_ODB_MMBOX_ACK_IRQ 0x02
1072
1073#define DAC960_BA_ODB_HWMBOX_STS_AVAIL 0x01
1074#define DAC960_BA_ODB_MMBOX_STS_AVAIL 0x02
1075
1076/*
1077 * DAC960 BA Series Interrupt Mask Register.
1078 */
1079#define DAC960_BA_IRQMASK_DISABLE_IRQ 0x04
1080#define DAC960_BA_IRQMASK_DISABLEW_I2O 0x08
1081
1082/*
1083 * DAC960 BA Series Error Status Register.
1084 */
1085#define DAC960_BA_ERRSTS_PENDING 0x04
1086
1087/*
1088 * DAC960 LP Series Controller Interface Register Offsets.
1089 */
1090
1091#define DAC960_LP_mmio_size 0x80
1092
1093enum DAC960_LP_reg_offset {
1094 DAC960_LP_CMDMBX_OFFSET = 0x10,
1095 DAC960_LP_CMDSTS_OFFSET = 0x18,
1096 DAC960_LP_IDB_OFFSET = 0x20,
1097 DAC960_LP_ODB_OFFSET = 0x2C,
1098 DAC960_LP_ERRSTS_OFFSET = 0x2E,
1099 DAC960_LP_IRQSTS_OFFSET = 0x30,
1100 DAC960_LP_IRQMASK_OFFSET = 0x34,
1101};
1102
1103/*
1104 * DAC960 LP Series Inbound Door Bell Register.
1105 */
1106#define DAC960_LP_IDB_HWMBOX_NEW_CMD 0x01
1107#define DAC960_LP_IDB_HWMBOX_ACK_STS 0x02
1108#define DAC960_LP_IDB_GEN_IRQ 0x04
1109#define DAC960_LP_IDB_CTRL_RESET 0x08
1110#define DAC960_LP_IDB_MMBOX_NEW_CMD 0x10
1111
1112#define DAC960_LP_IDB_HWMBOX_FULL 0x01
1113#define DAC960_LP_IDB_INIT_IN_PROGRESS 0x02
1114
1115/*
1116 * DAC960 LP Series Outbound Door Bell Register.
1117 */
1118#define DAC960_LP_ODB_HWMBOX_ACK_IRQ 0x01
1119#define DAC960_LP_ODB_MMBOX_ACK_IRQ 0x02
1120
1121#define DAC960_LP_ODB_HWMBOX_STS_AVAIL 0x01
1122#define DAC960_LP_ODB_MMBOX_STS_AVAIL 0x02
1123
1124/*
1125 * DAC960 LP Series Interrupt Mask Register.
1126 */
1127#define DAC960_LP_IRQMASK_DISABLE_IRQ 0x04
1128
1129/*
1130 * DAC960 LP Series Error Status Register.
1131 */
1132#define DAC960_LP_ERRSTS_PENDING 0x04
1133
1134#endif /* _MYRS_H */
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 8620ac5d6e41..5aac3e801903 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -2638,7 +2638,7 @@ static int nsp32_detect(struct pci_dev *pdev)
 	/*
 	 * setup DMA
 	 */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
 		nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
 		goto scsi_unregister;
 	}
@@ -2646,7 +2646,9 @@ static int nsp32_detect(struct pci_dev *pdev)
 	/*
 	 * allocate autoparam DMA resource.
 	 */
-	data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
+	data->autoparam = dma_alloc_coherent(&pdev->dev,
+			sizeof(nsp32_autoparam), &(data->auto_paddr),
+			GFP_KERNEL);
 	if (data->autoparam == NULL) {
 		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
 		goto scsi_unregister;
@@ -2655,8 +2657,8 @@ static int nsp32_detect(struct pci_dev *pdev)
 	/*
 	 * allocate scatter-gather DMA resource.
 	 */
-	data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
-			&(data->sg_paddr));
+	data->sg_list = dma_alloc_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
+			&data->sg_paddr, GFP_KERNEL);
 	if (data->sg_list == NULL) {
 		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
 		goto free_autoparam;
@@ -2761,11 +2763,11 @@ static int nsp32_detect(struct pci_dev *pdev)
 	free_irq(host->irq, data);
 
 free_sg_list:
-	pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
-			data->sg_list, data->sg_paddr);
+	dma_free_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
+			data->sg_list, data->sg_paddr);
 
 free_autoparam:
-	pci_free_consistent(pdev, sizeof(nsp32_autoparam),
-			data->autoparam, data->auto_paddr);
+	dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam),
+			data->autoparam, data->auto_paddr);
 
 scsi_unregister:
@@ -2780,12 +2782,12 @@ static int nsp32_release(struct Scsi_Host *host)
 	nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
 
 	if (data->autoparam) {
-		pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
-				data->autoparam, data->auto_paddr);
+		dma_free_coherent(&data->Pci->dev, sizeof(nsp32_autoparam),
+				data->autoparam, data->auto_paddr);
 	}
 
 	if (data->sg_list) {
-		pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
-				data->sg_list, data->sg_paddr);
+		dma_free_coherent(&data->Pci->dev, NSP32_SG_TABLE_SIZE,
+				data->sg_list, data->sg_paddr);
 	}
 
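The nsp32 hunks above are representative of the generic-DMA-API conversions that dominate this pull: pci_{alloc,free}_consistent() and pci_set_dma_mask() become dma_{alloc,free}_coherent() and dma_set_mask() against &pdev->dev, with the allocation's gfp_t now explicit. A minimal sketch of the recurring pattern (the demo_* name is hypothetical, not from any driver here):

static void *demo_alloc_ring(struct pci_dev *pdev, size_t size,
			     dma_addr_t *dma)
{
	/* was: pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return NULL;
	/* was: pci_alloc_consistent(pdev, size, dma) — the generic call
	 * takes the struct device and an explicit GFP_KERNEL. */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}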
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 67b14576fff2..e19fa883376f 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -445,7 +445,7 @@ static void _put_request(struct request *rq)
 	 * code paths.
 	 */
 	if (unlikely(rq->bio))
-		blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
+		blk_mq_end_request(rq, BLK_STS_IOERR);
 	else
 		blk_put_request(rq);
 }
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 199527dbaaa1..48e0624ecc68 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -132,4 +132,12 @@ enum pm8001_hba_info_flags {
 	PM8001F_RUN_TIME	= (1U << 1),
 };
 
+/**
+ * Phy Status
+ */
+#define PHY_LINK_DISABLE	0x00
+#define PHY_LINK_DOWN		0x01
+#define PHY_STATE_LINK_UP_SPCV	0x2
+#define PHY_STATE_LINK_UP_SPC	0x1
+
 #endif
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 4dd6cad330e8..d0bb357034d8 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1479,6 +1479,12 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 	} else {
 		u32 producer_index;
 		void *pi_virt = circularQ->pi_virt;
+		/* spurious interrupt during setup if
+		 * kexec-ing and driver doing a doorbell access
+		 * with the pre-kexec oq interrupt setup
+		 */
+		if (!pi_virt)
+			break;
 		/* Update the producer index from SPC */
 		producer_index = pm8001_read_32(pi_virt);
 		circularQ->producer_index = cpu_to_le32(producer_index);
@@ -2414,7 +2420,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		sata_resp = &psataPayload->sata_resp[0];
 		resp = (struct ata_task_resp *)ts->buf;
 		if (t->ata_task.dma_xfer == 0 &&
-		    t->data_dir == PCI_DMA_FROMDEVICE) {
+		    t->data_dir == DMA_FROM_DEVICE) {
 			len = sizeof(struct pio_setup_fis);
 			PM8001_IO_DBG(pm8001_ha,
 				pm8001_printk("PIO read len = %d\n", len));
@@ -3810,7 +3816,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
3810 " status = %x\n", status)); 3816 " status = %x\n", status));
3811 if (status == 0) { 3817 if (status == 0) {
3812 phy->phy_state = 1; 3818 phy->phy_state = 1;
3813 if (pm8001_ha->flags == PM8001F_RUN_TIME) 3819 if (pm8001_ha->flags == PM8001F_RUN_TIME &&
3820 phy->enable_completion != NULL)
3814 complete(phy->enable_completion); 3821 complete(phy->enable_completion);
3815 } 3822 }
3816 break; 3823 break;
@@ -4196,12 +4203,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 	return ret;
 }
 
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
 static const u8 data_dir_flags[] = {
-	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
-	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
-	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
-	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
+	[DMA_BIDIRECTIONAL]	= DATA_DIR_BYRECIPIENT,	/* UNSPECIFIED */
+	[DMA_TO_DEVICE]		= DATA_DIR_OUT,		/* OUTBOUND */
+	[DMA_FROM_DEVICE]	= DATA_DIR_IN,		/* INBOUND */
+	[DMA_NONE]		= DATA_DIR_NONE,	/* NO TRANSFER */
 };
 void
 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
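The re-indexed table above lets the driver translate a SAS task's enum dma_data_direction into the controller's DATA_DIR_* encoding with a single lookup; a hedged sketch of the usual call site (the shift into the command's direction field follows the pm8001 convention):

	/* sketch: translate the generic direction into the IOMB encoding */
	u32 dir = data_dir_flags[task->data_dir] << 8;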
@@ -4248,13 +4255,13 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
 	 * DMA-map SMP request, response buffers
 	 */
 	sg_req = &task->smp_task.smp_req;
-	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
 	if (!elem)
 		return -ENOMEM;
 	req_len = sg_dma_len(sg_req);
 
 	sg_resp = &task->smp_task.smp_resp;
-	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
 	if (!elem) {
 		rc = -ENOMEM;
 		goto err_out;
@@ -4287,10 +4294,10 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
 
 err_out_2:
 	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
-			PCI_DMA_FROMDEVICE);
+			DMA_FROM_DEVICE);
 err_out:
 	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	return rc;
 }
 
@@ -4369,7 +4376,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
 	u32 opc = OPC_INB_SATA_HOST_OPSTART;
 	memset(&sata_cmd, 0, sizeof(sata_cmd));
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
-	if (task->data_dir == PCI_DMA_NONE) {
+	if (task->data_dir == DMA_NONE) {
 		ATAP = 0x04;  /* no data*/
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
 	} else if (likely(!task->ata_task.device_control_reg_update)) {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index e4867e690c84..6d91e2446542 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,10 +131,6 @@
 #define LINKRATE_30			(0x02 << 8)
 #define LINKRATE_60			(0x04 << 8)
 
-/* for phy state */
-
-#define PHY_STATE_LINK_UP_SPC		0x1
-
 /* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
 #define GSM_SM_BASE			0x4F0000
 struct mpi_msg_hdr{
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 7a697ca68501..d71e7e4ec29c 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -121,7 +121,7 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
 {
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-	phy->phy_state = 0;
+	phy->phy_state = PHY_LINK_DISABLE;
 	phy->pm8001_ha = pm8001_ha;
 	sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
 	sas_phy->class = SAS;
@@ -152,7 +152,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 
 	for (i = 0; i < USI_MAX_MEMCNT; i++) {
 		if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
-			pci_free_consistent(pm8001_ha->pdev,
+			dma_free_coherent(&pm8001_ha->pdev->dev,
 				(pm8001_ha->memoryMap.region[i].total_len +
 				pm8001_ha->memoryMap.region[i].alignment),
 				pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -501,30 +501,12 @@ static int pci_go_44(struct pci_dev *pdev)
 {
 	int rc;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
-		if (rc) {
-			rc = pci_set_consistent_dma_mask(pdev,
-				DMA_BIT_MASK(32));
-			if (rc) {
-				dev_printk(KERN_ERR, &pdev->dev,
-					"44-bit DMA enable failed\n");
-				return rc;
-			}
-		}
-	} else {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+	if (rc) {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc)
 			dev_printk(KERN_ERR, &pdev->dev,
 				"32-bit DMA enable failed\n");
-			return rc;
-		}
-		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				"32-bit consistent DMA enable failed\n");
-			return rc;
-		}
-	}
 	}
 	return rc;
 }
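The collapsed ladder works because dma_set_mask_and_coherent() sets both the streaming and the coherent DMA mask in one call; it is effectively shorthand for:

	rc = dma_set_mask(dev, mask);
	if (rc == 0)
		rc = dma_set_coherent_mask(dev, mask);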
@@ -1067,6 +1049,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
 	if (rc)
 		goto err_out_shost;
 	scsi_scan_host(pm8001_ha->shost);
+	pm8001_ha->flags = PM8001F_RUN_TIME;
 	return 0;
 
 err_out_shost:
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 947d6017d004..b3be49d41375 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
 	u64 align_offset = 0;
 	if (align)
 		align_offset = (dma_addr_t)align - 1;
-	mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
-					       &mem_dma_handle);
+	mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align,
+					     &mem_dma_handle, GFP_KERNEL);
 	if (!mem_virt_alloc) {
 		pm8001_printk("memory allocation error\n");
 		return -1;
@@ -157,9 +157,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
 	int rc = 0, phy_id = sas_phy->id;
 	struct pm8001_hba_info *pm8001_ha = NULL;
 	struct sas_phy_linkrates *rates;
+	struct sas_ha_struct *sas_ha;
+	struct pm8001_phy *phy;
 	DECLARE_COMPLETION_ONSTACK(completion);
 	unsigned long flags;
 	pm8001_ha = sas_phy->ha->lldd_ha;
+	phy = &pm8001_ha->phy[phy_id];
 	pm8001_ha->phy[phy_id].enable_completion = &completion;
 	switch (func) {
 	case PHY_FUNC_SET_LINK_RATE:
@@ -172,7 +175,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
 			pm8001_ha->phy[phy_id].maximum_linkrate =
 				rates->maximum_linkrate;
 		}
-		if (pm8001_ha->phy[phy_id].phy_state == 0) {
+		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
 			wait_for_completion(&completion);
 		}
@@ -180,7 +183,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
 			PHY_LINK_RESET);
 		break;
 	case PHY_FUNC_HARD_RESET:
-		if (pm8001_ha->phy[phy_id].phy_state == 0) {
+		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
 			wait_for_completion(&completion);
 		}
@@ -188,7 +191,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
 			PHY_HARD_RESET);
 		break;
 	case PHY_FUNC_LINK_RESET:
-		if (pm8001_ha->phy[phy_id].phy_state == 0) {
+		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
 			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
 			wait_for_completion(&completion);
 		}
@@ -200,6 +203,25 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
 			PHY_LINK_RESET);
 		break;
 	case PHY_FUNC_DISABLE:
+		if (pm8001_ha->chip_id != chip_8001) {
+			if (pm8001_ha->phy[phy_id].phy_state ==
+				PHY_STATE_LINK_UP_SPCV) {
+				sas_ha = pm8001_ha->sas;
+				sas_phy_disconnected(&phy->sas_phy);
+				sas_ha->notify_phy_event(&phy->sas_phy,
+					PHYE_LOSS_OF_SIGNAL);
+				phy->phy_attached = 0;
+			}
+		} else {
+			if (pm8001_ha->phy[phy_id].phy_state ==
+				PHY_STATE_LINK_UP_SPC) {
+				sas_ha = pm8001_ha->sas;
+				sas_phy_disconnected(&phy->sas_phy);
+				sas_ha->notify_phy_event(&phy->sas_phy,
+					PHYE_LOSS_OF_SIGNAL);
+				phy->phy_attached = 0;
+			}
+		}
 		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
 		break;
 	case PHY_FUNC_GET_EVENTS:
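The two arms added for PHY_FUNC_DISABLE are identical except for the link-up constant, so a later cleanup could fold them; an equivalent, untested sketch (behavior unchanged):

	case PHY_FUNC_DISABLE:
		if (phy->phy_state == (pm8001_ha->chip_id == chip_8001 ?
				PHY_STATE_LINK_UP_SPC :
				PHY_STATE_LINK_UP_SPCV)) {
			sas_ha = pm8001_ha->sas;
			sas_phy_disconnected(&phy->sas_phy);
			sas_ha->notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL);
			phy->phy_attached = 0;
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;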
@@ -374,6 +396,13 @@ static int pm8001_task_exec(struct sas_task *task,
 		return 0;
 	}
 	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
+	if (pm8001_ha->controller_fatal_error) {
+		struct task_status_struct *ts = &t->task_status;
+
+		ts->resp = SAS_TASK_UNDELIVERED;
+		t->task_done(t);
+		return 0;
+	}
 	PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
 	spin_lock_irqsave(&pm8001_ha->lock, flags);
 	do {
@@ -466,7 +495,7 @@ err_out:
 	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
 	if (!sas_protocol_ata(t->task_proto))
 		if (n_elem)
-			dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
+			dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
 				t->data_dir);
 out_done:
 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -504,9 +533,9 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
 	switch (task->task_proto) {
 	case SAS_PROTOCOL_SMP:
 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
-			PCI_DMA_FROMDEVICE);
+			DMA_FROM_DEVICE);
 		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 		break;
 
 	case SAS_PROTOCOL_SATA:
@@ -1020,13 +1049,11 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
 	struct pm8001_device *pm8001_dev;
 	struct pm8001_hba_info *pm8001_ha;
 	struct sas_phy *phy;
-	u32 device_id = 0;
 
 	if (!dev || !dev->lldd_dev)
 		return -1;
 
 	pm8001_dev = dev->lldd_dev;
-	device_id = pm8001_dev->device_id;
 	pm8001_ha = pm8001_find_ha_by_dev(dev);
 
 	PM8001_EH_DBG(pm8001_ha,
@@ -1159,7 +1186,6 @@ int pm8001_abort_task(struct sas_task *task)
 {
 	unsigned long flags;
 	u32 tag;
-	u32 device_id;
 	struct domain_device *dev ;
 	struct pm8001_hba_info *pm8001_ha;
 	struct scsi_lun lun;
@@ -1173,7 +1199,6 @@ int pm8001_abort_task(struct sas_task *task)
 	dev = task->dev;
 	pm8001_dev = dev->lldd_dev;
 	pm8001_ha = pm8001_find_ha_by_dev(dev);
-	device_id = pm8001_dev->device_id;
 	phy_id = pm8001_dev->attached_phy;
 	rc = pm8001_find_tag(task, &tag);
 	if (rc == 0) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 80b4dd6df0c2..f88b0d33c385 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -58,7 +58,7 @@
 #include "pm8001_defs.h"
 
 #define DRV_NAME		"pm80xx"
-#define DRV_VERSION		"0.1.38"
+#define DRV_VERSION		"0.1.39"
 #define PM8001_FAIL_LOGGING	0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING	0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING	0x04 /* discovery layer logging */
@@ -538,6 +538,7 @@ struct pm8001_hba_info {
 	u32			logging_level;
 	u32			fw_status;
 	u32			smp_exp_mode;
+	bool			controller_fatal_error;
 	const struct firmware 	*fw_image;
 	struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
 	u32			reset_in_progress;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 42f0405601ad..63e4f7d34d6c 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -577,6 +577,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
 	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+	/* Update Fatal error interrupt vector */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+					((pm8001_ha->number_of_intr - 1) << 8);
 	pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
 	pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
@@ -1110,6 +1113,9 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
 		return -EBUSY;
 	}
 
+	/* Initialize the controller fatal error flag */
+	pm8001_ha->controller_fatal_error = false;
+
 	/* Initialize pci space address eg: mpi offset */
 	init_pci_device_addresses(pm8001_ha);
 	init_default_table_values(pm8001_ha);
@@ -1218,13 +1224,17 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
 	u32 bootloader_state;
 	u32 ibutton0, ibutton1;
 
-	/* Check if MPI is in ready state to reset */
-	if (mpi_uninit_check(pm8001_ha) != 0) {
-		PM8001_FAIL_DBG(pm8001_ha,
-			pm8001_printk("MPI state is not ready\n"));
-		return -1;
+	/* Process MPI table uninitialization only if FW is ready */
+	if (!pm8001_ha->controller_fatal_error) {
+		/* Check if MPI is in ready state to reset */
+		if (mpi_uninit_check(pm8001_ha) != 0) {
+			regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+			PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+				"MPI state is not ready scratch1 :0x%x\n",
+				regval));
+			return -1;
+		}
 	}
-
 	/* checked for reset register normal state; 0x0 */
 	regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
 	PM8001_INIT_DBG(pm8001_ha,
@@ -2123,7 +2133,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		sata_resp = &psataPayload->sata_resp[0];
 		resp = (struct ata_task_resp *)ts->buf;
 		if (t->ata_task.dma_xfer == 0 &&
-		    t->data_dir == PCI_DMA_FROMDEVICE) {
+		    t->data_dir == DMA_FROM_DEVICE) {
 			len = sizeof(struct pio_setup_fis);
 			PM8001_IO_DBG(pm8001_ha,
 				pm8001_printk("PIO read len = %d\n", len));
@@ -3118,8 +3128,9 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
 				status, phy_id));
 	if (status == 0) {
-		phy->phy_state = 1;
-		if (pm8001_ha->flags == PM8001F_RUN_TIME)
+		phy->phy_state = PHY_LINK_DOWN;
+		if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+				phy->enable_completion != NULL)
 			complete(phy->enable_completion);
 	}
 	return 0;
@@ -3211,7 +3222,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			return 0;
 		}
 		phy->phy_attached = 0;
-		phy->phy_state = 0;
+		phy->phy_state = PHY_LINK_DISABLE;
 		break;
 	case HW_EVENT_PORT_INVALID:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3384,13 +3395,14 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	u32 status =
 		le32_to_cpu(pPayload->status);
 	u32 phyid =
-		le32_to_cpu(pPayload->phyid);
+		le32_to_cpu(pPayload->phyid) & 0xFF;
 	struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
 	PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("phy:0x%x status:0x%x\n",
 					phyid, status));
-	if (status == 0)
-		phy->phy_state = 0;
+	if (status == PHY_STOP_SUCCESS ||
+		status == PHY_STOP_ERR_DEVICE_ATTACHED)
+		phy->phy_state = PHY_LINK_DISABLE;
 	return 0;
 }
 
@@ -3752,6 +3764,46 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	}
 }
 
+static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
+{
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_SCRATCH_PAD_1:0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)));
+	PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
+			pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)));
+}
+
 static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 	struct outbound_queue_table *circularQ;
@@ -3759,10 +3811,28 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 	u8 uninitialized_var(bc);
 	u32 ret = MPI_IO_STATUS_FAIL;
 	unsigned long flags;
+	u32 regval;
 
+	if (vec == (pm8001_ha->number_of_intr - 1)) {
+		regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+		if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
+					SCRATCH_PAD_MIPSALL_READY) {
+			pm8001_ha->controller_fatal_error = true;
+			PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+				"Firmware Fatal error! Regval:0x%x\n", regval));
+			print_scratchpad_registers(pm8001_ha);
+			return ret;
+		}
+	}
 	spin_lock_irqsave(&pm8001_ha->lock, flags);
 	circularQ = &pm8001_ha->outbnd_q_tbl[vec];
 	do {
+		/* spurious interrupt during setup if kexec-ing and
+		 * driver doing a doorbell access w/ the pre-kexec oq
+		 * interrupt setup.
+		 */
+		if (!circularQ->pi_virt)
+			break;
 		ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
 		if (MPI_IO_STATUS_SUCCESS == ret) {
 			/* process the outbound message */
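The guard above runs only on the last interrupt vector, which update_main_config_table() now registers as the fatal-error vector, and latches controller_fatal_error once any firmware processor leaves the ready state. The readiness test amounts to the following predicate, assuming the SCRATCH_PAD_MIPSALL_READY mask added to pm80xx_hwi.h later in this series:

static inline bool pm80xx_all_mips_ready(u32 scratch_pad_1)
{
	return (scratch_pad_1 & SCRATCH_PAD_MIPSALL_READY) ==
		SCRATCH_PAD_MIPSALL_READY;
}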
@@ -3785,12 +3855,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 	return ret;
 }
 
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
 static const u8 data_dir_flags[] = {
-	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
-	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
-	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
-	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
+	[DMA_BIDIRECTIONAL]	= DATA_DIR_BYRECIPIENT,	/* UNSPECIFIED */
+	[DMA_TO_DEVICE]		= DATA_DIR_OUT,		/* OUTBOUND */
+	[DMA_FROM_DEVICE]	= DATA_DIR_IN,		/* INBOUND */
+	[DMA_NONE]		= DATA_DIR_NONE,	/* NO TRANSFER */
 };
 
 static void build_smp_cmd(u32 deviceID, __le32 hTag,
@@ -3832,13 +3902,13 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
 	 * DMA-map SMP request, response buffers
 	 */
 	sg_req = &task->smp_task.smp_req;
-	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
 	if (!elem)
 		return -ENOMEM;
 	req_len = sg_dma_len(sg_req);
 
 	sg_resp = &task->smp_task.smp_resp;
-	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
 	if (!elem) {
 		rc = -ENOMEM;
 		goto err_out;
@@ -3929,10 +3999,10 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
 
 err_out_2:
 	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
-			PCI_DMA_FROMDEVICE);
+			DMA_FROM_DEVICE);
 err_out:
 	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
-			PCI_DMA_TODEVICE);
+			DMA_TO_DEVICE);
 	return rc;
 }
 
@@ -4156,7 +4226,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
 	q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
 	circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
 
-	if (task->data_dir == PCI_DMA_NONE) {
+	if (task->data_dir == DMA_NONE) {
 		ATAP = 0x04;  /* no data*/
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
 	} else if (likely(!task->ata_task.device_control_reg_update)) {
@@ -4606,9 +4676,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
 void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
 	u32 length, u8 *buf)
 {
-	u32 page_code, i;
+	u32 i;
 
-	page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
 		mpi_set_phy_profile_req(pm8001_ha,
 			SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 889e69ce3689..84d7426441bf 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -170,6 +170,10 @@
 #define LINKRATE_60		(0x04 << 8)
 #define LINKRATE_120		(0x08 << 8)
 
+/*phy_stop*/
+#define PHY_STOP_SUCCESS		0x00
+#define PHY_STOP_ERR_DEVICE_ATTACHED	0x1046
+
 /* phy_profile */
 #define SAS_PHY_ANALOG_SETTINGS_PAGE	0x04
 #define PHY_DWORD_LENGTH		0xC
@@ -216,8 +220,6 @@
 #define SAS_DOPNRJT_RTRY_TMO	128
 #define SAS_COPNRJT_RTRY_TMO	128
 
-/* for phy state */
-#define PHY_STATE_LINK_UP_SPCV	0x2
 /*
   Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
   Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
@@ -1384,6 +1386,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
 #define SCRATCH_PAD_BOOT_LOAD_SUCCESS	0x0
 #define SCRATCH_PAD_IOP0_READY		0xC00
 #define SCRATCH_PAD_IOP1_READY		0x3000
+#define SCRATCH_PAD_MIPSALL_READY	(SCRATCH_PAD_IOP1_READY | \
+					SCRATCH_PAD_IOP0_READY | \
+					SCRATCH_PAD_RAAE_READY)
 
 /* boot loader state */
 #define SCRATCH_PAD1_BOOTSTATE_MASK	0x70	/* Bit 4-6 */
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 0a5dd5595dd3..d5a4f17fce51 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2855,12 +2855,12 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
 		   qedf->num_queues);
 
-	qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
+	qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
 	    qedf->num_queues * sizeof(struct qedf_glbl_q_params),
-	    &qedf->hw_p_cpuq);
+	    &qedf->hw_p_cpuq, GFP_KERNEL);
 
 	if (!qedf->p_cpuq) {
-		QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
+		QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
 		return 1;
 	}
 
@@ -2929,7 +2929,7 @@ static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
 
 	if (qedf->p_cpuq) {
 		size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
-		pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
+		dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
 		    qedf->hw_p_cpuq);
 	}
 
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e5bd035ebad0..105b0e4d7818 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -806,11 +806,11 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
 	memset(&qedi->pf_params.iscsi_pf_params, 0,
 	       sizeof(qedi->pf_params.iscsi_pf_params));
 
-	qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+	qedi->p_cpuq = dma_alloc_coherent(&qedi->pdev->dev,
 		qedi->num_queues * sizeof(struct qedi_glbl_q_params),
-		&qedi->hw_p_cpuq);
+		&qedi->hw_p_cpuq, GFP_KERNEL);
 	if (!qedi->p_cpuq) {
-		QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+		QEDI_ERR(&qedi->dbg_ctx, "dma_alloc_coherent fail\n");
 		rval = -1;
 		goto err_alloc_mem;
 	}
@@ -871,7 +871,7 @@ static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
 
 	if (qedi->p_cpuq) {
 		size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
-		pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+		dma_free_coherent(&qedi->pdev->dev, size, qedi->p_cpuq,
 				    qedi->hw_p_cpuq);
 	}
 
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 390775d5c918..15a50cc7e4b3 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1750,7 +1750,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
 	uint8_t *sp, *tbuf;
 	dma_addr_t p_tbuf;
 
-	tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
+	tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
 	if (!tbuf)
 		return -ENOMEM;
 #endif
@@ -1841,7 +1841,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
 
  out:
 #if DUMP_IT_BACK
-	pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
+	dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
 #endif
 	return err;
 }
@@ -4259,8 +4259,8 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	ha->devnum = devnum;	/* specifies microcode load address */
 
 #ifdef QLA_64BIT_PTR
-	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
-		if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+		if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
 			printk(KERN_WARNING "scsi(%li): Unable to set a "
 			       "suitable DMA mask - aborting\n", ha->host_no);
 			error = -ENODEV;
@@ -4270,7 +4270,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
 		ha->host_no);
 #else
-	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_WARNING "scsi(%li): Unable to set a "
 		       "suitable DMA mask - aborting\n", ha->host_no);
 		error = -ENODEV;
@@ -4278,17 +4278,17 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 #endif
 
-	ha->request_ring = pci_alloc_consistent(ha->pdev,
+	ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
 			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
-			&ha->request_dma);
+			&ha->request_dma, GFP_KERNEL);
 	if (!ha->request_ring) {
 		printk(KERN_INFO "qla1280: Failed to get request memory\n");
 		goto error_put_host;
 	}
 
-	ha->response_ring = pci_alloc_consistent(ha->pdev,
+	ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
 			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
-			&ha->response_dma);
+			&ha->response_dma, GFP_KERNEL);
 	if (!ha->response_ring) {
 		printk(KERN_INFO "qla1280: Failed to get response memory\n");
 		goto error_free_request_ring;
@@ -4370,11 +4370,11 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	release_region(host->io_port, 0xff);
 #endif
  error_free_response_ring:
-	pci_free_consistent(ha->pdev,
+	dma_free_coherent(&ha->pdev->dev,
 			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
 			ha->response_ring, ha->response_dma);
  error_free_request_ring:
-	pci_free_consistent(ha->pdev,
+	dma_free_coherent(&ha->pdev->dev,
 			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
 			ha->request_ring, ha->request_dma);
  error_put_host:
@@ -4404,10 +4404,10 @@ qla1280_remove_one(struct pci_dev *pdev)
 	release_region(host->io_port, 0xff);
 #endif
 
-	pci_free_consistent(ha->pdev,
+	dma_free_coherent(&ha->pdev->dev,
 			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
 			ha->request_ring, ha->request_dma);
-	pci_free_consistent(ha->pdev,
+	dma_free_coherent(&ha->pdev->dev,
 			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
 			ha->response_ring, ha->response_dma);
 
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4888b999e82f..b28f159fdaee 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -158,9 +158,17 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
 	if (!capable(CAP_SYS_ADMIN))
 		return 0;
 
+	mutex_lock(&ha->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&ha->optrom_mutex);
+		return -EAGAIN;
+	}
+
 	if (IS_NOCACHE_VPD_TYPE(ha))
 		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
 		    ha->nvram_size);
+	mutex_unlock(&ha->optrom_mutex);
+
 	return memory_read_from_buffer(buf, count, &off, ha->nvram,
 					ha->nvram_size);
 }
@@ -208,10 +216,17 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
 		return -EAGAIN;
 	}
 
+	mutex_lock(&ha->optrom_mutex);
+	if (qla2x00_chip_is_down(vha)) {
+		mutex_unlock(&vha->hw->optrom_mutex);
+		return -EAGAIN;
+	}
+
 	/* Write NVRAM. */
 	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
 	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
 	    count);
+	mutex_unlock(&ha->optrom_mutex);
 
 	ql_dbg(ql_dbg_user, vha, 0x7060,
 	    "Setting ISP_ABORT_NEEDED\n");
@@ -322,6 +337,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
322 size = ha->optrom_size - start; 337 size = ha->optrom_size - start;
323 338
324 mutex_lock(&ha->optrom_mutex); 339 mutex_lock(&ha->optrom_mutex);
340 if (qla2x00_chip_is_down(vha)) {
341 mutex_unlock(&ha->optrom_mutex);
342 return -EAGAIN;
343 }
325 switch (val) { 344 switch (val) {
326 case 0: 345 case 0:
327 if (ha->optrom_state != QLA_SREADING && 346 if (ha->optrom_state != QLA_SREADING &&
@@ -499,8 +518,14 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
499 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) 518 qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
500 faddr = ha->flt_region_vpd_sec << 2; 519 faddr = ha->flt_region_vpd_sec << 2;
501 520
521 mutex_lock(&ha->optrom_mutex);
522 if (qla2x00_chip_is_down(vha)) {
523 mutex_unlock(&ha->optrom_mutex);
524 return -EAGAIN;
525 }
502 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, 526 ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
503 ha->vpd_size); 527 ha->vpd_size);
528 mutex_unlock(&ha->optrom_mutex);
504 } 529 }
505 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size); 530 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
506} 531}
@@ -518,9 +543,6 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
518 if (unlikely(pci_channel_offline(ha->pdev))) 543 if (unlikely(pci_channel_offline(ha->pdev)))
519 return 0; 544 return 0;
520 545
521 if (qla2x00_chip_is_down(vha))
522 return 0;
523
524 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || 546 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
525 !ha->isp_ops->write_nvram) 547 !ha->isp_ops->write_nvram)
526 return 0; 548 return 0;
@@ -531,16 +553,25 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
531 return -EAGAIN; 553 return -EAGAIN;
532 } 554 }
533 555
556 mutex_lock(&ha->optrom_mutex);
557 if (qla2x00_chip_is_down(vha)) {
558 mutex_unlock(&ha->optrom_mutex);
559 return -EAGAIN;
560 }
561
534 /* Write NVRAM. */ 562 /* Write NVRAM. */
535 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count); 563 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
536 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count); 564 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
537 565
538 /* Update flash version information for 4Gb & above. */ 566 /* Update flash version information for 4Gb & above. */
539 if (!IS_FWI2_CAPABLE(ha)) 567 if (!IS_FWI2_CAPABLE(ha)) {
568 mutex_unlock(&ha->optrom_mutex);
540 return -EINVAL; 569 return -EINVAL;
570 }
541 571
542 tmp_data = vmalloc(256); 572 tmp_data = vmalloc(256);
543 if (!tmp_data) { 573 if (!tmp_data) {
574 mutex_unlock(&ha->optrom_mutex);
544 ql_log(ql_log_warn, vha, 0x706b, 575 ql_log(ql_log_warn, vha, 0x706b,
545 "Unable to allocate memory for VPD information update.\n"); 576 "Unable to allocate memory for VPD information update.\n");
546 return -ENOMEM; 577 return -ENOMEM;
@@ -548,6 +579,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
548 ha->isp_ops->get_flash_version(vha, tmp_data); 579 ha->isp_ops->get_flash_version(vha, tmp_data);
549 vfree(tmp_data); 580 vfree(tmp_data);
550 581
582 mutex_unlock(&ha->optrom_mutex);
583
551 return count; 584 return count;
552} 585}
553 586
@@ -573,10 +606,15 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
573 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE) 606 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
574 return 0; 607 return 0;
575 608
576 if (qla2x00_chip_is_down(vha)) 609 mutex_lock(&vha->hw->optrom_mutex);
610 if (qla2x00_chip_is_down(vha)) {
611 mutex_unlock(&vha->hw->optrom_mutex);
577 return 0; 612 return 0;
613 }
578 614
579 rval = qla2x00_read_sfp_dev(vha, buf, count); 615 rval = qla2x00_read_sfp_dev(vha, buf, count);
616 mutex_unlock(&vha->hw->optrom_mutex);
617
580 if (rval) 618 if (rval)
581 return -EIO; 619 return -EIO;
582 620
@@ -785,9 +823,11 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
785 823
786 if (unlikely(pci_channel_offline(ha->pdev))) 824 if (unlikely(pci_channel_offline(ha->pdev)))
787 return 0; 825 return 0;
788 826 mutex_lock(&vha->hw->optrom_mutex);
789 if (qla2x00_chip_is_down(vha)) 827 if (qla2x00_chip_is_down(vha)) {
828 mutex_unlock(&vha->hw->optrom_mutex);
790 return 0; 829 return 0;
830 }
791 831
792 if (ha->xgmac_data) 832 if (ha->xgmac_data)
793 goto do_read; 833 goto do_read;
@@ -795,6 +835,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
795 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, 835 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
796 &ha->xgmac_data_dma, GFP_KERNEL); 836 &ha->xgmac_data_dma, GFP_KERNEL);
797 if (!ha->xgmac_data) { 837 if (!ha->xgmac_data) {
838 mutex_unlock(&vha->hw->optrom_mutex);
798 ql_log(ql_log_warn, vha, 0x7076, 839 ql_log(ql_log_warn, vha, 0x7076,
799 "Unable to allocate memory for XGMAC read-data.\n"); 840 "Unable to allocate memory for XGMAC read-data.\n");
800 return 0; 841 return 0;
@@ -806,6 +847,8 @@ do_read:
806 847
807 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, 848 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
808 XGMAC_DATA_SIZE, &actual_size); 849 XGMAC_DATA_SIZE, &actual_size);
850
851 mutex_unlock(&vha->hw->optrom_mutex);
809 if (rval != QLA_SUCCESS) { 852 if (rval != QLA_SUCCESS) {
810 ql_log(ql_log_warn, vha, 0x7077, 853 ql_log(ql_log_warn, vha, 0x7077,
811 "Unable to read XGMAC data (%x).\n", rval); 854 "Unable to read XGMAC data (%x).\n", rval);
@@ -842,13 +885,16 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
842 885
843 if (ha->dcbx_tlv) 886 if (ha->dcbx_tlv)
844 goto do_read; 887 goto do_read;
845 888 mutex_lock(&vha->hw->optrom_mutex);
846 if (qla2x00_chip_is_down(vha)) 889 if (qla2x00_chip_is_down(vha)) {
890 mutex_unlock(&vha->hw->optrom_mutex);
847 return 0; 891 return 0;
892 }
848 893
849 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 894 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
850 &ha->dcbx_tlv_dma, GFP_KERNEL); 895 &ha->dcbx_tlv_dma, GFP_KERNEL);
851 if (!ha->dcbx_tlv) { 896 if (!ha->dcbx_tlv) {
897 mutex_unlock(&vha->hw->optrom_mutex);
852 ql_log(ql_log_warn, vha, 0x7078, 898 ql_log(ql_log_warn, vha, 0x7078,
853 "Unable to allocate memory for DCBX TLV read-data.\n"); 899 "Unable to allocate memory for DCBX TLV read-data.\n");
854 return -ENOMEM; 900 return -ENOMEM;
@@ -859,6 +905,9 @@ do_read:
859 905
860 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, 906 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
861 DCBX_TLV_DATA_SIZE); 907 DCBX_TLV_DATA_SIZE);
908
909 mutex_unlock(&vha->hw->optrom_mutex);
910
862 if (rval != QLA_SUCCESS) { 911 if (rval != QLA_SUCCESS) {
863 ql_log(ql_log_warn, vha, 0x7079, 912 ql_log(ql_log_warn, vha, 0x7079,
864 "Unable to read DCBX TLV (%x).\n", rval); 913 "Unable to read DCBX TLV (%x).\n", rval);
@@ -1159,6 +1208,34 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1159} 1208}
1160 1209
1161static ssize_t 1210static ssize_t
1211qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1212 char *buf)
1213{
1214 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1215
1216 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1217 vha->hw->last_zio_threshold);
1218}
1219
1220static ssize_t
1221qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1222 const char *buf, size_t count)
1223{
1224 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1225 int val = 0;
1226
1227 if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1228 return -EINVAL;
1229 if (sscanf(buf, "%d", &val) != 1)
1230 return -EINVAL;
1231 if (val < 0 || val > 256)
1232 return -ERANGE;
1233
1234 atomic_set(&vha->hw->zio_threshold, val);
1235 return strlen(buf);
1236}
1237
1238static ssize_t
1162qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, 1239qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1163 char *buf) 1240 char *buf)
1164{ 1241{
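
The new zio_threshold attribute is a conventional sysfs show/store pair: show prints hw->last_zio_threshold, the last value handed to the firmware, while store validates the input (ZIO mode 6 only, range 0 to 256) and publishes it with atomic_set() for the DPC thread to program later. A generic sketch of the same shape, assuming a hypothetical attribute named threshold and using kstrtoint() where the driver uses sscanf():

#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/kernel.h>

/* Sketch only; "threshold" is illustrative, not a qla2xxx symbol. */
static atomic_t threshold = ATOMIC_INIT(64);

static ssize_t threshold_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&threshold));
}

static ssize_t threshold_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int val;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;
	if (val < 0 || val > 256)
		return -ERANGE;

	atomic_set(&threshold, val);	/* consumer picks this up later */
	return count;
}
static DEVICE_ATTR_RW(threshold);

The driver returns strlen(buf) rather than count on success; both signal success to sysfs, count being the more common idiom.
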
@@ -1184,15 +1261,17 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1184 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1261 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1185 return -EPERM; 1262 return -EPERM;
1186 1263
1264 if (sscanf(buf, "%d", &val) != 1)
1265 return -EINVAL;
1266
1267 mutex_lock(&vha->hw->optrom_mutex);
1187 if (qla2x00_chip_is_down(vha)) { 1268 if (qla2x00_chip_is_down(vha)) {
1269 mutex_unlock(&vha->hw->optrom_mutex);
1188 ql_log(ql_log_warn, vha, 0x707a, 1270 ql_log(ql_log_warn, vha, 0x707a,
1189 "Abort ISP active -- ignoring beacon request.\n"); 1271 "Abort ISP active -- ignoring beacon request.\n");
1190 return -EBUSY; 1272 return -EBUSY;
1191 } 1273 }
1192 1274
1193 if (sscanf(buf, "%d", &val) != 1)
1194 return -EINVAL;
1195
1196 if (val) 1275 if (val)
1197 rval = ha->isp_ops->beacon_on(vha); 1276 rval = ha->isp_ops->beacon_on(vha);
1198 else 1277 else
@@ -1201,6 +1280,8 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1201 if (rval != QLA_SUCCESS) 1280 if (rval != QLA_SUCCESS)
1202 count = 0; 1281 count = 0;
1203 1282
1283 mutex_unlock(&vha->hw->optrom_mutex);
1284
1204 return count; 1285 return count;
1205} 1286}
1206 1287
@@ -1370,18 +1451,24 @@ qla2x00_thermal_temp_show(struct device *dev,
1370{ 1451{
1371 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1452 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1372 uint16_t temp = 0; 1453 uint16_t temp = 0;
1454 int rc;
1373 1455
1456 mutex_lock(&vha->hw->optrom_mutex);
1374 if (qla2x00_chip_is_down(vha)) { 1457 if (qla2x00_chip_is_down(vha)) {
1458 mutex_unlock(&vha->hw->optrom_mutex);
1375 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); 1459 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1376 goto done; 1460 goto done;
1377 } 1461 }
1378 1462
1379 if (vha->hw->flags.eeh_busy) { 1463 if (vha->hw->flags.eeh_busy) {
1464 mutex_unlock(&vha->hw->optrom_mutex);
1380 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n"); 1465 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1381 goto done; 1466 goto done;
1382 } 1467 }
1383 1468
1384 if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS) 1469 rc = qla2x00_get_thermal_temp(vha, &temp);
1470 mutex_unlock(&vha->hw->optrom_mutex);
1471 if (rc == QLA_SUCCESS)
1385 return scnprintf(buf, PAGE_SIZE, "%d\n", temp); 1472 return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1386 1473
1387done: 1474done:
@@ -1402,13 +1489,24 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1402 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); 1489 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1403 } 1490 }
1404 1491
1405 if (qla2x00_chip_is_down(vha)) 1492 mutex_lock(&vha->hw->optrom_mutex);
1493 if (qla2x00_chip_is_down(vha)) {
1494 mutex_unlock(&vha->hw->optrom_mutex);
1406 ql_log(ql_log_warn, vha, 0x707c, 1495 ql_log(ql_log_warn, vha, 0x707c,
1407 "ISP reset active.\n"); 1496 "ISP reset active.\n");
1408 else if (!vha->hw->flags.eeh_busy) 1497 goto out;
1409 rval = qla2x00_get_firmware_state(vha, state); 1498 } else if (vha->hw->flags.eeh_busy) {
1410 if (rval != QLA_SUCCESS) 1499 mutex_unlock(&vha->hw->optrom_mutex);
1500 goto out;
1501 }
1502
1503 rval = qla2x00_get_firmware_state(vha, state);
1504 mutex_unlock(&vha->hw->optrom_mutex);
1505out:
1506 if (rval != QLA_SUCCESS) {
1411 memset(state, -1, sizeof(state)); 1507 memset(state, -1, sizeof(state));
1508 rval = qla2x00_get_firmware_state(vha, state);
1509 }
1412 1510
1413 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 1511 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1414 state[0], state[1], state[2], state[3], state[4], state[5]); 1512 state[0], state[1], state[2], state[3], state[4], state[5]);
@@ -1534,6 +1632,433 @@ qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
1534 ha->max_speed_sup ? "32Gps" : "16Gps"); 1632 ha->max_speed_sup ? "32Gps" : "16Gps");
1535} 1633}
1536 1634
1635/* ----- */
1636
1637static ssize_t
1638qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1639{
1640 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1641 int len = 0;
1642
1643 len += scnprintf(buf + len, PAGE_SIZE-len,
1644 "Supported options: enabled | disabled | dual | exclusive\n");
1645
1646 /* --- */
1647 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1648
1649 switch (vha->qlini_mode) {
1650 case QLA2XXX_INI_MODE_EXCLUSIVE:
1651 len += scnprintf(buf + len, PAGE_SIZE-len,
1652 QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1653 break;
1654 case QLA2XXX_INI_MODE_DISABLED:
1655 len += scnprintf(buf + len, PAGE_SIZE-len,
1656 QLA2XXX_INI_MODE_STR_DISABLED);
1657 break;
1658 case QLA2XXX_INI_MODE_ENABLED:
1659 len += scnprintf(buf + len, PAGE_SIZE-len,
1660 QLA2XXX_INI_MODE_STR_ENABLED);
1661 break;
1662 case QLA2XXX_INI_MODE_DUAL:
1663 len += scnprintf(buf + len, PAGE_SIZE-len,
1664 QLA2XXX_INI_MODE_STR_DUAL);
1665 break;
1666 }
1667 len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1668
1669 return len;
1670}
1671
1672static char *mode_to_str[] = {
1673 "exclusive",
1674 "disabled",
1675 "enabled",
1676 "dual",
1677};
1678
1679#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
1680static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1681{
1682 int rc = 0;
1683 enum {
1684 NO_ACTION,
1685 MODE_CHANGE_ACCEPT,
1686 MODE_CHANGE_NO_ACTION,
1687 TARGET_STILL_ACTIVE,
1688 };
1689 int action = NO_ACTION;
1690 int set_mode = 0;
1691 u8 eo_toggle = 0; /* exchange offload flipped */
1692
1693 switch (vha->qlini_mode) {
1694 case QLA2XXX_INI_MODE_DISABLED:
1695 switch (op) {
1696 case QLA2XXX_INI_MODE_DISABLED:
1697 if (qla_tgt_mode_enabled(vha)) {
1698 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1699 vha->hw->flags.exchoffld_enabled)
1700 eo_toggle = 1;
1701 if (((vha->ql2xexchoffld !=
1702 vha->u_ql2xexchoffld) &&
1703 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1704 eo_toggle) {
1705 /*
1706 * The number of exchanges to be offloaded
1707 * was tweaked or the offload option was
1708 * flipped
1709 */
1710 action = MODE_CHANGE_ACCEPT;
1711 } else {
1712 action = MODE_CHANGE_NO_ACTION;
1713 }
1714 } else {
1715 action = MODE_CHANGE_NO_ACTION;
1716 }
1717 break;
1718 case QLA2XXX_INI_MODE_EXCLUSIVE:
1719 if (qla_tgt_mode_enabled(vha)) {
1720 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1721 vha->hw->flags.exchoffld_enabled)
1722 eo_toggle = 1;
1723 if (((vha->ql2xexchoffld !=
1724 vha->u_ql2xexchoffld) &&
1725 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1726 eo_toggle) {
1727 /*
1728 * The number of exchanges to be offloaded
1729 * was tweaked or the offload option was
1730 * flipped
1731 */
1732 action = MODE_CHANGE_ACCEPT;
1733 } else {
1734 action = MODE_CHANGE_NO_ACTION;
1735 }
1736 } else {
1737 action = MODE_CHANGE_ACCEPT;
1738 }
1739 break;
1740 case QLA2XXX_INI_MODE_DUAL:
1741 action = MODE_CHANGE_ACCEPT;
1742 /* active_mode is target only, reset it to dual */
1743 if (qla_tgt_mode_enabled(vha)) {
1744 set_mode = 1;
1745 action = MODE_CHANGE_ACCEPT;
1746 } else {
1747 action = MODE_CHANGE_NO_ACTION;
1748 }
1749 break;
1750
1751 case QLA2XXX_INI_MODE_ENABLED:
1752 if (qla_tgt_mode_enabled(vha))
1753 action = TARGET_STILL_ACTIVE;
1754 else {
1755 action = MODE_CHANGE_ACCEPT;
1756 set_mode = 1;
1757 }
1758 break;
1759 }
1760 break;
1761
1762 case QLA2XXX_INI_MODE_EXCLUSIVE:
1763 switch (op) {
1764 case QLA2XXX_INI_MODE_EXCLUSIVE:
1765 if (qla_tgt_mode_enabled(vha)) {
1766 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1767 vha->hw->flags.exchoffld_enabled)
1768 eo_toggle = 1;
1769 if (((vha->ql2xexchoffld !=
1770 vha->u_ql2xexchoffld) &&
1771 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1772 eo_toggle)
1773 /*
1774 * The number of exchanges to be offloaded
1775 * was tweaked or the offload option was
1776 * flipped
1777 */
1778 action = MODE_CHANGE_ACCEPT;
1779 else
1780 action = NO_ACTION;
1781 } else
1782 action = NO_ACTION;
1783
1784 break;
1785
1786 case QLA2XXX_INI_MODE_DISABLED:
1787 if (qla_tgt_mode_enabled(vha)) {
1788 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1789 vha->hw->flags.exchoffld_enabled)
1790 eo_toggle = 1;
1791 if (((vha->ql2xexchoffld !=
1792 vha->u_ql2xexchoffld) &&
1793 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1794 eo_toggle)
1795 action = MODE_CHANGE_ACCEPT;
1796 else
1797 action = MODE_CHANGE_NO_ACTION;
1798 } else
1799 action = MODE_CHANGE_NO_ACTION;
1800 break;
1801
1802 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
1803 if (qla_tgt_mode_enabled(vha)) {
1804 action = MODE_CHANGE_ACCEPT;
1805 set_mode = 1;
1806 } else
1807 action = MODE_CHANGE_ACCEPT;
1808 break;
1809
1810 case QLA2XXX_INI_MODE_ENABLED:
1811 if (qla_tgt_mode_enabled(vha))
1812 action = TARGET_STILL_ACTIVE;
1813 else {
1814 if (vha->hw->flags.fw_started)
1815 action = MODE_CHANGE_NO_ACTION;
1816 else
1817 action = MODE_CHANGE_ACCEPT;
1818 }
1819 break;
1820 }
1821 break;
1822
1823 case QLA2XXX_INI_MODE_ENABLED:
1824 switch (op) {
1825 case QLA2XXX_INI_MODE_ENABLED:
1826 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
1827 vha->hw->flags.exchoffld_enabled)
1828 eo_toggle = 1;
1829 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
1830 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
1831 eo_toggle)
1832 action = MODE_CHANGE_ACCEPT;
1833 else
1834 action = NO_ACTION;
1835 break;
1836 case QLA2XXX_INI_MODE_DUAL:
1837 case QLA2XXX_INI_MODE_DISABLED:
1838 action = MODE_CHANGE_ACCEPT;
1839 break;
1840 default:
1841 action = MODE_CHANGE_NO_ACTION;
1842 break;
1843 }
1844 break;
1845
1846 case QLA2XXX_INI_MODE_DUAL:
1847 switch (op) {
1848 case QLA2XXX_INI_MODE_DUAL:
1849 if (qla_tgt_mode_enabled(vha) ||
1850 qla_dual_mode_enabled(vha)) {
1851 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
1852 vha->u_ql2xiniexchg) !=
1853 vha->hw->flags.exchoffld_enabled)
1854 eo_toggle = 1;
1855
1856 if ((((vha->ql2xexchoffld +
1857 vha->ql2xiniexchg) !=
1858 (vha->u_ql2xiniexchg +
1859 vha->u_ql2xexchoffld)) &&
1860 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
1861 vha->u_ql2xexchoffld)) || eo_toggle)
1862 action = MODE_CHANGE_ACCEPT;
1863 else
1864 action = NO_ACTION;
1865 } else {
1866 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
1867 vha->u_ql2xiniexchg) !=
1868 vha->hw->flags.exchoffld_enabled)
1869 eo_toggle = 1;
1870
1871 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
1872 != (vha->u_ql2xiniexchg +
1873 vha->u_ql2xexchoffld)) &&
1874 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
1875 vha->u_ql2xexchoffld)) || eo_toggle)
1876 action = MODE_CHANGE_NO_ACTION;
1877 else
1878 action = NO_ACTION;
1879 }
1880 break;
1881
1882 case QLA2XXX_INI_MODE_DISABLED:
1883 if (qla_tgt_mode_enabled(vha) ||
1884 qla_dual_mode_enabled(vha)) {
1885 /* turning off initiator mode */
1886 set_mode = 1;
1887 action = MODE_CHANGE_ACCEPT;
1888 } else {
1889 action = MODE_CHANGE_NO_ACTION;
1890 }
1891 break;
1892
1893 case QLA2XXX_INI_MODE_EXCLUSIVE:
1894 if (qla_tgt_mode_enabled(vha) ||
1895 qla_dual_mode_enabled(vha)) {
1896 set_mode = 1;
1897 action = MODE_CHANGE_ACCEPT;
1898 } else {
1899 action = MODE_CHANGE_ACCEPT;
1900 }
1901 break;
1902
1903 case QLA2XXX_INI_MODE_ENABLED:
1904 if (qla_tgt_mode_enabled(vha) ||
1905 qla_dual_mode_enabled(vha)) {
1906 action = TARGET_STILL_ACTIVE;
1907 } else {
1908 action = MODE_CHANGE_ACCEPT;
1909 }
1910 }
1911 break;
1912 }
1913
1914 switch (action) {
1915 case MODE_CHANGE_ACCEPT:
1916 ql_log(ql_log_warn, vha, 0xffff,
1917 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
1918 mode_to_str[vha->qlini_mode], mode_to_str[op],
1919 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
1920 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
1921
1922 vha->qlini_mode = op;
1923 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
1924 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
1925 if (set_mode)
1926 qlt_set_mode(vha);
1927 vha->flags.online = 1;
1928 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1929 break;
1930
1931 case MODE_CHANGE_NO_ACTION:
1932 ql_log(ql_log_warn, vha, 0xffff,
1933 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
1934 mode_to_str[vha->qlini_mode], mode_to_str[op],
1935 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
1936 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
1937 vha->qlini_mode = op;
1938 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
1939 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
1940 break;
1941
1942 case TARGET_STILL_ACTIVE:
1943 ql_log(ql_log_warn, vha, 0xffff,
1944 "Target Mode is active. Unable to change Mode.\n");
1945 break;
1946
1947 case NO_ACTION:
1948 default:
1949 ql_log(ql_log_warn, vha, 0xffff,
1950 "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
1951 vha->qlini_mode, op,
1952 vha->ql2xexchoffld, vha->u_ql2xexchoffld);
1953 break;
1954 }
1955
1956 return rc;
1957}
1958
1959static ssize_t
1960qlini_mode_store(struct device *dev, struct device_attribute *attr,
1961 const char *buf, size_t count)
1962{
1963 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1964 int ini;
1965
1966 if (!buf)
1967 return -EINVAL;
1968
1969 if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
1970 strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
1971 ini = QLA2XXX_INI_MODE_EXCLUSIVE;
1972 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
1973 strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
1974 ini = QLA2XXX_INI_MODE_DISABLED;
1975 else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
1976 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
1977 ini = QLA2XXX_INI_MODE_ENABLED;
1978 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
1979 strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
1980 ini = QLA2XXX_INI_MODE_DUAL;
1981 else
1982 return -EINVAL;
1983
1984 qla_set_ini_mode(vha, ini);
1985 return strlen(buf);
1986}
1987
1988static ssize_t
1989ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
1990 char *buf)
1991{
1992 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1993 int len = 0;
1994
1995 len += scnprintf(buf + len, PAGE_SIZE-len,
1996 "target exchange: new %d : current: %d\n\n",
1997 vha->u_ql2xexchoffld, vha->ql2xexchoffld);
1998
1999 len += scnprintf(buf + len, PAGE_SIZE-len,
2000 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2001 vha->host_no);
2002
2003 return len;
2004}
2005
2006static ssize_t
2007ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2008 const char *buf, size_t count)
2009{
2010 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2011 int val = 0;
2012
2013 if (sscanf(buf, "%d", &val) != 1)
2014 return -EINVAL;
2015
2016 if (val > FW_MAX_EXCHANGES_CNT)
2017 val = FW_MAX_EXCHANGES_CNT;
2018 else if (val < 0)
2019 val = 0;
2020
2021 vha->u_ql2xexchoffld = val;
2022 return strlen(buf);
2023}
2024
2025static ssize_t
2026ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2027 char *buf)
2028{
2029 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2030 int len = 0;
2031
2032 len += scnprintf(buf + len, PAGE_SIZE-len,
2033 "target exchange: new %d : current: %d\n\n",
2034 vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2035
2036 len += scnprintf(buf + len, PAGE_SIZE-len,
2037 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2038 vha->host_no);
2039
2040 return len;
2041}
2042
2043static ssize_t
2044ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2045 const char *buf, size_t count)
2046{
2047 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2048 int val = 0;
2049
2050 if (sscanf(buf, "%d", &val) != 1)
2051 return -EINVAL;
2052
2053 if (val > FW_MAX_EXCHANGES_CNT)
2054 val = FW_MAX_EXCHANGES_CNT;
2055 else if (val < 0)
2056 val = 0;
2057
2058 vha->u_ql2xiniexchg = val;
2059 return strlen(buf);
2060}
2061
1537static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 2062static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1538static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 2063static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1539static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 2064static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
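
qlini_mode_store() above maps the written keyword onto a mode by prefix match, strncasecmp(keyword, buf, strlen(keyword)), so any string beginning with a known keyword is accepted. A standalone sketch of that parsing step (table and return values illustrative; the driver returns the QLA2XXX_INI_MODE_* constants):

#include <string.h>
#include <strings.h>

/* Order mirrors mode_to_str[] in the hunk above. */
static const char *mode_str[] = { "exclusive", "disabled", "enabled", "dual" };

/* Return the table index whose keyword prefixes buf, or -1 (-EINVAL). */
static int parse_mode(const char *buf)
{
	size_t i;

	for (i = 0; i < sizeof(mode_str) / sizeof(mode_str[0]); i++)
		if (strncasecmp(mode_str[i], buf, strlen(mode_str[i])) == 0)
			return (int)i;
	return -1;
}

Once parsed, qla_set_ini_mode() runs the old-mode/new-mode state machine; on MODE_CHANGE_ACCEPT it copies the user-staged exchange counts into the live fields and sets ISP_ABORT_NEEDED so the firmware is re-initialized with the new personality.
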
@@ -1581,6 +2106,13 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
1581static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL); 2106static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
1582static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL); 2107static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
1583static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL); 2108static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
2109static DEVICE_ATTR(zio_threshold, 0644,
2110 qla_zio_threshold_show,
2111 qla_zio_threshold_store);
2112static DEVICE_ATTR_RW(qlini_mode);
2113static DEVICE_ATTR_RW(ql2xexchoffld);
2114static DEVICE_ATTR_RW(ql2xiniexchg);
2115
1584 2116
1585struct device_attribute *qla2x00_host_attrs[] = { 2117struct device_attribute *qla2x00_host_attrs[] = {
1586 &dev_attr_driver_version, 2118 &dev_attr_driver_version,
@@ -1617,9 +2149,28 @@ struct device_attribute *qla2x00_host_attrs[] = {
1617 &dev_attr_pep_version, 2149 &dev_attr_pep_version,
1618 &dev_attr_min_link_speed, 2150 &dev_attr_min_link_speed,
1619 &dev_attr_max_speed_sup, 2151 &dev_attr_max_speed_sup,
2152 &dev_attr_zio_threshold,
2153 NULL, /* reserve for qlini_mode */
2154 NULL, /* reserve for ql2xiniexchg */
2155 NULL, /* reserve for ql2xexchoffld */
1620 NULL, 2156 NULL,
1621}; 2157};
1622 2158
2159void qla_insert_tgt_attrs(void)
2160{
2161 struct device_attribute **attr;
2162
2163 /* advance to empty slot */
2164 for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2165 continue;
2166
2167 *attr = &dev_attr_qlini_mode;
2168 attr++;
2169 *attr = &dev_attr_ql2xiniexchg;
2170 attr++;
2171 *attr = &dev_attr_ql2xexchoffld;
2172}
2173
1623/* Host attributes. */ 2174/* Host attributes. */
1624 2175
1625static void 2176static void
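
qla2x00_host_attrs[] reserves three NULL slots ahead of its terminator, and qla_insert_tgt_attrs() walks to the first NULL and fills them in, so the target-mode attributes are only published when the target code calls the helper. A sketch of that reserved-slot idiom, using generic strings instead of struct device_attribute pointers:

#include <stddef.h>

/* Sketch only; the real array holds struct device_attribute pointers. */
static const char *attrs[] = {
	"driver_version",
	"fw_version",
	NULL,		/* reserved slot */
	NULL,		/* reserved slot */
	NULL,		/* terminator */
};

static void insert_extra_attrs(void)
{
	const char **attr;

	/* advance to the first empty (reserved) slot */
	for (attr = &attrs[0]; *attr; ++attr)
		continue;

	*attr++ = "qlini_mode";
	*attr = "ql2xiniexchg";
	/* the untouched final NULL keeps the array terminated */
}

The slots must be reserved up front because the array's size is fixed at compile time; the helper can only fill holes left for it, and it has to run before the host is registered for the attributes to be created.
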
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c11a89be292c..4a9fd8d944d6 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2487,7 +2487,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
2487 vha = shost_priv(host); 2487 vha = shost_priv(host);
2488 } 2488 }
2489 2489
2490 if (qla2x00_reset_active(vha)) { 2490 if (qla2x00_chip_is_down(vha)) {
2491 ql_dbg(ql_dbg_user, vha, 0x709f, 2491 ql_dbg(ql_dbg_user, vha, 0x709f,
2492 "BSG: ISP abort active/needed -- cmd=%d.\n", 2492 "BSG: ISP abort active/needed -- cmd=%d.\n",
2493 bsg_request->msgcode); 2493 bsg_request->msgcode);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a9dc9c4a6382..26b93c563f92 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -262,8 +262,8 @@ struct name_list_extended {
262 struct get_name_list_extended *l; 262 struct get_name_list_extended *l;
263 dma_addr_t ldma; 263 dma_addr_t ldma;
264 struct list_head fcports; 264 struct list_head fcports;
265 spinlock_t fcports_lock;
266 u32 size; 265 u32 size;
266 u8 sent;
267}; 267};
268/* 268/*
269 * Timeout timer counts in seconds 269 * Timeout timer counts in seconds
@@ -519,6 +519,7 @@ struct srb_iocb {
519enum { 519enum {
520 TYPE_SRB, 520 TYPE_SRB,
521 TYPE_TGT_CMD, 521 TYPE_TGT_CMD,
522 TYPE_TGT_TMCMD, /* task management */
522}; 523};
523 524
524typedef struct srb { 525typedef struct srb {
@@ -2280,7 +2281,6 @@ struct ct_sns_desc {
2280enum discovery_state { 2281enum discovery_state {
2281 DSC_DELETED, 2282 DSC_DELETED,
2282 DSC_GNN_ID, 2283 DSC_GNN_ID,
2283 DSC_GID_PN,
2284 DSC_GNL, 2284 DSC_GNL,
2285 DSC_LOGIN_PEND, 2285 DSC_LOGIN_PEND,
2286 DSC_LOGIN_FAILED, 2286 DSC_LOGIN_FAILED,
@@ -2305,7 +2305,6 @@ enum login_state { /* FW control Target side */
2305enum fcport_mgt_event { 2305enum fcport_mgt_event {
2306 FCME_RELOGIN = 1, 2306 FCME_RELOGIN = 1,
2307 FCME_RSCN, 2307 FCME_RSCN,
2308 FCME_GIDPN_DONE,
2309 FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */ 2308 FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */
2310 FCME_PRLI_DONE, 2309 FCME_PRLI_DONE,
2311 FCME_GNL_DONE, 2310 FCME_GNL_DONE,
@@ -2351,7 +2350,7 @@ typedef struct fc_port {
2351 unsigned int login_succ:1; 2350 unsigned int login_succ:1;
2352 unsigned int query:1; 2351 unsigned int query:1;
2353 unsigned int id_changed:1; 2352 unsigned int id_changed:1;
2354 unsigned int rscn_rcvd:1; 2353 unsigned int scan_needed:1;
2355 2354
2356 struct work_struct nvme_del_work; 2355 struct work_struct nvme_del_work;
2357 struct completion nvme_del_done; 2356 struct completion nvme_del_done;
@@ -2375,11 +2374,13 @@ typedef struct fc_port {
2375 unsigned long expires; 2374 unsigned long expires;
2376 struct list_head del_list_entry; 2375 struct list_head del_list_entry;
2377 struct work_struct free_work; 2376 struct work_struct free_work;
2378 2377 struct work_struct reg_work;
2378 uint64_t jiffies_at_registration;
2379 struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; 2379 struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
2380 2380
2381 uint16_t tgt_id; 2381 uint16_t tgt_id;
2382 uint16_t old_tgt_id; 2382 uint16_t old_tgt_id;
2383 uint16_t sec_since_registration;
2383 2384
2384 uint8_t fcp_prio; 2385 uint8_t fcp_prio;
2385 2386
@@ -2412,6 +2413,7 @@ typedef struct fc_port {
2412 struct qla_tgt_sess *tgt_session; 2413 struct qla_tgt_sess *tgt_session;
2413 struct ct_sns_desc ct_desc; 2414 struct ct_sns_desc ct_desc;
2414 enum discovery_state disc_state; 2415 enum discovery_state disc_state;
2416 enum discovery_state next_disc_state;
2415 enum login_state fw_login_state; 2417 enum login_state fw_login_state;
2416 unsigned long dm_login_expire; 2418 unsigned long dm_login_expire;
2417 unsigned long plogi_nack_done_deadline; 2419 unsigned long plogi_nack_done_deadline;
@@ -3212,17 +3214,14 @@ enum qla_work_type {
3212 QLA_EVT_ASYNC_LOGOUT, 3214 QLA_EVT_ASYNC_LOGOUT,
3213 QLA_EVT_ASYNC_LOGOUT_DONE, 3215 QLA_EVT_ASYNC_LOGOUT_DONE,
3214 QLA_EVT_ASYNC_ADISC, 3216 QLA_EVT_ASYNC_ADISC,
3215 QLA_EVT_ASYNC_ADISC_DONE,
3216 QLA_EVT_UEVENT, 3217 QLA_EVT_UEVENT,
3217 QLA_EVT_AENFX, 3218 QLA_EVT_AENFX,
3218 QLA_EVT_GIDPN,
3219 QLA_EVT_GPNID, 3219 QLA_EVT_GPNID,
3220 QLA_EVT_UNMAP, 3220 QLA_EVT_UNMAP,
3221 QLA_EVT_NEW_SESS, 3221 QLA_EVT_NEW_SESS,
3222 QLA_EVT_GPDB, 3222 QLA_EVT_GPDB,
3223 QLA_EVT_PRLI, 3223 QLA_EVT_PRLI,
3224 QLA_EVT_GPSC, 3224 QLA_EVT_GPSC,
3225 QLA_EVT_UPD_FCPORT,
3226 QLA_EVT_GNL, 3225 QLA_EVT_GNL,
3227 QLA_EVT_NACK, 3226 QLA_EVT_NACK,
3228 QLA_EVT_RELOGIN, 3227 QLA_EVT_RELOGIN,
@@ -3483,6 +3482,9 @@ struct qla_qpair {
3483 struct list_head qp_list_elem; /* vha->qp_list */ 3482 struct list_head qp_list_elem; /* vha->qp_list */
3484 struct list_head hints_list; 3483 struct list_head hints_list;
3485 uint16_t cpuid; 3484 uint16_t cpuid;
3485 uint16_t retry_term_cnt;
3486 uint32_t retry_term_exchg_addr;
3487 uint64_t retry_term_jiff;
3486 struct qla_tgt_counters tgt_counters; 3488 struct qla_tgt_counters tgt_counters;
3487}; 3489};
3488 3490
@@ -4184,6 +4186,10 @@ struct qla_hw_data {
4184 4186
4185 atomic_t nvme_active_aen_cnt; 4187 atomic_t nvme_active_aen_cnt;
4186 uint16_t nvme_last_rptd_aen; /* Last recorded aen count */ 4188 uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
4189
4190 atomic_t zio_threshold;
4191 uint16_t last_zio_threshold;
4192#define DEFAULT_ZIO_THRESHOLD 64
4187}; 4193};
4188 4194
4189#define FW_ABILITY_MAX_SPEED_MASK 0xFUL 4195#define FW_ABILITY_MAX_SPEED_MASK 0xFUL
@@ -4263,10 +4269,11 @@ typedef struct scsi_qla_host {
4263#define FX00_CRITEMP_RECOVERY 25 4269#define FX00_CRITEMP_RECOVERY 25
4264#define FX00_HOST_INFO_RESEND 26 4270#define FX00_HOST_INFO_RESEND 26
4265#define QPAIR_ONLINE_CHECK_NEEDED 27 4271#define QPAIR_ONLINE_CHECK_NEEDED 27
4266#define SET_ZIO_THRESHOLD_NEEDED 28 4272#define SET_NVME_ZIO_THRESHOLD_NEEDED 28
4267#define DETECT_SFP_CHANGE 29 4273#define DETECT_SFP_CHANGE 29
4268#define N2N_LOGIN_NEEDED 30 4274#define N2N_LOGIN_NEEDED 30
4269#define IOCB_WORK_ACTIVE 31 4275#define IOCB_WORK_ACTIVE 31
4276#define SET_ZIO_THRESHOLD_NEEDED 32
4270 4277
4271 unsigned long pci_flags; 4278 unsigned long pci_flags;
4272#define PFLG_DISCONNECTED 0 /* PCI device removed */ 4279#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4369,6 +4376,13 @@ typedef struct scsi_qla_host {
4369 atomic_t vref_count; 4376 atomic_t vref_count;
4370 struct qla8044_reset_template reset_tmplt; 4377 struct qla8044_reset_template reset_tmplt;
4371 uint16_t bbcr; 4378 uint16_t bbcr;
4379
4380 uint16_t u_ql2xexchoffld;
4381 uint16_t u_ql2xiniexchg;
4382 uint16_t qlini_mode;
4383 uint16_t ql2xexchoffld;
4384 uint16_t ql2xiniexchg;
4385
4372 struct name_list_extended gnl; 4386 struct name_list_extended gnl;
4373 /* Count of active session/fcport */ 4387 /* Count of active session/fcport */
4374 int fcport_count; 4388 int fcport_count;
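
The qla_def.h changes stage the new threshold through an atomic_t plus a dpc_flags bit: the sysfs store records the value and sets SET_ZIO_THRESHOLD_NEEDED, and the DPC thread later consumes the flag and programs the firmware. A hedged sketch of that producer/consumer handshake (flag index and names illustrative only):

#include <linux/bitops.h>
#include <linux/atomic.h>

#define MY_WORK_NEEDED	3	/* illustrative bit index, not the driver's */

static unsigned long dpc_flags;
static atomic_t staged_threshold = ATOMIC_INIT(64); /* cf. DEFAULT_ZIO_THRESHOLD */

static void request_threshold_update(int val)
{
	atomic_set(&staged_threshold, val);
	set_bit(MY_WORK_NEEDED, &dpc_flags);
	/* the driver would also wake its DPC kthread here */
}

static void dpc_loop_once(void)
{
	if (test_and_clear_bit(MY_WORK_NEEDED, &dpc_flags)) {
		int val = atomic_read(&staged_threshold);

		(void)val;	/* a mailbox call would program val here */
	}
}

set_bit()/test_and_clear_bit() are atomic, so the store path never needs the DPC thread's locks, and last_zio_threshold caches what was actually programmed so the show side does not have to query firmware.
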
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 178974896b5c..3673fcdb033a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -54,7 +54,7 @@ extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
54extern void qla2x00_quiesce_io(scsi_qla_host_t *); 54extern void qla2x00_quiesce_io(scsi_qla_host_t *);
55 55
56extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); 56extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
57 57void qla_register_fcport_fn(struct work_struct *);
58extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); 58extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
59extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); 59extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
60 60
@@ -73,8 +73,6 @@ extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
73 uint16_t *); 73 uint16_t *);
74extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, 74extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
75 uint16_t *); 75 uint16_t *);
76extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
77 uint16_t *);
78struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, 76struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
79 enum qla_work_type); 77 enum qla_work_type);
80extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); 78extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -109,6 +107,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
109int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); 107int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
110int qla24xx_detect_sfp(scsi_qla_host_t *vha); 108int qla24xx_detect_sfp(scsi_qla_host_t *vha);
111int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); 109int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
110
112void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *, 111void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
113 uint16_t *); 112 uint16_t *);
114extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *, 113extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
@@ -118,6 +117,8 @@ extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
118int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); 117int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
119void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); 118void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
120int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *); 119int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
120void qla_rscn_replay(fc_port_t *fcport);
121
121/* 122/*
122 * Global Data in qla_os.c source file. 123 * Global Data in qla_os.c source file.
123 */ 124 */
@@ -158,6 +159,7 @@ extern int ql2xnvmeenable;
158extern int ql2xautodetectsfp; 159extern int ql2xautodetectsfp;
159extern int ql2xenablemsix; 160extern int ql2xenablemsix;
160extern int qla2xuseresexchforels; 161extern int qla2xuseresexchforels;
162extern int ql2xexlogins;
161 163
162extern int qla2x00_loop_reset(scsi_qla_host_t *); 164extern int qla2x00_loop_reset(scsi_qla_host_t *);
163extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 165extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -208,7 +210,7 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
208extern void qla2x00_sp_compl(void *, int); 210extern void qla2x00_sp_compl(void *, int);
209extern void qla2xxx_qpair_sp_free_dma(void *); 211extern void qla2xxx_qpair_sp_free_dma(void *);
210extern void qla2xxx_qpair_sp_compl(void *, int); 212extern void qla2xxx_qpair_sp_compl(void *, int);
211extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); 213extern void qla24xx_sched_upd_fcport(fc_port_t *);
212void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, 214void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
213 uint16_t *); 215 uint16_t *);
214int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); 216int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
@@ -644,9 +646,6 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
644extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *, 646extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
645 struct ct_sns_rsp *, const char *); 647 struct ct_sns_rsp *, const char *);
646extern void qla2x00_async_iocb_timeout(void *data); 648extern void qla2x00_async_iocb_timeout(void *data);
647extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *);
648int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *);
649void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *);
650 649
651extern void qla2x00_free_fcport(fc_port_t *); 650extern void qla2x00_free_fcport(fc_port_t *);
652 651
@@ -677,6 +676,7 @@ void qla_scan_work_fn(struct work_struct *);
677 */ 676 */
678struct device_attribute; 677struct device_attribute;
679extern struct device_attribute *qla2x00_host_attrs[]; 678extern struct device_attribute *qla2x00_host_attrs[];
679extern struct device_attribute *qla2x00_host_attrs_dm[];
680struct fc_function_template; 680struct fc_function_template;
681extern struct fc_function_template qla2xxx_transport_functions; 681extern struct fc_function_template qla2xxx_transport_functions;
682extern struct fc_function_template qla2xxx_transport_vport_functions; 682extern struct fc_function_template qla2xxx_transport_vport_functions;
@@ -690,7 +690,7 @@ extern int qla2x00_echo_test(scsi_qla_host_t *,
690extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *); 690extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
691extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *, 691extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
692 struct qla_fcp_prio_cfg *, uint8_t); 692 struct qla_fcp_prio_cfg *, uint8_t);
693 693void qla_insert_tgt_attrs(void);
694/* 694/*
695 * Global Function Prototypes in qla_dfs.c source file. 695 * Global Function Prototypes in qla_dfs.c source file.
696 */ 696 */
@@ -897,5 +897,6 @@ void qlt_unknown_atio_work_fn(struct work_struct *);
897void qlt_update_host_map(struct scsi_qla_host *, port_id_t); 897void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
898void qlt_remove_target_resources(struct qla_hw_data *); 898void qlt_remove_target_resources(struct qla_hw_data *);
899void qlt_clr_qp_table(struct scsi_qla_host *vha); 899void qlt_clr_qp_table(struct scsi_qla_host *vha);
900void qlt_set_mode(struct scsi_qla_host *);
900 901
901#endif /* _QLA_GBL_H */ 902#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a0038d879b9d..90cfa394f942 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2973,237 +2973,6 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2973 } 2973 }
2974} 2974}
2975 2975
2976/* GID_PN completion processing. */
2977void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
2978{
2979 fc_port_t *fcport = ea->fcport;
2980
2981 ql_dbg(ql_dbg_disc, vha, 0x201d,
2982 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2983 __func__, fcport->port_name, fcport->disc_state,
2984 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
2985 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
2986
2987 if (fcport->disc_state == DSC_DELETE_PEND)
2988 return;
2989
2990 if (ea->sp->gen2 != fcport->login_gen) {
2991 /* PLOGI/PRLI/LOGO came in while cmd was out.*/
2992 ql_dbg(ql_dbg_disc, vha, 0x201e,
2993 "%s %8phC generation changed rscn %d|%d n",
2994 __func__, fcport->port_name, fcport->last_rscn_gen,
2995 fcport->rscn_gen);
2996 return;
2997 }
2998
2999 if (!ea->rc) {
3000 if (ea->sp->gen1 == fcport->rscn_gen) {
3001 fcport->scan_state = QLA_FCPORT_FOUND;
3002 fcport->flags |= FCF_FABRIC_DEVICE;
3003
3004 if (fcport->d_id.b24 == ea->id.b24) {
3005 /* cable plugged into the same place */
3006 switch (vha->host->active_mode) {
3007 case MODE_TARGET:
3008 if (fcport->fw_login_state ==
3009 DSC_LS_PRLI_COMP) {
3010 u16 data[2];
3011 /*
3012 * Late RSCN was delivered.
3013 * Remote port already login'ed.
3014 */
3015 ql_dbg(ql_dbg_disc, vha, 0x201f,
3016 "%s %d %8phC post adisc\n",
3017 __func__, __LINE__,
3018 fcport->port_name);
3019 data[0] = data[1] = 0;
3020 qla2x00_post_async_adisc_work(
3021 vha, fcport, data);
3022 }
3023 break;
3024 case MODE_INITIATOR:
3025 case MODE_DUAL:
3026 default:
3027 ql_dbg(ql_dbg_disc, vha, 0x201f,
3028 "%s %d %8phC post %s\n", __func__,
3029 __LINE__, fcport->port_name,
3030 (atomic_read(&fcport->state) ==
3031 FCS_ONLINE) ? "adisc" : "gnl");
3032
3033 if (atomic_read(&fcport->state) ==
3034 FCS_ONLINE) {
3035 u16 data[2];
3036
3037 data[0] = data[1] = 0;
3038 qla2x00_post_async_adisc_work(
3039 vha, fcport, data);
3040 } else {
3041 qla24xx_post_gnl_work(vha,
3042 fcport);
3043 }
3044 break;
3045 }
3046 } else { /* fcport->d_id.b24 != ea->id.b24 */
3047 fcport->d_id.b24 = ea->id.b24;
3048 fcport->id_changed = 1;
3049 if (fcport->deleted != QLA_SESS_DELETED) {
3050 ql_dbg(ql_dbg_disc, vha, 0x2021,
3051 "%s %d %8phC post del sess\n",
3052 __func__, __LINE__, fcport->port_name);
3053 qlt_schedule_sess_for_deletion(fcport);
3054 }
3055 }
3056 } else { /* ea->sp->gen1 != fcport->rscn_gen */
3057 ql_dbg(ql_dbg_disc, vha, 0x2022,
3058 "%s %d %8phC post gidpn\n",
3059 __func__, __LINE__, fcport->port_name);
3060 /* rscn came in while cmd was out */
3061 qla24xx_post_gidpn_work(vha, fcport);
3062 }
3063 } else { /* ea->rc */
3064 /* cable pulled */
3065 if (ea->sp->gen1 == fcport->rscn_gen) {
3066 if (ea->sp->gen2 == fcport->login_gen) {
3067 ql_dbg(ql_dbg_disc, vha, 0x2042,
3068 "%s %d %8phC post del sess\n", __func__,
3069 __LINE__, fcport->port_name);
3070 qlt_schedule_sess_for_deletion(fcport);
3071 } else {
3072 ql_dbg(ql_dbg_disc, vha, 0x2045,
3073 "%s %d %8phC login\n", __func__, __LINE__,
3074 fcport->port_name);
3075 qla24xx_fcport_handle_login(vha, fcport);
3076 }
3077 } else {
3078 ql_dbg(ql_dbg_disc, vha, 0x2049,
3079 "%s %d %8phC post gidpn\n", __func__, __LINE__,
3080 fcport->port_name);
3081 qla24xx_post_gidpn_work(vha, fcport);
3082 }
3083 }
3084} /* gidpn_event */
3085
3086static void qla2x00_async_gidpn_sp_done(void *s, int res)
3087{
3088 struct srb *sp = s;
3089 struct scsi_qla_host *vha = sp->vha;
3090 fc_port_t *fcport = sp->fcport;
3091 u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
3092 struct event_arg ea;
3093
3094 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3095
3096 memset(&ea, 0, sizeof(ea));
3097 ea.fcport = fcport;
3098 ea.id.b.domain = id[0];
3099 ea.id.b.area = id[1];
3100 ea.id.b.al_pa = id[2];
3101 ea.sp = sp;
3102 ea.rc = res;
3103 ea.event = FCME_GIDPN_DONE;
3104
3105 if (res == QLA_FUNCTION_TIMEOUT) {
3106 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3107 "Async done-%s WWPN %8phC timed out.\n",
3108 sp->name, fcport->port_name);
3109 qla24xx_post_gidpn_work(sp->vha, fcport);
3110 sp->free(sp);
3111 return;
3112 } else if (res) {
3113 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3114 "Async done-%s fail res %x, WWPN %8phC\n",
3115 sp->name, res, fcport->port_name);
3116 } else {
3117 ql_dbg(ql_dbg_disc, vha, 0x204f,
3118 "Async done-%s good WWPN %8phC ID %3phC\n",
3119 sp->name, fcport->port_name, id);
3120 }
3121
3122 qla2x00_fcport_event_handler(vha, &ea);
3123
3124 sp->free(sp);
3125}
3126
3127int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
3128{
3129 int rval = QLA_FUNCTION_FAILED;
3130 struct ct_sns_req *ct_req;
3131 srb_t *sp;
3132
3133 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3134 return rval;
3135
3136 fcport->disc_state = DSC_GID_PN;
3137 fcport->scan_state = QLA_FCPORT_SCAN;
3138 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
3139 if (!sp)
3140 goto done;
3141
3142 fcport->flags |= FCF_ASYNC_SENT;
3143 sp->type = SRB_CT_PTHRU_CMD;
3144 sp->name = "gidpn";
3145 sp->gen1 = fcport->rscn_gen;
3146 sp->gen2 = fcport->login_gen;
3147
3148 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3149
3150 /* CT_IU preamble */
3151 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
3152 GID_PN_RSP_SIZE);
3153
3154 /* GIDPN req */
3155 memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
3156 WWN_SIZE);
3157
3158 /* req & rsp use the same buffer */
3159 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3160 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3161 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3162 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3163 sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
3164 sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
3165 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3166
3167 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3168 sp->done = qla2x00_async_gidpn_sp_done;
3169
3170 rval = qla2x00_start_sp(sp);
3171 if (rval != QLA_SUCCESS)
3172 goto done_free_sp;
3173
3174 ql_dbg(ql_dbg_disc, vha, 0x20a4,
3175 "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
3176 sp->name, fcport->port_name,
3177 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
3178 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3179 return rval;
3180
3181done_free_sp:
3182 sp->free(sp);
3183done:
3184 fcport->flags &= ~FCF_ASYNC_ACTIVE;
3185 return rval;
3186}
3187
3188int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3189{
3190 struct qla_work_evt *e;
3191 int ls;
3192
3193 ls = atomic_read(&vha->loop_state);
3194 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
3195 test_bit(UNLOADING, &vha->dpc_flags))
3196 return 0;
3197
3198 e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
3199 if (!e)
3200 return QLA_FUNCTION_FAILED;
3201
3202 e->u.fcport.fcport = fcport;
3203 fcport->flags |= FCF_ASYNC_ACTIVE;
3204 return qla2x00_post_work(vha, e);
3205}
3206
3207int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) 2976int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3208{ 2977{
3209 struct qla_work_evt *e; 2978 struct qla_work_evt *e;
@@ -3237,9 +3006,6 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
3237 __func__, fcport->port_name); 3006 __func__, fcport->port_name);
3238 return; 3007 return;
3239 } else if (ea->sp->gen1 != fcport->rscn_gen) { 3008 } else if (ea->sp->gen1 != fcport->rscn_gen) {
3240 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
3241 __func__, __LINE__, fcport->port_name);
3242 qla24xx_post_gidpn_work(vha, fcport);
3243 return; 3009 return;
3244 } 3010 }
3245 3011
@@ -3261,6 +3027,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
3261 "Async done-%s res %x, WWPN %8phC \n", 3027 "Async done-%s res %x, WWPN %8phC \n",
3262 sp->name, res, fcport->port_name); 3028 sp->name, res, fcport->port_name);
3263 3029
3030 if (res == QLA_FUNCTION_TIMEOUT)
3031 return;
3032
3264 if (res == (DID_ERROR << 16)) { 3033 if (res == (DID_ERROR << 16)) {
3265 /* entry status error */ 3034 /* entry status error */
3266 goto done; 3035 goto done;
@@ -3272,7 +3041,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
3272 ql_dbg(ql_dbg_disc, vha, 0x2019, 3041 ql_dbg(ql_dbg_disc, vha, 0x2019,
3273 "GPSC command unsupported, disabling query.\n"); 3042 "GPSC command unsupported, disabling query.\n");
3274 ha->flags.gpsc_supported = 0; 3043 ha->flags.gpsc_supported = 0;
3275 res = QLA_SUCCESS; 3044 goto done;
3276 } 3045 }
3277 } else { 3046 } else {
3278 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) { 3047 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
@@ -3305,7 +3074,6 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
3305 be16_to_cpu(ct_rsp->rsp.gpsc.speeds), 3074 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3306 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 3075 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3307 } 3076 }
3308done:
3309 memset(&ea, 0, sizeof(ea)); 3077 memset(&ea, 0, sizeof(ea));
3310 ea.event = FCME_GPSC_DONE; 3078 ea.event = FCME_GPSC_DONE;
3311 ea.rc = res; 3079 ea.rc = res;
@@ -3313,6 +3081,7 @@ done:
3313 ea.sp = sp; 3081 ea.sp = sp;
3314 qla2x00_fcport_event_handler(vha, &ea); 3082 qla2x00_fcport_event_handler(vha, &ea);
3315 3083
3084done:
3316 sp->free(sp); 3085 sp->free(sp);
3317} 3086}
3318 3087
@@ -3355,15 +3124,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3355 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 3124 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3356 sp->done = qla24xx_async_gpsc_sp_done; 3125 sp->done = qla24xx_async_gpsc_sp_done;
3357 3126
3358 rval = qla2x00_start_sp(sp);
3359 if (rval != QLA_SUCCESS)
3360 goto done_free_sp;
3361
3362 ql_dbg(ql_dbg_disc, vha, 0x205e, 3127 ql_dbg(ql_dbg_disc, vha, 0x205e,
3363 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", 3128 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3364 sp->name, fcport->port_name, sp->handle, 3129 sp->name, fcport->port_name, sp->handle,
3365 fcport->loop_id, fcport->d_id.b.domain, 3130 fcport->loop_id, fcport->d_id.b.domain,
3366 fcport->d_id.b.area, fcport->d_id.b.al_pa); 3131 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3132
3133 rval = qla2x00_start_sp(sp);
3134 if (rval != QLA_SUCCESS)
3135 goto done_free_sp;
3367 return rval; 3136 return rval;
3368 3137
3369done_free_sp: 3138done_free_sp:
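
The reorder in qla24xx_async_gpsc() is not cosmetic: qla2x00_start_sp() hands the srb to the hardware, after which the completion path may run and free it, so tracing sp->handle after a successful start is a potential use-after-free. The fixed ordering, as on the new side of the hunk:

	/* trace while sp is still guaranteed to be live */
	ql_dbg(ql_dbg_disc, vha, 0x205e,
	    "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
	    sp->name, fcport->port_name, sp->handle,
	    fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);	/* sp may be freed once this succeeds */
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

The same log-before-submit pattern is applied to qla24xx_async_gpnid() in a later hunk of this file.
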
@@ -3442,26 +3211,10 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3442 if (ea->rc) { 3211 if (ea->rc) {
3443 /* cable is disconnected */ 3212 /* cable is disconnected */
3444 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) { 3213 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3445 if (fcport->d_id.b24 == ea->id.b24) { 3214 if (fcport->d_id.b24 == ea->id.b24)
3446 ql_dbg(ql_dbg_disc, vha, 0xffff,
3447 "%s %d %8phC DS %d\n",
3448 __func__, __LINE__,
3449 fcport->port_name,
3450 fcport->disc_state);
3451 fcport->scan_state = QLA_FCPORT_SCAN; 3215 fcport->scan_state = QLA_FCPORT_SCAN;
3452 switch (fcport->disc_state) { 3216
3453 case DSC_DELETED: 3217 qlt_schedule_sess_for_deletion(fcport);
3454 case DSC_DELETE_PEND:
3455 break;
3456 default:
3457 ql_dbg(ql_dbg_disc, vha, 0xffff,
3458 "%s %d %8phC post del sess\n",
3459 __func__, __LINE__,
3460 fcport->port_name);
3461 qlt_schedule_sess_for_deletion(fcport);
3462 break;
3463 }
3464 }
3465 } 3218 }
3466 } else { 3219 } else {
3467 /* cable is connected */ 3220 /* cable is connected */
@@ -3470,34 +3223,19 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3470 list_for_each_entry_safe(conflict, t, &vha->vp_fcports, 3223 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3471 list) { 3224 list) {
3472 if ((conflict->d_id.b24 == ea->id.b24) && 3225 if ((conflict->d_id.b24 == ea->id.b24) &&
3473 (fcport != conflict)) { 3226 (fcport != conflict))
3474 /* 2 fcports with conflict Nport ID or 3227 /*
3228 * 2 fcports with conflict Nport ID or
3475 * an existing fcport is having nport ID 3229 * an existing fcport is having nport ID
3476 * conflict with new fcport. 3230 * conflict with new fcport.
3477 */ 3231 */
3478 3232
3479 ql_dbg(ql_dbg_disc, vha, 0xffff,
3480 "%s %d %8phC DS %d\n",
3481 __func__, __LINE__,
3482 conflict->port_name,
3483 conflict->disc_state);
3484 conflict->scan_state = QLA_FCPORT_SCAN; 3233 conflict->scan_state = QLA_FCPORT_SCAN;
3485 switch (conflict->disc_state) { 3234
3486 case DSC_DELETED: 3235 qlt_schedule_sess_for_deletion(conflict);
3487 case DSC_DELETE_PEND:
3488 break;
3489 default:
3490 ql_dbg(ql_dbg_disc, vha, 0xffff,
3491 "%s %d %8phC post del sess\n",
3492 __func__, __LINE__,
3493 conflict->port_name);
3494 qlt_schedule_sess_for_deletion
3495 (conflict);
3496 break;
3497 }
3498 }
3499 } 3236 }
3500 3237
3238 fcport->scan_needed = 0;
3501 fcport->rscn_gen++; 3239 fcport->rscn_gen++;
3502 fcport->scan_state = QLA_FCPORT_FOUND; 3240 fcport->scan_state = QLA_FCPORT_FOUND;
3503 fcport->flags |= FCF_FABRIC_DEVICE; 3241 fcport->flags |= FCF_FABRIC_DEVICE;
@@ -3548,19 +3286,7 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3548 conflict->disc_state); 3286 conflict->disc_state);
3549 3287
3550 conflict->scan_state = QLA_FCPORT_SCAN; 3288 conflict->scan_state = QLA_FCPORT_SCAN;
3551 switch (conflict->disc_state) { 3289 qlt_schedule_sess_for_deletion(conflict);
3552 case DSC_DELETED:
3553 case DSC_DELETE_PEND:
3554 break;
3555 default:
3556 ql_dbg(ql_dbg_disc, vha, 0xffff,
3557 "%s %d %8phC post del sess\n",
3558 __func__, __LINE__,
3559 conflict->port_name);
3560 qlt_schedule_sess_for_deletion
3561 (conflict);
3562 break;
3563 }
3564 } 3290 }
3565 } 3291 }
3566 3292
@@ -3724,13 +3450,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3724 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 3450 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3725 sp->done = qla2x00_async_gpnid_sp_done; 3451 sp->done = qla2x00_async_gpnid_sp_done;
3726 3452
3453 ql_dbg(ql_dbg_disc, vha, 0x2067,
3454 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3455 sp->handle, ct_req->req.port_id.port_id);
3456
3727 rval = qla2x00_start_sp(sp); 3457 rval = qla2x00_start_sp(sp);
3728 if (rval != QLA_SUCCESS) 3458 if (rval != QLA_SUCCESS)
3729 goto done_free_sp; 3459 goto done_free_sp;
3730 3460
3731 ql_dbg(ql_dbg_disc, vha, 0x2067,
3732 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3733 sp->handle, ct_req->req.port_id.port_id);
3734 return rval; 3461 return rval;
3735 3462
3736done_free_sp: 3463done_free_sp:
@@ -3896,9 +3623,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3896 fc_port_t *fcport; 3623 fc_port_t *fcport;
3897 u32 i, rc; 3624 u32 i, rc;
3898 bool found; 3625 bool found;
3899 struct fab_scan_rp *rp; 3626 struct fab_scan_rp *rp, *trp;
3900 unsigned long flags; 3627 unsigned long flags;
3901 u8 recheck = 0; 3628 u8 recheck = 0;
3629 u16 dup = 0, dup_cnt = 0;
3902 3630
3903 ql_dbg(ql_dbg_disc, vha, 0xffff, 3631 ql_dbg(ql_dbg_disc, vha, 0xffff,
3904 "%s enter\n", __func__); 3632 "%s enter\n", __func__);
@@ -3929,6 +3657,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3929 3657
3930 for (i = 0; i < vha->hw->max_fibre_devices; i++) { 3658 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3931 u64 wwn; 3659 u64 wwn;
3660 int k;
3932 3661
3933 rp = &vha->scan.l[i]; 3662 rp = &vha->scan.l[i];
3934 found = false; 3663 found = false;
@@ -3937,6 +3666,20 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3937 if (wwn == 0) 3666 if (wwn == 0)
3938 continue; 3667 continue;
3939 3668
3669 /* Remove duplicate NPORT ID entries from the switch database */
3670 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3671 trp = &vha->scan.l[k];
3672 if (rp->id.b24 == trp->id.b24) {
3673 dup = 1;
3674 dup_cnt++;
3675 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3676 vha, 0xffff,
3677 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3678 rp->id.b24, rp->port_name, trp->port_name);
3679 memset(trp, 0, sizeof(*trp));
3680 }
3681 }
3682
3940 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE)) 3683 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3941 continue; 3684 continue;
3942 3685
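
The added loop above removes duplicate NPORT IDs that a switch can return in its name-server data: for each live entry it scans the rest of the scan list, zeroes any later entry carrying the same 24-bit ID, and counts the drops so a single warning can summarize them. A standalone sketch of the same in-place O(n^2) dedup, with a generic entry type instead of fab_scan_rp:

#include <stdint.h>
#include <string.h>

/* Illustrative entry; the driver's fab_scan_rp also carries node/port WWNs. */
struct ent {
	uint32_t id24;		/* 24-bit NPORT ID */
	uint8_t  port_name[8];
};

static int dedup(struct ent *l, int n)
{
	int i, k, dup_cnt = 0;

	for (i = 0; i < n; i++) {
		if (l[i].id24 == 0)
			continue;	/* empty or already cleared */
		for (k = i + 1; k < n; k++) {
			if (l[k].id24 == l[i].id24) {
				dup_cnt++;
				memset(&l[k], 0, sizeof(l[k]));
			}
		}
	}
	return dup_cnt;
}

Zeroing rather than compacting keeps the indices of the surviving entries stable, which matters because qla24xx_async_gnnft_done() is still walking the same array by index.
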
@@ -3951,7 +3694,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3951 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3694 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3952 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) 3695 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3953 continue; 3696 continue;
3954 fcport->rscn_rcvd = 0; 3697 fcport->scan_needed = 0;
3955 fcport->scan_state = QLA_FCPORT_FOUND; 3698 fcport->scan_state = QLA_FCPORT_FOUND;
3956 found = true; 3699 found = true;
3957 /* 3700 /*
@@ -3976,25 +3719,30 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3976 } 3719 }
3977 } 3720 }
3978 3721
3722 if (dup) {
3723 ql_log(ql_log_warn, vha, 0xffff,
3724 	"Detected %d duplicate NPORT ID(s) from switch database\n",
3725 dup_cnt);
3726 }
3727
3979 /* 3728 /*
3980 * Logout all previous fabric dev marked lost, except FCP2 devices. 3729 * Logout all previous fabric dev marked lost, except FCP2 devices.
3981 */ 3730 */
3982 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3731 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3983 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3732 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3984 fcport->rscn_rcvd = 0; 3733 fcport->scan_needed = 0;
3985 continue; 3734 continue;
3986 } 3735 }
3987 3736
3988 if (fcport->scan_state != QLA_FCPORT_FOUND) { 3737 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3989 fcport->rscn_rcvd = 0; 3738 fcport->scan_needed = 0;
3990 if ((qla_dual_mode_enabled(vha) || 3739 if ((qla_dual_mode_enabled(vha) ||
3991 qla_ini_mode_enabled(vha)) && 3740 qla_ini_mode_enabled(vha)) &&
3992 atomic_read(&fcport->state) == FCS_ONLINE) { 3741 atomic_read(&fcport->state) == FCS_ONLINE) {
3993 qla2x00_mark_device_lost(vha, fcport, 3742 if (fcport->loop_id != FC_NO_LOOP_ID) {
3994 ql2xplogiabsentdevice, 0); 3743 if (fcport->flags & FCF_FCP2_DEVICE)
3744 fcport->logout_on_delete = 0;
3995 3745
3996 if (fcport->loop_id != FC_NO_LOOP_ID &&
3997 (fcport->flags & FCF_FCP2_DEVICE) == 0) {
3998 ql_dbg(ql_dbg_disc, vha, 0x20f0, 3746 ql_dbg(ql_dbg_disc, vha, 0x20f0,
3999 "%s %d %8phC post del sess\n", 3747 "%s %d %8phC post del sess\n",
4000 __func__, __LINE__, 3748 __func__, __LINE__,
@@ -4005,7 +3753,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
4005 } 3753 }
4006 } 3754 }
4007 } else { 3755 } else {
4008 if (fcport->rscn_rcvd || 3756 if (fcport->scan_needed ||
4009 fcport->disc_state != DSC_LOGIN_COMPLETE) { 3757 fcport->disc_state != DSC_LOGIN_COMPLETE) {
4010 if (fcport->login_retry == 0) { 3758 if (fcport->login_retry == 0) {
4011 fcport->login_retry = 3759 fcport->login_retry =
@@ -4015,7 +3763,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
4015 fcport->port_name, fcport->loop_id, 3763 fcport->port_name, fcport->loop_id,
4016 fcport->login_retry); 3764 fcport->login_retry);
4017 } 3765 }
4018 fcport->rscn_rcvd = 0; 3766 fcport->scan_needed = 0;
4019 qla24xx_fcport_handle_login(vha, fcport); 3767 qla24xx_fcport_handle_login(vha, fcport);
4020 } 3768 }
4021 } 3769 }
@@ -4030,7 +3778,7 @@ out:
4030 3778
4031 if (recheck) { 3779 if (recheck) {
4032 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3780 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4033 if (fcport->rscn_rcvd) { 3781 if (fcport->scan_needed) {
4034 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3782 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4035 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3783 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4036 break; 3784 break;
@@ -4039,6 +3787,41 @@ out:
4039 } 3787 }
4040} 3788}
4041 3789
3790static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3791 srb_t *sp, int cmd)
3792{
3793 struct qla_work_evt *e;
3794
3795 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3796 return QLA_PARAMETER_ERROR;
3797
3798 e = qla2x00_alloc_work(vha, cmd);
3799 if (!e)
3800 return QLA_FUNCTION_FAILED;
3801
3802 e->u.iosb.sp = sp;
3803
3804 return qla2x00_post_work(vha, e);
3805}
3806
3807static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3808 srb_t *sp, int cmd)
3809{
3810 struct qla_work_evt *e;
3811
3812 if (cmd != QLA_EVT_GPNFT)
3813 return QLA_PARAMETER_ERROR;
3814
3815 e = qla2x00_alloc_work(vha, cmd);
3816 if (!e)
3817 return QLA_FUNCTION_FAILED;
3818
3819 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3820 e->u.gpnft.sp = sp;
3821
3822 return qla2x00_post_work(vha, e);
3823}
3824
4042static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, 3825static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
4043 struct srb *sp) 3826 struct srb *sp)
4044{ 3827{
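
The two helpers added above, qla2x00_post_gnnft_gpnft_done_work() and qla2x00_post_nvme_gpnft_work(), follow the driver's usual post-work shape: reject unexpected event codes, allocate a work element, attach the payload, and enqueue it for process context. A toy model of that shape (the enum values and return codes are stand-ins, and a real queue would append under a lock):

    /* Toy model of the post-work helpers: validate the event type,
     * allocate a work element, attach the payload, enqueue. */
    #include <stdio.h>
    #include <stdlib.h>

    enum evt { EVT_GPNFT_DONE, EVT_GNNFT_DONE, EVT_OTHER };

    struct work_evt {
        enum evt type;
        void *payload;
        struct work_evt *next;
    };

    static struct work_evt *queue_head;

    static int post_done_work(void *payload, enum evt type)
    {
        struct work_evt *e;

        if (type != EVT_GPNFT_DONE && type != EVT_GNNFT_DONE)
            return -1;              /* QLA_PARAMETER_ERROR analogue */

        e = calloc(1, sizeof(*e));
        if (!e)
            return -2;              /* QLA_FUNCTION_FAILED analogue */

        e->type = type;
        e->payload = payload;
        e->next = queue_head;       /* the real driver appends under a lock */
        queue_head = e;
        return 0;
    }

    int main(void)
    {
        int dummy_sp = 1;

        printf("post ok=%d\n", post_done_work(&dummy_sp, EVT_GNNFT_DONE));
        printf("bad type=%d\n", post_done_work(&dummy_sp, EVT_OTHER));
        return 0;
    }
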
@@ -4139,120 +3922,85 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
4139{ 3922{
4140 struct srb *sp = s; 3923 struct srb *sp = s;
4141 struct scsi_qla_host *vha = sp->vha; 3924 struct scsi_qla_host *vha = sp->vha;
4142 struct qla_work_evt *e;
4143 struct ct_sns_req *ct_req = 3925 struct ct_sns_req *ct_req =
4144 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3926 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
4145 u16 cmd = be16_to_cpu(ct_req->command); 3927 u16 cmd = be16_to_cpu(ct_req->command);
4146 u8 fc4_type = sp->gen2; 3928 u8 fc4_type = sp->gen2;
4147 unsigned long flags; 3929 unsigned long flags;
3930 int rc;
4148 3931
4149 /* gen2 field is holding the fc4type */ 3932 /* gen2 field is holding the fc4type */
4150 ql_dbg(ql_dbg_disc, vha, 0xffff, 3933 ql_dbg(ql_dbg_disc, vha, 0xffff,
4151 "Async done-%s res %x FC4Type %x\n", 3934 "Async done-%s res %x FC4Type %x\n",
4152 sp->name, res, sp->gen2); 3935 sp->name, res, sp->gen2);
4153 3936
3937 del_timer(&sp->u.iocb_cmd.timer);
3938 sp->rc = res;
4154 if (res) { 3939 if (res) {
4155 unsigned long flags; 3940 unsigned long flags;
3941 const char *name = sp->name;
4156 3942
4157 sp->free(sp); 3943 /*
4158 	spin_lock_irqsave(&vha->work_lock, flags);	3944 	 * We are in interrupt context; queue this sp up
4159 	vha->scan.scan_flags &= ~SF_SCANNING;	3945 	 * for GNNFT_DONE work so that all of its
4160 	vha->scan.scan_retry++;	3946 	 * resources can be freed.
4161 spin_unlock_irqrestore(&vha->work_lock, flags); 3947 */
3948 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3949 QLA_EVT_GNNFT_DONE);
3950 if (rc) {
3951 /* Cleanup here to prevent memory leak */
3952 qla24xx_sp_unmap(vha, sp);
4162 3953
4163 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { 3954 spin_lock_irqsave(&vha->work_lock, flags);
4164 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3955 vha->scan.scan_flags &= ~SF_SCANNING;
4165 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3956 vha->scan.scan_retry++;
4166 qla2xxx_wake_dpc(vha); 3957 spin_unlock_irqrestore(&vha->work_lock, flags);
4167 } else { 3958
4168 ql_dbg(ql_dbg_disc, sp->vha, 0xffff, 3959 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
4169 "Async done-%s rescan failed on all retries\n", 3960 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4170 sp->name); 3961 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3962 qla2xxx_wake_dpc(vha);
3963 } else {
3964 ql_dbg(ql_dbg_disc, vha, 0xffff,
3965 "Async done-%s rescan failed on all retries.\n",
3966 name);
3967 }
4171 } 3968 }
4172 return; 3969 return;
4173 } 3970 }
4174 3971
4175 if (!res) 3972 qla2x00_find_free_fcp_nvme_slot(vha, sp);
4176 qla2x00_find_free_fcp_nvme_slot(vha, sp);
4177 3973
4178 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled && 3974 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
4179 cmd == GNN_FT_CMD) { 3975 cmd == GNN_FT_CMD) {
4180 del_timer(&sp->u.iocb_cmd.timer);
4181 spin_lock_irqsave(&vha->work_lock, flags); 3976 spin_lock_irqsave(&vha->work_lock, flags);
4182 vha->scan.scan_flags &= ~SF_SCANNING; 3977 vha->scan.scan_flags &= ~SF_SCANNING;
4183 spin_unlock_irqrestore(&vha->work_lock, flags); 3978 spin_unlock_irqrestore(&vha->work_lock, flags);
4184 3979
4185 e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT); 3980 sp->rc = res;
4186 if (!e) { 3981 rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
4187 /* 3982 if (rc) {
4188 * please ignore kernel warning. Otherwise, 3983 qla24xx_sp_unmap(vha, sp);
4189 * we have mem leak.
4190 */
4191 if (sp->u.iocb_cmd.u.ctarg.req) {
4192 dma_free_coherent(&vha->hw->pdev->dev,
4193 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4194 sp->u.iocb_cmd.u.ctarg.req,
4195 sp->u.iocb_cmd.u.ctarg.req_dma);
4196 sp->u.iocb_cmd.u.ctarg.req = NULL;
4197 }
4198 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4199 dma_free_coherent(&vha->hw->pdev->dev,
4200 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4201 sp->u.iocb_cmd.u.ctarg.rsp,
4202 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4203 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4204 }
4205
4206 ql_dbg(ql_dbg_disc, vha, 0xffff,
4207 "Async done-%s unable to alloc work element\n",
4208 sp->name);
4209 sp->free(sp);
4210 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 3984 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4211 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 3985 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4212 return;
4213 } 3986 }
4214 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
4215 sp->rc = res;
4216 e->u.gpnft.sp = sp;
4217
4218 qla2x00_post_work(vha, e);
4219 return; 3987 return;
4220 } 3988 }
4221 3989
4222 if (cmd == GPN_FT_CMD) 3990 if (cmd == GPN_FT_CMD) {
4223 e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE); 3991 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
4224 else 3992 QLA_EVT_GPNFT_DONE);
4225 e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE); 3993 } else {
4226 if (!e) { 3994 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
4227 /* please ignore kernel warning. Otherwise, we have mem leak. */ 3995 QLA_EVT_GNNFT_DONE);
4228 if (sp->u.iocb_cmd.u.ctarg.req) { 3996 }
4229 dma_free_coherent(&vha->hw->pdev->dev,
4230 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4231 sp->u.iocb_cmd.u.ctarg.req,
4232 sp->u.iocb_cmd.u.ctarg.req_dma);
4233 sp->u.iocb_cmd.u.ctarg.req = NULL;
4234 }
4235 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4236 dma_free_coherent(&vha->hw->pdev->dev,
4237 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4238 sp->u.iocb_cmd.u.ctarg.rsp,
4239 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4240 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4241 }
4242 3997
4243 ql_dbg(ql_dbg_disc, vha, 0xffff, 3998 if (rc) {
4244 "Async done-%s unable to alloc work element\n", 3999 qla24xx_sp_unmap(vha, sp);
4245 sp->name);
4246 sp->free(sp);
4247 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4000 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4248 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4001 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4249 return; 4002 return;
4250 } 4003 }
4251
4252 sp->rc = res;
4253 e->u.iosb.sp = sp;
4254
4255 qla2x00_post_work(vha, e);
4256} 4004}
4257 4005
4258/* 4006/*
@@ -4285,11 +4033,13 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
4285 vha->scan.scan_flags &= ~SF_SCANNING; 4033 vha->scan.scan_flags &= ~SF_SCANNING;
4286 spin_unlock_irqrestore(&vha->work_lock, flags); 4034 spin_unlock_irqrestore(&vha->work_lock, flags);
4287 WARN_ON(1); 4035 WARN_ON(1);
4036 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4037 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4288 goto done_free_sp; 4038 goto done_free_sp;
4289 } 4039 }
4290 4040
4291 ql_dbg(ql_dbg_disc, vha, 0xfffff, 4041 ql_dbg(ql_dbg_disc, vha, 0xfffff,
4292 "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n", 4042 "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
4293 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size, 4043 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
4294 sp->u.iocb_cmd.u.ctarg.req_size); 4044 sp->u.iocb_cmd.u.ctarg.req_size);
4295 4045
@@ -4318,8 +4068,12 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
4318 sp->done = qla2x00_async_gpnft_gnnft_sp_done; 4068 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4319 4069
4320 rval = qla2x00_start_sp(sp); 4070 rval = qla2x00_start_sp(sp);
4321 if (rval != QLA_SUCCESS) 4071 if (rval != QLA_SUCCESS) {
4072 spin_lock_irqsave(&vha->work_lock, flags);
4073 vha->scan.scan_flags &= ~SF_SCANNING;
4074 spin_unlock_irqrestore(&vha->work_lock, flags);
4322 goto done_free_sp; 4075 goto done_free_sp;
4076 }
4323 4077
4324 ql_dbg(ql_dbg_disc, vha, 0xffff, 4078 ql_dbg(ql_dbg_disc, vha, 0xffff,
4325 "Async-%s hdl=%x FC4Type %x.\n", sp->name, 4079 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
@@ -4351,7 +4105,6 @@ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
4351{ 4105{
4352 ql_dbg(ql_dbg_disc, vha, 0xffff, 4106 ql_dbg(ql_dbg_disc, vha, 0xffff,
4353 "%s enter\n", __func__); 4107 "%s enter\n", __func__);
4354 del_timer(&sp->u.iocb_cmd.timer);
4355 qla24xx_async_gnnft(vha, sp, sp->gen2); 4108 qla24xx_async_gnnft(vha, sp, sp->gen2);
4356} 4109}
4357 4110
@@ -4444,9 +4197,9 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4444 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 4197 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4445 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 4198 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4446 4199
4447 rspsz = sizeof(struct ct_sns_gpnft_rsp) + 4200 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4448 ((vha->hw->max_fibre_devices - 1) * 4201 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4449 sizeof(struct ct_sns_gpn_ft_data)); 4202 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4450 4203
4451 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; 4204 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4452 /* CT_IU preamble */ 4205 /* CT_IU preamble */
@@ -4644,9 +4397,6 @@ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4644 __func__, fcport->port_name); 4397 __func__, fcport->port_name);
4645 return; 4398 return;
4646 } else if (ea->sp->gen1 != fcport->rscn_gen) { 4399 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4647 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
4648 __func__, __LINE__, fcport->port_name);
4649 qla24xx_post_gidpn_work(vha, fcport);
4650 return; 4400 return;
4651 } 4401 }
4652 4402
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b934977c5c26..c72d8012fe2a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -52,12 +52,14 @@ qla2x00_sp_timeout(struct timer_list *t)
52 struct srb_iocb *iocb; 52 struct srb_iocb *iocb;
53 struct req_que *req; 53 struct req_que *req;
54 unsigned long flags; 54 unsigned long flags;
55 struct qla_hw_data *ha = sp->vha->hw;
55 56
56 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 57 WARN_ON_ONCE(irqs_disabled());
58 spin_lock_irqsave(&ha->hardware_lock, flags);
57 req = sp->qpair->req; 59 req = sp->qpair->req;
58 req->outstanding_cmds[sp->handle] = NULL; 60 req->outstanding_cmds[sp->handle] = NULL;
59 iocb = &sp->u.iocb_cmd; 61 iocb = &sp->u.iocb_cmd;
60 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 62 spin_unlock_irqrestore(&ha->hardware_lock, flags);
61 iocb->timeout(sp); 63 iocb->timeout(sp);
62} 64}
63 65
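
The timeout hunk above adds WARN_ON_ONCE(irqs_disabled()) before taking the hardware lock, asserting that the timer callback runs with interrupts enabled. A userspace approximation of the warn-once idiom (the kernel macro is more elaborate; this keeps only the fire-once behavior):

    #include <stdio.h>

    static int irqs_disabled_sim;    /* stand-in for irqs_disabled() */

    #define WARN_ON_ONCE(cond)                                 \
        do {                                                   \
            static int warned;                                 \
            if ((cond) && !warned) {                           \
                warned = 1;                                    \
                fprintf(stderr, "warning: %s\n", #cond);       \
            }                                                  \
        } while (0)

    static void sp_timeout(void)
    {
        WARN_ON_ONCE(irqs_disabled_sim);
        /* ... take the lock, pluck the command, run the timeout hook ... */
    }

    int main(void)
    {
        sp_timeout();                /* no warning */
        irqs_disabled_sim = 1;
        sp_timeout();                /* warns once */
        sp_timeout();                /* suppressed thereafter */
        return 0;
    }
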
@@ -245,6 +247,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
245 247
246 } 248 }
247 249
250 ql_dbg(ql_dbg_disc, vha, 0x2072,
251 "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
252 "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
253 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
254 fcport->login_retry);
255
248 rval = qla2x00_start_sp(sp); 256 rval = qla2x00_start_sp(sp);
249 if (rval != QLA_SUCCESS) { 257 if (rval != QLA_SUCCESS) {
250 fcport->flags |= FCF_LOGIN_NEEDED; 258 fcport->flags |= FCF_LOGIN_NEEDED;
@@ -252,11 +260,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
252 goto done_free_sp; 260 goto done_free_sp;
253 } 261 }
254 262
255 ql_dbg(ql_dbg_disc, vha, 0x2072,
256 "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
257 "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
258 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
259 fcport->login_retry);
260 return rval; 263 return rval;
261 264
262done_free_sp: 265done_free_sp:
@@ -301,15 +304,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
301 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 304 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
302 305
303 sp->done = qla2x00_async_logout_sp_done; 306 sp->done = qla2x00_async_logout_sp_done;
304 rval = qla2x00_start_sp(sp);
305 if (rval != QLA_SUCCESS)
306 goto done_free_sp;
307 307
308 ql_dbg(ql_dbg_disc, vha, 0x2070, 308 ql_dbg(ql_dbg_disc, vha, 0x2070,
309 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n", 309 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
310 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 310 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
311 fcport->d_id.b.area, fcport->d_id.b.al_pa, 311 fcport->d_id.b.area, fcport->d_id.b.al_pa,
312 fcport->port_name); 312 fcport->port_name);
313
314 rval = qla2x00_start_sp(sp);
315 if (rval != QLA_SUCCESS)
316 goto done_free_sp;
313 return rval; 317 return rval;
314 318
315done_free_sp: 319done_free_sp:
@@ -396,6 +400,9 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
396 ql_dbg(ql_dbg_disc, vha, 0x2066, 400 ql_dbg(ql_dbg_disc, vha, 0x2066,
397 "%s %8phC: adisc fail: post delete\n", 401 "%s %8phC: adisc fail: post delete\n",
398 __func__, ea->fcport->port_name); 402 __func__, ea->fcport->port_name);
 403 	/* deleted = 0 & logout_on_delete = 1: force fw cleanup */
404 fcport->deleted = 0;
405 fcport->logout_on_delete = 1;
399 qlt_schedule_sess_for_deletion(ea->fcport); 406 qlt_schedule_sess_for_deletion(ea->fcport);
400 return; 407 return;
401 } 408 }
@@ -410,9 +417,8 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
410 __func__, ea->fcport->port_name); 417 __func__, ea->fcport->port_name);
411 return; 418 return;
412 } else if (ea->sp->gen1 != ea->fcport->rscn_gen) { 419 } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
413 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n", 420 qla_rscn_replay(fcport);
414 __func__, __LINE__, ea->fcport->port_name); 421 qlt_schedule_sess_for_deletion(fcport);
415 qla24xx_post_gidpn_work(vha, ea->fcport);
416 return; 422 return;
417 } 423 }
418 424
@@ -487,13 +493,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
487 sp->done = qla2x00_async_adisc_sp_done; 493 sp->done = qla2x00_async_adisc_sp_done;
488 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 494 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
489 lio->u.logio.flags |= SRB_LOGIN_RETRIED; 495 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
490 rval = qla2x00_start_sp(sp);
491 if (rval != QLA_SUCCESS)
492 goto done_free_sp;
493 496
494 ql_dbg(ql_dbg_disc, vha, 0x206f, 497 ql_dbg(ql_dbg_disc, vha, 0x206f,
495 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n", 498 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
496 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name); 499 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
500
501 rval = qla2x00_start_sp(sp);
502 if (rval != QLA_SUCCESS)
503 goto done_free_sp;
504
497 return rval; 505 return rval;
498 506
499done_free_sp: 507done_free_sp:
@@ -536,11 +544,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
536 } 544 }
537 545
538 if (fcport->last_rscn_gen != fcport->rscn_gen) { 546 if (fcport->last_rscn_gen != fcport->rscn_gen) {
539 ql_dbg(ql_dbg_disc, vha, 0x20df, 547 qla_rscn_replay(fcport);
540 "%s %8phC rscn gen changed rscn %d|%d \n", 548 qlt_schedule_sess_for_deletion(fcport);
541 __func__, fcport->port_name,
542 fcport->last_rscn_gen, fcport->rscn_gen);
543 qla24xx_post_gidpn_work(vha, fcport);
544 return; 549 return;
545 } else if (fcport->last_login_gen != fcport->login_gen) { 550 } else if (fcport->last_login_gen != fcport->login_gen) {
546 ql_dbg(ql_dbg_disc, vha, 0x20e0, 551 ql_dbg(ql_dbg_disc, vha, 0x20e0,
@@ -787,6 +792,10 @@ qla24xx_async_gnl_sp_done(void *s, int res)
787 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1], 792 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
788 sp->u.iocb_cmd.u.mbx.in_mb[2]); 793 sp->u.iocb_cmd.u.mbx.in_mb[2]);
789 794
795 if (res == QLA_FUNCTION_TIMEOUT)
796 return;
797
798 sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
790 memset(&ea, 0, sizeof(ea)); 799 memset(&ea, 0, sizeof(ea));
791 ea.sp = sp; 800 ea.sp = sp;
792 ea.rc = res; 801 ea.rc = res;
@@ -814,25 +823,24 @@ qla24xx_async_gnl_sp_done(void *s, int res)
814 (loop_id & 0x7fff)); 823 (loop_id & 0x7fff));
815 } 824 }
816 825
817 spin_lock_irqsave(&vha->gnl.fcports_lock, flags); 826 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
818 827
819 INIT_LIST_HEAD(&h); 828 INIT_LIST_HEAD(&h);
820 fcport = tf = NULL; 829 fcport = tf = NULL;
821 if (!list_empty(&vha->gnl.fcports)) 830 if (!list_empty(&vha->gnl.fcports))
822 list_splice_init(&vha->gnl.fcports, &h); 831 list_splice_init(&vha->gnl.fcports, &h);
832 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
823 833
824 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { 834 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
825 list_del_init(&fcport->gnl_entry); 835 list_del_init(&fcport->gnl_entry);
826 spin_lock(&vha->hw->tgt.sess_lock); 836 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
827 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 837 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
828 spin_unlock(&vha->hw->tgt.sess_lock); 838 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
829 ea.fcport = fcport; 839 ea.fcport = fcport;
830 840
831 qla2x00_fcport_event_handler(vha, &ea); 841 qla2x00_fcport_event_handler(vha, &ea);
832 } 842 }
833 spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
834 843
835 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
836 /* create new fcport if fw has knowledge of new sessions */ 844 /* create new fcport if fw has knowledge of new sessions */
837 for (i = 0; i < n; i++) { 845 for (i = 0; i < n; i++) {
838 port_id_t id; 846 port_id_t id;
@@ -865,6 +873,8 @@ qla24xx_async_gnl_sp_done(void *s, int res)
865 } 873 }
866 } 874 }
867 875
876 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
877 vha->gnl.sent = 0;
868 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 878 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
869 879
870 sp->free(sp); 880 sp->free(sp);
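
The reworked gnl completion above splices the whole gnl.fcports list onto a local head under tgt.sess_lock and walks it afterwards, retaking the lock only for the short per-entry flag update. A compact model of the splice-then-walk pattern, using a plain mutex and a singly linked list in place of the driver's locking:

    /* Splice-then-walk: detach the whole list under the lock, then do
     * the (potentially slow) per-entry work without holding it. */
    #include <pthread.h>
    #include <stdio.h>

    struct node { int id; struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *pending;

    static void process_all(void)
    {
        struct node *local, *n;

        pthread_mutex_lock(&lock);
        local = pending;            /* splice: steal the entire list */
        pending = NULL;
        pthread_mutex_unlock(&lock);

        for (n = local; n; n = n->next)
            printf("handling entry %d outside the lock\n", n->id);
    }

    int main(void)
    {
        struct node a = { 1, NULL }, b = { 2, &a };

        pending = &b;
        process_all();
        return 0;
    }
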
@@ -884,27 +894,24 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
884 ql_dbg(ql_dbg_disc, vha, 0x20d9, 894 ql_dbg(ql_dbg_disc, vha, 0x20d9,
885 "Async-gnlist WWPN %8phC \n", fcport->port_name); 895 "Async-gnlist WWPN %8phC \n", fcport->port_name);
886 896
887 spin_lock_irqsave(&vha->gnl.fcports_lock, flags); 897 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
888 if (!list_empty(&fcport->gnl_entry)) { 898 fcport->flags |= FCF_ASYNC_SENT;
889 spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
890 rval = QLA_SUCCESS;
891 goto done;
892 }
893
894 spin_lock(&vha->hw->tgt.sess_lock);
895 fcport->disc_state = DSC_GNL; 899 fcport->disc_state = DSC_GNL;
896 fcport->last_rscn_gen = fcport->rscn_gen; 900 fcport->last_rscn_gen = fcport->rscn_gen;
897 fcport->last_login_gen = fcport->login_gen; 901 fcport->last_login_gen = fcport->login_gen;
898 spin_unlock(&vha->hw->tgt.sess_lock);
899 902
900 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); 903 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
901 spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); 904 if (vha->gnl.sent) {
905 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
906 return QLA_SUCCESS;
907 }
908 vha->gnl.sent = 1;
909 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
902 910
903 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 911 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
904 if (!sp) 912 if (!sp)
905 goto done; 913 goto done;
906 914
907 fcport->flags |= FCF_ASYNC_SENT;
908 sp->type = SRB_MB_IOCB; 915 sp->type = SRB_MB_IOCB;
909 sp->name = "gnlist"; 916 sp->name = "gnlist";
910 sp->gen1 = fcport->rscn_gen; 917 sp->gen1 = fcport->rscn_gen;
@@ -970,8 +977,13 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
970 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n", 977 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
971 sp->name, res, fcport->port_name, mb[1], mb[2]); 978 sp->name, res, fcport->port_name, mb[1], mb[2]);
972 979
973 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 980 if (res == QLA_FUNCTION_TIMEOUT) {
981 dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
982 sp->u.iocb_cmd.u.mbx.in_dma);
983 return;
984 }
974 985
986 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
975 memset(&ea, 0, sizeof(ea)); 987 memset(&ea, 0, sizeof(ea));
976 ea.event = FCME_GPDB_DONE; 988 ea.event = FCME_GPDB_DONE;
977 ea.fcport = fcport; 989 ea.fcport = fcport;
@@ -1147,14 +1159,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1147 1159
1148 sp->done = qla24xx_async_gpdb_sp_done; 1160 sp->done = qla24xx_async_gpdb_sp_done;
1149 1161
1150 rval = qla2x00_start_sp(sp);
1151 if (rval != QLA_SUCCESS)
1152 goto done_free_sp;
1153
1154 ql_dbg(ql_dbg_disc, vha, 0x20dc, 1162 ql_dbg(ql_dbg_disc, vha, 0x20dc,
1155 "Async-%s %8phC hndl %x opt %x\n", 1163 "Async-%s %8phC hndl %x opt %x\n",
1156 sp->name, fcport->port_name, sp->handle, opt); 1164 sp->name, fcport->port_name, sp->handle, opt);
1157 1165
1166 rval = qla2x00_start_sp(sp);
1167 if (rval != QLA_SUCCESS)
1168 goto done_free_sp;
1158 return rval; 1169 return rval;
1159 1170
1160done_free_sp: 1171done_free_sp:
@@ -1182,11 +1193,9 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1182 vha->fcport_count++; 1193 vha->fcport_count++;
1183 ea->fcport->login_succ = 1; 1194 ea->fcport->login_succ = 1;
1184 1195
1185 ql_dbg(ql_dbg_disc, vha, 0x20d6, 1196 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1186 "%s %d %8phC post upd_fcport fcp_cnt %d\n", 1197 qla24xx_sched_upd_fcport(ea->fcport);
1187 __func__, __LINE__, ea->fcport->port_name, 1198 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1188 vha->fcport_count);
1189 qla24xx_post_upd_fcport_work(vha, ea->fcport);
1190 } else if (ea->fcport->login_succ) { 1199 } else if (ea->fcport->login_succ) {
1191 /* 1200 /*
1192 * We have an existing session. A late RSCN delivery 1201 * We have an existing session. A late RSCN delivery
@@ -1226,6 +1235,19 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1226 else 1235 else
1227 ls = pd->current_login_state & 0xf; 1236 ls = pd->current_login_state & 0xf;
1228 1237
1238 if (ea->sp->gen2 != fcport->login_gen) {
1239 /* target side must have changed it. */
1240
1241 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1242 "%s %8phC generation changed\n",
1243 __func__, fcport->port_name);
1244 return;
1245 } else if (ea->sp->gen1 != fcport->rscn_gen) {
1246 qla_rscn_replay(fcport);
1247 qlt_schedule_sess_for_deletion(fcport);
1248 return;
1249 }
1250
1229 switch (ls) { 1251 switch (ls) {
1230 case PDS_PRLI_COMPLETE: 1252 case PDS_PRLI_COMPLETE:
1231 __qla24xx_parse_gpdb(vha, fcport, pd); 1253 __qla24xx_parse_gpdb(vha, fcport, pd);
@@ -1280,7 +1302,8 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1280 login = 1; 1302 login = 1;
1281 } 1303 }
1282 1304
1283 if (login) { 1305 if (login && fcport->login_retry) {
1306 fcport->login_retry--;
1284 if (fcport->loop_id == FC_NO_LOOP_ID) { 1307 if (fcport->loop_id == FC_NO_LOOP_ID) {
1285 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 1308 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
1286 rc = qla2x00_find_new_loop_id(vha, fcport); 1309 rc = qla2x00_find_new_loop_id(vha, fcport);
@@ -1304,14 +1327,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1304{ 1327{
1305 u16 data[2]; 1328 u16 data[2];
1306 u64 wwn; 1329 u64 wwn;
1330 u16 sec;
1307 1331
1308 ql_dbg(ql_dbg_disc, vha, 0x20d8, 1332 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
1309 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n", 1333 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
1310 __func__, fcport->port_name, fcport->disc_state, 1334 __func__, fcport->port_name, fcport->disc_state,
1311 fcport->fw_login_state, fcport->login_pause, fcport->flags, 1335 fcport->fw_login_state, fcport->login_pause, fcport->flags,
1312 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, 1336 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
1313 fcport->login_gen, fcport->login_retry, 1337 fcport->login_gen, fcport->loop_id, fcport->scan_state);
1314 fcport->loop_id, fcport->scan_state);
1315 1338
1316 if (fcport->scan_state != QLA_FCPORT_FOUND) 1339 if (fcport->scan_state != QLA_FCPORT_FOUND)
1317 return 0; 1340 return 0;
@@ -1410,22 +1433,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1410 break; 1433 break;
1411 1434
1412 case DSC_LOGIN_FAILED: 1435 case DSC_LOGIN_FAILED:
1413 fcport->login_retry--;
1414 ql_dbg(ql_dbg_disc, vha, 0x20d0,
1415 "%s %d %8phC post gidpn\n",
1416 __func__, __LINE__, fcport->port_name);
1417 if (N2N_TOPO(vha->hw)) 1436 if (N2N_TOPO(vha->hw))
1418 qla_chk_n2n_b4_login(vha, fcport); 1437 qla_chk_n2n_b4_login(vha, fcport);
1419 else 1438 else
1420 qla24xx_post_gidpn_work(vha, fcport); 1439 qlt_schedule_sess_for_deletion(fcport);
1421 break; 1440 break;
1422 1441
1423 case DSC_LOGIN_COMPLETE: 1442 case DSC_LOGIN_COMPLETE:
1424 /* recheck login state */ 1443 /* recheck login state */
1425 ql_dbg(ql_dbg_disc, vha, 0x20d1,
1426 "%s %d %8phC post adisc\n",
1427 __func__, __LINE__, fcport->port_name);
1428 fcport->login_retry--;
1429 data[0] = data[1] = 0; 1444 data[0] = data[1] = 0;
1430 qla2x00_post_async_adisc_work(vha, fcport, data); 1445 qla2x00_post_async_adisc_work(vha, fcport, data);
1431 break; 1446 break;
@@ -1435,6 +1450,22 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1435 qla24xx_post_prli_work(vha, fcport); 1450 qla24xx_post_prli_work(vha, fcport);
1436 break; 1451 break;
1437 1452
1453 case DSC_UPD_FCPORT:
1454 sec = jiffies_to_msecs(jiffies -
1455 fcport->jiffies_at_registration)/1000;
1456 if (fcport->sec_since_registration < sec && sec &&
1457 !(sec % 60)) {
1458 fcport->sec_since_registration = sec;
1459 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
1460 "%s %8phC - Slow Rport registration(%d Sec)\n",
1461 __func__, fcport->port_name, sec);
1462 }
1463
1464 if (fcport->next_disc_state != DSC_DELETE_PEND)
1465 fcport->next_disc_state = DSC_ADISC;
1466 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1467 break;
1468
1438 default: 1469 default:
1439 break; 1470 break;
1440 } 1471 }
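
The new DSC_UPD_FCPORT case rate-limits its "slow Rport registration" message to once per elapsed minute by remembering the last second count it reported. The same gate in isolation (field names are simplified):

    /* Emit at most one message per elapsed minute by recording the
     * last second count that was reported. */
    #include <stdio.h>

    struct port { unsigned int last_reported; };

    static void check_slow_registration(struct port *p, unsigned int sec)
    {
        if (p->last_reported < sec && sec && !(sec % 60)) {
            p->last_reported = sec;
            printf("slow rport registration (%u sec)\n", sec);
        }
    }

    int main(void)
    {
        struct port p = { 0 };
        unsigned int s;

        for (s = 0; s <= 180; s++)   /* fires at 60, 120, 180 only */
            check_slow_registration(&p, s);
        return 0;
    }
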
@@ -1513,7 +1544,6 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1513 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n", 1544 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
1514 __func__, __LINE__, fcport->port_name); 1545 __func__, __LINE__, fcport->port_name);
1515 1546
1516 qla24xx_post_gidpn_work(vha, fcport);
1517 return; 1547 return;
1518 } 1548 }
1519 1549
@@ -1533,7 +1563,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
1533{ 1563{
1534 fc_port_t *f, *tf; 1564 fc_port_t *f, *tf;
1535 uint32_t id = 0, mask, rid; 1565 uint32_t id = 0, mask, rid;
1536 unsigned long flags;
1537 fc_port_t *fcport; 1566 fc_port_t *fcport;
1538 1567
1539 switch (ea->event) { 1568 switch (ea->event) {
@@ -1548,10 +1577,16 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
1548 return; 1577 return;
1549 switch (ea->id.b.rsvd_1) { 1578 switch (ea->id.b.rsvd_1) {
1550 case RSCN_PORT_ADDR: 1579 case RSCN_PORT_ADDR:
1580#define BIGSCAN 1
1581#if defined BIGSCAN & BIGSCAN > 0
1582 {
1583 unsigned long flags;
1551 fcport = qla2x00_find_fcport_by_nportid 1584 fcport = qla2x00_find_fcport_by_nportid
1552 (vha, &ea->id, 1); 1585 (vha, &ea->id, 1);
1553 if (fcport) 1586 if (fcport) {
1554 fcport->rscn_rcvd = 1; 1587 fcport->scan_needed = 1;
1588 fcport->rscn_gen++;
1589 }
1555 1590
1556 spin_lock_irqsave(&vha->work_lock, flags); 1591 spin_lock_irqsave(&vha->work_lock, flags);
1557 if (vha->scan.scan_flags == 0) { 1592 if (vha->scan.scan_flags == 0) {
@@ -1561,7 +1596,26 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
1561 schedule_delayed_work(&vha->scan.scan_work, 5); 1596 schedule_delayed_work(&vha->scan.scan_work, 5);
1562 } 1597 }
1563 spin_unlock_irqrestore(&vha->work_lock, flags); 1598 spin_unlock_irqrestore(&vha->work_lock, flags);
1564 1599 }
1600#else
1601 {
1602 int rc;
1603 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1604 if (!fcport) {
1605 /* cable moved */
1606 rc = qla24xx_post_gpnid_work(vha, &ea->id);
1607 if (rc) {
1608 ql_log(ql_log_warn, vha, 0xd044,
1609 "RSCN GPNID work failed %06x\n",
1610 ea->id.b24);
1611 }
1612 } else {
1613 ea->fcport = fcport;
1614 fcport->scan_needed = 1;
1615 qla24xx_handle_rscn_event(fcport, ea);
1616 }
1617 }
1618#endif
1565 break; 1619 break;
1566 case RSCN_AREA_ADDR: 1620 case RSCN_AREA_ADDR:
1567 case RSCN_DOM_ADDR: 1621 case RSCN_DOM_ADDR:
@@ -1597,9 +1651,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
1597 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1651 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1598 } 1652 }
1599 break; 1653 break;
1600 case FCME_GIDPN_DONE:
1601 qla24xx_handle_gidpn_event(vha, ea);
1602 break;
1603 case FCME_GNL_DONE: 1654 case FCME_GNL_DONE:
1604 qla24xx_handle_gnl_done_event(vha, ea); 1655 qla24xx_handle_gnl_done_event(vha, ea);
1605 break; 1656 break;
@@ -1639,6 +1690,34 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
1639 } 1690 }
1640} 1691}
1641 1692
1693/*
 1694 * RSCN(s) came in for this fcport, but they could not
 1695 * be consumed by the fcport
1696 */
1697void qla_rscn_replay(fc_port_t *fcport)
1698{
1699 struct event_arg ea;
1700
1701 switch (fcport->disc_state) {
1702 case DSC_DELETE_PEND:
1703 return;
1704 default:
1705 break;
1706 }
1707
1708 if (fcport->scan_needed) {
1709 memset(&ea, 0, sizeof(ea));
1710 ea.event = FCME_RSCN;
1711 ea.id = fcport->d_id;
1712 ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1713#if defined BIGSCAN & BIGSCAN > 0
1714 qla2x00_fcport_event_handler(fcport->vha, &ea);
1715#else
1716 qla24xx_post_gpnid_work(fcport->vha, &ea.id);
1717#endif
1718 }
1719}
1720
1642static void 1721static void
1643qla2x00_tmf_iocb_timeout(void *data) 1722qla2x00_tmf_iocb_timeout(void *data)
1644{ 1723{
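
qla_rscn_replay() above re-injects a synthetic port-address RSCN for a port whose earlier RSCN could not be consumed, unless a deletion is already pending. A reduced model of that flow (the event and state types are stand-ins for the driver's event_arg and disc_state):

    #include <stdio.h>
    #include <string.h>

    enum disc_state { DSC_LOGIN_COMPLETE, DSC_DELETE_PEND };

    struct event_arg { int is_rscn; unsigned int id; };
    struct port {
        enum disc_state state;
        int scan_needed;
        unsigned int d_id;
    };

    static void handle_event(struct event_arg *ea)
    {
        printf("replayed RSCN for %06x\n", ea->id);
    }

    static void rscn_replay(struct port *p)
    {
        struct event_arg ea;

        if (p->state == DSC_DELETE_PEND)
            return;                 /* deletion wins; nothing to replay */

        if (p->scan_needed) {
            memset(&ea, 0, sizeof(ea));
            ea.is_rscn = 1;         /* synthesize a port-address RSCN */
            ea.id = p->d_id;
            handle_event(&ea);      /* feed it back through the handler */
        }
    }

    int main(void)
    {
        struct port p = { DSC_LOGIN_COMPLETE, 1, 0x010203 };

        rscn_replay(&p);
        return 0;
    }
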
@@ -1684,15 +1763,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
1684 tm_iocb->u.tmf.data = tag; 1763 tm_iocb->u.tmf.data = tag;
1685 sp->done = qla2x00_tmf_sp_done; 1764 sp->done = qla2x00_tmf_sp_done;
1686 1765
1687 rval = qla2x00_start_sp(sp);
1688 if (rval != QLA_SUCCESS)
1689 goto done_free_sp;
1690
1691 ql_dbg(ql_dbg_taskm, vha, 0x802f, 1766 ql_dbg(ql_dbg_taskm, vha, 0x802f,
1692 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", 1767 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
1693 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 1768 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
1694 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1769 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1695 1770
1771 rval = qla2x00_start_sp(sp);
1772 if (rval != QLA_SUCCESS)
1773 goto done_free_sp;
1696 wait_for_completion(&tm_iocb->u.tmf.comp); 1774 wait_for_completion(&tm_iocb->u.tmf.comp);
1697 1775
1698 rval = tm_iocb->u.tmf.data; 1776 rval = tm_iocb->u.tmf.data;
@@ -1747,47 +1825,46 @@ int
1747qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) 1825qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
1748{ 1826{
1749 scsi_qla_host_t *vha = cmd_sp->vha; 1827 scsi_qla_host_t *vha = cmd_sp->vha;
1750 fc_port_t *fcport = cmd_sp->fcport;
1751 struct srb_iocb *abt_iocb; 1828 struct srb_iocb *abt_iocb;
1752 srb_t *sp; 1829 srb_t *sp;
1753 int rval = QLA_FUNCTION_FAILED; 1830 int rval = QLA_FUNCTION_FAILED;
1754 1831
1755 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 1832 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
1833 GFP_KERNEL);
1756 if (!sp) 1834 if (!sp)
1757 goto done; 1835 goto done;
1758 1836
1759 abt_iocb = &sp->u.iocb_cmd; 1837 abt_iocb = &sp->u.iocb_cmd;
1760 sp->type = SRB_ABT_CMD; 1838 sp->type = SRB_ABT_CMD;
1761 sp->name = "abort"; 1839 sp->name = "abort";
1840 sp->qpair = cmd_sp->qpair;
1762 if (wait) 1841 if (wait)
1763 sp->flags = SRB_WAKEUP_ON_COMP; 1842 sp->flags = SRB_WAKEUP_ON_COMP;
1764 1843
1765 abt_iocb->timeout = qla24xx_abort_iocb_timeout; 1844 abt_iocb->timeout = qla24xx_abort_iocb_timeout;
1766 init_completion(&abt_iocb->u.abt.comp); 1845 init_completion(&abt_iocb->u.abt.comp);
1767 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));	1846 	/* FW can send 2 ABTS's, each with a 20s timeout */
1847 qla2x00_init_timer(sp, 42);
1768 1848
1769 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; 1849 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
1770 1850 abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
1771 if (vha->flags.qpairs_available && cmd_sp->qpair)
1772 abt_iocb->u.abt.req_que_no =
1773 cpu_to_le16(cmd_sp->qpair->req->id);
1774 else
1775 abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
1776 1851
1777 sp->done = qla24xx_abort_sp_done; 1852 sp->done = qla24xx_abort_sp_done;
1778 1853
1854 ql_dbg(ql_dbg_async, vha, 0x507c,
1855 "Abort command issued - hdl=%x, type=%x\n",
1856 cmd_sp->handle, cmd_sp->type);
1857
1779 rval = qla2x00_start_sp(sp); 1858 rval = qla2x00_start_sp(sp);
1780 if (rval != QLA_SUCCESS) 1859 if (rval != QLA_SUCCESS)
1781 goto done_free_sp; 1860 goto done_free_sp;
1782 1861
1783 ql_dbg(ql_dbg_async, vha, 0x507c,
1784 "Abort command issued - hdl=%x, target_id=%x\n",
1785 cmd_sp->handle, fcport->tgt_id);
1786
1787 if (wait) { 1862 if (wait) {
1788 wait_for_completion(&abt_iocb->u.abt.comp); 1863 wait_for_completion(&abt_iocb->u.abt.comp);
1789 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? 1864 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
1790 QLA_SUCCESS : QLA_FUNCTION_FAILED; 1865 QLA_SUCCESS : QLA_FUNCTION_FAILED;
1866 } else {
1867 goto done;
1791 } 1868 }
1792 1869
1793done_free_sp: 1870done_free_sp:
@@ -1803,19 +1880,17 @@ qla24xx_async_abort_command(srb_t *sp)
1803 1880
1804 uint32_t handle; 1881 uint32_t handle;
1805 fc_port_t *fcport = sp->fcport; 1882 fc_port_t *fcport = sp->fcport;
1883 struct qla_qpair *qpair = sp->qpair;
1806 struct scsi_qla_host *vha = fcport->vha; 1884 struct scsi_qla_host *vha = fcport->vha;
1807 struct qla_hw_data *ha = vha->hw; 1885 struct req_que *req = qpair->req;
1808 struct req_que *req = vha->req;
1809
1810 if (vha->flags.qpairs_available && sp->qpair)
1811 req = sp->qpair->req;
1812 1886
1813 spin_lock_irqsave(&ha->hardware_lock, flags); 1887 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1814 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 1888 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1815 if (req->outstanding_cmds[handle] == sp) 1889 if (req->outstanding_cmds[handle] == sp)
1816 break; 1890 break;
1817 } 1891 }
1818 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1892 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1893
1819 if (handle == req->num_outstanding_cmds) { 1894 if (handle == req->num_outstanding_cmds) {
1820 /* Command not found. */ 1895 /* Command not found. */
1821 return QLA_FUNCTION_FAILED; 1896 return QLA_FUNCTION_FAILED;
@@ -1876,7 +1951,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1876 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n", 1951 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
1877 __func__, fcport->port_name, fcport->disc_state, 1952 __func__, fcport->port_name, fcport->disc_state,
1878 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen, 1953 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
1879 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, 1954 ea->sp->gen1, fcport->rscn_gen,
1880 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]); 1955 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
1881 1956
1882 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || 1957 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
@@ -1898,9 +1973,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1898 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1973 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1899 return; 1974 return;
1900 } else if (ea->sp->gen1 != fcport->rscn_gen) { 1975 } else if (ea->sp->gen1 != fcport->rscn_gen) {
1901 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n", 1976 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1902 __func__, __LINE__, fcport->port_name); 1977 "%s %8phC RSCN generation changed\n",
1903 qla24xx_post_gidpn_work(vha, fcport); 1978 __func__, fcport->port_name);
1979 qla_rscn_replay(fcport);
1980 qlt_schedule_sess_for_deletion(fcport);
1904 return; 1981 return;
1905 } 1982 }
1906 1983
@@ -1952,25 +2029,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1952 cid.b.rsvd_1 = 0; 2029 cid.b.rsvd_1 = 0;
1953 2030
1954 ql_dbg(ql_dbg_disc, vha, 0x20ec, 2031 ql_dbg(ql_dbg_disc, vha, 0x20ec,
1955 "%s %d %8phC LoopID 0x%x in use post gnl\n", 2032 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
1956 __func__, __LINE__, ea->fcport->port_name, 2033 __func__, __LINE__, ea->fcport->port_name,
1957 ea->fcport->loop_id); 2034 ea->fcport->loop_id, cid.b24);
1958 2035
1959 if (IS_SW_RESV_ADDR(cid)) { 2036 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
1960 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); 2037 ea->fcport->loop_id = FC_NO_LOOP_ID;
1961 ea->fcport->loop_id = FC_NO_LOOP_ID;
1962 } else {
1963 qla2x00_clear_loop_id(ea->fcport);
1964 }
1965 qla24xx_post_gnl_work(vha, ea->fcport); 2038 qla24xx_post_gnl_work(vha, ea->fcport);
1966 break; 2039 break;
1967 case MBS_PORT_ID_USED: 2040 case MBS_PORT_ID_USED:
1968 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1969 "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
1970 __func__, __LINE__, ea->fcport->port_name,
1971 ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
1972 ea->fcport->d_id.b.al_pa);
1973
1974 lid = ea->iop[1] & 0xffff; 2041 lid = ea->iop[1] & 0xffff;
1975 qlt_find_sess_invalidate_other(vha, 2042 qlt_find_sess_invalidate_other(vha,
1976 wwn_to_u64(ea->fcport->port_name), 2043 wwn_to_u64(ea->fcport->port_name),
@@ -1989,8 +2056,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1989 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n", 2056 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
1990 __func__, __LINE__, ea->fcport->port_name, 2057 __func__, __LINE__, ea->fcport->port_name,
1991 ea->fcport->d_id.b24, lid); 2058 ea->fcport->d_id.b24, lid);
1992 qla2x00_clear_loop_id(ea->fcport);
1993 qla24xx_post_gidpn_work(vha, ea->fcport);
1994 } else { 2059 } else {
1995 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2060 ql_dbg(ql_dbg_disc, vha, 0x20ed,
1996 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n", 2061 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
@@ -2018,26 +2083,6 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
2018 return; 2083 return;
2019} 2084}
2020 2085
2021void
2022qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
2023 uint16_t *data)
2024{
2025 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2026 if (data[0] == MBS_COMMAND_COMPLETE) {
2027 qla2x00_update_fcport(vha, fcport);
2028
2029 return;
2030 }
2031
2032 /* Retry login. */
2033 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
2034 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2035 else
2036 qla2x00_mark_device_lost(vha, fcport, 1, 0);
2037
2038 return;
2039}
2040
2041/****************************************************************************/ 2086/****************************************************************************/
2042/* QLogic ISP2x00 Hardware Support Functions. */ 2087/* QLogic ISP2x00 Hardware Support Functions. */
2043/****************************************************************************/ 2088/****************************************************************************/
@@ -3527,6 +3572,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
3527 if (rval == QLA_SUCCESS) { 3572 if (rval == QLA_SUCCESS) {
3528 qla24xx_detect_sfp(vha); 3573 qla24xx_detect_sfp(vha);
3529 3574
3575 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
3576 (ha->zio_mode == QLA_ZIO_MODE_6))
3577 qla27xx_set_zio_threshold(vha,
3578 ha->last_zio_threshold);
3579
3530 rval = qla2x00_set_exlogins_buffer(vha); 3580 rval = qla2x00_set_exlogins_buffer(vha);
3531 if (rval != QLA_SUCCESS) 3581 if (rval != QLA_SUCCESS)
3532 goto failed; 3582 goto failed;
@@ -4015,6 +4065,7 @@ next_check:
4015 ql_dbg(ql_dbg_init, vha, 0x00d3, 4065 ql_dbg(ql_dbg_init, vha, 0x00d3,
4016 "Init Firmware -- success.\n"); 4066 "Init Firmware -- success.\n");
4017 QLA_FW_STARTED(ha); 4067 QLA_FW_STARTED(ha);
4068 vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
4018 } 4069 }
4019 4070
4020 return (rval); 4071 return (rval);
@@ -4728,6 +4779,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
4728 fcport = NULL; 4779 fcport = NULL;
4729 } 4780 }
4730 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); 4781 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
4782 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
4731 INIT_LIST_HEAD(&fcport->gnl_entry); 4783 INIT_LIST_HEAD(&fcport->gnl_entry);
4732 INIT_LIST_HEAD(&fcport->list); 4784 INIT_LIST_HEAD(&fcport->list);
4733 4785
@@ -4853,19 +4905,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
4853 */ 4905 */
4854 if (qla_tgt_mode_enabled(vha) || 4906 if (qla_tgt_mode_enabled(vha) ||
4855 qla_dual_mode_enabled(vha)) { 4907 qla_dual_mode_enabled(vha)) {
4856 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) { 4908 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4857 spin_lock_irqsave(&ha->tgt.atio_lock, 4909 qlt_24xx_process_atio_queue(vha, 0);
4858 flags); 4910 spin_unlock_irqrestore(&ha->tgt.atio_lock,
4859 qlt_24xx_process_atio_queue(vha, 0); 4911 flags);
4860 spin_unlock_irqrestore(
4861 &ha->tgt.atio_lock, flags);
4862 } else {
4863 spin_lock_irqsave(&ha->hardware_lock,
4864 flags);
4865 qlt_24xx_process_atio_queue(vha, 1);
4866 spin_unlock_irqrestore(
4867 &ha->hardware_lock, flags);
4868 }
4869 } 4912 }
4870 } 4913 }
4871 } 4914 }
@@ -4958,6 +5001,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4958 (uint8_t *)ha->gid_list, 5001 (uint8_t *)ha->gid_list,
4959 entries * sizeof(struct gid_list_info)); 5002 entries * sizeof(struct gid_list_info));
4960 5003
5004 if (entries == 0) {
5005 spin_lock_irqsave(&vha->work_lock, flags);
5006 vha->scan.scan_retry++;
5007 spin_unlock_irqrestore(&vha->work_lock, flags);
5008
5009 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5010 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5011 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5012 }
5013 } else {
5014 vha->scan.scan_retry = 0;
5015 }
5016
4961 list_for_each_entry(fcport, &vha->vp_fcports, list) { 5017 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4962 fcport->scan_state = QLA_FCPORT_SCAN; 5018 fcport->scan_state = QLA_FCPORT_SCAN;
4963 } 5019 }
@@ -5223,20 +5279,20 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
5223void 5279void
5224qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 5280qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5225{ 5281{
5226 fcport->vha = vha;
5227
5228 if (IS_SW_RESV_ADDR(fcport->d_id)) 5282 if (IS_SW_RESV_ADDR(fcport->d_id))
5229 return; 5283 return;
5230 5284
5285 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
5286 __func__, fcport->port_name);
5287
5288 fcport->disc_state = DSC_UPD_FCPORT;
5289 fcport->login_retry = vha->hw->login_retry_count;
5231 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 5290 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5232 fcport->disc_state = DSC_LOGIN_COMPLETE;
5233 fcport->deleted = 0; 5291 fcport->deleted = 0;
5234 fcport->logout_on_delete = 1; 5292 fcport->logout_on_delete = 1;
5235 fcport->login_retry = vha->hw->login_retry_count; 5293 fcport->login_retry = vha->hw->login_retry_count;
5236 fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0; 5294 fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
5237 5295
5238 qla2x00_iidma_fcport(vha, fcport);
5239
5240 switch (vha->hw->current_topology) { 5296 switch (vha->hw->current_topology) {
5241 case ISP_CFG_N: 5297 case ISP_CFG_N:
5242 case ISP_CFG_NL: 5298 case ISP_CFG_NL:
@@ -5246,6 +5302,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5246 break; 5302 break;
5247 } 5303 }
5248 5304
5305 qla2x00_iidma_fcport(vha, fcport);
5306
5249 if (fcport->fc4f_nvme) { 5307 if (fcport->fc4f_nvme) {
5250 qla_nvme_register_remote(vha, fcport); 5308 qla_nvme_register_remote(vha, fcport);
5251 fcport->disc_state = DSC_LOGIN_COMPLETE; 5309 fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -5274,6 +5332,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5274 break; 5332 break;
5275 } 5333 }
5276 5334
5335 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
5336
5277 if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { 5337 if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
5278 if (fcport->id_changed) { 5338 if (fcport->id_changed) {
5279 fcport->id_changed = 0; 5339 fcport->id_changed = 0;
@@ -5290,7 +5350,36 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5290 qla24xx_post_gpsc_work(vha, fcport); 5350 qla24xx_post_gpsc_work(vha, fcport);
5291 } 5351 }
5292 } 5352 }
5293 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 5353
5354 fcport->disc_state = DSC_LOGIN_COMPLETE;
5355}
5356
5357void qla_register_fcport_fn(struct work_struct *work)
5358{
5359 fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
5360 u32 rscn_gen = fcport->rscn_gen;
5361 u16 data[2];
5362
5363 if (IS_SW_RESV_ADDR(fcport->d_id))
5364 return;
5365
5366 qla2x00_update_fcport(fcport->vha, fcport);
5367
5368 if (rscn_gen != fcport->rscn_gen) {
 5369 		/* RSCN(s) came in during registration */
5370 switch (fcport->next_disc_state) {
5371 case DSC_DELETE_PEND:
5372 qlt_schedule_sess_for_deletion(fcport);
5373 break;
5374 case DSC_ADISC:
5375 data[0] = data[1] = 0;
5376 qla2x00_post_async_adisc_work(fcport->vha, fcport,
5377 data);
5378 break;
5379 default:
5380 break;
5381 }
5382 }
5294} 5383}
5295 5384
5296/* 5385/*
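
The new qla_register_fcport_fn() above snapshots rscn_gen before the potentially slow qla2x00_update_fcport() call; if the generation moved in the meantime, the deferred follow-up recorded in next_disc_state is dispatched. A sketch of that generation-snapshot pattern under the same assumptions:

    #include <stdio.h>

    enum next_state { NEXT_NONE, NEXT_DELETE, NEXT_ADISC };

    struct port { unsigned int rscn_gen; enum next_state next; };

    static void slow_registration(struct port *p)
    {
        p->rscn_gen++;              /* pretend an RSCN raced with us */
    }

    static void register_port(struct port *p)
    {
        unsigned int snap = p->rscn_gen;   /* snapshot before slow work */

        slow_registration(p);

        if (snap != p->rscn_gen) {         /* RSCN(s) arrived meanwhile */
            switch (p->next) {
            case NEXT_DELETE:
                printf("schedule session deletion\n");
                break;
            case NEXT_ADISC:
                printf("post ADISC work\n");
                break;
            default:
                break;
            }
        }
    }

    int main(void)
    {
        struct port p = { 0, NEXT_ADISC };

        register_port(&p);
        return 0;
    }
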
@@ -6494,6 +6583,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
6494 if (!(IS_P3P_TYPE(ha))) 6583 if (!(IS_P3P_TYPE(ha)))
6495 ha->isp_ops->reset_chip(vha); 6584 ha->isp_ops->reset_chip(vha);
6496 6585
6586 ha->link_data_rate = PORT_SPEED_UNKNOWN;
6497 SAVE_TOPO(ha); 6587 SAVE_TOPO(ha);
6498 ha->flags.rida_fmt2 = 0; 6588 ha->flags.rida_fmt2 = 0;
6499 ha->flags.n2n_ae = 0; 6589 ha->flags.n2n_ae = 0;
@@ -6622,6 +6712,20 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
6622 return status; 6712 return status;
6623 } 6713 }
6624 6714
6715 switch (vha->qlini_mode) {
6716 case QLA2XXX_INI_MODE_DISABLED:
6717 if (!qla_tgt_mode_enabled(vha))
6718 return 0;
6719 break;
6720 case QLA2XXX_INI_MODE_DUAL:
6721 if (!qla_dual_mode_enabled(vha))
6722 return 0;
6723 break;
6724 case QLA2XXX_INI_MODE_ENABLED:
6725 default:
6726 break;
6727 }
6728
6625 ha->isp_ops->get_flash_version(vha, req->ring); 6729 ha->isp_ops->get_flash_version(vha, req->ring);
6626 6730
6627 ha->isp_ops->nvram_config(vha); 6731 ha->isp_ops->nvram_config(vha);
@@ -6682,7 +6786,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
6682 * The next call disables the board 6786 * The next call disables the board
6683 * completely. 6787 * completely.
6684 */ 6788 */
6685 ha->isp_ops->reset_adapter(vha); 6789 qla2x00_abort_isp_cleanup(vha);
6686 vha->flags.online = 0; 6790 vha->flags.online = 0;
6687 clear_bit(ISP_ABORT_RETRY, 6791 clear_bit(ISP_ABORT_RETRY,
6688 &vha->dpc_flags); 6792 &vha->dpc_flags);
@@ -7142,7 +7246,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
7142 } 7246 }
7143 icb->firmware_options_2 &= cpu_to_le32( 7247 icb->firmware_options_2 &= cpu_to_le32(
7144 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 7248 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7145 vha->flags.process_response_queue = 0;
7146 if (ha->zio_mode != QLA_ZIO_DISABLED) { 7249 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7147 ha->zio_mode = QLA_ZIO_MODE_6; 7250 ha->zio_mode = QLA_ZIO_MODE_6;
7148 7251
@@ -7153,7 +7256,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
7153 icb->firmware_options_2 |= cpu_to_le32( 7256 icb->firmware_options_2 |= cpu_to_le32(
7154 (uint32_t)ha->zio_mode); 7257 (uint32_t)ha->zio_mode);
7155 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 7258 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7156 vha->flags.process_response_queue = 1;
7157 } 7259 }
7158 7260
7159 if (rval) { 7261 if (rval) {
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4351736b2426..512c3c37b447 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -209,7 +209,8 @@ qla2x00_chip_is_down(scsi_qla_host_t *vha)
209} 209}
210 210
211static inline srb_t * 211static inline srb_t *
212qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) 212qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
213 fc_port_t *fcport, gfp_t flag)
213{ 214{
214 srb_t *sp = NULL; 215 srb_t *sp = NULL;
215 uint8_t bail; 216 uint8_t bail;
@@ -225,7 +226,9 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
225 memset(sp, 0, sizeof(*sp)); 226 memset(sp, 0, sizeof(*sp));
226 sp->fcport = fcport; 227 sp->fcport = fcport;
227 sp->iocbs = 1; 228 sp->iocbs = 1;
228 sp->vha = qpair->vha; 229 sp->vha = vha;
230 sp->qpair = qpair;
231 sp->cmd_type = TYPE_SRB;
229 INIT_LIST_HEAD(&sp->elem); 232 INIT_LIST_HEAD(&sp->elem);
230 233
231done: 234done:
@@ -246,19 +249,17 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
246{ 249{
247 srb_t *sp = NULL; 250 srb_t *sp = NULL;
248 uint8_t bail; 251 uint8_t bail;
252 struct qla_qpair *qpair;
249 253
250 QLA_VHA_MARK_BUSY(vha, bail); 254 QLA_VHA_MARK_BUSY(vha, bail);
251 if (unlikely(bail)) 255 if (unlikely(bail))
252 return NULL; 256 return NULL;
253 257
254 sp = mempool_alloc(vha->hw->srb_mempool, flag); 258 qpair = vha->hw->base_qpair;
259 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
255 if (!sp) 260 if (!sp)
256 goto done; 261 goto done;
257 262
258 memset(sp, 0, sizeof(*sp));
259 sp->fcport = fcport;
260 sp->cmd_type = TYPE_SRB;
261 sp->iocbs = 1;
262 sp->vha = vha; 263 sp->vha = vha;
263done: 264done:
264 if (!sp) 265 if (!sp)
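
The qla_inline.h hunks funnel qla2x00_get_sp() through qla2xxx_get_qpair_sp() on the base queue pair, so a single allocation path initializes sp->qpair and sp->cmd_type. A minimal illustration of the wrapper arrangement (names abbreviated from the driver's):

    #include <stdio.h>
    #include <stdlib.h>

    struct qpair { int id; };
    struct srb { struct qpair *qpair; int cmd_type; };

    static struct qpair base_qpair = { 0 };

    static struct srb *get_qpair_sp(struct qpair *qp)
    {
        struct srb *sp = calloc(1, sizeof(*sp));

        if (!sp)
            return NULL;
        sp->qpair = qp;         /* every sp now records its queue pair */
        sp->cmd_type = 1;       /* TYPE_SRB analogue */
        return sp;
    }

    static struct srb *get_sp(void)
    {
        return get_qpair_sp(&base_qpair);   /* single allocation path */
    }

    int main(void)
    {
        struct srb *sp = get_sp();

        printf("sp on qpair %d\n", sp ? sp->qpair->id : -1);
        free(sp);
        return 0;
    }
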
@@ -270,7 +271,7 @@ static inline void
270qla2x00_rel_sp(srb_t *sp) 271qla2x00_rel_sp(srb_t *sp)
271{ 272{
272 QLA_VHA_MARK_NOT_BUSY(sp->vha); 273 QLA_VHA_MARK_NOT_BUSY(sp->vha);
273 mempool_free(sp, sp->vha->hw->srb_mempool); 274 qla2xxx_rel_qpair_sp(sp->qpair, sp);
274} 275}
275 276
276static inline void 277static inline void
@@ -317,13 +318,13 @@ static inline bool
317qla_is_exch_offld_enabled(struct scsi_qla_host *vha) 318qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
318{ 319{
319 if (qla_ini_mode_enabled(vha) && 320 if (qla_ini_mode_enabled(vha) &&
320 (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)) 321 (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
321 return true; 322 return true;
322 else if (qla_tgt_mode_enabled(vha) && 323 else if (qla_tgt_mode_enabled(vha) &&
323 (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)) 324 (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
324 return true; 325 return true;
325 else if (qla_dual_mode_enabled(vha) && 326 else if (qla_dual_mode_enabled(vha) &&
326 ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT)) 327 ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
327 return true; 328 return true;
328 else 329 else
329 return false; 330 return false;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ac8e097419..86fb8b21aa71 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1526,12 +1526,6 @@ qla24xx_start_scsi(srb_t *sp)
1526 1526
1527 /* Set chip new ring index. */ 1527 /* Set chip new ring index. */
1528 WRT_REG_DWORD(req->req_q_in, req->ring_index); 1528 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1529 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1530
1531 /* Manage unprocessed RIO/ZIO commands in response queue. */
1532 if (vha->flags.process_response_queue &&
1533 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1534 qla24xx_process_response_queue(vha, rsp);
1535 1529
1536 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1530 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1537 return QLA_SUCCESS; 1531 return QLA_SUCCESS;
@@ -1725,12 +1719,6 @@ qla24xx_dif_start_scsi(srb_t *sp)
1725 1719
1726 /* Set chip new ring index. */ 1720 /* Set chip new ring index. */
1727 WRT_REG_DWORD(req->req_q_in, req->ring_index); 1721 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1728 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1729
1730 /* Manage unprocessed RIO/ZIO commands in response queue. */
1731 if (vha->flags.process_response_queue &&
1732 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1733 qla24xx_process_response_queue(vha, rsp);
1734 1722
1735 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1723 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1736 1724
@@ -1880,11 +1868,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
1880 /* Set chip new ring index. */ 1868 /* Set chip new ring index. */
1881 WRT_REG_DWORD(req->req_q_in, req->ring_index); 1869 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1882 1870
1883 /* Manage unprocessed RIO/ZIO commands in response queue. */
1884 if (vha->flags.process_response_queue &&
1885 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1886 qla24xx_process_response_queue(vha, rsp);
1887
1888 spin_unlock_irqrestore(&qpair->qp_lock, flags); 1871 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1889 return QLA_SUCCESS; 1872 return QLA_SUCCESS;
1890 1873
@@ -2287,8 +2270,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2287 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2270 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2288 logio->control_flags = 2271 logio->control_flags =
2289 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 2272 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2290 if (!sp->fcport->se_sess || 2273 if (!sp->fcport->keep_nport_handle)
2291 !sp->fcport->keep_nport_handle)
2292 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); 2274 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2293 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2275 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2294 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2276 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2659,7 +2641,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2659 struct qla_hw_data *ha = vha->hw; 2641 struct qla_hw_data *ha = vha->hw;
2660 int rval = QLA_SUCCESS; 2642 int rval = QLA_SUCCESS;
2661 void *ptr, *resp_ptr; 2643 void *ptr, *resp_ptr;
2662 dma_addr_t ptr_dma;
2663 2644
2664 /* Alloc SRB structure */ 2645 /* Alloc SRB structure */
2665 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2646 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -2691,7 +2672,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2691 ptr = elsio->u.els_plogi.els_plogi_pyld = 2672 ptr = elsio->u.els_plogi.els_plogi_pyld =
2692 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, 2673 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2693 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL); 2674 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2694 ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
2695 2675
2696 if (!elsio->u.els_plogi.els_plogi_pyld) { 2676 if (!elsio->u.els_plogi.els_plogi_pyld) {
2697 rval = QLA_FUNCTION_FAILED; 2677 rval = QLA_FUNCTION_FAILED;
@@ -3314,19 +3294,21 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3314{ 3294{
3315 struct srb_iocb *aio = &sp->u.iocb_cmd; 3295 struct srb_iocb *aio = &sp->u.iocb_cmd;
3316 scsi_qla_host_t *vha = sp->vha; 3296 scsi_qla_host_t *vha = sp->vha;
3317 struct req_que *req = vha->req; 3297 struct req_que *req = sp->qpair->req;
3318 3298
3319 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 3299 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3320 abt_iocb->entry_type = ABORT_IOCB_TYPE; 3300 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3321 abt_iocb->entry_count = 1; 3301 abt_iocb->entry_count = 1;
3322 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 3302 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3323 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3303 if (sp->fcport) {
3304 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3305 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3306 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3307 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3308 }
3324 abt_iocb->handle_to_abort = 3309 abt_iocb->handle_to_abort =
3325 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, 3310 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3326 aio->u.abt.cmd_hndl)); 3311 aio->u.abt.cmd_hndl));
3327 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3328 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3329 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3330 abt_iocb->vp_index = vha->vp_idx; 3312 abt_iocb->vp_index = vha->vp_idx;
3331 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no); 3313 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3332 /* Send the command to the firmware */ 3314 /* Send the command to the firmware */
@@ -3455,12 +3437,13 @@ qla2x00_start_sp(srb_t *sp)
3455 int rval; 3437 int rval;
3456 scsi_qla_host_t *vha = sp->vha; 3438 scsi_qla_host_t *vha = sp->vha;
3457 struct qla_hw_data *ha = vha->hw; 3439 struct qla_hw_data *ha = vha->hw;
3440 struct qla_qpair *qp = sp->qpair;
3458 void *pkt; 3441 void *pkt;
3459 unsigned long flags; 3442 unsigned long flags;
3460 3443
3461 rval = QLA_FUNCTION_FAILED; 3444 rval = QLA_FUNCTION_FAILED;
3462 spin_lock_irqsave(&ha->hardware_lock, flags); 3445 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3463 pkt = qla2x00_alloc_iocbs(vha, sp); 3446 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3464 if (!pkt) { 3447 if (!pkt) {
3465 ql_log(ql_log_warn, vha, 0x700c, 3448 ql_log(ql_log_warn, vha, 0x700c,
3466 "qla2x00_alloc_iocbs failed.\n"); 3449 "qla2x00_alloc_iocbs failed.\n");
@@ -3538,9 +3521,9 @@ qla2x00_start_sp(srb_t *sp)
3538 } 3521 }
3539 3522
3540 wmb(); 3523 wmb();
3541 qla2x00_start_iocbs(vha, ha->req_q_map[0]); 3524 qla2x00_start_iocbs(vha, qp->req);
3542done: 3525done:
3543 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3526 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3544 return rval; 3527 return rval;
3545} 3528}
3546 3529
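The qla2x00_start_sp() hunk above swaps the device-wide hardware_lock and ring 0 (ha->req_q_map[0]) for the sp's own queue pair: it takes qp->qp_lock_ptr, allocates IOCBs with __qla2x00_alloc_iocbs(sp->qpair, sp), and rings qp->req. A rough pthread sketch of the locking change, under invented names:

    #include <pthread.h>

    struct queue {
            pthread_mutex_t lock;       /* per-queue, like qpair->qp_lock_ptr */
            unsigned int in;            /* producer index, like req->ring_index */
    };

    /* Submissions to different queues no longer serialize on a single lock. */
    static void submit(struct queue *q)
    {
            pthread_mutex_lock(&q->lock);
            q->in++;                    /* build the IOCB and ring the doorbell here */
            pthread_mutex_unlock(&q->lock);
    }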
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 36cbb29c84f6..d73b04e40590 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1850,11 +1850,12 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1850 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1850 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1851 uint16_t state_flags; 1851 uint16_t state_flags;
1852 struct nvmefc_fcp_req *fd; 1852 struct nvmefc_fcp_req *fd;
1853 uint16_t ret = 0; 1853 uint16_t ret = QLA_SUCCESS;
1854 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1854 1855
1855 iocb = &sp->u.iocb_cmd; 1856 iocb = &sp->u.iocb_cmd;
1856 fcport = sp->fcport; 1857 fcport = sp->fcport;
1857 iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status); 1858 iocb->u.nvme.comp_status = comp_status;
1858 state_flags = le16_to_cpu(sts->state_flags); 1859 state_flags = le16_to_cpu(sts->state_flags);
1859 fd = iocb->u.nvme.desc; 1860 fd = iocb->u.nvme.desc;
1860 1861
@@ -1892,28 +1893,35 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1892 fd->transferred_length = fd->payload_length - 1893 fd->transferred_length = fd->payload_length -
1893 le32_to_cpu(sts->residual_len); 1894 le32_to_cpu(sts->residual_len);
1894 1895
1895 switch (le16_to_cpu(sts->comp_status)) { 1896 if (unlikely(comp_status != CS_COMPLETE))
1897 ql_log(ql_log_warn, fcport->vha, 0x5060,
1898 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
1899 sp->name, sp->handle, comp_status,
1900 fd->transferred_length, le32_to_cpu(sts->residual_len),
1901 sts->ox_id);
1902
1903 /*
1904 * If a transport error occurred, fail the request (the HBA rejects it);
1905 * otherwise the transport layer will handle it.
1906 */
1907 switch (comp_status) {
1896 case CS_COMPLETE: 1908 case CS_COMPLETE:
1897 ret = QLA_SUCCESS;
1898 break; 1909 break;
1899 case CS_ABORTED: 1910
1900 case CS_RESET: 1911 case CS_RESET:
1901 case CS_PORT_UNAVAILABLE: 1912 case CS_PORT_UNAVAILABLE:
1902 case CS_PORT_LOGGED_OUT: 1913 case CS_PORT_LOGGED_OUT:
1914 fcport->nvme_flag |= NVME_FLAG_RESETTING;
1915 /* fall through */
1916 case CS_ABORTED:
1903 case CS_PORT_BUSY: 1917 case CS_PORT_BUSY:
1904 ql_log(ql_log_warn, fcport->vha, 0x5060,
1905 "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
1906 sp->name, sp->handle, sts->comp_status,
1907 le32_to_cpu(sts->residual_len), sts->ox_id);
1908 fd->transferred_length = 0; 1918 fd->transferred_length = 0;
1909 iocb->u.nvme.rsp_pyld_len = 0; 1919 iocb->u.nvme.rsp_pyld_len = 0;
1910 ret = QLA_ABORTED; 1920 ret = QLA_ABORTED;
1911 break; 1921 break;
1922 case CS_DATA_UNDERRUN:
1923 break;
1912 default: 1924 default:
1913 ql_log(ql_log_warn, fcport->vha, 0x5060,
1914 "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
1915 sp->name, sp->handle, sts->comp_status,
1916 le32_to_cpu(sts->residual_len), sts->ox_id);
1917 ret = QLA_FUNCTION_FAILED; 1925 ret = QLA_FUNCTION_FAILED;
1918 break; 1926 break;
1919 } 1927 }
@@ -2837,6 +2845,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2837 case ELS_IOCB_TYPE: 2845 case ELS_IOCB_TYPE:
2838 case ABORT_IOCB_TYPE: 2846 case ABORT_IOCB_TYPE:
2839 case MBX_IOCB_TYPE: 2847 case MBX_IOCB_TYPE:
2848 default:
2840 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2849 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2841 if (sp) { 2850 if (sp) {
2842 sp->done(sp, res); 2851 sp->done(sp, res);
@@ -2847,7 +2856,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2847 case ABTS_RESP_24XX: 2856 case ABTS_RESP_24XX:
2848 case CTIO_TYPE7: 2857 case CTIO_TYPE7:
2849 case CTIO_CRC2: 2858 case CTIO_CRC2:
2850 default:
2851 return 1; 2859 return 1;
2852 } 2860 }
2853fatal: 2861fatal:
@@ -3121,6 +3129,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
3121 uint16_t mb[8]; 3129 uint16_t mb[8];
3122 struct rsp_que *rsp; 3130 struct rsp_que *rsp;
3123 unsigned long flags; 3131 unsigned long flags;
3132 bool process_atio = false;
3124 3133
3125 rsp = (struct rsp_que *) dev_id; 3134 rsp = (struct rsp_que *) dev_id;
3126 if (!rsp) { 3135 if (!rsp) {
@@ -3181,22 +3190,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
3181 qla24xx_process_response_queue(vha, rsp); 3190 qla24xx_process_response_queue(vha, rsp);
3182 break; 3191 break;
3183 case INTR_ATIO_QUE_UPDATE_27XX: 3192 case INTR_ATIO_QUE_UPDATE_27XX:
3184 case INTR_ATIO_QUE_UPDATE:{ 3193 case INTR_ATIO_QUE_UPDATE:
3185 unsigned long flags2; 3194 process_atio = true;
3186 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3187 qlt_24xx_process_atio_queue(vha, 1);
3188 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3189 break; 3195 break;
3190 } 3196 case INTR_ATIO_RSP_QUE_UPDATE:
3191 case INTR_ATIO_RSP_QUE_UPDATE: { 3197 process_atio = true;
3192 unsigned long flags2;
3193 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3194 qlt_24xx_process_atio_queue(vha, 1);
3195 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3196
3197 qla24xx_process_response_queue(vha, rsp); 3198 qla24xx_process_response_queue(vha, rsp);
3198 break; 3199 break;
3199 }
3200 default: 3200 default:
3201 ql_dbg(ql_dbg_async, vha, 0x504f, 3201 ql_dbg(ql_dbg_async, vha, 0x504f,
3202 "Unrecognized interrupt type (%d).\n", stat * 0xff); 3202 "Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -3210,6 +3210,12 @@ qla24xx_intr_handler(int irq, void *dev_id)
3210 qla2x00_handle_mbx_completion(ha, status); 3210 qla2x00_handle_mbx_completion(ha, status);
3211 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3211 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3212 3212
3213 if (process_atio) {
3214 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3215 qlt_24xx_process_atio_queue(vha, 0);
3216 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3217 }
3218
3213 return IRQ_HANDLED; 3219 return IRQ_HANDLED;
3214} 3220}
3215 3221
@@ -3256,6 +3262,7 @@ qla24xx_msix_default(int irq, void *dev_id)
3256 uint32_t hccr; 3262 uint32_t hccr;
3257 uint16_t mb[8]; 3263 uint16_t mb[8];
3258 unsigned long flags; 3264 unsigned long flags;
3265 bool process_atio = false;
3259 3266
3260 rsp = (struct rsp_que *) dev_id; 3267 rsp = (struct rsp_que *) dev_id;
3261 if (!rsp) { 3268 if (!rsp) {
@@ -3312,22 +3319,13 @@ qla24xx_msix_default(int irq, void *dev_id)
3312 qla24xx_process_response_queue(vha, rsp); 3319 qla24xx_process_response_queue(vha, rsp);
3313 break; 3320 break;
3314 case INTR_ATIO_QUE_UPDATE_27XX: 3321 case INTR_ATIO_QUE_UPDATE_27XX:
3315 case INTR_ATIO_QUE_UPDATE:{ 3322 case INTR_ATIO_QUE_UPDATE:
3316 unsigned long flags2; 3323 process_atio = true;
3317 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3318 qlt_24xx_process_atio_queue(vha, 1);
3319 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3320 break; 3324 break;
3321 } 3325 case INTR_ATIO_RSP_QUE_UPDATE:
3322 case INTR_ATIO_RSP_QUE_UPDATE: { 3326 process_atio = true;
3323 unsigned long flags2;
3324 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3325 qlt_24xx_process_atio_queue(vha, 1);
3326 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3327
3328 qla24xx_process_response_queue(vha, rsp); 3327 qla24xx_process_response_queue(vha, rsp);
3329 break; 3328 break;
3330 }
3331 default: 3329 default:
3332 ql_dbg(ql_dbg_async, vha, 0x5051, 3330 ql_dbg(ql_dbg_async, vha, 0x5051,
3333 "Unrecognized interrupt type (%d).\n", stat & 0xff); 3331 "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -3338,6 +3336,12 @@ qla24xx_msix_default(int irq, void *dev_id)
3338 qla2x00_handle_mbx_completion(ha, status); 3336 qla2x00_handle_mbx_completion(ha, status);
3339 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3337 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3340 3338
3339 if (process_atio) {
3340 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3341 qlt_24xx_process_atio_queue(vha, 0);
3342 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3343 }
3344
3341 return IRQ_HANDLED; 3345 return IRQ_HANDLED;
3342} 3346}
3343 3347
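Both interrupt handlers above stop draining the ATIO queue from inside the event-status switch, where hardware_lock is held; they now only set process_atio and perform a single drain under atio_lock after hardware_lock is released. A hedged pthread sketch of that deferral (invented names, simplified event model):

    #include <pthread.h>
    #include <stdbool.h>

    #define ATIO_EVENT 1                /* invented stand-in for INTR_ATIO_QUE_UPDATE */

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t atio_lock = PTHREAD_MUTEX_INITIALIZER;

    static void drain_atio_queue(void)
    {
            /* stand-in for qlt_24xx_process_atio_queue() */
    }

    static void isr(const int *events, int nevents)
    {
            bool process_atio = false;
            int i;

            pthread_mutex_lock(&hw_lock);
            for (i = 0; i < nevents; i++)
                    if (events[i] == ATIO_EVENT)
                            process_atio = true;    /* only record it here */
            pthread_mutex_unlock(&hw_lock);

            if (process_atio) {
                    pthread_mutex_lock(&atio_lock); /* never nested inside hw_lock */
                    drain_atio_queue();
                    pthread_mutex_unlock(&atio_lock);
            }
    }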
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2c6c2cd5a0d0..2f3e5075ae76 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -60,6 +60,7 @@ static struct rom_cmd {
60 { MBC_GET_ADAPTER_LOOP_ID }, 60 { MBC_GET_ADAPTER_LOOP_ID },
61 { MBC_READ_SFP }, 61 { MBC_READ_SFP },
62 { MBC_GET_RNID_PARAMS }, 62 { MBC_GET_RNID_PARAMS },
63 { MBC_GET_SET_ZIO_THRESHOLD },
63}; 64};
64 65
65static int is_rom_cmd(uint16_t cmd) 66static int is_rom_cmd(uint16_t cmd)
@@ -189,7 +190,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
189 goto premature_exit; 190 goto premature_exit;
190 } 191 }
191 192
192 ha->flags.mbox_busy = 1; 193
193 /* Save mailbox command for debug */ 194 /* Save mailbox command for debug */
194 ha->mcp = mcp; 195 ha->mcp = mcp;
195 196
@@ -198,12 +199,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
198 199
199 spin_lock_irqsave(&ha->hardware_lock, flags); 200 spin_lock_irqsave(&ha->hardware_lock, flags);
200 201
201 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { 202 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
203 ha->flags.mbox_busy) {
202 rval = QLA_ABORTED; 204 rval = QLA_ABORTED;
203 ha->flags.mbox_busy = 0;
204 spin_unlock_irqrestore(&ha->hardware_lock, flags); 205 spin_unlock_irqrestore(&ha->hardware_lock, flags);
205 goto premature_exit; 206 goto premature_exit;
206 } 207 }
208 ha->flags.mbox_busy = 1;
207 209
208 /* Load mailbox registers. */ 210 /* Load mailbox registers. */
209 if (IS_P3P_TYPE(ha)) 211 if (IS_P3P_TYPE(ha))
@@ -254,9 +256,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
254 if (IS_P3P_TYPE(ha)) { 256 if (IS_P3P_TYPE(ha)) {
255 if (RD_REG_DWORD(&reg->isp82.hint) & 257 if (RD_REG_DWORD(&reg->isp82.hint) &
256 HINT_MBX_INT_PENDING) { 258 HINT_MBX_INT_PENDING) {
259 ha->flags.mbox_busy = 0;
257 spin_unlock_irqrestore(&ha->hardware_lock, 260 spin_unlock_irqrestore(&ha->hardware_lock,
258 flags); 261 flags);
259 ha->flags.mbox_busy = 0; 262
260 atomic_dec(&ha->num_pend_mbx_stage2); 263 atomic_dec(&ha->num_pend_mbx_stage2);
261 ql_dbg(ql_dbg_mbx, vha, 0x1010, 264 ql_dbg(ql_dbg_mbx, vha, 0x1010,
262 "Pending mailbox timeout, exiting.\n"); 265 "Pending mailbox timeout, exiting.\n");
@@ -274,6 +277,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
274 atomic_inc(&ha->num_pend_mbx_stage3); 277 atomic_inc(&ha->num_pend_mbx_stage3);
275 if (!wait_for_completion_timeout(&ha->mbx_intr_comp, 278 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
276 mcp->tov * HZ)) { 279 mcp->tov * HZ)) {
280 if (chip_reset != ha->chip_reset) {
281 spin_lock_irqsave(&ha->hardware_lock, flags);
282 ha->flags.mbox_busy = 0;
283 spin_unlock_irqrestore(&ha->hardware_lock,
284 flags);
285 atomic_dec(&ha->num_pend_mbx_stage2);
286 atomic_dec(&ha->num_pend_mbx_stage3);
287 rval = QLA_ABORTED;
288 goto premature_exit;
289 }
277 ql_dbg(ql_dbg_mbx, vha, 0x117a, 290 ql_dbg(ql_dbg_mbx, vha, 0x117a,
278 "cmd=%x Timeout.\n", command); 291 "cmd=%x Timeout.\n", command);
279 spin_lock_irqsave(&ha->hardware_lock, flags); 292 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -282,7 +295,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
282 295
283 } else if (ha->flags.purge_mbox || 296 } else if (ha->flags.purge_mbox ||
284 chip_reset != ha->chip_reset) { 297 chip_reset != ha->chip_reset) {
298 spin_lock_irqsave(&ha->hardware_lock, flags);
285 ha->flags.mbox_busy = 0; 299 ha->flags.mbox_busy = 0;
300 spin_unlock_irqrestore(&ha->hardware_lock, flags);
286 atomic_dec(&ha->num_pend_mbx_stage2); 301 atomic_dec(&ha->num_pend_mbx_stage2);
287 atomic_dec(&ha->num_pend_mbx_stage3); 302 atomic_dec(&ha->num_pend_mbx_stage3);
288 rval = QLA_ABORTED; 303 rval = QLA_ABORTED;
@@ -300,9 +315,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
300 if (IS_P3P_TYPE(ha)) { 315 if (IS_P3P_TYPE(ha)) {
301 if (RD_REG_DWORD(&reg->isp82.hint) & 316 if (RD_REG_DWORD(&reg->isp82.hint) &
302 HINT_MBX_INT_PENDING) { 317 HINT_MBX_INT_PENDING) {
318 ha->flags.mbox_busy = 0;
303 spin_unlock_irqrestore(&ha->hardware_lock, 319 spin_unlock_irqrestore(&ha->hardware_lock,
304 flags); 320 flags);
305 ha->flags.mbox_busy = 0;
306 atomic_dec(&ha->num_pend_mbx_stage2); 321 atomic_dec(&ha->num_pend_mbx_stage2);
307 ql_dbg(ql_dbg_mbx, vha, 0x1012, 322 ql_dbg(ql_dbg_mbx, vha, 0x1012,
308 "Pending mailbox timeout, exiting.\n"); 323 "Pending mailbox timeout, exiting.\n");
@@ -320,7 +335,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
320 while (!ha->flags.mbox_int) { 335 while (!ha->flags.mbox_int) {
321 if (ha->flags.purge_mbox || 336 if (ha->flags.purge_mbox ||
322 chip_reset != ha->chip_reset) { 337 chip_reset != ha->chip_reset) {
338 spin_lock_irqsave(&ha->hardware_lock, flags);
323 ha->flags.mbox_busy = 0; 339 ha->flags.mbox_busy = 0;
340 spin_unlock_irqrestore(&ha->hardware_lock,
341 flags);
324 atomic_dec(&ha->num_pend_mbx_stage2); 342 atomic_dec(&ha->num_pend_mbx_stage2);
325 rval = QLA_ABORTED; 343 rval = QLA_ABORTED;
326 goto premature_exit; 344 goto premature_exit;
@@ -363,7 +381,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
363 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 381 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
364 382
365 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { 383 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
384 spin_lock_irqsave(&ha->hardware_lock, flags);
366 ha->flags.mbox_busy = 0; 385 ha->flags.mbox_busy = 0;
386 spin_unlock_irqrestore(&ha->hardware_lock, flags);
387
367 /* Setting Link-Down error */ 388 /* Setting Link-Down error */
368 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 389 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
369 ha->mcp = NULL; 390 ha->mcp = NULL;
@@ -436,7 +457,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
436 * then only PCI ERR flag would be set. 457 * then only PCI ERR flag would be set.
437 * we will do premature exit for above case. 458 * we will do premature exit for above case.
438 */ 459 */
460 spin_lock_irqsave(&ha->hardware_lock, flags);
439 ha->flags.mbox_busy = 0; 461 ha->flags.mbox_busy = 0;
462 spin_unlock_irqrestore(&ha->hardware_lock,
463 flags);
440 rval = QLA_FUNCTION_TIMEOUT; 464 rval = QLA_FUNCTION_TIMEOUT;
441 goto premature_exit; 465 goto premature_exit;
442 } 466 }
@@ -451,8 +475,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
451 rval = QLA_FUNCTION_TIMEOUT; 475 rval = QLA_FUNCTION_TIMEOUT;
452 } 476 }
453 } 477 }
454 478 spin_lock_irqsave(&ha->hardware_lock, flags);
455 ha->flags.mbox_busy = 0; 479 ha->flags.mbox_busy = 0;
480 spin_unlock_irqrestore(&ha->hardware_lock, flags);
456 481
457 /* Clean up */ 482 /* Clean up */
458 ha->mcp = NULL; 483 ha->mcp = NULL;
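Every test, set, and clear of ha->flags.mbox_busy in the hunks above now happens inside hardware_lock, and the busy check is folded into the same critical section that claims the mailbox, so a purge/chip-reset path and a new command can no longer race on the flag. A simplified claim/release sketch (invented names, not the driver's API):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool mbox_busy;

    /* Test and set under one lock: two contexts cannot both claim the mailbox. */
    static int mbox_try_claim(void)
    {
            int ret = 0;

            pthread_mutex_lock(&hw_lock);
            if (mbox_busy)
                    ret = -1;           /* caller bails out, like QLA_ABORTED */
            else
                    mbox_busy = true;
            pthread_mutex_unlock(&hw_lock);
            return ret;
    }

    static void mbox_release(void)
    {
            pthread_mutex_lock(&hw_lock); /* cleared under the same lock, always */
            mbox_busy = false;
            pthread_mutex_unlock(&hw_lock);
    }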
@@ -493,7 +518,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
493 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 518 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
494 qla2xxx_wake_dpc(vha); 519 qla2xxx_wake_dpc(vha);
495 } 520 }
496 } else if (!abort_active) { 521 } else if (current == ha->dpc_thread) {
497 /* call abort directly since we are in the DPC thread */ 522 /* call abort directly since we are in the DPC thread */
498 ql_dbg(ql_dbg_mbx, vha, 0x101d, 523 ql_dbg(ql_dbg_mbx, vha, 0x101d,
499 "Timeout, calling abort_isp.\n"); 524 "Timeout, calling abort_isp.\n");
@@ -1486,7 +1511,6 @@ qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1486 struct req_que *req; 1511 struct req_que *req;
1487 struct rsp_que *rsp; 1512 struct rsp_que *rsp;
1488 1513
1489 l = l;
1490 vha = fcport->vha; 1514 vha = fcport->vha;
1491 1515
1492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, 1516 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
@@ -3072,22 +3096,25 @@ qla24xx_abort_command(srb_t *sp)
3072 struct scsi_qla_host *vha = fcport->vha; 3096 struct scsi_qla_host *vha = fcport->vha;
3073 struct qla_hw_data *ha = vha->hw; 3097 struct qla_hw_data *ha = vha->hw;
3074 struct req_que *req = vha->req; 3098 struct req_que *req = vha->req;
3099 struct qla_qpair *qpair = sp->qpair;
3075 3100
3076 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3101 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3077 "Entered %s.\n", __func__); 3102 "Entered %s.\n", __func__);
3078 3103
3079 if (vha->flags.qpairs_available && sp->qpair) 3104 if (vha->flags.qpairs_available && sp->qpair)
3080 req = sp->qpair->req; 3105 req = sp->qpair->req;
3106 else
3107 return QLA_FUNCTION_FAILED;
3081 3108
3082 if (ql2xasynctmfenable) 3109 if (ql2xasynctmfenable)
3083 return qla24xx_async_abort_command(sp); 3110 return qla24xx_async_abort_command(sp);
3084 3111
3085 spin_lock_irqsave(&ha->hardware_lock, flags); 3112 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3086 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3113 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3087 if (req->outstanding_cmds[handle] == sp) 3114 if (req->outstanding_cmds[handle] == sp)
3088 break; 3115 break;
3089 } 3116 }
3090 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3117 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3091 if (handle == req->num_outstanding_cmds) { 3118 if (handle == req->num_outstanding_cmds) {
3092 /* Command not found. */ 3119 /* Command not found. */
3093 return QLA_FUNCTION_FAILED; 3120 return QLA_FUNCTION_FAILED;
@@ -3762,10 +3789,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3762 mcp->mb[0] = MBC_PORT_PARAMS; 3789 mcp->mb[0] = MBC_PORT_PARAMS;
3763 mcp->mb[1] = loop_id; 3790 mcp->mb[1] = loop_id;
3764 mcp->mb[2] = BIT_0; 3791 mcp->mb[2] = BIT_0;
3765 if (IS_CNA_CAPABLE(vha->hw)) 3792 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3766 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3767 else
3768 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
3769 mcp->mb[9] = vha->vp_idx; 3793 mcp->mb[9] = vha->vp_idx;
3770 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3794 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3771 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3795 mcp->in_mb = MBX_3|MBX_1|MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 20d9dc39f0fb..7e78e7eff783 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -506,7 +506,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
506 return -EBUSY; 506 return -EBUSY;
507 507
508 /* Alloc SRB structure */ 508 /* Alloc SRB structure */
509 sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC); 509 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
510 if (!sp) 510 if (!sp)
511 return -EBUSY; 511 return -EBUSY;
512 512
@@ -607,7 +607,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
607{ 607{
608 int rval; 608 int rval;
609 609
610 if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) { 610 if (ha->flags.fw_started) {
611 rval = ha->isp_ops->abort_command(sp); 611 rval = ha->isp_ops->abort_command(sp);
612 if (!rval && !qla_nvme_wait_on_command(sp)) 612 if (!rval && !qla_nvme_wait_on_command(sp))
613 ql_log(ql_log_warn, NULL, 0x2112, 613 ql_log(ql_log_warn, NULL, 0x2112,
@@ -660,9 +660,6 @@ void qla_nvme_delete(struct scsi_qla_host *vha)
660 __func__, fcport); 660 __func__, fcport);
661 661
662 nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); 662 nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
663 init_completion(&fcport->nvme_del_done);
664 nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
665 wait_for_completion(&fcport->nvme_del_done);
666 } 663 }
667 664
668 if (vha->nvme_local_port) { 665 if (vha->nvme_local_port) {
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de2bc78449e7..121e18b3b9f8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3699,8 +3699,8 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3699 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3699 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3700 3700
3701 /* Wait for pending cmds (physical and virtual) to complete */ 3701 /* Wait for pending cmds (physical and virtual) to complete */
3702 if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 3702 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3703 WAIT_HOST) == QLA_SUCCESS) { 3703 WAIT_HOST)) {
3704 ql_dbg(ql_dbg_init, vha, 0x00b3, 3704 ql_dbg(ql_dbg_init, vha, 0x00b3,
3705 "Done wait for " 3705 "Done wait for "
3706 "pending commands.\n"); 3706 "pending commands.\n");
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8fe2d7329bfe..8794e54f43a9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -14,6 +14,8 @@
14#include <linux/kobject.h> 14#include <linux/kobject.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/blk-mq-pci.h> 16#include <linux/blk-mq-pci.h>
17#include <linux/refcount.h>
18
17#include <scsi/scsi_tcq.h> 19#include <scsi/scsi_tcq.h>
18#include <scsi/scsicam.h> 20#include <scsi/scsicam.h>
19#include <scsi/scsi_transport.h> 21#include <scsi/scsi_transport.h>
@@ -204,7 +206,7 @@ int ql2xasynctmfenable = 1;
204module_param(ql2xasynctmfenable, int, S_IRUGO); 206module_param(ql2xasynctmfenable, int, S_IRUGO);
205MODULE_PARM_DESC(ql2xasynctmfenable, 207MODULE_PARM_DESC(ql2xasynctmfenable,
206 "Enables issue of TM IOCBs asynchronously via IOCB mechanism" 208 "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
207 "Default is 0 - Issue TM IOCBs via mailbox mechanism."); 209 "Default is 1 - Issue TM IOCBs via mailbox mechanism.");
208 210
209int ql2xdontresethba; 211int ql2xdontresethba;
210module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); 212module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
@@ -391,12 +393,14 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
391 struct qla_hw_data *ha = vha->hw; 393 struct qla_hw_data *ha = vha->hw;
392 rsp->qpair = ha->base_qpair; 394 rsp->qpair = ha->base_qpair;
393 rsp->req = req; 395 rsp->req = req;
396 ha->base_qpair->hw = ha;
394 ha->base_qpair->req = req; 397 ha->base_qpair->req = req;
395 ha->base_qpair->rsp = rsp; 398 ha->base_qpair->rsp = rsp;
396 ha->base_qpair->vha = vha; 399 ha->base_qpair->vha = vha;
397 ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; 400 ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
398 ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; 401 ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
399 ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; 402 ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
403 ha->base_qpair->srb_mempool = ha->srb_mempool;
400 INIT_LIST_HEAD(&ha->base_qpair->hints_list); 404 INIT_LIST_HEAD(&ha->base_qpair->hints_list);
401 ha->base_qpair->enable_class_2 = ql2xenableclass2; 405 ha->base_qpair->enable_class_2 = ql2xenableclass2;
402 /* init qpair to this cpu. Will adjust at run time. */ 406 /* init qpair to this cpu. Will adjust at run time. */
@@ -1012,7 +1016,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
1012 else 1016 else
1013 goto qc24_target_busy; 1017 goto qc24_target_busy;
1014 1018
1015 sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC); 1019 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
1016 if (!sp) 1020 if (!sp)
1017 goto qc24_host_busy; 1021 goto qc24_host_busy;
1018 1022
@@ -1212,10 +1216,14 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
1212 return return_status; 1216 return return_status;
1213} 1217}
1214 1218
1215static void 1219static int
1216sp_get(struct srb *sp) 1220sp_get(struct srb *sp)
1217{ 1221{
1218 atomic_inc(&sp->ref_count); 1222 if (!refcount_inc_not_zero((refcount_t*)&sp->ref_count))
1223 /* kref get fail */
1224 return ENXIO;
1225 else
1226 return 0;
1219} 1227}
1220 1228
1221#define ISP_REG_DISCONNECT 0xffffffffU 1229#define ISP_REG_DISCONNECT 0xffffffffU
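sp_get() above changes from an unconditional atomic_inc() to refcount_inc_not_zero(), so error handling takes a reference only when command completion has not already dropped the last one; callers such as qla2xxx_eh_abort() now check the return value before touching the sp. A C11 sketch of what inc-not-zero does (saturation handling omitted; names invented):

    #include <errno.h>
    #include <stdatomic.h>

    /* Returns 0 with a reference taken, or ENXIO if the object is already gone. */
    static int ref_get_not_zero(atomic_int *ref)
    {
            int v = atomic_load(ref);

            while (v != 0)
                    if (atomic_compare_exchange_weak(ref, &v, v + 1))
                            return 0;   /* got a reference */
            return ENXIO;               /* last ref already dropped; do not touch */
    }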
@@ -1273,38 +1281,51 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1273 unsigned long flags; 1281 unsigned long flags;
1274 int rval, wait = 0; 1282 int rval, wait = 0;
1275 struct qla_hw_data *ha = vha->hw; 1283 struct qla_hw_data *ha = vha->hw;
1284 struct qla_qpair *qpair;
1276 1285
1277 if (qla2x00_isp_reg_stat(ha)) { 1286 if (qla2x00_isp_reg_stat(ha)) {
1278 ql_log(ql_log_info, vha, 0x8042, 1287 ql_log(ql_log_info, vha, 0x8042,
1279 "PCI/Register disconnect, exiting.\n"); 1288 "PCI/Register disconnect, exiting.\n");
1280 return FAILED; 1289 return FAILED;
1281 } 1290 }
1282 if (!CMD_SP(cmd))
1283 return SUCCESS;
1284 1291
1285 ret = fc_block_scsi_eh(cmd); 1292 ret = fc_block_scsi_eh(cmd);
1286 if (ret != 0) 1293 if (ret != 0)
1287 return ret; 1294 return ret;
1288 ret = SUCCESS; 1295 ret = SUCCESS;
1289 1296
1290 id = cmd->device->id;
1291 lun = cmd->device->lun;
1292
1293 spin_lock_irqsave(&ha->hardware_lock, flags);
1294 sp = (srb_t *) CMD_SP(cmd); 1297 sp = (srb_t *) CMD_SP(cmd);
1295 if (!sp) { 1298 if (!sp)
1296 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1299 return SUCCESS;
1300
1301 qpair = sp->qpair;
1302 if (!qpair)
1303 return SUCCESS;
1304
1305 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1306 if (!CMD_SP(cmd)) {
1307 /* there's a chance an interrupt could clear
1308 the ptr as part of done & free */
1309 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1310 return SUCCESS;
1311 }
1312
1313 if (sp_get(sp)) {
1314 /* ref_count is already 0 */
1315 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1297 return SUCCESS; 1316 return SUCCESS;
1298 } 1317 }
1318 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1319
1320 id = cmd->device->id;
1321 lun = cmd->device->lun;
1299 1322
1300 ql_dbg(ql_dbg_taskm, vha, 0x8002, 1323 ql_dbg(ql_dbg_taskm, vha, 0x8002,
1301 "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", 1324 "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
1302 vha->host_no, id, lun, sp, cmd, sp->handle); 1325 vha->host_no, id, lun, sp, cmd, sp->handle);
1303 1326
1304 /* Get a reference to the sp and drop the lock.*/ 1327 /* Get a reference to the sp and drop the lock.*/
1305 sp_get(sp);
1306 1328
1307 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1308 rval = ha->isp_ops->abort_command(sp); 1329 rval = ha->isp_ops->abort_command(sp);
1309 if (rval) { 1330 if (rval) {
1310 if (rval == QLA_FUNCTION_PARAMETER_ERROR) 1331 if (rval == QLA_FUNCTION_PARAMETER_ERROR)
@@ -1320,14 +1341,29 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1320 wait = 1; 1341 wait = 1;
1321 } 1342 }
1322 1343
1323 spin_lock_irqsave(&ha->hardware_lock, flags); 1344 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1324 sp->done(sp, 0); 1345 /*
1325 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1346 * Clear the slot in the outstanding_cmds array if we can't find the
1347 * command to reclaim the resources.
1348 */
1349 if (rval == QLA_FUNCTION_PARAMETER_ERROR)
1350 vha->req->outstanding_cmds[sp->handle] = NULL;
1351
1352 /*
1353 * sp->done will do ref_count--
1354 * sp_get() took an extra count above
1355 */
1356 sp->done(sp, DID_RESET << 16);
1326 1357
1327 /* Did the command return during mailbox execution? */ 1358 /* Did the command return during mailbox execution? */
1328 if (ret == FAILED && !CMD_SP(cmd)) 1359 if (ret == FAILED && !CMD_SP(cmd))
1329 ret = SUCCESS; 1360 ret = SUCCESS;
1330 1361
1362 if (!CMD_SP(cmd))
1363 wait = 0;
1364
1365 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1366
1331 /* Wait for the command to be returned. */ 1367 /* Wait for the command to be returned. */
1332 if (wait) { 1368 if (wait) {
1333 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { 1369 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
@@ -1721,7 +1757,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1721 struct req_que *req; 1757 struct req_que *req;
1722 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 1758 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1723 struct qla_tgt_cmd *cmd; 1759 struct qla_tgt_cmd *cmd;
1724 uint8_t trace = 0;
1725 1760
1726 if (!ha->req_q_map) 1761 if (!ha->req_q_map)
1727 return; 1762 return;
@@ -1731,64 +1766,68 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1731 sp = req->outstanding_cmds[cnt]; 1766 sp = req->outstanding_cmds[cnt];
1732 if (sp) { 1767 if (sp) {
1733 req->outstanding_cmds[cnt] = NULL; 1768 req->outstanding_cmds[cnt] = NULL;
1734 if (sp->cmd_type == TYPE_SRB) { 1769 switch (sp->cmd_type) {
1770 case TYPE_SRB:
1735 if (sp->type == SRB_NVME_CMD || 1771 if (sp->type == SRB_NVME_CMD ||
1736 sp->type == SRB_NVME_LS) { 1772 sp->type == SRB_NVME_LS) {
1737 sp_get(sp); 1773 if (!sp_get(sp)) {
1738 spin_unlock_irqrestore(qp->qp_lock_ptr, 1774 /* got sp */
1739 flags); 1775 spin_unlock_irqrestore
1740 qla_nvme_abort(ha, sp, res); 1776 (qp->qp_lock_ptr,
1741 spin_lock_irqsave(qp->qp_lock_ptr, 1777 flags);
1742 flags); 1778 qla_nvme_abort(ha, sp, res);
1779 spin_lock_irqsave
1780 (qp->qp_lock_ptr, flags);
1781 }
1743 } else if (GET_CMD_SP(sp) && 1782 } else if (GET_CMD_SP(sp) &&
1744 !ha->flags.eeh_busy && 1783 !ha->flags.eeh_busy &&
1745 (!test_bit(ABORT_ISP_ACTIVE, 1784 (!test_bit(ABORT_ISP_ACTIVE,
1746 &vha->dpc_flags)) && 1785 &vha->dpc_flags)) &&
1786 !qla2x00_isp_reg_stat(ha) &&
1747 (sp->type == SRB_SCSI_CMD)) { 1787 (sp->type == SRB_SCSI_CMD)) {
1748 /* 1788 /*
1749 * Don't abort commands in 1789 * Don't abort commands in adapter
1750 * adapter during EEH 1790 * during EEH recovery as it's not
1751 * recovery as it's not
1752 * accessible/responding. 1791 * accessible/responding.
1753 * 1792 *
1754 * Get a reference to the sp 1793 * Get a reference to the sp and drop
1755 * and drop the lock. The 1794 * the lock. The reference ensures this
1756 * reference ensures this 1795 * sp->done() call and not the call in
1757 * sp->done() call and not the 1796 * qla2xxx_eh_abort() ends the SCSI cmd
1758 * call in qla2xxx_eh_abort() 1797 * (with result 'res').
1759 * ends the SCSI command (with
1760 * result 'res').
1761 */ 1798 */
1762 sp_get(sp); 1799 if (!sp_get(sp)) {
1763 spin_unlock_irqrestore(qp->qp_lock_ptr, 1800 spin_unlock_irqrestore
1764 flags); 1801 (qp->qp_lock_ptr, flags);
1765 status = qla2xxx_eh_abort( 1802 status = qla2xxx_eh_abort(
1766 GET_CMD_SP(sp)); 1803 GET_CMD_SP(sp));
1767 spin_lock_irqsave(qp->qp_lock_ptr, 1804 spin_lock_irqsave
1768 flags); 1805 (qp->qp_lock_ptr, flags);
1769 /* 1806 }
1770 * Get rid of extra reference
1771 * if immediate exit from
1772 * ql2xxx_eh_abort
1773 */
1774 if (status == FAILED &&
1775 (qla2x00_isp_reg_stat(ha)))
1776 atomic_dec(
1777 &sp->ref_count);
1778 } 1807 }
1779 sp->done(sp, res); 1808 sp->done(sp, res);
1780 } else { 1809 break;
1810 case TYPE_TGT_CMD:
1781 if (!vha->hw->tgt.tgt_ops || !tgt || 1811 if (!vha->hw->tgt.tgt_ops || !tgt ||
1782 qla_ini_mode_enabled(vha)) { 1812 qla_ini_mode_enabled(vha)) {
1783 if (!trace) 1813 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
1784 ql_dbg(ql_dbg_tgt_mgt, 1814 "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
1785 vha, 0xf003, 1815 vha->dpc_flags);
1786 "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
1787 vha->dpc_flags);
1788 continue; 1816 continue;
1789 } 1817 }
1790 cmd = (struct qla_tgt_cmd *)sp; 1818 cmd = (struct qla_tgt_cmd *)sp;
1791 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 1819 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
1820 break;
1821 case TYPE_TGT_TMCMD:
1822 /*
1823 * Currently, only ABTS response gets on the
1824 * outstanding_cmds[]
1825 */
1826 ha->tgt.tgt_ops->free_mcmd(
1827 (struct qla_tgt_mgmt_cmd *)sp);
1828 break;
1829 default:
1830 break;
1792 } 1831 }
1793 } 1832 }
1794 } 1833 }
@@ -2708,7 +2747,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
2708 struct scsi_qla_host, iocb_work); 2747 struct scsi_qla_host, iocb_work);
2709 struct qla_hw_data *ha = vha->hw; 2748 struct qla_hw_data *ha = vha->hw;
2710 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2749 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2711 int i = 20; 2750 int i = 2;
2712 unsigned long flags; 2751 unsigned long flags;
2713 2752
2714 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 2753 if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -2819,6 +2858,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2819 atomic_set(&ha->num_pend_mbx_stage1, 0); 2858 atomic_set(&ha->num_pend_mbx_stage1, 0);
2820 atomic_set(&ha->num_pend_mbx_stage2, 0); 2859 atomic_set(&ha->num_pend_mbx_stage2, 0);
2821 atomic_set(&ha->num_pend_mbx_stage3, 0); 2860 atomic_set(&ha->num_pend_mbx_stage3, 0);
2861 atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
2862 ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
2822 2863
2823 /* Assign ISP specific operations. */ 2864 /* Assign ISP specific operations. */
2824 if (IS_QLA2100(ha)) { 2865 if (IS_QLA2100(ha)) {
@@ -4249,29 +4290,34 @@ static void
4249qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt) 4290qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
4250{ 4291{
4251 u32 temp; 4292 u32 temp;
4293 struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
4252 *ret_cnt = FW_DEF_EXCHANGES_CNT; 4294 *ret_cnt = FW_DEF_EXCHANGES_CNT;
4253 4295
4254 if (max_cnt > vha->hw->max_exchg) 4296 if (max_cnt > vha->hw->max_exchg)
4255 max_cnt = vha->hw->max_exchg; 4297 max_cnt = vha->hw->max_exchg;
4256 4298
4257 if (qla_ini_mode_enabled(vha)) { 4299 if (qla_ini_mode_enabled(vha)) {
4258 if (ql2xiniexchg > max_cnt) 4300 if (vha->ql2xiniexchg > max_cnt)
4259 ql2xiniexchg = max_cnt; 4301 vha->ql2xiniexchg = max_cnt;
4302
4303 if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
4304 *ret_cnt = vha->ql2xiniexchg;
4260 4305
4261 if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
4262 *ret_cnt = ql2xiniexchg;
4263 } else if (qla_tgt_mode_enabled(vha)) { 4306 } else if (qla_tgt_mode_enabled(vha)) {
4264 if (ql2xexchoffld > max_cnt) 4307 if (vha->ql2xexchoffld > max_cnt) {
4265 ql2xexchoffld = max_cnt; 4308 vha->ql2xexchoffld = max_cnt;
4309 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4310 }
4266 4311
4267 if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT) 4312 if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
4268 *ret_cnt = ql2xexchoffld; 4313 *ret_cnt = vha->ql2xexchoffld;
4269 } else if (qla_dual_mode_enabled(vha)) { 4314 } else if (qla_dual_mode_enabled(vha)) {
4270 temp = ql2xiniexchg + ql2xexchoffld; 4315 temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
4271 if (temp > max_cnt) { 4316 if (temp > max_cnt) {
4272 ql2xiniexchg -= (temp - max_cnt)/2; 4317 vha->ql2xiniexchg -= (temp - max_cnt)/2;
4273 ql2xexchoffld -= (((temp - max_cnt)/2) + 1); 4318 vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
4274 temp = max_cnt; 4319 temp = max_cnt;
4320 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4275 } 4321 }
4276 4322
4277 if (temp > FW_DEF_EXCHANGES_CNT) 4323 if (temp > FW_DEF_EXCHANGES_CNT)
@@ -4309,6 +4355,12 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
4309 4355
4310 if (totsz != ha->exchoffld_size) { 4356 if (totsz != ha->exchoffld_size) {
4311 qla2x00_free_exchoffld_buffer(ha); 4357 qla2x00_free_exchoffld_buffer(ha);
4358 if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
4359 ha->exchoffld_size = 0;
4360 ha->flags.exchoffld_enabled = 0;
4361 return QLA_SUCCESS;
4362 }
4363
4312 ha->exchoffld_size = totsz; 4364 ha->exchoffld_size = totsz;
4313 4365
4314 ql_log(ql_log_info, vha, 0xd016, 4366 ql_log(ql_log_info, vha, 0xd016,
@@ -4341,6 +4393,15 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
4341 4393
4342 return -ENOMEM; 4394 return -ENOMEM;
4343 } 4395 }
4396 } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
4397 /* pathological case */
4398 qla2x00_free_exchoffld_buffer(ha);
4399 ha->exchoffld_size = 0;
4400 ha->flags.exchoffld_enabled = 0;
4401 ql_log(ql_log_info, vha, 0xd016,
4402 "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
4403 ha->exchoffld_size, actual_cnt, size, totsz);
4404 return 0;
4344 } 4405 }
4345 4406
4346 /* Now configure the dma buffer */ 4407 /* Now configure the dma buffer */
@@ -4356,7 +4417,7 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
4356 if (qla_ini_mode_enabled(vha)) 4417 if (qla_ini_mode_enabled(vha))
4357 icb->exchange_count = 0; 4418 icb->exchange_count = 0;
4358 else 4419 else
4359 icb->exchange_count = cpu_to_le16(ql2xexchoffld); 4420 icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4360 } 4421 }
4361 4422
4362 return rval; 4423 return rval;
@@ -4564,6 +4625,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4564 vha->host_no = host->host_no; 4625 vha->host_no = host->host_no;
4565 vha->hw = ha; 4626 vha->hw = ha;
4566 4627
4628 vha->qlini_mode = ql2x_ini_mode;
4629 vha->ql2xexchoffld = ql2xexchoffld;
4630 vha->ql2xiniexchg = ql2xiniexchg;
4631
4567 INIT_LIST_HEAD(&vha->vp_fcports); 4632 INIT_LIST_HEAD(&vha->vp_fcports);
4568 INIT_LIST_HEAD(&vha->work_list); 4633 INIT_LIST_HEAD(&vha->work_list);
4569 INIT_LIST_HEAD(&vha->list); 4634 INIT_LIST_HEAD(&vha->list);
@@ -4579,7 +4644,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4579 4644
4580 spin_lock_init(&vha->work_lock); 4645 spin_lock_init(&vha->work_lock);
4581 spin_lock_init(&vha->cmd_list_lock); 4646 spin_lock_init(&vha->cmd_list_lock);
4582 spin_lock_init(&vha->gnl.fcports_lock);
4583 init_waitqueue_head(&vha->fcport_waitQ); 4647 init_waitqueue_head(&vha->fcport_waitQ);
4584 init_waitqueue_head(&vha->vref_waitq); 4648 init_waitqueue_head(&vha->vref_waitq);
4585 4649
@@ -4710,7 +4774,6 @@ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
4710qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); 4774qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
4711qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); 4775qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
4712qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); 4776qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
4713qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
4714qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); 4777qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
4715qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); 4778qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
4716 4779
@@ -4761,16 +4824,25 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
4761 return qla2x00_post_work(vha, e); 4824 return qla2x00_post_work(vha, e);
4762} 4825}
4763 4826
4764int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport) 4827void qla24xx_sched_upd_fcport(fc_port_t *fcport)
4765{ 4828{
4766 struct qla_work_evt *e; 4829 unsigned long flags;
4767 4830
4768 e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT); 4831 if (IS_SW_RESV_ADDR(fcport->d_id))
4769 if (!e) 4832 return;
4770 return QLA_FUNCTION_FAILED;
4771 4833
4772 e->u.fcport.fcport = fcport; 4834 spin_lock_irqsave(&fcport->vha->work_lock, flags);
4773 return qla2x00_post_work(vha, e); 4835 if (fcport->disc_state == DSC_UPD_FCPORT) {
4836 spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
4837 return;
4838 }
4839 fcport->jiffies_at_registration = jiffies;
4840 fcport->sec_since_registration = 0;
4841 fcport->next_disc_state = DSC_DELETED;
4842 fcport->disc_state = DSC_UPD_FCPORT;
4843 spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
4844
4845 queue_work(system_unbound_wq, &fcport->reg_work);
4774} 4846}
4775 4847
4776static 4848static
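qla24xx_sched_upd_fcport() above replaces the generic QLA_EVT_UPD_FCPORT work event with a direct queue_work(), but only after claiming DSC_UPD_FCPORT under vha->work_lock, so concurrent callers schedule the registration at most once. A pthread sketch of that claim-then-queue pattern (invented names):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;

    struct port {
            bool update_pending;        /* like disc_state == DSC_UPD_FCPORT */
    };

    static void queue_registration(struct port *p)
    {
            /* stand-in for queue_work(system_unbound_wq, &fcport->reg_work) */
    }

    static void sched_update(struct port *p)
    {
            pthread_mutex_lock(&work_lock);
            if (p->update_pending) {    /* already scheduled; nothing to do */
                    pthread_mutex_unlock(&work_lock);
                    return;
            }
            p->update_pending = true;   /* claim the state before queueing */
            pthread_mutex_unlock(&work_lock);

            queue_registration(p);
    }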
@@ -4808,10 +4880,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4808 fcport->d_id = e->u.new_sess.id; 4880 fcport->d_id = e->u.new_sess.id;
4809 fcport->flags |= FCF_FABRIC_DEVICE; 4881 fcport->flags |= FCF_FABRIC_DEVICE;
4810 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 4882 fcport->fw_login_state = DSC_LS_PLOGI_PEND;
4811 if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP) 4883 if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
4812 fcport->fc4_type = FC4_TYPE_FCP_SCSI; 4884 fcport->fc4_type = FC4_TYPE_FCP_SCSI;
4813 4885
4814 if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) { 4886 if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
4815 fcport->fc4_type = FC4_TYPE_OTHER; 4887 fcport->fc4_type = FC4_TYPE_OTHER;
4816 fcport->fc4f_nvme = FC4_TYPE_NVME; 4888 fcport->fc4f_nvme = FC4_TYPE_NVME;
4817 } 4889 }
@@ -4990,19 +5062,12 @@ qla2x00_do_work(struct scsi_qla_host *vha)
4990 qla2x00_async_adisc(vha, e->u.logio.fcport, 5062 qla2x00_async_adisc(vha, e->u.logio.fcport,
4991 e->u.logio.data); 5063 e->u.logio.data);
4992 break; 5064 break;
4993 case QLA_EVT_ASYNC_ADISC_DONE:
4994 qla2x00_async_adisc_done(vha, e->u.logio.fcport,
4995 e->u.logio.data);
4996 break;
4997 case QLA_EVT_UEVENT: 5065 case QLA_EVT_UEVENT:
4998 qla2x00_uevent_emit(vha, e->u.uevent.code); 5066 qla2x00_uevent_emit(vha, e->u.uevent.code);
4999 break; 5067 break;
5000 case QLA_EVT_AENFX: 5068 case QLA_EVT_AENFX:
5001 qlafx00_process_aen(vha, e); 5069 qlafx00_process_aen(vha, e);
5002 break; 5070 break;
5003 case QLA_EVT_GIDPN:
5004 qla24xx_async_gidpn(vha, e->u.fcport.fcport);
5005 break;
5006 case QLA_EVT_GPNID: 5071 case QLA_EVT_GPNID:
5007 qla24xx_async_gpnid(vha, &e->u.gpnid.id); 5072 qla24xx_async_gpnid(vha, &e->u.gpnid.id);
5008 break; 5073 break;
@@ -5025,9 +5090,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
5025 case QLA_EVT_GPSC: 5090 case QLA_EVT_GPSC:
5026 qla24xx_async_gpsc(vha, e->u.fcport.fcport); 5091 qla24xx_async_gpsc(vha, e->u.fcport.fcport);
5027 break; 5092 break;
5028 case QLA_EVT_UPD_FCPORT:
5029 qla2x00_update_fcport(vha, e->u.fcport.fcport);
5030 break;
5031 case QLA_EVT_GNL: 5093 case QLA_EVT_GNL:
5032 qla24xx_async_gnl(vha, e->u.fcport.fcport); 5094 qla24xx_async_gnl(vha, e->u.fcport.fcport);
5033 break; 5095 break;
@@ -6041,12 +6103,29 @@ qla2x00_do_dpc(void *data)
6041 if (test_and_clear_bit 6103 if (test_and_clear_bit
6042 (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && 6104 (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
6043 !test_bit(UNLOADING, &base_vha->dpc_flags)) { 6105 !test_bit(UNLOADING, &base_vha->dpc_flags)) {
6106 bool do_reset = true;
6107
6108 switch (base_vha->qlini_mode) {
6109 case QLA2XXX_INI_MODE_ENABLED:
6110 break;
6111 case QLA2XXX_INI_MODE_DISABLED:
6112 if (!qla_tgt_mode_enabled(base_vha) &&
6113 !ha->flags.fw_started)
6114 do_reset = false;
6115 break;
6116 case QLA2XXX_INI_MODE_DUAL:
6117 if (!qla_dual_mode_enabled(base_vha) &&
6118 !ha->flags.fw_started)
6119 do_reset = false;
6120 break;
6121 default:
6122 break;
6123 }
6044 6124
6045 ql_dbg(ql_dbg_dpc, base_vha, 0x4007, 6125 if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
6046 "ISP abort scheduled.\n");
6047 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
6048 &base_vha->dpc_flags))) { 6126 &base_vha->dpc_flags))) {
6049 6127 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
6128 "ISP abort scheduled.\n");
6050 if (ha->isp_ops->abort_isp(base_vha)) { 6129 if (ha->isp_ops->abort_isp(base_vha)) {
6051 /* failed. retry later */ 6130 /* failed. retry later */
6052 set_bit(ISP_ABORT_NEEDED, 6131 set_bit(ISP_ABORT_NEEDED,
@@ -6054,10 +6133,9 @@ qla2x00_do_dpc(void *data)
6054 } 6133 }
6055 clear_bit(ABORT_ISP_ACTIVE, 6134 clear_bit(ABORT_ISP_ACTIVE,
6056 &base_vha->dpc_flags); 6135 &base_vha->dpc_flags);
6136 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
6137 "ISP abort end.\n");
6057 } 6138 }
6058
6059 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
6060 "ISP abort end.\n");
6061 } 6139 }
6062 6140
6063 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, 6141 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
@@ -6183,17 +6261,28 @@ intr_on_check:
6183 mutex_unlock(&ha->mq_lock); 6261 mutex_unlock(&ha->mq_lock);
6184 } 6262 }
6185 6263
6186 if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) { 6264 if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
6265 &base_vha->dpc_flags)) {
6187 ql_log(ql_log_info, base_vha, 0xffffff, 6266 ql_log(ql_log_info, base_vha, 0xffffff,
6188 "nvme: SET ZIO Activity exchange threshold to %d.\n", 6267 "nvme: SET ZIO Activity exchange threshold to %d.\n",
6189 ha->nvme_last_rptd_aen); 6268 ha->nvme_last_rptd_aen);
6190 if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) { 6269 if (qla27xx_set_zio_threshold(base_vha,
6270 ha->nvme_last_rptd_aen)) {
6191 ql_log(ql_log_info, base_vha, 0xffffff, 6271 ql_log(ql_log_info, base_vha, 0xffffff,
6192 "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n", 6272 "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
6193 ha->nvme_last_rptd_aen); 6273 ha->nvme_last_rptd_aen);
6194 } 6274 }
6195 } 6275 }
6196 6276
6277 if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
6278 &base_vha->dpc_flags)) {
6279 ql_log(ql_log_info, base_vha, 0xffffff,
6280 "SET ZIO Activity exchange threshold to %d.\n",
6281 ha->last_zio_threshold);
6282 qla27xx_set_zio_threshold(base_vha,
6283 ha->last_zio_threshold);
6284 }
6285
6197 if (!IS_QLAFX00(ha)) 6286 if (!IS_QLAFX00(ha))
6198 qla2x00_do_dpc_all_vps(base_vha); 6287 qla2x00_do_dpc_all_vps(base_vha);
6199 6288
@@ -6406,13 +6495,24 @@ qla2x00_timer(struct timer_list *t)
6406 * FC-NVME 6495 * FC-NVME
6407 * see if the active AEN count has changed from what was last reported. 6496 * see if the active AEN count has changed from what was last reported.
6408 */ 6497 */
6409 if (!vha->vp_idx && 6498 if (!vha->vp_idx && (atomic_read(&ha->nvme_active_aen_cnt) !=
6410 atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen && 6499 ha->nvme_last_rptd_aen) && ha->zio_mode == QLA_ZIO_MODE_6) {
6411 ha->zio_mode == QLA_ZIO_MODE_6) {
6412 ql_log(ql_log_info, vha, 0x3002, 6500 ql_log(ql_log_info, vha, 0x3002,
6413 "nvme: Sched: Set ZIO exchange threshold to %d.\n", 6501 "nvme: Sched: Set ZIO exchange threshold to %d.\n",
6414 ha->nvme_last_rptd_aen); 6502 ha->nvme_last_rptd_aen);
6415 ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); 6503 ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
6504 set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
6505 start_dpc++;
6506 }
6507
6508 if (!vha->vp_idx &&
6509 (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
6510 (ha->zio_mode == QLA_ZIO_MODE_6) &&
6511 (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
6512 ql_log(ql_log_info, vha, 0x3002,
6513 "Sched: Set ZIO exchange threshold to %d.\n",
6514 ha->last_zio_threshold);
6515 ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
6416 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); 6516 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
6417 start_dpc++; 6517 start_dpc++;
6418 } 6518 }
@@ -6944,6 +7044,9 @@ qla2x00_module_init(void)
6944 if (ql2xextended_error_logging == 1) 7044 if (ql2xextended_error_logging == 1)
6945 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; 7045 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
6946 7046
7047 if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
7048 qla_insert_tgt_attrs();
7049
6947 qla2xxx_transport_template = 7050 qla2xxx_transport_template =
6948 fc_attach_transport(&qla2xxx_transport_functions); 7051 fc_attach_transport(&qla2xxx_transport_functions);
6949 if (!qla2xxx_transport_template) { 7052 if (!qla2xxx_transport_template) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8c811b251d42..39828207bc1d 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -141,6 +141,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *,
141 struct abts_recv_from_24xx *); 141 struct abts_recv_from_24xx *);
142static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *, 142static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
143 uint16_t); 143 uint16_t);
144static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
145static inline uint32_t qlt_make_handle(struct qla_qpair *);
144 146
145/* 147/*
146 * Global Variables 148 * Global Variables
@@ -541,7 +543,6 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
541 qlt_response_pkt(host, rsp, pkt); 543 qlt_response_pkt(host, rsp, pkt);
542 break; 544 break;
543 } 545 }
544
545 default: 546 default:
546 qlt_response_pkt(vha, rsp, pkt); 547 qlt_response_pkt(vha, rsp, pkt);
547 break; 548 break;
@@ -600,14 +601,9 @@ void qla2x00_async_nack_sp_done(void *s, int res)
600 sp->fcport->login_succ = 1; 601 sp->fcport->login_succ = 1;
601 602
602 vha->fcport_count++; 603 vha->fcport_count++;
603 604 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
604 ql_dbg(ql_dbg_disc, vha, 0x20f3, 605 qla24xx_sched_upd_fcport(sp->fcport);
605 "%s %d %8phC post upd_fcport fcp_cnt %d\n", 606 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
606 __func__, __LINE__,
607 sp->fcport->port_name,
608 vha->fcport_count);
609 sp->fcport->disc_state = DSC_UPD_FCPORT;
610 qla24xx_post_upd_fcport_work(vha, sp->fcport);
611 } else { 607 } else {
612 sp->fcport->login_retry = 0; 608 sp->fcport->login_retry = 0;
613 sp->fcport->disc_state = DSC_LOGIN_COMPLETE; 609 sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -1230,11 +1226,12 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1230{ 1226{
1231 struct qla_tgt *tgt = sess->tgt; 1227 struct qla_tgt *tgt = sess->tgt;
1232 unsigned long flags; 1228 unsigned long flags;
1229 u16 sec;
1233 1230
1234 if (sess->disc_state == DSC_DELETE_PEND) 1231 switch (sess->disc_state) {
1232 case DSC_DELETE_PEND:
1235 return; 1233 return;
1236 1234 case DSC_DELETED:
1237 if (sess->disc_state == DSC_DELETED) {
1238 if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) 1235 if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
1239 wake_up_all(&tgt->waitQ); 1236 wake_up_all(&tgt->waitQ);
1240 if (sess->vha->fcport_count == 0) 1237 if (sess->vha->fcport_count == 0)
@@ -1243,11 +1240,26 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1243 if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && 1240 if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
1244 !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) 1241 !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
1245 return; 1242 return;
1243 break;
1244 case DSC_UPD_FCPORT:
1245 /*
1246 * This port is not done reporting to upper layer.
1247 * let it finish
1248 */
1249 sess->next_disc_state = DSC_DELETE_PEND;
1250 sec = jiffies_to_msecs(jiffies -
1251 sess->jiffies_at_registration)/1000;
1252 if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
1253 sess->sec_since_registration = sec;
1254 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
1255 "%s %8phC : Slow Rport registration(%d Sec)\n",
1256 __func__, sess->port_name, sec);
1257 }
1258 return;
1259 default:
1260 break;
1246 } 1261 }
1247 1262
1248 if (sess->deleted == QLA_SESS_DELETED)
1249 sess->logout_on_delete = 0;
1250
1251 spin_lock_irqsave(&sess->vha->work_lock, flags); 1263 spin_lock_irqsave(&sess->vha->work_lock, flags);
1252 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 1264 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1253 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1265 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
@@ -1261,7 +1273,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1261 qla24xx_chk_fcp_state(sess); 1273 qla24xx_chk_fcp_state(sess);
1262 1274
1263 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 1275 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
1264 "Scheduling sess %p for deletion\n", sess); 1276 "Scheduling sess %p for deletion %8phC\n",
1277 sess, sess->port_name);
1265 1278
1266 INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); 1279 INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
1267 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); 1280 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
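The DSC_UPD_FCPORT case added above (and the matching hunks in qlt_handle_login() and qlt_24xx_handle_els() further down) defers the requested action while remote-port registration is still in flight, and throttles its "slow registration" warning to at most one message per five-second boundary. A minimal userspace sketch of that throttling pattern, with time() standing in for jiffies arithmetic and printf() for ql_dbg(); apart from the modulo-5 check, the names and values are illustrative:

#include <stdio.h>
#include <time.h>

struct sess_sketch {
        time_t jiffies_at_registration;         /* when registration began */
        unsigned int sec_since_registration;    /* last second we logged at */
};

static void warn_if_slow(struct sess_sketch *sess)
{
        unsigned int sec = (unsigned int)(time(NULL) -
            sess->jiffies_at_registration);

        /* Log only once per new multiple-of-5-second boundary. */
        if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
                sess->sec_since_registration = sec;
                printf("Slow rport registration (%u sec)\n", sec);
        }
}

int main(void)
{
        struct sess_sketch sess = { .jiffies_at_registration = time(NULL) };

        warn_if_slow(&sess);    /* called on every deferred/retried event */
        return 0;
}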
@@ -1479,27 +1492,14 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
1479 struct qla_hw_data *ha = tgt->ha; 1492 struct qla_hw_data *ha = tgt->ha;
1480 unsigned long flags; 1493 unsigned long flags;
1481 1494
1495 mutex_lock(&ha->optrom_mutex);
1482 mutex_lock(&qla_tgt_mutex); 1496 mutex_lock(&qla_tgt_mutex);
1483 if (!vha->fc_vport) { 1497
1484 struct Scsi_Host *sh = vha->host;
1485 struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
1486 bool npiv_vports;
1487
1488 spin_lock_irqsave(sh->host_lock, flags);
1489 npiv_vports = (fc_host->npiv_vports_inuse);
1490 spin_unlock_irqrestore(sh->host_lock, flags);
1491
1492 if (npiv_vports) {
1493 mutex_unlock(&qla_tgt_mutex);
1494 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
1495 "NPIV is in use. Can not stop target\n");
1496 return -EPERM;
1497 }
1498 }
1499 if (tgt->tgt_stop || tgt->tgt_stopped) { 1498 if (tgt->tgt_stop || tgt->tgt_stopped) {
1500 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, 1499 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
1501 "Already in tgt->tgt_stop or tgt_stopped state\n"); 1500 "Already in tgt->tgt_stop or tgt_stopped state\n");
1502 mutex_unlock(&qla_tgt_mutex); 1501 mutex_unlock(&qla_tgt_mutex);
1502 mutex_unlock(&ha->optrom_mutex);
1503 return -EPERM; 1503 return -EPERM;
1504 } 1504 }
1505 1505
@@ -1537,6 +1537,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
1537 1537
1538 /* Wait for sessions to clear out (just in case) */ 1538 /* Wait for sessions to clear out (just in case) */
1539 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); 1539 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
1540 mutex_unlock(&ha->optrom_mutex);
1541
1540 return 0; 1542 return 0;
1541} 1543}
1542EXPORT_SYMBOL(qlt_stop_phase1); 1544EXPORT_SYMBOL(qlt_stop_phase1);
@@ -1566,6 +1568,15 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
1566 1568
1567 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n", 1569 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
1568 tgt); 1570 tgt);
1571
1572 switch (vha->qlini_mode) {
1573 case QLA2XXX_INI_MODE_EXCLUSIVE:
1574 vha->flags.online = 1;
1575 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1576 break;
1577 default:
1578 break;
1579 }
1569} 1580}
1570EXPORT_SYMBOL(qlt_stop_phase2); 1581EXPORT_SYMBOL(qlt_stop_phase2);
1571 1582
@@ -1715,6 +1726,94 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
1715 qla2x00_start_iocbs(vha, qpair->req); 1726 qla2x00_start_iocbs(vha, qpair->req);
1716} 1727}
1717 1728
1729static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
1730{
1731 struct scsi_qla_host *vha = mcmd->vha;
1732 struct qla_hw_data *ha = vha->hw;
1733 struct abts_resp_to_24xx *resp;
1734 uint32_t f_ctl, h;
1735 uint8_t *p;
1736 int rc;
1737 struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
1738 struct qla_qpair *qpair = mcmd->qpair;
1739
1740 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1741 "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
1742 ha, mcmd->fc_tm_rsp);
1743
1744 rc = qlt_check_reserve_free_req(qpair, 1);
1745 if (rc) {
1746 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1747 "qla_target(%d): %s failed: unable to allocate request packet\n",
1748 vha->vp_idx, __func__);
1749 return -EAGAIN;
1750 }
1751
1752 resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
1753 memset(resp, 0, sizeof(*resp));
1754
1755 h = qlt_make_handle(qpair);
1756 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
1757 /*
1758 * CTIO type 7 from the firmware doesn't provide a way to
1759 * know the initiator's LOOP ID, hence we can't find the
1760 * session and, therefore, the command.
1761 */
1762 return -EAGAIN;
1763 } else {
1764 qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
1765 }
1766
1767 resp->handle = MAKE_HANDLE(qpair->req->id, h);
1768 resp->entry_type = ABTS_RESP_24XX;
1769 resp->entry_count = 1;
1770 resp->nport_handle = abts->nport_handle;
1771 resp->vp_index = vha->vp_idx;
1772 resp->sof_type = abts->sof_type;
1773 resp->exchange_address = abts->exchange_address;
1774 resp->fcp_hdr_le = abts->fcp_hdr_le;
1775 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1776 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1777 F_CTL_SEQ_INITIATIVE);
1778 p = (uint8_t *)&f_ctl;
1779 resp->fcp_hdr_le.f_ctl[0] = *p++;
1780 resp->fcp_hdr_le.f_ctl[1] = *p++;
1781 resp->fcp_hdr_le.f_ctl[2] = *p;
1782
1783 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
1784 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
1785 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1786 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1787 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1788 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1789
1790 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1791 if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
1792 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1793 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1794 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1795 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1796 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1797 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1798 } else {
1799 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1800 resp->payload.ba_rjt.reason_code =
1801 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1802 /* Other bytes are zero */
1803 }
1804
1805 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1806
1807 /* Memory Barrier */
1808 wmb();
1809 if (qpair->reqq_start_iocbs)
1810 qpair->reqq_start_iocbs(qpair);
1811 else
1812 qla2x00_start_iocbs(vha, qpair->req);
1813
1814 return rc;
1815}
1816
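qlt_build_abts_resp_iocb() above answers the initiator that sent the ABTS, so it copies the received frame's source ID into the response's destination ID (and the reverse), then fills a basic accept (BA_ACC) payload when fc_tm_rsp is FCP_TMF_CMPL and a basic reject (BA_RJT) otherwise. A simplified, compilable sketch of that header reversal; the structure below is a stand-in, not the real firmware IOCB layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fc_hdr_sketch {
        uint8_t d_id[3];        /* destination port ID */
        uint8_t s_id[3];        /* source port ID */
};

static void build_abts_resp(struct fc_hdr_sketch *resp,
                            const struct fc_hdr_sketch *abts, int tmf_ok)
{
        /* Answer toward the original sender: swap the ID fields. */
        memcpy(resp->d_id, abts->s_id, sizeof(resp->d_id));
        memcpy(resp->s_id, abts->d_id, sizeof(resp->s_id));

        printf("sending %s\n", tmf_ok ? "BA_ACC" : "BA_RJT");
}

int main(void)
{
        struct fc_hdr_sketch abts = { .d_id = {1, 2, 3}, .s_id = {4, 5, 6} };
        struct fc_hdr_sketch resp;

        build_abts_resp(&resp, &abts, 1);
        return 0;
}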
1718/* 1817/*
1719 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire 1818 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
1720 */ 1819 */
@@ -1742,6 +1841,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
1742 } 1841 }
1743 1842
1744 resp->entry_type = ABTS_RESP_24XX; 1843 resp->entry_type = ABTS_RESP_24XX;
1844 resp->handle = QLA_TGT_SKIP_HANDLE;
1745 resp->entry_count = 1; 1845 resp->entry_count = 1;
1746 resp->nport_handle = abts->nport_handle; 1846 resp->nport_handle = abts->nport_handle;
1747 resp->vp_index = vha->vp_idx; 1847 resp->vp_index = vha->vp_idx;
@@ -1799,15 +1899,13 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
1799 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire 1899 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
1800 */ 1900 */
1801static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, 1901static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1802 struct abts_resp_from_24xx_fw *entry) 1902 struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
1803{ 1903{
1804 struct ctio7_to_24xx *ctio; 1904 struct ctio7_to_24xx *ctio;
1905 u16 tmp;
1906 struct abts_recv_from_24xx *entry;
1805 1907
1806 ql_dbg(ql_dbg_tgt, vha, 0xe007, 1908 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
1807 "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
1808
1809 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
1810 vha->hw->base_qpair, NULL);
1811 if (ctio == NULL) { 1909 if (ctio == NULL) {
1812 ql_dbg(ql_dbg_tgt, vha, 0xe04b, 1910 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1813 "qla_target(%d): %s failed: unable to allocate " 1911 "qla_target(%d): %s failed: unable to allocate "
@@ -1815,6 +1913,13 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1815 return; 1913 return;
1816 } 1914 }
1817 1915
1916 if (mcmd)
1917 /* abts from remote port */
1918 entry = &mcmd->orig_iocb.abts;
1919 else
1920 /* abts from this driver. */
1921 entry = (struct abts_recv_from_24xx *)pkt;
1922
1818 /* 1923 /*
1819 * On entry we have the firmware's response to an ABTS response 1924 * On entry we have the firmware's response to an ABTS response
1820 * that we generated earlier, so its ID fields are reversed. 1925 * that we generated earlier, so its ID fields are reversed.
@@ -1826,56 +1931,48 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1826 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 1931 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1827 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 1932 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1828 ctio->vp_index = vha->vp_idx; 1933 ctio->vp_index = vha->vp_idx;
1829 ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1830 ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1831 ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1832 ctio->exchange_addr = entry->exchange_addr_to_abort; 1934 ctio->exchange_addr = entry->exchange_addr_to_abort;
1833 ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 1935 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1834 CTIO7_FLAGS_TERMINATE);
1835 ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
1836 1936
1837 /* Memory Barrier */ 1937 if (mcmd) {
1838 wmb(); 1938 ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0];
1839 qla2x00_start_iocbs(vha, vha->req); 1939 ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1];
1940 ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2];
1840 1941
1841 qlt_24xx_send_abts_resp(vha->hw->base_qpair, 1942 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
1842 (struct abts_recv_from_24xx *)entry, 1943 tmp |= (mcmd->abort_io_attr << 9);
1843 FCP_TMF_CMPL, true); 1944 else if (qpair->retry_term_cnt & 1)
1844} 1945 tmp |= (0x4 << 9);
1845 1946 } else {
1846static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) 1947 ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1847{ 1948 ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1848 struct qla_tgt_sess_op *op; 1949 ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1849 struct qla_tgt_cmd *cmd;
1850 unsigned long flags;
1851 1950
1852 spin_lock_irqsave(&vha->cmd_list_lock, flags); 1951 if (qpair->retry_term_cnt & 1)
1853 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 1952 tmp |= (0x4 << 9);
1854 if (tag == op->atio.u.isp24.exchange_addr) {
1855 op->aborted = true;
1856 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1857 return 1;
1858 }
1859 } 1953 }
1954 ctio->u.status1.flags = cpu_to_le16(tmp);
1955 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
1860 1956
1861 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 1957 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1862 if (tag == op->atio.u.isp24.exchange_addr) { 1958 "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
1863 op->aborted = true; 1959 le16_to_cpu(ctio->u.status1.flags),
1864 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1960 le16_to_cpu(ctio->u.status1.ox_id),
1865 return 1; 1961 (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
1866 }
1867 }
1868 1962
1869 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 1963 /* Memory Barrier */
1870 if (tag == cmd->atio.u.isp24.exchange_addr) { 1964 wmb();
1871 cmd->aborted = 1; 1965 if (qpair->reqq_start_iocbs)
1872 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1966 qpair->reqq_start_iocbs(qpair);
1873 return 1; 1967 else
1874 } 1968 qla2x00_start_iocbs(vha, qpair->req);
1875 } 1969
1876 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1970 if (mcmd)
1971 qlt_build_abts_resp_iocb(mcmd);
1972 else
1973 qlt_24xx_send_abts_resp(qpair,
1974 (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
1877 1975
1878 return 0;
1879} 1976}
1880 1977
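In the rewritten qlt_24xx_retry_term_exchange() above, the exchange attribute travels in bits 9..12 of the CTIO flags word: it comes from the saved ATIO when QLA24XX_MGMT_ABORT_IO_ATTR_VALID is set, otherwise 0x4 is forced on every other retry. A small sketch of that packing; the two flag constants are illustrative placeholders, not the driver's real bit values:

#include <stdint.h>
#include <stdio.h>

#define FLAGS_STATUS_MODE_1     (1u << 6)       /* placeholder */
#define FLAGS_TERMINATE         (1u << 14)      /* placeholder */

static uint16_t pack_term_flags(int attr_valid, uint8_t attr,
                                unsigned int retry_cnt)
{
        uint16_t tmp = FLAGS_STATUS_MODE_1 | FLAGS_TERMINATE;

        if (attr_valid)
                tmp |= (uint16_t)(attr << 9);   /* reuse the ATIO attribute */
        else if (retry_cnt & 1)
                tmp |= (uint16_t)(0x4 << 9);    /* alternate attr on retries */

        return tmp;
}

int main(void)
{
        printf("flags=%#x\n", (unsigned)pack_term_flags(1, 0x2, 0));
        printf("flags=%#x\n", (unsigned)pack_term_flags(0, 0, 1));
        return 0;
}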
1881/* drop cmds for the given lun 1978/* drop cmds for the given lun
@@ -1970,9 +2067,8 @@ static void qlt_do_tmr_work(struct work_struct *work)
1970 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags); 2067 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
1971 switch (mcmd->tmr_func) { 2068 switch (mcmd->tmr_func) {
1972 case QLA_TGT_ABTS: 2069 case QLA_TGT_ABTS:
1973 qlt_24xx_send_abts_resp(mcmd->qpair, 2070 mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
1974 &mcmd->orig_iocb.abts, 2071 qlt_build_abts_resp_iocb(mcmd);
1975 FCP_TMF_REJECTED, false);
1976 break; 2072 break;
1977 case QLA_TGT_LUN_RESET: 2073 case QLA_TGT_LUN_RESET:
1978 case QLA_TGT_CLEAR_TS: 2074 case QLA_TGT_CLEAR_TS:
@@ -2007,12 +2103,6 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2007 struct qla_tgt_mgmt_cmd *mcmd; 2103 struct qla_tgt_mgmt_cmd *mcmd;
2008 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; 2104 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2009 2105
2010 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
2011 /* send TASK_ABORT response immediately */
2012 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
2013 return 0;
2014 }
2015
2016 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 2106 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2017 "qla_target(%d): task abort (tag=%d)\n", 2107 "qla_target(%d): task abort (tag=%d)\n",
2018 vha->vp_idx, abts->exchange_addr_to_abort); 2108 vha->vp_idx, abts->exchange_addr_to_abort);
@@ -2025,7 +2115,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2025 return -ENOMEM; 2115 return -ENOMEM;
2026 } 2116 }
2027 memset(mcmd, 0, sizeof(*mcmd)); 2117 memset(mcmd, 0, sizeof(*mcmd));
2028 2118 mcmd->cmd_type = TYPE_TGT_TMCMD;
2029 mcmd->sess = sess; 2119 mcmd->sess = sess;
2030 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); 2120 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
2031 mcmd->reset_count = ha->base_qpair->chip_reset; 2121 mcmd->reset_count = ha->base_qpair->chip_reset;
@@ -2047,6 +2137,8 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2047 if (abort_cmd && abort_cmd->qpair) { 2137 if (abort_cmd && abort_cmd->qpair) {
2048 mcmd->qpair = abort_cmd->qpair; 2138 mcmd->qpair = abort_cmd->qpair;
2049 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid; 2139 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
2140 mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
2141 mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
2050 } 2142 }
2051 } 2143 }
2052 2144
@@ -2264,6 +2356,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2264 struct qla_hw_data *ha = vha->hw; 2356 struct qla_hw_data *ha = vha->hw;
2265 unsigned long flags; 2357 unsigned long flags;
2266 struct qla_qpair *qpair = mcmd->qpair; 2358 struct qla_qpair *qpair = mcmd->qpair;
2359 bool free_mcmd = true;
2267 2360
2268 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, 2361 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2269 "TM response mcmd (%p) status %#x state %#x", 2362 "TM response mcmd (%p) status %#x state %#x",
@@ -2302,10 +2395,10 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2302 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2395 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2303 } 2396 }
2304 } else { 2397 } else {
2305 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) 2398 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
2306 qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts, 2399 qlt_build_abts_resp_iocb(mcmd);
2307 mcmd->fc_tm_rsp, false); 2400 free_mcmd = false;
2308 else 2401 } else
2309 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd, 2402 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
2310 mcmd->fc_tm_rsp); 2403 mcmd->fc_tm_rsp);
2311 } 2404 }
@@ -2317,7 +2410,9 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2317 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> 2410 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
2318 * qlt_xmit_tm_rsp() returns here.. 2411 * qlt_xmit_tm_rsp() returns here..
2319 */ 2412 */
2320 ha->tgt.tgt_ops->free_mcmd(mcmd); 2413 if (free_mcmd)
2414 ha->tgt.tgt_ops->free_mcmd(mcmd);
2415
2321 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2416 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2322} 2417}
2323EXPORT_SYMBOL(qlt_xmit_tm_rsp); 2418EXPORT_SYMBOL(qlt_xmit_tm_rsp);
@@ -2330,7 +2425,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
2330 BUG_ON(cmd->sg_cnt == 0); 2425 BUG_ON(cmd->sg_cnt == 0);
2331 2426
2332 prm->sg = (struct scatterlist *)cmd->sg; 2427 prm->sg = (struct scatterlist *)cmd->sg;
2333 prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg, 2428 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
2334 cmd->sg_cnt, cmd->dma_data_direction); 2429 cmd->sg_cnt, cmd->dma_data_direction);
2335 if (unlikely(prm->seg_cnt == 0)) 2430 if (unlikely(prm->seg_cnt == 0))
2336 goto out_err; 2431 goto out_err;
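These qlt_pci_map_calc_cnt() hunks are representative of the tree-wide conversion in this pull: pci_map_sg(pdev, ...) becomes dma_map_sg(&pdev->dev, ...) with the same scatterlist and count, only routed through the generic struct device. A compile-only sketch of the changed call shape, with dummy types standing in for the kernel's:

struct device_sketch { int dummy; };
struct pci_dev_sketch { struct device_sketch dev; };
struct scatterlist_sketch { int dummy; };

enum dma_dir_sketch { DIR_TO_DEVICE, DIR_FROM_DEVICE, DIR_BIDIRECTIONAL };

/* New-style entry point: takes a struct device and a generic direction. */
static int dma_map_sg_sketch(struct device_sketch *dev,
                             struct scatterlist_sketch *sg, int nents,
                             enum dma_dir_sketch dir)
{
        (void)dev; (void)sg; (void)dir;
        return nents;   /* the real call returns the mapped segment count */
}

/* Call sites change from pci_map_sg(pdev, ...) to this shape. */
static int map_cmd_sg(struct pci_dev_sketch *pdev,
                      struct scatterlist_sketch *sg, int nents)
{
        return dma_map_sg_sketch(&pdev->dev, sg, nents, DIR_BIDIRECTIONAL);
}

int main(void)
{
        struct pci_dev_sketch pdev;
        struct scatterlist_sketch sg;

        return map_cmd_sg(&pdev, &sg, 1) == 1 ? 0 : 1;
}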
@@ -2357,7 +2452,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
2357 2452
2358 if (cmd->prot_sg_cnt) { 2453 if (cmd->prot_sg_cnt) {
2359 prm->prot_sg = cmd->prot_sg; 2454 prm->prot_sg = cmd->prot_sg;
2360 prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev, 2455 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
2361 cmd->prot_sg, cmd->prot_sg_cnt, 2456 cmd->prot_sg, cmd->prot_sg_cnt,
2362 cmd->dma_data_direction); 2457 cmd->dma_data_direction);
2363 if (unlikely(prm->prot_seg_cnt == 0)) 2458 if (unlikely(prm->prot_seg_cnt == 0))
@@ -2392,12 +2487,12 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2392 2487
2393 qpair = cmd->qpair; 2488 qpair = cmd->qpair;
2394 2489
2395 pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt, 2490 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
2396 cmd->dma_data_direction); 2491 cmd->dma_data_direction);
2397 cmd->sg_mapped = 0; 2492 cmd->sg_mapped = 0;
2398 2493
2399 if (cmd->prot_sg_cnt) 2494 if (cmd->prot_sg_cnt)
2400 pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt, 2495 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
2401 cmd->dma_data_direction); 2496 cmd->dma_data_direction);
2402 2497
2403 if (!cmd->ctx) 2498 if (!cmd->ctx)
@@ -3289,7 +3384,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3289 3384
3290 3385
3291 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 3386 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3387 spin_lock(&cmd->cmd_lock);
3292 cmd->cmd_sent_to_fw = 1; 3388 cmd->cmd_sent_to_fw = 1;
3389 spin_unlock(&cmd->cmd_lock);
3390 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3293 3391
3294 /* Memory Barrier */ 3392 /* Memory Barrier */
3295 wmb(); 3393 wmb();
@@ -3367,7 +3465,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
3367 qlt_load_data_segments(&prm); 3465 qlt_load_data_segments(&prm);
3368 3466
3369 cmd->state = QLA_TGT_STATE_NEED_DATA; 3467 cmd->state = QLA_TGT_STATE_NEED_DATA;
3468 spin_lock(&cmd->cmd_lock);
3370 cmd->cmd_sent_to_fw = 1; 3469 cmd->cmd_sent_to_fw = 1;
3470 spin_unlock(&cmd->cmd_lock);
3471 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3371 3472
3372 /* Memory Barrier */ 3473 /* Memory Barrier */
3373 wmb(); 3474 wmb();
@@ -3825,10 +3926,10 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3825 3926
3826 3927
3827/* ha->hardware_lock supposed to be held on entry */ 3928/* ha->hardware_lock supposed to be held on entry */
3828static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 3929static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3829 struct rsp_que *rsp, uint32_t handle, void *ctio) 3930 struct rsp_que *rsp, uint32_t handle, void *ctio)
3830{ 3931{
3831 struct qla_tgt_cmd *cmd = NULL; 3932 void *cmd = NULL;
3832 struct req_que *req; 3933 struct req_que *req;
3833 int qid = GET_QID(handle); 3934 int qid = GET_QID(handle);
3834 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK; 3935 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
@@ -3857,7 +3958,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3857 return NULL; 3958 return NULL;
3858 } 3959 }
3859 3960
3860 cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h]; 3961 cmd = (void *) req->outstanding_cmds[h];
3861 if (unlikely(cmd == NULL)) { 3962 if (unlikely(cmd == NULL)) {
3862 ql_dbg(ql_dbg_async, vha, 0xe053, 3963 ql_dbg(ql_dbg_async, vha, 0xe053,
3863 "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", 3964 "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -3930,7 +4031,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3930 return; 4031 return;
3931 } 4032 }
3932 4033
3933 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio); 4034 cmd = (struct qla_tgt_cmd *)qlt_ctio_to_cmd(vha, rsp, handle, ctio);
3934 if (cmd == NULL) 4035 if (cmd == NULL)
3935 return; 4036 return;
3936 4037
@@ -3941,12 +4042,20 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3941 4042
3942 if (unlikely(status != CTIO_SUCCESS)) { 4043 if (unlikely(status != CTIO_SUCCESS)) {
3943 switch (status & 0xFFFF) { 4044 switch (status & 0xFFFF) {
4045 case CTIO_INVALID_RX_ID:
4046 if (printk_ratelimit())
4047 dev_info(&vha->hw->pdev->dev,
4048 "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
4049 vha->vp_idx, cmd->atio.u.isp24.attr,
4050 ((cmd->ctio_flags >> 9) & 0xf),
4051 cmd->ctio_flags);
4052
4053 break;
3944 case CTIO_LIP_RESET: 4054 case CTIO_LIP_RESET:
3945 case CTIO_TARGET_RESET: 4055 case CTIO_TARGET_RESET:
3946 case CTIO_ABORTED: 4056 case CTIO_ABORTED:
3947 /* driver request abort via Terminate exchange */ 4057 /* driver request abort via Terminate exchange */
3948 case CTIO_TIMEOUT: 4058 case CTIO_TIMEOUT:
3949 case CTIO_INVALID_RX_ID:
3950 /* They are OK */ 4059 /* They are OK */
3951 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 4060 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3952 "qla_target(%d): CTIO with " 4061 "qla_target(%d): CTIO with "
@@ -3973,7 +4082,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3973 * Session is already logged out, but we need 4082 * Session is already logged out, but we need
3974 * to notify initiator, who's not aware of this 4083 * to notify initiator, who's not aware of this
3975 */ 4084 */
3976 cmd->sess->logout_on_delete = 0;
3977 cmd->sess->send_els_logo = 1; 4085 cmd->sess->send_els_logo = 1;
3978 ql_dbg(ql_dbg_disc, vha, 0x20f8, 4086 ql_dbg(ql_dbg_disc, vha, 0x20f8,
3979 "%s %d %8phC post del sess\n", 4087 "%s %d %8phC post del sess\n",
@@ -4711,6 +4819,12 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
4711 sess = qlt_find_sess_invalidate_other(vha, wwn, 4819 sess = qlt_find_sess_invalidate_other(vha, wwn,
4712 port_id, loop_id, &conflict_sess); 4820 port_id, loop_id, &conflict_sess);
4713 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4821 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4822 } else {
4823 ql_dbg(ql_dbg_disc, vha, 0xffff,
4824 "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
4825 __func__, __LINE__, loop_id, port_id.b24);
4826 qlt_send_term_imm_notif(vha, iocb, 1);
4827 goto out;
4714 } 4828 }
4715 4829
4716 if (IS_SW_RESV_ADDR(port_id)) { 4830 if (IS_SW_RESV_ADDR(port_id)) {
@@ -4752,6 +4866,32 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
4752 goto out; 4866 goto out;
4753 } 4867 }
4754 4868
4869 if (sess->disc_state == DSC_UPD_FCPORT) {
4870 u16 sec;
4871
4872 /*
4873 * Remote port registration is still going on from
4874 * previous login. Allow it to finish before we
4875 * accept the new login.
4876 */
4877 sess->next_disc_state = DSC_DELETE_PEND;
4878 sec = jiffies_to_msecs(jiffies -
4879 sess->jiffies_at_registration) / 1000;
4880 if (sess->sec_since_registration < sec && sec &&
4881 !(sec % 5)) {
4882 sess->sec_since_registration = sec;
4883 ql_dbg(ql_dbg_disc, vha, 0xffff,
4884 "%s %8phC - Slow Rport registration (%d Sec)\n",
4885 __func__, sess->port_name, sec);
4886 }
4887
4888 if (!conflict_sess)
4889 kmem_cache_free(qla_tgt_plogi_cachep, pla);
4890
4891 qlt_send_term_imm_notif(vha, iocb, 1);
4892 goto out;
4893 }
4894
4755 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); 4895 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4756 sess->d_id = port_id; 4896 sess->d_id = port_id;
4757 sess->login_gen++; 4897 sess->login_gen++;
@@ -4910,6 +5050,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4910 5050
4911 if (sess != NULL) { 5051 if (sess != NULL) {
4912 bool delete = false; 5052 bool delete = false;
5053 int sec;
4913 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 5054 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4914 switch (sess->fw_login_state) { 5055 switch (sess->fw_login_state) {
4915 case DSC_LS_PLOGI_PEND: 5056 case DSC_LS_PLOGI_PEND:
@@ -4922,9 +5063,24 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4922 } 5063 }
4923 5064
4924 switch (sess->disc_state) { 5065 switch (sess->disc_state) {
5066 case DSC_UPD_FCPORT:
5067 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
5068 flags);
5069
5070 sec = jiffies_to_msecs(jiffies -
5071 sess->jiffies_at_registration)/1000;
5072 if (sess->sec_since_registration < sec && sec &&
5073 !(sec % 5)) {
5074 sess->sec_since_registration = sec;
5075 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
5076 "%s %8phC : Slow Rport registration(%d Sec)\n",
5077 __func__, sess->port_name, sec);
5078 }
5079 qlt_send_term_imm_notif(vha, iocb, 1);
5080 return 0;
5081
4925 case DSC_LOGIN_PEND: 5082 case DSC_LOGIN_PEND:
4926 case DSC_GPDB: 5083 case DSC_GPDB:
4927 case DSC_UPD_FCPORT:
4928 case DSC_LOGIN_COMPLETE: 5084 case DSC_LOGIN_COMPLETE:
4929 case DSC_ADISC: 5085 case DSC_ADISC:
4930 delete = false; 5086 delete = false;
@@ -5608,6 +5764,101 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5608 tgt->atio_irq_cmd_count--; 5764 tgt->atio_irq_cmd_count--;
5609} 5765}
5610 5766
5767/*
5768 * qpair lock is assumed to be held
5769 * rc = 0 : send terminate & ABTS response
5770 * rc != 0: do not send terminate & ABTS response
5771 */
5772static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5773 struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
5774{
5775 struct qla_hw_data *ha = vha->hw;
5776 int rc = 0;
5777
5778 /*
5779 * Detect an unresolved exchange. If the same ABTS is unable
5780 * to terminate an existing command and keeps looping
5781 * between the FW & driver, force a FW dump. Within one
5782 * jiffy we should see multiple loops.
5783 */
5784 if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
5785 qpair->retry_term_jiff == jiffies) {
5786 /* found existing exchange */
5787 qpair->retry_term_cnt++;
5788 if (qpair->retry_term_cnt >= 5) {
5789 rc = EIO;
5790 qpair->retry_term_cnt = 0;
5791 ql_log(ql_log_warn, vha, 0xffff,
5792 "Unable to send ABTS response. Dumping firmware.\n");
5793 ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
5794 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5795
5796 if (qpair == ha->base_qpair)
5797 ha->isp_ops->fw_dump(vha, 1);
5798 else
5799 ha->isp_ops->fw_dump(vha, 0);
5800
5801 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5802 qla2xxx_wake_dpc(vha);
5803 }
5804 } else if (qpair->retry_term_jiff != jiffies) {
5805 qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
5806 qpair->retry_term_cnt = 0;
5807 qpair->retry_term_jiff = jiffies;
5808 }
5809
5810 return rc;
5811}
5812
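qlt_chk_unresolv_exchg() above is a small watchdog: if the same exchange address reappears within the same jiffy, a counter is bumped, and after five hits the driver gives up, dumps the firmware, and schedules an ISP abort; qlt_handle_abts_completion() below consults it before retrying a termination. A runnable approximation of that detector, with time() approximating jiffies; the threshold of 5 matches the hunk, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct qpair_sketch {
        uint32_t retry_term_exchg_addr;
        unsigned int retry_term_cnt;
        time_t retry_term_jiff;
};

static int chk_unresolved_exchg(struct qpair_sketch *qp, uint32_t exchg)
{
        time_t now = time(NULL);

        if (qp->retry_term_exchg_addr == exchg && qp->retry_term_jiff == now) {
                /* Same exchange looping within one tick. */
                if (++qp->retry_term_cnt >= 5) {
                        qp->retry_term_cnt = 0;
                        printf("unresolved exchange %#x, escalating\n",
                               (unsigned)exchg);
                        return -1;      /* caller stops retrying */
                }
        } else if (qp->retry_term_jiff != now) {
                /* New tick: start tracking this exchange afresh. */
                qp->retry_term_exchg_addr = exchg;
                qp->retry_term_cnt = 0;
                qp->retry_term_jiff = now;
        }
        return 0;
}

int main(void)
{
        struct qpair_sketch qp = { 0 };
        int i;

        for (i = 0; i < 7; i++)
                if (chk_unresolved_exchg(&qp, 0xdeadbeef))
                        break;
        return 0;
}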
5813
5814static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5815 struct rsp_que *rsp, response_t *pkt)
5816{
5817 struct abts_resp_from_24xx_fw *entry =
5818 (struct abts_resp_from_24xx_fw *)pkt;
5819 u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
5820 struct qla_tgt_mgmt_cmd *mcmd;
5821 struct qla_hw_data *ha = vha->hw;
5822
5823 mcmd = (struct qla_tgt_mgmt_cmd *)qlt_ctio_to_cmd(vha, rsp,
5824 pkt->handle, pkt);
5825 if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
5826 ql_dbg(ql_dbg_async, vha, 0xe064,
5827 "qla_target(%d): ABTS Comp without mcmd\n",
5828 vha->vp_idx);
5829 return;
5830 }
5831
5832 if (mcmd)
5833 vha = mcmd->vha;
5834 vha->vha_tgt.qla_tgt->abts_resp_expected--;
5835
5836 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5837 "ABTS_RESP_24XX: compl_status %x\n",
5838 entry->compl_status);
5839
5840 if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
5841 if ((entry->error_subcode1 == 0x1E) &&
5842 (entry->error_subcode2 == 0)) {
5843 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
5844 ha->tgt.tgt_ops->free_mcmd(mcmd);
5845 return;
5846 }
5847 qlt_24xx_retry_term_exchange(vha, rsp->qpair,
5848 pkt, mcmd);
5849 } else {
5850 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5851 "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
5852 vha->vp_idx, entry->compl_status,
5853 entry->error_subcode1,
5854 entry->error_subcode2);
5855 ha->tgt.tgt_ops->free_mcmd(mcmd);
5856 }
5857 } else {
5858 ha->tgt.tgt_ops->free_mcmd(mcmd);
5859 }
5860}
5861
5611/* ha->hardware_lock supposed to be held on entry */ 5862/* ha->hardware_lock supposed to be held on entry */
5612/* called via callback from qla2xxx */ 5863/* called via callback from qla2xxx */
5613static void qlt_response_pkt(struct scsi_qla_host *vha, 5864static void qlt_response_pkt(struct scsi_qla_host *vha,
@@ -5740,41 +5991,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
5740 5991
5741 case ABTS_RESP_24XX: 5992 case ABTS_RESP_24XX:
5742 if (tgt->abts_resp_expected > 0) { 5993 if (tgt->abts_resp_expected > 0) {
5743 struct abts_resp_from_24xx_fw *entry = 5994 qlt_handle_abts_completion(vha, rsp, pkt);
5744 (struct abts_resp_from_24xx_fw *)pkt;
5745 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5746 "ABTS_RESP_24XX: compl_status %x\n",
5747 entry->compl_status);
5748 tgt->abts_resp_expected--;
5749 if (le16_to_cpu(entry->compl_status) !=
5750 ABTS_RESP_COMPL_SUCCESS) {
5751 if ((entry->error_subcode1 == 0x1E) &&
5752 (entry->error_subcode2 == 0)) {
5753 /*
5754 * We've got a race here: aborted
5755 * exchange not terminated, i.e.
5756 * response for the aborted command was
5757 * sent between the abort request was
5758 * received and processed.
5759 * Unfortunately, the firmware has a
5760 * silly requirement that all aborted
5761 * exchanges must be explicitely
5762 * terminated, otherwise it refuses to
5763 * send responses for the abort
5764 * requests. So, we have to
5765 * (re)terminate the exchange and retry
5766 * the abort response.
5767 */
5768 qlt_24xx_retry_term_exchange(vha,
5769 entry);
5770 } else
5771 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5772 "qla_target(%d): ABTS_RESP_24XX "
5773 "failed %x (subcode %x:%x)",
5774 vha->vp_idx, entry->compl_status,
5775 entry->error_subcode1,
5776 entry->error_subcode2);
5777 }
5778 } else { 5995 } else {
5779 ql_dbg(ql_dbg_tgt, vha, 0xe064, 5996 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5780 "qla_target(%d): Unexpected ABTS_RESP_24XX " 5997 "qla_target(%d): Unexpected ABTS_RESP_24XX "
@@ -5964,10 +6181,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5964 case MODE_DUAL: 6181 case MODE_DUAL:
5965 if (newfcport) { 6182 if (newfcport) {
5966 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { 6183 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
5967 ql_dbg(ql_dbg_disc, vha, 0x20fe, 6184 qla24xx_sched_upd_fcport(fcport);
5968 "%s %d %8phC post upd_fcport fcp_cnt %d\n",
5969 __func__, __LINE__, fcport->port_name, vha->fcport_count);
5970 qla24xx_post_upd_fcport_work(vha, fcport);
5971 } else { 6185 } else {
5972 ql_dbg(ql_dbg_disc, vha, 0x20ff, 6186 ql_dbg(ql_dbg_disc, vha, 0x20ff,
5973 "%s %d %8phC post gpsc fcp_cnt %d\n", 6187 "%s %d %8phC post gpsc fcp_cnt %d\n",
@@ -6413,6 +6627,9 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6413 if (!(host->hostt->supported_mode & MODE_TARGET)) 6627 if (!(host->hostt->supported_mode & MODE_TARGET))
6414 continue; 6628 continue;
6415 6629
6630 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6631 continue;
6632
6416 spin_lock_irqsave(&ha->hardware_lock, flags); 6633 spin_lock_irqsave(&ha->hardware_lock, flags);
6417 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { 6634 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6418 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", 6635 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
@@ -6475,15 +6692,15 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
6475EXPORT_SYMBOL(qlt_lport_deregister); 6692EXPORT_SYMBOL(qlt_lport_deregister);
6476 6693
6477/* Must be called under HW lock */ 6694/* Must be called under HW lock */
6478static void qlt_set_mode(struct scsi_qla_host *vha) 6695void qlt_set_mode(struct scsi_qla_host *vha)
6479{ 6696{
6480 switch (ql2x_ini_mode) { 6697 switch (vha->qlini_mode) {
6481 case QLA2XXX_INI_MODE_DISABLED: 6698 case QLA2XXX_INI_MODE_DISABLED:
6482 case QLA2XXX_INI_MODE_EXCLUSIVE: 6699 case QLA2XXX_INI_MODE_EXCLUSIVE:
6483 vha->host->active_mode = MODE_TARGET; 6700 vha->host->active_mode = MODE_TARGET;
6484 break; 6701 break;
6485 case QLA2XXX_INI_MODE_ENABLED: 6702 case QLA2XXX_INI_MODE_ENABLED:
6486 vha->host->active_mode = MODE_UNKNOWN; 6703 vha->host->active_mode = MODE_INITIATOR;
6487 break; 6704 break;
6488 case QLA2XXX_INI_MODE_DUAL: 6705 case QLA2XXX_INI_MODE_DUAL:
6489 vha->host->active_mode = MODE_DUAL; 6706 vha->host->active_mode = MODE_DUAL;
@@ -6496,7 +6713,7 @@ static void qlt_set_mode(struct scsi_qla_host *vha)
6496/* Must be called under HW lock */ 6713/* Must be called under HW lock */
6497static void qlt_clear_mode(struct scsi_qla_host *vha) 6714static void qlt_clear_mode(struct scsi_qla_host *vha)
6498{ 6715{
6499 switch (ql2x_ini_mode) { 6716 switch (vha->qlini_mode) {
6500 case QLA2XXX_INI_MODE_DISABLED: 6717 case QLA2XXX_INI_MODE_DISABLED:
6501 vha->host->active_mode = MODE_UNKNOWN; 6718 vha->host->active_mode = MODE_UNKNOWN;
6502 break; 6719 break;
@@ -6532,12 +6749,17 @@ qlt_enable_vha(struct scsi_qla_host *vha)
6532 dump_stack(); 6749 dump_stack();
6533 return; 6750 return;
6534 } 6751 }
6752 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6753 return;
6535 6754
6536 spin_lock_irqsave(&ha->hardware_lock, flags); 6755 spin_lock_irqsave(&ha->hardware_lock, flags);
6537 tgt->tgt_stopped = 0; 6756 tgt->tgt_stopped = 0;
6538 qlt_set_mode(vha); 6757 qlt_set_mode(vha);
6539 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6758 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6540 6759
6760 mutex_lock(&ha->optrom_mutex);
6761 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6762 "%s.\n", __func__);
6541 if (vha->vp_idx) { 6763 if (vha->vp_idx) {
6542 qla24xx_disable_vp(vha); 6764 qla24xx_disable_vp(vha);
6543 qla24xx_enable_vp(vha); 6765 qla24xx_enable_vp(vha);
@@ -6546,6 +6768,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
6546 qla2xxx_wake_dpc(base_vha); 6768 qla2xxx_wake_dpc(base_vha);
6547 qla2x00_wait_for_hba_online(base_vha); 6769 qla2x00_wait_for_hba_online(base_vha);
6548 } 6770 }
6771 mutex_unlock(&ha->optrom_mutex);
6549} 6772}
6550EXPORT_SYMBOL(qlt_enable_vha); 6773EXPORT_SYMBOL(qlt_enable_vha);
6551 6774
@@ -6767,7 +6990,7 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6767 if (qla_tgt_mode_enabled(vha)) 6990 if (qla_tgt_mode_enabled(vha))
6768 nv->exchange_count = cpu_to_le16(0xFFFF); 6991 nv->exchange_count = cpu_to_le16(0xFFFF);
6769 else /* dual */ 6992 else /* dual */
6770 nv->exchange_count = cpu_to_le16(ql2xexchoffld); 6993 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6771 6994
6772 /* Enable target mode */ 6995 /* Enable target mode */
6773 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6996 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6846,14 +7069,6 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6846 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 7069 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6847 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 7070 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6848 } 7071 }
6849
6850 /* disable ZIO at start time. */
6851 if (!vha->flags.init_done) {
6852 uint32_t tmp;
6853 tmp = le32_to_cpu(icb->firmware_options_2);
6854 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6855 icb->firmware_options_2 = cpu_to_le32(tmp);
6856 }
6857} 7072}
6858 7073
6859void 7074void
@@ -6881,7 +7096,7 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6881 if (qla_tgt_mode_enabled(vha)) 7096 if (qla_tgt_mode_enabled(vha))
6882 nv->exchange_count = cpu_to_le16(0xFFFF); 7097 nv->exchange_count = cpu_to_le16(0xFFFF);
6883 else /* dual */ 7098 else /* dual */
6884 nv->exchange_count = cpu_to_le16(ql2xexchoffld); 7099 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6885 7100
6886 /* Enable target mode */ 7101 /* Enable target mode */
6887 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 7102 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6957,15 +7172,6 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
6957 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 7172 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6958 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 7173 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6959 } 7174 }
6960
6961 /* disable ZIO at start time. */
6962 if (!vha->flags.init_done) {
6963 uint32_t tmp;
6964 tmp = le32_to_cpu(icb->firmware_options_2);
6965 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6966 icb->firmware_options_2 = cpu_to_le32(tmp);
6967 }
6968
6969} 7175}
6970 7176
6971void 7177void
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 199d3ba1916d..721da593b1bc 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -900,6 +900,7 @@ struct qla_tgt_cmd {
900 unsigned int aborted:1; 900 unsigned int aborted:1;
901 unsigned int data_work:1; 901 unsigned int data_work:1;
902 unsigned int data_work_free:1; 902 unsigned int data_work_free:1;
903 unsigned int released:1;
903 904
904 struct scatterlist *sg; /* cmd data buffer SG vector */ 905 struct scatterlist *sg; /* cmd data buffer SG vector */
905 int sg_cnt; /* SG segments count */ 906 int sg_cnt; /* SG segments count */
@@ -908,6 +909,7 @@ struct qla_tgt_cmd {
908 u64 unpacked_lun; 909 u64 unpacked_lun;
909 enum dma_data_direction dma_data_direction; 910 enum dma_data_direction dma_data_direction;
910 911
912 uint16_t ctio_flags;
911 uint16_t vp_idx; 913 uint16_t vp_idx;
912 uint16_t loop_id; /* to save extra sess dereferences */ 914 uint16_t loop_id; /* to save extra sess dereferences */
913 struct qla_tgt *tgt; /* to save extra sess dereferences */ 915 struct qla_tgt *tgt; /* to save extra sess dereferences */
@@ -956,16 +958,20 @@ struct qla_tgt_sess_work_param {
956}; 958};
957 959
958struct qla_tgt_mgmt_cmd { 960struct qla_tgt_mgmt_cmd {
961 uint8_t cmd_type;
962 uint8_t pad[3];
959 uint16_t tmr_func; 963 uint16_t tmr_func;
960 uint8_t fc_tm_rsp; 964 uint8_t fc_tm_rsp;
965 uint8_t abort_io_attr;
961 struct fc_port *sess; 966 struct fc_port *sess;
962 struct qla_qpair *qpair; 967 struct qla_qpair *qpair;
963 struct scsi_qla_host *vha; 968 struct scsi_qla_host *vha;
964 struct se_cmd se_cmd; 969 struct se_cmd se_cmd;
965 struct work_struct free_work; 970 struct work_struct free_work;
966 unsigned int flags; 971 unsigned int flags;
972#define QLA24XX_MGMT_SEND_NACK BIT_0
973#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
967 uint32_t reset_count; 974 uint32_t reset_count;
968#define QLA24XX_MGMT_SEND_NACK 1
969 struct work_struct work; 975 struct work_struct work;
970 uint64_t unpacked_lun; 976 uint64_t unpacked_lun;
971 union { 977 union {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3850b28518e5..12bafff71a1a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "10.00.00.08-k" 10#define QLA2XXX_VERSION "10.00.00.11-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 10 12#define QLA_DRIVER_MAJOR_VER 10
13#define QLA_DRIVER_MINOR_VER 0 13#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e03d12a5f986..65053c066680 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -277,14 +277,25 @@ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
277static void tcm_qla2xxx_complete_free(struct work_struct *work) 277static void tcm_qla2xxx_complete_free(struct work_struct *work)
278{ 278{
279 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 279 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
280 bool released = false;
281 unsigned long flags;
280 282
281 cmd->cmd_in_wq = 0; 283 cmd->cmd_in_wq = 0;
282 284
283 WARN_ON(cmd->trc_flags & TRC_CMD_FREE); 285 WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
284 286
287 spin_lock_irqsave(&cmd->cmd_lock, flags);
285 cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++; 288 cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
286 cmd->trc_flags |= TRC_CMD_FREE; 289 cmd->trc_flags |= TRC_CMD_FREE;
287 transport_generic_free_cmd(&cmd->se_cmd, 0); 290 cmd->cmd_sent_to_fw = 0;
291 if (cmd->released)
292 released = true;
293 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
294
295 if (released)
296 qlt_free_cmd(cmd);
297 else
298 transport_generic_free_cmd(&cmd->se_cmd, 0);
288} 299}
289 300
290/* 301/*
@@ -325,6 +336,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
325static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) 336static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
326{ 337{
327 struct qla_tgt_cmd *cmd; 338 struct qla_tgt_cmd *cmd;
339 unsigned long flags;
328 340
329 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { 341 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
330 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, 342 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
@@ -332,9 +344,16 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
332 qlt_free_mcmd(mcmd); 344 qlt_free_mcmd(mcmd);
333 return; 345 return;
334 } 346 }
335
336 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); 347 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
337 qlt_free_cmd(cmd); 348
349 spin_lock_irqsave(&cmd->cmd_lock, flags);
350 if (cmd->cmd_sent_to_fw) {
351 cmd->released = 1;
352 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
353 } else {
354 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
355 qlt_free_cmd(cmd);
356 }
338} 357}
339 358
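The new released flag and the cmd_lock usage here close a release race: if the upper layer lets go of a command while its CTIO is still at the firmware, the release path only marks it, and the completion side (the tcm_qla2xxx_complete_free() hunk above and the handle_data_work hunk below) performs the actual free. A userspace sketch of that deferred-free handshake, with a pthread mutex standing in for cmd_lock; beyond the two flags, the names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_sketch {
        pthread_mutex_t cmd_lock;
        int cmd_sent_to_fw;     /* CTIO still owned by the firmware */
        int released;           /* upper layer already released us */
};

static void free_cmd(struct cmd_sketch *cmd)
{
        printf("freeing cmd %p\n", (void *)cmd);
        free(cmd);
}

/* Upper-layer release: defer the free while the firmware owns the cmd. */
static void release_cmd(struct cmd_sketch *cmd)
{
        pthread_mutex_lock(&cmd->cmd_lock);
        if (cmd->cmd_sent_to_fw) {
                cmd->released = 1;
                pthread_mutex_unlock(&cmd->cmd_lock);
        } else {
                pthread_mutex_unlock(&cmd->cmd_lock);
                free_cmd(cmd);
        }
}

/* Firmware completion: pick up a deferred release, if any. */
static void complete_cmd(struct cmd_sketch *cmd)
{
        int released;

        pthread_mutex_lock(&cmd->cmd_lock);
        cmd->cmd_sent_to_fw = 0;
        released = cmd->released;
        pthread_mutex_unlock(&cmd->cmd_lock);

        if (released)
                free_cmd(cmd);
}

int main(void)
{
        struct cmd_sketch *cmd = calloc(1, sizeof(*cmd));

        if (!cmd)
                return 1;
        pthread_mutex_init(&cmd->cmd_lock, NULL);
        cmd->cmd_sent_to_fw = 1;        /* pretend a CTIO is in flight */
        release_cmd(cmd);               /* only sets released */
        complete_cmd(cmd);              /* actually frees here */
        return 0;
}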
340static void tcm_qla2xxx_release_session(struct kref *kref) 359static void tcm_qla2xxx_release_session(struct kref *kref)
@@ -405,7 +424,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
405 se_cmd->pi_err = 0; 424 se_cmd->pi_err = 0;
406 425
407 /* 426 /*
408 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup 427 * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup
409 * the SGL mappings into PCIe memory for incoming FCP WRITE data. 428 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
410 */ 429 */
411 return qlt_rdy_to_xfer(cmd); 430 return qlt_rdy_to_xfer(cmd);
@@ -499,6 +518,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
499static void tcm_qla2xxx_handle_data_work(struct work_struct *work) 518static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
500{ 519{
501 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 520 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
521 unsigned long flags;
502 522
503 /* 523 /*
504 * Ensure that the complete FCP WRITE payload has been received. 524 * Ensure that the complete FCP WRITE payload has been received.
@@ -506,6 +526,25 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
506 */ 526 */
507 cmd->cmd_in_wq = 0; 527 cmd->cmd_in_wq = 0;
508 528
529 spin_lock_irqsave(&cmd->cmd_lock, flags);
530 cmd->cmd_sent_to_fw = 0;
531
532 if (cmd->released) {
533 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
534 qlt_free_cmd(cmd);
535 return;
536 }
537
538 cmd->data_work = 1;
539 if (cmd->aborted) {
540 cmd->data_work_free = 1;
541 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
542
543 tcm_qla2xxx_free_cmd(cmd);
544 return;
545 }
546 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
547
509 cmd->qpair->tgt_counters.qla_core_ret_ctio++; 548 cmd->qpair->tgt_counters.qla_core_ret_ctio++;
510 if (!cmd->write_data_transferred) { 549 if (!cmd->write_data_transferred) {
511 /* 550 /*
@@ -718,10 +757,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
718 cmd->sg_cnt = 0; 757 cmd->sg_cnt = 0;
719 cmd->offset = 0; 758 cmd->offset = 0;
720 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 759 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
721 if (cmd->trc_flags & TRC_XMIT_STATUS) {
722 pr_crit("Multiple calls for status = %p.\n", cmd);
723 dump_stack();
724 }
725 cmd->trc_flags |= TRC_XMIT_STATUS; 760 cmd->trc_flags |= TRC_XMIT_STATUS;
726 761
727 if (se_cmd->data_direction == DMA_FROM_DEVICE) { 762 if (se_cmd->data_direction == DMA_FROM_DEVICE) {
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 52b1a0bc93c9..1ef74aa2d00a 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -766,12 +766,10 @@ int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
766 while (drvr_wait) { 766 while (drvr_wait) {
767 if (ql4xxx_lock_drvr(a) == 0) { 767 if (ql4xxx_lock_drvr(a) == 0) {
768 ssleep(QL4_LOCK_DRVR_SLEEP); 768 ssleep(QL4_LOCK_DRVR_SLEEP);
769 if (drvr_wait) { 769 DEBUG2(printk("scsi%ld: %s: Waiting for "
770 DEBUG2(printk("scsi%ld: %s: Waiting for " 770 "Global Init Semaphore(%d)...\n",
771 "Global Init Semaphore(%d)...\n", 771 a->host_no,
772 a->host_no, 772 __func__, drvr_wait));
773 __func__, drvr_wait));
774 }
775 drvr_wait -= QL4_LOCK_DRVR_SLEEP; 773 drvr_wait -= QL4_LOCK_DRVR_SLEEP;
776 } else { 774 } else {
777 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore " 775 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ab3a924e3e11..051164f755a4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3382,7 +3382,7 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3382 if (task->data_count) { 3382 if (task->data_count) {
3383 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, 3383 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
3384 task->data_count, 3384 task->data_count,
3385 PCI_DMA_TODEVICE); 3385 DMA_TO_DEVICE);
3386 } 3386 }
3387 3387
3388 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3388 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -3437,7 +3437,7 @@ static void qla4xxx_task_cleanup(struct iscsi_task *task)
3437 3437
3438 if (task->data_count) { 3438 if (task->data_count) {
3439 dma_unmap_single(&ha->pdev->dev, task_data->data_dma, 3439 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
3440 task->data_count, PCI_DMA_TODEVICE); 3440 task->data_count, DMA_TO_DEVICE);
3441 } 3441 }
3442 3442
3443 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", 3443 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -9020,25 +9020,16 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
9020/** 9020/**
9021 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method. 9021 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
9022 * @ha: HA context 9022 * @ha: HA context
9023 *
9024 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
9025 * supported addressing method.
9026 */ 9023 */
9027static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) 9024static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
9028{ 9025{
9029 int retval;
9030
9031 /* Update our PCI device dma_mask for full 64 bit mask */ 9026 /* Update our PCI device dma_mask for full 64 bit mask */
9032 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) { 9027 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
9033 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { 9028 dev_dbg(&ha->pdev->dev,
9034 dev_dbg(&ha->pdev->dev, 9029 "Failed to set 64 bit PCI consistent mask; "
9035 "Failed to set 64 bit PCI consistent mask; " 9030 "using 32 bit.\n");
9036 "using 32 bit.\n"); 9031 dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
9037 retval = pci_set_consistent_dma_mask(ha->pdev, 9032 }
9038 DMA_BIT_MASK(32));
9039 }
9040 } else
9041 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
9042} 9033}
9043 9034
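The qla4xxx rewrite above keeps the classic try-64-bit-then-fall-back-to-32-bit flow but expresses each attempt as a single dma_set_mask_and_coherent() call, which sets the streaming and coherent masks together. A compile-only sketch of that flow; the stub pretends the platform rejects 64-bit masks, and DMA_BIT_MASK_SKETCH mirrors the kernel's DMA_BIT_MASK():

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK_SKETCH(n) \
        (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Stub: pretend only 32-bit DMA is supported. */
static int set_mask_and_coherent_sketch(uint64_t mask)
{
        return mask > DMA_BIT_MASK_SKETCH(32) ? -5 /* -EIO */ : 0;
}

static void config_dma_addressing(void)
{
        if (set_mask_and_coherent_sketch(DMA_BIT_MASK_SKETCH(64))) {
                fprintf(stderr, "64-bit DMA mask rejected; using 32-bit\n");
                set_mask_and_coherent_sketch(DMA_BIT_MASK_SKETCH(32));
        }
}

int main(void)
{
        config_dma_addressing();
        return 0;
}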
9044static int qla4xxx_slave_alloc(struct scsi_device *sdev) 9035static int qla4xxx_slave_alloc(struct scsi_device *sdev)
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index ea88906d2cc5..5c3d6e1e0145 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -63,8 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
63 * emulated RAID devices, so start with SCSI */ 63 * emulated RAID devices, so start with SCSI */
64 struct raid_internal *i = ac_to_raid_internal(cont); 64 struct raid_internal *i = ac_to_raid_internal(cont);
65 65
66#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE) 66 if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) {
67 if (scsi_is_sdev_device(dev)) {
68 struct scsi_device *sdev = to_scsi_device(dev); 67 struct scsi_device *sdev = to_scsi_device(dev);
69 68
70 if (i->f->cookie != sdev->host->hostt) 69 if (i->f->cookie != sdev->host->hostt)
@@ -72,7 +71,6 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
72 71
73 return i->f->is_raid(dev); 72 return i->f->is_raid(dev);
74 } 73 }
75#endif
76 /* FIXME: look at other subsystems too */ 74 /* FIXME: look at other subsystems too */
77 return 0; 75 return 0;
78} 76}
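Replacing the #if block with IS_ENABLED(CONFIG_SCSI) above keeps the branch visible to the compiler in every configuration: the macro evaluates to 1 when the option is built in or modular, so the dead branch is removed by constant folding rather than hidden from the parser. A self-contained sketch of the idea; the kernel's real macro is considerably more elaborate:

#include <stdio.h>

#define CONFIG_SCSI_SKETCH 1            /* pretend CONFIG_SCSI=y */
#define IS_ENABLED_SKETCH(opt) (opt)    /* toy stand-in for IS_ENABLED() */

static int is_sdev_device(void) { return 1; }   /* stand-in predicate */

static int raid_match_sketch(void)
{
        if (IS_ENABLED_SKETCH(CONFIG_SCSI_SKETCH) && is_sdev_device()) {
                /* This branch is type-checked even when the option is off. */
                return 1;
        }
        return 0;
}

int main(void)
{
        printf("match=%d\n", raid_match_sketch());
        return 0;
}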
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b7a8fdfeb2f4..c736d61b1648 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -338,9 +338,6 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
338 338
339 online = scsi_device_online(sdev); 339 online = scsi_device_online(sdev);
340 340
341 SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
342 "%s: rtn: %d\n", __func__, online));
343
344 return online; 341 return online;
345} 342}
346EXPORT_SYMBOL(scsi_block_when_processing_errors); 343EXPORT_SYMBOL(scsi_block_when_processing_errors);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62348412ed1b..c7fccbb8f554 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1201,8 +1201,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
1201 1201
1202 count = blk_rq_map_integrity_sg(rq->q, rq->bio, 1202 count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1203 prot_sdb->table.sgl); 1203 prot_sdb->table.sgl);
1204 BUG_ON(unlikely(count > ivecs)); 1204 BUG_ON(count > ivecs);
1205 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q))); 1205 BUG_ON(count > queue_max_integrity_segments(rq->q));
1206 1206
1207 cmd->prot_sdb = prot_sdb; 1207 cmd->prot_sdb = prot_sdb;
1208 cmd->prot_sdb->table.nents = count; 1208 cmd->prot_sdb->table.nents = count;
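The unlikely() wrappers could be dropped here because BUG_ON() already applies the branch hint internally, so BUG_ON(unlikely(x)) hinted the same branch twice. A standalone sketch of the macro shape, modeled on the generic kernel definition, with BUG() simplified to abort():

#include <stdio.h>
#include <stdlib.h>

#define unlikely(x)     __builtin_expect(!!(x), 0)
#define BUG()           do { fprintf(stderr, "BUG!\n"); abort(); } while (0)
#define BUG_ON(cond)    do { if (unlikely(cond)) BUG(); } while (0)

int main(void)
{
        int count = 3, ivecs = 8;

        BUG_ON(count > ivecs);  /* the hint is supplied by the macro itself */
        return 0;
}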
@@ -2753,6 +2753,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2753 switch (oldstate) { 2753 switch (oldstate) {
2754 case SDEV_RUNNING: 2754 case SDEV_RUNNING:
2755 case SDEV_CREATED_BLOCK: 2755 case SDEV_CREATED_BLOCK:
2756 case SDEV_OFFLINE:
2756 break; 2757 break;
2757 default: 2758 default:
2758 goto illegal; 2759 goto illegal;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0cd16e80b019..0a165b2b3e81 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -612,7 +612,6 @@ sas_phy_protocol_attr(identify.target_port_protocols,
612sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", 612sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
613 unsigned long long); 613 unsigned long long);
614sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); 614sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
615//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
616sas_phy_linkspeed_attr(negotiated_linkrate); 615sas_phy_linkspeed_attr(negotiated_linkrate);
617sas_phy_linkspeed_attr(minimum_linkrate_hw); 616sas_phy_linkspeed_attr(minimum_linkrate_hw);
618sas_phy_linkspeed_rw_attr(minimum_linkrate); 617sas_phy_linkspeed_rw_attr(minimum_linkrate);
@@ -1802,7 +1801,6 @@ sas_attach_transport(struct sas_function_template *ft)
1802 SETUP_PHY_ATTRIBUTE(device_type); 1801 SETUP_PHY_ATTRIBUTE(device_type);
1803 SETUP_PHY_ATTRIBUTE(sas_address); 1802 SETUP_PHY_ATTRIBUTE(sas_address);
1804 SETUP_PHY_ATTRIBUTE(phy_identifier); 1803 SETUP_PHY_ATTRIBUTE(phy_identifier);
1805 //SETUP_PHY_ATTRIBUTE(port_identifier);
1806 SETUP_PHY_ATTRIBUTE(negotiated_linkrate); 1804 SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
1807 SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw); 1805 SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
1808 SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate); 1806 SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8a254bb46a9b..c6ad00703c5b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -822,7 +822,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
822 if (atomic_read(&sdp->detaching)) { 822 if (atomic_read(&sdp->detaching)) {
823 if (srp->bio) { 823 if (srp->bio) {
824 scsi_req_free_cmd(scsi_req(srp->rq)); 824 scsi_req_free_cmd(scsi_req(srp->rq));
825 blk_end_request_all(srp->rq, BLK_STS_IOERR); 825 blk_put_request(srp->rq);
826 srp->rq = NULL; 826 srp->rq = NULL;
827 } 827 }
828 828
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 2112ea6723c6..a25a07a0b7f0 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -349,16 +349,16 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
349 349
350static int pqi_map_single(struct pci_dev *pci_dev, 350static int pqi_map_single(struct pci_dev *pci_dev,
351 struct pqi_sg_descriptor *sg_descriptor, void *buffer, 351 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
352 size_t buffer_length, int data_direction) 352 size_t buffer_length, enum dma_data_direction data_direction)
353{ 353{
354 dma_addr_t bus_address; 354 dma_addr_t bus_address;
355 355
356 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE) 356 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
357 return 0; 357 return 0;
358 358
359 bus_address = pci_map_single(pci_dev, buffer, buffer_length, 359 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
360 data_direction); 360 data_direction);
361 if (pci_dma_mapping_error(pci_dev, bus_address)) 361 if (dma_mapping_error(&pci_dev->dev, bus_address))
362 return -ENOMEM; 362 return -ENOMEM;
363 363
364 put_unaligned_le64((u64)bus_address, &sg_descriptor->address); 364 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
@@ -370,15 +370,15 @@ static int pqi_map_single(struct pci_dev *pci_dev,
370 370
371static void pqi_pci_unmap(struct pci_dev *pci_dev, 371static void pqi_pci_unmap(struct pci_dev *pci_dev,
372 struct pqi_sg_descriptor *descriptors, int num_descriptors, 372 struct pqi_sg_descriptor *descriptors, int num_descriptors,
373 int data_direction) 373 enum dma_data_direction data_direction)
374{ 374{
375 int i; 375 int i;
376 376
377 if (data_direction == PCI_DMA_NONE) 377 if (data_direction == DMA_NONE)
378 return; 378 return;
379 379
380 for (i = 0; i < num_descriptors; i++) 380 for (i = 0; i < num_descriptors; i++)
381 pci_unmap_single(pci_dev, 381 dma_unmap_single(&pci_dev->dev,
382 (dma_addr_t)get_unaligned_le64(&descriptors[i].address), 382 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
383 get_unaligned_le32(&descriptors[i].length), 383 get_unaligned_le32(&descriptors[i].length),
384 data_direction); 384 data_direction);
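The smartpqi hunks keep the usual map/check/unmap discipline while moving to the generic API: dma_map_single() plus dma_mapping_error() replace the pci_* pair, and the direction parameter becomes enum dma_data_direction. A stubbed, compilable sketch of that call shape; every *_sketch name is a stand-in:

#include <stddef.h>
#include <stdint.h>

enum dma_dir_sketch { DMA_NONE_S, DMA_TO_DEVICE_S, DMA_FROM_DEVICE_S };

typedef uint64_t dma_addr_sketch;

/* Stubs standing in for the kernel's DMA mapping entry points. */
static dma_addr_sketch map_single_sketch(void *buf, size_t len,
                                         enum dma_dir_sketch dir)
{
        (void)len; (void)dir;
        return (dma_addr_sketch)(uintptr_t)buf; /* identity "mapping" */
}

static int mapping_error_sketch(dma_addr_sketch addr)
{
        return addr == 0;
}

static int map_buffer(void *buf, size_t len, enum dma_dir_sketch dir,
                      dma_addr_sketch *out)
{
        if (!buf || len == 0 || dir == DMA_NONE_S)
                return 0;               /* nothing to map */

        *out = map_single_sketch(buf, len, dir);
        if (mapping_error_sketch(*out))
                return -12;             /* -ENOMEM, as in pqi_map_single() */
        return 0;
}

int main(void)
{
        char buf[64];
        dma_addr_sketch addr = 0;

        return map_buffer(buf, sizeof(buf), DMA_TO_DEVICE_S, &addr);
}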
@@ -387,10 +387,9 @@ static void pqi_pci_unmap(struct pci_dev *pci_dev,
387static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, 387static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
388 struct pqi_raid_path_request *request, u8 cmd, 388 struct pqi_raid_path_request *request, u8 cmd,
389 u8 *scsi3addr, void *buffer, size_t buffer_length, 389 u8 *scsi3addr, void *buffer, size_t buffer_length,
390 u16 vpd_page, int *pci_direction) 390 u16 vpd_page, enum dma_data_direction *dir)
391{ 391{
392 u8 *cdb; 392 u8 *cdb;
393 int pci_dir;
394 393
395 memset(request, 0, sizeof(*request)); 394 memset(request, 0, sizeof(*request));
396 395
@@ -458,23 +457,21 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
458 457
459 switch (request->data_direction) { 458 switch (request->data_direction) {
460 case SOP_READ_FLAG: 459 case SOP_READ_FLAG:
461 pci_dir = PCI_DMA_FROMDEVICE; 460 *dir = DMA_FROM_DEVICE;
462 break; 461 break;
463 case SOP_WRITE_FLAG: 462 case SOP_WRITE_FLAG:
464 pci_dir = PCI_DMA_TODEVICE; 463 *dir = DMA_TO_DEVICE;
465 break; 464 break;
466 case SOP_NO_DIRECTION_FLAG: 465 case SOP_NO_DIRECTION_FLAG:
467 pci_dir = PCI_DMA_NONE; 466 *dir = DMA_NONE;
468 break; 467 break;
469 default: 468 default:
470 pci_dir = PCI_DMA_BIDIRECTIONAL; 469 *dir = DMA_BIDIRECTIONAL;
471 break; 470 break;
472 } 471 }
473 472
474 *pci_direction = pci_dir;
475
476 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], 473 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
477 buffer, buffer_length, pci_dir); 474 buffer, buffer_length, *dir);
478} 475}
479 476
480static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 477static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
@@ -516,21 +513,19 @@ static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
516 struct bmic_identify_controller *buffer) 513 struct bmic_identify_controller *buffer)
517{ 514{
518 int rc; 515 int rc;
519 int pci_direction; 516 enum dma_data_direction dir;
520 struct pqi_raid_path_request request; 517 struct pqi_raid_path_request request;
521 518
522 rc = pqi_build_raid_path_request(ctrl_info, &request, 519 rc = pqi_build_raid_path_request(ctrl_info, &request,
523 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer, 520 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
524 sizeof(*buffer), 0, &pci_direction); 521 sizeof(*buffer), 0, &dir);
525 if (rc) 522 if (rc)
526 return rc; 523 return rc;
527 524
528 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 525 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
529 NULL, NO_TIMEOUT); 526 NULL, NO_TIMEOUT);
530 527
531 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 528 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
532 pci_direction);
533
534 return rc; 529 return rc;
535} 530}
536 531
@@ -538,21 +533,19 @@ static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
538 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 533 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
539{ 534{
540 int rc; 535 int rc;
541 int pci_direction; 536 enum dma_data_direction dir;
542 struct pqi_raid_path_request request; 537 struct pqi_raid_path_request request;
543 538
544 rc = pqi_build_raid_path_request(ctrl_info, &request, 539 rc = pqi_build_raid_path_request(ctrl_info, &request,
545 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page, 540 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
546 &pci_direction); 541 &dir);
547 if (rc) 542 if (rc)
548 return rc; 543 return rc;
549 544
550 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 545 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
551 NULL, NO_TIMEOUT); 546 NULL, NO_TIMEOUT);
552 547
553 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 548 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
554 pci_direction);
555
556 return rc; 549 return rc;
557} 550}
558 551
@@ -562,13 +555,13 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
562 size_t buffer_length) 555 size_t buffer_length)
563{ 556{
564 int rc; 557 int rc;
565 int pci_direction; 558 enum dma_data_direction dir;
566 u16 bmic_device_index; 559 u16 bmic_device_index;
567 struct pqi_raid_path_request request; 560 struct pqi_raid_path_request request;
568 561
569 rc = pqi_build_raid_path_request(ctrl_info, &request, 562 rc = pqi_build_raid_path_request(ctrl_info, &request,
570 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 563 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
571 buffer_length, 0, &pci_direction); 564 buffer_length, 0, &dir);
572 if (rc) 565 if (rc)
573 return rc; 566 return rc;
574 567
@@ -579,9 +572,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
579 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 572 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
580 0, NULL, NO_TIMEOUT); 573 0, NULL, NO_TIMEOUT);
581 574
582 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 575 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
583 pci_direction);
584
585 return rc; 576 return rc;
586} 577}
587 578
@@ -590,8 +581,8 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
590{ 581{
591 int rc; 582 int rc;
592 struct pqi_raid_path_request request; 583 struct pqi_raid_path_request request;
593 int pci_direction;
594 struct bmic_flush_cache *flush_cache; 584 struct bmic_flush_cache *flush_cache;
585 enum dma_data_direction dir;
595 586
596 /* 587 /*
597 * Don't bother trying to flush the cache if the controller is 588 * Don't bother trying to flush the cache if the controller is
@@ -608,16 +599,14 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
608 599
609 rc = pqi_build_raid_path_request(ctrl_info, &request, 600 rc = pqi_build_raid_path_request(ctrl_info, &request,
610 SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache, 601 SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
611 sizeof(*flush_cache), 0, &pci_direction); 602 sizeof(*flush_cache), 0, &dir);
612 if (rc) 603 if (rc)
613 goto out; 604 goto out;
614 605
615 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 606 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
616 0, NULL, NO_TIMEOUT); 607 0, NULL, NO_TIMEOUT);
617 608
618 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 609 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
619 pci_direction);
620
621out: 610out:
622 kfree(flush_cache); 611 kfree(flush_cache);
623 612
@@ -629,20 +618,18 @@ static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
629{ 618{
630 int rc; 619 int rc;
631 struct pqi_raid_path_request request; 620 struct pqi_raid_path_request request;
632 int pci_direction; 621 enum dma_data_direction dir;
633 622
634 rc = pqi_build_raid_path_request(ctrl_info, &request, 623 rc = pqi_build_raid_path_request(ctrl_info, &request,
635 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer, 624 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
636 buffer_length, 0, &pci_direction); 625 buffer_length, 0, &dir);
637 if (rc) 626 if (rc)
638 return rc; 627 return rc;
639 628
640 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 629 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
641 0, NULL, NO_TIMEOUT); 630 0, NULL, NO_TIMEOUT);
642 631
643 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 632 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
644 pci_direction);
645
646 return rc; 633 return rc;
647} 634}
648 635
@@ -793,20 +780,18 @@ static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
793 void *buffer, size_t buffer_length) 780 void *buffer, size_t buffer_length)
794{ 781{
795 int rc; 782 int rc;
796 int pci_direction; 783 enum dma_data_direction dir;
797 struct pqi_raid_path_request request; 784 struct pqi_raid_path_request request;
798 785
799 rc = pqi_build_raid_path_request(ctrl_info, &request, 786 rc = pqi_build_raid_path_request(ctrl_info, &request,
800 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction); 787 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir);
801 if (rc) 788 if (rc)
802 return rc; 789 return rc;
803 790
804 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 791 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
805 NULL, NO_TIMEOUT); 792 NULL, NO_TIMEOUT);
806 793
807 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 794 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
808 pci_direction);
809
810 return rc; 795 return rc;
811} 796}
812 797
@@ -1089,7 +1074,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1089 struct pqi_scsi_dev *device) 1074 struct pqi_scsi_dev *device)
1090{ 1075{
1091 int rc; 1076 int rc;
1092 int pci_direction; 1077 enum dma_data_direction dir;
1093 struct pqi_raid_path_request request; 1078 struct pqi_raid_path_request request;
1094 struct raid_map *raid_map; 1079 struct raid_map *raid_map;
1095 1080
@@ -1099,15 +1084,14 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1099 1084
1100 rc = pqi_build_raid_path_request(ctrl_info, &request, 1085 rc = pqi_build_raid_path_request(ctrl_info, &request,
1101 CISS_GET_RAID_MAP, device->scsi3addr, raid_map, 1086 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
1102 sizeof(*raid_map), 0, &pci_direction); 1087 sizeof(*raid_map), 0, &dir);
1103 if (rc) 1088 if (rc)
1104 goto error; 1089 goto error;
1105 1090
1106 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 1091 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
1107 NULL, NO_TIMEOUT); 1092 NULL, NO_TIMEOUT);
1108 1093
1109 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 1094 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
1110 pci_direction);
1111 1095
1112 if (rc) 1096 if (rc)
1113 goto error; 1097 goto error;
@@ -3822,7 +3806,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3822 rc = pqi_map_single(ctrl_info->pci_dev, 3806 rc = pqi_map_single(ctrl_info->pci_dev,
3823 &request.data.report_device_capability.sg_descriptor, 3807 &request.data.report_device_capability.sg_descriptor,
3824 capability, sizeof(*capability), 3808 capability, sizeof(*capability),
3825 PCI_DMA_FROMDEVICE); 3809 DMA_FROM_DEVICE);
3826 if (rc) 3810 if (rc)
3827 goto out; 3811 goto out;
3828 3812
@@ -3831,7 +3815,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3831 3815
3832 pqi_pci_unmap(ctrl_info->pci_dev, 3816 pqi_pci_unmap(ctrl_info->pci_dev,
3833 &request.data.report_device_capability.sg_descriptor, 1, 3817 &request.data.report_device_capability.sg_descriptor, 1,
3834 PCI_DMA_FROMDEVICE); 3818 DMA_FROM_DEVICE);
3835 3819
3836 if (rc) 3820 if (rc)
3837 goto out; 3821 goto out;
@@ -4158,7 +4142,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4158 rc = pqi_map_single(ctrl_info->pci_dev, 4142 rc = pqi_map_single(ctrl_info->pci_dev,
4159 request.data.report_event_configuration.sg_descriptors, 4143 request.data.report_event_configuration.sg_descriptors,
4160 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4144 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4161 PCI_DMA_FROMDEVICE); 4145 DMA_FROM_DEVICE);
4162 if (rc) 4146 if (rc)
4163 goto out; 4147 goto out;
4164 4148
@@ -4167,7 +4151,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4167 4151
4168 pqi_pci_unmap(ctrl_info->pci_dev, 4152 pqi_pci_unmap(ctrl_info->pci_dev,
4169 request.data.report_event_configuration.sg_descriptors, 1, 4153 request.data.report_event_configuration.sg_descriptors, 1,
4170 PCI_DMA_FROMDEVICE); 4154 DMA_FROM_DEVICE);
4171 4155
4172 if (rc) 4156 if (rc)
4173 goto out; 4157 goto out;
@@ -4194,7 +4178,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4194 rc = pqi_map_single(ctrl_info->pci_dev, 4178 rc = pqi_map_single(ctrl_info->pci_dev,
4195 request.data.report_event_configuration.sg_descriptors, 4179 request.data.report_event_configuration.sg_descriptors,
4196 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4180 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4197 PCI_DMA_TODEVICE); 4181 DMA_TO_DEVICE);
4198 if (rc) 4182 if (rc)
4199 goto out; 4183 goto out;
4200 4184
@@ -4203,7 +4187,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4203 4187
4204 pqi_pci_unmap(ctrl_info->pci_dev, 4188 pqi_pci_unmap(ctrl_info->pci_dev,
4205 request.data.report_event_configuration.sg_descriptors, 1, 4189 request.data.report_event_configuration.sg_descriptors, 1,
4206 PCI_DMA_TODEVICE); 4190 DMA_TO_DEVICE);
4207 4191
4208out: 4192out:
4209 kfree(event_config); 4193 kfree(event_config);
@@ -5534,7 +5518,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5534 5518
5535 rc = pqi_map_single(ctrl_info->pci_dev, 5519 rc = pqi_map_single(ctrl_info->pci_dev,
5536 &request.sg_descriptors[0], kernel_buffer, 5520 &request.sg_descriptors[0], kernel_buffer,
5537 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 5521 iocommand.buf_size, DMA_BIDIRECTIONAL);
5538 if (rc) 5522 if (rc)
5539 goto out; 5523 goto out;
5540 5524
@@ -5548,7 +5532,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5548 5532
5549 if (iocommand.buf_size > 0) 5533 if (iocommand.buf_size > 0)
5550 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 5534 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5551 PCI_DMA_BIDIRECTIONAL); 5535 DMA_BIDIRECTIONAL);
5552 5536
5553 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 5537 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5554 5538
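
[ Editor's note: the smartpqi hunks above are a representative instance of the tree-wide conversion this pull describes: each pci_*_single() wrapper becomes the matching dma_*() call on the underlying struct device, and the int PCI_DMA_* flags become enum dma_data_direction values. A minimal sketch of the before/after pattern follows; the function name is illustrative, not from the patch. ]

/*
 * Direction-flag mapping used throughout the conversion:
 *   PCI_DMA_TODEVICE      -> DMA_TO_DEVICE
 *   PCI_DMA_FROMDEVICE    -> DMA_FROM_DEVICE
 *   PCI_DMA_BIDIRECTIONAL -> DMA_BIDIRECTIONAL
 *   PCI_DMA_NONE          -> DMA_NONE
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_map(struct pci_dev *pci_dev, void *buffer, size_t len,
		       dma_addr_t *bus_address)
{
	/* before: pci_map_single(pci_dev, buffer, len, PCI_DMA_FROMDEVICE) */
	*bus_address = dma_map_single(&pci_dev->dev, buffer, len,
				      DMA_FROM_DEVICE);

	/* before: pci_dma_mapping_error(pci_dev, *bus_address) */
	if (dma_mapping_error(&pci_dev->dev, *bus_address))
		return -ENOMEM;

	return 0;
}
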
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5141bd4c9f06..ea91658c7060 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -316,9 +316,9 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
316 put_unaligned_le32(ctrl_info->max_io_slots, 316 put_unaligned_le32(ctrl_info->max_io_slots,
317 &base_struct->error_buffer_num_elements); 317 &base_struct->error_buffer_num_elements);
318 318
319 bus_address = pci_map_single(ctrl_info->pci_dev, base_struct, 319 bus_address = dma_map_single(&ctrl_info->pci_dev->dev, base_struct,
320 sizeof(*base_struct), PCI_DMA_TODEVICE); 320 sizeof(*base_struct), DMA_TO_DEVICE);
321 if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) { 321 if (dma_mapping_error(&ctrl_info->pci_dev->dev, bus_address)) {
322 rc = -ENOMEM; 322 rc = -ENOMEM;
323 goto out; 323 goto out;
324 } 324 }
@@ -331,9 +331,8 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
331 rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS, 331 rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
332 &params); 332 &params);
333 333
334 pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct), 334 dma_unmap_single(&ctrl_info->pci_dev->dev, bus_address,
335 PCI_DMA_TODEVICE); 335 sizeof(*base_struct), DMA_TO_DEVICE);
336
337out: 336out:
338 kfree(base_struct_unaligned); 337 kfree(base_struct_unaligned);
339 338
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index b106596cc0cf..e9ccfb97773f 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -111,8 +111,8 @@ snic_queue_report_tgt_req(struct snic *snic)
111 111
112 SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0); 112 SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
113 113
114 pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE); 114 pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
115 if (pci_dma_mapping_error(snic->pdev, pa)) { 115 if (dma_mapping_error(&snic->pdev->dev, pa)) {
116 SNIC_HOST_ERR(snic->shost, 116 SNIC_HOST_ERR(snic->shost,
117 "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n", 117 "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
118 buf); 118 buf);
@@ -138,7 +138,8 @@ snic_queue_report_tgt_req(struct snic *snic)
138 138
139 ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); 139 ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
140 if (ret) { 140 if (ret) {
141 pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE); 141 dma_unmap_single(&snic->pdev->dev, pa, buf_len,
142 DMA_FROM_DEVICE);
142 kfree(buf); 143 kfree(buf);
143 rqi->sge_va = 0; 144 rqi->sge_va = 0;
144 snic_release_untagged_req(snic, rqi); 145 snic_release_untagged_req(snic, rqi);
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 8e69548395b9..159ee94d2a55 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -102,7 +102,8 @@ snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
102 struct snic_req_info *rqi = NULL; 102 struct snic_req_info *rqi = NULL;
103 unsigned long flags; 103 unsigned long flags;
104 104
105 pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); 105 dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
106 DMA_TO_DEVICE);
106 107
107 rqi = req_to_rqi(req); 108 rqi = req_to_rqi(req);
108 spin_lock_irqsave(&snic->spl_cmd_lock, flags); 109 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
@@ -172,8 +173,8 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
172 snic_print_desc(__func__, os_buf, len); 173 snic_print_desc(__func__, os_buf, len);
173 174
174 /* Map request buffer */ 175 /* Map request buffer */
175 pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE); 176 pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
176 if (pci_dma_mapping_error(snic->pdev, pa)) { 177 if (dma_mapping_error(&snic->pdev->dev, pa)) {
177 SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n"); 178 SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
178 179
179 return -ENOMEM; 180 return -ENOMEM;
@@ -186,7 +187,7 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
186 spin_lock_irqsave(&snic->wq_lock[q_num], flags); 187 spin_lock_irqsave(&snic->wq_lock[q_num], flags);
187 desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type); 188 desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
188 if (desc_avail <= 0) { 189 if (desc_avail <= 0) {
189 pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE); 190 dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
190 req->req_pa = 0; 191 req->req_pa = 0;
191 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); 192 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
192 atomic64_inc(&snic->s_stats.misc.wq_alloc_fail); 193 atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
@@ -350,29 +351,29 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
350 351
351 if (rqi->abort_req) { 352 if (rqi->abort_req) {
352 if (rqi->abort_req->req_pa) 353 if (rqi->abort_req->req_pa)
353 pci_unmap_single(snic->pdev, 354 dma_unmap_single(&snic->pdev->dev,
354 rqi->abort_req->req_pa, 355 rqi->abort_req->req_pa,
355 sizeof(struct snic_host_req), 356 sizeof(struct snic_host_req),
356 PCI_DMA_TODEVICE); 357 DMA_TO_DEVICE);
357 358
358 mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 359 mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
359 } 360 }
360 361
361 if (rqi->dr_req) { 362 if (rqi->dr_req) {
362 if (rqi->dr_req->req_pa) 363 if (rqi->dr_req->req_pa)
363 pci_unmap_single(snic->pdev, 364 dma_unmap_single(&snic->pdev->dev,
364 rqi->dr_req->req_pa, 365 rqi->dr_req->req_pa,
365 sizeof(struct snic_host_req), 366 sizeof(struct snic_host_req),
366 PCI_DMA_TODEVICE); 367 DMA_TO_DEVICE);
367 368
368 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); 369 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
369 } 370 }
370 371
371 if (rqi->req->req_pa) 372 if (rqi->req->req_pa)
372 pci_unmap_single(snic->pdev, 373 dma_unmap_single(&snic->pdev->dev,
373 rqi->req->req_pa, 374 rqi->req->req_pa,
374 rqi->req_len, 375 rqi->req_len,
375 PCI_DMA_TODEVICE); 376 DMA_TO_DEVICE);
376 377
377 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); 378 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
378} 379}
@@ -384,10 +385,10 @@ snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
384 385
385 sgd = req_to_sgl(rqi_to_req(rqi)); 386 sgd = req_to_sgl(rqi_to_req(rqi));
386 SNIC_BUG_ON(sgd[0].addr == 0); 387 SNIC_BUG_ON(sgd[0].addr == 0);
387 pci_unmap_single(snic->pdev, 388 dma_unmap_single(&snic->pdev->dev,
388 le64_to_cpu(sgd[0].addr), 389 le64_to_cpu(sgd[0].addr),
389 le32_to_cpu(sgd[0].len), 390 le32_to_cpu(sgd[0].len),
390 PCI_DMA_FROMDEVICE); 391 DMA_FROM_DEVICE);
391} 392}
392 393
393/* 394/*
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 7cf70aaec0ba..5295277d6325 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -435,37 +435,17 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
435 * limitation for the device. Try 43-bit first, and 435 * limitation for the device. Try 43-bit first, and
436 * fail to 32-bit. 436 * fail to 32-bit.
437 */ 437 */
438 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43)); 438 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
439 if (ret) { 439 if (ret) {
440 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 440 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
441 if (ret) { 441 if (ret) {
442 SNIC_HOST_ERR(shost, 442 SNIC_HOST_ERR(shost,
443 "No Usable DMA Configuration, aborting %d\n", 443 "No Usable DMA Configuration, aborting %d\n",
444 ret); 444 ret);
445
446 goto err_rel_regions;
447 }
448
449 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
450 if (ret) {
451 SNIC_HOST_ERR(shost,
452 "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
453 ret);
454
455 goto err_rel_regions;
456 }
457 } else {
458 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
459 if (ret) {
460 SNIC_HOST_ERR(shost,
461 "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
462 ret);
463
464 goto err_rel_regions; 445 goto err_rel_regions;
465 } 446 }
466 } 447 }
467 448
468
469 /* Map vNIC resources from BAR0 */ 449 /* Map vNIC resources from BAR0 */
470 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 450 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
471 SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n"); 451 SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
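
[ Editor's note: the deleted error paths above are not lost behavior. dma_set_mask_and_coherent() sets the streaming and coherent masks together, so the separate pci_set_consistent_dma_mask() calls and their failure handling collapse into a two-line fallback. A sketch of the resulting idiom, assuming the same 43-bit-then-32-bit policy; the function name is illustrative. ]

static int example_set_dma_masks(struct pci_dev *pdev)
{
	/* try the device's native 43-bit limit first ... */
	int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));

	/* ... and fall back to 32-bit addressing if that fails */
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}
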
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index d9b2e46424aa..b3650c989ed4 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -146,10 +146,10 @@ snic_release_req_buf(struct snic *snic,
146 CMD_FLAGS(sc)); 146 CMD_FLAGS(sc));
147 147
148 if (req->u.icmnd.sense_addr) 148 if (req->u.icmnd.sense_addr)
149 pci_unmap_single(snic->pdev, 149 dma_unmap_single(&snic->pdev->dev,
150 le64_to_cpu(req->u.icmnd.sense_addr), 150 le64_to_cpu(req->u.icmnd.sense_addr),
151 SCSI_SENSE_BUFFERSIZE, 151 SCSI_SENSE_BUFFERSIZE,
152 PCI_DMA_FROMDEVICE); 152 DMA_FROM_DEVICE);
153 153
154 scsi_dma_unmap(sc); 154 scsi_dma_unmap(sc);
155 155
@@ -185,12 +185,11 @@ snic_queue_icmnd_req(struct snic *snic,
185 } 185 }
186 } 186 }
187 187
188 pa = pci_map_single(snic->pdev, 188 pa = dma_map_single(&snic->pdev->dev,
189 sc->sense_buffer, 189 sc->sense_buffer,
190 SCSI_SENSE_BUFFERSIZE, 190 SCSI_SENSE_BUFFERSIZE,
191 PCI_DMA_FROMDEVICE); 191 DMA_FROM_DEVICE);
192 192 if (dma_mapping_error(&snic->pdev->dev, pa)) {
193 if (pci_dma_mapping_error(snic->pdev, pa)) {
194 SNIC_HOST_ERR(snic->shost, 193 SNIC_HOST_ERR(snic->shost,
195 "QIcmnd:PCI Map Failed for sns buf %p tag %x\n", 194 "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
196 sc->sense_buffer, snic_cmd_tag(sc)); 195 sc->sense_buffer, snic_cmd_tag(sc));
@@ -2001,7 +2000,7 @@ snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
2001 } 2000 }
2002 2001
2003dr_failed: 2002dr_failed:
2004 SNIC_BUG_ON(!spin_is_locked(io_lock)); 2003 lockdep_assert_held(io_lock);
2005 if (rqi) 2004 if (rqi)
2006 CMD_SP(sc) = NULL; 2005 CMD_SP(sc) = NULL;
2007 spin_unlock_irqrestore(io_lock, flags); 2006 spin_unlock_irqrestore(io_lock, flags);
@@ -2604,7 +2603,7 @@ snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
2604 ret = SUCCESS; 2603 ret = SUCCESS;
2605 2604
2606skip_internal_abts: 2605skip_internal_abts:
2607 SNIC_BUG_ON(!spin_is_locked(io_lock)); 2606 lockdep_assert_held(io_lock);
2608 spin_unlock_irqrestore(io_lock, flags); 2607 spin_unlock_irqrestore(io_lock, flags);
2609 2608
2610 return ret; 2609 return ret;
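
[ Editor's note: replacing SNIC_BUG_ON(!spin_is_locked(io_lock)) with lockdep_assert_held() is a correctness fix, not just cleanup: on uniprocessor builds without spinlock debugging, spin_is_locked() always returns 0, so the old assertion could fire even with the lock held, and on SMP it cannot tell whether the *current* context is the holder. Roughly: ]

/* unreliable: always false on !CONFIG_SMP, and says nothing about
 * which context actually holds the lock */
SNIC_BUG_ON(!spin_is_locked(io_lock));

/* checks that this context holds io_lock when lockdep is enabled,
 * and compiles to nothing otherwise */
lockdep_assert_held(io_lock);
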
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index dad5fc66effb..05e374f80946 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -225,10 +225,9 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
225{ 225{
226 svnic_dev_desc_ring_size(ring, desc_count, desc_size); 226 svnic_dev_desc_ring_size(ring, desc_count, desc_size);
227 227
228 ring->descs_unaligned = pci_alloc_consistent(vdev->pdev, 228 ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
229 ring->size_unaligned, 229 ring->size_unaligned, &ring->base_addr_unaligned,
230 &ring->base_addr_unaligned); 230 GFP_KERNEL);
231
232 if (!ring->descs_unaligned) { 231 if (!ring->descs_unaligned) {
233 pr_err("Failed to allocate ring (size=%d), aborting\n", 232 pr_err("Failed to allocate ring (size=%d), aborting\n",
234 (int)ring->size); 233 (int)ring->size);
@@ -251,7 +250,7 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
251void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) 250void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
252{ 251{
253 if (ring->descs) { 252 if (ring->descs) {
254 pci_free_consistent(vdev->pdev, 253 dma_free_coherent(&vdev->pdev->dev,
255 ring->size_unaligned, 254 ring->size_unaligned,
256 ring->descs_unaligned, 255 ring->descs_unaligned,
257 ring->base_addr_unaligned); 256 ring->base_addr_unaligned);
@@ -470,9 +469,9 @@ int svnic_dev_fw_info(struct vnic_dev *vdev,
470 int err = 0; 469 int err = 0;
471 470
472 if (!vdev->fw_info) { 471 if (!vdev->fw_info) {
473 vdev->fw_info = pci_alloc_consistent(vdev->pdev, 472 vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
474 sizeof(struct vnic_devcmd_fw_info), 473 sizeof(struct vnic_devcmd_fw_info),
475 &vdev->fw_info_pa); 474 &vdev->fw_info_pa, GFP_KERNEL);
476 if (!vdev->fw_info) 475 if (!vdev->fw_info)
477 return -ENOMEM; 476 return -ENOMEM;
478 477
@@ -534,8 +533,8 @@ int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
534 int wait = VNIC_DVCMD_TMO; 533 int wait = VNIC_DVCMD_TMO;
535 534
536 if (!vdev->stats) { 535 if (!vdev->stats) {
537 vdev->stats = pci_alloc_consistent(vdev->pdev, 536 vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
538 sizeof(struct vnic_stats), &vdev->stats_pa); 537 sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
539 if (!vdev->stats) 538 if (!vdev->stats)
540 return -ENOMEM; 539 return -ENOMEM;
541 } 540 }
@@ -607,9 +606,9 @@ int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
607 int wait = VNIC_DVCMD_TMO; 606 int wait = VNIC_DVCMD_TMO;
608 607
609 if (!vdev->notify) { 608 if (!vdev->notify) {
610 vdev->notify = pci_alloc_consistent(vdev->pdev, 609 vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
611 sizeof(struct vnic_devcmd_notify), 610 sizeof(struct vnic_devcmd_notify),
612 &vdev->notify_pa); 611 &vdev->notify_pa, GFP_KERNEL);
613 if (!vdev->notify) 612 if (!vdev->notify)
614 return -ENOMEM; 613 return -ENOMEM;
615 } 614 }
@@ -697,21 +696,21 @@ void svnic_dev_unregister(struct vnic_dev *vdev)
697{ 696{
698 if (vdev) { 697 if (vdev) {
699 if (vdev->notify) 698 if (vdev->notify)
700 pci_free_consistent(vdev->pdev, 699 dma_free_coherent(&vdev->pdev->dev,
701 sizeof(struct vnic_devcmd_notify), 700 sizeof(struct vnic_devcmd_notify),
702 vdev->notify, 701 vdev->notify,
703 vdev->notify_pa); 702 vdev->notify_pa);
704 if (vdev->linkstatus) 703 if (vdev->linkstatus)
705 pci_free_consistent(vdev->pdev, 704 dma_free_coherent(&vdev->pdev->dev,
706 sizeof(u32), 705 sizeof(u32),
707 vdev->linkstatus, 706 vdev->linkstatus,
708 vdev->linkstatus_pa); 707 vdev->linkstatus_pa);
709 if (vdev->stats) 708 if (vdev->stats)
710 pci_free_consistent(vdev->pdev, 709 dma_free_coherent(&vdev->pdev->dev,
711 sizeof(struct vnic_stats), 710 sizeof(struct vnic_stats),
712 vdev->stats, vdev->stats_pa); 711 vdev->stats, vdev->stats_pa);
713 if (vdev->fw_info) 712 if (vdev->fw_info)
714 pci_free_consistent(vdev->pdev, 713 dma_free_coherent(&vdev->pdev->dev,
715 sizeof(struct vnic_devcmd_fw_info), 714 sizeof(struct vnic_devcmd_fw_info),
716 vdev->fw_info, vdev->fw_info_pa); 715 vdev->fw_info, vdev->fw_info_pa);
717 if (vdev->devcmd2) 716 if (vdev->devcmd2)
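
[ Editor's note: one subtlety in the vnic_dev conversion is the gfp argument. pci_alloc_consistent() was a compat wrapper around dma_alloc_coherent() that hard-coded GFP_ATOMIC; calling dma_alloc_coherent() directly exposes the flag, and these probe-time, process-context allocations can now use GFP_KERNEL, which may sleep and is far less likely to fail. Sketch of the pairing, with size/pa as placeholders: ]

/* before: p = pci_alloc_consistent(pdev, size, &pa);   implied GFP_ATOMIC */
void *p = dma_alloc_coherent(&pdev->dev, size, &pa, GFP_KERNEL);

/* before: pci_free_consistent(pdev, size, p, pa); */
dma_free_coherent(&pdev->dev, size, p, pa);
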
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 0b1421cdf8a0..c9a55d0f076d 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -60,30 +60,6 @@ static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
60 return readb(esp->regs + (reg * 4UL)); 60 return readb(esp->regs + (reg * 4UL));
61} 61}
62 62
63static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
64 size_t sz, int dir)
65{
66 return dma_map_single(esp->dev, buf, sz, dir);
67}
68
69static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
70 int num_sg, int dir)
71{
72 return dma_map_sg(esp->dev, sg, num_sg, dir);
73}
74
75static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
76 size_t sz, int dir)
77{
78 dma_unmap_single(esp->dev, addr, sz, dir);
79}
80
81static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
82 int num_sg, int dir)
83{
84 dma_unmap_sg(esp->dev, sg, num_sg, dir);
85}
86
87static int sun3x_esp_irq_pending(struct esp *esp) 63static int sun3x_esp_irq_pending(struct esp *esp)
88{ 64{
89 if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) 65 if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -182,10 +158,6 @@ static int sun3x_esp_dma_error(struct esp *esp)
182static const struct esp_driver_ops sun3x_esp_ops = { 158static const struct esp_driver_ops sun3x_esp_ops = {
183 .esp_write8 = sun3x_esp_write8, 159 .esp_write8 = sun3x_esp_write8,
184 .esp_read8 = sun3x_esp_read8, 160 .esp_read8 = sun3x_esp_read8,
185 .map_single = sun3x_esp_map_single,
186 .map_sg = sun3x_esp_map_sg,
187 .unmap_single = sun3x_esp_unmap_single,
188 .unmap_sg = sun3x_esp_unmap_sg,
189 .irq_pending = sun3x_esp_irq_pending, 161 .irq_pending = sun3x_esp_irq_pending,
190 .reset_dma = sun3x_esp_reset_dma, 162 .reset_dma = sun3x_esp_reset_dma,
191 .dma_drain = sun3x_esp_dma_drain, 163 .dma_drain = sun3x_esp_dma_drain,
@@ -246,7 +218,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
246 218
247 dev_set_drvdata(&dev->dev, esp); 219 dev_set_drvdata(&dev->dev, esp);
248 220
249 err = scsi_esp_register(esp, &dev->dev); 221 err = scsi_esp_register(esp);
250 if (err) 222 if (err)
251 goto fail_free_irq; 223 goto fail_free_irq;
252 224
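
[ Editor's note: the four trampolines deleted above, and the matching esp_driver_ops hooks, existed only so the esp core could map buffers without knowing the bus type. With esp->dev now holding a plain struct device (see also the sun_esp.c changes below), the core can call the generic DMA API itself, which is also why scsi_esp_register() no longer takes a separate device argument. Roughly, inside the core; the function name is illustrative: ]

static int esp_map_example(struct esp *esp, void *buf, size_t sz,
			   enum dma_data_direction dir)
{
	/* formerly esp->ops->map_single(esp, buf, sz, dir) */
	dma_addr_t addr = dma_map_single(esp->dev, buf, sz, dir);

	if (dma_mapping_error(esp->dev, addr))
		return -ENOMEM;

	/* ... run the transfer ... */

	dma_unmap_single(esp->dev, addr, sz, dir);
	return 0;
}
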
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 747ee64a78e1..a11efbcb7f8b 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -80,7 +80,7 @@ static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
80 80
81static int esp_sbus_map_regs(struct esp *esp, int hme) 81static int esp_sbus_map_regs(struct esp *esp, int hme)
82{ 82{
83 struct platform_device *op = esp->dev; 83 struct platform_device *op = to_platform_device(esp->dev);
84 struct resource *res; 84 struct resource *res;
85 85
86 /* On HME, two reg sets exist, first is DVMA, 86 /* On HME, two reg sets exist, first is DVMA,
@@ -100,11 +100,9 @@ static int esp_sbus_map_regs(struct esp *esp, int hme)
100 100
101static int esp_sbus_map_command_block(struct esp *esp) 101static int esp_sbus_map_command_block(struct esp *esp)
102{ 102{
103 struct platform_device *op = esp->dev; 103 esp->command_block = dma_alloc_coherent(esp->dev, 16,
104
105 esp->command_block = dma_alloc_coherent(&op->dev, 16,
106 &esp->command_block_dma, 104 &esp->command_block_dma,
107 GFP_ATOMIC); 105 GFP_KERNEL);
108 if (!esp->command_block) 106 if (!esp->command_block)
109 return -ENOMEM; 107 return -ENOMEM;
110 return 0; 108 return 0;
@@ -113,7 +111,7 @@ static int esp_sbus_map_command_block(struct esp *esp)
113static int esp_sbus_register_irq(struct esp *esp) 111static int esp_sbus_register_irq(struct esp *esp)
114{ 112{
115 struct Scsi_Host *host = esp->host; 113 struct Scsi_Host *host = esp->host;
116 struct platform_device *op = esp->dev; 114 struct platform_device *op = to_platform_device(esp->dev);
117 115
118 host->irq = op->archdata.irqs[0]; 116 host->irq = op->archdata.irqs[0];
119 return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); 117 return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
@@ -121,7 +119,7 @@ static int esp_sbus_register_irq(struct esp *esp)
121 119
122static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma) 120static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
123{ 121{
124 struct platform_device *op = esp->dev; 122 struct platform_device *op = to_platform_device(esp->dev);
125 struct device_node *dp; 123 struct device_node *dp;
126 124
127 dp = op->dev.of_node; 125 dp = op->dev.of_node;
@@ -143,7 +141,7 @@ done:
143 141
144static void esp_get_differential(struct esp *esp) 142static void esp_get_differential(struct esp *esp)
145{ 143{
146 struct platform_device *op = esp->dev; 144 struct platform_device *op = to_platform_device(esp->dev);
147 struct device_node *dp; 145 struct device_node *dp;
148 146
149 dp = op->dev.of_node; 147 dp = op->dev.of_node;
@@ -155,7 +153,7 @@ static void esp_get_differential(struct esp *esp)
155 153
156static void esp_get_clock_params(struct esp *esp) 154static void esp_get_clock_params(struct esp *esp)
157{ 155{
158 struct platform_device *op = esp->dev; 156 struct platform_device *op = to_platform_device(esp->dev);
159 struct device_node *bus_dp, *dp; 157 struct device_node *bus_dp, *dp;
160 int fmhz; 158 int fmhz;
161 159
@@ -172,7 +170,7 @@ static void esp_get_clock_params(struct esp *esp)
172static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of) 170static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
173{ 171{
174 struct device_node *dma_dp = dma_of->dev.of_node; 172 struct device_node *dma_dp = dma_of->dev.of_node;
175 struct platform_device *op = esp->dev; 173 struct platform_device *op = to_platform_device(esp->dev);
176 struct device_node *dp; 174 struct device_node *dp;
177 u8 bursts, val; 175 u8 bursts, val;
178 176
@@ -212,38 +210,6 @@ static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
212 return sbus_readb(esp->regs + (reg * 4UL)); 210 return sbus_readb(esp->regs + (reg * 4UL));
213} 211}
214 212
215static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
216 size_t sz, int dir)
217{
218 struct platform_device *op = esp->dev;
219
220 return dma_map_single(&op->dev, buf, sz, dir);
221}
222
223static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
224 int num_sg, int dir)
225{
226 struct platform_device *op = esp->dev;
227
228 return dma_map_sg(&op->dev, sg, num_sg, dir);
229}
230
231static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
232 size_t sz, int dir)
233{
234 struct platform_device *op = esp->dev;
235
236 dma_unmap_single(&op->dev, addr, sz, dir);
237}
238
239static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
240 int num_sg, int dir)
241{
242 struct platform_device *op = esp->dev;
243
244 dma_unmap_sg(&op->dev, sg, num_sg, dir);
245}
246
247static int sbus_esp_irq_pending(struct esp *esp) 213static int sbus_esp_irq_pending(struct esp *esp)
248{ 214{
249 if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) 215 if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -255,14 +221,13 @@ static void sbus_esp_reset_dma(struct esp *esp)
255{ 221{
256 int can_do_burst16, can_do_burst32, can_do_burst64; 222 int can_do_burst16, can_do_burst32, can_do_burst64;
257 int can_do_sbus64, lim; 223 int can_do_sbus64, lim;
258 struct platform_device *op; 224 struct platform_device *op = to_platform_device(esp->dev);
259 u32 val; 225 u32 val;
260 226
261 can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; 227 can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
262 can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; 228 can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
263 can_do_burst64 = 0; 229 can_do_burst64 = 0;
264 can_do_sbus64 = 0; 230 can_do_sbus64 = 0;
265 op = esp->dev;
266 if (sbus_can_dma_64bit()) 231 if (sbus_can_dma_64bit())
267 can_do_sbus64 = 1; 232 can_do_sbus64 = 1;
268 if (sbus_can_burst64()) 233 if (sbus_can_burst64())
@@ -474,10 +439,6 @@ static int sbus_esp_dma_error(struct esp *esp)
474static const struct esp_driver_ops sbus_esp_ops = { 439static const struct esp_driver_ops sbus_esp_ops = {
475 .esp_write8 = sbus_esp_write8, 440 .esp_write8 = sbus_esp_write8,
476 .esp_read8 = sbus_esp_read8, 441 .esp_read8 = sbus_esp_read8,
477 .map_single = sbus_esp_map_single,
478 .map_sg = sbus_esp_map_sg,
479 .unmap_single = sbus_esp_unmap_single,
480 .unmap_sg = sbus_esp_unmap_sg,
481 .irq_pending = sbus_esp_irq_pending, 442 .irq_pending = sbus_esp_irq_pending,
482 .reset_dma = sbus_esp_reset_dma, 443 .reset_dma = sbus_esp_reset_dma,
483 .dma_drain = sbus_esp_dma_drain, 444 .dma_drain = sbus_esp_dma_drain,
@@ -504,7 +465,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
504 esp = shost_priv(host); 465 esp = shost_priv(host);
505 466
506 esp->host = host; 467 esp->host = host;
507 esp->dev = op; 468 esp->dev = &op->dev;
508 esp->ops = &sbus_esp_ops; 469 esp->ops = &sbus_esp_ops;
509 470
510 if (hme) 471 if (hme)
@@ -540,7 +501,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
540 501
541 dev_set_drvdata(&op->dev, esp); 502 dev_set_drvdata(&op->dev, esp);
542 503
543 err = scsi_esp_register(esp, &op->dev); 504 err = scsi_esp_register(esp);
544 if (err) 505 if (err)
545 goto fail_free_irq; 506 goto fail_free_irq;
546 507
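
[ Editor's note: since esp->dev now stores &op->dev rather than the platform_device itself, the handful of places that still need platform-specific data (IRQs, OF properties) recover it with to_platform_device(), which is just container_of() on the embedded struct device: ]

/* from include/linux/platform_device.h */
#define to_platform_device(x) container_of((x), struct platform_device, dev)

struct platform_device *op = to_platform_device(esp->dev);
int irq = op->archdata.irqs[0];	/* sparc platform IRQs still reachable */
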
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index bd3f6e2d6834..0a2a54517b15 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -4370,6 +4370,13 @@ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym
4370 OUTB(np, HS_PRT, HS_BUSY); 4370 OUTB(np, HS_PRT, HS_BUSY);
4371} 4371}
4372 4372
4373#define sym_printk(lvl, tp, cp, fmt, v...) do { \
4374 if (cp) \
4375 scmd_printk(lvl, cp->cmd, fmt, ##v); \
4376 else \
4377 starget_printk(lvl, tp->starget, fmt, ##v); \
4378} while (0)
4379
4373/* 4380/*
4374 * chip exception handler for programmed interrupts. 4381 * chip exception handler for programmed interrupts.
4375 */ 4382 */
@@ -4415,7 +4422,7 @@ static void sym_int_sir(struct sym_hcb *np)
4415 * been selected with ATN. We do not want to handle that. 4422 * been selected with ATN. We do not want to handle that.
4416 */ 4423 */
4417 case SIR_SEL_ATN_NO_MSG_OUT: 4424 case SIR_SEL_ATN_NO_MSG_OUT:
4418 scmd_printk(KERN_WARNING, cp->cmd, 4425 sym_printk(KERN_WARNING, tp, cp,
4419 "No MSG OUT phase after selection with ATN\n"); 4426 "No MSG OUT phase after selection with ATN\n");
4420 goto out_stuck; 4427 goto out_stuck;
4421 /* 4428 /*
@@ -4423,7 +4430,7 @@ static void sym_int_sir(struct sym_hcb *np)
4423 * having reselected the initiator. 4430 * having reselected the initiator.
4424 */ 4431 */
4425 case SIR_RESEL_NO_MSG_IN: 4432 case SIR_RESEL_NO_MSG_IN:
4426 scmd_printk(KERN_WARNING, cp->cmd, 4433 sym_printk(KERN_WARNING, tp, cp,
4427 "No MSG IN phase after reselection\n"); 4434 "No MSG IN phase after reselection\n");
4428 goto out_stuck; 4435 goto out_stuck;
4429 /* 4436 /*
@@ -4431,7 +4438,7 @@ static void sym_int_sir(struct sym_hcb *np)
4431 * an IDENTIFY. 4438 * an IDENTIFY.
4432 */ 4439 */
4433 case SIR_RESEL_NO_IDENTIFY: 4440 case SIR_RESEL_NO_IDENTIFY:
4434 scmd_printk(KERN_WARNING, cp->cmd, 4441 sym_printk(KERN_WARNING, tp, cp,
4435 "No IDENTIFY after reselection\n"); 4442 "No IDENTIFY after reselection\n");
4436 goto out_stuck; 4443 goto out_stuck;
4437 /* 4444 /*
@@ -4460,7 +4467,7 @@ static void sym_int_sir(struct sym_hcb *np)
4460 case SIR_RESEL_ABORTED: 4467 case SIR_RESEL_ABORTED:
4461 np->lastmsg = np->msgout[0]; 4468 np->lastmsg = np->msgout[0];
4462 np->msgout[0] = M_NOOP; 4469 np->msgout[0] = M_NOOP;
4463 scmd_printk(KERN_WARNING, cp->cmd, 4470 sym_printk(KERN_WARNING, tp, cp,
4464 "message %x sent on bad reselection\n", np->lastmsg); 4471 "message %x sent on bad reselection\n", np->lastmsg);
4465 goto out; 4472 goto out;
4466 /* 4473 /*
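
[ Editor's note: the new sym_printk() wrapper fixes a latent NULL dereference. The SIR_SEL_ATN_NO_MSG_OUT and SIR_RESEL_* interrupts can arrive when no command is active, in which case cp is NULL and the old scmd_printk(..., cp->cmd, ...) would oops; the macro degrades to a target-level message instead. Usage, as in the hunks above: ]

/* cp may legitimately be NULL here; the target (tp) is still known */
sym_printk(KERN_WARNING, tp, cp,
	   "No MSG IN phase after reselection\n");
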
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e09fe6ab3572..2ddd426323e9 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -109,3 +109,22 @@ config SCSI_UFS_HISI
109 109
110 Select this if you have UFS controller on Hisilicon chipset. 110 Select this if you have UFS controller on Hisilicon chipset.
111 If unsure, say N. 111 If unsure, say N.
112
113config SCSI_UFS_BSG
114 bool "Universal Flash Storage BSG device node"
115 depends on SCSI_UFSHCD
116 select BLK_DEV_BSGLIB
117 help
118 Universal Flash Storage (UFS) is a SCSI transport specification for
119 accessing flash storage on digital cameras, mobile phones and
120 consumer electronic devices.
121 A UFS controller communicates with a UFS device by exchanging
122 UFS Protocol Information Units (UPIUs).
123 UPIUs can not only be used as a transport layer for the SCSI protocol
124 but are also used by the UFS native command set.
125 This transport driver supports exchanging UFS protocol information units
126 with a UFS device. See also the ufshcd driver, which is a SCSI driver
127 that supports UFS devices.
128
129 Select this if you need a bsg device node for your UFS controller.
130 If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 2c50f03d8c4a..aca481329828 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -4,7 +4,8 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.
4obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o 4obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
5obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o 5obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
6obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o 6obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
7ufshcd-core-objs := ufshcd.o ufs-sysfs.o 7ufshcd-core-y += ufshcd.o ufs-sysfs.o
8ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
8obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o 9obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
9obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o 10obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
10obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o 11obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 75ee5906b966..4a6b2b350ace 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -70,20 +70,27 @@ static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
70} 70}
71 71
72static int ufs_qcom_host_clk_get(struct device *dev, 72static int ufs_qcom_host_clk_get(struct device *dev,
73 const char *name, struct clk **clk_out) 73 const char *name, struct clk **clk_out, bool optional)
74{ 74{
75 struct clk *clk; 75 struct clk *clk;
76 int err = 0; 76 int err = 0;
77 77
78 clk = devm_clk_get(dev, name); 78 clk = devm_clk_get(dev, name);
79 if (IS_ERR(clk)) { 79 if (!IS_ERR(clk)) {
80 err = PTR_ERR(clk);
81 dev_err(dev, "%s: failed to get %s err %d",
82 __func__, name, err);
83 } else {
84 *clk_out = clk; 80 *clk_out = clk;
81 return 0;
85 } 82 }
86 83
84 err = PTR_ERR(clk);
85
86 if (optional && err == -ENOENT) {
87 *clk_out = NULL;
88 return 0;
89 }
90
91 if (err != -EPROBE_DEFER)
92 dev_err(dev, "failed to get %s err %d\n", name, err);
93
87 return err; 94 return err;
88} 95}
89 96
@@ -104,11 +111,9 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
104 if (!host->is_lane_clks_enabled) 111 if (!host->is_lane_clks_enabled)
105 return; 112 return;
106 113
107 if (host->hba->lanes_per_direction > 1) 114 clk_disable_unprepare(host->tx_l1_sync_clk);
108 clk_disable_unprepare(host->tx_l1_sync_clk);
109 clk_disable_unprepare(host->tx_l0_sync_clk); 115 clk_disable_unprepare(host->tx_l0_sync_clk);
110 if (host->hba->lanes_per_direction > 1) 116 clk_disable_unprepare(host->rx_l1_sync_clk);
111 clk_disable_unprepare(host->rx_l1_sync_clk);
112 clk_disable_unprepare(host->rx_l0_sync_clk); 117 clk_disable_unprepare(host->rx_l0_sync_clk);
113 118
114 host->is_lane_clks_enabled = false; 119 host->is_lane_clks_enabled = false;
@@ -132,24 +137,21 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
132 if (err) 137 if (err)
133 goto disable_rx_l0; 138 goto disable_rx_l0;
134 139
135 if (host->hba->lanes_per_direction > 1) { 140 err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
136 err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
137 host->rx_l1_sync_clk); 141 host->rx_l1_sync_clk);
138 if (err) 142 if (err)
139 goto disable_tx_l0; 143 goto disable_tx_l0;
140 144
141 err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", 145 err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
142 host->tx_l1_sync_clk); 146 host->tx_l1_sync_clk);
143 if (err) 147 if (err)
144 goto disable_rx_l1; 148 goto disable_rx_l1;
145 }
146 149
147 host->is_lane_clks_enabled = true; 150 host->is_lane_clks_enabled = true;
148 goto out; 151 goto out;
149 152
150disable_rx_l1: 153disable_rx_l1:
151 if (host->hba->lanes_per_direction > 1) 154 clk_disable_unprepare(host->rx_l1_sync_clk);
152 clk_disable_unprepare(host->rx_l1_sync_clk);
153disable_tx_l0: 155disable_tx_l0:
154 clk_disable_unprepare(host->tx_l0_sync_clk); 156 clk_disable_unprepare(host->tx_l0_sync_clk);
155disable_rx_l0: 157disable_rx_l0:
@@ -163,25 +165,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
163 int err = 0; 165 int err = 0;
164 struct device *dev = host->hba->dev; 166 struct device *dev = host->hba->dev;
165 167
166 err = ufs_qcom_host_clk_get(dev, 168 err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
167 "rx_lane0_sync_clk", &host->rx_l0_sync_clk); 169 &host->rx_l0_sync_clk, false);
168 if (err) 170 if (err)
169 goto out; 171 goto out;
170 172
171 err = ufs_qcom_host_clk_get(dev, 173 err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
172 "tx_lane0_sync_clk", &host->tx_l0_sync_clk); 174 &host->tx_l0_sync_clk, false);
173 if (err) 175 if (err)
174 goto out; 176 goto out;
175 177
176 /* In case of single lane per direction, don't read lane1 clocks */ 178 /* In case of single lane per direction, don't read lane1 clocks */
177 if (host->hba->lanes_per_direction > 1) { 179 if (host->hba->lanes_per_direction > 1) {
178 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", 180 err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
179 &host->rx_l1_sync_clk); 181 &host->rx_l1_sync_clk, false);
180 if (err) 182 if (err)
181 goto out; 183 goto out;
182 184
183 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", 185 err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
184 &host->tx_l1_sync_clk); 186 &host->tx_l1_sync_clk, true);
185 } 187 }
186out: 188out:
187 return err; 189 return err;
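
[ Editor's note: the ufs-qcom clock rework leans on two properties of the clk API. First, devm_clk_get() returns -ENOENT when a clock simply is not described, which the new `optional` flag converts into a NULL handle; other errors, including -EPROBE_DEFER, propagate, and -EPROBE_DEFER is deliberately not logged. Second, clk_prepare_enable() and clk_disable_unprepare() treat a NULL clk as a successful no-op, which is what lets the lanes_per_direction guards around the lane-1 clocks be dropped. Condensed, assuming `dev` and `optional` as in the new helper: ]

struct clk *clk = devm_clk_get(dev, "tx_lane1_sync_clk");
if (IS_ERR(clk)) {
	if (optional && PTR_ERR(clk) == -ENOENT)
		clk = NULL;		/* absent but optional: carry on */
	else
		return PTR_ERR(clk);	/* incl. -EPROBE_DEFER, not logged */
}

clk_prepare_enable(clk);		/* NULL clk: returns 0, does nothing */
clk_disable_unprepare(clk);		/* NULL clk: no-op */
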
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 14e5bf7af0bb..58087d3916d0 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -38,9 +38,9 @@
38 38
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/types.h> 40#include <linux/types.h>
41#include <uapi/scsi/scsi_bsg_ufs.h>
41 42
42#define MAX_CDB_SIZE 16 43#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
43#define GENERAL_UPIU_REQUEST_SIZE 32
44#define QUERY_DESC_MAX_SIZE 255 44#define QUERY_DESC_MAX_SIZE 255
45#define QUERY_DESC_MIN_SIZE 2 45#define QUERY_DESC_MIN_SIZE 2
46#define QUERY_DESC_HDR_SIZE 2 46#define QUERY_DESC_HDR_SIZE 2
@@ -414,6 +414,7 @@ enum {
414 MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF, 414 MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
415 MASK_RSP_EXCEPTION_EVENT = 0x10000, 415 MASK_RSP_EXCEPTION_EVENT = 0x10000,
416 MASK_TM_SERVICE_RESP = 0xFF, 416 MASK_TM_SERVICE_RESP = 0xFF,
417 MASK_TM_FUNC = 0xFF,
417}; 418};
418 419
419/* Task management service response */ 420/* Task management service response */
@@ -433,65 +434,6 @@ enum ufs_dev_pwr_mode {
433}; 434};
434 435
435/** 436/**
436 * struct utp_upiu_header - UPIU header structure
437 * @dword_0: UPIU header DW-0
438 * @dword_1: UPIU header DW-1
439 * @dword_2: UPIU header DW-2
440 */
441struct utp_upiu_header {
442 __be32 dword_0;
443 __be32 dword_1;
444 __be32 dword_2;
445};
446
447/**
448 * struct utp_upiu_cmd - Command UPIU structure
449 * @data_transfer_len: Data Transfer Length DW-3
450 * @cdb: Command Descriptor Block CDB DW-4 to DW-7
451 */
452struct utp_upiu_cmd {
453 __be32 exp_data_transfer_len;
454 u8 cdb[MAX_CDB_SIZE];
455};
456
457/**
458 * struct utp_upiu_query - upiu request buffer structure for
459 * query request.
460 * @opcode: command to perform B-0
461 * @idn: a value that indicates the particular type of data B-1
462 * @index: Index to further identify data B-2
463 * @selector: Index to further identify data B-3
464 * @reserved_osf: spec reserved field B-4,5
465 * @length: number of descriptor bytes to read/write B-6,7
466 * @value: Attribute value to be written DW-5
467 * @reserved: spec reserved DW-6,7
468 */
469struct utp_upiu_query {
470 u8 opcode;
471 u8 idn;
472 u8 index;
473 u8 selector;
474 __be16 reserved_osf;
475 __be16 length;
476 __be32 value;
477 __be32 reserved[2];
478};
479
480/**
481 * struct utp_upiu_req - general upiu request structure
482 * @header:UPIU header structure DW-0 to DW-2
483 * @sc: fields structure for scsi command DW-3 to DW-7
484 * @qr: fields structure for query request DW-3 to DW-7
485 */
486struct utp_upiu_req {
487 struct utp_upiu_header header;
488 union {
489 struct utp_upiu_cmd sc;
490 struct utp_upiu_query qr;
491 };
492};
493
494/**
495 * struct utp_cmd_rsp - Response UPIU structure 437 * struct utp_cmd_rsp - Response UPIU structure
496 * @residual_transfer_count: Residual transfer count DW-3 438 * @residual_transfer_count: Residual transfer count DW-3
497 * @reserved: Reserved double words DW-4 to DW-7 439 * @reserved: Reserved double words DW-4 to DW-7
@@ -520,36 +462,6 @@ struct utp_upiu_rsp {
520}; 462};
521 463
522/** 464/**
523 * struct utp_upiu_task_req - Task request UPIU structure
524 * @header - UPIU header structure DW0 to DW-2
525 * @input_param1: Input parameter 1 DW-3
526 * @input_param2: Input parameter 2 DW-4
527 * @input_param3: Input parameter 3 DW-5
528 * @reserved: Reserved double words DW-6 to DW-7
529 */
530struct utp_upiu_task_req {
531 struct utp_upiu_header header;
532 __be32 input_param1;
533 __be32 input_param2;
534 __be32 input_param3;
535 __be32 reserved[2];
536};
537
538/**
539 * struct utp_upiu_task_rsp - Task Management Response UPIU structure
540 * @header: UPIU header structure DW0-DW-2
541 * @output_param1: Ouput parameter 1 DW3
542 * @output_param2: Output parameter 2 DW4
543 * @reserved: Reserved double words DW-5 to DW-7
544 */
545struct utp_upiu_task_rsp {
546 struct utp_upiu_header header;
547 __be32 output_param1;
548 __be32 output_param2;
549 __be32 reserved[3];
550};
551
552/**
553 * struct ufs_query_req - parameters for building a query request 465 * struct ufs_query_req - parameters for building a query request
554 * @query_func: UPIU header query function 466 * @query_func: UPIU header query function
555 * @upiu_req: the query request data 467 * @upiu_req: the query request data
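
[ Editor's note: the UPIU structures deleted here are not gone; they move to the new uapi header <uapi/scsi/scsi_bsg_ufs.h>, included at the top of this hunk, so userspace can build bsg requests from the same definitions the driver uses. A side benefit is that the request size stops being a magic number: ]

/* before: easy to let drift from the actual struct layout */
#define GENERAL_UPIU_REQUEST_SIZE 32

/* after: always tracks the shared uapi definition */
#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
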
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
new file mode 100644
index 000000000000..e5f8e54bf644
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -0,0 +1,210 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * bsg endpoint that supports UPIUs
4 *
5 * Copyright (C) 2018 Western Digital Corporation
6 */
7#include "ufs_bsg.h"
8
9static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
10 struct utp_upiu_query *qr)
11{
12 int desc_size = be16_to_cpu(qr->length);
13 int desc_id = qr->idn;
14 int ret;
15
16 if (desc_size <= 0)
17 return -EINVAL;
18
19 ret = ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
20 if (ret || !*desc_len)
21 return -EINVAL;
22
23 *desc_len = min_t(int, *desc_len, desc_size);
24
25 return 0;
26}
27
28static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
29 unsigned int request_len,
30 unsigned int reply_len,
31 int desc_len, enum query_opcode desc_op)
32{
33 int min_req_len = sizeof(struct ufs_bsg_request);
34 int min_rsp_len = sizeof(struct ufs_bsg_reply);
35
36 if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
37 min_req_len += desc_len;
38
39 if (min_req_len > request_len || min_rsp_len > reply_len) {
40 dev_err(hba->dev, "not enough space assigned\n");
41 return -EINVAL;
42 }
43
44 return 0;
45}
46
47static int ufs_bsg_verify_query_params(struct ufs_hba *hba,
48 struct ufs_bsg_request *bsg_request,
49 unsigned int request_len,
50 unsigned int reply_len,
51 uint8_t *desc_buff, int *desc_len,
52 enum query_opcode desc_op)
53{
54 struct utp_upiu_query *qr;
55
56 if (desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
57 dev_err(hba->dev, "unsupported opcode %d\n", desc_op);
58 return -ENOTSUPP;
59 }
60
61 if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC)
62 goto out;
63
64 qr = &bsg_request->upiu_req.qr;
65 if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
66 dev_err(hba->dev, "Illegal desc size\n");
67 return -EINVAL;
68 }
69
70 if (ufs_bsg_verify_query_size(hba, request_len, reply_len, *desc_len,
71 desc_op))
72 return -EINVAL;
73
74 desc_buff = (uint8_t *)(bsg_request + 1);
75
76out:
77 return 0;
78}
79
80static int ufs_bsg_request(struct bsg_job *job)
81{
82 struct ufs_bsg_request *bsg_request = job->request;
83 struct ufs_bsg_reply *bsg_reply = job->reply;
84 struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
85 unsigned int req_len = job->request_len;
86 unsigned int reply_len = job->reply_len;
87 struct uic_command uc = {};
88 int msgcode;
89 uint8_t *desc_buff = NULL;
90 int desc_len = 0;
91 enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
92 int ret;
93
94 ret = ufs_bsg_verify_query_size(hba, req_len, reply_len, 0, desc_op);
95 if (ret)
96 goto out;
97
98 bsg_reply->reply_payload_rcv_len = 0;
99
100 msgcode = bsg_request->msgcode;
101 switch (msgcode) {
102 case UPIU_TRANSACTION_QUERY_REQ:
103 desc_op = bsg_request->upiu_req.qr.opcode;
104 ret = ufs_bsg_verify_query_params(hba, bsg_request, req_len,
105 reply_len, desc_buff,
106 &desc_len, desc_op);
107 if (ret)
108 goto out;
109
110 /* fall through */
111 case UPIU_TRANSACTION_NOP_OUT:
112 case UPIU_TRANSACTION_TASK_REQ:
113 ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
114 &bsg_reply->upiu_rsp, msgcode,
115 desc_buff, &desc_len, desc_op);
116 if (ret)
117 dev_err(hba->dev,
118 "exe raw upiu: error code %d\n", ret);
119
120 break;
121 case UPIU_TRANSACTION_UIC_CMD:
122 memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
123 ret = ufshcd_send_uic_cmd(hba, &uc);
124 if (ret)
125 dev_dbg(hba->dev,
126 "send uic cmd: error code %d\n", ret);
127
128 memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
129
130 break;
131 default:
132 ret = -ENOTSUPP;
133 dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
134
135 break;
136 }
137
138out:
139 bsg_reply->result = ret;
140 job->reply_len = sizeof(struct ufs_bsg_reply) +
141 bsg_reply->reply_payload_rcv_len;
142
143 bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
144
145 return ret;
146}
147
148/**
149 * ufs_bsg_remove - detach and remove the added ufs-bsg node
150 *
151 * Should be called when unloading the driver.
152 */
153void ufs_bsg_remove(struct ufs_hba *hba)
154{
155 struct device *bsg_dev = &hba->bsg_dev;
156
157 if (!hba->bsg_queue)
158 return;
159
160 bsg_unregister_queue(hba->bsg_queue);
161
162 device_del(bsg_dev);
163 put_device(bsg_dev);
164}
165
166static inline void ufs_bsg_node_release(struct device *dev)
167{
168 put_device(dev->parent);
169}
170
171/**
172 * ufs_bsg_probe - Add ufs bsg device node
173 * @hba: per adapter object
174 *
175 * Called during initial loading of the driver, and before scsi_scan_host.
176 */
177int ufs_bsg_probe(struct ufs_hba *hba)
178{
179 struct device *bsg_dev = &hba->bsg_dev;
180 struct Scsi_Host *shost = hba->host;
181 struct device *parent = &shost->shost_gendev;
182 struct request_queue *q;
183 int ret;
184
185 device_initialize(bsg_dev);
186
187 bsg_dev->parent = get_device(parent);
188 bsg_dev->release = ufs_bsg_node_release;
189
190 dev_set_name(bsg_dev, "ufs-bsg");
191
192 ret = device_add(bsg_dev);
193 if (ret)
194 goto out;
195
196 q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, 0);
197 if (IS_ERR(q)) {
198 ret = PTR_ERR(q);
199 goto out;
200 }
201
202 hba->bsg_queue = q;
203
204 return 0;
205
206out:
207 dev_err(bsg_dev, "fail to initialize a bsg dev %d\n", shost->host_no);
208 put_device(bsg_dev);
209 return ret;
210}
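
[ Editor's note: nothing in this file shows the consumer side. Purely as a hypothetical illustration -- the /dev/bsg/ufs-bsg path follows from dev_set_name() above but depends on udev, and the helper name is mine -- a userspace client would open the node registered here and drive it through the sg v4 ioctl interface, roughly: ]

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>
#include <scsi/sg.h>			/* SG_IO */
#include <scsi/scsi_bsg_ufs.h>		/* ufs_bsg_request/ufs_bsg_reply */

/* issue one UPIU over the ufs-bsg node; returns bsg reply->result */
static int ufs_bsg_io(struct ufs_bsg_request *req, size_t req_len,
		      struct ufs_bsg_reply *rsp, size_t rsp_len)
{
	struct sg_io_v4 io;
	int fd, ret;

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';			/* mandatory sg v4 magic */
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request_len = req_len;
	io.request = (uint64_t)(uintptr_t)req;
	io.max_response_len = rsp_len;
	io.response = (uint64_t)(uintptr_t)rsp;

	fd = open("/dev/bsg/ufs-bsg", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, SG_IO, &io);
	close(fd);
	return ret < 0 ? -1 : (int)rsp->result;
}
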
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/scsi/ufs/ufs_bsg.h
new file mode 100644
index 000000000000..d09918758631
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.h
@@ -0,0 +1,23 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2018 Western Digital Corporation
4 */
5#ifndef UFS_BSG_H
6#define UFS_BSG_H
7
8#include <linux/bsg-lib.h>
9#include <scsi/scsi.h>
10#include <scsi/scsi_host.h>
11
12#include "ufshcd.h"
13#include "ufs.h"
14
15#ifdef CONFIG_SCSI_UFS_BSG
16void ufs_bsg_remove(struct ufs_hba *hba);
17int ufs_bsg_probe(struct ufs_hba *hba);
18#else
19static inline void ufs_bsg_remove(struct ufs_hba *hba) {}
20static inline int ufs_bsg_probe(struct ufs_hba *hba) {return 0; }
21#endif
22
23#endif /* UFS_BSG_H */
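
The #else branch above is the usual config-stub pattern: with
CONFIG_SCSI_UFS_BSG disabled, the probe/remove calls compile to nothing and
the core stays free of #ifdefs. A minimal sketch mirroring the ufshcd.c call
sites added later in this diff (the example_* wrappers are hypothetical):

/* Sketch: call sites need no #ifdef thanks to the stubs above. */
static void example_host_setup(struct ufs_hba *hba)
{
	ufs_bsg_probe(hba);	/* static inline returning 0 when the option is off */
}

static void example_host_teardown(struct ufs_hba *hba)
{
	ufs_bsg_remove(hba);	/* empty static inline when the option is off */
}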
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c55f38ec391c..23d7cca36ff0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -46,6 +46,7 @@
46#include "ufs_quirks.h" 46#include "ufs_quirks.h"
47#include "unipro.h" 47#include "unipro.h"
48#include "ufs-sysfs.h" 48#include "ufs-sysfs.h"
49#include "ufs_bsg.h"
49 50
50#define CREATE_TRACE_POINTS 51#define CREATE_TRACE_POINTS
51#include <trace/events/ufs.h> 52#include <trace/events/ufs.h>
@@ -326,14 +327,11 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
326static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, 327static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
327 const char *str) 328 const char *str)
328{ 329{
329 struct utp_task_req_desc *descp;
330 struct utp_upiu_task_req *task_req;
331 int off = (int)tag - hba->nutrs; 330 int off = (int)tag - hba->nutrs;
331 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
332 332
333 descp = &hba->utmrdl_base_addr[off]; 333 trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
334 task_req = (struct utp_upiu_task_req *)descp->task_req_upiu; 334 &descp->input_param1);
335 trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
336 &task_req->input_param1);
337} 335}
338 336
339static void ufshcd_add_command_trace(struct ufs_hba *hba, 337static void ufshcd_add_command_trace(struct ufs_hba *hba,
@@ -475,22 +473,13 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
475 473
476static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) 474static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
477{ 475{
478 struct utp_task_req_desc *tmrdp;
479 int tag; 476 int tag;
480 477
481 for_each_set_bit(tag, &bitmap, hba->nutmrs) { 478 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
482 tmrdp = &hba->utmrdl_base_addr[tag]; 479 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
480
483 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); 481 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
484 ufshcd_hex_dump("TM TRD: ", &tmrdp->header, 482 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
485 sizeof(struct request_desc_header));
486 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
487 tag);
488 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
489 sizeof(struct utp_upiu_req));
490 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
491 tag);
492 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
493 sizeof(struct utp_task_req_desc));
494 } 483 }
495} 484}
496 485
@@ -646,19 +635,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
646} 635}
647 636
648/** 637/**
649 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
650 * @task_req_descp: pointer to utp_task_req_desc structure
651 *
652 * This function is used to get the OCS field from UTMRD
653 * Returns the OCS field in the UTMRD
654 */
655static inline int
656ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
657{
658 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
659}
660
661/**
662 * ufshcd_get_tm_free_slot - get a free slot for task management request 638 * ufshcd_get_tm_free_slot - get a free slot for task management request
663 * @hba: per adapter instance 639 * @hba: per adapter instance
664 * @free_slot: pointer to variable with available slot value 640 * @free_slot: pointer to variable with available slot value
@@ -1691,8 +1667,9 @@ static void __ufshcd_release(struct ufs_hba *hba)
1691 1667
1692 hba->clk_gating.state = REQ_CLKS_OFF; 1668 hba->clk_gating.state = REQ_CLKS_OFF;
1693 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); 1669 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1694 schedule_delayed_work(&hba->clk_gating.gate_work, 1670 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1695 msecs_to_jiffies(hba->clk_gating.delay_ms)); 1671 &hba->clk_gating.gate_work,
1672 msecs_to_jiffies(hba->clk_gating.delay_ms));
1696} 1673}
1697 1674
1698void ufshcd_release(struct ufs_hba *hba) 1675void ufshcd_release(struct ufs_hba *hba)
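
For context: __ufshcd_release() previously put gate_work on the system
workqueue via schedule_delayed_work(); queueing it on the driver's dedicated
clk_gating workqueue keeps gating and ungating serialized on one ordered
queue. A hedged recollection of where that queue comes from (existing code in
ufshcd_init_clk_gating(), not part of this hunk):

	char wq_name[sizeof("ufs_clk_gating_00")];

	snprintf(wq_name, sizeof(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq =
		alloc_ordered_workqueue(wq_name, WQ_MEM_RECLAIM);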
@@ -1763,6 +1740,34 @@ out:
1763 return count; 1740 return count;
1764} 1741}
1765 1742
1743static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1744{
1745 char wq_name[sizeof("ufs_clkscaling_00")];
1746
1747 if (!ufshcd_is_clkscaling_supported(hba))
1748 return;
1749
1750 INIT_WORK(&hba->clk_scaling.suspend_work,
1751 ufshcd_clk_scaling_suspend_work);
1752 INIT_WORK(&hba->clk_scaling.resume_work,
1753 ufshcd_clk_scaling_resume_work);
1754
1755 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1756 hba->host->host_no);
1757 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1758
1759 ufshcd_clkscaling_init_sysfs(hba);
1760}
1761
1762static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1763{
1764 if (!ufshcd_is_clkscaling_supported(hba))
1765 return;
1766
1767 destroy_workqueue(hba->clk_scaling.workq);
1768 ufshcd_devfreq_remove(hba);
1769}
1770
1766static void ufshcd_init_clk_gating(struct ufs_hba *hba) 1771static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1767{ 1772{
1768 char wq_name[sizeof("ufs_clk_gating_00")]; 1773 char wq_name[sizeof("ufs_clk_gating_00")];
@@ -2055,8 +2060,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2055 * 2060 *
2056 * Returns 0 only if success. 2061 * Returns 0 only if success.
2057 */ 2062 */
2058static int 2063int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2059ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2060{ 2064{
2061 int ret; 2065 int ret;
2062 unsigned long flags; 2066 unsigned long flags;
@@ -2238,8 +2242,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2238 ucd_req_ptr->sc.exp_data_transfer_len = 2242 ucd_req_ptr->sc.exp_data_transfer_len =
2239 cpu_to_be32(lrbp->cmd->sdb.length); 2243 cpu_to_be32(lrbp->cmd->sdb.length);
2240 2244
2241 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE); 2245 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
2242 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE); 2246 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2243 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len); 2247 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2244 2248
2245 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2249 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
@@ -2258,7 +2262,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2258 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; 2262 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2259 struct ufs_query *query = &hba->dev_cmd.query; 2263 struct ufs_query *query = &hba->dev_cmd.query;
2260 u16 len = be16_to_cpu(query->request.upiu_req.length); 2264 u16 len = be16_to_cpu(query->request.upiu_req.length);
2261 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2262 2265
2263 /* Query request header */ 2266 /* Query request header */
2264 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( 2267 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -2280,7 +2283,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2280 2283
2281 /* Copy the Descriptor */ 2284 /* Copy the Descriptor */
2282 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) 2285 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2283 memcpy(descp, query->descriptor, len); 2286 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2284 2287
2285 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); 2288 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2286} 2289}
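
A note on the new memcpy() destination: ucd_req_ptr + 1 advances by one whole
struct utp_upiu_req, i.e. it addresses the first byte after the request UPIU,
which is exactly what the removed byte-offset computation produced (assuming
GENERAL_UPIU_REQUEST_SIZE equals sizeof(struct utp_upiu_req), 32 bytes):

	/* Equivalence sketch; both point just past the request UPIU. */
	u8 *old_descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
	void *new_descp = lrbp->ucd_req_ptr + 1;	/* same address */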
@@ -4601,46 +4604,6 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
4601} 4604}
4602 4605
4603/** 4606/**
4604 * ufshcd_task_req_compl - handle task management request completion
4605 * @hba: per adapter instance
4606 * @index: index of the completed request
4607 * @resp: task management service response
4608 *
4609 * Returns non-zero value on error, zero on success
4610 */
4611static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4612{
4613 struct utp_task_req_desc *task_req_descp;
4614 struct utp_upiu_task_rsp *task_rsp_upiup;
4615 unsigned long flags;
4616 int ocs_value;
4617 int task_result;
4618
4619 spin_lock_irqsave(hba->host->host_lock, flags);
4620
4621 /* Clear completed tasks from outstanding_tasks */
4622 __clear_bit(index, &hba->outstanding_tasks);
4623
4624 task_req_descp = hba->utmrdl_base_addr;
4625 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4626
4627 if (ocs_value == OCS_SUCCESS) {
4628 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4629 task_req_descp[index].task_rsp_upiu;
4630 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4631 task_result = task_result & MASK_TM_SERVICE_RESP;
4632 if (resp)
4633 *resp = (u8)task_result;
4634 } else {
4635 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4636 __func__, ocs_value);
4637 }
4638 spin_unlock_irqrestore(hba->host->host_lock, flags);
4639
4640 return ocs_value;
4641}
4642
4643/**
4644 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status 4607 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4645 * @lrbp: pointer to local reference block of completed command 4608 * @lrbp: pointer to local reference block of completed command
4646 * @scsi_status: SCSI command status 4609 * @scsi_status: SCSI command status
@@ -5597,28 +5560,12 @@ out:
5597 return err; 5560 return err;
5598} 5561}
5599 5562
5600/** 5563static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5601 * ufshcd_issue_tm_cmd - issues task management commands to controller 5564 struct utp_task_req_desc *treq, u8 tm_function)
5602 * @hba: per adapter instance
5603 * @lun_id: LUN ID to which TM command is sent
5604 * @task_id: task ID to which the TM command is applicable
5605 * @tm_function: task management function opcode
5606 * @tm_response: task management service response return value
5607 *
5608 * Returns non-zero value on error, zero on success.
5609 */
5610static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5611 u8 tm_function, u8 *tm_response)
5612{ 5565{
5613 struct utp_task_req_desc *task_req_descp; 5566 struct Scsi_Host *host = hba->host;
5614 struct utp_upiu_task_req *task_req_upiup;
5615 struct Scsi_Host *host;
5616 unsigned long flags; 5567 unsigned long flags;
5617 int free_slot; 5568 int free_slot, task_tag, err;
5618 int err;
5619 int task_tag;
5620
5621 host = hba->host;
5622 5569
5623 /* 5570 /*
5624 * Get free slot, sleep if slots are unavailable. 5571 * Get free slot, sleep if slots are unavailable.
@@ -5629,30 +5576,11 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5629 ufshcd_hold(hba, false); 5576 ufshcd_hold(hba, false);
5630 5577
5631 spin_lock_irqsave(host->host_lock, flags); 5578 spin_lock_irqsave(host->host_lock, flags);
5632 task_req_descp = hba->utmrdl_base_addr;
5633 task_req_descp += free_slot;
5634
5635 /* Configure task request descriptor */
5636 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5637 task_req_descp->header.dword_2 =
5638 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5639
5640 /* Configure task request UPIU */
5641 task_req_upiup =
5642 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
5643 task_tag = hba->nutrs + free_slot; 5579 task_tag = hba->nutrs + free_slot;
5644 task_req_upiup->header.dword_0 =
5645 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5646 lun_id, task_tag);
5647 task_req_upiup->header.dword_1 =
5648 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
5649 /*
5650 * The host shall provide the same value for LUN field in the basic
5651 * header and for Input Parameter.
5652 */
5653 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5654 task_req_upiup->input_param2 = cpu_to_be32(task_id);
5655 5580
5581 treq->req_header.dword_0 |= cpu_to_be32(task_tag);
5582
5583 memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
5656 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function); 5584 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5657 5585
5658 /* send command to the controller */ 5586 /* send command to the controller */
@@ -5682,8 +5610,15 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5682 __func__, free_slot); 5610 __func__, free_slot);
5683 err = -ETIMEDOUT; 5611 err = -ETIMEDOUT;
5684 } else { 5612 } else {
5685 err = ufshcd_task_req_compl(hba, free_slot, tm_response); 5613 err = 0;
5614 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
5615
5686 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete"); 5616 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5617
5618 spin_lock_irqsave(hba->host->host_lock, flags);
5619 __clear_bit(free_slot, &hba->outstanding_tasks);
5620 spin_unlock_irqrestore(hba->host->host_lock, flags);
5621
5687 } 5622 }
5688 5623
5689 clear_bit(free_slot, &hba->tm_condition); 5624 clear_bit(free_slot, &hba->tm_condition);
@@ -5695,6 +5630,228 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5695} 5630}
5696 5631
5697/** 5632/**
5633 * ufshcd_issue_tm_cmd - issues task management commands to controller
5634 * @hba: per adapter instance
5635 * @lun_id: LUN ID to which TM command is sent
5636 * @task_id: task ID to which the TM command is applicable
5637 * @tm_function: task management function opcode
5638 * @tm_response: task management service response return value
5639 *
5640 * Returns non-zero value on error, zero on success.
5641 */
5642static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5643 u8 tm_function, u8 *tm_response)
5644{
5645 struct utp_task_req_desc treq = { { 0 }, };
5646 int ocs_value, err;
5647
5648 /* Configure task request descriptor */
5649 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5650 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5651
5652 /* Configure task request UPIU */
5653 treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
5654 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
5655 treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
5656
5657 /*
5658 * The host shall provide the same value for LUN field in the basic
5659 * header and for Input Parameter.
5660 */
5661 treq.input_param1 = cpu_to_be32(lun_id);
5662 treq.input_param2 = cpu_to_be32(task_id);
5663
5664 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
5665 if (err == -ETIMEDOUT)
5666 return err;
5667
5668 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
5669 if (ocs_value != OCS_SUCCESS)
5670 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5671 __func__, ocs_value);
5672 else if (tm_response)
5673 *tm_response = be32_to_cpu(treq.output_param1) &
5674 MASK_TM_SERVICE_RESP;
5675 return err;
5676}
5677
5678/**
5679 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
5680 * @hba: per-adapter instance
5681 * @req_upiu: upiu request
5682 * @rsp_upiu: upiu reply
5683 * @desc_buff: pointer to descriptor buffer, NULL if NA
5684 * @buff_len: descriptor size, 0 if NA
5685 * @cmd_type: device management command type, DEV_CMD_TYPE_NOP or DEV_CMD_TYPE_QUERY
5686 * @desc_op: descriptor operation
5687 *
5688 * Those types of requests use the UTP Transfer Request Descriptor - utrd.
5689 * Therefore, they "ride" the device management infrastructure: they use
5690 * its tag and task work queues.
5691 *
5692 * Since there is only one available tag for device management commands,
5693 * the caller is expected to hold the hba->dev_cmd.lock mutex.
5694 */
5695static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
5696 struct utp_upiu_req *req_upiu,
5697 struct utp_upiu_req *rsp_upiu,
5698 u8 *desc_buff, int *buff_len,
5699 int cmd_type,
5700 enum query_opcode desc_op)
5701{
5702 struct ufshcd_lrb *lrbp;
5703 int err = 0;
5704 int tag;
5705 struct completion wait;
5706 unsigned long flags;
5707 u32 upiu_flags;
5708
5709 down_read(&hba->clk_scaling_lock);
5710
5711 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
5712
5713 init_completion(&wait);
5714 lrbp = &hba->lrb[tag];
5715 WARN_ON(lrbp->cmd);
5716
5717 lrbp->cmd = NULL;
5718 lrbp->sense_bufflen = 0;
5719 lrbp->sense_buffer = NULL;
5720 lrbp->task_tag = tag;
5721 lrbp->lun = 0;
5722 lrbp->intr_cmd = true;
5723 hba->dev_cmd.type = cmd_type;
5724
5725 switch (hba->ufs_version) {
5726 case UFSHCI_VERSION_10:
5727 case UFSHCI_VERSION_11:
5728 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
5729 break;
5730 default:
5731 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
5732 break;
5733 }
5734
5735 /* update the task tag in the request upiu */
5736 req_upiu->header.dword_0 |= cpu_to_be32(tag);
5737
5738 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
5739
5740 /* just copy the upiu request as it is */
5741 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
5742 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
5743 /* The Data Segment Area is optional depending upon the query
5744		 * function value. For WRITE DESCRIPTOR, the data segment
5745		 * follows right after the Transaction Specific Fields (tsf).
5746 */
5747 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
5748 *buff_len = 0;
5749 }
5750
5751 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5752
5753 hba->dev_cmd.complete = &wait;
5754
5755 /* Make sure descriptors are ready before ringing the doorbell */
5756 wmb();
5757 spin_lock_irqsave(hba->host->host_lock, flags);
5758 ufshcd_send_command(hba, tag);
5759 spin_unlock_irqrestore(hba->host->host_lock, flags);
5760
5761 /*
5762 * ignore the returning value here - ufshcd_check_query_response is
5763 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
5764 * read the response directly ignoring all errors.
5765 */
5766 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
5767
5768 /* just copy the upiu response as it is */
5769 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
5770
5771 ufshcd_put_dev_cmd_tag(hba, tag);
5772 wake_up(&hba->dev_cmd.tag_wq);
5773 up_read(&hba->clk_scaling_lock);
5774 return err;
5775}
5776
5777/**
5778 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
5779 * @hba: per-adapter instance
5780 * @req_upiu: upiu request
5781 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
5782 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
5783 * @desc_buff: pointer to descriptor buffer, NULL if NA
5784 * @buff_len: descriptor size, 0 if NA
5785 * @desc_op: descriptor operation
5786 *
5787 * Supports UTP Transfer requests (nop and query) and UTP Task
5788 * Management requests.
5789 * It is up to the caller to fill the upiu content properly, as it will
5790 * be copied without any further input validation.
5791 */
5792int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
5793 struct utp_upiu_req *req_upiu,
5794 struct utp_upiu_req *rsp_upiu,
5795 int msgcode,
5796 u8 *desc_buff, int *buff_len,
5797 enum query_opcode desc_op)
5798{
5799 int err;
5800 int cmd_type = DEV_CMD_TYPE_QUERY;
5801 struct utp_task_req_desc treq = { { 0 }, };
5802 int ocs_value;
5803 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
5804
5805 if (desc_buff && desc_op != UPIU_QUERY_OPCODE_WRITE_DESC) {
5806 err = -ENOTSUPP;
5807 goto out;
5808 }
5809
5810 switch (msgcode) {
5811 case UPIU_TRANSACTION_NOP_OUT:
5812 cmd_type = DEV_CMD_TYPE_NOP;
5813 /* fall through */
5814 case UPIU_TRANSACTION_QUERY_REQ:
5815 ufshcd_hold(hba, false);
5816 mutex_lock(&hba->dev_cmd.lock);
5817 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
5818 desc_buff, buff_len,
5819 cmd_type, desc_op);
5820 mutex_unlock(&hba->dev_cmd.lock);
5821 ufshcd_release(hba);
5822
5823 break;
5824 case UPIU_TRANSACTION_TASK_REQ:
5825 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5826 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5827
5828 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
5829
5830 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
5831 if (err == -ETIMEDOUT)
5832 break;
5833
5834 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
5835 if (ocs_value != OCS_SUCCESS) {
5836 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
5837 ocs_value);
5838 break;
5839 }
5840
5841 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
5842
5843 break;
5844 default:
5845 err = -EINVAL;
5846
5847 break;
5848 }
5849
5850out:
5851 return err;
5852}
5853
5854/**
5698 * ufshcd_eh_device_reset_handler - device reset handler registered to 5855 * ufshcd_eh_device_reset_handler - device reset handler registered to
5699 * scsi layer. 5856 * scsi layer.
5700 * @cmd: SCSI command pointer 5857 * @cmd: SCSI command pointer
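
With the split above, __ufshcd_issue_tm_cmd() only moves a caller-built UTMRD
into and out of the shared slot, while ufshcd_issue_tm_cmd() keeps the old
external contract. A typical call site (quoted from memory of the unchanged
error handlers in this file, so treat the exact shape as approximate):

	u8 resp = 0xF;
	int err;

	err = ufshcd_issue_tm_cmd(hba, lun_id, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;	/* service response carries the failure detail */
	}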
@@ -6652,6 +6809,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
6652 hba->clk_scaling.is_allowed = true; 6809 hba->clk_scaling.is_allowed = true;
6653 } 6810 }
6654 6811
6812 ufs_bsg_probe(hba);
6813
6655 scsi_scan_host(hba->host); 6814 scsi_scan_host(hba->host);
6656 pm_runtime_put_sync(hba->dev); 6815 pm_runtime_put_sync(hba->dev);
6657 } 6816 }
@@ -6666,6 +6825,7 @@ out:
6666 */ 6825 */
6667 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { 6826 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6668 pm_runtime_put_sync(hba->dev); 6827 pm_runtime_put_sync(hba->dev);
6828 ufshcd_exit_clk_scaling(hba);
6669 ufshcd_hba_exit(hba); 6829 ufshcd_hba_exit(hba);
6670 } 6830 }
6671 6831
@@ -7201,12 +7361,9 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
7201 ufshcd_variant_hba_exit(hba); 7361 ufshcd_variant_hba_exit(hba);
7202 ufshcd_setup_vreg(hba, false); 7362 ufshcd_setup_vreg(hba, false);
7203 ufshcd_suspend_clkscaling(hba); 7363 ufshcd_suspend_clkscaling(hba);
7204 if (ufshcd_is_clkscaling_supported(hba)) { 7364 if (ufshcd_is_clkscaling_supported(hba))
7205 if (hba->devfreq) 7365 if (hba->devfreq)
7206 ufshcd_suspend_clkscaling(hba); 7366 ufshcd_suspend_clkscaling(hba);
7207 destroy_workqueue(hba->clk_scaling.workq);
7208 ufshcd_devfreq_remove(hba);
7209 }
7210 ufshcd_setup_clocks(hba, false); 7367 ufshcd_setup_clocks(hba, false);
7211 ufshcd_setup_hba_vreg(hba, false); 7368 ufshcd_setup_hba_vreg(hba, false);
7212 hba->is_powered = false; 7369 hba->is_powered = false;
@@ -7875,12 +8032,14 @@ EXPORT_SYMBOL(ufshcd_shutdown);
7875 */ 8032 */
7876void ufshcd_remove(struct ufs_hba *hba) 8033void ufshcd_remove(struct ufs_hba *hba)
7877{ 8034{
8035 ufs_bsg_remove(hba);
7878 ufs_sysfs_remove_nodes(hba->dev); 8036 ufs_sysfs_remove_nodes(hba->dev);
7879 scsi_remove_host(hba->host); 8037 scsi_remove_host(hba->host);
7880 /* disable interrupts */ 8038 /* disable interrupts */
7881 ufshcd_disable_intr(hba, hba->intr_mask); 8039 ufshcd_disable_intr(hba, hba->intr_mask);
7882 ufshcd_hba_stop(hba, true); 8040 ufshcd_hba_stop(hba, true);
7883 8041
8042 ufshcd_exit_clk_scaling(hba);
7884 ufshcd_exit_clk_gating(hba); 8043 ufshcd_exit_clk_gating(hba);
7885 if (ufshcd_is_clkscaling_supported(hba)) 8044 if (ufshcd_is_clkscaling_supported(hba))
7886 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); 8045 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
@@ -8027,7 +8186,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8027 host->max_lun = UFS_MAX_LUNS; 8186 host->max_lun = UFS_MAX_LUNS;
8028 host->max_channel = UFSHCD_MAX_CHANNEL; 8187 host->max_channel = UFSHCD_MAX_CHANNEL;
8029 host->unique_id = host->host_no; 8188 host->unique_id = host->host_no;
8030 host->max_cmd_len = MAX_CDB_SIZE; 8189 host->max_cmd_len = UFS_CDB_SIZE;
8031 8190
8032 hba->max_pwr_info.is_valid = false; 8191 hba->max_pwr_info.is_valid = false;
8033 8192
@@ -8052,6 +8211,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8052 8211
8053 ufshcd_init_clk_gating(hba); 8212 ufshcd_init_clk_gating(hba);
8054 8213
8214 ufshcd_init_clk_scaling(hba);
8215
8055 /* 8216 /*
8056 * In order to avoid any spurious interrupt immediately after 8217 * In order to avoid any spurious interrupt immediately after
8057 * registering UFS controller interrupt handler, clear any pending UFS 8218 * registering UFS controller interrupt handler, clear any pending UFS
@@ -8090,21 +8251,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8090 goto out_remove_scsi_host; 8251 goto out_remove_scsi_host;
8091 } 8252 }
8092 8253
8093 if (ufshcd_is_clkscaling_supported(hba)) {
8094 char wq_name[sizeof("ufs_clkscaling_00")];
8095
8096 INIT_WORK(&hba->clk_scaling.suspend_work,
8097 ufshcd_clk_scaling_suspend_work);
8098 INIT_WORK(&hba->clk_scaling.resume_work,
8099 ufshcd_clk_scaling_resume_work);
8100
8101 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
8102 host->host_no);
8103 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
8104
8105 ufshcd_clkscaling_init_sysfs(hba);
8106 }
8107
8108 /* 8254 /*
8109 * Set the default power management level for runtime and system PM. 8255 * Set the default power management level for runtime and system PM.
8110 * Default power saving mode is to keep UFS link in Hibern8 state 8256 * Default power saving mode is to keep UFS link in Hibern8 state
@@ -8142,6 +8288,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8142out_remove_scsi_host: 8288out_remove_scsi_host:
8143 scsi_remove_host(hba->host); 8289 scsi_remove_host(hba->host);
8144exit_gating: 8290exit_gating:
8291 ufshcd_exit_clk_scaling(hba);
8145 ufshcd_exit_clk_gating(hba); 8292 ufshcd_exit_clk_gating(hba);
8146out_disable: 8293out_disable:
8147 hba->is_irq_enabled = false; 8294 hba->is_irq_enabled = false;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 33fdd3f281ae..1a1c2b487a4e 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -702,6 +702,9 @@ struct ufs_hba {
702 struct rw_semaphore clk_scaling_lock; 702 struct rw_semaphore clk_scaling_lock;
703 struct ufs_desc_size desc_size; 703 struct ufs_desc_size desc_size;
704 atomic_t scsi_block_reqs_cnt; 704 atomic_t scsi_block_reqs_cnt;
705
706 struct device bsg_dev;
707 struct request_queue *bsg_queue;
705}; 708};
706 709
707/* Returns true if clocks can be gated. Otherwise false */ 710/* Returns true if clocks can be gated. Otherwise false */
@@ -892,6 +895,15 @@ int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
892 895
893u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba); 896u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
894 897
898int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
899
900int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
901 struct utp_upiu_req *req_upiu,
902 struct utp_upiu_req *rsp_upiu,
903 int msgcode,
904 u8 *desc_buff, int *buff_len,
905 enum query_opcode desc_op);
906
895/* Wrapper functions for safely calling variant operations */ 907/* Wrapper functions for safely calling variant operations */
896static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) 908static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
897{ 909{
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index bb5d9c7f3353..6fa889de5ee5 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -433,22 +433,25 @@ struct utp_transfer_req_desc {
433 __le16 prd_table_offset; 433 __le16 prd_table_offset;
434}; 434};
435 435
436/** 436/*
437 * struct utp_task_req_desc - UTMRD structure 437 * UTMRD structure.
438 * @header: UTMRD header DW-0 to DW-3
439 * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11
440 * @task_rsp_upiu: Pointer to task response UPIU DW12 to DW-19
441 */ 438 */
442struct utp_task_req_desc { 439struct utp_task_req_desc {
443
444 /* DW 0-3 */ 440 /* DW 0-3 */
445 struct request_desc_header header; 441 struct request_desc_header header;
446 442
447 /* DW 4-11 */ 443 /* DW 4-11 - Task request UPIU structure */
448 __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; 444 struct utp_upiu_header req_header;
449 445 __be32 input_param1;
450 /* DW 12-19 */ 446 __be32 input_param2;
451 __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; 447 __be32 input_param3;
448 __be32 __reserved1[2];
449
450 /* DW 12-19 - Task Management Response UPIU structure */
451 struct utp_upiu_header rsp_header;
452 __be32 output_param1;
453 __be32 output_param2;
454 __be32 __reserved2[3];
452}; 455};
453 456
454#endif /* End of Header */ 457#endif /* End of Header */
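
Replacing the two opaque __le32 arrays with named, endian-annotated fields
lets callers address TM request and response parameters directly (and lets
sparse check the accessors). Decoding a completed descriptor then reduces to
the lines already seen in ufshcd_issue_tm_cmd() above:

	/* Decode sketch for a UTMRD copied back from hba->utmrdl_base_addr;
	 * MASK_OCS and MASK_TM_SERVICE_RESP are pre-existing masks. */
	int ocs = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
	u8 tm_resp = be32_to_cpu(treq.output_param1) & MASK_TM_SERVICE_RESP;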
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 0cd947f78b5b..6e491023fdd8 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -372,9 +372,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
372 pvscsi_create_sg(ctx, sg, segs); 372 pvscsi_create_sg(ctx, sg, segs);
373 373
374 e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; 374 e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
375 ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl, 375 ctx->sglPA = dma_map_single(&adapter->dev->dev,
376 SGL_SIZE, PCI_DMA_TODEVICE); 376 ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
377 if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) { 377 if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
378 scmd_printk(KERN_ERR, cmd, 378 scmd_printk(KERN_ERR, cmd,
379 "vmw_pvscsi: Failed to map ctx sglist for DMA.\n"); 379 "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
380 scsi_dma_unmap(cmd); 380 scsi_dma_unmap(cmd);
@@ -389,9 +389,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
389 * In case there is no S/G list, scsi_sglist points 389 * In case there is no S/G list, scsi_sglist points
390 * directly to the buffer. 390 * directly to the buffer.
391 */ 391 */
392 ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen, 392 ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
393 cmd->sc_data_direction); 393 cmd->sc_data_direction);
394 if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) { 394 if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
395 scmd_printk(KERN_ERR, cmd, 395 scmd_printk(KERN_ERR, cmd,
396 "vmw_pvscsi: Failed to map direct data buffer for DMA.\n"); 396 "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
397 return -ENOMEM; 397 return -ENOMEM;
@@ -417,23 +417,23 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
417 if (count != 0) { 417 if (count != 0) {
418 scsi_dma_unmap(cmd); 418 scsi_dma_unmap(cmd);
419 if (ctx->sglPA) { 419 if (ctx->sglPA) {
420 pci_unmap_single(adapter->dev, ctx->sglPA, 420 dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
421 SGL_SIZE, PCI_DMA_TODEVICE); 421 SGL_SIZE, DMA_TO_DEVICE);
422 ctx->sglPA = 0; 422 ctx->sglPA = 0;
423 } 423 }
424 } else 424 } else
425 pci_unmap_single(adapter->dev, ctx->dataPA, bufflen, 425 dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
426 cmd->sc_data_direction); 426 bufflen, cmd->sc_data_direction);
427 } 427 }
428 if (cmd->sense_buffer) 428 if (cmd->sense_buffer)
429 pci_unmap_single(adapter->dev, ctx->sensePA, 429 dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
430 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); 430 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
431} 431}
432 432
433static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter) 433static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
434{ 434{
435 adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE, 435 adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
436 &adapter->ringStatePA); 436 &adapter->ringStatePA, GFP_KERNEL);
437 if (!adapter->rings_state) 437 if (!adapter->rings_state)
438 return -ENOMEM; 438 return -ENOMEM;
439 439
@@ -441,17 +441,17 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
441 pvscsi_ring_pages); 441 pvscsi_ring_pages);
442 adapter->req_depth = adapter->req_pages 442 adapter->req_depth = adapter->req_pages
443 * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; 443 * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
444 adapter->req_ring = pci_alloc_consistent(adapter->dev, 444 adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
445 adapter->req_pages * PAGE_SIZE, 445 adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
446 &adapter->reqRingPA); 446 GFP_KERNEL);
447 if (!adapter->req_ring) 447 if (!adapter->req_ring)
448 return -ENOMEM; 448 return -ENOMEM;
449 449
450 adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, 450 adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
451 pvscsi_ring_pages); 451 pvscsi_ring_pages);
452 adapter->cmp_ring = pci_alloc_consistent(adapter->dev, 452 adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
453 adapter->cmp_pages * PAGE_SIZE, 453 adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
454 &adapter->cmpRingPA); 454 GFP_KERNEL);
455 if (!adapter->cmp_ring) 455 if (!adapter->cmp_ring)
456 return -ENOMEM; 456 return -ENOMEM;
457 457
@@ -464,9 +464,9 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
464 464
465 adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, 465 adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
466 pvscsi_msg_ring_pages); 466 pvscsi_msg_ring_pages);
467 adapter->msg_ring = pci_alloc_consistent(adapter->dev, 467 adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
468 adapter->msg_pages * PAGE_SIZE, 468 adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
469 &adapter->msgRingPA); 469 GFP_KERNEL);
470 if (!adapter->msg_ring) 470 if (!adapter->msg_ring)
471 return -ENOMEM; 471 return -ENOMEM;
472 BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); 472 BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
@@ -708,10 +708,10 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
708 e->lun[1] = sdev->lun; 708 e->lun[1] = sdev->lun;
709 709
710 if (cmd->sense_buffer) { 710 if (cmd->sense_buffer) {
711 ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer, 711 ctx->sensePA = dma_map_single(&adapter->dev->dev,
712 SCSI_SENSE_BUFFERSIZE, 712 cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
713 PCI_DMA_FROMDEVICE); 713 DMA_FROM_DEVICE);
714 if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) { 714 if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
715 scmd_printk(KERN_ERR, cmd, 715 scmd_printk(KERN_ERR, cmd,
716 "vmw_pvscsi: Failed to map sense buffer for DMA.\n"); 716 "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
717 ctx->sensePA = 0; 717 ctx->sensePA = 0;
@@ -740,9 +740,9 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
740 740
741 if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) { 741 if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
742 if (cmd->sense_buffer) { 742 if (cmd->sense_buffer) {
743 pci_unmap_single(adapter->dev, ctx->sensePA, 743 dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
744 SCSI_SENSE_BUFFERSIZE, 744 SCSI_SENSE_BUFFERSIZE,
745 PCI_DMA_FROMDEVICE); 745 DMA_FROM_DEVICE);
746 ctx->sensePA = 0; 746 ctx->sensePA = 0;
747 } 747 }
748 return -ENOMEM; 748 return -ENOMEM;
@@ -1218,21 +1218,21 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
1218 } 1218 }
1219 1219
1220 if (adapter->rings_state) 1220 if (adapter->rings_state)
1221 pci_free_consistent(adapter->dev, PAGE_SIZE, 1221 dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
1222 adapter->rings_state, adapter->ringStatePA); 1222 adapter->rings_state, adapter->ringStatePA);
1223 1223
1224 if (adapter->req_ring) 1224 if (adapter->req_ring)
1225 pci_free_consistent(adapter->dev, 1225 dma_free_coherent(&adapter->dev->dev,
1226 adapter->req_pages * PAGE_SIZE, 1226 adapter->req_pages * PAGE_SIZE,
1227 adapter->req_ring, adapter->reqRingPA); 1227 adapter->req_ring, adapter->reqRingPA);
1228 1228
1229 if (adapter->cmp_ring) 1229 if (adapter->cmp_ring)
1230 pci_free_consistent(adapter->dev, 1230 dma_free_coherent(&adapter->dev->dev,
1231 adapter->cmp_pages * PAGE_SIZE, 1231 adapter->cmp_pages * PAGE_SIZE,
1232 adapter->cmp_ring, adapter->cmpRingPA); 1232 adapter->cmp_ring, adapter->cmpRingPA);
1233 1233
1234 if (adapter->msg_ring) 1234 if (adapter->msg_ring)
1235 pci_free_consistent(adapter->dev, 1235 dma_free_coherent(&adapter->dev->dev,
1236 adapter->msg_pages * PAGE_SIZE, 1236 adapter->msg_pages * PAGE_SIZE,
1237 adapter->msg_ring, adapter->msgRingPA); 1237 adapter->msg_ring, adapter->msgRingPA);
1238} 1238}
@@ -1291,8 +1291,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
1291 u32 numPhys = 16; 1291 u32 numPhys = 16;
1292 1292
1293 dev = pvscsi_dev(adapter); 1293 dev = pvscsi_dev(adapter);
1294 config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE, 1294 config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
1295 &configPagePA); 1295 &configPagePA, GFP_KERNEL);
1296 if (!config_page) { 1296 if (!config_page) {
1297 dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n"); 1297 dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
1298 goto exit; 1298 goto exit;
@@ -1326,7 +1326,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
1326 } else 1326 } else
1327 dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n", 1327 dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
1328 header->hostStatus, header->scsiStatus); 1328 header->hostStatus, header->scsiStatus);
1329 pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA); 1329 dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
1330 configPagePA);
1330exit: 1331exit:
1331 return numPhys; 1332 return numPhys;
1332} 1333}
@@ -1346,11 +1347,9 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1346 if (pci_enable_device(pdev)) 1347 if (pci_enable_device(pdev))
1347 return error; 1348 return error;
1348 1349
1349 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 && 1350 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1350 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
1351 printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n"); 1351 printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
1352 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 && 1352 } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
1353 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
1354 printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n"); 1353 printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
1355 } else { 1354 } else {
1356 printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n"); 1355 printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
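
The vmw_pvscsi hunks above are one instance of the series-wide conversion from
the legacy pci_* DMA wrappers to the generic DMA API: the pci_dev argument
becomes &pdev->dev, PCI_DMA_* directions become DMA_*, and coherent
allocations gain an explicit GFP flag (the old wrapper implied GFP_ATOMIC;
GFP_KERNEL is fine here since allocation happens at probe time). A hedged
sketch of the pattern on hypothetical helpers:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *example_ring_alloc(struct pci_dev *pdev, size_t size,
				dma_addr_t *dma)
{
	/* was: pci_alloc_consistent(pdev, size, dma) */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static int example_buf_map(struct pci_dev *pdev, void *buf, size_t len,
			   dma_addr_t *addr)
{
	/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
	*addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

	/* was: pci_dma_mapping_error(pdev, *addr) */
	return dma_mapping_error(&pdev->dev, *addr) ? -ENOMEM : 0;
}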
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index bb70882e6b56..ca8e3abeb2c7 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -9,8 +9,6 @@
9 * 9 *
10 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for 10 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
11 * Blizzard 1230 DMA and probe function fixes 11 * Blizzard 1230 DMA and probe function fixes
12 *
13 * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
14 */ 12 */
15/* 13/*
16 * ZORRO bus code from: 14 * ZORRO bus code from:
@@ -159,7 +157,6 @@ struct fastlane_dma_registers {
159struct zorro_esp_priv { 157struct zorro_esp_priv {
160 struct esp *esp; /* our ESP instance - for Scsi_host* */ 158 struct esp *esp; /* our ESP instance - for Scsi_host* */
161 void __iomem *board_base; /* virtual address (Zorro III board) */ 159 void __iomem *board_base; /* virtual address (Zorro III board) */
162 int error; /* PIO error flag */
163 int zorro3; /* board is Zorro III */ 160 int zorro3; /* board is Zorro III */
164 unsigned char ctrl_data; /* shadow copy of ctrl_reg */ 161 unsigned char ctrl_data; /* shadow copy of ctrl_reg */
165}; 162};
@@ -182,30 +179,6 @@ static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
182 return readb(esp->regs + (reg * 4UL)); 179 return readb(esp->regs + (reg * 4UL));
183} 180}
184 181
185static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
186 size_t sz, int dir)
187{
188 return dma_map_single(esp->dev, buf, sz, dir);
189}
190
191static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
192 int num_sg, int dir)
193{
194 return dma_map_sg(esp->dev, sg, num_sg, dir);
195}
196
197static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
198 size_t sz, int dir)
199{
200 dma_unmap_single(esp->dev, addr, sz, dir);
201}
202
203static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
204 int num_sg, int dir)
205{
206 dma_unmap_sg(esp->dev, sg, num_sg, dir);
207}
208
209static int zorro_esp_irq_pending(struct esp *esp) 182static int zorro_esp_irq_pending(struct esp *esp)
210{ 183{
211 /* check ESP status register; DMA has no status reg. */ 184 /* check ESP status register; DMA has no status reg. */
@@ -245,7 +218,7 @@ static int fastlane_esp_irq_pending(struct esp *esp)
245static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr, 218static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
246 u32 dma_len) 219 u32 dma_len)
247{ 220{
248 return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len; 221 return dma_len > 0xFFFF ? 0xFFFF : dma_len;
249} 222}
250 223
251static void zorro_esp_reset_dma(struct esp *esp) 224static void zorro_esp_reset_dma(struct esp *esp)
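
The lower clamp is consistent with the rest of this patch: the per-board
send_dma_cmd routines below no longer write ESP_TCHI, so only the low 16 bits
of the transfer counter get programmed and a single transfer must stay below
64 KiB. Restated as an equivalent sketch:

	/* Equivalent form: at most 16 bits of count, since ESP_TCLOW and
	 * ESP_TCMED are the only counter registers still written. */
	return min_t(u32, dma_len, 0xFFFF);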
@@ -274,192 +247,29 @@ static void fastlane_esp_dma_invalidate(struct esp *esp)
274 z_writel(0, zep->board_base); 247 z_writel(0, zep->board_base);
275} 248}
276 249
277/*
278 * Programmed IO routines follow.
279 */
280
281static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
282{
283 int i = 500000;
284
285 do {
286 unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
287 & ESP_FF_FBYTES;
288
289 if (fbytes)
290 return fbytes;
291
292 udelay(2);
293 } while (--i);
294
295 pr_err("FIFO is empty (sreg %02x)\n",
296 zorro_esp_read8(esp, ESP_STATUS));
297 return 0;
298}
299
300static inline int zorro_esp_wait_for_intr(struct esp *esp)
301{
302 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
303 int i = 500000;
304
305 do {
306 esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
307 if (esp->sreg & ESP_STAT_INTR)
308 return 0;
309
310 udelay(2);
311 } while (--i);
312
313 pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
314 zep->error = 1;
315 return 1;
316}
317
318/*
319 * PIO macros as used in mac_esp.c.
320 * Note that addr and fifo arguments are local-scope variables declared
321 * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
322 * and addr and fifo are referenced in each use of the macros so there
323 * is no need to pass them as macro parameters.
324 */
325#define ZORRO_ESP_PIO_LOOP(operands, reg1) \
326 asm volatile ( \
327 "1: moveb " operands "\n" \
328 " subqw #1,%1 \n" \
329 " jbne 1b \n" \
330 : "+a" (addr), "+r" (reg1) \
331 : "a" (fifo));
332
333#define ZORRO_ESP_PIO_FILL(operands, reg1) \
334 asm volatile ( \
335 " moveb " operands "\n" \
336 " moveb " operands "\n" \
337 " moveb " operands "\n" \
338 " moveb " operands "\n" \
339 " moveb " operands "\n" \
340 " moveb " operands "\n" \
341 " moveb " operands "\n" \
342 " moveb " operands "\n" \
343 " moveb " operands "\n" \
344 " moveb " operands "\n" \
345 " moveb " operands "\n" \
346 " moveb " operands "\n" \
347 " moveb " operands "\n" \
348 " moveb " operands "\n" \
349 " moveb " operands "\n" \
350 " moveb " operands "\n" \
351 " subqw #8,%1 \n" \
352 " subqw #8,%1 \n" \
353 : "+a" (addr), "+r" (reg1) \
354 : "a" (fifo));
355
356#define ZORRO_ESP_FIFO_SIZE 16
357
358static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
359 u32 dma_count, int write, u8 cmd)
360{
361 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
362 u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
363 u8 phase = esp->sreg & ESP_STAT_PMASK;
364
365 cmd &= ~ESP_CMD_DMA;
366
367 if (write) {
368 u8 *dst = (u8 *)addr;
369 u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
370
371 scsi_esp_cmd(esp, cmd);
372
373 while (1) {
374 if (!zorro_esp_wait_for_fifo(esp))
375 break;
376
377 *dst++ = zorro_esp_read8(esp, ESP_FDATA);
378 --esp_count;
379
380 if (!esp_count)
381 break;
382
383 if (zorro_esp_wait_for_intr(esp))
384 break;
385
386 if ((esp->sreg & ESP_STAT_PMASK) != phase)
387 break;
388
389 esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
390 if (esp->ireg & mask) {
391 zep->error = 1;
392 break;
393 }
394
395 if (phase == ESP_MIP)
396 scsi_esp_cmd(esp, ESP_CMD_MOK);
397
398 scsi_esp_cmd(esp, ESP_CMD_TI);
399 }
400 } else { /* unused, as long as we only handle MIP here */
401 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
402
403 if (esp_count >= ZORRO_ESP_FIFO_SIZE)
404 ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
405 else
406 ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
407
408 scsi_esp_cmd(esp, cmd);
409
410 while (esp_count) {
411 unsigned int n;
412
413 if (zorro_esp_wait_for_intr(esp))
414 break;
415
416 if ((esp->sreg & ESP_STAT_PMASK) != phase)
417 break;
418
419 esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
420 if (esp->ireg & ~ESP_INTR_BSERV) {
421 zep->error = 1;
422 break;
423 }
424
425 n = ZORRO_ESP_FIFO_SIZE -
426 (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
427 if (n > esp_count)
428 n = esp_count;
429
430 if (n == ZORRO_ESP_FIFO_SIZE)
431 ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
432 else {
433 esp_count -= n;
434 ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
435 }
436
437 scsi_esp_cmd(esp, ESP_CMD_TI);
438 }
439 }
440}
441
442/* Blizzard 1230/60 SCSI-IV DMA */ 250/* Blizzard 1230/60 SCSI-IV DMA */
443 251
444static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr, 252static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
445 u32 esp_count, u32 dma_count, int write, u8 cmd) 253 u32 esp_count, u32 dma_count, int write, u8 cmd)
446{ 254{
447 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
448 struct blz1230_dma_registers __iomem *dregs = esp->dma_regs; 255 struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
449 u8 phase = esp->sreg & ESP_STAT_PMASK; 256 u8 phase = esp->sreg & ESP_STAT_PMASK;
450 257
451 zep->error = 0;
452 /* 258 /*
453 * Use PIO if transferring message bytes to esp->command_block_dma. 259 * Use PIO if transferring message bytes to esp->command_block_dma.
454 * PIO requires a virtual address, so substitute esp->command_block 260 * PIO requires a virtual address, so substitute esp->command_block
455 * for addr. 261 * for addr.
456 */ 262 */
457 if (phase == ESP_MIP && addr == esp->command_block_dma) { 263 if (phase == ESP_MIP && addr == esp->command_block_dma) {
458 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block, 264 esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
459 esp_count, dma_count, write, cmd); 265 dma_count, write, cmd);
460 return; 266 return;
461 } 267 }
462 268
269 /* Clear the results of a possible prior esp->ops->send_dma_cmd() */
270 esp->send_cmd_error = 0;
271 esp->send_cmd_residual = 0;
272
463 if (write) 273 if (write)
464 /* DMA receive */ 274 /* DMA receive */
465 dma_sync_single_for_device(esp->dev, addr, esp_count, 275 dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -484,7 +294,6 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
484 scsi_esp_cmd(esp, ESP_CMD_DMA); 294 scsi_esp_cmd(esp, ESP_CMD_DMA);
485 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); 295 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
486 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); 296 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
487 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
488 297
489 scsi_esp_cmd(esp, cmd); 298 scsi_esp_cmd(esp, cmd);
490} 299}
@@ -494,18 +303,19 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
494static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr, 303static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
495 u32 esp_count, u32 dma_count, int write, u8 cmd) 304 u32 esp_count, u32 dma_count, int write, u8 cmd)
496{ 305{
497 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
498 struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs; 306 struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
499 u8 phase = esp->sreg & ESP_STAT_PMASK; 307 u8 phase = esp->sreg & ESP_STAT_PMASK;
500 308
501 zep->error = 0;
502 /* Use PIO if transferring message bytes to esp->command_block_dma */ 309 /* Use PIO if transferring message bytes to esp->command_block_dma */
503 if (phase == ESP_MIP && addr == esp->command_block_dma) { 310 if (phase == ESP_MIP && addr == esp->command_block_dma) {
504 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block, 311 esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
505 esp_count, dma_count, write, cmd); 312 dma_count, write, cmd);
506 return; 313 return;
507 } 314 }
508 315
316 esp->send_cmd_error = 0;
317 esp->send_cmd_residual = 0;
318
509 if (write) 319 if (write)
510 /* DMA receive */ 320 /* DMA receive */
511 dma_sync_single_for_device(esp->dev, addr, esp_count, 321 dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -529,7 +339,6 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
529 scsi_esp_cmd(esp, ESP_CMD_DMA); 339 scsi_esp_cmd(esp, ESP_CMD_DMA);
530 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); 340 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
531 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); 341 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
532 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
533 342
534 scsi_esp_cmd(esp, cmd); 343 scsi_esp_cmd(esp, cmd);
535} 344}
@@ -539,18 +348,19 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
539static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr, 348static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
540 u32 esp_count, u32 dma_count, int write, u8 cmd) 349 u32 esp_count, u32 dma_count, int write, u8 cmd)
541{ 350{
542 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
543 struct blz2060_dma_registers __iomem *dregs = esp->dma_regs; 351 struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
544 u8 phase = esp->sreg & ESP_STAT_PMASK; 352 u8 phase = esp->sreg & ESP_STAT_PMASK;
545 353
546 zep->error = 0;
547 /* Use PIO if transferring message bytes to esp->command_block_dma */ 354 /* Use PIO if transferring message bytes to esp->command_block_dma */
548 if (phase == ESP_MIP && addr == esp->command_block_dma) { 355 if (phase == ESP_MIP && addr == esp->command_block_dma) {
549 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block, 356 esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
550 esp_count, dma_count, write, cmd); 357 dma_count, write, cmd);
551 return; 358 return;
552 } 359 }
553 360
361 esp->send_cmd_error = 0;
362 esp->send_cmd_residual = 0;
363
554 if (write) 364 if (write)
555 /* DMA receive */ 365 /* DMA receive */
556 dma_sync_single_for_device(esp->dev, addr, esp_count, 366 dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -574,7 +384,6 @@ static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
574 scsi_esp_cmd(esp, ESP_CMD_DMA); 384 scsi_esp_cmd(esp, ESP_CMD_DMA);
575 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); 385 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
576 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); 386 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
577 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
578 387
579 scsi_esp_cmd(esp, cmd); 388 scsi_esp_cmd(esp, cmd);
580} 389}
@@ -589,17 +398,18 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
589 u8 phase = esp->sreg & ESP_STAT_PMASK; 398 u8 phase = esp->sreg & ESP_STAT_PMASK;
590 unsigned char *ctrl_data = &zep->ctrl_data; 399 unsigned char *ctrl_data = &zep->ctrl_data;
591 400
592 zep->error = 0;
593 /* Use PIO if transferring message bytes to esp->command_block_dma */ 401 /* Use PIO if transferring message bytes to esp->command_block_dma */
594 if (phase == ESP_MIP && addr == esp->command_block_dma) { 402 if (phase == ESP_MIP && addr == esp->command_block_dma) {
595 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block, 403 esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
596 esp_count, dma_count, write, cmd); 404 dma_count, write, cmd);
597 return; 405 return;
598 } 406 }
599 407
408 esp->send_cmd_error = 0;
409 esp->send_cmd_residual = 0;
410
600 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); 411 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
601 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); 412 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
602 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
603 413
604 if (write) { 414 if (write) {
605 /* DMA receive */ 415 /* DMA receive */
@@ -635,21 +445,21 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
 static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
 		u32 esp_count, u32 dma_count, int write, u8 cmd)
 {
-	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
 	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
 	u8 phase = esp->sreg & ESP_STAT_PMASK;
 
-	zep->error = 0;
 	/* Use PIO if transferring message bytes to esp->command_block_dma */
 	if (phase == ESP_MIP && addr == esp->command_block_dma) {
-		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-			esp_count, dma_count, write, cmd);
+		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+				 dma_count, write, cmd);
 		return;
 	}
 
+	esp->send_cmd_error = 0;
+	esp->send_cmd_residual = 0;
+
 	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
 	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
 	if (write) {
 		/* DMA receive */
@@ -681,17 +491,18 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
 	u8 phase = esp->sreg & ESP_STAT_PMASK;
 	unsigned char *ctrl_data = &zep->ctrl_data;
 
-	zep->error = 0;
 	/* Use PIO if transferring message bytes to esp->command_block_dma */
 	if (phase == ESP_MIP && addr == esp->command_block_dma) {
-		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-			esp_count, dma_count, write, cmd);
+		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+				 dma_count, write, cmd);
 		return;
 	}
 
+	esp->send_cmd_error = 0;
+	esp->send_cmd_residual = 0;
+
 	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
 	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
 	if (write) {
 		/* DMA receive */
@@ -724,14 +535,7 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
 
 static int zorro_esp_dma_error(struct esp *esp)
 {
-	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-
-	/* check for error in case we've been doing PIO */
-	if (zep->error == 1)
-		return 1;
-
-	/* do nothing - there seems to be no way to check for DMA errors */
-	return 0;
+	return esp->send_cmd_error;
 }
 
 /* per-board ESP driver ops */
@@ -739,10 +543,6 @@ static int zorro_esp_dma_error(struct esp *esp)
 static const struct esp_driver_ops blz1230_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= zorro_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -755,10 +555,6 @@ static const struct esp_driver_ops blz1230_esp_ops = {
 static const struct esp_driver_ops blz1230II_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= zorro_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -771,10 +567,6 @@ static const struct esp_driver_ops blz1230II_esp_ops = {
 static const struct esp_driver_ops blz2060_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= zorro_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -787,10 +579,6 @@ static const struct esp_driver_ops blz2060_esp_ops = {
 static const struct esp_driver_ops cyber_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= cyber_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -803,10 +591,6 @@ static const struct esp_driver_ops cyber_esp_ops = {
 static const struct esp_driver_ops cyberII_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= zorro_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -819,10 +603,6 @@ static const struct esp_driver_ops cyberII_esp_ops = {
 static const struct esp_driver_ops fastlane_esp_ops = {
 	.esp_write8		= zorro_esp_write8,
 	.esp_read8		= zorro_esp_read8,
-	.map_single		= zorro_esp_map_single,
-	.map_sg			= zorro_esp_map_sg,
-	.unmap_single		= zorro_esp_unmap_single,
-	.unmap_sg		= zorro_esp_unmap_sg,
 	.irq_pending		= fastlane_esp_irq_pending,
 	.dma_length_limit	= zorro_esp_dma_length_limit,
 	.reset_dma		= zorro_esp_reset_dma,
@@ -1039,6 +819,8 @@ static int zorro_esp_probe(struct zorro_dev *z,
 		goto fail_unmap_fastlane;
 	}
 
+	esp->fifo_reg = esp->regs + ESP_FDATA * 4;
+
 	/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
 	if (zdd->scsi_option) {
 		zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
@@ -1082,7 +864,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
 	}
 
 	/* register the chip */
-	err = scsi_esp_register(esp, &z->dev);
+	err = scsi_esp_register(esp);
 
 	if (err) {
 		err = -ENOMEM;
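The zorro_esp hunks above replace the driver-private zep->error flag and the driver's own PIO routine with the esp_scsi core's send_cmd_error/send_cmd_residual bookkeeping and the shared esp_send_pio_cmd() helper. A minimal sketch of the resulting contract, assuming only the core fields visible in the hunks (the function names here are illustrative, not from the patch):

/* Illustrative sketch, not part of the patch. */
static void example_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				 u32 dma_count, int write, u8 cmd)
{
	/* Message-in bytes go through the shared PIO helper, which
	 * records any failure in esp->send_cmd_error itself. */
	if ((esp->sreg & ESP_STAT_PMASK) == ESP_MIP &&
	    addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* DMA path: clear the core's per-command error state here,
	 * then program the board's DMA engine as in the hunks above. */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;
}

static int example_dma_error(struct esp *esp)
{
	/* .dma_error now just reports what the core recorded. */
	return esp->send_cmd_error;
}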
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 8de16016b6de..71888b979ab5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -598,9 +598,12 @@ out:
 	mutex_unlock(&cdev_list_lock);
 }
 
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
 void cxgbit_free_np(struct iscsi_np *np)
 {
 	struct cxgbit_np *cnp = np->np_context;
+	struct cxgbit_sock *csk, *tmp;
 
 	cnp->com.state = CSK_STATE_DEAD;
 	if (cnp->com.cdev)
@@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np)
 	else
 		cxgbit_free_all_np(cnp);
 
+	spin_lock_bh(&cnp->np_accept_lock);
+	list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+		list_del_init(&csk->accept_node);
+		__cxgbit_free_conn(csk);
+	}
+	spin_unlock_bh(&cnp->np_accept_lock);
+
 	np->np_context = NULL;
 	cxgbit_put_cnp(cnp);
 }
@@ -705,9 +715,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
 		  csk->tid, 600, __func__);
 }
 
-void cxgbit_free_conn(struct iscsi_conn *conn)
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
 {
-	struct cxgbit_sock *csk = conn->context;
+	struct iscsi_conn *conn = csk->conn;
 	bool release = false;
 
 	pr_debug("%s: state %d\n",
@@ -716,7 +726,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
 	spin_lock_bh(&csk->lock);
 	switch (csk->com.state) {
 	case CSK_STATE_ESTABLISHED:
-		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+		if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
 			csk->com.state = CSK_STATE_CLOSING;
 			cxgbit_send_halfclose(csk);
 		} else {
@@ -741,6 +751,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
 	cxgbit_put_csk(csk);
 }
 
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+	__cxgbit_free_conn(conn->context);
+}
+
 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
 {
 	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
@@ -803,6 +818,7 @@ void _cxgbit_free_csk(struct kref *kref)
 	spin_unlock_bh(&cdev->cskq.lock);
 
 	cxgbit_free_skb(csk);
+	cxgbit_put_cnp(csk->cnp);
 	cxgbit_put_cdev(cdev);
 
 	kfree(csk);
@@ -1351,6 +1367,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
 		goto rel_skb;
 	}
 
+	cxgbit_get_cnp(cnp);
 	cxgbit_get_cdev(cdev);
 
 	spin_lock(&cdev->cskq.lock);
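The cxgbit changes above pair a cxgbit_get_cnp() in cxgbit_pass_accept_req() with a cxgbit_put_cnp() in _cxgbit_free_csk(), so the listening endpoint stays alive until every accepted socket is released, and cxgbit_free_np() can safely drain np_accept_list through __cxgbit_free_conn(). A generic, self-contained sketch of that pin-the-parent refcount pattern (illustrative names, not from the patch):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative only: each child pins its parent, mirroring the
 * cxgbit_get_cnp()/cxgbit_put_cnp() pairing added above. */
struct parent {
	struct kref ref;
	struct list_head children;
};

struct child {
	struct parent *p;
	struct list_head node;
};

static void parent_release(struct kref *ref)
{
	kfree(container_of(ref, struct parent, ref));
}

static struct parent *parent_create(void)
{
	struct parent *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	kref_init(&p->ref);
	INIT_LIST_HEAD(&p->children);
	return p;
}

static struct child *child_create(struct parent *p)
{
	struct child *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;
	kref_get(&p->ref);	/* pin parent for the child's lifetime */
	c->p = p;
	list_add_tail(&c->node, &p->children);
	return c;
}

static void child_destroy(struct child *c)
{
	struct parent *p = c->p;

	list_del(&c->node);
	kfree(c);
	kref_put(&p->ref, parent_release);	/* drop the per-child pin */
}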
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index cc756a123fd8..c1d5a173553d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4355,7 +4355,7 @@ int iscsit_close_session(struct iscsi_session *sess)
 	transport_deregister_session(sess->se_sess);
 
 	if (sess->sess_ops->ErrorRecoveryLevel == 2)
-		iscsit_free_connection_recovery_entires(sess);
+		iscsit_free_connection_recovery_entries(sess);
 
 	iscsit_free_all_ooo_cmdsns(sess);
 
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 718fe9a1b709..1193cf884a28 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -770,21 +770,8 @@ void iscsit_handle_time2retain_timeout(struct timer_list *t)
 
 	pr_err("Time2Retain timer expired for SID: %u, cleaning up"
 		" iSCSI session.\n", sess->sid);
-	{
-	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
-	if (tiqn) {
-		spin_lock(&tiqn->sess_err_stats.lock);
-		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
-			(void *)sess->sess_ops->InitiatorName);
-		tiqn->sess_err_stats.last_sess_failure_type =
-				ISCSI_SESS_ERR_CXN_TIMEOUT;
-		tiqn->sess_err_stats.cxn_timeout_errors++;
-		atomic_long_inc(&sess->conn_timeout_errors);
-		spin_unlock(&tiqn->sess_err_stats.lock);
-	}
-	}
 
+	iscsit_fill_cxn_timeout_err_stats(sess);
 	spin_unlock_bh(&se_tpg->session_lock);
 	iscsit_close_session(sess);
 }
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 5efa42b939a1..a211e8154f4c 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1169,15 +1169,21 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
 	na = iscsit_tpg_get_node_attrib(sess);
 
 	if (!sess->sess_ops->ErrorRecoveryLevel) {
-		pr_debug("Unable to recover from DataOut timeout while"
-			" in ERL=0.\n");
+		pr_err("Unable to recover from DataOut timeout while"
+			" in ERL=0, closing iSCSI connection for I_T Nexus"
+			" %s,i,0x%6phN,%s,t,0x%02x\n",
+			sess->sess_ops->InitiatorName, sess->isid,
+			sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
 		goto failure;
 	}
 
 	if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
-		pr_debug("Command ITT: 0x%08x exceeded max retries"
-			" for DataOUT timeout %u, closing iSCSI connection.\n",
-			cmd->init_task_tag, na->dataout_timeout_retries);
+		pr_err("Command ITT: 0x%08x exceeded max retries"
+			" for DataOUT timeout %u, closing iSCSI connection for"
+			" I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+			cmd->init_task_tag, na->dataout_timeout_retries,
+			sess->sess_ops->InitiatorName, sess->isid,
+			sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
 		goto failure;
 	}
 
@@ -1224,6 +1230,7 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
 
 failure:
 	spin_unlock_bh(&cmd->dataout_timeout_lock);
+	iscsit_fill_cxn_timeout_err_stats(sess);
 	iscsit_cause_connection_reinstatement(conn, 0);
 	iscsit_dec_conn_usage_count(conn);
 }
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 8df9c90f3db3..b08b620b1bf0 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -125,7 +125,7 @@ struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
 	return NULL;
 }
 
-void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+void iscsit_free_connection_recovery_entries(struct iscsi_session *sess)
 {
 	struct iscsi_cmd *cmd, *cmd_tmp;
 	struct iscsi_conn_recovery *cr, *cr_tmp;
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 93e180d68d07..a39b0caf2337 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -13,7 +13,7 @@ extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32
 extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
 extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
 			struct iscsi_session *, u16);
-extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern void iscsit_free_connection_recovery_entries(struct iscsi_session *);
 extern int iscsit_remove_active_connection_recovery_entry(
 			struct iscsi_conn_recovery *, struct iscsi_session *);
 extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bb90c80ff388..ae3209efd0e0 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -578,7 +578,7 @@ int iscsi_login_post_auth_non_zero_tsih(
 	}
 
 	/*
-	 * Check for any connection recovery entires containing CID.
+	 * Check for any connection recovery entries containing CID.
 	 * We use the original ExpStatSN sent in the first login request
 	 * to acknowledge commands for the failed connection.
 	 *
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index df0a39811dc2..bb98882bdaa7 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -328,10 +328,10 @@ static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item,
 {
 	struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
 	struct iscsi_login_stats *lstat = &tiqn->login_stats;
-	unsigned char buf[224];
+	unsigned char buf[ISCSI_IQN_LEN];
 
 	spin_lock(&lstat->lock);
-	snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+	snprintf(buf, ISCSI_IQN_LEN, "%s", lstat->last_intr_fail_name[0] ?
 				lstat->last_intr_fail_name : NONE);
 	spin_unlock(&lstat->lock);
 
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 49be1e41290c..1227872227dc 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -915,6 +915,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
 void iscsit_handle_nopin_response_timeout(struct timer_list *t)
 {
 	struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
+	struct iscsi_session *sess = conn->sess;
 
 	iscsit_inc_conn_usage_count(conn);
 
@@ -925,28 +926,14 @@ void iscsit_handle_nopin_response_timeout(struct timer_list *t)
 		return;
 	}
 
-	pr_debug("Did not receive response to NOPIN on CID: %hu on"
-		" SID: %u, failing connection.\n", conn->cid,
-		conn->sess->sid);
+	pr_err("Did not receive response to NOPIN on CID: %hu, failing"
+		" connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+		conn->cid, sess->sess_ops->InitiatorName, sess->isid,
+		sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
 	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
 	spin_unlock_bh(&conn->nopin_timer_lock);
 
-	{
-	struct iscsi_portal_group *tpg = conn->sess->tpg;
-	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
-	if (tiqn) {
-		spin_lock_bh(&tiqn->sess_err_stats.lock);
-		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
-			conn->sess->sess_ops->InitiatorName);
-		tiqn->sess_err_stats.last_sess_failure_type =
-				ISCSI_SESS_ERR_CXN_TIMEOUT;
-		tiqn->sess_err_stats.cxn_timeout_errors++;
-		atomic_long_inc(&conn->sess->conn_timeout_errors);
-		spin_unlock_bh(&tiqn->sess_err_stats.lock);
-	}
-	}
-
+	iscsit_fill_cxn_timeout_err_stats(sess);
 	iscsit_cause_connection_reinstatement(conn, 0);
 	iscsit_dec_conn_usage_count(conn);
 }
@@ -1405,3 +1392,22 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
 
 	return tpg->tpg_tiqn;
 }
+
+void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *sess)
+{
+	struct iscsi_portal_group *tpg = sess->tpg;
+	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+	if (!tiqn)
+		return;
+
+	spin_lock_bh(&tiqn->sess_err_stats.lock);
+	strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+		sess->sess_ops->InitiatorName,
+		sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
+	tiqn->sess_err_stats.last_sess_failure_type =
+		ISCSI_SESS_ERR_CXN_TIMEOUT;
+	tiqn->sess_err_stats.cxn_timeout_errors++;
+	atomic_long_inc(&sess->conn_timeout_errors);
+	spin_unlock_bh(&tiqn->sess_err_stats.lock);
+}
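Besides hoisting the duplicated statistics code into one helper, iscsit_fill_cxn_timeout_err_stats() swaps the old unbounded strcpy() for a strlcpy() sized by the destination, so an over-long InitiatorName can no longer overrun last_sess_fail_rem_name. The pattern in isolation (illustrative fragment, not from the patch):

#include <linux/string.h>

static void example_record_name(const char *src)
{
	char dst[ISCSI_IQN_LEN];	/* 224 bytes, per the new define */

	/* Old form -- can overflow dst if src is too long:
	 *	strcpy(dst, src);
	 * New form -- truncates as needed and always NUL-terminates:
	 */
	strlcpy(dst, src, sizeof(dst));
}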
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index d66dfc212624..68e84803b0a1 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -67,5 +67,6 @@ extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
 extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
 extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
 extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+extern void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *);
 
 #endif   /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index ce1321a5cb7b..b5ed9c377060 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -514,7 +514,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
 	}
 
 	/* Always in 512 byte units for Linux/Block */
-	block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+	block_lba += sg->length >> SECTOR_SHIFT;
 	sectors -= 1;
 }
 
@@ -635,14 +635,15 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 }
 
 static int
-iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
+		 struct sg_mapping_iter *miter)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct blk_integrity *bi;
 	struct bio_integrity_payload *bip;
 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
-	struct scatterlist *sg;
-	int i, rc;
+	int rc;
+	size_t resid, len;
 
 	bi = bdev_get_integrity(ib_dev->ibd_bd);
 	if (!bi) {
@@ -650,31 +651,39 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
 		return -ENODEV;
 	}
 
-	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+	bip = bio_integrity_alloc(bio, GFP_NOIO,
+			min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
 	if (IS_ERR(bip)) {
 		pr_err("Unable to allocate bio_integrity_payload\n");
 		return PTR_ERR(bip);
 	}
 
-	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
-			 dev->prot_length;
-	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+	bip_set_seed(bip, bio->bi_iter.bi_sector);
 
 	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
 		 (unsigned long long)bip->bip_iter.bi_sector);
 
-	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+	resid = bip->bip_iter.bi_size;
+	while (resid > 0 && sg_miter_next(miter)) {
 
-		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
-					    sg->offset);
-		if (rc != sg->length) {
+		len = min_t(size_t, miter->length, resid);
+		rc = bio_integrity_add_page(bio, miter->page, len,
+					    offset_in_page(miter->addr));
+		if (rc != len) {
 			pr_err("bio_integrity_add_page() failed; %d\n", rc);
+			sg_miter_stop(miter);
 			return -ENOMEM;
 		}
 
-		pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
-			 sg_page(sg), sg->length, sg->offset);
+		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
+			 miter->page, len, offset_in_page(miter->addr));
+
+		resid -= len;
+		if (len < miter->length)
+			miter->consumed -= miter->length - len;
 	}
+	sg_miter_stop(miter);
 
 	return 0;
 }
@@ -686,12 +695,13 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 	struct se_device *dev = cmd->se_dev;
 	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
 	struct iblock_req *ibr;
-	struct bio *bio, *bio_start;
+	struct bio *bio;
 	struct bio_list list;
 	struct scatterlist *sg;
 	u32 sg_num = sgl_nents;
 	unsigned bio_cnt;
-	int i, op, op_flags = 0;
+	int i, rc, op, op_flags = 0;
+	struct sg_mapping_iter prot_miter;
 
 	if (data_direction == DMA_TO_DEVICE) {
 		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -726,13 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 	if (!bio)
 		goto fail_free_ibr;
 
-	bio_start = bio;
 	bio_list_init(&list);
 	bio_list_add(&list, bio);
 
 	refcount_set(&ibr->pending, 2);
 	bio_cnt = 1;
 
+	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
+		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
+			       op == REQ_OP_READ ? SG_MITER_FROM_SG :
+						   SG_MITER_TO_SG);
+
 	for_each_sg(sgl, sg, sgl_nents, i) {
 		/*
 		 * XXX: if the length the device accepts is shorter than the
@@ -741,6 +755,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		 */
 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
 				!= sg->length) {
+			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
+				if (rc)
+					goto fail_put_bios;
+			}
+
 			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
 				iblock_submit_bios(&list);
 				bio_cnt = 0;
@@ -757,12 +777,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		}
 
 		/* Always in 512 byte units for Linux/Block */
-		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		block_lba += sg->length >> SECTOR_SHIFT;
 		sg_num--;
 	}
 
 	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
-		int rc = iblock_alloc_bip(cmd, bio_start);
+		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
 		if (rc)
 			goto fail_put_bios;
 	}
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 9cc3843404d4..cefc641145b3 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -9,7 +9,6 @@
 #define IBLOCK_VERSION		"4.0"
 
 #define IBLOCK_MAX_CDBS		16
-#define IBLOCK_LBA_SHIFT	9
 
 struct iblock_req {
 	refcount_t pending;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index ebac2b49b9c6..1ac1f7d2e6c9 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -360,6 +360,10 @@ static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
 	unsigned int offset;
 	sense_reason_t ret = TCM_NO_SENSE;
 	int i, count;
+
+	if (!success)
+		return 0;
+
 	/*
 	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
 	 *
@@ -425,14 +429,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
 	struct se_device *dev = cmd->se_dev;
 	sense_reason_t ret = TCM_NO_SENSE;
 
-	/*
-	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
-	 * within target_complete_ok_work() if the command was successfully
-	 * sent to the backend driver.
-	 */
 	spin_lock_irq(&cmd->t_state_lock);
-	if (cmd->transport_state & CMD_T_SENT) {
-		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+	if (success) {
 		*post_ret = 1;
 
 		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
@@ -453,7 +451,8 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
 	int *post_ret)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct scatterlist *write_sg = NULL, *sg;
+	struct sg_table write_tbl = { };
+	struct scatterlist *write_sg, *sg;
 	unsigned char *buf = NULL, *addr;
 	struct sg_mapping_iter m;
 	unsigned int offset = 0, len;
@@ -494,14 +493,12 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
 		goto out;
 	}
 
-	write_sg = kmalloc_array(cmd->t_data_nents, sizeof(*write_sg),
-				 GFP_KERNEL);
-	if (!write_sg) {
+	if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
 		pr_err("Unable to allocate compare_and_write sg\n");
 		ret = TCM_OUT_OF_RESOURCES;
 		goto out;
 	}
-	sg_init_table(write_sg, cmd->t_data_nents);
+	write_sg = write_tbl.sgl;
 	/*
 	 * Setup verify and write data payloads from total NumberLBAs.
 	 */
@@ -597,7 +594,7 @@ out:
 	 * sbc_compare_and_write() before the original READ I/O submission.
 	 */
 	up(&dev->caw_sem);
-	kfree(write_sg);
+	sg_free_table(&write_tbl);
 	kfree(buf);
 	return ret;
 }
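compare_and_write_callback() drops the hand-rolled kmalloc_array()/sg_init_table() pair for sg_alloc_table()/sg_free_table(), which yields a properly terminated (and, for large nents, chained) scatterlist and frees it symmetrically. The bare pattern (illustrative):

#include <linux/scatterlist.h>

static int example_sg_table(unsigned int nents)
{
	struct sg_table tbl = { };
	struct scatterlist *sg;
	int i;

	if (sg_alloc_table(&tbl, nents, GFP_KERNEL) < 0)
		return -ENOMEM;

	/* tbl.sgl is the first entry; the list is already terminated. */
	for_each_sg(tbl.sgl, sg, tbl.nents, i) {
		/* sg_set_page()/sg_set_buf() each entry here */
	}

	sg_free_table(&tbl);	/* also frees chained chunks */
	return 0;
}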
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 86c0156e6c88..4cf33e2cc705 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
 void transport_generic_request_failure(struct se_cmd *cmd,
 		sense_reason_t sense_reason)
 {
-	int ret = 0, post_ret = 0;
+	int ret = 0;
 
 	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
 		 sense_reason);
@@ -1789,13 +1789,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	 */
 	transport_complete_task_attr(cmd);
 
-	/*
-	 * Handle special case for COMPARE_AND_WRITE failure, where the
-	 * callback is expected to drop the per device ->caw_sem.
-	 */
-	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-	     cmd->transport_complete_callback)
-		cmd->transport_complete_callback(cmd, false, &post_ret);
+	if (cmd->transport_complete_callback)
+		cmd->transport_complete_callback(cmd, false, NULL);
 
 	if (transport_check_aborted_status(cmd, 1))
 		return;
@@ -2012,7 +2007,7 @@ void target_execute_cmd(struct se_cmd *cmd)
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
 	 *
-	 * If the received CDB has aleady been aborted stop processing it here.
+	 * If the received CDB has already been aborted stop processing it here.
 	 */
 	spin_lock_irq(&cmd->t_state_lock);
 	if (__transport_check_aborted_status(cmd, 1)) {
@@ -2516,7 +2511,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 	}
 
 	/*
-	 * Determine is the TCM fabric module has already allocated physical
+	 * Determine if the TCM fabric module has already allocated physical
 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
 	 * beforehand.
 	 */
@@ -2754,7 +2749,7 @@ static void target_release_cmd_kref(struct kref *kref)
 	if (se_sess) {
 		spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 		list_del_init(&se_cmd->se_cmd_list);
-		if (list_empty(&se_sess->sess_cmd_list))
+		if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
 			wake_up(&se_sess->cmd_list_wq);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 	}
@@ -2907,7 +2902,7 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 
 	spin_lock_irq(&se_sess->sess_cmd_lock);
 	do {
-		ret = wait_event_interruptible_lock_irq_timeout(
+		ret = wait_event_lock_irq_timeout(
 				se_sess->cmd_list_wq,
 				list_empty(&se_sess->sess_cmd_list),
 				se_sess->sess_cmd_lock, 180 * HZ);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 2718a933c0c6..70adcfdca8d1 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -391,7 +391,6 @@ out:
 struct xcopy_pt_cmd {
 	bool remote_port;
 	struct se_cmd se_cmd;
-	struct xcopy_op *xcopy_op;
 	struct completion xpt_passthrough_sem;
 	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 };
@@ -596,8 +595,6 @@ static int target_xcopy_setup_pt_cmd(
 	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
 	 */
 	target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
-
-	xpt_cmd->xcopy_op = xop;
 	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
 
 	cmd->tag = 0;
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d9f131ecf708..ed7c122cb31f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1052,10 +1052,9 @@
 	__ret;								\
 })
 
-#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition,	\
-						    lock, timeout)	\
+#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
 	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
-		      TASK_INTERRUPTIBLE, 0, timeout,			\
+		      state, 0, timeout,				\
 		      spin_unlock_irq(&lock);				\
 		      __ret = schedule_timeout(__ret);			\
 		      spin_lock_irq(&lock));
@@ -1089,8 +1088,19 @@ do { \
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
-		__ret = __wait_event_interruptible_lock_irq_timeout(	\
-					wq_head, condition, lock, timeout); \
+		__ret = __wait_event_lock_irq_timeout(			\
+					wq_head, condition, lock, timeout, \
+					TASK_INTERRUPTIBLE);		\
+	__ret;								\
+})
+
+#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)	\
+({									\
+	long __ret = timeout;						\
+	if (!___wait_cond_timeout(condition))				\
+		__ret = __wait_event_lock_irq_timeout(			\
+					wq_head, condition, lock, timeout, \
+					TASK_UNINTERRUPTIBLE);		\
 	__ret;								\
 })
 
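The new wait_event_lock_irq_timeout() is the TASK_UNINTERRUPTIBLE twin of the existing interruptible macro: the same drop-the-spinlock, sleep, retake-the-lock cycle, but a signal can no longer end the wait early, which is what target_wait_for_sess_cmds() needs. A usage sketch with placeholder names (not from the patch):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(sess_lock);		/* placeholder names, */
static DECLARE_WAIT_QUEUE_HEAD(cmd_wq);		/* not from the patch */
static LIST_HEAD(cmd_list);

static void example_wait_for_cmds(void)
{
	long ret;

	spin_lock_irq(&sess_lock);
	do {
		/*
		 * Drops sess_lock around an uninterruptible sleep and
		 * retakes it before rechecking; returns 0 on timeout,
		 * otherwise the remaining jiffies.
		 */
		ret = wait_event_lock_irq_timeout(cmd_wq,
						  list_empty(&cmd_list),
						  sess_lock, 180 * HZ);
	} while (ret <= 0);
	spin_unlock_irq(&sess_lock);
}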
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index f2e6abea8490..24c398f4a68f 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -25,6 +25,7 @@ struct sock;
 #define ISCSIT_TCP_BACKLOG		256
 #define ISCSI_RX_THREAD_NAME		"iscsi_trx"
 #define ISCSI_TX_THREAD_NAME		"iscsi_ttx"
+#define ISCSI_IQN_LEN			224
 
 /* struct iscsi_node_attrib sanity values */
 #define NA_DATAOUT_TIMEOUT		3
@@ -270,9 +271,9 @@ struct iscsi_conn_ops {
 };
 
 struct iscsi_sess_ops {
-	char			InitiatorName[224];
+	char			InitiatorName[ISCSI_IQN_LEN];
 	char			InitiatorAlias[256];
-	char			TargetName[224];
+	char			TargetName[ISCSI_IQN_LEN];
 	char			TargetAlias[256];
 	char			TargetAddress[256];
 	u16			TargetPortalGroupTag;	/* [0..65535] */
@@ -855,7 +856,6 @@ struct iscsi_wwn_stat_grps {
 };
 
 struct iscsi_tiqn {
-#define ISCSI_IQN_LEN	224
 	unsigned char		tiqn[ISCSI_IQN_LEN];
 	enum tiqn_state_table	tiqn_state;
 	int			tiqn_access_count;
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index 4d75a2c426ca..ff6a47209313 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -33,7 +33,7 @@ struct iscsi_sess_err_stats {
 	u32		cxn_timeout_errors;
 	u32		pdu_format_errors;
 	u32		last_sess_failure_type;
-	char		last_sess_fail_rem_name[224];
+	char		last_sess_fail_rem_name[ISCSI_IQN_LEN];
 } ____cacheline_aligned;
 
 /* iSCSI login failure types (sub oids) */
@@ -56,7 +56,7 @@ struct iscsi_login_stats {
 	u32		last_fail_type;
 	int		last_intr_fail_ip_family;
 	struct sockaddr_storage last_intr_fail_sockaddr;
-	char		last_intr_fail_name[224];
+	char		last_intr_fail_name[ISCSI_IQN_LEN];
 } ____cacheline_aligned;
 
 /* iSCSI logout stats */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7a4ee7852ca4..e3bdb0550a59 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -138,7 +138,6 @@ enum se_cmd_flags_table {
 	SCF_ALUA_NON_OPTIMIZED		= 0x00008000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
 	SCF_COMPARE_AND_WRITE		= 0x00080000,
-	SCF_COMPARE_AND_WRITE_POST	= 0x00100000,
 	SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
 	SCF_ACK_KREF			= 0x00400000,
 	SCF_USE_CPUID			= 0x00800000,
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
new file mode 100644
index 000000000000..17c7abd0803a
--- /dev/null
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * UFS Transport SGIO v4 BSG Message Support
+ *
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef SCSI_BSG_UFS_H
+#define SCSI_BSG_UFS_H
+
+#include <linux/types.h>
+/*
+ * This file intended to be included by both kernel and user space
+ */
+
+#define UFS_CDB_SIZE	16
+#define UPIU_TRANSACTION_UIC_CMD 0x1F
+/* uic commands are 4DW long, per UFSHCI V2.1 paragraph 5.6.1 */
+#define UIC_CMD_SIZE (sizeof(__u32) * 4)
+
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+	__be32 dword_0;
+	__be32 dword_1;
+	__be32 dword_2;
+};
+
+/**
+ * struct utp_upiu_query - upiu request buffer structure for
+ * query request.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @reserved_osf: spec reserved field B-4,5
+ * @length: number of descriptor bytes to read/write B-6,7
+ * @value: Attribute value to be written DW-5
+ * @reserved: spec reserved DW-6,7
+ */
+struct utp_upiu_query {
+	__u8 opcode;
+	__u8 idn;
+	__u8 index;
+	__u8 selector;
+	__be16 reserved_osf;
+	__be16 length;
+	__be32 value;
+	__be32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @exp_data_transfer_len: Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+	__be32 exp_data_transfer_len;
+	__u8 cdb[UFS_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_req - general upiu request structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_req {
+	struct utp_upiu_header header;
+	union {
+		struct utp_upiu_cmd	sc;
+		struct utp_upiu_query	qr;
+		struct utp_upiu_query	tr;
+		/* use utp_upiu_query to host the 4 dwords of uic command */
+		struct utp_upiu_query	uc;
+	};
+};
+
+/* request (CDB) structure of the sg_io_v4 */
+struct ufs_bsg_request {
+	__u32 msgcode;
+	struct utp_upiu_req upiu_req;
+};
+
+/* response (request sense data) structure of the sg_io_v4 */
+struct ufs_bsg_reply {
+	/*
+	 * The completion result. Result exists in two forms:
+	 * if negative, it is an -Exxx system errno value. There will
+	 * be no further reply information supplied.
+	 * else, it's the 4-byte scsi error result, with driver, host,
+	 * msg and status fields. The per-msgcode reply structure
+	 * will contain valid data.
+	 */
+	__u32 result;
+
+	/* If there was reply_payload, how much was received? */
+	__u32 reply_payload_rcv_len;
+
+	struct utp_upiu_req upiu_rsp;
+};
+#endif /* SCSI_BSG_UFS_H */
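The structures above form the request/reply payload carried by an SG_IO v4 transaction on the new UFS BSG node. A hedged userspace sketch of issuing one follows; the device node path and the 0x16 query-request transaction code come from bsg conventions and the UFS spec (JESD220), not from this header, so treat them as assumptions:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_ufs.h>

int main(void)
{
	struct ufs_bsg_request req;
	struct ufs_bsg_reply rsp;
	struct sg_io_v4 io;
	/* Assumed node name; the actual name depends on the host. */
	int fd = open("/dev/bsg/ufs-bsg", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.msgcode = 0x16;	/* UPIU query request per JESD220 (assumed) */
	/* req.upiu_req.header / .qr would be filled per the UFS spec. */

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uint64_t)(uintptr_t)&req;
	io.request_len = sizeof(req);
	io.response = (uint64_t)(uintptr_t)&rsp;
	io.max_response_len = sizeof(rsp);

	if (ioctl(fd, SG_IO, &io) < 0)
		perror("SG_IO");
	else
		printf("result %u, rcv_len %u\n",
		       rsp.result, rsp.reply_payload_rcv_len);
	close(fd);
	return 0;
}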