author	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 17:48:06 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 17:48:06 -0500
commit	938edb8a31b976c9a92eb0cd4ff481e93f76c1f1 (patch)
tree	0854d5f6859d51032f1d853eaa8ab0e8647fb0cb
parent	af7ddd8a627c62a835524b3f5b471edbbbcce025 (diff)
parent	da7903092b880b25971ca9103cb0b934a44ace2b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: smartpqi, lpfc, qedi,
  megaraid_sas, libsas, zfcp, mpt3sas, hisi_sas.

  Additionally, we have a pile of annotation, unused variable and minor
  updates.

  The big API change is the updates for Christoph's DMA rework which
  include removing the DISABLE_CLUSTERING flag.

  And finally there are a couple of target tree updates"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (259 commits)
  scsi: isci: request: mark expected switch fall-through
  scsi: isci: remote_node_context: mark expected switch fall-throughs
  scsi: isci: remote_device: Mark expected switch fall-throughs
  scsi: isci: phy: Mark expected switch fall-through
  scsi: iscsi: Capture iscsi debug messages using tracepoints
  scsi: myrb: Mark expected switch fall-throughs
  scsi: megaraid: fix out-of-bound array accesses
  scsi: mpt3sas: mpt3sas_scsih: Mark expected switch fall-through
  scsi: fcoe: remove set but not used variable 'port'
  scsi: smartpqi: call pqi_free_interrupts() in pqi_shutdown()
  scsi: smartpqi: fix build warnings
  scsi: smartpqi: update driver version
  scsi: smartpqi: add ofa support
  scsi: smartpqi: increase fw status register read timeout
  scsi: smartpqi: bump driver version
  scsi: smartpqi: add smp_utils support
  scsi: smartpqi: correct lun reset issues
  scsi: smartpqi: correct volume status
  scsi: smartpqi: do not offline disks for transient did no connect conditions
  scsi: smartpqi: allow for larger raid maps
  ...
-rw-r--r--  Documentation/devicetree/bindings/ufs/cdns,ufshc.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 6
-rw-r--r--  Documentation/scsi/scsi_mid_low_api.txt | 2
-rw-r--r--  arch/ia64/hp/sim/simscsi.c | 2
-rw-r--r--  block/blk-merge.c | 18
-rw-r--r--  block/blk-settings.c | 3
-rw-r--r--  block/blk-sysfs.c | 5
-rw-r--r--  drivers/firewire/sbp2.c | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 1
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 1
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 8
-rw-r--r--  drivers/message/fusion/mptfc.c | 1
-rw-r--r--  drivers/message/fusion/mptsas.c | 1
-rw-r--r--  drivers/message/fusion/mptspi.c | 1
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 82
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 25
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 14
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 113
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 345
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 9
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 48
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h | 21
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 51
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 4
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 11
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 9
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 15
-rw-r--r--  drivers/scsi/3w-9xxx.c | 1
-rw-r--r--  drivers/scsi/3w-sas.c | 1
-rw-r--r--  drivers/scsi/3w-xxxx.c | 3
-rw-r--r--  drivers/scsi/53c700.c | 1
-rw-r--r--  drivers/scsi/BusLogic.c | 2
-rw-r--r--  drivers/scsi/a100u2w.c | 1
-rw-r--r--  drivers/scsi/a2091.c | 2
-rw-r--r--  drivers/scsi/a3000.c | 1
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 5
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 5
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 5
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 19
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 19
-rw-r--r--  drivers/scsi/aacraid/linit.c | 4
-rw-r--r--  drivers/scsi/aacraid/src.c | 2
-rw-r--r--  drivers/scsi/advansys.c | 12
-rw-r--r--  drivers/scsi/aha152x.c | 2
-rw-r--r--  drivers/scsi/aha1542.c | 127
-rw-r--r--  drivers/scsi/aha1740.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.c | 3
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 9
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 2
-rw-r--r--  drivers/scsi/arm/arxescsi.c | 2
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 2
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 1
-rw-r--r--  drivers/scsi/arm/eesox.c | 1
-rw-r--r--  drivers/scsi/arm/oak.c | 2
-rw-r--r--  drivers/scsi/arm/powertec.c | 1
-rw-r--r--  drivers/scsi/atari_scsi.c | 2
-rw-r--r--  drivers/scsi/atp870u.c | 1
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 9
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 4
-rw-r--r--  drivers/scsi/bfa/bfad.c | 18
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 1
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 1
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 3
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/Kconfig | 4
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2
-rw-r--r--  drivers/scsi/cxlflash/main.c | 1
-rw-r--r--  drivers/scsi/dc395x.c | 2
-rw-r--r--  drivers/scsi/dmx3191d.c | 2
-rw-r--r--  drivers/scsi/dpt_i2o.c | 13
-rw-r--r--  drivers/scsi/esas2r/esas2r_init.c | 49
-rw-r--r--  drivers/scsi/esas2r/esas2r_main.c | 1
-rw-r--r--  drivers/scsi/esp_scsi.c | 1
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 5
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 1
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 3
-rw-r--r--  drivers/scsi/g_NCR5380.c | 2
-rw-r--r--  drivers/scsi/gdth.c | 1
-rw-r--r--  drivers/scsi/gvp11.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 13
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 203
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 25
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 74
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 322
-rw-r--r--  drivers/scsi/hosts.c | 6
-rw-r--r--  drivers/scsi/hpsa.c | 6
-rw-r--r--  drivers/scsi/hptiop.c | 11
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 8
-rw-r--r--  drivers/scsi/imm.c | 1
-rw-r--r--  drivers/scsi/initio.c | 3
-rw-r--r--  drivers/scsi/ipr.c | 1
-rw-r--r--  drivers/scsi/ips.c | 10
-rw-r--r--  drivers/scsi/ips.h | 9
-rw-r--r--  drivers/scsi/isci/init.c | 20
-rw-r--r--  drivers/scsi/isci/phy.c | 1
-rw-r--r--  drivers/scsi/isci/remote_device.c | 4
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 4
-rw-r--r--  drivers/scsi/isci/request.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 6
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 3
-rw-r--r--  drivers/scsi/libiscsi.c | 10
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 4
-rw-r--r--  drivers/scsi/libsas/Makefile | 3
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 26
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 33
-rw-r--r--  drivers/scsi/libsas/sas_dump.c | 63
-rw-r--r--  drivers/scsi/libsas/sas_dump.h | 29
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 1
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 236
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 10
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 16
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 8
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 23
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 123
-rw-r--r--  drivers/scsi/libsas/sas_task.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 29
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 230
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 200
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 38
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 261
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 468
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 224
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 80
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 273
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 44
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 81
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 145
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 309
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 4
-rw-r--r--  drivers/scsi/mac53c94.c | 2
-rw-r--r--  drivers/scsi/mac_esp.c | 2
-rw-r--r--  drivers/scsi/mac_scsi.c | 2
-rw-r--r--  drivers/scsi/megaraid.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 10
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 3
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 74
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 447
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 24
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 470
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 26
-rw-r--r--  drivers/scsi/mesh.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2.h | 17
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 94
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_image.h | 506
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_init.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 359
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_pci.h | 11
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_raid.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_sas.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 72
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 145
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 14
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 29
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 21
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 72
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 91
-rw-r--r--  drivers/scsi/mvme147.c | 1
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 1
-rw-r--r--  drivers/scsi/mvumi.c | 3
-rw-r--r--  drivers/scsi/myrb.c | 3
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 1
-rw-r--r--  drivers/scsi/nsp32.c | 2
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 2
-rw-r--r--  drivers/scsi/pcmcia/qlogic_stub.c | 2
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 1
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 1
-rw-r--r--  drivers/scsi/pmcraid.c | 117
-rw-r--r--  drivers/scsi/ppa.c | 1
-rw-r--r--  drivers/scsi/ps3rom.c | 1
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 4
-rw-r--r--  drivers/scsi/qedi/qedi.h | 7
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 1
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 87
-rw-r--r--  drivers/scsi/qedi/qedi_version.h | 4
-rw-r--r--  drivers/scsi/qla1280.c | 53
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 117
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 64
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 11
-rw-r--r--  drivers/scsi/qlogicfas.c | 2
-rw-r--r--  drivers/scsi/qlogicpti.c | 1
-rw-r--r--  drivers/scsi/scsi_debug.c | 8
-rw-r--r--  drivers/scsi/scsi_lib.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 34
-rw-r--r--  drivers/scsi/sgiwd93.c | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 216
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 1558
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 164
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 15
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.h | 1
-rw-r--r--  drivers/scsi/snic/snic_main.c | 1
-rw-r--r--  drivers/scsi/snic/snic_trc.c | 3
-rw-r--r--  drivers/scsi/stex.c | 18
-rw-r--r--  drivers/scsi/storvsc_drv.c | 1
-rw-r--r--  drivers/scsi/sun3_scsi.c | 2
-rw-r--r--  drivers/scsi/sun_esp.c | 7
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 5
-rw-r--r--  drivers/scsi/ufs/Kconfig | 8
-rw-r--r--  drivers/scsi/ufs/Makefile | 1
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c | 148
-rw-r--r--  drivers/scsi/ufs/ufs.h | 18
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 104
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 2
-rw-r--r--  drivers/scsi/virtio_scsi.c | 53
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 1
-rw-r--r--  drivers/scsi/wd719x.c | 136
-rw-r--r--  drivers/scsi/wd719x.h | 1
-rw-r--r--  drivers/scsi/xen-scsifront.c | 3
-rw-r--r--  drivers/staging/rts5208/rtsx.c | 6
-rw-r--r--  drivers/staging/unisys/visorhba/visorhba_main.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 11
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 11
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 28
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 10
-rw-r--r--  drivers/target/sbp/sbp_target.c | 8
-rw-r--r--  drivers/target/target_core_alua.c | 6
-rw-r--r--  drivers/target/target_core_configfs.c | 157
-rw-r--r--  drivers/target/target_core_device.c | 111
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 2
-rw-r--r--  drivers/target/target_core_internal.h | 2
-rw-r--r--  drivers/target/target_core_pr.c | 90
-rw-r--r--  drivers/target/target_core_pscsi.c | 50
-rw-r--r--  drivers/target/target_core_spc.c | 28
-rw-r--r--  drivers/target/target_core_stat.c | 34
-rw-r--r--  drivers/target/target_core_tmr.c | 56
-rw-r--r--  drivers/target/target_core_tpg.c | 23
-rw-r--r--  drivers/target/target_core_transport.c | 416
-rw-r--r--  drivers/target/target_core_ua.c | 4
-rw-r--r--  drivers/target/target_core_user.c | 2
-rw-r--r--  drivers/target/target_core_xcopy.c | 13
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 8
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c | 8
-rw-r--r--  drivers/usb/image/microtek.c | 1
-rw-r--r--  drivers/usb/storage/scsiglue.c | 7
-rw-r--r--  drivers/usb/storage/uas.c | 1
-rw-r--r--  drivers/vhost/scsi.c | 8
-rw-r--r--  drivers/xen/xen-scsiback.c | 8
-rw-r--r--  include/linux/blkdev.h | 6
-rw-r--r--  include/linux/libata.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 20
-rw-r--r--  include/target/target_core_base.h | 25
-rw-r--r--  include/target/target_core_fabric.h | 25
269 files changed, 7984 insertions, 3844 deletions
diff --git a/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt b/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt
new file mode 100644
index 000000000000..a04a4989ec7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt
@@ -0,0 +1,31 @@
+* Cadence Universal Flash Storage (UFS) Controller
+
+UFS nodes are defined to describe on-chip UFS host controllers.
+Each UFS controller instance should have its own node.
+Please see the ufshcd-pltfrm.txt for a list of all available properties.
+
+Required properties:
+- compatible        : Compatible list, contains the following controller:
+                      "cdns,ufshc"
+                      complemented with the JEDEC version:
+                      "jedec,ufs-2.0"
+
+- reg               : Address and length of the UFS register set.
+- interrupts        : One interrupt mapping.
+- freq-table-hz     : Clock frequency table.
+                      See the ufshcd-pltfrm.txt for details.
+- clocks            : List of phandle and clock specifier pairs.
+- clock-names       : List of clock input name strings sorted in the same
+                      order as the clocks property. "core_clk" is mandatory.
+                      Depending on a type of a PHY,
+                      the "phy_clk" clock can also be added, if needed.
+
+Example:
+	ufs@fd030000 {
+		compatible = "cdns,ufshc", "jedec,ufs-2.0";
+		reg = <0xfd030000 0x10000>;
+		interrupts = <0 1 IRQ_TYPE_LEVEL_HIGH>;
+		freq-table-hz = <0 0>, <0 0>;
+		clocks = <&ufs_core_clk>, <&ufs_phy_clk>;
+		clock-names = "core_clk", "phy_clk";
+	};
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 2df00524bd21..8cf59452c675 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -33,6 +33,12 @@ Optional properties:
 - clocks : List of phandle and clock specifier pairs
 - clock-names : List of clock input name strings sorted in the same
 		  order as the clocks property.
+		  "ref_clk" indicates reference clock frequency.
+		  UFS host supplies reference clock to UFS device and UFS device
+		  specification allows host to provide one of the 4 frequencies (19.2 MHz,
+		  26 MHz, 38.4 MHz, 52MHz) for reference clock. This "ref_clk" entry is
+		  parsed and used to update the reference clock setting in device.
+		  Defaults to 26 MHz(as per specification) if not specified by host.
 - freq-table-hz : Array of <min max> operating frequencies stored in the same
 		  order as the clocks property. If this property is not
 		  defined or a value in the array is "0" then it is assumed
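The "ref_clk" description above implies a simple runtime lookup: the host driver resolves the clock named "ref_clk" from the clocks/clock-names pair, reads its rate, and programs the matching setting into the UFS device, falling back to the 26 MHz spec default. A minimal sketch of that lookup, assuming a hypothetical example_get_ref_clk_hz() helper rather than the actual ufshcd code:

#include <linux/clk.h>
#include <linux/err.h>

/* Sketch only: resolve "ref_clk" from DT and return its rate in Hz. */
static unsigned long example_get_ref_clk_hz(struct device *dev)
{
	struct clk *ref_clk = devm_clk_get(dev, "ref_clk");

	if (IS_ERR(ref_clk))
		return 26000000;	/* spec default when not specified */
	return clk_get_rate(ref_clk);	/* 19.2, 26, 38.4 or 52 MHz */
}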
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 177c031763c0..c1dd4939f4ae 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -1098,8 +1098,6 @@ of interest:
     unchecked_isa_dma - 1=>only use bottom 16 MB of ram (ISA DMA addressing
                         restriction), 0=>can use full 32 bit (or better) DMA
                         address space
-    use_clustering    - 1=>SCSI commands in mid level's queue can be merged,
-                        0=>disallow SCSI command merging
     no_async_abort    - 1=>Asynchronous aborts are not supported
                         0=>Timed-out commands will be aborted asynchronously
     hostt             - pointer to driver's struct scsi_host_template from which
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 7e1426e76d96..f86844fc0725 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -347,7 +347,7 @@ static struct scsi_host_template driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.max_sectors		= 1024,
 	.cmd_per_lun		= SIMSCSI_REQ_QUEUE_LEN,
-	.use_clustering		= DISABLE_CLUSTERING,
+	.dma_boundary		= PAGE_SIZE - 1,
 };
 
 static int __init
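This simscsi hunk is the conversion pattern the merge message calls out: use_clustering is gone from struct scsi_host_template, clustering is always on, and a driver that must not see segments merged across a page boundary declares a DMA boundary instead. A sketch for a hypothetical driver (the "example" identifiers are illustrative, not part of this merge):

#include <linux/module.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	.name		= "example",
	.sg_tablesize	= SG_ALL,
	/* was: .use_clustering = DISABLE_CLUSTERING, */
	.dma_boundary	= PAGE_SIZE - 1,	/* segments stay within a page */
};

Drivers that used ENABLE_CLUSTERING simply drop the initializer, as the hunks below show.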
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e7f1c6cf0167..71e9ac03f621 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -195,7 +195,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		goto split;
 	}
 
-	if (bvprvp && blk_queue_cluster(q)) {
+	if (bvprvp) {
 		if (seg_size + bv.bv_len > queue_max_segment_size(q))
 			goto new_segment;
 		if (!biovec_phys_mergeable(q, bvprvp, &bv))
@@ -295,7 +295,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 				     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, prev = 0;
+	int prev = 0;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -313,7 +313,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	}
 
 	fbio = bio;
-	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -325,7 +324,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		if (no_sg_merge)
 			goto new_segment;
 
-		if (prev && cluster) {
+		if (prev) {
 			if (seg_size + bv.bv_len
 			    > queue_max_segment_size(q))
 				goto new_segment;
@@ -395,9 +394,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 {
 	struct bio_vec end_bv = { NULL }, nxt_bv;
 
-	if (!blk_queue_cluster(q))
-		return 0;
-
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
 	    queue_max_segment_size(q))
 		return 0;
@@ -414,12 +410,12 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		     struct scatterlist *sglist, struct bio_vec *bvprv,
-		     struct scatterlist **sg, int *nsegs, int *cluster)
+		     struct scatterlist **sg, int *nsegs)
 {
 
 	int nbytes = bvec->bv_len;
 
-	if (*sg && *cluster) {
+	if (*sg) {
 		if ((*sg)->length + nbytes > queue_max_segment_size(q))
 			goto new_segment;
 		if (!biovec_phys_mergeable(q, bvprv, bvec))
@@ -465,12 +461,12 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 {
 	struct bio_vec bvec, bvprv = { NULL };
 	struct bvec_iter iter;
-	int cluster = blk_queue_cluster(q), nsegs = 0;
+	int nsegs = 0;
 
 	for_each_bio(bio)
 		bio_for_each_segment(bvec, bio, iter)
 			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
-					     &nsegs, &cluster);
+					     &nsegs);
 
 	return nsegs;
 }
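Across these hunks the blk_queue_cluster(q) gate disappears, leaving two conditions that always decide whether adjacent bio_vecs share one physical segment. The surviving rule, sketched as a standalone predicate (a simplification of the hunks above, not the literal final functions; biovec_phys_mergeable() is block-layer internal):

#include <linux/blkdev.h>

/* Sketch: may bio_vec cur extend the segment that currently ends at prv? */
static bool example_can_merge(struct request_queue *q, unsigned int seg_size,
			      struct bio_vec *prv, struct bio_vec *cur)
{
	if (seg_size + cur->bv_len > queue_max_segment_size(q))
		return false;			/* would exceed segment limit */
	return biovec_phys_mergeable(q, prv, cur); /* physically contiguous? */
}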
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3abe831e92c8..3e7038e475ee 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -56,7 +56,6 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->cluster = 1;
 	lim->zoned = BLK_ZONED_NONE;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
@@ -547,8 +546,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
-	t->cluster &= b->cluster;
-
 	/* Physical block size a multiple of the logical block size? */
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
 		t->physical_block_size = t->logical_block_size;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0619c8922893..590d1ef2f961 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -132,10 +132,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (blk_queue_cluster(q))
-		return queue_var_show(queue_max_segment_size(q), (page));
-
-	return queue_var_show(PAGE_SIZE, (page));
+	return queue_var_show(queue_max_segment_size(q), (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 6bac03999fd4..09b845e90114 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1610,7 +1610,6 @@ static struct scsi_host_template scsi_driver_template = {
 	.eh_abort_handler	= sbp2_scsi_abort,
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.can_queue		= 1,
 	.sdev_attrs		= sbp2_scsi_sysfs_attrs,
 };
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 3fecd87c9f2b..8c707accd148 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -997,7 +997,6 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.eh_device_reset_handler= iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.target_alloc		= iscsi_target_alloc,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.slave_alloc		= iscsi_iser_slave_alloc,
 	.proc_name		= "iscsi_iser",
 	.this_id		= -1,
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index eed0eb3bb04c..d27fe970ceba 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -3215,7 +3215,6 @@ static struct scsi_host_template srp_template = {
 	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
 	.this_id			= -1,
 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= srp_host_attrs,
 	.track_queue_depth		= 1,
 };
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2357aa727dcf..41ee1f263bd6 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3147,11 +3147,6 @@ static int srpt_check_false(struct se_portal_group *se_tpg)
 	return 0;
 }
 
-static char *srpt_get_fabric_name(void)
-{
-	return "srpt";
-}
-
 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
 {
 	return tpg->se_tpg_wwn->priv;
@@ -3678,8 +3673,7 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
 
 static const struct target_core_fabric_ops srpt_template = {
 	.module				= THIS_MODULE,
-	.name				= "srpt",
-	.get_fabric_name		= srpt_get_fabric_name,
+	.fabric_name			= "srpt",
 	.tpg_get_wwn			= srpt_get_fabric_wwn,
 	.tpg_get_tag			= srpt_get_tag,
 	.tpg_check_demo_mode		= srpt_check_false,
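The srpt hunks show the target-core API change behind the "couple of target tree updates" in the merge message: the per-module get_fabric_name() callback is removed and the constant string moves into the ops table as .fabric_name. Sketched for a hypothetical fabric module (names illustrative, not from this merge):

#include <linux/module.h>
#include <target/target_core_fabric.h>

static const struct target_core_fabric_ops example_fabric_ops = {
	.module		= THIS_MODULE,
	.fabric_name	= "example",	/* was: .name plus .get_fabric_name() */
	/* remaining tpg/session callbacks unchanged by this conversion */
};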
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b15fdc626fb8..4314a3352b96 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -129,7 +129,6 @@ static struct scsi_host_template mptfc_driver_template = {
 	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
 	.max_sectors			= 8192,
 	.cmd_per_lun			= 7,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= mptscsih_host_attrs,
 };
 
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 9b404fc69c90..612cb5bc1333 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1992,7 +1992,6 @@ static struct scsi_host_template mptsas_driver_template = {
 	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
 	.max_sectors			= 8192,
 	.cmd_per_lun			= 7,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= mptscsih_host_attrs,
 	.no_write_same			= 1,
 };
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 9a336a161d9f..7172b0b16bdd 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -848,7 +848,6 @@ static struct scsi_host_template mptspi_driver_template = {
 	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
 	.max_sectors			= 8192,
 	.cmd_per_lun			= 7,
-	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= mptscsih_host_attrs,
 };
 
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 94f4d8fe85e0..9cf30d124b9e 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
  *
  * Module interface and handling of zfcp data structures.
  *
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2017
  */
 
 /*
@@ -124,6 +124,9 @@ static int __init zfcp_module_init(void)
 {
 	int retval = -ENOMEM;
 
+	if (zfcp_experimental_dix)
+		pr_warn("DIX is enabled. It is experimental and might cause problems\n");
+
 	zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
 						  sizeof(struct fsf_qtcb));
 	if (!zfcp_fsf_qtcb_cache)
@@ -248,43 +251,36 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 
 static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 {
-	if (adapter->pool.erp_req)
-		mempool_destroy(adapter->pool.erp_req);
-	if (adapter->pool.scsi_req)
-		mempool_destroy(adapter->pool.scsi_req);
-	if (adapter->pool.scsi_abort)
-		mempool_destroy(adapter->pool.scsi_abort);
-	if (adapter->pool.qtcb_pool)
-		mempool_destroy(adapter->pool.qtcb_pool);
-	if (adapter->pool.status_read_req)
-		mempool_destroy(adapter->pool.status_read_req);
-	if (adapter->pool.sr_data)
-		mempool_destroy(adapter->pool.sr_data);
-	if (adapter->pool.gid_pn)
-		mempool_destroy(adapter->pool.gid_pn);
+	mempool_destroy(adapter->pool.erp_req);
+	mempool_destroy(adapter->pool.scsi_req);
+	mempool_destroy(adapter->pool.scsi_abort);
+	mempool_destroy(adapter->pool.qtcb_pool);
+	mempool_destroy(adapter->pool.status_read_req);
+	mempool_destroy(adapter->pool.sr_data);
+	mempool_destroy(adapter->pool.gid_pn);
 }
 
 /**
  * zfcp_status_read_refill - refill the long running status_read_requests
  * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
  *
- * Returns: 0 on success, 1 otherwise
- *
- * if there are 16 or more status_read requests missing an adapter_reopen
- * is triggered
+ * Return:
+ * * 0 on success meaning at least one status read is pending
+ * * 1 if posting failed and not a single status read buffer is pending,
+ *     also triggers adapter reopen recovery
  */
 int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 {
-	while (atomic_read(&adapter->stat_miss) > 0)
+	while (atomic_add_unless(&adapter->stat_miss, -1, 0))
 		if (zfcp_fsf_status_read(adapter->qdio)) {
+			atomic_inc(&adapter->stat_miss); /* undo add -1 */
 			if (atomic_read(&adapter->stat_miss) >=
 			    adapter->stat_read_buf_num) {
 				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
 				return 1;
 			}
 			break;
-		} else
-			atomic_dec(&adapter->stat_miss);
+		}
 	return 0;
 }
 
@@ -542,45 +538,3 @@ err_out:
 	zfcp_ccw_adapter_put(adapter);
 	return ERR_PTR(retval);
 }
-
-/**
- * zfcp_sg_free_table - free memory used by scatterlists
- * @sg: pointer to scatterlist
- * @count: number of scatterlist which are to be free'ed
- * the scatterlist are expected to reference pages always
- */
-void zfcp_sg_free_table(struct scatterlist *sg, int count)
-{
-	int i;
-
-	for (i = 0; i < count; i++, sg++)
-		if (sg)
-			free_page((unsigned long) sg_virt(sg));
-		else
-			break;
-}
-
-/**
- * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
- * @sg: pointer to struct scatterlist
- * @count: number of scatterlists which should be assigned with buffers
- * of size page
- *
- * Returns: 0 on success, -ENOMEM otherwise
- */
-int zfcp_sg_setup_table(struct scatterlist *sg, int count)
-{
-	void *addr;
-	int i;
-
-	sg_init_table(sg, count);
-	for (i = 0; i < count; i++, sg++) {
-		addr = (void *) get_zeroed_page(GFP_KERNEL);
-		if (!addr) {
-			zfcp_sg_free_table(sg, i);
-			return -ENOMEM;
-		}
-		sg_set_buf(sg, addr, PAGE_SIZE);
-	}
-	return 0;
-}
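The reworked refill loop swaps "test, post, then decrement" for a lossless decrement-if-nonzero: atomic_add_unless(&adapter->stat_miss, -1, 0) only decrements while the counter is non-zero, and a failed post re-increments to undo it. The same decrement semantics, sketched standalone with C11 atomics (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *v unless it is already 0; mirrors atomic_add_unless(v, -1, 0). */
static bool dec_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;
	}
	return false;
}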
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 3b368fcf13f4..dccdb41bed8c 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -63,7 +63,8 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
 
 /**
  * zfcp_dbf_hba_fsf_res - trace event for fsf responses
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of FSF response has been received
+ * @level: trace level to be used for event
  * @req: request for which a response was received
  */
 void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
@@ -81,8 +82,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
 	rec->id = ZFCP_DBF_HBA_RES;
 	rec->fsf_req_id = req->req_id;
 	rec->fsf_req_status = req->status;
-	rec->fsf_cmd = req->fsf_command;
-	rec->fsf_seq_no = req->seq_no;
+	rec->fsf_cmd = q_head->fsf_command;
+	rec->fsf_seq_no = q_pref->req_seq_no;
 	rec->u.res.req_issued = req->issued;
 	rec->u.res.prot_status = q_pref->prot_status;
 	rec->u.res.fsf_status = q_head->fsf_status;
@@ -94,7 +95,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
 	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
 	       FSF_STATUS_QUALIFIER_SIZE);
 
-	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
+	if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
 		rec->pl_len = q_head->log_length;
 		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
 				  rec->pl_len, "fsf_res", req->req_id);
@@ -127,7 +128,7 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
 	rec->id = ZFCP_DBF_HBA_USS;
 	rec->fsf_req_id = req->req_id;
 	rec->fsf_req_status = req->status;
-	rec->fsf_cmd = req->fsf_command;
+	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
 
 	if (!srb)
 		goto log;
@@ -153,7 +154,7 @@ log:
 
 /**
  * zfcp_dbf_hba_bit_err - trace event for bit error conditions
- * @tag: tag indicating which kind of unsolicited status has been received
+ * @tag: tag indicating which kind of bit error unsolicited status was received
  * @req: request which caused the bit_error condition
  */
 void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
@@ -174,7 +175,7 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
 	rec->id = ZFCP_DBF_HBA_BIT;
 	rec->fsf_req_id = req->req_id;
 	rec->fsf_req_status = req->status;
-	rec->fsf_cmd = req->fsf_command;
+	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
 	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
 	       sizeof(struct fsf_bit_error_payload));
 
@@ -224,6 +225,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
 
 /**
  * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @tag: identifier for event
  * @adapter: pointer to struct zfcp_adapter
  */
 void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
@@ -357,7 +359,7 @@ void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
 	rec->u.run.fsf_req_id = erp->fsf_req_id;
 	rec->u.run.rec_status = erp->status;
 	rec->u.run.rec_step = erp->step;
-	rec->u.run.rec_action = erp->action;
+	rec->u.run.rec_action = erp->type;
 
 	if (erp->sdev)
 		rec->u.run.rec_count =
@@ -478,7 +480,8 @@ out:
 /**
  * zfcp_dbf_san_req - trace event for issued SAN request
  * @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing issued CT or ELS data
+ * @d_id: N_Port_ID where SAN request is sent to
  * d_id: destination ID
  */
 void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
@@ -560,7 +563,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
 /**
  * zfcp_dbf_san_res - trace event for received SAN request
  * @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received CT or ELS data
  */
 void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
 {
@@ -580,7 +583,7 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
 /**
  * zfcp_dbf_san_in_els - trace event for incoming ELS
  * @tag: identifier for event
- * @fsf_req: request containing issued CT data
+ * @fsf: request containing received ELS data
  */
 void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
 {
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index d116c07ed77a..900c779cc39b 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -42,7 +42,8 @@ struct zfcp_dbf_rec_trigger {
  * @fsf_req_id: request id for fsf requests
  * @rec_status: status of the fsf request
  * @rec_step: current step of the recovery action
- * rec_count: recovery counter
+ * @rec_action: ERP action type
+ * @rec_count: recoveries including retries for particular @rec_action
  */
 struct zfcp_dbf_rec_running {
 	u64 fsf_req_id;
@@ -72,6 +73,7 @@ enum zfcp_dbf_rec_id {
  * @adapter_status: current status of the adapter
  * @port_status: current status of the port
  * @lun_status: current status of the lun
+ * @u: record type specific data
  * @u.trig: structure zfcp_dbf_rec_trigger
  * @u.run: structure zfcp_dbf_rec_running
  */
@@ -126,6 +128,8 @@ struct zfcp_dbf_san {
  * @prot_status_qual: protocol status qualifier
  * @fsf_status: fsf status
  * @fsf_status_qual: fsf status qualifier
+ * @port_handle: handle for port
+ * @lun_handle: handle for LUN
  */
 struct zfcp_dbf_hba_res {
 	u64 req_issued;
@@ -158,6 +162,7 @@ struct zfcp_dbf_hba_uss {
  * @ZFCP_DBF_HBA_RES: response trace record
  * @ZFCP_DBF_HBA_USS: unsolicited status trace record
  * @ZFCP_DBF_HBA_BIT: bit error trace record
+ * @ZFCP_DBF_HBA_BASIC: basic adapter event, only trace tag, no other data
  */
 enum zfcp_dbf_hba_id {
 	ZFCP_DBF_HBA_RES	= 1,
@@ -176,6 +181,9 @@ enum zfcp_dbf_hba_id {
  * @fsf_seq_no: fsf sequence number
  * @pl_len: length of payload stored as zfcp_dbf_pay
  * @u: record type specific data
+ * @u.res: data for fsf responses
+ * @u.uss: data for unsolicited status buffer
+ * @u.be: data for bit error unsolicited status buffer
  */
 struct zfcp_dbf_hba {
 	u8 id;
@@ -339,8 +347,8 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
 				      zfcp_dbf_hba_fsf_resp_suppress(req)
 				      ? 5 : 1, req);
 
-	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
-		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+	} else if ((qtcb->header.fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+		   (qtcb->header.fsf_command == FSF_QTCB_OPEN_LUN)) {
 		zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
 
 	} else if (qtcb->header.log_length) {
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 3396a47721a7..87d2f47a6990 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
  *
  * Global definitions for the zfcp device driver.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2017
  */
 
 #ifndef ZFCP_DEF_H
@@ -41,24 +41,16 @@
 #include "zfcp_fc.h"
 #include "zfcp_qdio.h"
 
-struct zfcp_reqlist;
-
-/********************* SCSI SPECIFIC DEFINES *********************************/
-#define ZFCP_SCSI_ER_TIMEOUT			(10*HZ)
-
 /********************* FSF SPECIFIC DEFINES *********************************/
 
 /* ATTENTION: value must not be used by hardware */
 #define FSF_QTCB_UNSOLICITED_STATUS		0x6305
 
-/* timeout value for "default timer" for fsf requests */
-#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
-
 /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
 
 /*
- * Note, the leftmost status byte is common among adapter, port
- * and unit
+ * Note, the leftmost 12 status bits (3 nibbles) are common among adapter, port
+ * and unit. This is a mask for bitwise 'and' with status values.
  */
 #define ZFCP_COMMON_FLAGS			0xfff00000
 
@@ -97,49 +89,60 @@ struct zfcp_reqlist;
 
 /************************* STRUCTURE DEFINITIONS *****************************/
 
-struct zfcp_fsf_req;
+/**
+ * enum zfcp_erp_act_type - Type of ERP action object.
+ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
+ *
+ * Values must fit into u8 because of code dependencies:
+ * zfcp_dbf_rec_trig(), &zfcp_dbf_rec_trigger.want, &zfcp_dbf_rec_trigger.need;
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), &zfcp_dbf_rec_running.rec_action.
+ */
+enum zfcp_erp_act_type {
+	ZFCP_ERP_ACTION_REOPEN_LUN	   = 1,
+	ZFCP_ERP_ACTION_REOPEN_PORT	   = 2,
+	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
+	ZFCP_ERP_ACTION_REOPEN_ADAPTER	   = 4,
+};
 
-/* holds various memory pools of an adapter */
-struct zfcp_adapter_mempool {
-	mempool_t *erp_req;
-	mempool_t *gid_pn_req;
-	mempool_t *scsi_req;
-	mempool_t *scsi_abort;
-	mempool_t *status_read_req;
-	mempool_t *sr_data;
-	mempool_t *gid_pn;
-	mempool_t *qtcb_pool;
+/*
+ * Values must fit into u16 because of code dependencies:
+ * zfcp_dbf_rec_run_lvl(), zfcp_dbf_rec_run(), zfcp_dbf_rec_run_wka(),
+ * &zfcp_dbf_rec_running.rec_step.
+ */
+enum zfcp_erp_steps {
+	ZFCP_ERP_STEP_UNINITIALIZED	= 0x0000,
+	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
+	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
+	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
+	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
+	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
 };
 
 struct zfcp_erp_action {
 	struct list_head list;
-	int action;		      /* requested action code */
+	enum zfcp_erp_act_type type;  /* requested action code */
 	struct zfcp_adapter *adapter; /* device which should be recovered */
 	struct zfcp_port *port;
 	struct scsi_device *sdev;
 	u32		status;	      /* recovery status */
-	u32		step;	      /* active step of this erp action */
+	enum zfcp_erp_steps	step; /* active step of this erp action */
 	unsigned long		fsf_req_id;
 	struct timer_list timer;
 };
 
-struct fsf_latency_record {
-	u32 min;
-	u32 max;
-	u64 sum;
-};
-
-struct latency_cont {
-	struct fsf_latency_record channel;
-	struct fsf_latency_record fabric;
-	u64 counter;
-};
-
-struct zfcp_latencies {
-	struct latency_cont read;
-	struct latency_cont write;
-	struct latency_cont cmd;
-	spinlock_t lock;
+/* holds various memory pools of an adapter */
+struct zfcp_adapter_mempool {
+	mempool_t *erp_req;
+	mempool_t *gid_pn_req;
+	mempool_t *scsi_req;
+	mempool_t *scsi_abort;
+	mempool_t *status_read_req;
+	mempool_t *sr_data;
+	mempool_t *gid_pn;
+	mempool_t *qtcb_pool;
 };
 
 struct zfcp_adapter {
@@ -220,6 +223,25 @@ struct zfcp_port {
 	unsigned int		starget_id;
 };
 
+struct zfcp_latency_record {
+	u32 min;
+	u32 max;
+	u64 sum;
+};
+
+struct zfcp_latency_cont {
+	struct zfcp_latency_record channel;
+	struct zfcp_latency_record fabric;
+	u64 counter;
+};
+
+struct zfcp_latencies {
+	struct zfcp_latency_cont read;
+	struct zfcp_latency_cont write;
+	struct zfcp_latency_cont cmd;
+	spinlock_t lock;
+};
+
 /**
  * struct zfcp_unit - LUN configured via zfcp sysfs
  * @dev: struct device for sysfs representation and reference counting
@@ -287,9 +309,7 @@ static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
  * @qdio_req: qdio queue related values
  * @completion: used to signal the completion of the request
  * @status: status of the request
- * @fsf_command: FSF command issued
  * @qtcb: associated QTCB
- * @seq_no: sequence number of this request
 * @data: private data
 * @timer: timer data of this request
 * @erp_action: reference to erp action if request issued on behalf of ERP
@@ -304,9 +324,7 @@ struct zfcp_fsf_req {
 	struct zfcp_qdio_req	qdio_req;
 	struct completion	completion;
 	u32			status;
-	u32			fsf_command;
 	struct fsf_qtcb		*qtcb;
-	u32			seq_no;
 	void			*data;
 	struct timer_list	timer;
 	struct zfcp_erp_action	*erp_action;
@@ -321,4 +339,9 @@ int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
 	return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
 }
 
+static inline bool zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
+{
+	return req->qtcb == NULL;
+}
+
 #endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e7e6b63905e2..744a64680d5b 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -4,7 +4,7 @@
4 * 4 *
5 * Error Recovery Procedures (ERP). 5 * Error Recovery Procedures (ERP).
6 * 6 *
7 * Copyright IBM Corp. 2002, 2016 7 * Copyright IBM Corp. 2002, 2017
8 */ 8 */
9 9
10#define KMSG_COMPONENT "zfcp" 10#define KMSG_COMPONENT "zfcp"
@@ -24,38 +24,18 @@ enum zfcp_erp_act_flags {
24 ZFCP_STATUS_ERP_NO_REF = 0x00800000, 24 ZFCP_STATUS_ERP_NO_REF = 0x00800000,
25}; 25};
26 26
27enum zfcp_erp_steps { 27/*
28 ZFCP_ERP_STEP_UNINITIALIZED = 0x0000, 28 * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
29 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, 29 * Used to indicate that an ERP action could not be set up despite a detected
30 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, 30 * need for some recovery.
31 ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
32 ZFCP_ERP_STEP_LUN_CLOSING = 0x1000,
33 ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
34};
35
36/**
37 * enum zfcp_erp_act_type - Type of ERP action object.
38 * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
39 * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
40 * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
41 * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
42 * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
43 * either of the first four enum values.
44 * Used to indicate that an ERP action could not be
45 * set up despite a detected need for some recovery.
46 * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
47 * either of the first four enum values.
48 * Used to indicate that ERP not needed because
49 * the object has ZFCP_STATUS_COMMON_ERP_FAILED.
50 */ 31 */
51enum zfcp_erp_act_type { 32#define ZFCP_ERP_ACTION_NONE 0xc0
52 ZFCP_ERP_ACTION_REOPEN_LUN = 1, 33/*
53 ZFCP_ERP_ACTION_REOPEN_PORT = 2, 34 * Eyecatcher pseudo flag to bitwise or-combine with enum zfcp_erp_act_type.
54 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, 35 * Used to indicate that ERP not needed because the object has
55 ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, 36 * ZFCP_STATUS_COMMON_ERP_FAILED.
56 ZFCP_ERP_ACTION_NONE = 0xc0, 37 */
57 ZFCP_ERP_ACTION_FAILED = 0xe0, 38#define ZFCP_ERP_ACTION_FAILED 0xe0
58};
59 39
60enum zfcp_erp_act_result { 40enum zfcp_erp_act_result {
61 ZFCP_ERP_SUCCEEDED = 0, 41 ZFCP_ERP_SUCCEEDED = 0,
@@ -136,11 +116,11 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
136 } 116 }
137} 117}
138 118
139static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter, 119static enum zfcp_erp_act_type zfcp_erp_handle_failed(
140 struct zfcp_port *port, 120 enum zfcp_erp_act_type want, struct zfcp_adapter *adapter,
141 struct scsi_device *sdev) 121 struct zfcp_port *port, struct scsi_device *sdev)
142{ 122{
143 int need = want; 123 enum zfcp_erp_act_type need = want;
144 struct zfcp_scsi_dev *zsdev; 124 struct zfcp_scsi_dev *zsdev;
145 125
146 switch (want) { 126 switch (want) {
@@ -171,19 +151,17 @@ static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
171 adapter, ZFCP_STATUS_COMMON_ERP_FAILED); 151 adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
172 } 152 }
173 break; 153 break;
174 default:
175 need = 0;
176 break;
177 } 154 }
178 155
179 return need; 156 return need;
180} 157}
181 158
182static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, 159static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
160 struct zfcp_adapter *adapter,
183 struct zfcp_port *port, 161 struct zfcp_port *port,
184 struct scsi_device *sdev) 162 struct scsi_device *sdev)
185{ 163{
186 int need = want; 164 enum zfcp_erp_act_type need = want;
187 int l_status, p_status, a_status; 165 int l_status, p_status, a_status;
188 struct zfcp_scsi_dev *zfcp_sdev; 166 struct zfcp_scsi_dev *zfcp_sdev;
189 167
@@ -230,7 +208,8 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
230 return need; 208 return need;
231} 209}
232 210
233static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, 211static struct zfcp_erp_action *zfcp_erp_setup_act(enum zfcp_erp_act_type need,
212 u32 act_status,
234 struct zfcp_adapter *adapter, 213 struct zfcp_adapter *adapter,
235 struct zfcp_port *port, 214 struct zfcp_port *port,
236 struct scsi_device *sdev) 215 struct scsi_device *sdev)
@@ -278,9 +257,6 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
278 ZFCP_STATUS_COMMON_RUNNING)) 257 ZFCP_STATUS_COMMON_RUNNING))
279 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; 258 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
280 break; 259 break;
281
282 default:
283 return NULL;
284 } 260 }
285 261
286 WARN_ON_ONCE(erp_action->adapter != adapter); 262 WARN_ON_ONCE(erp_action->adapter != adapter);
@@ -288,18 +264,19 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
288 memset(&erp_action->timer, 0, sizeof(erp_action->timer)); 264 memset(&erp_action->timer, 0, sizeof(erp_action->timer));
289 erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED; 265 erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
290 erp_action->fsf_req_id = 0; 266 erp_action->fsf_req_id = 0;
291 erp_action->action = need; 267 erp_action->type = need;
292 erp_action->status = act_status; 268 erp_action->status = act_status;
293 269
294 return erp_action; 270 return erp_action;
295} 271}
296 272
297static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, 273static void zfcp_erp_action_enqueue(enum zfcp_erp_act_type want,
274 struct zfcp_adapter *adapter,
298 struct zfcp_port *port, 275 struct zfcp_port *port,
299 struct scsi_device *sdev, 276 struct scsi_device *sdev,
300 char *id, u32 act_status) 277 char *dbftag, u32 act_status)
301{ 278{
302 int need; 279 enum zfcp_erp_act_type need;
303 struct zfcp_erp_action *act; 280 struct zfcp_erp_action *act;
304 281
305 need = zfcp_erp_handle_failed(want, adapter, port, sdev); 282 need = zfcp_erp_handle_failed(want, adapter, port, sdev);
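zfcp_erp_handle_failed(), zfcp_erp_required_act() and zfcp_erp_setup_act() now thread the requested action through as enum zfcp_erp_act_type instead of int, which is what lets the compiler check the switch statements in this file for full coverage. A sketch of that type, assuming the long-standing zfcp action numbering (the values are not shown in these hunks):

enum zfcp_erp_act_type {
	ZFCP_ERP_ACTION_REOPEN_LUN         = 1,
	ZFCP_ERP_ACTION_REOPEN_PORT        = 2,
	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
	ZFCP_ERP_ACTION_REOPEN_ADAPTER     = 4,
};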
@@ -327,10 +304,11 @@ static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
327 list_add_tail(&act->list, &adapter->erp_ready_head); 304 list_add_tail(&act->list, &adapter->erp_ready_head);
328 wake_up(&adapter->erp_ready_wq); 305 wake_up(&adapter->erp_ready_wq);
329 out: 306 out:
330 zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need); 307 zfcp_dbf_rec_trig(dbftag, adapter, port, sdev, want, need);
331} 308}
332 309
333void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter, 310void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
311 struct zfcp_adapter *adapter,
334 u64 port_name, u32 port_id) 312 u64 port_name, u32 port_id)
335{ 313{
336 unsigned long flags; 314 unsigned long flags;
@@ -344,29 +322,30 @@ void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
344 atomic_set(&tmpport.status, -1); /* unknown */ 322 atomic_set(&tmpport.status, -1); /* unknown */
345 tmpport.wwpn = port_name; 323 tmpport.wwpn = port_name;
346 tmpport.d_id = port_id; 324 tmpport.d_id = port_id;
347 zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL, 325 zfcp_dbf_rec_trig(dbftag, adapter, &tmpport, NULL,
348 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, 326 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
349 ZFCP_ERP_ACTION_NONE); 327 ZFCP_ERP_ACTION_NONE);
350 write_unlock_irqrestore(&adapter->erp_lock, flags); 328 write_unlock_irqrestore(&adapter->erp_lock, flags);
351} 329}
352 330
353static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, 331static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
354 int clear_mask, char *id) 332 int clear_mask, char *dbftag)
355{ 333{
356 zfcp_erp_adapter_block(adapter, clear_mask); 334 zfcp_erp_adapter_block(adapter, clear_mask);
357 zfcp_scsi_schedule_rports_block(adapter); 335 zfcp_scsi_schedule_rports_block(adapter);
358 336
359 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, 337 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
360 adapter, NULL, NULL, id, 0); 338 adapter, NULL, NULL, dbftag, 0);
361} 339}
362 340
363/** 341/**
364 * zfcp_erp_adapter_reopen - Reopen adapter. 342 * zfcp_erp_adapter_reopen - Reopen adapter.
365 * @adapter: Adapter to reopen. 343 * @adapter: Adapter to reopen.
366 * @clear: Status flags to clear. 344 * @clear: Status flags to clear.
367 * @id: Id for debug trace event. 345 * @dbftag: Tag for debug trace event.
368 */ 346 */
369void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id) 347void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
348 char *dbftag)
370{ 349{
371 unsigned long flags; 350 unsigned long flags;
372 351
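The id parameter is renamed to dbftag throughout: the string is a tag for the s390 debug feature (dbf) trace record written by zfcp_dbf_rec_trig(), not an identifier of the object being recovered. A hypothetical caller, assuming the usual convention of tags no longer than ZFCP_DBF_TAG_LEN:

	/* trigger asynchronous port recovery; "xprtre1" is a made-up dbf tag */
	zfcp_erp_port_reopen(port, 0, "xprtre1");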
@@ -375,7 +354,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
375 354
376 write_lock_irqsave(&adapter->erp_lock, flags); 355 write_lock_irqsave(&adapter->erp_lock, flags);
377 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, 356 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
378 NULL, NULL, id, 0); 357 NULL, NULL, dbftag, 0);
379 write_unlock_irqrestore(&adapter->erp_lock, flags); 358 write_unlock_irqrestore(&adapter->erp_lock, flags);
380} 359}
381 360
@@ -383,25 +362,25 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
383 * zfcp_erp_adapter_shutdown - Shutdown adapter. 362 * zfcp_erp_adapter_shutdown - Shutdown adapter.
384 * @adapter: Adapter to shut down. 363 * @adapter: Adapter to shut down.
385 * @clear: Status flags to clear. 364 * @clear: Status flags to clear.
386 * @id: Id for debug trace event. 365 * @dbftag: Tag for debug trace event.
387 */ 366 */
388void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, 367void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
389 char *id) 368 char *dbftag)
390{ 369{
391 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 370 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
392 zfcp_erp_adapter_reopen(adapter, clear | flags, id); 371 zfcp_erp_adapter_reopen(adapter, clear | flags, dbftag);
393} 372}
394 373
395/** 374/**
396 * zfcp_erp_port_shutdown - Shutdown port 375 * zfcp_erp_port_shutdown - Shutdown port
397 * @port: Port to shut down. 376 * @port: Port to shut down.
398 * @clear: Status flags to clear. 377 * @clear: Status flags to clear.
399 * @id: Id for debug trace event. 378 * @dbftag: Tag for debug trace event.
400 */ 379 */
401void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id) 380void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *dbftag)
402{ 381{
403 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 382 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
404 zfcp_erp_port_reopen(port, clear | flags, id); 383 zfcp_erp_port_reopen(port, clear | flags, dbftag);
405} 384}
406 385
407static void zfcp_erp_port_block(struct zfcp_port *port, int clear) 386static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
@@ -411,53 +390,55 @@ static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
411} 390}
412 391
413static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, 392static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
414 char *id) 393 char *dbftag)
415{ 394{
416 zfcp_erp_port_block(port, clear); 395 zfcp_erp_port_block(port, clear);
417 zfcp_scsi_schedule_rport_block(port); 396 zfcp_scsi_schedule_rport_block(port);
418 397
419 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, 398 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
420 port->adapter, port, NULL, id, 0); 399 port->adapter, port, NULL, dbftag, 0);
421} 400}
422 401
423/** 402/**
424 * zfcp_erp_port_forced_reopen - Forced close of port and open again 403 * zfcp_erp_port_forced_reopen - Forced close of port and open again
425 * @port: Port to force close and to reopen. 404 * @port: Port to force close and to reopen.
426 * @clear: Status flags to clear. 405 * @clear: Status flags to clear.
427 * @id: Id for debug trace event. 406 * @dbftag: Tag for debug trace event.
428 */ 407 */
429void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id) 408void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
409 char *dbftag)
430{ 410{
431 unsigned long flags; 411 unsigned long flags;
432 struct zfcp_adapter *adapter = port->adapter; 412 struct zfcp_adapter *adapter = port->adapter;
433 413
434 write_lock_irqsave(&adapter->erp_lock, flags); 414 write_lock_irqsave(&adapter->erp_lock, flags);
435 _zfcp_erp_port_forced_reopen(port, clear, id); 415 _zfcp_erp_port_forced_reopen(port, clear, dbftag);
436 write_unlock_irqrestore(&adapter->erp_lock, flags); 416 write_unlock_irqrestore(&adapter->erp_lock, flags);
437} 417}
438 418
439static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) 419static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
420 char *dbftag)
440{ 421{
441 zfcp_erp_port_block(port, clear); 422 zfcp_erp_port_block(port, clear);
442 zfcp_scsi_schedule_rport_block(port); 423 zfcp_scsi_schedule_rport_block(port);
443 424
444 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, 425 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
445 port->adapter, port, NULL, id, 0); 426 port->adapter, port, NULL, dbftag, 0);
446} 427}
447 428
448/** 429/**
449 * zfcp_erp_port_reopen - trigger remote port recovery 430 * zfcp_erp_port_reopen - trigger remote port recovery
450 * @port: port to recover 431 * @port: port to recover
451 * @clear_mask: flags in port status to be cleared 432 * @clear: flags in port status to be cleared
452 * @id: Id for debug trace event. 433 * @dbftag: Tag for debug trace event.
453 */ 434 */
454void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) 435void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *dbftag)
455{ 436{
456 unsigned long flags; 437 unsigned long flags;
457 struct zfcp_adapter *adapter = port->adapter; 438 struct zfcp_adapter *adapter = port->adapter;
458 439
459 write_lock_irqsave(&adapter->erp_lock, flags); 440 write_lock_irqsave(&adapter->erp_lock, flags);
460 _zfcp_erp_port_reopen(port, clear, id); 441 _zfcp_erp_port_reopen(port, clear, dbftag);
461 write_unlock_irqrestore(&adapter->erp_lock, flags); 442 write_unlock_irqrestore(&adapter->erp_lock, flags);
462} 443}
463 444
@@ -467,8 +448,8 @@ static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
467 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask); 448 ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
468} 449}
469 450
470static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, 451static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear,
471 u32 act_status) 452 char *dbftag, u32 act_status)
472{ 453{
473 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 454 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
474 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 455 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -476,18 +457,18 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
476 zfcp_erp_lun_block(sdev, clear); 457 zfcp_erp_lun_block(sdev, clear);
477 458
478 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, 459 zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
479 zfcp_sdev->port, sdev, id, act_status); 460 zfcp_sdev->port, sdev, dbftag, act_status);
480} 461}
481 462
482/** 463/**
483 * zfcp_erp_lun_reopen - initiate reopen of a LUN 464 * zfcp_erp_lun_reopen - initiate reopen of a LUN
484 * @sdev: SCSI device / LUN to be reopened 465 * @sdev: SCSI device / LUN to be reopened
485 * @clear_mask: specifies flags in LUN status to be cleared 466 * @clear: specifies flags in LUN status to be cleared
486 * @id: Id for debug trace event. 467 * @dbftag: Tag for debug trace event.
487 * 468 *
 488 * The reopen is triggered asynchronously and handled by the ERP thread. 469
489 */ 470 */
490void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id) 471void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *dbftag)
491{ 472{
492 unsigned long flags; 473 unsigned long flags;
493 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 474 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -495,7 +476,7 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
495 struct zfcp_adapter *adapter = port->adapter; 476 struct zfcp_adapter *adapter = port->adapter;
496 477
497 write_lock_irqsave(&adapter->erp_lock, flags); 478 write_lock_irqsave(&adapter->erp_lock, flags);
498 _zfcp_erp_lun_reopen(sdev, clear, id, 0); 479 _zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
499 write_unlock_irqrestore(&adapter->erp_lock, flags); 480 write_unlock_irqrestore(&adapter->erp_lock, flags);
500} 481}
501 482
@@ -503,25 +484,25 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
503 * zfcp_erp_lun_shutdown - Shutdown LUN 484 * zfcp_erp_lun_shutdown - Shutdown LUN
504 * @sdev: SCSI device / LUN to shut down. 485 * @sdev: SCSI device / LUN to shut down.
505 * @clear: Status flags to clear. 486 * @clear: Status flags to clear.
506 * @id: Id for debug trace event. 487 * @dbftag: Tag for debug trace event.
507 */ 488 */
508void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id) 489void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *dbftag)
509{ 490{
510 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 491 int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
511 zfcp_erp_lun_reopen(sdev, clear | flags, id); 492 zfcp_erp_lun_reopen(sdev, clear | flags, dbftag);
512} 493}
513 494
514/** 495/**
515 * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion 496 * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
516 * @sdev: SCSI device / LUN to shut down. 497 * @sdev: SCSI device / LUN to shut down.
517 * @id: Id for debug trace event. 498 * @dbftag: Tag for debug trace event.
518 * 499 *
519 * Do not acquire a reference for the LUN when creating the ERP 500 * Do not acquire a reference for the LUN when creating the ERP
 520 * action. This is safe because the function waits for the ERP to 501
 521 * complete first. It allows shutting down the LUN even when the SCSI 502
 522 * device is in state SDEV_DEL, where scsi_device_get would fail. 503
523 */ 504 */
524void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id) 505void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *dbftag)
525{ 506{
526 unsigned long flags; 507 unsigned long flags;
527 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 508 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -530,7 +511,7 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
530 int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; 511 int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
531 512
532 write_lock_irqsave(&adapter->erp_lock, flags); 513 write_lock_irqsave(&adapter->erp_lock, flags);
533 _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF); 514 _zfcp_erp_lun_reopen(sdev, clear, dbftag, ZFCP_STATUS_ERP_NO_REF);
534 write_unlock_irqrestore(&adapter->erp_lock, flags); 515 write_unlock_irqrestore(&adapter->erp_lock, flags);
535 516
536 zfcp_erp_wait(adapter); 517 zfcp_erp_wait(adapter);
@@ -619,7 +600,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
619 600
620/** 601/**
621 * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request 602 * zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
622 * @data: ERP action (from timer data) 603 * @t: timer list entry embedded in zfcp FSF request
623 */ 604 */
624void zfcp_erp_timeout_handler(struct timer_list *t) 605void zfcp_erp_timeout_handler(struct timer_list *t)
625{ 606{
@@ -644,31 +625,31 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
644} 625}
645 626
646static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, 627static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
647 int clear, char *id) 628 int clear, char *dbftag)
648{ 629{
649 struct zfcp_port *port; 630 struct zfcp_port *port;
650 631
651 read_lock(&adapter->port_list_lock); 632 read_lock(&adapter->port_list_lock);
652 list_for_each_entry(port, &adapter->port_list, list) 633 list_for_each_entry(port, &adapter->port_list, list)
653 _zfcp_erp_port_reopen(port, clear, id); 634 _zfcp_erp_port_reopen(port, clear, dbftag);
654 read_unlock(&adapter->port_list_lock); 635 read_unlock(&adapter->port_list_lock);
655} 636}
656 637
657static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, 638static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
658 char *id) 639 char *dbftag)
659{ 640{
660 struct scsi_device *sdev; 641 struct scsi_device *sdev;
661 642
662 spin_lock(port->adapter->scsi_host->host_lock); 643 spin_lock(port->adapter->scsi_host->host_lock);
663 __shost_for_each_device(sdev, port->adapter->scsi_host) 644 __shost_for_each_device(sdev, port->adapter->scsi_host)
664 if (sdev_to_zfcp(sdev)->port == port) 645 if (sdev_to_zfcp(sdev)->port == port)
665 _zfcp_erp_lun_reopen(sdev, clear, id, 0); 646 _zfcp_erp_lun_reopen(sdev, clear, dbftag, 0);
666 spin_unlock(port->adapter->scsi_host->host_lock); 647 spin_unlock(port->adapter->scsi_host->host_lock);
667} 648}
668 649
669static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 650static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
670{ 651{
671 switch (act->action) { 652 switch (act->type) {
672 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 653 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
673 _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1"); 654 _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
674 break; 655 break;
@@ -686,7 +667,7 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
686 667
687static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act) 668static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
688{ 669{
689 switch (act->action) { 670 switch (act->type) {
690 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 671 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
691 _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1"); 672 _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
692 break; 673 break;
@@ -696,6 +677,9 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
696 case ZFCP_ERP_ACTION_REOPEN_PORT: 677 case ZFCP_ERP_ACTION_REOPEN_PORT:
697 _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3"); 678 _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
698 break; 679 break;
680 case ZFCP_ERP_ACTION_REOPEN_LUN:
681 /* NOP */
682 break;
699 } 683 }
700} 684}
701 685
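The /* NOP */ cases added here and in the following hunks make each switch over an enum exhaustive without a default label, so GCC's -Wswitch can flag any future enumerator that a switch forgets to handle. A minimal stand-alone illustration of the pattern (not zfcp code):

enum example { EX_A, EX_B, EX_C };

static int example_handle(enum example e)
{
	switch (e) {
	case EX_A:
		return 1;
	case EX_B:
		return 2;
	case EX_C:
		/* NOP: listed so -Wswitch stays quiet here today but
		 * still warns when a new enumerator is added later */
		break;
	}
	return 0;
}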
@@ -723,7 +707,8 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
723 _zfcp_erp_port_reopen(port, 0, "ereptp1"); 707 _zfcp_erp_port_reopen(port, 0, "ereptp1");
724} 708}
725 709
726static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) 710static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
711 struct zfcp_erp_action *erp_action)
727{ 712{
728 int retries; 713 int retries;
729 int sleep = 1; 714 int sleep = 1;
@@ -768,7 +753,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
768 return ZFCP_ERP_SUCCEEDED; 753 return ZFCP_ERP_SUCCEEDED;
769} 754}
770 755
771static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act) 756static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
757 struct zfcp_erp_action *act)
772{ 758{
773 int ret; 759 int ret;
774 struct zfcp_adapter *adapter = act->adapter; 760 struct zfcp_adapter *adapter = act->adapter;
@@ -793,7 +779,8 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
793 return ZFCP_ERP_SUCCEEDED; 779 return ZFCP_ERP_SUCCEEDED;
794} 780}
795 781
796static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act) 782static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
783 struct zfcp_erp_action *act)
797{ 784{
798 if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED) 785 if (zfcp_erp_adapter_strat_fsf_xconf(act) == ZFCP_ERP_FAILED)
799 return ZFCP_ERP_FAILED; 786 return ZFCP_ERP_FAILED;
@@ -832,7 +819,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
832 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 819 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
833} 820}
834 821
835static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act) 822static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open(
823 struct zfcp_erp_action *act)
836{ 824{
837 struct zfcp_adapter *adapter = act->adapter; 825 struct zfcp_adapter *adapter = act->adapter;
838 826
@@ -853,7 +841,8 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
853 return ZFCP_ERP_SUCCEEDED; 841 return ZFCP_ERP_SUCCEEDED;
854} 842}
855 843
856static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act) 844static enum zfcp_erp_act_result zfcp_erp_adapter_strategy(
845 struct zfcp_erp_action *act)
857{ 846{
858 struct zfcp_adapter *adapter = act->adapter; 847 struct zfcp_adapter *adapter = act->adapter;
859 848
@@ -871,7 +860,8 @@ static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
871 return ZFCP_ERP_SUCCEEDED; 860 return ZFCP_ERP_SUCCEEDED;
872} 861}
873 862
874static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act) 863static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy_close(
864 struct zfcp_erp_action *act)
875{ 865{
876 int retval; 866 int retval;
877 867
@@ -885,7 +875,8 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
885 return ZFCP_ERP_CONTINUES; 875 return ZFCP_ERP_CONTINUES;
886} 876}
887 877
888static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) 878static enum zfcp_erp_act_result zfcp_erp_port_forced_strategy(
879 struct zfcp_erp_action *erp_action)
889{ 880{
890 struct zfcp_port *port = erp_action->port; 881 struct zfcp_port *port = erp_action->port;
891 int status = atomic_read(&port->status); 882 int status = atomic_read(&port->status);
@@ -901,11 +892,19 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action)
901 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING: 892 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
902 if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN)) 893 if (!(status & ZFCP_STATUS_PORT_PHYS_OPEN))
903 return ZFCP_ERP_SUCCEEDED; 894 return ZFCP_ERP_SUCCEEDED;
895 break;
896 case ZFCP_ERP_STEP_PORT_CLOSING:
897 case ZFCP_ERP_STEP_PORT_OPENING:
898 case ZFCP_ERP_STEP_LUN_CLOSING:
899 case ZFCP_ERP_STEP_LUN_OPENING:
900 /* NOP */
901 break;
904 } 902 }
905 return ZFCP_ERP_FAILED; 903 return ZFCP_ERP_FAILED;
906} 904}
907 905
908static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action) 906static enum zfcp_erp_act_result zfcp_erp_port_strategy_close(
907 struct zfcp_erp_action *erp_action)
909{ 908{
910 int retval; 909 int retval;
911 910
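The step values enumerated by these switches come from the ERP step state machine that each strategy walks through. For orientation, a sketch of that enum; the bit values are an assumption reconstructed from the zfcp sources, not from this diff:

enum zfcp_erp_steps {
	ZFCP_ERP_STEP_UNINITIALIZED     = 0x0000,
	ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
	ZFCP_ERP_STEP_PORT_CLOSING      = 0x0100,
	ZFCP_ERP_STEP_PORT_OPENING      = 0x0800,
	ZFCP_ERP_STEP_LUN_CLOSING       = 0x1000,
	ZFCP_ERP_STEP_LUN_OPENING       = 0x2000,
};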
@@ -918,7 +917,8 @@ static int zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
918 return ZFCP_ERP_CONTINUES; 917 return ZFCP_ERP_CONTINUES;
919} 918}
920 919
921static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action) 920static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_port(
921 struct zfcp_erp_action *erp_action)
922{ 922{
923 int retval; 923 int retval;
924 924
@@ -944,7 +944,8 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
944 return zfcp_erp_port_strategy_open_port(act); 944 return zfcp_erp_port_strategy_open_port(act);
945} 945}
946 946
947static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) 947static enum zfcp_erp_act_result zfcp_erp_port_strategy_open_common(
948 struct zfcp_erp_action *act)
948{ 949{
949 struct zfcp_adapter *adapter = act->adapter; 950 struct zfcp_adapter *adapter = act->adapter;
950 struct zfcp_port *port = act->port; 951 struct zfcp_port *port = act->port;
@@ -975,12 +976,18 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
975 port->d_id = 0; 976 port->d_id = 0;
976 return ZFCP_ERP_FAILED; 977 return ZFCP_ERP_FAILED;
977 } 978 }
978 /* fall through otherwise */ 979 /* no early return otherwise, continue after switch case */
980 break;
981 case ZFCP_ERP_STEP_LUN_CLOSING:
982 case ZFCP_ERP_STEP_LUN_OPENING:
983 /* NOP */
984 break;
979 } 985 }
980 return ZFCP_ERP_FAILED; 986 return ZFCP_ERP_FAILED;
981} 987}
982 988
983static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) 989static enum zfcp_erp_act_result zfcp_erp_port_strategy(
990 struct zfcp_erp_action *erp_action)
984{ 991{
985 struct zfcp_port *port = erp_action->port; 992 struct zfcp_port *port = erp_action->port;
986 int p_status = atomic_read(&port->status); 993 int p_status = atomic_read(&port->status);
@@ -999,6 +1006,12 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
999 if (p_status & ZFCP_STATUS_COMMON_OPEN) 1006 if (p_status & ZFCP_STATUS_COMMON_OPEN)
1000 return ZFCP_ERP_FAILED; 1007 return ZFCP_ERP_FAILED;
1001 break; 1008 break;
1009 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
1010 case ZFCP_ERP_STEP_PORT_OPENING:
1011 case ZFCP_ERP_STEP_LUN_CLOSING:
1012 case ZFCP_ERP_STEP_LUN_OPENING:
1013 /* NOP */
1014 break;
1002 } 1015 }
1003 1016
1004close_init_done: 1017close_init_done:
@@ -1016,7 +1029,8 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
1016 &zfcp_sdev->status); 1029 &zfcp_sdev->status);
1017} 1030}
1018 1031
1019static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action) 1032static enum zfcp_erp_act_result zfcp_erp_lun_strategy_close(
1033 struct zfcp_erp_action *erp_action)
1020{ 1034{
1021 int retval = zfcp_fsf_close_lun(erp_action); 1035 int retval = zfcp_fsf_close_lun(erp_action);
1022 if (retval == -ENOMEM) 1036 if (retval == -ENOMEM)
@@ -1027,7 +1041,8 @@ static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
1027 return ZFCP_ERP_CONTINUES; 1041 return ZFCP_ERP_CONTINUES;
1028} 1042}
1029 1043
1030static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action) 1044static enum zfcp_erp_act_result zfcp_erp_lun_strategy_open(
1045 struct zfcp_erp_action *erp_action)
1031{ 1046{
1032 int retval = zfcp_fsf_open_lun(erp_action); 1047 int retval = zfcp_fsf_open_lun(erp_action);
1033 if (retval == -ENOMEM) 1048 if (retval == -ENOMEM)
@@ -1038,7 +1053,8 @@ static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
1038 return ZFCP_ERP_CONTINUES; 1053 return ZFCP_ERP_CONTINUES;
1039} 1054}
1040 1055
1041static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action) 1056static enum zfcp_erp_act_result zfcp_erp_lun_strategy(
1057 struct zfcp_erp_action *erp_action)
1042{ 1058{
1043 struct scsi_device *sdev = erp_action->sdev; 1059 struct scsi_device *sdev = erp_action->sdev;
1044 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 1060 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -1048,7 +1064,8 @@ static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
1048 zfcp_erp_lun_strategy_clearstati(sdev); 1064 zfcp_erp_lun_strategy_clearstati(sdev);
1049 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) 1065 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
1050 return zfcp_erp_lun_strategy_close(erp_action); 1066 return zfcp_erp_lun_strategy_close(erp_action);
1051 /* already closed, fall through */ 1067 /* already closed */
1068 /* fall through */
1052 case ZFCP_ERP_STEP_LUN_CLOSING: 1069 case ZFCP_ERP_STEP_LUN_CLOSING:
1053 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) 1070 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
1054 return ZFCP_ERP_FAILED; 1071 return ZFCP_ERP_FAILED;
@@ -1059,11 +1076,18 @@ static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
1059 case ZFCP_ERP_STEP_LUN_OPENING: 1076 case ZFCP_ERP_STEP_LUN_OPENING:
1060 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) 1077 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
1061 return ZFCP_ERP_SUCCEEDED; 1078 return ZFCP_ERP_SUCCEEDED;
1079 break;
1080 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
1081 case ZFCP_ERP_STEP_PORT_CLOSING:
1082 case ZFCP_ERP_STEP_PORT_OPENING:
1083 /* NOP */
1084 break;
1062 } 1085 }
1063 return ZFCP_ERP_FAILED; 1086 return ZFCP_ERP_FAILED;
1064} 1087}
1065 1088
1066static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result) 1089static enum zfcp_erp_act_result zfcp_erp_strategy_check_lun(
1090 struct scsi_device *sdev, enum zfcp_erp_act_result result)
1067{ 1091{
1068 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 1092 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1069 1093
@@ -1084,6 +1108,12 @@ static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
1084 ZFCP_STATUS_COMMON_ERP_FAILED); 1108 ZFCP_STATUS_COMMON_ERP_FAILED);
1085 } 1109 }
1086 break; 1110 break;
1111 case ZFCP_ERP_CONTINUES:
1112 case ZFCP_ERP_EXIT:
1113 case ZFCP_ERP_DISMISSED:
1114 case ZFCP_ERP_NOMEM:
1115 /* NOP */
1116 break;
1087 } 1117 }
1088 1118
1089 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 1119 if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1093,7 +1123,8 @@ static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
1093 return result; 1123 return result;
1094} 1124}
1095 1125
1096static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) 1126static enum zfcp_erp_act_result zfcp_erp_strategy_check_port(
1127 struct zfcp_port *port, enum zfcp_erp_act_result result)
1097{ 1128{
1098 switch (result) { 1129 switch (result) {
1099 case ZFCP_ERP_SUCCEEDED : 1130 case ZFCP_ERP_SUCCEEDED :
@@ -1115,6 +1146,12 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1115 ZFCP_STATUS_COMMON_ERP_FAILED); 1146 ZFCP_STATUS_COMMON_ERP_FAILED);
1116 } 1147 }
1117 break; 1148 break;
1149 case ZFCP_ERP_CONTINUES:
1150 case ZFCP_ERP_EXIT:
1151 case ZFCP_ERP_DISMISSED:
1152 case ZFCP_ERP_NOMEM:
1153 /* NOP */
1154 break;
1118 } 1155 }
1119 1156
1120 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 1157 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1124,8 +1161,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1124 return result; 1161 return result;
1125} 1162}
1126 1163
1127static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, 1164static enum zfcp_erp_act_result zfcp_erp_strategy_check_adapter(
1128 int result) 1165 struct zfcp_adapter *adapter, enum zfcp_erp_act_result result)
1129{ 1166{
1130 switch (result) { 1167 switch (result) {
1131 case ZFCP_ERP_SUCCEEDED : 1168 case ZFCP_ERP_SUCCEEDED :
@@ -1143,6 +1180,12 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
1143 ZFCP_STATUS_COMMON_ERP_FAILED); 1180 ZFCP_STATUS_COMMON_ERP_FAILED);
1144 } 1181 }
1145 break; 1182 break;
1183 case ZFCP_ERP_CONTINUES:
1184 case ZFCP_ERP_EXIT:
1185 case ZFCP_ERP_DISMISSED:
1186 case ZFCP_ERP_NOMEM:
1187 /* NOP */
1188 break;
1146 } 1189 }
1147 1190
1148 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { 1191 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
@@ -1152,14 +1195,14 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
1152 return result; 1195 return result;
1153} 1196}
1154 1197
1155static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, 1198static enum zfcp_erp_act_result zfcp_erp_strategy_check_target(
1156 int result) 1199 struct zfcp_erp_action *erp_action, enum zfcp_erp_act_result result)
1157{ 1200{
1158 struct zfcp_adapter *adapter = erp_action->adapter; 1201 struct zfcp_adapter *adapter = erp_action->adapter;
1159 struct zfcp_port *port = erp_action->port; 1202 struct zfcp_port *port = erp_action->port;
1160 struct scsi_device *sdev = erp_action->sdev; 1203 struct scsi_device *sdev = erp_action->sdev;
1161 1204
1162 switch (erp_action->action) { 1205 switch (erp_action->type) {
1163 1206
1164 case ZFCP_ERP_ACTION_REOPEN_LUN: 1207 case ZFCP_ERP_ACTION_REOPEN_LUN:
1165 result = zfcp_erp_strategy_check_lun(sdev, result); 1208 result = zfcp_erp_strategy_check_lun(sdev, result);
@@ -1192,16 +1235,17 @@ static int zfcp_erp_strat_change_det(atomic_t *target_status, u32 erp_status)
1192 return 0; 1235 return 0;
1193} 1236}
1194 1237
1195static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) 1238static enum zfcp_erp_act_result zfcp_erp_strategy_statechange(
1239 struct zfcp_erp_action *act, enum zfcp_erp_act_result result)
1196{ 1240{
1197 int action = act->action; 1241 enum zfcp_erp_act_type type = act->type;
1198 struct zfcp_adapter *adapter = act->adapter; 1242 struct zfcp_adapter *adapter = act->adapter;
1199 struct zfcp_port *port = act->port; 1243 struct zfcp_port *port = act->port;
1200 struct scsi_device *sdev = act->sdev; 1244 struct scsi_device *sdev = act->sdev;
1201 struct zfcp_scsi_dev *zfcp_sdev; 1245 struct zfcp_scsi_dev *zfcp_sdev;
1202 u32 erp_status = act->status; 1246 u32 erp_status = act->status;
1203 1247
1204 switch (action) { 1248 switch (type) {
1205 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1249 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1206 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) { 1250 if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
1207 _zfcp_erp_adapter_reopen(adapter, 1251 _zfcp_erp_adapter_reopen(adapter,
@@ -1231,7 +1275,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
1231 } 1275 }
1232 break; 1276 break;
1233 } 1277 }
1234 return ret; 1278 return result;
1235} 1279}
1236 1280
1237static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) 1281static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
@@ -1248,7 +1292,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1248 list_del(&erp_action->list); 1292 list_del(&erp_action->list);
1249 zfcp_dbf_rec_run("eractd1", erp_action); 1293 zfcp_dbf_rec_run("eractd1", erp_action);
1250 1294
1251 switch (erp_action->action) { 1295 switch (erp_action->type) {
1252 case ZFCP_ERP_ACTION_REOPEN_LUN: 1296 case ZFCP_ERP_ACTION_REOPEN_LUN:
1253 zfcp_sdev = sdev_to_zfcp(erp_action->sdev); 1297 zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1254 atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE, 1298 atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
@@ -1324,13 +1368,14 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
1324 write_unlock_irqrestore(&adapter->erp_lock, flags); 1368 write_unlock_irqrestore(&adapter->erp_lock, flags);
1325} 1369}
1326 1370
1327static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) 1371static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act,
1372 enum zfcp_erp_act_result result)
1328{ 1373{
1329 struct zfcp_adapter *adapter = act->adapter; 1374 struct zfcp_adapter *adapter = act->adapter;
1330 struct zfcp_port *port = act->port; 1375 struct zfcp_port *port = act->port;
1331 struct scsi_device *sdev = act->sdev; 1376 struct scsi_device *sdev = act->sdev;
1332 1377
1333 switch (act->action) { 1378 switch (act->type) {
1334 case ZFCP_ERP_ACTION_REOPEN_LUN: 1379 case ZFCP_ERP_ACTION_REOPEN_LUN:
1335 if (!(act->status & ZFCP_STATUS_ERP_NO_REF)) 1380 if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
1336 scsi_device_put(sdev); 1381 scsi_device_put(sdev);
@@ -1364,9 +1409,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1364 } 1409 }
1365} 1410}
1366 1411
1367static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) 1412static enum zfcp_erp_act_result zfcp_erp_strategy_do_action(
1413 struct zfcp_erp_action *erp_action)
1368{ 1414{
1369 switch (erp_action->action) { 1415 switch (erp_action->type) {
1370 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1416 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1371 return zfcp_erp_adapter_strategy(erp_action); 1417 return zfcp_erp_adapter_strategy(erp_action);
1372 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1418 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1379,9 +1425,10 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
1379 return ZFCP_ERP_FAILED; 1425 return ZFCP_ERP_FAILED;
1380} 1426}
1381 1427
1382static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) 1428static enum zfcp_erp_act_result zfcp_erp_strategy(
1429 struct zfcp_erp_action *erp_action)
1383{ 1430{
1384 int retval; 1431 enum zfcp_erp_act_result result;
1385 unsigned long flags; 1432 unsigned long flags;
1386 struct zfcp_adapter *adapter = erp_action->adapter; 1433 struct zfcp_adapter *adapter = erp_action->adapter;
1387 1434
@@ -1392,12 +1439,12 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1392 1439
1393 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) { 1440 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
1394 zfcp_erp_action_dequeue(erp_action); 1441 zfcp_erp_action_dequeue(erp_action);
1395 retval = ZFCP_ERP_DISMISSED; 1442 result = ZFCP_ERP_DISMISSED;
1396 goto unlock; 1443 goto unlock;
1397 } 1444 }
1398 1445
1399 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { 1446 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
1400 retval = ZFCP_ERP_FAILED; 1447 result = ZFCP_ERP_FAILED;
1401 goto check_target; 1448 goto check_target;
1402 } 1449 }
1403 1450
@@ -1405,13 +1452,13 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1405 1452
1406 /* no lock to allow for blocking operations */ 1453 /* no lock to allow for blocking operations */
1407 write_unlock_irqrestore(&adapter->erp_lock, flags); 1454 write_unlock_irqrestore(&adapter->erp_lock, flags);
1408 retval = zfcp_erp_strategy_do_action(erp_action); 1455 result = zfcp_erp_strategy_do_action(erp_action);
1409 write_lock_irqsave(&adapter->erp_lock, flags); 1456 write_lock_irqsave(&adapter->erp_lock, flags);
1410 1457
1411 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) 1458 if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
1412 retval = ZFCP_ERP_CONTINUES; 1459 result = ZFCP_ERP_CONTINUES;
1413 1460
1414 switch (retval) { 1461 switch (result) {
1415 case ZFCP_ERP_NOMEM: 1462 case ZFCP_ERP_NOMEM:
1416 if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) { 1463 if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
1417 ++adapter->erp_low_mem_count; 1464 ++adapter->erp_low_mem_count;
@@ -1421,7 +1468,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1421 _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1"); 1468 _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
1422 else { 1469 else {
1423 zfcp_erp_strategy_memwait(erp_action); 1470 zfcp_erp_strategy_memwait(erp_action);
1424 retval = ZFCP_ERP_CONTINUES; 1471 result = ZFCP_ERP_CONTINUES;
1425 } 1472 }
1426 goto unlock; 1473 goto unlock;
1427 1474
@@ -1431,27 +1478,33 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
1431 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM; 1478 erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
1432 } 1479 }
1433 goto unlock; 1480 goto unlock;
1481 case ZFCP_ERP_SUCCEEDED:
1482 case ZFCP_ERP_FAILED:
1483 case ZFCP_ERP_EXIT:
1484 case ZFCP_ERP_DISMISSED:
1485 /* NOP */
1486 break;
1434 } 1487 }
1435 1488
1436check_target: 1489check_target:
1437 retval = zfcp_erp_strategy_check_target(erp_action, retval); 1490 result = zfcp_erp_strategy_check_target(erp_action, result);
1438 zfcp_erp_action_dequeue(erp_action); 1491 zfcp_erp_action_dequeue(erp_action);
1439 retval = zfcp_erp_strategy_statechange(erp_action, retval); 1492 result = zfcp_erp_strategy_statechange(erp_action, result);
1440 if (retval == ZFCP_ERP_EXIT) 1493 if (result == ZFCP_ERP_EXIT)
1441 goto unlock; 1494 goto unlock;
1442 if (retval == ZFCP_ERP_SUCCEEDED) 1495 if (result == ZFCP_ERP_SUCCEEDED)
1443 zfcp_erp_strategy_followup_success(erp_action); 1496 zfcp_erp_strategy_followup_success(erp_action);
1444 if (retval == ZFCP_ERP_FAILED) 1497 if (result == ZFCP_ERP_FAILED)
1445 zfcp_erp_strategy_followup_failed(erp_action); 1498 zfcp_erp_strategy_followup_failed(erp_action);
1446 1499
1447 unlock: 1500 unlock:
1448 write_unlock_irqrestore(&adapter->erp_lock, flags); 1501 write_unlock_irqrestore(&adapter->erp_lock, flags);
1449 1502
1450 if (retval != ZFCP_ERP_CONTINUES) 1503 if (result != ZFCP_ERP_CONTINUES)
1451 zfcp_erp_action_cleanup(erp_action, retval); 1504 zfcp_erp_action_cleanup(erp_action, result);
1452 1505
1453 kref_put(&adapter->ref, zfcp_adapter_release); 1506 kref_put(&adapter->ref, zfcp_adapter_release);
1454 return retval; 1507 return result;
1455} 1508}
1456 1509
1457static int zfcp_erp_thread(void *data) 1510static int zfcp_erp_thread(void *data)
@@ -1489,7 +1542,7 @@ static int zfcp_erp_thread(void *data)
1489 * zfcp_erp_thread_setup - Start ERP thread for adapter 1542 * zfcp_erp_thread_setup - Start ERP thread for adapter
1490 * @adapter: Adapter to start the ERP thread for 1543 * @adapter: Adapter to start the ERP thread for
1491 * 1544 *
1492 * Returns 0 on success or error code from kernel_thread() 1545 * Return: 0 on success, or error code from kthread_run().
1493 */ 1546 */
1494int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) 1547int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
1495{ 1548{
@@ -1694,11 +1747,11 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
1694/** 1747/**
1695 * zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait. 1748 * zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
1696 * @adapter: Pointer to zfcp_adapter to reopen. 1749 * @adapter: Pointer to zfcp_adapter to reopen.
1697 * @id: Trace tag string of length %ZFCP_DBF_TAG_LEN. 1750 * @dbftag: Trace tag string of length %ZFCP_DBF_TAG_LEN.
1698 */ 1751 */
1699void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id) 1752void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *dbftag)
1700{ 1753{
1701 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); 1754 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
1702 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id); 1755 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
1703 zfcp_erp_wait(adapter); 1756 zfcp_erp_wait(adapter);
1704} 1757}
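With retval renamed to result, the tail of zfcp_erp_strategy() reads as a small pipeline from action execution to follow-up scheduling. Condensed for illustration only; locking, the low-memory path and the early exits are omitted:

	result = zfcp_erp_strategy_do_action(erp_action);
	result = zfcp_erp_strategy_check_target(erp_action, result);
	result = zfcp_erp_strategy_statechange(erp_action, result);
	if (result == ZFCP_ERP_SUCCEEDED)
		zfcp_erp_strategy_followup_success(erp_action);
	else if (result == ZFCP_ERP_FAILED)
		zfcp_erp_strategy_followup_failed(erp_action);
	/* ZFCP_ERP_EXIT and ZFCP_ERP_CONTINUES schedule no follow-up */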
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bd0c5a9f04cb..3fce47b0b21b 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -59,14 +59,15 @@ extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
59/* zfcp_erp.c */ 59/* zfcp_erp.c */
60extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); 60extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
61extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); 61extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
62extern void zfcp_erp_port_forced_no_port_dbf(char *id, 62extern void zfcp_erp_port_forced_no_port_dbf(char *dbftag,
63 struct zfcp_adapter *adapter, 63 struct zfcp_adapter *adapter,
64 u64 port_name, u32 port_id); 64 u64 port_name, u32 port_id);
65extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *); 65extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
66extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *); 66extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
67extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); 67extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
68extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); 68extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
69extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id); 69extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
70 char *dbftag);
70extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); 71extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
71extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); 72extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
72extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); 73extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
@@ -79,7 +80,8 @@ extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
79extern void zfcp_erp_wait(struct zfcp_adapter *); 80extern void zfcp_erp_wait(struct zfcp_adapter *);
80extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); 81extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
81extern void zfcp_erp_timeout_handler(struct timer_list *t); 82extern void zfcp_erp_timeout_handler(struct timer_list *t);
82extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id); 83extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter,
84 char *dbftag);
83 85
84/* zfcp_fc.c */ 86/* zfcp_fc.c */
85extern struct kmem_cache *zfcp_fc_req_cache; 87extern struct kmem_cache *zfcp_fc_req_cache;
@@ -144,6 +146,7 @@ extern void zfcp_qdio_close(struct zfcp_qdio *);
144extern void zfcp_qdio_siosl(struct zfcp_adapter *); 146extern void zfcp_qdio_siosl(struct zfcp_adapter *);
145 147
146/* zfcp_scsi.c */ 148/* zfcp_scsi.c */
149extern bool zfcp_experimental_dix;
147extern struct scsi_transport_template *zfcp_scsi_transport_template; 150extern struct scsi_transport_template *zfcp_scsi_transport_template;
148extern int zfcp_scsi_adapter_register(struct zfcp_adapter *); 151extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
149extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *); 152extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
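The new zfcp_experimental_dix extern suggests that DIX data integrity support is now an explicit opt-in. A hedged sketch of the matching definition one would expect in zfcp_scsi.c; the parameter name, permission bits and description text are assumptions:

bool zfcp_experimental_dix;
module_param_named(dix, zfcp_experimental_dix, bool, 0400);
MODULE_PARM_DESC(dix, "Enable experimental DIX (data integrity extension) support");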
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index f6c415d6ef48..db00b5e3abbe 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -312,7 +312,7 @@ static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
312 312
313/** 313/**
314 * zfcp_fc_incoming_els - handle incoming ELS 314 * zfcp_fc_incoming_els - handle incoming ELS
315 * @fsf_req - request which contains incoming ELS 315 * @fsf_req: request which contains incoming ELS
316 */ 316 */
317void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) 317void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
318{ 318{
@@ -597,6 +597,48 @@ void zfcp_fc_test_link(struct zfcp_port *port)
597 put_device(&port->dev); 597 put_device(&port->dev);
598} 598}
599 599
600/**
601 * zfcp_fc_sg_free_table - free memory used by scatterlists
602 * @sg: pointer to scatterlist
 603 * @count: number of scatterlists to be freed
 604 * the scatterlists are always expected to reference pages
605 */
606static void zfcp_fc_sg_free_table(struct scatterlist *sg, int count)
607{
608 int i;
609
610 for (i = 0; i < count; i++, sg++)
611 if (sg)
612 free_page((unsigned long) sg_virt(sg));
613 else
614 break;
615}
616
617/**
 618 * zfcp_fc_sg_setup_table - initialize scatterlists and allocate buffers
 619 * @sg: pointer to struct scatterlist
 620 * @count: number of scatterlists to populate with buffers
 621 * of one zeroed page each
622 *
623 * Returns: 0 on success, -ENOMEM otherwise
624 */
625static int zfcp_fc_sg_setup_table(struct scatterlist *sg, int count)
626{
627 void *addr;
628 int i;
629
630 sg_init_table(sg, count);
631 for (i = 0; i < count; i++, sg++) {
632 addr = (void *) get_zeroed_page(GFP_KERNEL);
633 if (!addr) {
634 zfcp_fc_sg_free_table(sg, i);
635 return -ENOMEM;
636 }
637 sg_set_buf(sg, addr, PAGE_SIZE);
638 }
639 return 0;
640}
641
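The two helpers above are FC-local replacements for the former zfcp_sg_setup_table()/zfcp_sg_free_table() pair, as the renamed call sites in the next hunks show. A hypothetical caller that backs an eight-entry table with zeroed pages and tears it down again:

static int example_setup_rsp_buffers(struct scatterlist sg[8])
{
	if (zfcp_fc_sg_setup_table(sg, 8))
		return -ENOMEM;	/* pages allocated so far were freed again */
	/* ... use sg, e.g. as the response buffer of a CT request ... */
	zfcp_fc_sg_free_table(sg, 8);
	return 0;
}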
600static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num) 642static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
601{ 643{
602 struct zfcp_fc_req *fc_req; 644 struct zfcp_fc_req *fc_req;
@@ -605,7 +647,7 @@ static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
605 if (!fc_req) 647 if (!fc_req)
606 return NULL; 648 return NULL;
607 649
608 if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) { 650 if (zfcp_fc_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
609 kmem_cache_free(zfcp_fc_req_cache, fc_req); 651 kmem_cache_free(zfcp_fc_req_cache, fc_req);
610 return NULL; 652 return NULL;
611 } 653 }
@@ -763,7 +805,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
763 break; 805 break;
764 } 806 }
765 } 807 }
766 zfcp_sg_free_table(&fc_req->sg_rsp, buf_num); 808 zfcp_fc_sg_free_table(&fc_req->sg_rsp, buf_num);
767 kmem_cache_free(zfcp_fc_req_cache, fc_req); 809 kmem_cache_free(zfcp_fc_req_cache, fc_req);
768out: 810out:
769 zfcp_fc_wka_port_put(&adapter->gs->ds); 811 zfcp_fc_wka_port_put(&adapter->gs->ds);
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 3cd74729cfb9..6902ae1f8e4f 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -121,9 +121,24 @@ struct zfcp_fc_rspn_req {
121/** 121/**
122 * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp 122 * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
123 * @ct_els: data required for issuing fsf command 123 * @ct_els: data required for issuing fsf command
124 * @sg_req: scatterlist entry for request data 124 * @sg_req: scatterlist entry for request data, refers to embedded @u submember
125 * @sg_rsp: scatterlist entry for response data 125 * @sg_rsp: scatterlist entry for response data, refers to embedded @u submember
126 * @u: request specific data 126 * @u: request and response specific data
127 * @u.adisc: ADISC specific data
128 * @u.adisc.req: ADISC request
129 * @u.adisc.rsp: ADISC response
130 * @u.gid_pn: GID_PN specific data
131 * @u.gid_pn.req: GID_PN request
132 * @u.gid_pn.rsp: GID_PN response
133 * @u.gpn_ft: GPN_FT specific data
134 * @u.gpn_ft.sg_rsp2: GPN_FT response, not embedded here, allocated elsewhere
135 * @u.gpn_ft.req: GPN_FT request
136 * @u.gspn: GSPN specific data
137 * @u.gspn.req: GSPN request
138 * @u.gspn.rsp: GSPN response
139 * @u.rspn: RSPN specific data
140 * @u.rspn.req: RSPN request
141 * @u.rspn.rsp: RSPN response
127 */ 142 */
128struct zfcp_fc_req { 143struct zfcp_fc_req {
129 struct zfcp_fsf_ct_els ct_els; 144 struct zfcp_fsf_ct_els ct_els;
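For orientation, a reduced sketch of the container that the new kernel-doc describes. The ct_els, sg_req and sg_rsp members follow from the doc text; the union layout is an assumption with most members elided:

struct zfcp_fc_req {
	struct zfcp_fsf_ct_els ct_els;
	struct scatterlist sg_req;
	struct scatterlist sg_rsp;
	union {
		struct {
			struct fc_els_adisc req;
			struct fc_els_adisc rsp;
		} adisc;
		/* gid_pn, gpn_ft, gspn and rspn members elided */
	} u;
};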
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 3c86e27f094d..d94496ee6883 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -19,6 +19,11 @@
19#include "zfcp_qdio.h" 19#include "zfcp_qdio.h"
20#include "zfcp_reqlist.h" 20#include "zfcp_reqlist.h"
21 21
22/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
23#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
24/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
25#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
26
22struct kmem_cache *zfcp_fsf_qtcb_cache; 27struct kmem_cache *zfcp_fsf_qtcb_cache;
23 28
24static void zfcp_fsf_request_timeout_handler(struct timer_list *t) 29static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
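Both timeouts are jiffies values (seconds times HZ) and now sit next to their users, with comments spelling out what each one guards. Usage as in the abort and task-management hunks further down:

	/* arm the request timer for a scsi_eh abort or FCP TMF */
	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);	/* 10 * HZ */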
@@ -74,18 +79,18 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
74 79
75/** 80/**
76 * zfcp_fsf_req_free - free memory used by fsf request 81 * zfcp_fsf_req_free - free memory used by fsf request
77 * @fsf_req: pointer to struct zfcp_fsf_req 82 * @req: pointer to struct zfcp_fsf_req
78 */ 83 */
79void zfcp_fsf_req_free(struct zfcp_fsf_req *req) 84void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
80{ 85{
81 if (likely(req->pool)) { 86 if (likely(req->pool)) {
82 if (likely(req->qtcb)) 87 if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
83 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); 88 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
84 mempool_free(req, req->pool); 89 mempool_free(req, req->pool);
85 return; 90 return;
86 } 91 }
87 92
88 if (likely(req->qtcb)) 93 if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
89 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb); 94 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
90 kfree(req); 95 kfree(req);
91} 96}
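zfcp_fsf_req_is_status_read_buffer() replaces the open-coded req->qtcb checks; unsolicited status read buffers are the only FSF requests that carry no QTCB. A plausible shape for the helper, which is defined outside this hunk, so this is an assumption:

static inline bool
zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
{
	return req->qtcb == NULL;
}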
@@ -379,7 +384,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
379 384
380/** 385/**
381 * zfcp_fsf_req_complete - process completion of a FSF request 386 * zfcp_fsf_req_complete - process completion of a FSF request
382 * @fsf_req: The FSF request that has been completed. 387 * @req: The FSF request that has been completed.
383 * 388 *
384 * When a request has been completed either from the FCP adapter, 389 * When a request has been completed either from the FCP adapter,
385 * or it has been dismissed due to a queue shutdown, this function 390 * or it has been dismissed due to a queue shutdown, this function
@@ -388,7 +393,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
388 */ 393 */
389static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) 394static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
390{ 395{
391 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { 396 if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
392 zfcp_fsf_status_read_handler(req); 397 zfcp_fsf_status_read_handler(req);
393 return; 398 return;
394 } 399 }
@@ -705,7 +710,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
705 init_completion(&req->completion); 710 init_completion(&req->completion);
706 711
707 req->adapter = adapter; 712 req->adapter = adapter;
708 req->fsf_command = fsf_cmd;
709 req->req_id = adapter->req_no; 713 req->req_id = adapter->req_no;
710 714
711 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { 715 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
@@ -720,14 +724,13 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
720 return ERR_PTR(-ENOMEM); 724 return ERR_PTR(-ENOMEM);
721 } 725 }
722 726
723 req->seq_no = adapter->fsf_req_seq_no;
724 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; 727 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
725 req->qtcb->prefix.req_id = req->req_id; 728 req->qtcb->prefix.req_id = req->req_id;
726 req->qtcb->prefix.ulp_info = 26; 729 req->qtcb->prefix.ulp_info = 26;
727 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command]; 730 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
728 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; 731 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
729 req->qtcb->header.req_handle = req->req_id; 732 req->qtcb->header.req_handle = req->req_id;
730 req->qtcb->header.fsf_command = req->fsf_command; 733 req->qtcb->header.fsf_command = fsf_cmd;
731 } 734 }
732 735
733 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, 736 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
@@ -740,7 +743,6 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
740{ 743{
741 struct zfcp_adapter *adapter = req->adapter; 744 struct zfcp_adapter *adapter = req->adapter;
742 struct zfcp_qdio *qdio = adapter->qdio; 745 struct zfcp_qdio *qdio = adapter->qdio;
743 int with_qtcb = (req->qtcb != NULL);
744 int req_id = req->req_id; 746 int req_id = req->req_id;
745 747
746 zfcp_reqlist_add(adapter->req_list, req); 748 zfcp_reqlist_add(adapter->req_list, req);
@@ -756,7 +758,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
756 } 758 }
757 759
758 /* Don't increase for unsolicited status */ 760 /* Don't increase for unsolicited status */
759 if (with_qtcb) 761 if (!zfcp_fsf_req_is_status_read_buffer(req))
760 adapter->fsf_req_seq_no++; 762 adapter->fsf_req_seq_no++;
761 adapter->req_no++; 763 adapter->req_no++;
762 764
@@ -765,8 +767,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
765 767
766/** 768/**
767 * zfcp_fsf_status_read - send status read request 769 * zfcp_fsf_status_read - send status read request
768 * @adapter: pointer to struct zfcp_adapter 770 * @qdio: pointer to struct zfcp_qdio
769 * @req_flags: request flags
 770 * Returns: 0 on success, a negative error code otherwise 771
771 */ 772 */
772int zfcp_fsf_status_read(struct zfcp_qdio *qdio) 773int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
@@ -912,7 +913,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
912 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 913 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
913 req->qtcb->bottom.support.req_handle = (u64) old_req_id; 914 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
914 915
915 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 916 zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
916 if (!zfcp_fsf_req_send(req)) 917 if (!zfcp_fsf_req_send(req))
917 goto out; 918 goto out;
918 919
@@ -1057,8 +1058,10 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1057 1058
1058/** 1059/**
1059 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) 1060 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1061 * @wka_port: pointer to zfcp WKA port to send CT/GS to
1060 * @ct: pointer to struct zfcp_send_ct with data for request 1062 * @ct: pointer to struct zfcp_send_ct with data for request
1061 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1063 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1064 * @timeout: timeout that hardware should use, and a later software timeout
1062 */ 1065 */
1063int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, 1066int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1064 struct zfcp_fsf_ct_els *ct, mempool_t *pool, 1067 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
@@ -1151,7 +1154,10 @@ skip_fsfstatus:
1151 1154
1152/** 1155/**
1153 * zfcp_fsf_send_els - initiate an ELS command (FC-FS) 1156 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1157 * @adapter: pointer to zfcp adapter
1158 * @d_id: N_Port_ID to send ELS to
1154 * @els: pointer to struct zfcp_send_els with data for the command 1159 * @els: pointer to struct zfcp_send_els with data for the command
1160 * @timeout: timeout that hardware should use, and a later software timeout
1155 */ 1161 */
1156int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, 1162int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1157 struct zfcp_fsf_ct_els *els, unsigned int timeout) 1163 struct zfcp_fsf_ct_els *els, unsigned int timeout)
@@ -1809,7 +1815,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1809 case FSF_LUN_SHARING_VIOLATION: 1815 case FSF_LUN_SHARING_VIOLATION:
1810 if (qual->word[0]) 1816 if (qual->word[0])
1811 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, 1817 dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
1812 "LUN 0x%Lx on port 0x%Lx is already in " 1818 "LUN 0x%016Lx on port 0x%016Lx is already in "
1813 "use by CSS%d, MIF Image ID %x\n", 1819 "use by CSS%d, MIF Image ID %x\n",
1814 zfcp_scsi_dev_lun(sdev), 1820 zfcp_scsi_dev_lun(sdev),
1815 (unsigned long long)zfcp_sdev->port->wwpn, 1821 (unsigned long long)zfcp_sdev->port->wwpn,
@@ -1986,7 +1992,7 @@ out:
1986 return retval; 1992 return retval;
1987} 1993}
1988 1994
1989static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat) 1995static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
1990{ 1996{
1991 lat_rec->sum += lat; 1997 lat_rec->sum += lat;
1992 lat_rec->min = min(lat_rec->min, lat); 1998 lat_rec->min = min(lat_rec->min, lat);
@@ -1996,7 +2002,7 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1996static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) 2002static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1997{ 2003{
1998 struct fsf_qual_latency_info *lat_in; 2004 struct fsf_qual_latency_info *lat_in;
1999 struct latency_cont *lat = NULL; 2005 struct zfcp_latency_cont *lat = NULL;
2000 struct zfcp_scsi_dev *zfcp_sdev; 2006 struct zfcp_scsi_dev *zfcp_sdev;
2001 struct zfcp_blk_drv_data blktrc; 2007 struct zfcp_blk_drv_data blktrc;
2002 int ticks = req->adapter->timer_ticks; 2008 int ticks = req->adapter->timer_ticks;
@@ -2088,11 +2094,8 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
2088 break; 2094 break;
2089 case FSF_CMND_LENGTH_NOT_VALID: 2095 case FSF_CMND_LENGTH_NOT_VALID:
2090 dev_err(&req->adapter->ccw_device->dev, 2096 dev_err(&req->adapter->ccw_device->dev,
2091 "Incorrect CDB length %d, LUN 0x%016Lx on " 2097 "Incorrect FCP_CMND length %d, FCP device closed\n",
2092 "port 0x%016Lx closed\n", 2098 req->qtcb->bottom.io.fcp_cmnd_length);
2093 req->qtcb->bottom.io.fcp_cmnd_length,
2094 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2095 (unsigned long long)zfcp_sdev->port->wwpn);
2096 zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4"); 2099 zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
2097 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2100 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2098 break; 2101 break;
@@ -2369,7 +2372,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
2369 fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; 2372 fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2370 zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags); 2373 zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
2371 2374
2372 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 2375 zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
2373 if (!zfcp_fsf_req_send(req)) 2376 if (!zfcp_fsf_req_send(req))
2374 goto out; 2377 goto out;
2375 2378
@@ -2382,7 +2385,7 @@ out:
2382 2385
2383/** 2386/**
2384 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO 2387 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2385 * @adapter: pointer to struct zfcp_adapter 2388 * @qdio: pointer to struct zfcp_qdio
2386 * @sbal_idx: response queue index of SBAL to be processed 2389 * @sbal_idx: response queue index of SBAL to be processed
2387 */ 2390 */
2388void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) 2391void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
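
The with_qtcb local dropped above and the fsf_command test rewritten in the zfcp_scsi.c hunk further down both lean on the new zfcp_fsf_req_is_status_read_buffer() helper. A minimal sketch of such a helper, assuming that unsolicited status read buffers are the only FSF requests allocated without a QTCB (the name comes from the hunks; the body is an assumption):

	static inline bool
	zfcp_fsf_req_is_status_read_buffer(struct zfcp_fsf_req *req)
	{
		/* assumption: only status read requests lack a QTCB */
		return req->qtcb == NULL;
	}
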
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 535628b92f0a..2c658b66318c 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -438,8 +438,8 @@ struct zfcp_blk_drv_data {
438 438
439/** 439/**
440 * struct zfcp_fsf_ct_els - zfcp data for ct or els request 440 * struct zfcp_fsf_ct_els - zfcp data for ct or els request
441 * @req: scatter-gather list for request 441 * @req: scatter-gather list for request, points to &zfcp_fc_req.sg_req or BSG
442 * @resp: scatter-gather list for response 442 * @resp: scatter-gather list for response, points to &zfcp_fc_req.sg_rsp or BSG
443 * @handler: handler function (called for response to the request) 443 * @handler: handler function (called for response to the request)
444 * @handler_data: data passed to handler function 444 * @handler_data: data passed to handler function
445 * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC) 445 * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 4ab02e8d36f3..10c4e8e3fd59 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -4,7 +4,7 @@
4 * 4 *
5 * Setup and helper functions to access QDIO. 5 * Setup and helper functions to access QDIO.
6 * 6 *
7 * Copyright IBM Corp. 2002, 2010 7 * Copyright IBM Corp. 2002, 2017
8 */ 8 */
9 9
10#define KMSG_COMPONENT "zfcp" 10#define KMSG_COMPONENT "zfcp"
@@ -19,7 +19,7 @@ static bool enable_multibuffer = true;
19module_param_named(datarouter, enable_multibuffer, bool, 0400); 19module_param_named(datarouter, enable_multibuffer, bool, 0400);
20MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)"); 20MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
21 21
22static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id, 22static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
23 unsigned int qdio_err) 23 unsigned int qdio_err)
24{ 24{
25 struct zfcp_adapter *adapter = qdio->adapter; 25 struct zfcp_adapter *adapter = qdio->adapter;
@@ -28,12 +28,12 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
28 28
29 if (qdio_err & QDIO_ERROR_SLSB_STATE) { 29 if (qdio_err & QDIO_ERROR_SLSB_STATE) {
30 zfcp_qdio_siosl(adapter); 30 zfcp_qdio_siosl(adapter);
31 zfcp_erp_adapter_shutdown(adapter, 0, id); 31 zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
32 return; 32 return;
33 } 33 }
34 zfcp_erp_adapter_reopen(adapter, 34 zfcp_erp_adapter_reopen(adapter,
35 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 35 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
36 ZFCP_STATUS_COMMON_ERP_FAILED, id); 36 ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
37} 37}
38 38
39static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) 39static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -180,7 +180,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
180 * @qdio: pointer to struct zfcp_qdio 180 * @qdio: pointer to struct zfcp_qdio
181 * @q_req: pointer to struct zfcp_qdio_req 181 * @q_req: pointer to struct zfcp_qdio_req
182 * @sg: scatter-gather list 182 * @sg: scatter-gather list
183 * @max_sbals: upper bound for number of SBALs to be used
184 * Returns: zero or -EINVAL on error 183 * Returns: zero or -EINVAL on error
185 */ 184 */
186int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, 185int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
@@ -303,7 +302,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
303 302
304/** 303/**
305 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data 304 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
306 * @adapter: pointer to struct zfcp_adapter 305 * @qdio: pointer to struct zfcp_qdio
307 * Returns: -ENOMEM on memory allocation error or return value from 306 * Returns: -ENOMEM on memory allocation error or return value from
308 * qdio_allocate 307 * qdio_allocate
309 */ 308 */
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 886c662cc154..2a816a37b3c0 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -30,6 +30,8 @@
30 * @req_q_full: queue full incidents 30 * @req_q_full: queue full incidents
31 * @req_q_wq: used to wait for SBAL availability 31 * @req_q_wq: used to wait for SBAL availability
32 * @adapter: adapter used in conjunction with this qdio structure 32 * @adapter: adapter used in conjunction with this qdio structure
33 * @max_sbale_per_sbal: qdio limit per sbal
34 * @max_sbale_per_req: qdio limit per request
33 */ 35 */
34struct zfcp_qdio { 36struct zfcp_qdio {
35 struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q]; 37 struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
@@ -70,7 +72,7 @@ struct zfcp_qdio_req {
70/** 72/**
71 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request 73 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
72 * @qdio: pointer to struct zfcp_qdio 74 * @qdio: pointer to struct zfcp_qdio
73 * @q_rec: pointer to struct zfcp_qdio_req 75 * @q_req: pointer to struct zfcp_qdio_req
74 * Returns: pointer to qdio_buffer_element (sbale) structure 76 * Returns: pointer to qdio_buffer_element (sbale) structure
75 */ 77 */
76static inline struct qdio_buffer_element * 78static inline struct qdio_buffer_element *
@@ -82,7 +84,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
82/** 84/**
83 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request 85 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
84 * @qdio: pointer to struct zfcp_qdio 86 * @qdio: pointer to struct zfcp_qdio
85 * @fsf_req: pointer to struct zfcp_fsf_req 87 * @q_req: pointer to struct zfcp_qdio_req
86 * Returns: pointer to qdio_buffer_element (sbale) structure 88 * Returns: pointer to qdio_buffer_element (sbale) structure
87 */ 89 */
88static inline struct qdio_buffer_element * 90static inline struct qdio_buffer_element *
@@ -135,6 +137,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
135 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests 137 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
136 * @qdio: pointer to struct zfcp_qdio 138 * @qdio: pointer to struct zfcp_qdio
137 * @q_req: pointer to struct zfcp_queue_req 139 * @q_req: pointer to struct zfcp_queue_req
140 * @data: pointer to data
141 * @len: length of data
138 * 142 *
139 * This is only required for single sbal requests, calling it when 143 * This is only required for single sbal requests, calling it when
140 * wrapping around to the next sbal is a bug. 144 * wrapping around to the next sbal is a bug.
@@ -182,6 +186,7 @@ int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
182 186
183/** 187/**
184 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal 188 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
189 * @qdio: pointer to struct zfcp_qdio
185 * @q_req: The current zfcp_qdio_req 190 * @q_req: The current zfcp_qdio_req
186 */ 191 */
187static inline 192static inline
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 59a943c0d51d..9b8ff249e31c 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -17,7 +17,7 @@
17/** 17/**
18 * struct zfcp_reqlist - Container for request list (reqlist) 18 * struct zfcp_reqlist - Container for request list (reqlist)
19 * @lock: Spinlock for protecting the hash list 19 * @lock: Spinlock for protecting the hash list
20 * @list: Array of hashbuckets, each is a list of requests in this bucket 20 * @buckets: Array of hashbuckets, each is a list of requests in this bucket
21 */ 21 */
22struct zfcp_reqlist { 22struct zfcp_reqlist {
23 spinlock_t lock; 23 spinlock_t lock;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index a8efcb330bc1..00acc7144bbc 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -27,7 +27,11 @@ MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
27 27
28static bool enable_dif; 28static bool enable_dif;
29module_param_named(dif, enable_dif, bool, 0400); 29module_param_named(dif, enable_dif, bool, 0400);
30MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); 30MODULE_PARM_DESC(dif, "Enable DIF data integrity support (default off)");
31
32bool zfcp_experimental_dix;
33module_param_named(dix, zfcp_experimental_dix, bool, 0400);
34MODULE_PARM_DESC(dix, "Enable experimental DIX (data integrity extension) support which implies DIF support (default off)");
31 35
32static bool allow_lun_scan = true; 36static bool allow_lun_scan = true;
33module_param(allow_lun_scan, bool, 0600); 37module_param(allow_lun_scan, bool, 0600);
@@ -226,7 +230,9 @@ static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
226 (struct zfcp_scsi_req_filter *)data; 230 (struct zfcp_scsi_req_filter *)data;
227 231
228 /* already aborted - prevent side-effects - or not a SCSI command */ 232 /* already aborted - prevent side-effects - or not a SCSI command */
229 if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND) 233 if (old_req->data == NULL ||
234 zfcp_fsf_req_is_status_read_buffer(old_req) ||
235 old_req->qtcb->header.fsf_command != FSF_QTCB_FCP_CMND)
230 return; 236 return;
231 237
232 /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */ 238 /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
@@ -423,7 +429,6 @@ static struct scsi_host_template zfcp_scsi_host_template = {
423 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, 429 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
424 /* GCD, adjusted later */ 430 /* GCD, adjusted later */
425 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, 431 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
426 .use_clustering = 1,
427 .shost_attrs = zfcp_sysfs_shost_attrs, 432 .shost_attrs = zfcp_sysfs_shost_attrs,
428 .sdev_attrs = zfcp_sysfs_sdev_attrs, 433 .sdev_attrs = zfcp_sysfs_sdev_attrs,
429 .track_queue_depth = 1, 434 .track_queue_depth = 1,
@@ -788,11 +793,11 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
788 data_div = atomic_read(&adapter->status) & 793 data_div = atomic_read(&adapter->status) &
789 ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED; 794 ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;
790 795
791 if (enable_dif && 796 if ((enable_dif || zfcp_experimental_dix) &&
792 adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1) 797 adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
793 mask |= SHOST_DIF_TYPE1_PROTECTION; 798 mask |= SHOST_DIF_TYPE1_PROTECTION;
794 799
795 if (enable_dif && data_div && 800 if (zfcp_experimental_dix && data_div &&
796 adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) { 801 adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
797 mask |= SHOST_DIX_TYPE1_PROTECTION; 802 mask |= SHOST_DIX_TYPE1_PROTECTION;
798 scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); 803 scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
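
With the new dix parameter, plain DIF is controlled by dif=1 while DIX, which implies DIF, sits behind the experimental dix=1 switch. The resulting policy, distilled from the two hunks above into an illustrative helper (the function itself is not driver code; the feature flags and SHOST masks are those shown in the diff):

	static unsigned int example_prot_mask(bool dif, bool dix,
					      u32 features, bool data_div)
	{
		unsigned int mask = 0;

		/* DIF type 1: enabled by either switch */
		if ((dif || dix) && (features & FSF_FEATURE_DIF_PROT_TYPE1))
			mask |= SHOST_DIF_TYPE1_PROTECTION;
		/* DIX type 1: experimental switch only, plus data division
		 * and TCP/IP checksum offload support */
		if (dix && data_div && (features & FSF_FEATURE_DIX_PROT_TCPIP))
			mask |= SHOST_DIX_TYPE1_PROTECTION;
		return mask;
	}
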
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 2d655a97b959..a3c20e3a8b7c 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1998,7 +1998,6 @@ static struct scsi_host_template driver_template = {
1998 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH, 1998 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
1999 .max_sectors = TW_MAX_SECTORS, 1999 .max_sectors = TW_MAX_SECTORS,
2000 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 2000 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2001 .use_clustering = ENABLE_CLUSTERING,
2002 .shost_attrs = twa_host_attrs, 2001 .shost_attrs = twa_host_attrs,
2003 .emulated = 1, 2002 .emulated = 1,
2004 .no_write_same = 1, 2003 .no_write_same = 1,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 480cf82700e9..e8f5f7c63190 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1550,7 +1550,6 @@ static struct scsi_host_template driver_template = {
1550 .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH, 1550 .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
1551 .max_sectors = TW_MAX_SECTORS, 1551 .max_sectors = TW_MAX_SECTORS,
1552 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 1552 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1553 .use_clustering = ENABLE_CLUSTERING,
1554 .shost_attrs = twl_host_attrs, 1553 .shost_attrs = twl_host_attrs,
1555 .emulated = 1, 1554 .emulated = 1,
1556 .no_write_same = 1, 1555 .no_write_same = 1,
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a58257645e94..2b1e0d503020 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1174,7 +1174,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
1174 command_que_value = tw_dev->command_packet_physical_address[request_id]; 1174 command_que_value = tw_dev->command_packet_physical_address[request_id];
1175 if (command_que_value == 0) { 1175 if (command_que_value == 0) {
1176 printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet physical address.\n"); 1176 printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet physical address.\n");
1177 return 1; 1177 return 1;
1178 } 1178 }
1179 1179
1180 /* Send command packet to the board */ 1180 /* Send command packet to the board */
@@ -2247,7 +2247,6 @@ static struct scsi_host_template driver_template = {
2247 .sg_tablesize = TW_MAX_SGL_LENGTH, 2247 .sg_tablesize = TW_MAX_SGL_LENGTH,
2248 .max_sectors = TW_MAX_SECTORS, 2248 .max_sectors = TW_MAX_SECTORS,
2249 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 2249 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2250 .use_clustering = ENABLE_CLUSTERING,
2251 .shost_attrs = tw_host_attrs, 2250 .shost_attrs = tw_host_attrs,
2252 .emulated = 1, 2251 .emulated = 1,
2253 .no_write_same = 1, 2252 .no_write_same = 1,
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 6be77b3aa8a5..128d658d472a 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -318,7 +318,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
318 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST; 318 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
319 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS; 319 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
320 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN; 320 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
321 tpnt->use_clustering = ENABLE_CLUSTERING;
322 tpnt->slave_configure = NCR_700_slave_configure; 321 tpnt->slave_configure = NCR_700_slave_configure;
323 tpnt->slave_destroy = NCR_700_slave_destroy; 322 tpnt->slave_destroy = NCR_700_slave_destroy;
324 tpnt->slave_alloc = NCR_700_slave_alloc; 323 tpnt->slave_alloc = NCR_700_slave_alloc;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 9cee941f97d6..e41e51f1da71 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2641,6 +2641,7 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
2641 case BLOGIC_BAD_CMD_PARAM: 2641 case BLOGIC_BAD_CMD_PARAM:
2642 blogic_warn("BusLogic Driver Protocol Error 0x%02X\n", 2642 blogic_warn("BusLogic Driver Protocol Error 0x%02X\n",
2643 adapter, adapter_status); 2643 adapter, adapter_status);
2644 /* fall through */
2644 case BLOGIC_DATA_UNDERRUN: 2645 case BLOGIC_DATA_UNDERRUN:
2645 case BLOGIC_DATA_OVERRUN: 2646 case BLOGIC_DATA_OVERRUN:
2646 case BLOGIC_NOEXPECT_BUSFREE: 2647 case BLOGIC_NOEXPECT_BUSFREE:
@@ -3857,7 +3858,6 @@ static struct scsi_host_template blogic_template = {
3857#endif 3858#endif
3858 .unchecked_isa_dma = 1, 3859 .unchecked_isa_dma = 1,
3859 .max_sectors = 128, 3860 .max_sectors = 128,
3860 .use_clustering = ENABLE_CLUSTERING,
3861}; 3861};
3862 3862
3863/* 3863/*
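
The /* fall through */ comments added here and in the aacraid hunks below are the annotation GCC's -Wimplicit-fallthrough recognizes, turning every unannotated fall-through into a warning. In sketch form (identifiers are illustrative):

	#include <linux/printk.h>

	static void example_result(int status)
	{
		switch (status) {
		case 1:
			pr_warn("protocol error\n");
			/* fall through */	/* deliberate: also handle below */
		case 2:
			pr_info("handled\n");
			break;
		}
	}
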
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 00072ed9540b..ff53fd0d12f2 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1078,7 +1078,6 @@ static struct scsi_host_template inia100_template = {
1078 .can_queue = 1, 1078 .can_queue = 1,
1079 .this_id = 1, 1079 .this_id = 1,
1080 .sg_tablesize = SG_ALL, 1080 .sg_tablesize = SG_ALL,
1081 .use_clustering = ENABLE_CLUSTERING,
1082}; 1081};
1083 1082
1084static int inia100_probe_one(struct pci_dev *pdev, 1083static int inia100_probe_one(struct pci_dev *pdev,
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 61aadc7acb49..c96bc7261a42 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -160,7 +160,7 @@ static struct scsi_host_template a2091_scsi_template = {
160 .this_id = 7, 160 .this_id = 7,
161 .sg_tablesize = SG_ALL, 161 .sg_tablesize = SG_ALL,
162 .cmd_per_lun = CMD_PER_LUN, 162 .cmd_per_lun = CMD_PER_LUN,
163 .use_clustering = DISABLE_CLUSTERING 163 .dma_boundary = PAGE_SIZE - 1,
164}; 164};
165 165
166static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent) 166static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
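
This is the tree-wide pattern from the DMA rework called out in the merge description: .use_clustering disappears from scsi_host_template, and a driver that must keep every scatter-gather segment within one page (the old DISABLE_CLUSTERING) now states the boundary directly. A sketch, with an illustrative template name:

	static struct scsi_host_template example_template = {
		.name		= "example",
		.sg_tablesize	= SG_ALL,
		/* was: .use_clustering = DISABLE_CLUSTERING */
		.dma_boundary	= PAGE_SIZE - 1, /* no segment crosses a page */
	};

Templates that carried ENABLE_CLUSTERING, as in the a3000.c hunk below, simply drop the line: merging adjacent segments is now the default behavior.
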
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 2427a8541247..dcf435f312dd 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -175,7 +175,6 @@ static struct scsi_host_template amiga_a3000_scsi_template = {
175 .this_id = 7, 175 .this_id = 7,
176 .sg_tablesize = SG_ALL, 176 .sg_tablesize = SG_ALL,
177 .cmd_per_lun = CMD_PER_LUN, 177 .cmd_per_lun = CMD_PER_LUN,
178 .use_clustering = ENABLE_CLUSTERING
179}; 178};
180 179
181static int __init amiga_a3000_scsi_probe(struct platform_device *pdev) 180static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index bd7f352c28f3..75ab5ff6b78c 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2892,6 +2892,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2892 !(dev->raw_io_64) || 2892 !(dev->raw_io_64) ||
2893 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) 2893 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
2894 break; 2894 break;
2895 /* fall through */
2895 case INQUIRY: 2896 case INQUIRY:
2896 case READ_CAPACITY: 2897 case READ_CAPACITY:
2897 case TEST_UNIT_READY: 2898 case TEST_UNIT_READY:
@@ -2966,6 +2967,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2966 /* Issue FIB to tell Firmware to flush it's cache */ 2967 /* Issue FIB to tell Firmware to flush it's cache */
2967 if ((aac_cache & 6) != 2) 2968 if ((aac_cache & 6) != 2)
2968 return aac_synchronize(scsicmd); 2969 return aac_synchronize(scsicmd);
2970 /* fall through */
2969 case INQUIRY: 2971 case INQUIRY:
2970 { 2972 {
2971 struct inquiry_data inq_data; 2973 struct inquiry_data inq_data;
@@ -3319,8 +3321,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
3319 min_t(size_t, 3321 min_t(size_t,
3320 sizeof(dev->fsa_dev[cid].sense_data), 3322 sizeof(dev->fsa_dev[cid].sense_data),
3321 SCSI_SENSE_BUFFERSIZE)); 3323 SCSI_SENSE_BUFFERSIZE));
3322 break; 3324 break;
3323 } 3325 }
3326 /* fall through */
3324 case RESERVE: 3327 case RESERVE:
3325 case RELEASE: 3328 case RELEASE:
3326 case REZERO_UNIT: 3329 case REZERO_UNIT:
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 39eb415987fc..3291d1c16864 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -40,6 +40,7 @@
40#define nblank(x) _nblank(x)[0] 40#define nblank(x) _nblank(x)[0]
41 41
42#include <linux/interrupt.h> 42#include <linux/interrupt.h>
43#include <linux/completion.h>
43#include <linux/pci.h> 44#include <linux/pci.h>
44#include <scsi/scsi_host.h> 45#include <scsi/scsi_host.h>
45 46
@@ -1241,7 +1242,7 @@ struct aac_fib_context {
1241 u32 unique; // unique value representing this context 1242 u32 unique; // unique value representing this context
1242 ulong jiffies; // used for cleanup - dmb changed to ulong 1243 ulong jiffies; // used for cleanup - dmb changed to ulong
1243 struct list_head next; // used to link context's into a linked list 1244 struct list_head next; // used to link context's into a linked list
1244 struct semaphore wait_sem; // this is used to wait for the next fib to arrive. 1245 struct completion completion; // this is used to wait for the next fib to arrive.
1245 int wait; // Set to true when thread is in WaitForSingleObject 1246 int wait; // Set to true when thread is in WaitForSingleObject
1246 unsigned long count; // total number of FIBs on FibList 1247 unsigned long count; // total number of FIBs on FibList
1247 struct list_head fib_list; // this holds fibs and their attachd hw_fibs 1248 struct list_head fib_list; // this holds fibs and their attachd hw_fibs
@@ -1313,7 +1314,7 @@ struct fib {
1313 * This is the event the sendfib routine will wait on if the 1314 * This is the event the sendfib routine will wait on if the
1314 * caller did not pass one and this is synch io. 1315 * caller did not pass one and this is synch io.
1315 */ 1316 */
1316 struct semaphore event_wait; 1317 struct completion event_wait;
1317 spinlock_t event_lock; 1318 spinlock_t event_lock;
1318 1319
1319 u32 done; /* gets set to 1 when fib is complete */ 1320 u32 done; /* gets set to 1 when fib is complete */
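
Every up()/down_*() on these two fields is converted by the commctrl.c, commsup.c, dpcsup.c, linit.c and src.c hunks that follow. The correspondence between the two APIs, as a sketch with illustrative names:

	#include <linux/completion.h>

	struct example_fib {
		struct completion event_wait;	/* was: struct semaphore */
	};

	static void example_setup(struct example_fib *fib)
	{
		init_completion(&fib->event_wait);	/* was: sema_init(..., 0) */
	}

	static void example_done(struct example_fib *fib)
	{
		complete(&fib->event_wait);		/* was: up() */
	}

	static int example_wait(struct example_fib *fib)
	{
		/* was: down_interruptible(); returns 0 on success,
		 * -ERESTARTSYS if interrupted by a signal */
		return wait_for_completion_interruptible(&fib->event_wait);
	}

	static bool example_poll(struct example_fib *fib)
	{
		/* was: down_trylock() -- note the inverted sense: this
		 * returns true when the completion was consumed */
		return try_wait_for_completion(&fib->event_wait);
	}
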
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 25f6600d6c09..e2899ff7913e 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -41,7 +41,6 @@
41#include <linux/blkdev.h> 41#include <linux/blkdev.h>
42#include <linux/delay.h> /* ssleep prototype */ 42#include <linux/delay.h> /* ssleep prototype */
43#include <linux/kthread.h> 43#include <linux/kthread.h>
44#include <linux/semaphore.h>
45#include <linux/uaccess.h> 44#include <linux/uaccess.h>
46#include <scsi/scsi_host.h> 45#include <scsi/scsi_host.h>
47 46
@@ -203,7 +202,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
203 /* 202 /*
204 * Initialize the mutex used to wait for the next AIF. 203 * Initialize the mutex used to wait for the next AIF.
205 */ 204 */
206 sema_init(&fibctx->wait_sem, 0); 205 init_completion(&fibctx->completion);
207 fibctx->wait = 0; 206 fibctx->wait = 0;
208 /* 207 /*
209 * Initialize the fibs and set the count of fibs on 208 * Initialize the fibs and set the count of fibs on
@@ -335,7 +334,7 @@ return_fib:
335 ssleep(1); 334 ssleep(1);
336 } 335 }
337 if (f.wait) { 336 if (f.wait) {
338 if(down_interruptible(&fibctx->wait_sem) < 0) { 337 if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
339 status = -ERESTARTSYS; 338 status = -ERESTARTSYS;
340 } else { 339 } else {
341 /* Lock again and retry */ 340 /* Lock again and retry */
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 1e77d96a18f2..d5a6aa9676c8 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -44,7 +44,6 @@
44#include <linux/delay.h> 44#include <linux/delay.h>
45#include <linux/kthread.h> 45#include <linux/kthread.h>
46#include <linux/interrupt.h> 46#include <linux/interrupt.h>
47#include <linux/semaphore.h>
48#include <linux/bcd.h> 47#include <linux/bcd.h>
49#include <scsi/scsi.h> 48#include <scsi/scsi.h>
50#include <scsi/scsi_host.h> 49#include <scsi/scsi_host.h>
@@ -189,7 +188,7 @@ int aac_fib_setup(struct aac_dev * dev)
189 fibptr->hw_fib_va = hw_fib; 188 fibptr->hw_fib_va = hw_fib;
190 fibptr->data = (void *) fibptr->hw_fib_va->data; 189 fibptr->data = (void *) fibptr->hw_fib_va->data;
191 fibptr->next = fibptr+1; /* Forward chain the fibs */ 190 fibptr->next = fibptr+1; /* Forward chain the fibs */
192 sema_init(&fibptr->event_wait, 0); 191 init_completion(&fibptr->event_wait);
193 spin_lock_init(&fibptr->event_lock); 192 spin_lock_init(&fibptr->event_lock);
194 hw_fib->header.XferState = cpu_to_le32(0xffffffff); 193 hw_fib->header.XferState = cpu_to_le32(0xffffffff);
195 hw_fib->header.SenderSize = 194 hw_fib->header.SenderSize =
@@ -623,7 +622,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
623 } 622 }
624 if (wait) { 623 if (wait) {
625 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; 624 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
626 if (down_interruptible(&fibptr->event_wait)) { 625 if (wait_for_completion_interruptible(&fibptr->event_wait)) {
627 fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT; 626 fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
628 return -EFAULT; 627 return -EFAULT;
629 } 628 }
@@ -659,7 +658,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
659 * hardware failure has occurred. 658 * hardware failure has occurred.
660 */ 659 */
661 unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */ 660 unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
662 while (down_trylock(&fibptr->event_wait)) { 661 while (!try_wait_for_completion(&fibptr->event_wait)) {
663 int blink; 662 int blink;
664 if (time_is_before_eq_jiffies(timeout)) { 663 if (time_is_before_eq_jiffies(timeout)) {
665 struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; 664 struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
@@ -689,9 +688,9 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
689 */ 688 */
690 schedule(); 689 schedule();
691 } 690 }
692 } else if (down_interruptible(&fibptr->event_wait)) { 691 } else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
693 /* Do nothing ... satisfy 692 /* Do nothing ... satisfy
694 * down_interruptible must_check */ 693 * wait_for_completion_interruptible must_check */
695 } 694 }
696 695
697 spin_lock_irqsave(&fibptr->event_lock, flags); 696 spin_lock_irqsave(&fibptr->event_lock, flags);
@@ -777,7 +776,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
777 return -EFAULT; 776 return -EFAULT;
778 777
779 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; 778 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
780 if (down_interruptible(&fibptr->event_wait)) 779 if (wait_for_completion_interruptible(&fibptr->event_wait))
781 fibptr->done = 2; 780 fibptr->done = 2;
782 fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT); 781 fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);
783 782
@@ -1538,7 +1537,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
1538 || fib->flags & FIB_CONTEXT_FLAG_WAIT) { 1537 || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
1539 unsigned long flagv; 1538 unsigned long flagv;
1540 spin_lock_irqsave(&fib->event_lock, flagv); 1539 spin_lock_irqsave(&fib->event_lock, flagv);
1541 up(&fib->event_wait); 1540 complete(&fib->event_wait);
1542 spin_unlock_irqrestore(&fib->event_lock, flagv); 1541 spin_unlock_irqrestore(&fib->event_lock, flagv);
1543 schedule(); 1542 schedule();
1544 retval = 0; 1543 retval = 0;
@@ -1828,7 +1827,7 @@ int aac_check_health(struct aac_dev * aac)
1828 * Set the event to wake up the 1827 * Set the event to wake up the
1829 * thread that is waiting. 1828 * thread that is waiting.
1830 */ 1829 */
1831 up(&fibctx->wait_sem); 1830 complete(&fibctx->completion);
1832 } else { 1831 } else {
1833 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); 1832 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1834 kfree(fib); 1833 kfree(fib);
@@ -2165,7 +2164,7 @@ static void wakeup_fibctx_threads(struct aac_dev *dev,
2165 * Set the event to wake up the 2164 * Set the event to wake up the
2166 * thread that is waiting. 2165 * thread that is waiting.
2167 */ 2166 */
2168 up(&fibctx->wait_sem); 2167 complete(&fibctx->completion);
2169 2168
2170 entry = entry->next; 2169 entry = entry->next;
2171 } 2170 }
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index ddc69738375f..40a771dd1c0e 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -38,7 +38,6 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/completion.h> 39#include <linux/completion.h>
40#include <linux/blkdev.h> 40#include <linux/blkdev.h>
41#include <linux/semaphore.h>
42 41
43#include "aacraid.h" 42#include "aacraid.h"
44 43
@@ -129,7 +128,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
129 spin_lock_irqsave(&fib->event_lock, flagv); 128 spin_lock_irqsave(&fib->event_lock, flagv);
130 if (!fib->done) { 129 if (!fib->done) {
131 fib->done = 1; 130 fib->done = 1;
132 up(&fib->event_wait); 131 complete(&fib->event_wait);
133 } 132 }
134 spin_unlock_irqrestore(&fib->event_lock, flagv); 133 spin_unlock_irqrestore(&fib->event_lock, flagv);
135 134
@@ -376,16 +375,16 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
376 start_callback = 1; 375 start_callback = 1;
377 } else { 376 } else {
378 unsigned long flagv; 377 unsigned long flagv;
379 int complete = 0; 378 int completed = 0;
380 379
381 dprintk((KERN_INFO "event_wait up\n")); 380 dprintk((KERN_INFO "event_wait up\n"));
382 spin_lock_irqsave(&fib->event_lock, flagv); 381 spin_lock_irqsave(&fib->event_lock, flagv);
383 if (fib->done == 2) { 382 if (fib->done == 2) {
384 fib->done = 1; 383 fib->done = 1;
385 complete = 1; 384 completed = 1;
386 } else { 385 } else {
387 fib->done = 1; 386 fib->done = 1;
388 up(&fib->event_wait); 387 complete(&fib->event_wait);
389 } 388 }
390 spin_unlock_irqrestore(&fib->event_lock, flagv); 389 spin_unlock_irqrestore(&fib->event_lock, flagv);
391 390
@@ -395,7 +394,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
395 mflags); 394 mflags);
396 395
397 FIB_COUNTER_INCREMENT(aac_config.NativeRecved); 396 FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
398 if (complete) 397 if (completed)
399 aac_fib_complete(fib); 398 aac_fib_complete(fib);
400 } 399 }
401 } else { 400 } else {
@@ -428,16 +427,16 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
428 start_callback = 1; 427 start_callback = 1;
429 } else { 428 } else {
430 unsigned long flagv; 429 unsigned long flagv;
431 int complete = 0; 430 int completed = 0;
432 431
433 dprintk((KERN_INFO "event_wait up\n")); 432 dprintk((KERN_INFO "event_wait up\n"));
434 spin_lock_irqsave(&fib->event_lock, flagv); 433 spin_lock_irqsave(&fib->event_lock, flagv);
435 if (fib->done == 2) { 434 if (fib->done == 2) {
436 fib->done = 1; 435 fib->done = 1;
437 complete = 1; 436 completed = 1;
438 } else { 437 } else {
439 fib->done = 1; 438 fib->done = 1;
440 up(&fib->event_wait); 439 complete(&fib->event_wait);
441 } 440 }
442 spin_unlock_irqrestore(&fib->event_lock, flagv); 441 spin_unlock_irqrestore(&fib->event_lock, flagv);
443 442
@@ -447,7 +446,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
447 mflags); 446 mflags);
448 447
449 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 448 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
450 if (complete) 449 if (completed)
451 aac_fib_complete(fib); 450 aac_fib_complete(fib);
452 } 451 }
453 } 452 }
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 2d4e4ddc5ace..634ddb90e7aa 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -759,6 +759,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
759 !(aac->raw_io_64) || 759 !(aac->raw_io_64) ||
760 ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) 760 ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
761 break; 761 break;
762 /* fall through */
762 case INQUIRY: 763 case INQUIRY:
763 case READ_CAPACITY: 764 case READ_CAPACITY:
764 /* 765 /*
@@ -1539,7 +1540,6 @@ static struct scsi_host_template aac_driver_template = {
1539#else 1540#else
1540 .cmd_per_lun = AAC_NUM_IO_FIB, 1541 .cmd_per_lun = AAC_NUM_IO_FIB,
1541#endif 1542#endif
1542 .use_clustering = ENABLE_CLUSTERING,
1543 .emulated = 1, 1543 .emulated = 1,
1544 .no_write_same = 1, 1544 .no_write_same = 1,
1545}; 1545};
@@ -1559,7 +1559,7 @@ static void __aac_shutdown(struct aac_dev * aac)
1559 struct fib *fib = &aac->fibs[i]; 1559 struct fib *fib = &aac->fibs[i];
1560 if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) && 1560 if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1561 (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) 1561 (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
1562 up(&fib->event_wait); 1562 complete(&fib->event_wait);
1563 } 1563 }
1564 kthread_stop(aac->thread); 1564 kthread_stop(aac->thread);
1565 aac->thread = NULL; 1565 aac->thread = NULL;
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7a51ccfa8662..8377aec0649d 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -106,7 +106,7 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
106 spin_lock_irqsave(&dev->sync_fib->event_lock, sflags); 106 spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
107 if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) { 107 if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
108 dev->management_fib_count--; 108 dev->management_fib_count--;
109 up(&dev->sync_fib->event_wait); 109 complete(&dev->sync_fib->event_wait);
110 } 110 }
111 spin_unlock_irqrestore(&dev->sync_fib->event_lock, 111 spin_unlock_irqrestore(&dev->sync_fib->event_lock,
112 sflags); 112 sflags);
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 223ef6f4e258..d37584403c33 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -3192,8 +3192,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
3192 shost->sg_tablesize, shost->cmd_per_lun); 3192 shost->sg_tablesize, shost->cmd_per_lun);
3193 3193
3194 seq_printf(m, 3194 seq_printf(m,
3195 " unchecked_isa_dma %d, use_clustering %d\n", 3195 " unchecked_isa_dma %d\n",
3196 shost->unchecked_isa_dma, shost->use_clustering); 3196 shost->unchecked_isa_dma);
3197 3197
3198 seq_printf(m, 3198 seq_printf(m,
3199 " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n", 3199 " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
@@ -10808,14 +10808,6 @@ static struct scsi_host_template advansys_template = {
10808 * for non-ISA adapters. 10808 * for non-ISA adapters.
10809 */ 10809 */
10810 .unchecked_isa_dma = true, 10810 .unchecked_isa_dma = true,
10811 /*
10812 * All adapters controlled by this driver are capable of large
10813 * scatter-gather lists. According to the mid-level SCSI documentation
10814 * this obviates any performance gain provided by setting
10815 * 'use_clustering'. But empirically while CPU utilization is increased
10816 * by enabling clustering, I/O throughput increases as well.
10817 */
10818 .use_clustering = ENABLE_CLUSTERING,
10819}; 10811};
10820 10812
10821static int advansys_wide_init_chip(struct Scsi_Host *shost) 10813static int advansys_wide_init_chip(struct Scsi_Host *shost)
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 301b3cad15f8..97872838b983 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -2920,7 +2920,7 @@ static struct scsi_host_template aha152x_driver_template = {
2920 .can_queue = 1, 2920 .can_queue = 1,
2921 .this_id = 7, 2921 .this_id = 7,
2922 .sg_tablesize = SG_ALL, 2922 .sg_tablesize = SG_ALL,
2923 .use_clustering = DISABLE_CLUSTERING, 2923 .dma_boundary = PAGE_SIZE - 1,
2924 .slave_alloc = aha152x_adjust_queue, 2924 .slave_alloc = aha152x_adjust_queue,
2925}; 2925};
2926 2926
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 41add33e3f1f..ba7a5725be04 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -58,8 +58,15 @@ struct aha1542_hostdata {
58 int aha1542_last_mbi_used; 58 int aha1542_last_mbi_used;
59 int aha1542_last_mbo_used; 59 int aha1542_last_mbo_used;
60 struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES]; 60 struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES];
61 struct mailbox mb[2 * AHA1542_MAILBOXES]; 61 struct mailbox *mb;
62 struct ccb ccb[AHA1542_MAILBOXES]; 62 dma_addr_t mb_handle;
63 struct ccb *ccb;
64 dma_addr_t ccb_handle;
65};
66
67struct aha1542_cmd {
68 struct chain *chain;
69 dma_addr_t chain_handle;
63}; 70};
64 71
65static inline void aha1542_intr_reset(u16 base) 72static inline void aha1542_intr_reset(u16 base)
@@ -233,6 +240,21 @@ static int aha1542_test_port(struct Scsi_Host *sh)
233 return 1; 240 return 1;
234} 241}
235 242
243static void aha1542_free_cmd(struct scsi_cmnd *cmd)
244{
245 struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
246 struct device *dev = cmd->device->host->dma_dev;
247 size_t len = scsi_sg_count(cmd) * sizeof(struct chain);
248
249 if (acmd->chain) {
250 dma_unmap_single(dev, acmd->chain_handle, len, DMA_TO_DEVICE);
251 kfree(acmd->chain);
252 }
253
254 acmd->chain = NULL;
255 scsi_dma_unmap(cmd);
256}
257
236static irqreturn_t aha1542_interrupt(int irq, void *dev_id) 258static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
237{ 259{
238 struct Scsi_Host *sh = dev_id; 260 struct Scsi_Host *sh = dev_id;
@@ -303,7 +325,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
303 return IRQ_HANDLED; 325 return IRQ_HANDLED;
304 }; 326 };
305 327
306 mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb); 328 mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb);
307 mbistatus = mb[mbi].status; 329 mbistatus = mb[mbi].status;
308 mb[mbi].status = 0; 330 mb[mbi].status = 0;
309 aha1542->aha1542_last_mbi_used = mbi; 331 aha1542->aha1542_last_mbi_used = mbi;
@@ -331,8 +353,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
331 return IRQ_HANDLED; 353 return IRQ_HANDLED;
332 } 354 }
333 my_done = tmp_cmd->scsi_done; 355 my_done = tmp_cmd->scsi_done;
334 kfree(tmp_cmd->host_scribble); 356 aha1542_free_cmd(tmp_cmd);
335 tmp_cmd->host_scribble = NULL;
336 /* Fetch the sense data, and tuck it away, in the required slot. The 357 /* Fetch the sense data, and tuck it away, in the required slot. The
337 Adaptec automatically fetches it, and there is no guarantee that 358 Adaptec automatically fetches it, and there is no guarantee that
338 we will still have it in the cdb when we come back */ 359 we will still have it in the cdb when we come back */
@@ -369,6 +390,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
369 390
370static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) 391static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
371{ 392{
393 struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
372 struct aha1542_hostdata *aha1542 = shost_priv(sh); 394 struct aha1542_hostdata *aha1542 = shost_priv(sh);
373 u8 direction; 395 u8 direction;
374 u8 target = cmd->device->id; 396 u8 target = cmd->device->id;
@@ -378,7 +400,6 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
378 int mbo, sg_count; 400 int mbo, sg_count;
379 struct mailbox *mb = aha1542->mb; 401 struct mailbox *mb = aha1542->mb;
380 struct ccb *ccb = aha1542->ccb; 402 struct ccb *ccb = aha1542->ccb;
381 struct chain *cptr;
382 403
383 if (*cmd->cmnd == REQUEST_SENSE) { 404 if (*cmd->cmnd == REQUEST_SENSE) {
384 /* Don't do the command - we have the sense data already */ 405 /* Don't do the command - we have the sense data already */
@@ -398,15 +419,17 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
398 print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); 419 print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
399 } 420 }
400#endif 421#endif
401 if (bufflen) { /* allocate memory before taking host_lock */ 422 sg_count = scsi_dma_map(cmd);
402 sg_count = scsi_sg_count(cmd); 423 if (sg_count) {
403 cptr = kmalloc_array(sg_count, sizeof(*cptr), 424 size_t len = sg_count * sizeof(struct chain);
404 GFP_KERNEL | GFP_DMA); 425
405 if (!cptr) 426 acmd->chain = kmalloc(len, GFP_DMA);
406 return SCSI_MLQUEUE_HOST_BUSY; 427 if (!acmd->chain)
407 } else { 428 goto out_unmap;
408 sg_count = 0; 429 acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain,
409 cptr = NULL; 430 len, DMA_TO_DEVICE);
431 if (dma_mapping_error(sh->dma_dev, acmd->chain_handle))
432 goto out_free_chain;
410 } 433 }
411 434
412 /* Use the outgoing mailboxes in a round-robin fashion, because this 435 /* Use the outgoing mailboxes in a round-robin fashion, because this
@@ -437,7 +460,8 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
437 shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done); 460 shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
438#endif 461#endif
439 462
440 any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */ 463 /* This gets trashed for some reason */
464 any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));
441 465
442 memset(&ccb[mbo], 0, sizeof(struct ccb)); 466 memset(&ccb[mbo], 0, sizeof(struct ccb));
443 467
@@ -456,21 +480,18 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
456 int i; 480 int i;
457 481
458 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ 482 ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
459 cmd->host_scribble = (void *)cptr;
460 scsi_for_each_sg(cmd, sg, sg_count, i) { 483 scsi_for_each_sg(cmd, sg, sg_count, i) {
461 any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) 484 any2scsi(acmd->chain[i].dataptr, sg_dma_address(sg));
462 + sg->offset); 485 any2scsi(acmd->chain[i].datalen, sg_dma_len(sg));
463 any2scsi(cptr[i].datalen, sg->length);
464 }; 486 };
465 any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain)); 487 any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
466 any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr)); 488 any2scsi(ccb[mbo].dataptr, acmd->chain_handle);
467#ifdef DEBUG 489#ifdef DEBUG
468 shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr); 490 shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain);
469 print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18); 491 print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, acmd->chain, 18);
470#endif 492#endif
471 } else { 493 } else {
472 ccb[mbo].op = 0; /* SCSI Initiator Command */ 494 ccb[mbo].op = 0; /* SCSI Initiator Command */
473 cmd->host_scribble = NULL;
474 any2scsi(ccb[mbo].datalen, 0); 495 any2scsi(ccb[mbo].datalen, 0);
475 any2scsi(ccb[mbo].dataptr, 0); 496 any2scsi(ccb[mbo].dataptr, 0);
476 }; 497 };
@@ -488,24 +509,29 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
488 spin_unlock_irqrestore(sh->host_lock, flags); 509 spin_unlock_irqrestore(sh->host_lock, flags);
489 510
490 return 0; 511 return 0;
512out_free_chain:
513 kfree(acmd->chain);
514 acmd->chain = NULL;
515out_unmap:
516 scsi_dma_unmap(cmd);
517 return SCSI_MLQUEUE_HOST_BUSY;
491} 518}
492 519
493/* Initialize mailboxes */ 520/* Initialize mailboxes */
494static void setup_mailboxes(struct Scsi_Host *sh) 521static void setup_mailboxes(struct Scsi_Host *sh)
495{ 522{
496 struct aha1542_hostdata *aha1542 = shost_priv(sh); 523 struct aha1542_hostdata *aha1542 = shost_priv(sh);
497 int i;
498 struct mailbox *mb = aha1542->mb;
499 struct ccb *ccb = aha1542->ccb;
500
501 u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0}; 524 u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0};
525 int i;
502 526
503 for (i = 0; i < AHA1542_MAILBOXES; i++) { 527 for (i = 0; i < AHA1542_MAILBOXES; i++) {
504 mb[i].status = mb[AHA1542_MAILBOXES + i].status = 0; 528 aha1542->mb[i].status = 0;
505 any2scsi(mb[i].ccbptr, isa_virt_to_bus(&ccb[i])); 529 any2scsi(aha1542->mb[i].ccbptr,
530 aha1542->ccb_handle + i * sizeof(struct ccb));
531 aha1542->mb[AHA1542_MAILBOXES + i].status = 0;
506 }; 532 };
507 aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ 533 aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
508 any2scsi((mb_cmd + 2), isa_virt_to_bus(mb)); 534 any2scsi(mb_cmd + 2, aha1542->mb_handle);
509 if (aha1542_out(sh->io_port, mb_cmd, 5)) 535 if (aha1542_out(sh->io_port, mb_cmd, 5))
510 shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n"); 536 shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n");
511 aha1542_intr_reset(sh->io_port); 537 aha1542_intr_reset(sh->io_port);
@@ -739,11 +765,26 @@ static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct
739 if (aha1542->bios_translation == BIOS_TRANSLATION_25563) 765 if (aha1542->bios_translation == BIOS_TRANSLATION_25563)
740 shost_printk(KERN_INFO, sh, "Using extended bios translation\n"); 766 shost_printk(KERN_INFO, sh, "Using extended bios translation\n");
741 767
768 if (dma_set_mask_and_coherent(pdev, DMA_BIT_MASK(24)) < 0)
769 goto unregister;
770
771 aha1542->mb = dma_alloc_coherent(pdev,
772 AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
773 &aha1542->mb_handle, GFP_KERNEL);
774 if (!aha1542->mb)
775 goto unregister;
776
777 aha1542->ccb = dma_alloc_coherent(pdev,
778 AHA1542_MAILBOXES * sizeof(struct ccb),
779 &aha1542->ccb_handle, GFP_KERNEL);
780 if (!aha1542->ccb)
781 goto free_mb;
782
742 setup_mailboxes(sh); 783 setup_mailboxes(sh);
743 784
744 if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) { 785 if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) {
745 shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n"); 786 shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n");
746 goto unregister; 787 goto free_ccb;
747 } 788 }
748 if (sh->dma_channel != 0xFF) { 789 if (sh->dma_channel != 0xFF) {
749 if (request_dma(sh->dma_channel, "aha1542")) { 790 if (request_dma(sh->dma_channel, "aha1542")) {
@@ -762,11 +803,18 @@ static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct
762 scsi_scan_host(sh); 803 scsi_scan_host(sh);
763 804
764 return sh; 805 return sh;
806
765free_dma: 807free_dma:
766 if (sh->dma_channel != 0xff) 808 if (sh->dma_channel != 0xff)
767 free_dma(sh->dma_channel); 809 free_dma(sh->dma_channel);
768free_irq: 810free_irq:
769 free_irq(sh->irq, sh); 811 free_irq(sh->irq, sh);
812free_ccb:
813 dma_free_coherent(pdev, AHA1542_MAILBOXES * sizeof(struct ccb),
814 aha1542->ccb, aha1542->ccb_handle);
815free_mb:
816 dma_free_coherent(pdev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
817 aha1542->mb, aha1542->mb_handle);
770unregister: 818unregister:
771 scsi_host_put(sh); 819 scsi_host_put(sh);
772release: 820release:
@@ -777,9 +825,16 @@ release:
777 825
778static int aha1542_release(struct Scsi_Host *sh) 826static int aha1542_release(struct Scsi_Host *sh)
779{ 827{
828 struct aha1542_hostdata *aha1542 = shost_priv(sh);
829 struct device *dev = sh->dma_dev;
830
780 scsi_remove_host(sh); 831 scsi_remove_host(sh);
781 if (sh->dma_channel != 0xff) 832 if (sh->dma_channel != 0xff)
782 free_dma(sh->dma_channel); 833 free_dma(sh->dma_channel);
834 dma_free_coherent(dev, AHA1542_MAILBOXES * sizeof(struct ccb),
835 aha1542->ccb, aha1542->ccb_handle);
836 dma_free_coherent(dev, AHA1542_MAILBOXES * 2 * sizeof(struct mailbox),
837 aha1542->mb, aha1542->mb_handle);
783 if (sh->irq) 838 if (sh->irq)
784 free_irq(sh->irq, sh); 839 free_irq(sh->irq, sh);
785 if (sh->io_port && sh->n_io_port) 840 if (sh->io_port && sh->n_io_port)
@@ -826,7 +881,8 @@ static int aha1542_dev_reset(struct scsi_cmnd *cmd)
826 881
827 aha1542->aha1542_last_mbo_used = mbo; 882 aha1542->aha1542_last_mbo_used = mbo;
828 883
829 any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */ 884 /* This gets trashed for some reason */
885 any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));
830 886
831 memset(&ccb[mbo], 0, sizeof(struct ccb)); 887 memset(&ccb[mbo], 0, sizeof(struct ccb));
832 888
@@ -901,8 +957,7 @@ static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
901 */ 957 */
902 continue; 958 continue;
903 } 959 }
904 kfree(tmp_cmd->host_scribble); 960 aha1542_free_cmd(tmp_cmd);
905 tmp_cmd->host_scribble = NULL;
906 aha1542->int_cmds[i] = NULL; 961 aha1542->int_cmds[i] = NULL;
907 aha1542->mb[i].status = 0; 962 aha1542->mb[i].status = 0;
908 } 963 }
@@ -946,6 +1001,7 @@ static struct scsi_host_template driver_template = {
946 .module = THIS_MODULE, 1001 .module = THIS_MODULE,
947 .proc_name = "aha1542", 1002 .proc_name = "aha1542",
948 .name = "Adaptec 1542", 1003 .name = "Adaptec 1542",
1004 .cmd_size = sizeof(struct aha1542_cmd),
949 .queuecommand = aha1542_queuecommand, 1005 .queuecommand = aha1542_queuecommand,
950 .eh_device_reset_handler= aha1542_dev_reset, 1006 .eh_device_reset_handler= aha1542_dev_reset,
951 .eh_bus_reset_handler = aha1542_bus_reset, 1007 .eh_bus_reset_handler = aha1542_bus_reset,
@@ -955,7 +1011,6 @@ static struct scsi_host_template driver_template = {
955 .this_id = 7, 1011 .this_id = 7,
956 .sg_tablesize = 16, 1012 .sg_tablesize = 16,
957 .unchecked_isa_dma = 1, 1013 .unchecked_isa_dma = 1,
958 .use_clustering = ENABLE_CLUSTERING,
959}; 1014};
960 1015
961static int aha1542_isa_match(struct device *pdev, unsigned int ndev) 1016static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
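
Taken together, the aha1542 hunks swap isa_virt_to_bus() arithmetic for real DMA-API mappings: the mailbox and CCB arrays become coherent allocations under a 24-bit mask, and the per-command scatter-gather chain is a streaming mapping checked with dma_mapping_error() and undone on completion. A condensed sketch of the two styles (device pointer, buffer and sizes are illustrative):

	#include <linux/dma-mapping.h>

	static int example_dma(struct device *dev, size_t tbl_len,
			       void *chain, size_t chain_len)
	{
		void *tbl;
		dma_addr_t tbl_dma, chain_dma;

		/* long-lived, board-visible tables: coherent memory */
		tbl = dma_alloc_coherent(dev, tbl_len, &tbl_dma, GFP_KERNEL);
		if (!tbl)
			return -ENOMEM;

		/* per-command buffer: streaming mapping, may fail */
		chain_dma = dma_map_single(dev, chain, chain_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, chain_dma)) {
			dma_free_coherent(dev, tbl_len, tbl, tbl_dma);
			return -ENOMEM;
		}

		/* ... hand tbl_dma and chain_dma to the hardware ... */

		dma_unmap_single(dev, chain_dma, chain_len, DMA_TO_DEVICE);
		dma_free_coherent(dev, tbl_len, tbl, tbl_dma);
		return 0;
	}
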
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 786bf7f32c64..da4150c17781 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -545,7 +545,6 @@ static struct scsi_host_template aha1740_template = {
545 .can_queue = AHA1740_ECBS, 545 .can_queue = AHA1740_ECBS,
546 .this_id = 7, 546 .this_id = 7,
547 .sg_tablesize = AHA1740_SCATTER, 547 .sg_tablesize = AHA1740_SCATTER,
548 .use_clustering = ENABLE_CLUSTERING,
549 .eh_abort_handler = aha1740_eh_abort_handler, 548 .eh_abort_handler = aha1740_eh_abort_handler,
550}; 549};
551 550
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 2588b8f84ba0..57992519384e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -920,7 +920,6 @@ struct scsi_host_template aic79xx_driver_template = {
920 .this_id = -1, 920 .this_id = -1,
921 .max_sectors = 8192, 921 .max_sectors = 8192,
922 .cmd_per_lun = 2, 922 .cmd_per_lun = 2,
923 .use_clustering = ENABLE_CLUSTERING,
924 .slave_alloc = ahd_linux_slave_alloc, 923 .slave_alloc = ahd_linux_slave_alloc,
925 .slave_configure = ahd_linux_slave_configure, 924 .slave_configure = ahd_linux_slave_configure,
926 .target_alloc = ahd_linux_target_alloc, 925 .target_alloc = ahd_linux_target_alloc,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c6be3aeb302b..3c9c17450bb3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -807,7 +807,6 @@ struct scsi_host_template aic7xxx_driver_template = {
807 .this_id = -1, 807 .this_id = -1,
808 .max_sectors = 8192, 808 .max_sectors = 8192,
809 .cmd_per_lun = 2, 809 .cmd_per_lun = 2,
810 .use_clustering = ENABLE_CLUSTERING,
811 .slave_alloc = ahc_linux_slave_alloc, 810 .slave_alloc = ahc_linux_slave_alloc,
812 .slave_configure = ahc_linux_slave_configure, 811 .slave_configure = ahc_linux_slave_configure,
813 .target_alloc = ahc_linux_target_alloc, 812 .target_alloc = ahc_linux_target_alloc,
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 3b8ad55e59de..2bc7615193bd 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1057,14 +1057,13 @@ static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
1057 1057
1058 if (ascb) { 1058 if (ascb) {
1059 ascb->dma_scb.size = sizeof(struct scb); 1059 ascb->dma_scb.size = sizeof(struct scb);
1060 ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, 1060 ascb->dma_scb.vaddr = dma_pool_zalloc(asd_ha->scb_pool,
1061 gfp_flags, 1061 gfp_flags,
1062 &ascb->dma_scb.dma_handle); 1062 &ascb->dma_scb.dma_handle);
1063 if (!ascb->dma_scb.vaddr) { 1063 if (!ascb->dma_scb.vaddr) {
1064 kmem_cache_free(asd_ascb_cache, ascb); 1064 kmem_cache_free(asd_ascb_cache, ascb);
1065 return NULL; 1065 return NULL;
1066 } 1066 }
1067 memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
1068 asd_init_ascb(asd_ha, ascb); 1067 asd_init_ascb(asd_ha, ascb);
1069 1068
1070 spin_lock_irqsave(&seq->tc_index_lock, flags); 1069 spin_lock_irqsave(&seq->tc_index_lock, flags);
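
dma_pool_zalloc() hands back already-zeroed memory, so the separate memset() can go. Sketch (names illustrative):

	#include <linux/dmapool.h>

	static void *example_alloc_scb(struct dma_pool *pool, gfp_t gfp,
				       dma_addr_t *handle)
	{
		return dma_pool_zalloc(pool, gfp, handle); /* zeroed on return */
	}
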
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 41c4d8abdd4a..f83f79b07b50 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -68,7 +68,6 @@ static struct scsi_host_template aic94xx_sht = {
68 .this_id = -1, 68 .this_id = -1,
69 .sg_tablesize = SG_ALL, 69 .sg_tablesize = SG_ALL,
70 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 70 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
71 .use_clustering = ENABLE_CLUSTERING,
72 .eh_device_reset_handler = sas_eh_device_reset_handler, 71 .eh_device_reset_handler = sas_eh_device_reset_handler,
73 .eh_target_reset_handler = sas_eh_target_reset_handler, 72 .eh_target_reset_handler = sas_eh_target_reset_handler,
74 .target_destroy = sas_target_destroy, 73 .target_destroy = sas_target_destroy,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index d4404eea24fb..0f6751b0a633 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -156,7 +156,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
  .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
  .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
  .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
- .use_clustering = ENABLE_CLUSTERING,
  .shost_attrs = arcmsr_host_attrs,
  .no_write_same = 1,
  };
@@ -903,9 +902,9 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  if(!host){
      goto pci_disable_dev;
  }
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
  if(error){
-     error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+     error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
      if(error){
          printk(KERN_WARNING
              "scsi%d: No suitable DMA mask available\n",
@@ -1049,9 +1048,9 @@ static int arcmsr_resume(struct pci_dev *pdev)
      pr_warn("%s: pci_enable_device error\n", __func__);
      return -ENODEV;
  }
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
  if (error) {
-     error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+     error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
      if (error) {
          pr_warn("scsi%d: No suitable DMA mask available\n",
              host->host_no);
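
arcmsr is one of many drivers here moved off the deprecated PCI DMA wrappers; pci_set_dma_mask(pdev, mask) is equivalent to dma_set_mask(&pdev->dev, mask), so the 64-bit-then-32-bit fallback keeps its shape:

    /* the common fallback idiom after the conversion */
    if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
            return -ENODEV;   /* no suitable DMA mask */
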
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 421fe869a11e..d7509859dc00 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2890,7 +2890,7 @@ static struct scsi_host_template acornscsi_template = {
  .this_id = 7,
  .sg_tablesize = SG_ALL,
  .cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
  .proc_name = "acornscsi",
  };
 
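
Where a driver really depended on DISABLE_CLUSTERING (mostly the ARM and NCR5380-style boards whose pseudo-DMA cannot cross a page within one segment), the same constraint is now spelled .dma_boundary = PAGE_SIZE - 1: a boundary mask that forbids the block layer from merging segments across a page boundary, which is what disabled clustering used to guarantee. Sketch, with a placeholder name:

    static struct scsi_host_template bar_template = {
            .this_id      = 7,
            .sg_tablesize = SG_ALL,
            /* was: .use_clustering = DISABLE_CLUSTERING, */
            .dma_boundary = PAGE_SIZE - 1,  /* no segment spans a page */
    };
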
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 3110736fd337..5e9dd9f34821 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -245,7 +245,7 @@ static struct scsi_host_template arxescsi_template = {
  .can_queue = 0,
  .this_id = 7,
  .sg_tablesize = SG_ALL,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
  .proc_name = "arxescsi",
  };
 
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index ae1d809904fb..e2d2a81d8e0b 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -221,10 +221,10 @@ static struct scsi_host_template cumanascsi_template = {
  .this_id = 7,
  .sg_tablesize = SG_ALL,
  .cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
  .proc_name = "CumanaSCSI-1",
  .cmd_size = NCR5380_CMD_SIZE,
  .max_sectors = 128,
+ .dma_boundary = PAGE_SIZE - 1,
  };
 
  static int cumanascsi1_probe(struct expansion_card *ec,
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index edce5f3cfdba..40afcbd8de61 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -367,7 +367,6 @@ static struct scsi_host_template cumanascsi2_template = {
  .this_id = 7,
  .sg_tablesize = SG_MAX_SEGMENTS,
  .dma_boundary = IOMD_DMA_BOUNDARY,
- .use_clustering = DISABLE_CLUSTERING,
  .proc_name = "cumanascsi2",
  };
 
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index e93e047f4316..8f64c370a8a7 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -486,7 +486,6 @@ static struct scsi_host_template eesox_template = {
  .this_id = 7,
  .sg_tablesize = SG_MAX_SEGMENTS,
  .dma_boundary = IOMD_DMA_BOUNDARY,
- .use_clustering = DISABLE_CLUSTERING,
  .proc_name = "eesox",
  };
 
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 05b7f755499b..8f2efaab8d46 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -110,7 +110,7 @@ static struct scsi_host_template oakscsi_template = {
  .this_id = 7,
  .sg_tablesize = SG_ALL,
  .cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
  .proc_name = "oakscsi",
  .cmd_size = NCR5380_CMD_SIZE,
  .max_sectors = 128,
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 79aa88911b7f..759f95ba993c 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -294,7 +294,6 @@ static struct scsi_host_template powertecscsi_template = {
  .sg_tablesize = SG_MAX_SEGMENTS,
  .dma_boundary = IOMD_DMA_BOUNDARY,
  .cmd_per_lun = 2,
- .use_clustering = ENABLE_CLUSTERING,
  .proc_name = "powertec",
  };
 
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 89f5154c40b6..a503dc50c4f8 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -714,7 +714,7 @@ static struct scsi_host_template atari_scsi_template = {
  .eh_host_reset_handler = atari_scsi_host_reset,
  .this_id = 7,
  .cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
  .cmd_size = NCR5380_CMD_SIZE,
  };
 
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 802d15018ec0..1267200380f8 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1681,7 +1681,6 @@ static struct scsi_host_template atp870u_template = {
  .can_queue = qcnt /* can_queue */,
  .this_id = 7 /* SCSI ID */,
  .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
- .use_clustering = ENABLE_CLUSTERING,
  .max_sectors = ATP870U_MAX_SECTORS,
  };
 
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index effb6fc95af4..39f3820572b4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -214,12 +214,6 @@ static char const *cqe_desc[] = {
214 "CXN_KILLED_IMM_DATA_RCVD" 214 "CXN_KILLED_IMM_DATA_RCVD"
215}; 215};
216 216
217static int beiscsi_slave_configure(struct scsi_device *sdev)
218{
219 blk_queue_max_segment_size(sdev->request_queue, 65536);
220 return 0;
221}
222
223static int beiscsi_eh_abort(struct scsi_cmnd *sc) 217static int beiscsi_eh_abort(struct scsi_cmnd *sc)
224{ 218{
225 struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr; 219 struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
@@ -393,7 +387,6 @@ static struct scsi_host_template beiscsi_sht = {
393 .proc_name = DRV_NAME, 387 .proc_name = DRV_NAME,
394 .queuecommand = iscsi_queuecommand, 388 .queuecommand = iscsi_queuecommand,
395 .change_queue_depth = scsi_change_queue_depth, 389 .change_queue_depth = scsi_change_queue_depth,
396 .slave_configure = beiscsi_slave_configure,
397 .target_alloc = iscsi_target_alloc, 390 .target_alloc = iscsi_target_alloc,
398 .eh_timed_out = iscsi_eh_cmd_timed_out, 391 .eh_timed_out = iscsi_eh_cmd_timed_out,
399 .eh_abort_handler = beiscsi_eh_abort, 392 .eh_abort_handler = beiscsi_eh_abort,
@@ -404,8 +397,8 @@ static struct scsi_host_template beiscsi_sht = {
404 .can_queue = BE2_IO_DEPTH, 397 .can_queue = BE2_IO_DEPTH,
405 .this_id = -1, 398 .this_id = -1,
406 .max_sectors = BEISCSI_MAX_SECTORS, 399 .max_sectors = BEISCSI_MAX_SECTORS,
400 .max_segment_size = 65536,
407 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 401 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
408 .use_clustering = ENABLE_CLUSTERING,
409 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, 402 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
410 .track_queue_depth = 1, 403 .track_queue_depth = 1,
411}; 404};
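
be2iscsi's slave_configure existed only to clamp every request queue's segment size; the host template grew a max_segment_size field in this series, so the limit becomes declarative and the callback goes away:

    /* before: imperative, per scsi_device */
    static int beiscsi_slave_configure(struct scsi_device *sdev)
    {
            blk_queue_max_segment_size(sdev->request_queue, 65536);
            return 0;
    }

    /* after: one line in the scsi_host_template */
    .max_segment_size = 65536,
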
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 16d3aeb0e572..9631877aba4f 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3819,7 +3819,7 @@ bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
      sfp->state = BFA_SFP_STATE_REMOVED;
      sfp->data_valid = 0;
      bfa_sfp_scn_aen_post(sfp, rsp);
-   break;
+     break;
  case BFA_SFP_SCN_FAILED:
      sfp->state = BFA_SFP_STATE_FAILED;
      sfp->data_valid = 0;
@@ -5763,7 +5763,7 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
          (struct bfa_phy_stats_s *) phy->ubuf;
      bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
          sizeof(struct bfa_phy_stats_s));
-   bfa_trc(phy, stats->status);
+     bfa_trc(phy, stats->status);
  }
 
  phy->status = status;
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 911efc98d1fd..42a0caf6740d 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -739,14 +739,10 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 
  pci_set_master(pdev);
 
-
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
-     (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
-     if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
-         (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
-         printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
-         goto out_release_region;
-     }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+     printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
+     goto out_release_region;
  }
 
  /* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
@@ -1565,9 +1561,9 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
  pci_save_state(pdev);
  pci_set_master(pdev);
 
- if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
-     if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
+ if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) ||
+     dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32)))
      goto out_disable_device;
 
  if (restart_bfa(bfad) == -1)
      goto out_disable_device;
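
bfad's nested mask setup collapses into dma_set_mask_and_coherent(), which sets the streaming and coherent masks together and fails if either cannot be satisfied; the 64/32 fallback becomes a single condition:

    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
            goto out_release_region;   /* no suitable DMA mask */
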
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c4a33317d344..394930cbaa13 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -817,7 +817,6 @@ struct scsi_host_template bfad_im_scsi_host_template = {
  .this_id = -1,
  .sg_tablesize = BFAD_IO_MAX_SGE,
  .cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
  .shost_attrs = bfad_im_host_attrs,
  .max_sectors = BFAD_MAX_SECTORS,
  .vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
@@ -840,7 +839,6 @@ struct scsi_host_template bfad_im_vport_template = {
  .this_id = -1,
  .sg_tablesize = BFAD_IO_MAX_SGE,
  .cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
  .shost_attrs = bfad_im_vport_attrs,
  .max_sectors = BFAD_MAX_SECTORS,
  };
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index bcd30e2374f1..2e4e7159ebf9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2970,7 +2970,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
  .change_queue_depth = scsi_change_queue_depth,
  .this_id = -1,
  .cmd_per_lun = 3,
- .use_clustering = ENABLE_CLUSTERING,
  .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
  .max_sectors = 1024,
  .track_queue_depth = 1,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 6bad2689edd4..91f5316aa3ab 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2427,7 +2427,6 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
  {
      u32 cid_addr;
      struct bnx2i_endpoint *ep;
-     u32 cid_num;
 
      ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
      if (!ep) {
@@ -2462,7 +2461,6 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
      } else {
          ep->state = EP_STATE_OFLD_COMPL;
          cid_addr = ofld_kcqe->iscsi_conn_context_id;
-         cid_num = bnx2i_get_cid_num(ep);
          ep->ep_cid = cid_addr;
          ep->qp.ctx_base = NULL;
      }
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index de0a507577ef..69c75426c5eb 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2263,7 +2263,6 @@ static struct scsi_host_template bnx2i_host_template = {
  .max_sectors = 127,
  .cmd_per_lun = 128,
  .this_id = -1,
- .use_clustering = ENABLE_CLUSTERING,
  .sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
  .shost_attrs = bnx2i_dev_attributes,
  .track_queue_depth = 1,
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 1a458ce08210..cf629380a981 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -255,7 +255,6 @@ static void
  csio_hw_exit_workers(struct csio_hw *hw)
  {
      cancel_work_sync(&hw->evtq_work);
-     flush_scheduled_work();
  }
 
  static int
@@ -646,7 +645,7 @@ csio_shost_init(struct csio_hw *hw, struct device *dev,
  if (csio_lnode_init(ln, hw, pln))
      goto err_shost_put;
 
- if (scsi_add_host(shost, dev))
+ if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
      goto err_lnode_exit;
 
  return ln;
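
csiostor now registers through scsi_add_host_with_dma(): the lnode's sysfs parent ("dev" here) is not the device doing the DMA, and with the clustering flag gone the block layer derives its DMA limits from the device passed in, so the PCI device must be named explicitly:

    /* sysfs parent and DMA device differ for csiostor lnodes */
    if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
            goto err_lnode_exit;

scsi_add_host(shost, dev) is just the special case where both are the same device.
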
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index a95debbea0e4..bc5547a62c00 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2274,7 +2274,6 @@ struct scsi_host_template csio_fcoe_shost_template = {
  .this_id = -1,
  .sg_tablesize = CSIO_SCSI_MAX_SGE,
  .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
  .shost_attrs = csio_fcoe_lport_attrs,
  .max_sectors = CSIO_MAX_SECTOR_SIZE,
  };
@@ -2294,7 +2293,6 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
  .this_id = -1,
  .sg_tablesize = CSIO_SCSI_MAX_SGE,
  .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
- .use_clustering = ENABLE_CLUSTERING,
  .shost_attrs = csio_fcoe_vport_attrs,
  .max_sectors = CSIO_MAX_SECTOR_SIZE,
  };
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index bf07735275a4..8a20411699d9 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -95,7 +95,7 @@ static struct scsi_host_template cxgb3i_host_template = {
  .eh_device_reset_handler = iscsi_eh_device_reset,
  .eh_target_reset_handler = iscsi_eh_recover_target,
  .target_alloc = iscsi_target_alloc,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
  .this_id = -1,
  .track_queue_depth = 1,
  };
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index 594f593c8821..f36b76e8e12c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -1,8 +1,8 @@
  config SCSI_CXGB4_ISCSI
      tristate "Chelsio T4 iSCSI support"
      depends on PCI && INET && (IPV6 || IPV6=n)
-     select NETDEVICES
-     select ETHERNET
+     depends on THERMAL || !THERMAL
+     depends on ETHERNET
      select NET_VENDOR_CHELSIO
      select CHELSIO_T4
      select CHELSIO_LIB
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 907dd8792a0a..49f8028ac524 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -113,7 +113,7 @@ static struct scsi_host_template cxgb4i_host_template = {
  .eh_device_reset_handler = iscsi_eh_device_reset,
  .eh_target_reset_handler = iscsi_eh_recover_target,
  .target_alloc = iscsi_target_alloc,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
  .this_id = -1,
  .track_queue_depth = 1,
  };
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index abdc9eac4173..bfa13e3b191c 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3174,7 +3174,6 @@ static struct scsi_host_template driver_template = {
  .this_id = -1,
  .sg_tablesize = 1, /* No scatter gather support */
  .max_sectors = CXLFLASH_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
  .shost_attrs = cxlflash_host_attrs,
  .sdev_attrs = cxlflash_dev_attrs,
  };
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 8c55ec6e1827..13fbb2eab842 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4631,7 +4631,7 @@ static struct scsi_host_template dc395x_driver_template = {
  .cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
  .eh_abort_handler = dc395x_eh_abort,
  .eh_bus_reset_handler = dc395x_eh_bus_reset,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
  };
 
 
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 003c3d726238..8db1cc552932 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -63,7 +63,7 @@ static struct scsi_host_template dmx3191d_driver_template = {
  .this_id = 7,
  .sg_tablesize = SG_ALL,
  .cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
  .cmd_size = NCR5380_CMD_SIZE,
  };
 
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 37de8fb186d7..70d1a18278af 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -934,15 +934,15 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
   * See if we should enable dma64 mode.
   */
  if (sizeof(dma_addr_t) > 4 &&
-     pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
-     if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
+     dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
+     dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
          dma64 = 1;
- }
+
- if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
+ if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
      return -EINVAL;
 
  /* adapter only supports message blocks below 4GB */
- pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
 
  base_addr0_phys = pci_resource_start(pDev,0);
  hba_map0_area_size = pci_resource_len(pDev,0);
@@ -3569,7 +3569,6 @@ static struct scsi_host_template driver_template = {
  .slave_configure = adpt_slave_configure,
  .can_queue = MAX_TO_IOP_MESSAGES,
  .this_id = 7,
- .use_clustering = ENABLE_CLUSTERING,
  };
 
  static int __init adpt_init(void)
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index bbe77db8938d..46b2c83ba21f 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -266,6 +266,7 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
  int i;
  void *next_uncached;
  struct esas2r_request *first_request, *last_request;
+ bool dma64 = false;
 
  if (index >= MAX_ADAPTERS) {
      esas2r_log(ESAS2R_LOG_CRIT,
@@ -286,42 +287,20 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
  a->pcid = pcid;
  a->host = host;
 
- if (sizeof(dma_addr_t) > 4) {
-     const uint64_t required_mask = dma_get_required_mask
-         (&pcid->dev);
-     if (required_mask > DMA_BIT_MASK(32)
-         && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
-         && !pci_set_consistent_dma_mask(pcid,
-                 DMA_BIT_MASK(64))) {
-         esas2r_log_dev(ESAS2R_LOG_INFO,
-                 &(a->pcid->dev),
-                 "64-bit PCI addressing enabled\n");
-     } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
-             && !pci_set_consistent_dma_mask(pcid,
-                 DMA_BIT_MASK(32))) {
-         esas2r_log_dev(ESAS2R_LOG_INFO,
-                 &(a->pcid->dev),
-                 "32-bit PCI addressing enabled\n");
-     } else {
-         esas2r_log(ESAS2R_LOG_CRIT,
-                 "failed to set DMA mask");
-         esas2r_kill_adapter(index);
-         return 0;
-     }
- } else {
-     if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
-         && !pci_set_consistent_dma_mask(pcid,
-                 DMA_BIT_MASK(32))) {
-         esas2r_log_dev(ESAS2R_LOG_INFO,
-                 &(a->pcid->dev),
-                 "32-bit PCI addressing enabled\n");
-     } else {
-         esas2r_log(ESAS2R_LOG_CRIT,
-                 "failed to set DMA mask");
-         esas2r_kill_adapter(index);
-         return 0;
-     }
- }
+ if (sizeof(dma_addr_t) > 4 &&
+     dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
+     !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
+     dma64 = true;
+
+ if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
+     esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
+     esas2r_kill_adapter(index);
+     return 0;
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
+         "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");
+
  esas2r_adapters[index] = a;
  sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
  esas2r_debug("new adapter %p, name %s", a, a->name);
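
dpt_i2o and esas2r keep a wrinkle the simpler conversions lack: the 64-bit mask is only attempted when dma_get_required_mask() reports the platform actually has memory above 4 GB, preferring plain 32-bit addressing otherwise. The rewritten shape, with pdev standing in for the respective PCI device:

    bool dma64 = false;

    if (sizeof(dma_addr_t) > 4 &&
        dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32) &&
        !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
            dma64 = true;

    if (!dma64 && dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
            return -EINVAL;   /* no usable DMA mask */
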
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index c07118617d89..64397d441bae 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -250,7 +250,6 @@ static struct scsi_host_template driver_template = {
      ESAS2R_DEFAULT_CMD_PER_LUN,
  .present = 0,
  .unchecked_isa_dma = 0,
- .use_clustering = ENABLE_CLUSTERING,
  .emulated = 0,
  .proc_name = ESAS2R_DRVR_NAME,
  .change_queue_depth = scsi_change_queue_depth,
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index ac7da9db7317..465df475f753 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2676,7 +2676,6 @@ struct scsi_host_template scsi_esp_template = {
  .can_queue = 7,
  .this_id = 7,
  .sg_tablesize = SG_ALL,
- .use_clustering = ENABLE_CLUSTERING,
  .max_sectors = 0xffff,
  .skip_settle_delay = 1,
  };
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f46b312d04bc..cd19be3f3405 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -286,7 +286,6 @@ static struct scsi_host_template fcoe_shost_template = {
  .this_id = -1,
  .cmd_per_lun = 3,
  .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
- .use_clustering = ENABLE_CLUSTERING,
  .sg_tablesize = SG_ALL,
  .max_sectors = 0xffff,
  .track_queue_depth = 1,
@@ -1670,7 +1669,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
  struct fc_stats *stats;
  struct fcoe_crc_eof crc_eof;
  struct fc_frame *fp;
- struct fcoe_port *port;
  struct fcoe_hdr *hp;
 
  fr = fcoe_dev_from_skb(skb);
@@ -1688,7 +1686,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
      skb_end_pointer(skb), skb->csum,
      skb->dev ? skb->dev->name : "<NULL>");
 
- port = lport_priv(lport);
  skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */
 
  /*
@@ -1859,7 +1856,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
  struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
  struct fcoe_ctlr *ctlr;
  struct fcoe_interface *fcoe;
- struct fcoe_port *port;
  struct fc_stats *stats;
  u32 link_possible = 1;
  u32 mfs;
@@ -1897,7 +1893,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
      break;
  case NETDEV_UNREGISTER:
      list_del(&fcoe->list);
-     port = lport_priv(ctlr->lp);
      fcoe_vport_remove(lport);
      mutex_lock(&fcoe_config_mutex);
      fcoe_if_destroy(lport);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index cc461fd7bef1..5b3534b0deda 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -115,7 +115,6 @@ static struct scsi_host_template fnic_host_template = {
  .this_id = -1,
  .cmd_per_lun = 3,
  .can_queue = FNIC_DFLT_IO_REQ,
- .use_clustering = ENABLE_CLUSTERING,
  .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
  .max_sectors = 0xffff,
  .shost_attrs = fnic_attrs,
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 8271785bdb93..bf0fd2aeb92e 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -468,14 +468,13 @@ int fnic_trace_buf_init(void)
  fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
                  FNIC_ENTRY_SIZE_BYTES;
 
- fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+ fnic_trace_buf_p = (unsigned long)vzalloc(trace_max_pages * PAGE_SIZE);
  if (!fnic_trace_buf_p) {
      printk(KERN_ERR PFX "Failed to allocate memory "
              "for fnic_trace_buf_p\n");
      err = -ENOMEM;
      goto err_fnic_trace_buf_init;
  }
- memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
 
  fnic_trace_entries.page_offset =
      vmalloc(array_size(fnic_max_trace_entries,
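
fnic's trace buffer gets the matching vmalloc treatment: vzalloc(size) is vmalloc(size) plus the memset(), and the stray double parentheses go away with the conversion. With buf and size as placeholders:

    /* before */
    buf = (unsigned long)vmalloc(size);
    if (buf)
            memset((void *)buf, 0, size);

    /* after */
    buf = (unsigned long)vzalloc(size);
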
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index fc538181f8df..9cdca0625498 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -700,7 +700,7 @@ static struct scsi_host_template driver_template = {
  .this_id = 7,
  .sg_tablesize = SG_ALL,
  .cmd_per_lun = 2,
- .use_clustering = DISABLE_CLUSTERING,
+ .dma_boundary = PAGE_SIZE - 1,
  .cmd_size = NCR5380_CMD_SIZE,
  .max_sectors = 128,
  };
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 16709735b546..194c294f9b6c 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4680,7 +4680,6 @@ static struct scsi_host_template gdth_template = {
  .sg_tablesize = GDTH_MAXSG,
  .cmd_per_lun = GDTH_MAXC_P_L,
  .unchecked_isa_dma = 1,
- .use_clustering = ENABLE_CLUSTERING,
  .no_write_same = 1,
  };
 
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index a27fc49ebd3a..d2acd0d826e2 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -184,7 +184,7 @@ static struct scsi_host_template gvp11_scsi_template = {
  .this_id = 7,
  .sg_tablesize = SG_ALL,
  .cmd_per_lun = CMD_PER_LUN,
- .use_clustering = DISABLE_CLUSTERING
+ .dma_boundary = PAGE_SIZE - 1,
  };
 
  static int check_wd33c93(struct gvp11_scsiregs *regs)
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 0ddb53c8a2e2..af291947a54d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -69,6 +69,12 @@
  #define HISI_SAS_SATA_PROTOCOL_FPDMA 0x8
  #define HISI_SAS_SATA_PROTOCOL_ATAPI 0x10
 
+ #define HISI_SAS_DIF_PROT_MASK (SHOST_DIF_TYPE1_PROTECTION | \
+                 SHOST_DIF_TYPE2_PROTECTION | \
+                 SHOST_DIF_TYPE3_PROTECTION)
+
+ #define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK)
+
  struct hisi_hba;
 
  enum {
@@ -211,7 +217,7 @@ struct hisi_sas_slot {
  /* Do not reorder/change members after here */
  void *buf;
  dma_addr_t buf_dma;
- int idx;
+ u16 idx;
  };
 
  struct hisi_sas_hw {
@@ -268,6 +274,8 @@ struct hisi_hba {
  struct pci_dev *pci_dev;
  struct device *dev;
 
+ int prot_mask;
+
  void __iomem *regs;
  void __iomem *sgpio_regs;
  struct regmap *ctrl;
@@ -322,6 +330,8 @@ struct hisi_hba {
  unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
  struct work_struct rst_work;
  u32 phy_state;
+ u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
+ u32 intr_coal_count; /* Interrupt count to coalesce */
  };
 
  /* Generic HW DMA host memory structures */
@@ -468,7 +478,6 @@ extern int hisi_sas_remove(struct platform_device *pdev);
  extern int hisi_sas_slave_configure(struct scsi_device *sdev);
  extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
  extern void hisi_sas_scan_start(struct Scsi_Host *shost);
- extern struct device_attribute *host_attrs[];
  extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
  extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
  extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
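
The new HISI_SAS_*_PROT_MASK macros and the hisi_hba->prot_mask field prepare the driver to advertise T10 DIF offload. A hedged sketch of how such a mask is typically handed to the midlayer at probe time (the actual hisi_sas wiring lands in later patches of the series, not in the hunks shown here):

    /* sketch: advertise the supported DIF types before scanning */
    if (prot_mask)
            scsi_host_set_prot(shost, prot_mask & HISI_SAS_PROT_MASK);
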
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index b3f01d5b821b..eed7fc5b3389 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -296,42 +296,109 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
          device_id, abort_flag, tag_to_abort);
  }
 
+ static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
+                struct sas_task *task, int n_elem,
+                int n_elem_req, int n_elem_resp)
+ {
+     struct device *dev = hisi_hba->dev;
+
+     if (!sas_protocol_ata(task->task_proto)) {
+         if (task->num_scatter) {
+             if (n_elem)
+                 dma_unmap_sg(dev, task->scatter,
+                          task->num_scatter,
+                          task->data_dir);
+         } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+             if (n_elem_req)
+                 dma_unmap_sg(dev, &task->smp_task.smp_req,
+                          1, DMA_TO_DEVICE);
+             if (n_elem_resp)
+                 dma_unmap_sg(dev, &task->smp_task.smp_resp,
+                          1, DMA_FROM_DEVICE);
+         }
+     }
+ }
+
+ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
+                 struct sas_task *task, int *n_elem,
+                 int *n_elem_req, int *n_elem_resp)
+ {
+     struct device *dev = hisi_hba->dev;
+     int rc;
+
+     if (sas_protocol_ata(task->task_proto)) {
+         *n_elem = task->num_scatter;
+     } else {
+         unsigned int req_len, resp_len;
+
+         if (task->num_scatter) {
+             *n_elem = dma_map_sg(dev, task->scatter,
+                          task->num_scatter, task->data_dir);
+             if (!*n_elem) {
+                 rc = -ENOMEM;
+                 goto prep_out;
+             }
+         } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+             *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
+                          1, DMA_TO_DEVICE);
+             if (!*n_elem_req) {
+                 rc = -ENOMEM;
+                 goto prep_out;
+             }
+             req_len = sg_dma_len(&task->smp_task.smp_req);
+             if (req_len & 0x3) {
+                 rc = -EINVAL;
+                 goto err_out_dma_unmap;
+             }
+             *n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
+                          1, DMA_FROM_DEVICE);
+             if (!*n_elem_resp) {
+                 rc = -ENOMEM;
+                 goto err_out_dma_unmap;
+             }
+             resp_len = sg_dma_len(&task->smp_task.smp_resp);
+             if (resp_len & 0x3) {
+                 rc = -EINVAL;
+                 goto err_out_dma_unmap;
+             }
+         }
+     }
+
+     if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
+         dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+             *n_elem);
+         rc = -EINVAL;
+         goto err_out_dma_unmap;
+     }
+     return 0;
+
+ err_out_dma_unmap:
+     /* It would be better to call dma_unmap_sg() here, but it's messy */
+     hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
+                *n_elem_req, *n_elem_resp);
+ prep_out:
+     return rc;
+ }
+
  static int hisi_sas_task_prep(struct sas_task *task,
                    struct hisi_sas_dq **dq_pointer,
                    bool is_tmf, struct hisi_sas_tmf_task *tmf,
                    int *pass)
  {
      struct domain_device *device = task->dev;
-     struct hisi_hba *hisi_hba;
+     struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
      struct hisi_sas_device *sas_dev = device->lldd_dev;
      struct hisi_sas_port *port;
      struct hisi_sas_slot *slot;
      struct hisi_sas_cmd_hdr *cmd_hdr_base;
      struct asd_sas_port *sas_port = device->port;
-     struct device *dev;
+     struct device *dev = hisi_hba->dev;
      int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
      int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
      struct hisi_sas_dq *dq;
      unsigned long flags;
      int wr_q_index;
 
-     if (!sas_port) {
-         struct task_status_struct *ts = &task->task_status;
-
-         ts->resp = SAS_TASK_UNDELIVERED;
-         ts->stat = SAS_PHY_DOWN;
-         /*
-          * libsas will use dev->port, should
-          * not call task_done for sata
-          */
-         if (device->dev_type != SAS_SATA_DEV)
-             task->task_done(task);
-         return -ECOMM;
-     }
-
-     hisi_hba = dev_to_hisi_hba(device);
-     dev = hisi_hba->dev;
-
      if (DEV_IS_GONE(sas_dev)) {
          if (sas_dev)
              dev_info(dev, "task prep: device %d not ready\n",
@@ -355,49 +422,10 @@ static int hisi_sas_task_prep(struct sas_task *task,
          return -ECOMM;
      }
 
-     if (!sas_protocol_ata(task->task_proto)) {
-         unsigned int req_len, resp_len;
-
-         if (task->num_scatter) {
-             n_elem = dma_map_sg(dev, task->scatter,
-                         task->num_scatter, task->data_dir);
-             if (!n_elem) {
-                 rc = -ENOMEM;
-                 goto prep_out;
-             }
-         } else if (task->task_proto & SAS_PROTOCOL_SMP) {
-             n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
-                         1, DMA_TO_DEVICE);
-             if (!n_elem_req) {
-                 rc = -ENOMEM;
-                 goto prep_out;
-             }
-             req_len = sg_dma_len(&task->smp_task.smp_req);
-             if (req_len & 0x3) {
-                 rc = -EINVAL;
-                 goto err_out_dma_unmap;
-             }
-             n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
-                          1, DMA_FROM_DEVICE);
-             if (!n_elem_resp) {
-                 rc = -ENOMEM;
-                 goto err_out_dma_unmap;
-             }
-             resp_len = sg_dma_len(&task->smp_task.smp_resp);
-             if (resp_len & 0x3) {
-                 rc = -EINVAL;
-                 goto err_out_dma_unmap;
-             }
-         }
-     } else
-         n_elem = task->num_scatter;
-
-     if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
-         dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
-             n_elem);
-         rc = -EINVAL;
-         goto err_out_dma_unmap;
-     }
+     rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
+                   &n_elem_req, &n_elem_resp);
+     if (rc < 0)
+         goto prep_out;
 
      if (hisi_hba->hw->slot_index_alloc)
          rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
@@ -482,19 +510,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
  err_out_tag:
      hisi_sas_slot_index_free(hisi_hba, slot_idx);
  err_out_dma_unmap:
-     if (!sas_protocol_ata(task->task_proto)) {
-         if (task->num_scatter) {
-             dma_unmap_sg(dev, task->scatter, task->num_scatter,
-                      task->data_dir);
-         } else if (task->task_proto & SAS_PROTOCOL_SMP) {
-             if (n_elem_req)
-                 dma_unmap_sg(dev, &task->smp_task.smp_req,
-                          1, DMA_TO_DEVICE);
-             if (n_elem_resp)
-                 dma_unmap_sg(dev, &task->smp_task.smp_resp,
-                          1, DMA_FROM_DEVICE);
-         }
-     }
+     hisi_sas_dma_unmap(hisi_hba, task, n_elem,
+                n_elem_req, n_elem_resp);
  prep_out:
      dev_err(dev, "task prep: failed[%d]!\n", rc);
      return rc;
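
With the mapping factored out, hisi_sas_task_prep() is left with one call and one symmetric unwind, and every error label funnels through the same helper, so a partially mapped SMP request can no longer be leaked on failure. The resulting usage pattern:

    rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, &n_elem_req, &n_elem_resp);
    if (rc < 0)
            goto prep_out;
    /* ...build and queue the slot; on any later failure: */
    hisi_sas_dma_unmap(hisi_hba, task, n_elem, n_elem_req, n_elem_resp);
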
@@ -506,10 +523,29 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
      u32 rc;
      u32 pass = 0;
      unsigned long flags;
-     struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
-     struct device *dev = hisi_hba->dev;
+     struct hisi_hba *hisi_hba;
+     struct device *dev;
+     struct domain_device *device = task->dev;
+     struct asd_sas_port *sas_port = device->port;
      struct hisi_sas_dq *dq = NULL;
 
+     if (!sas_port) {
+         struct task_status_struct *ts = &task->task_status;
+
+         ts->resp = SAS_TASK_UNDELIVERED;
+         ts->stat = SAS_PHY_DOWN;
+         /*
+          * libsas will use dev->port, should
+          * not call task_done for sata
+          */
+         if (device->dev_type != SAS_SATA_DEV)
+             task->task_done(task);
+         return -ECOMM;
+     }
+
+     hisi_hba = dev_to_hisi_hba(device);
+     dev = hisi_hba->dev;
+
      if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
          if (in_softirq())
              return -EINVAL;
@@ -1459,12 +1495,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
  if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
      struct scsi_cmnd *cmnd = task->uldd_task;
      struct hisi_sas_slot *slot = task->lldd_task;
-     u32 tag = slot->idx;
+     u16 tag = slot->idx;
      int rc2;
 
      int_to_scsilun(cmnd->device->lun, &lun);
      tmf_task.tmf = TMF_ABORT_TASK;
-     tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+     tmf_task.tag_of_task_to_be_managed = tag;
 
      rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
                        &tmf_task);
@@ -1718,7 +1754,7 @@ static int hisi_sas_query_task(struct sas_task *task)
 
      int_to_scsilun(cmnd->device->lun, &lun);
      tmf_task.tmf = TMF_QUERY_TASK;
-     tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+     tmf_task.tag_of_task_to_be_managed = tag;
 
      rc = hisi_sas_debug_issue_ssp_tmf(device,
                        lun.scsi_lun,
@@ -1994,12 +2030,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
  struct scsi_transport_template *hisi_sas_stt;
  EXPORT_SYMBOL_GPL(hisi_sas_stt);
 
- struct device_attribute *host_attrs[] = {
-     &dev_attr_phy_event_threshold,
-     NULL,
- };
- EXPORT_SYMBOL_GPL(host_attrs);
-
  static struct sas_domain_function_template hisi_sas_transport_ops = {
      .lldd_dev_found = hisi_sas_dev_found,
      .lldd_dev_gone = hisi_sas_dev_gone,
@@ -2380,7 +2410,6 @@ int hisi_sas_probe(struct platform_device *pdev,
  shost->max_lun = ~0;
  shost->max_channel = 1;
  shost->max_cmd_len = 16;
- shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
  if (hisi_hba->hw->slot_index_alloc) {
      shost->can_queue = hisi_hba->hw->max_command_entries;
      shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
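
Dropping the exported host_attrs[] (far too generic a symbol name for a global) means each hw generation now carries its own attribute table and points .shost_attrs at it, as the v1 hunks below do:

    static struct device_attribute *host_attrs_v1_hw[] = {
            &dev_attr_phy_event_threshold,
            NULL
    };
    /* and in the template: .shost_attrs = host_attrs_v1_hw, */

The common probe also stops clamping shost->sg_tablesize itself; each template now declares .sg_tablesize = HISI_SAS_SGE_PAGE_CNT directly.
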
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 8df822a4a1bd..28ab52a021cf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -510,6 +510,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
  struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
  struct asd_sas_port *sas_port = device->port;
  struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ u64 sas_addr;
 
  memset(itct, 0, sizeof(*itct));
 
@@ -534,8 +535,8 @@
  itct->qw0 = cpu_to_le64(qw0);
 
  /* qw1 */
- memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
- itct->sas_addr = __swab64(itct->sas_addr);
+ memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+ itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
 
  /* qw2 */
  itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_IT_NEXUS_LOSS_TL_OFF) |
@@ -561,7 +562,7 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
  reg_val &= ~CFG_AGING_TIME_ITCT_REL_MSK;
  hisi_sas_write32(hisi_hba, CFG_AGING_TIME, reg_val);
 
- qw0 = cpu_to_le64(itct->qw0);
+ qw0 = le64_to_cpu(itct->qw0);
  qw0 &= ~ITCT_HDR_VALID_MSK;
  itct->qw0 = cpu_to_le64(qw0);
  }
@@ -1100,7 +1101,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
  case SAS_PROTOCOL_SSP:
  {
      int error = -1;
-     u32 dma_err_type = cpu_to_le32(err_record->dma_err_type);
+     u32 dma_err_type = le32_to_cpu(err_record->dma_err_type);
      u32 dma_tx_err_type = ((dma_err_type &
                  ERR_HDR_DMA_TX_ERR_TYPE_MSK)) >>
                  ERR_HDR_DMA_TX_ERR_TYPE_OFF;
@@ -1108,9 +1109,9 @@
                  ERR_HDR_DMA_RX_ERR_TYPE_MSK)) >>
                  ERR_HDR_DMA_RX_ERR_TYPE_OFF;
      u32 trans_tx_fail_type =
-         cpu_to_le32(err_record->trans_tx_fail_type);
+         le32_to_cpu(err_record->trans_tx_fail_type);
      u32 trans_rx_fail_type =
-         cpu_to_le32(err_record->trans_rx_fail_type);
+         le32_to_cpu(err_record->trans_rx_fail_type);
 
      if (dma_tx_err_type) {
          /* dma tx err */
@@ -1558,7 +1559,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
      u32 cmplt_hdr_data;
 
      complete_hdr = &complete_queue[rd_point];
-     cmplt_hdr_data = cpu_to_le32(complete_hdr->data);
+     cmplt_hdr_data = le32_to_cpu(complete_hdr->data);
      idx = (cmplt_hdr_data & CMPLT_HDR_IPTT_MSK) >>
            CMPLT_HDR_IPTT_OFF;
      slot = &hisi_hba->slot_info[idx];
@@ -1797,6 +1798,11 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
      return 0;
  }
 
+ static struct device_attribute *host_attrs_v1_hw[] = {
+     &dev_attr_phy_event_threshold,
+     NULL
+ };
+
  static struct scsi_host_template sht_v1_hw = {
      .name = DRV_NAME,
      .module = THIS_MODULE,
@@ -1808,14 +1814,13 @@ static struct scsi_host_template sht_v1_hw = {
  .change_queue_depth = sas_change_queue_depth,
  .bios_param = sas_bios_param,
  .this_id = -1,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
  .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
  .eh_device_reset_handler = sas_eh_device_reset_handler,
  .eh_target_reset_handler = sas_eh_target_reset_handler,
  .target_destroy = sas_target_destroy,
  .ioctl = sas_ioctl,
- .shost_attrs = host_attrs,
+ .shost_attrs = host_attrs_v1_hw,
  };
 
  static const struct hisi_sas_hw hisi_sas_v1_hw = {
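
Most of the remaining v1/v2 churn is endianness hygiene flagged by sparse: fields declared __le32/__le64 in the DMA descriptors must be read with le32_to_cpu()/le64_to_cpu(). The old reads used cpu_to_le32(), which computes the same byte swap on every architecture but carries the wrong sparse type, silencing exactly the checking that would catch real endian bugs:

    /* reading a little-endian completion header field */
    u32 cmplt_hdr_data = le32_to_cpu(complete_hdr->data);   /* correct */
    /* was: cpu_to_le32(complete_hdr->data) -- same value, but the
     * wrong direction and wrong __le32/u32 typing for sparse */
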
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 77a85ead483e..c8ebff3ba559 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -934,6 +934,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
  struct domain_device *parent_dev = device->parent;
  struct asd_sas_port *sas_port = device->port;
  struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ u64 sas_addr;
 
  memset(itct, 0, sizeof(*itct));
 
@@ -966,8 +967,8 @@
  itct->qw0 = cpu_to_le64(qw0);
 
  /* qw1 */
- memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
- itct->sas_addr = __swab64(itct->sas_addr);
+ memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+ itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
 
  /* qw2 */
  if (!dev_is_sata(device))
@@ -2044,11 +2045,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
  struct task_status_struct *ts = &task->task_status;
  struct hisi_sas_err_record_v2 *err_record =
          hisi_sas_status_buf_addr_mem(slot);
- u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
- u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
- u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
- u16 sipc_rx_err_type = cpu_to_le16(err_record->sipc_rx_err_type);
- u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type);
+ u32 trans_tx_fail_type = le32_to_cpu(err_record->trans_tx_fail_type);
+ u32 trans_rx_fail_type = le32_to_cpu(err_record->trans_rx_fail_type);
+ u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
+ u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
+ u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
  int error = -1;
 
  if (err_phase == 1) {
@@ -2059,8 +2060,7 @@
          trans_tx_fail_type);
  } else if (err_phase == 2) {
      /* error in RX phase, the priority is: DW1 > DW3 > DW2 */
-     error = parse_trans_rx_err_code_v2_hw(
-                 trans_rx_fail_type);
+     error = parse_trans_rx_err_code_v2_hw(trans_rx_fail_type);
      if (error == -1) {
          error = parse_dma_rx_err_code_v2_hw(
                  dma_rx_err_type);
@@ -2358,6 +2358,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
      &complete_queue[slot->cmplt_queue_slot];
  unsigned long flags;
  bool is_internal = slot->is_internal;
+ u32 dw0;
 
  if (unlikely(!task || !task->lldd_task || !task->dev))
      return -EINVAL;
@@ -2382,8 +2383,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
  }
 
  /* Use SAS+TMF status codes */
- switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
-         >> CMPLT_HDR_ABORT_STAT_OFF) {
+ dw0 = le32_to_cpu(complete_hdr->dw0);
+ switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >>
+         CMPLT_HDR_ABORT_STAT_OFF) {
  case STAT_IO_ABORTED:
      /* this io has been aborted by abort command */
      ts->stat = SAS_ABORTED_TASK;
@@ -2408,9 +2410,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
      break;
  }
 
- if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
-     (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
-     u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
+ if ((dw0 & CMPLT_HDR_ERX_MSK) && (!(dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
+     u32 err_phase = (dw0 & CMPLT_HDR_ERR_PHASE_MSK)
          >> CMPLT_HDR_ERR_PHASE_OFF;
      u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
 
@@ -2526,22 +2527,23 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
2526 struct hisi_sas_tmf_task *tmf = slot->tmf; 2527 struct hisi_sas_tmf_task *tmf = slot->tmf;
2527 u8 *buf_cmd; 2528 u8 *buf_cmd;
2528 int has_data = 0, hdr_tag = 0; 2529 int has_data = 0, hdr_tag = 0;
2529 u32 dw1 = 0, dw2 = 0; 2530 u32 dw0, dw1 = 0, dw2 = 0;
2530 2531
2531 /* create header */ 2532 /* create header */
2532 /* dw0 */ 2533 /* dw0 */
2533 hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); 2534 dw0 = port->id << CMD_HDR_PORT_OFF;
2534 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 2535 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
2535 hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); 2536 dw0 |= 3 << CMD_HDR_CMD_OFF;
2536 else 2537 else
2537 hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF); 2538 dw0 |= 4 << CMD_HDR_CMD_OFF;
2538 2539
2539 if (tmf && tmf->force_phy) { 2540 if (tmf && tmf->force_phy) {
2540 hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; 2541 dw0 |= CMD_HDR_FORCE_PHY_MSK;
2541 hdr->dw0 |= cpu_to_le32((1 << tmf->phy_id) 2542 dw0 |= (1 << tmf->phy_id) << CMD_HDR_PHY_ID_OFF;
2542 << CMD_HDR_PHY_ID_OFF);
2543 } 2543 }
2544 2544
2545 hdr->dw0 = cpu_to_le32(dw0);
2546
2545 /* dw1 */ 2547 /* dw1 */
2546 switch (task->data_dir) { 2548 switch (task->data_dir) {
2547 case DMA_TO_DEVICE: 2549 case DMA_TO_DEVICE:
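
The prep_ata_v2_hw() change above swaps repeated read-modify-write on the __le32 hdr->dw0 for composing the word in CPU byte order and converting once on the final store. A hedged userspace sketch of that compose-then-store shape (all offsets and bit names here are invented, not the controller's real layout):

#include <endian.h>
#include <stdint.h>

#define HDR_PORT_OFF      0              /* invented field offsets */
#define HDR_CMD_OFF       4
#define HDR_FORCE_PHY_BIT (1u << 8)
#define HDR_PHY_ID_OFF    9

struct cmd_hdr { uint32_t dw0; };        /* device expects little-endian */

static void build_dw0(struct cmd_hdr *hdr, unsigned port, int is_expander,
                      int force_phy, unsigned phy_id)
{
        uint32_t dw0 = port << HDR_PORT_OFF;     /* work in CPU order */

        dw0 |= (is_expander ? 3u : 4u) << HDR_CMD_OFF;
        if (force_phy)
                dw0 |= HDR_FORCE_PHY_BIT | ((1u << phy_id) << HDR_PHY_ID_OFF);

        hdr->dw0 = htole32(dw0);                 /* one conversion, one store */
}
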
@@ -3152,20 +3154,24 @@ static void cq_tasklet_v2_hw(unsigned long val)
3152 3154
3153 /* Check for NCQ completion */ 3155 /* Check for NCQ completion */
3154 if (complete_hdr->act) { 3156 if (complete_hdr->act) {
3155 u32 act_tmp = complete_hdr->act; 3157 u32 act_tmp = le32_to_cpu(complete_hdr->act);
3156 int ncq_tag_count = ffs(act_tmp); 3158 int ncq_tag_count = ffs(act_tmp);
3159 u32 dw1 = le32_to_cpu(complete_hdr->dw1);
3157 3160
3158 dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >> 3161 dev_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >>
3159 CMPLT_HDR_DEV_ID_OFF; 3162 CMPLT_HDR_DEV_ID_OFF;
3160 itct = &hisi_hba->itct[dev_id]; 3163 itct = &hisi_hba->itct[dev_id];
3161 3164
3162 /* The NCQ tags are held in the itct header */ 3165 /* The NCQ tags are held in the itct header */
3163 while (ncq_tag_count) { 3166 while (ncq_tag_count) {
3164 __le64 *ncq_tag = &itct->qw4_15[0]; 3167 __le64 *_ncq_tag = &itct->qw4_15[0], __ncq_tag;
3168 u64 ncq_tag;
3165 3169
3166 ncq_tag_count -= 1; 3170 ncq_tag_count--;
3167 iptt = (ncq_tag[ncq_tag_count / 5] 3171 __ncq_tag = _ncq_tag[ncq_tag_count / 5];
3168 >> (ncq_tag_count % 5) * 12) & 0xfff; 3172 ncq_tag = le64_to_cpu(__ncq_tag);
3173 iptt = (ncq_tag >> (ncq_tag_count % 5) * 12) &
3174 0xfff;
3169 3175
3170 slot = &hisi_hba->slot_info[iptt]; 3176 slot = &hisi_hba->slot_info[iptt];
3171 slot->cmplt_queue_slot = rd_point; 3177 slot->cmplt_queue_slot = rd_point;
@@ -3176,7 +3182,9 @@ static void cq_tasklet_v2_hw(unsigned long val)
3176 ncq_tag_count = ffs(act_tmp); 3182 ncq_tag_count = ffs(act_tmp);
3177 } 3183 }
3178 } else { 3184 } else {
3179 iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK; 3185 u32 dw1 = le32_to_cpu(complete_hdr->dw1);
3186
3187 iptt = dw1 & CMPLT_HDR_IPTT_MSK;
3180 slot = &hisi_hba->slot_info[iptt]; 3188 slot = &hisi_hba->slot_info[iptt];
3181 slot->cmplt_queue_slot = rd_point; 3189 slot->cmplt_queue_slot = rd_point;
3182 slot->cmplt_queue = queue; 3190 slot->cmplt_queue = queue;
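
The NCQ loop above packs five 12-bit tags into each 64-bit ITCT qword and now converts the qword to CPU order before shifting. A small self-contained sketch of that unpacking, mirroring the n/5 and (n%5)*12 arithmetic:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* tag n lives in qword n/5 at bit offset (n%5)*12 */
static unsigned ncq_tag_at(const uint64_t *le_qwords, unsigned n)
{
        uint64_t qw = le64toh(le_qwords[n / 5]);

        return (unsigned)((qw >> ((n % 5) * 12)) & 0xfff);
}

int main(void)
{
        uint64_t qwords[3] = { htole64(0x123abc), 0, 0 };

        printf("tag0=%#x tag1=%#x\n",
               ncq_tag_at(qwords, 0), ncq_tag_at(qwords, 1));
        return 0;        /* prints tag0=0xabc tag1=0x123 */
}
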
@@ -3552,6 +3560,11 @@ static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
3552 dev_dbg(dev, "wait commands complete %dms\n", time); 3560 dev_dbg(dev, "wait commands complete %dms\n", time);
3553} 3561}
3554 3562
3563static struct device_attribute *host_attrs_v2_hw[] = {
3564 &dev_attr_phy_event_threshold,
3565 NULL
3566};
3567
3555static struct scsi_host_template sht_v2_hw = { 3568static struct scsi_host_template sht_v2_hw = {
3556 .name = DRV_NAME, 3569 .name = DRV_NAME,
3557 .module = THIS_MODULE, 3570 .module = THIS_MODULE,
@@ -3563,14 +3576,13 @@ static struct scsi_host_template sht_v2_hw = {
3563 .change_queue_depth = sas_change_queue_depth, 3576 .change_queue_depth = sas_change_queue_depth,
3564 .bios_param = sas_bios_param, 3577 .bios_param = sas_bios_param,
3565 .this_id = -1, 3578 .this_id = -1,
3566 .sg_tablesize = SG_ALL, 3579 .sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
3567 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 3580 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
3568 .use_clustering = ENABLE_CLUSTERING,
3569 .eh_device_reset_handler = sas_eh_device_reset_handler, 3581 .eh_device_reset_handler = sas_eh_device_reset_handler,
3570 .eh_target_reset_handler = sas_eh_target_reset_handler, 3582 .eh_target_reset_handler = sas_eh_target_reset_handler,
3571 .target_destroy = sas_target_destroy, 3583 .target_destroy = sas_target_destroy,
3572 .ioctl = sas_ioctl, 3584 .ioctl = sas_ioctl,
3573 .shost_attrs = host_attrs, 3585 .shost_attrs = host_attrs_v2_hw,
3574}; 3586};
3575 3587
3576static const struct hisi_sas_hw hisi_sas_v2_hw = { 3588static const struct hisi_sas_hw hisi_sas_v2_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index a369450a1fa7..e2420a810e99 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -42,6 +42,7 @@
42#define MAX_CON_TIME_LIMIT_TIME 0xa4 42#define MAX_CON_TIME_LIMIT_TIME 0xa4
43#define BUS_INACTIVE_LIMIT_TIME 0xa8 43#define BUS_INACTIVE_LIMIT_TIME 0xa8
44#define REJECT_TO_OPEN_LIMIT_TIME 0xac 44#define REJECT_TO_OPEN_LIMIT_TIME 0xac
45#define CQ_INT_CONVERGE_EN 0xb0
45#define CFG_AGING_TIME 0xbc 46#define CFG_AGING_TIME 0xbc
46#define HGC_DFX_CFG2 0xc0 47#define HGC_DFX_CFG2 0xc0
47#define CFG_ABT_SET_QUERY_IPTT 0xd4 48#define CFG_ABT_SET_QUERY_IPTT 0xd4
@@ -126,6 +127,8 @@
126#define PHY_CTRL (PORT_BASE + 0x14) 127#define PHY_CTRL (PORT_BASE + 0x14)
127#define PHY_CTRL_RESET_OFF 0 128#define PHY_CTRL_RESET_OFF 0
128#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) 129#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
130#define CMD_HDR_PIR_OFF 8
131#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF)
129#define SL_CFG (PORT_BASE + 0x84) 132#define SL_CFG (PORT_BASE + 0x84)
130#define AIP_LIMIT (PORT_BASE + 0x90) 133#define AIP_LIMIT (PORT_BASE + 0x90)
131#define SL_CONTROL (PORT_BASE + 0x94) 134#define SL_CONTROL (PORT_BASE + 0x94)
@@ -332,6 +335,16 @@
332#define ITCT_HDR_RTOLT_OFF 48 335#define ITCT_HDR_RTOLT_OFF 48
333#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) 336#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
334 337
338struct hisi_sas_protect_iu_v3_hw {
339 u32 dw0;
340 u32 lbrtcv;
341 u32 lbrtgv;
342 u32 dw3;
343 u32 dw4;
344 u32 dw5;
345 u32 rsv;
346};
347
335struct hisi_sas_complete_v3_hdr { 348struct hisi_sas_complete_v3_hdr {
336 __le32 dw0; 349 __le32 dw0;
337 __le32 dw1; 350 __le32 dw1;
@@ -371,6 +384,28 @@ struct hisi_sas_err_record_v3 {
371 ((fis.command == ATA_CMD_DEV_RESET) && \ 384 ((fis.command == ATA_CMD_DEV_RESET) && \
372 ((fis.control & ATA_SRST) != 0))) 385 ((fis.control & ATA_SRST) != 0)))
373 386
387#define T10_INSRT_EN_OFF 0
388#define T10_INSRT_EN_MSK (1 << T10_INSRT_EN_OFF)
389#define T10_RMV_EN_OFF 1
390#define T10_RMV_EN_MSK (1 << T10_RMV_EN_OFF)
391#define T10_RPLC_EN_OFF 2
392#define T10_RPLC_EN_MSK (1 << T10_RPLC_EN_OFF)
393#define T10_CHK_EN_OFF 3
394#define T10_CHK_EN_MSK (1 << T10_CHK_EN_OFF)
395#define INCR_LBRT_OFF 5
396#define INCR_LBRT_MSK (1 << INCR_LBRT_OFF)
397#define USR_DATA_BLOCK_SZ_OFF 20
398#define USR_DATA_BLOCK_SZ_MSK (0x3 << USR_DATA_BLOCK_SZ_OFF)
399#define T10_CHK_MSK_OFF 16
400
401static bool hisi_sas_intr_conv;
402MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
403
404/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
405static int prot_mask;
406module_param(prot_mask, int, 0);
407MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
408
374static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) 409static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
375{ 410{
376 void __iomem *regs = hisi_hba->regs + off; 411 void __iomem *regs = hisi_hba->regs + off;
@@ -436,6 +471,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
436 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); 471 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
437 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); 472 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
438 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); 473 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
474 hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
475 hisi_sas_intr_conv);
439 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); 476 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
440 hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff); 477 hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
441 hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff); 478 hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
@@ -494,7 +531,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
494 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); 531 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
495 hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); 532 hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
496 hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); 533 hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
497 534 hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
498 /* used for 12G negotiate */ 535 /* used for 12G negotiate */
499 hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); 536 hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
500 hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff); 537 hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
@@ -622,6 +659,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
622 struct domain_device *parent_dev = device->parent; 659 struct domain_device *parent_dev = device->parent;
623 struct asd_sas_port *sas_port = device->port; 660 struct asd_sas_port *sas_port = device->port;
624 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 661 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
662 u64 sas_addr;
625 663
626 memset(itct, 0, sizeof(*itct)); 664 memset(itct, 0, sizeof(*itct));
627 665
@@ -654,8 +692,8 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
654 itct->qw0 = cpu_to_le64(qw0); 692 itct->qw0 = cpu_to_le64(qw0);
655 693
656 /* qw1 */ 694 /* qw1 */
657 memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE); 695 memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
658 itct->sas_addr = __swab64(itct->sas_addr); 696 itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
659 697
660 /* qw2 */ 698 /* qw2 */
661 if (!dev_is_sata(device)) 699 if (!dev_is_sata(device))
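
The qw1 fix above stops byte-swapping the __le64 itct->sas_addr in place; it builds the value in a local u64 first, then stores it with a single cpu_to_le64(). A userspace sketch of the same three steps, with __builtin_bswap64() standing in for the kernel's __swab64() (the sample address is invented):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* SAS address as it arrives: 8 wire-order (big-endian) bytes */
        const uint8_t wire_addr[8] = { 0x50, 0x00, 0x00, 0x11,
                                       0x22, 0x33, 0x44, 0x55 };
        uint64_t tmp, itct_qw1;

        memcpy(&tmp, wire_addr, sizeof(tmp));    /* 1: copy into a local */
        tmp = __builtin_bswap64(tmp);            /* 2: byte-reverse      */
        itct_qw1 = htole64(tmp);                 /* 3: single LE store   */

        printf("qw1=%#llx\n", (unsigned long long)le64toh(itct_qw1));
        return 0;
}
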
@@ -932,6 +970,58 @@ static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
932 hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); 970 hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
933} 971}
934 972
973static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
974{
975 unsigned char prot_flags = scsi_cmnd->prot_flags;
976
977 if (prot_flags & SCSI_PROT_TRANSFER_PI) {
978 if (prot_flags & SCSI_PROT_REF_CHECK)
979 return 0xc << 16;
980 return 0xfc << 16;
981 }
982 return 0;
983}
984
985static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
986 struct hisi_sas_protect_iu_v3_hw *prot)
987{
988 unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
989 unsigned int interval = scsi_prot_interval(scsi_cmnd);
990 u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
991
992 switch (prot_op) {
993 case SCSI_PROT_READ_STRIP:
994 prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
995 prot->lbrtcv = lbrt_chk_val;
996 prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
997 break;
998 case SCSI_PROT_WRITE_INSERT:
999 prot->dw0 |= T10_INSRT_EN_MSK;
1000 prot->lbrtgv = lbrt_chk_val;
1001 break;
1002 default:
1003 WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
1004 break;
1005 }
1006
1007 switch (interval) {
1008 case 512:
1009 break;
1010 case 4096:
1011 prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
1012 break;
1013 case 520:
1014 prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
1015 break;
1016 default:
1017 WARN(1, "protection interval (0x%x) invalid\n",
1018 interval);
1019 break;
1020 }
1021
1022 prot->dw0 |= INCR_LBRT_MSK;
1023}
1024
935static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, 1025static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
936 struct hisi_sas_slot *slot) 1026 struct hisi_sas_slot *slot)
937{ 1027{
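
The new protection IU and T10 masks above drive end-to-end data protection: each protection interval carries an 8-byte tuple, and the check mask selects which tuple fields the HBA verifies (0xc << 16 vs 0xfc << 16 are hardware-specific check-enable encodings). A sketch of the tuple itself, whose layout comes from the T10 PI specification rather than this driver:

#include <stdint.h>
#include <stdio.h>

struct t10_pi_tuple {
        uint16_t guard;     /* CRC16 over the data interval */
        uint16_t app_tag;   /* application-defined tag */
        uint32_t ref_tag;   /* low 32 bits of the LBA for Type 1 */
};

int main(void)
{
        /* 8 bytes of overhead per 512/520/4096-byte interval */
        printf("PI tuple size: %zu bytes\n", sizeof(struct t10_pi_tuple));
        return 0;
}
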
@@ -943,9 +1033,10 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
943 struct sas_ssp_task *ssp_task = &task->ssp_task; 1033 struct sas_ssp_task *ssp_task = &task->ssp_task;
944 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 1034 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
945 struct hisi_sas_tmf_task *tmf = slot->tmf; 1035 struct hisi_sas_tmf_task *tmf = slot->tmf;
1036 unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
946 int has_data = 0, priority = !!tmf; 1037 int has_data = 0, priority = !!tmf;
947 u8 *buf_cmd; 1038 u8 *buf_cmd;
948 u32 dw1 = 0, dw2 = 0; 1039 u32 dw1 = 0, dw2 = 0, len = 0;
949 1040
950 hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) | 1041 hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
951 (2 << CMD_HDR_TLR_CTRL_OFF) | 1042 (2 << CMD_HDR_TLR_CTRL_OFF) |
@@ -975,7 +1066,6 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
975 1066
976 /* map itct entry */ 1067 /* map itct entry */
977 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 1068 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
978 hdr->dw1 = cpu_to_le32(dw1);
979 1069
980 dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) 1070 dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
981 + 3) / 4) << CMD_HDR_CFL_OFF) | 1071 + 3) / 4) << CMD_HDR_CFL_OFF) |
@@ -988,7 +1078,6 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
988 prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, 1078 prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
989 slot->n_elem); 1079 slot->n_elem);
990 1080
991 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
992 hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); 1081 hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
993 hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); 1082 hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
994 1083
@@ -1013,6 +1102,38 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
1013 break; 1102 break;
1014 } 1103 }
1015 } 1104 }
1105
1106 if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
1107 struct hisi_sas_protect_iu_v3_hw prot;
1108 u8 *buf_cmd_prot;
1109
1110 hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
1111 dw1 |= CMD_HDR_PIR_MSK;
1112 buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
1113 sizeof(struct ssp_frame_hdr) +
1114 sizeof(struct ssp_command_iu);
1115
1116 memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
1117 fill_prot_v3_hw(scsi_cmnd, &prot);
1118 memcpy(buf_cmd_prot, &prot,
1119 sizeof(struct hisi_sas_protect_iu_v3_hw));
1120
1121 /*
1122 * For READ, we need length of info read to memory, while for
1123 * WRITE we need length of data written to the disk.
1124 */
1125 if (prot_op == SCSI_PROT_WRITE_INSERT) {
1126 unsigned int interval = scsi_prot_interval(scsi_cmnd);
1127 unsigned int ilog2_interval = ilog2(interval);
1128
1129 len = (task->total_xfer_len >> ilog2_interval) * 8;
1130 }
1131
1132 }
1133
1134 hdr->dw1 = cpu_to_le32(dw1);
1135
1136 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
1016} 1137}
1017 1138
1018static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, 1139static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
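
The len adjustment above accounts for PI insertion on writes: the wire carries one extra 8-byte tuple per protection interval, so data_transfer_len grows by (total_xfer_len >> ilog2(interval)) * 8. A self-contained sketch of that arithmetic, with __builtin_clz() standing in for the kernel's ilog2():

#include <stdint.h>
#include <stdio.h>

static uint32_t pi_extra_bytes(uint32_t total_xfer_len, uint32_t interval)
{
        /* interval is a nonzero power of two (512, 4096, ...) */
        unsigned ilog2_interval = 31 - __builtin_clz(interval);

        return (total_xfer_len >> ilog2_interval) * 8;
}

int main(void)
{
        /* 64 KiB of 512-byte sectors -> 128 tuples -> 1024 extra bytes */
        printf("%u\n", pi_extra_bytes(64 * 1024, 512));
        return 0;
}
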
@@ -1584,15 +1705,16 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
1584 &complete_queue[slot->cmplt_queue_slot]; 1705 &complete_queue[slot->cmplt_queue_slot];
1585 struct hisi_sas_err_record_v3 *record = 1706 struct hisi_sas_err_record_v3 *record =
1586 hisi_sas_status_buf_addr_mem(slot); 1707 hisi_sas_status_buf_addr_mem(slot);
1587 u32 dma_rx_err_type = record->dma_rx_err_type; 1708 u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
1588 u32 trans_tx_fail_type = record->trans_tx_fail_type; 1709 u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
1710 u32 dw3 = le32_to_cpu(complete_hdr->dw3);
1589 1711
1590 switch (task->task_proto) { 1712 switch (task->task_proto) {
1591 case SAS_PROTOCOL_SSP: 1713 case SAS_PROTOCOL_SSP:
1592 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { 1714 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
1593 ts->residual = trans_tx_fail_type; 1715 ts->residual = trans_tx_fail_type;
1594 ts->stat = SAS_DATA_UNDERRUN; 1716 ts->stat = SAS_DATA_UNDERRUN;
1595 } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { 1717 } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
1596 ts->stat = SAS_QUEUE_FULL; 1718 ts->stat = SAS_QUEUE_FULL;
1597 slot->abort = 1; 1719 slot->abort = 1;
1598 } else { 1720 } else {
@@ -1606,7 +1728,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
1606 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { 1728 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
1607 ts->residual = trans_tx_fail_type; 1729 ts->residual = trans_tx_fail_type;
1608 ts->stat = SAS_DATA_UNDERRUN; 1730 ts->stat = SAS_DATA_UNDERRUN;
1609 } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { 1731 } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
1610 ts->stat = SAS_PHY_DOWN; 1732 ts->stat = SAS_PHY_DOWN;
1611 slot->abort = 1; 1733 slot->abort = 1;
1612 } else { 1734 } else {
@@ -1639,6 +1761,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1639 &complete_queue[slot->cmplt_queue_slot]; 1761 &complete_queue[slot->cmplt_queue_slot];
1640 unsigned long flags; 1762 unsigned long flags;
1641 bool is_internal = slot->is_internal; 1763 bool is_internal = slot->is_internal;
1764 u32 dw0, dw1, dw3;
1642 1765
1643 if (unlikely(!task || !task->lldd_task || !task->dev)) 1766 if (unlikely(!task || !task->lldd_task || !task->dev))
1644 return -EINVAL; 1767 return -EINVAL;
@@ -1662,11 +1785,14 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1662 goto out; 1785 goto out;
1663 } 1786 }
1664 1787
1788 dw0 = le32_to_cpu(complete_hdr->dw0);
1789 dw1 = le32_to_cpu(complete_hdr->dw1);
1790 dw3 = le32_to_cpu(complete_hdr->dw3);
1791
1665 /* 1792 /*
1666 * Use SAS+TMF status codes 1793 * Use SAS+TMF status codes
1667 */ 1794 */
1668 switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK) 1795 switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
1669 >> CMPLT_HDR_ABORT_STAT_OFF) {
1670 case STAT_IO_ABORTED: 1796 case STAT_IO_ABORTED:
1671 /* this IO has been aborted by abort command */ 1797 /* this IO has been aborted by abort command */
1672 ts->stat = SAS_ABORTED_TASK; 1798 ts->stat = SAS_ABORTED_TASK;
@@ -1689,7 +1815,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1689 } 1815 }
1690 1816
1691 /* check for erroneous completion */ 1817 /* check for erroneous completion */
1692 if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) { 1818 if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
1693 u32 *error_info = hisi_sas_status_buf_addr_mem(slot); 1819 u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
1694 1820
1695 slot_err_v3_hw(hisi_hba, task, slot); 1821 slot_err_v3_hw(hisi_hba, task, slot);
@@ -1698,8 +1824,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1698 "CQ hdr: 0x%x 0x%x 0x%x 0x%x " 1824 "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
1699 "Error info: 0x%x 0x%x 0x%x 0x%x\n", 1825 "Error info: 0x%x 0x%x 0x%x 0x%x\n",
1700 slot->idx, task, sas_dev->device_id, 1826 slot->idx, task, sas_dev->device_id,
1701 complete_hdr->dw0, complete_hdr->dw1, 1827 dw0, dw1, complete_hdr->act, dw3,
1702 complete_hdr->act, complete_hdr->dw3,
1703 error_info[0], error_info[1], 1828 error_info[0], error_info[1],
1704 error_info[2], error_info[3]); 1829 error_info[2], error_info[3]);
1705 if (unlikely(slot->abort)) 1830 if (unlikely(slot->abort))
@@ -1797,11 +1922,13 @@ static void cq_tasklet_v3_hw(unsigned long val)
1797 while (rd_point != wr_point) { 1922 while (rd_point != wr_point) {
1798 struct hisi_sas_complete_v3_hdr *complete_hdr; 1923 struct hisi_sas_complete_v3_hdr *complete_hdr;
1799 struct device *dev = hisi_hba->dev; 1924 struct device *dev = hisi_hba->dev;
1925 u32 dw1;
1800 int iptt; 1926 int iptt;
1801 1927
1802 complete_hdr = &complete_queue[rd_point]; 1928 complete_hdr = &complete_queue[rd_point];
1929 dw1 = le32_to_cpu(complete_hdr->dw1);
1803 1930
1804 iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK; 1931 iptt = dw1 & CMPLT_HDR_IPTT_MSK;
1805 if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { 1932 if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
1806 slot = &hisi_hba->slot_info[iptt]; 1933 slot = &hisi_hba->slot_info[iptt];
1807 slot->cmplt_queue_slot = rd_point; 1934 slot->cmplt_queue_slot = rd_point;
@@ -1878,10 +2005,12 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
1878 for (i = 0; i < hisi_hba->queue_count; i++) { 2005 for (i = 0; i < hisi_hba->queue_count; i++) {
1879 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2006 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1880 struct tasklet_struct *t = &cq->tasklet; 2007 struct tasklet_struct *t = &cq->tasklet;
2008 int nr = hisi_sas_intr_conv ? 16 : 16 + i;
2009 unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;
1881 2010
1882 rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16), 2011 rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
1883 cq_interrupt_v3_hw, 0, 2012 cq_interrupt_v3_hw, irqflags,
1884 DRV_NAME " cq", cq); 2013 DRV_NAME " cq", cq);
1885 if (rc) { 2014 if (rc) {
1886 dev_err(dev, 2015 dev_err(dev,
1887 "could not request cq%d interrupt, rc=%d\n", 2016 "could not request cq%d interrupt, rc=%d\n",
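
The vector math above is the whole convergence story: with intr_conv set, every completion queue shares MSI-X vector 16 (hence IRQF_SHARED); without it, queue i keeps its private vector 16 + i. As a one-line sketch (16 being this controller's first CQ vector):

static int cq_vector(int queue_idx, int intr_conv)
{
        return intr_conv ? 16 : 16 + queue_idx;  /* shared vs per-queue vector */
}
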
@@ -1898,8 +2027,9 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
1898free_cq_irqs: 2027free_cq_irqs:
1899 for (k = 0; k < i; k++) { 2028 for (k = 0; k < i; k++) {
1900 struct hisi_sas_cq *cq = &hisi_hba->cq[k]; 2029 struct hisi_sas_cq *cq = &hisi_hba->cq[k];
2030 int nr = hisi_sas_intr_conv ? 16 : 16 + k;
1901 2031
1902 free_irq(pci_irq_vector(pdev, k+16), cq); 2032 free_irq(pci_irq_vector(pdev, nr), cq);
1903 } 2033 }
1904 free_irq(pci_irq_vector(pdev, 11), hisi_hba); 2034 free_irq(pci_irq_vector(pdev, 11), hisi_hba);
1905free_chnl_interrupt: 2035free_chnl_interrupt:
@@ -2089,6 +2219,119 @@ static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
2089 dev_dbg(dev, "wait commands complete %dms\n", time); 2219 dev_dbg(dev, "wait commands complete %dms\n", time);
2090} 2220}
2091 2221
2222static ssize_t intr_conv_v3_hw_show(struct device *dev,
2223 struct device_attribute *attr, char *buf)
2224{
2225 return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
2226}
2227static DEVICE_ATTR_RO(intr_conv_v3_hw);
2228
2229static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
2230{
2231 /* config those registers between enable and disable PHYs */
2232 hisi_sas_stop_phys(hisi_hba);
2233
2234 if (hisi_hba->intr_coal_ticks == 0 ||
2235 hisi_hba->intr_coal_count == 0) {
2236 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
2237 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
2238 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
2239 } else {
2240 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);
2241 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
2242 hisi_hba->intr_coal_ticks);
2243 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
2244 hisi_hba->intr_coal_count);
2245 }
2246 phys_init_v3_hw(hisi_hba);
2247}
2248
2249static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
2250 struct device_attribute *attr,
2251 char *buf)
2252{
2253 struct Scsi_Host *shost = class_to_shost(dev);
2254 struct hisi_hba *hisi_hba = shost_priv(shost);
2255
2256 return scnprintf(buf, PAGE_SIZE, "%u\n",
2257 hisi_hba->intr_coal_ticks);
2258}
2259
2260static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
2261 struct device_attribute *attr,
2262 const char *buf, size_t count)
2263{
2264 struct Scsi_Host *shost = class_to_shost(dev);
2265 struct hisi_hba *hisi_hba = shost_priv(shost);
2266 u32 intr_coal_ticks;
2267 int ret;
2268
2269 ret = kstrtou32(buf, 10, &intr_coal_ticks);
2270 if (ret) {
2271 dev_err(dev, "Input data of interrupt coalesce unmatch\n");
2272 return -EINVAL;
2273 }
2274
2275 if (intr_coal_ticks >= BIT(24)) {
2276 dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
2277 return -EINVAL;
2278 }
2279
2280 hisi_hba->intr_coal_ticks = intr_coal_ticks;
2281
2282 config_intr_coal_v3_hw(hisi_hba);
2283
2284 return count;
2285}
2286static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw);
2287
2288static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
2289 struct device_attribute
2290 *attr, char *buf)
2291{
2292 struct Scsi_Host *shost = class_to_shost(dev);
2293 struct hisi_hba *hisi_hba = shost_priv(shost);
2294
2295 return scnprintf(buf, PAGE_SIZE, "%u\n",
2296 hisi_hba->intr_coal_count);
2297}
2298
2299static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
2300 struct device_attribute
2301 *attr, const char *buf, size_t count)
2302{
2303 struct Scsi_Host *shost = class_to_shost(dev);
2304 struct hisi_hba *hisi_hba = shost_priv(shost);
2305 u32 intr_coal_count;
2306 int ret;
2307
2308 ret = kstrtou32(buf, 10, &intr_coal_count);
2309 if (ret) {
2310 dev_err(dev, "Input data of interrupt coalesce unmatch\n");
2311 return -EINVAL;
2312 }
2313
2314 if (intr_coal_count >= BIT(8)) {
2315 dev_err(dev, "intr_coal_count must be less than 2^8!\n");
2316 return -EINVAL;
2317 }
2318
2319 hisi_hba->intr_coal_count = intr_coal_count;
2320
2321 config_intr_coal_v3_hw(hisi_hba);
2322
2323 return count;
2324}
2325static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
2326
2327static struct device_attribute *host_attrs_v3_hw[] = {
2328 &dev_attr_phy_event_threshold,
2329 &dev_attr_intr_conv_v3_hw,
2330 &dev_attr_intr_coal_ticks_v3_hw,
2331 &dev_attr_intr_coal_count_v3_hw,
2332 NULL
2333};
2334
2092static struct scsi_host_template sht_v3_hw = { 2335static struct scsi_host_template sht_v3_hw = {
2093 .name = DRV_NAME, 2336 .name = DRV_NAME,
2094 .module = THIS_MODULE, 2337 .module = THIS_MODULE,
@@ -2100,14 +2343,13 @@ static struct scsi_host_template sht_v3_hw = {
2100 .change_queue_depth = sas_change_queue_depth, 2343 .change_queue_depth = sas_change_queue_depth,
2101 .bios_param = sas_bios_param, 2344 .bios_param = sas_bios_param,
2102 .this_id = -1, 2345 .this_id = -1,
2103 .sg_tablesize = SG_ALL, 2346 .sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
2104 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 2347 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
2105 .use_clustering = ENABLE_CLUSTERING,
2106 .eh_device_reset_handler = sas_eh_device_reset_handler, 2348 .eh_device_reset_handler = sas_eh_device_reset_handler,
2107 .eh_target_reset_handler = sas_eh_target_reset_handler, 2349 .eh_target_reset_handler = sas_eh_target_reset_handler,
2108 .target_destroy = sas_target_destroy, 2350 .target_destroy = sas_target_destroy,
2109 .ioctl = sas_ioctl, 2351 .ioctl = sas_ioctl,
2110 .shost_attrs = host_attrs, 2352 .shost_attrs = host_attrs_v3_hw,
2111 .tag_alloc_policy = BLK_TAG_ALLOC_RR, 2353 .tag_alloc_policy = BLK_TAG_ALLOC_RR,
2112}; 2354};
2113 2355
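
The sysfs handlers added above follow a fixed recipe: kstrtou32() to parse, a range check sized to the hardware field (coalescing ticks fit in 24 bits, the count in 8), then reprogramming the registers with the PHYs quiesced. A userspace analog of the parse-and-bound step (a sketch only; the real handlers return -EINVAL exactly as the hunk shows):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

static int parse_coal(const char *buf, unsigned width_bits, uint32_t *out)
{
        char *end;
        unsigned long v = strtoul(buf, &end, 10);

        if (end == buf || (*end != '\0' && *end != '\n'))
                return -EINVAL;                  /* not a clean decimal */
        if (v >= (1ul << width_bits))
                return -EINVAL;                  /* exceeds register field */
        *out = (uint32_t)v;
        return 0;
}
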
@@ -2161,6 +2403,12 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
2161 hisi_hba->shost = shost; 2403 hisi_hba->shost = shost;
2162 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2404 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2163 2405
2406 if (prot_mask & ~HISI_SAS_PROT_MASK)
2407 dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
2408 prot_mask);
2409 else
2410 hisi_hba->prot_mask = prot_mask;
2411
2164 timer_setup(&hisi_hba->timer, NULL, 0); 2412 timer_setup(&hisi_hba->timer, NULL, 0);
2165 2413
2166 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2414 if (hisi_sas_get_fw_info(hisi_hba) < 0)
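
The prot_mask check above keeps a bad module parameter from ever reaching scsi_host_set_prot(): any bit outside the supported set drops the whole request back to the default of no protection. A sketch of the same gate (0x7 is an illustrative supported set, not the driver's real HISI_SAS_PROT_MASK):

#include <stdint.h>
#include <stdio.h>

#define SUPPORTED_PROT_MASK 0x7u    /* illustrative only */

static uint32_t sanitize_prot_mask(uint32_t requested)
{
        if (requested & ~SUPPORTED_PROT_MASK) {
                fprintf(stderr, "unsupported prot_mask %#x, using 0x0\n",
                        requested);
                return 0;
        }
        return requested;
}
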
@@ -2199,14 +2447,11 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2199 if (rc) 2447 if (rc)
2200 goto err_out_disable_device; 2448 goto err_out_disable_device;
2201 2449
2202 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) || 2450 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
2203 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) { 2451 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
2204 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || 2452 dev_err(dev, "No usable DMA addressing method\n");
2205 (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { 2453 rc = -EIO;
2206 dev_err(dev, "No usable DMA addressing method\n"); 2454 goto err_out_regions;
2207 rc = -EIO;
2208 goto err_out_regions;
2209 }
2210 } 2455 }
2211 2456
2212 shost = hisi_sas_shost_alloc_pci(pdev); 2457 shost = hisi_sas_shost_alloc_pci(pdev);
@@ -2245,7 +2490,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2245 shost->max_lun = ~0; 2490 shost->max_lun = ~0;
2246 shost->max_channel = 1; 2491 shost->max_channel = 1;
2247 shost->max_cmd_len = 16; 2492 shost->max_cmd_len = 16;
2248 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
2249 shost->can_queue = hisi_hba->hw->max_command_entries - 2493 shost->can_queue = hisi_hba->hw->max_command_entries -
2250 HISI_SAS_RESERVED_IPTT_CNT; 2494 HISI_SAS_RESERVED_IPTT_CNT;
2251 shost->cmd_per_lun = hisi_hba->hw->max_command_entries - 2495 shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
@@ -2275,6 +2519,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2275 if (rc) 2519 if (rc)
2276 goto err_out_register_ha; 2520 goto err_out_register_ha;
2277 2521
2522 if (hisi_hba->prot_mask) {
2523 dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
2524 prot_mask);
2525 scsi_host_set_prot(hisi_hba->shost, prot_mask);
2526 }
2527
2278 scsi_scan_host(shost); 2528 scsi_scan_host(shost);
2279 2529
2280 return 0; 2530 return 0;
@@ -2301,8 +2551,9 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
2301 free_irq(pci_irq_vector(pdev, 11), hisi_hba); 2551 free_irq(pci_irq_vector(pdev, 11), hisi_hba);
2302 for (i = 0; i < hisi_hba->queue_count; i++) { 2552 for (i = 0; i < hisi_hba->queue_count; i++) {
2303 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2553 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2554 int nr = hisi_sas_intr_conv ? 16 : 16 + i;
2304 2555
2305 free_irq(pci_irq_vector(pdev, i+16), cq); 2556 free_irq(pci_irq_vector(pdev, nr), cq);
2306 } 2557 }
2307 pci_free_irq_vectors(pdev); 2558 pci_free_irq_vectors(pdev);
2308} 2559}
@@ -2529,7 +2780,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
2529 struct hisi_hba *hisi_hba = sha->lldd_ha; 2780 struct hisi_hba *hisi_hba = sha->lldd_ha;
2530 struct device *dev = hisi_hba->dev; 2781 struct device *dev = hisi_hba->dev;
2531 struct Scsi_Host *shost = hisi_hba->shost; 2782 struct Scsi_Host *shost = hisi_hba->shost;
2532 u32 device_state; 2783 pci_power_t device_state;
2533 int rc; 2784 int rc;
2534 2785
2535 if (!pdev->pm_cap) { 2786 if (!pdev->pm_cap) {
@@ -2575,7 +2826,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
2575 struct Scsi_Host *shost = hisi_hba->shost; 2826 struct Scsi_Host *shost = hisi_hba->shost;
2576 struct device *dev = hisi_hba->dev; 2827 struct device *dev = hisi_hba->dev;
2577 unsigned int rc; 2828 unsigned int rc;
2578 u32 device_state = pdev->current_state; 2829 pci_power_t device_state = pdev->current_state;
2579 2830
2580 dev_warn(dev, "resuming from operating state [D%d]\n", 2831 dev_warn(dev, "resuming from operating state [D%d]\n",
2581 device_state); 2832 device_state);
@@ -2624,6 +2875,7 @@ static struct pci_driver sas_v3_pci_driver = {
2624}; 2875};
2625 2876
2626module_pci_driver(sas_v3_pci_driver); 2877module_pci_driver(sas_v3_pci_driver);
2878module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);
2627 2879
2628MODULE_LICENSE("GPL"); 2880MODULE_LICENSE("GPL");
2629MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2881MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index cc71136ba300..eaf329db3973 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -416,7 +416,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
416 shost->sg_prot_tablesize = sht->sg_prot_tablesize; 416 shost->sg_prot_tablesize = sht->sg_prot_tablesize;
417 shost->cmd_per_lun = sht->cmd_per_lun; 417 shost->cmd_per_lun = sht->cmd_per_lun;
418 shost->unchecked_isa_dma = sht->unchecked_isa_dma; 418 shost->unchecked_isa_dma = sht->unchecked_isa_dma;
419 shost->use_clustering = sht->use_clustering;
420 shost->no_write_same = sht->no_write_same; 419 shost->no_write_same = sht->no_write_same;
421 420
422 if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler) 421 if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
@@ -449,6 +448,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
449 else 448 else
450 shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS; 449 shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
451 450
451 if (sht->max_segment_size)
452 shost->max_segment_size = sht->max_segment_size;
453 else
454 shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
455
452 /* 456 /*
453 * assume a 4GB boundary, if not set 457 * assume a 4GB boundary, if not set
454 */ 458 */
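
The hosts.c hunk above is the flip side of dropping use_clustering: instead of a boolean, each host now advertises an explicit per-segment byte limit, defaulting to the block layer's maximum when the template leaves it unset. The selection reduces to the sketch below (64 KiB is my reading of BLK_MAX_SEGMENT_SIZE; treat the constant as an assumption):

#include <stdint.h>

#define DEFAULT_MAX_SEGMENT_SIZE 65536u   /* assumed BLK_MAX_SEGMENT_SIZE */

static uint32_t pick_max_segment_size(uint32_t template_value)
{
        return template_value ? template_value : DEFAULT_MAX_SEGMENT_SIZE;
}
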
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c9cccf35e9d7..ff67ef5d5347 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -965,7 +965,6 @@ static struct scsi_host_template hpsa_driver_template = {
965 .scan_finished = hpsa_scan_finished, 965 .scan_finished = hpsa_scan_finished,
966 .change_queue_depth = hpsa_change_queue_depth, 966 .change_queue_depth = hpsa_change_queue_depth,
967 .this_id = -1, 967 .this_id = -1,
968 .use_clustering = ENABLE_CLUSTERING,
969 .eh_device_reset_handler = hpsa_eh_device_reset_handler, 968 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
970 .ioctl = hpsa_ioctl, 969 .ioctl = hpsa_ioctl,
971 .slave_alloc = hpsa_slave_alloc, 970 .slave_alloc = hpsa_slave_alloc,
@@ -4663,6 +4662,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4663 case WRITE_6: 4662 case WRITE_6:
4664 case WRITE_12: 4663 case WRITE_12:
4665 is_write = 1; 4664 is_write = 1;
4665 /* fall through */
4666 case READ_6: 4666 case READ_6:
4667 case READ_12: 4667 case READ_12:
4668 if (*cdb_len == 6) { 4668 if (*cdb_len == 6) {
@@ -5093,6 +5093,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5093 switch (cmd->cmnd[0]) { 5093 switch (cmd->cmnd[0]) {
5094 case WRITE_6: 5094 case WRITE_6:
5095 is_write = 1; 5095 is_write = 1;
5096 /* fall through */
5096 case READ_6: 5097 case READ_6:
5097 first_block = (((cmd->cmnd[1] & 0x1F) << 16) | 5098 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5098 (cmd->cmnd[2] << 8) | 5099 (cmd->cmnd[2] << 8) |
@@ -5103,6 +5104,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5103 break; 5104 break;
5104 case WRITE_10: 5105 case WRITE_10:
5105 is_write = 1; 5106 is_write = 1;
5107 /* fall through */
5106 case READ_10: 5108 case READ_10:
5107 first_block = 5109 first_block =
5108 (((u64) cmd->cmnd[2]) << 24) | 5110 (((u64) cmd->cmnd[2]) << 24) |
@@ -5115,6 +5117,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5115 break; 5117 break;
5116 case WRITE_12: 5118 case WRITE_12:
5117 is_write = 1; 5119 is_write = 1;
5120 /* fall through */
5118 case READ_12: 5121 case READ_12:
5119 first_block = 5122 first_block =
5120 (((u64) cmd->cmnd[2]) << 24) | 5123 (((u64) cmd->cmnd[2]) << 24) |
@@ -5129,6 +5132,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5129 break; 5132 break;
5130 case WRITE_16: 5133 case WRITE_16:
5131 is_write = 1; 5134 is_write = 1;
5135 /* fall through */
5132 case READ_16: 5136 case READ_16:
5133 first_block = 5137 first_block =
5134 (((u64) cmd->cmnd[2]) << 56) | 5138 (((u64) cmd->cmnd[2]) << 56) |
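
The hpsa hunks above only add comments, but the comments are load-bearing: GCC's -Wimplicit-fallthrough treats a /* fall through */ annotation as proof the missing break is intentional. The WRITE_n cases set a flag and deliberately fall into the READ_n decode they share; the minimal shape of the pattern:

#include <stdio.h>

static int decode(int opcode)
{
        int is_write = 0;

        switch (opcode) {
        case 1:                         /* a WRITE-style opcode */
                is_write = 1;
                /* fall through */
        case 0:                         /* READ-style opcode, shared decode */
                return is_write ? 2 : 1;
        default:
                return -1;
        }
}

int main(void)
{
        printf("%d %d\n", decode(1), decode(0));   /* prints: 2 1 */
        return 0;
}
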
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 2fad7f03aa02..3eedfd4f8f57 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1180,7 +1180,6 @@ static struct scsi_host_template driver_template = {
1180 .eh_host_reset_handler = hptiop_reset, 1180 .eh_host_reset_handler = hptiop_reset,
1181 .info = hptiop_info, 1181 .info = hptiop_info,
1182 .emulated = 0, 1182 .emulated = 0,
1183 .use_clustering = ENABLE_CLUSTERING,
1184 .proc_name = driver_name, 1183 .proc_name = driver_name,
1185 .shost_attrs = hptiop_attrs, 1184 .shost_attrs = hptiop_attrs,
1186 .slave_configure = hptiop_slave_config, 1185 .slave_configure = hptiop_slave_config,
@@ -1309,11 +1308,11 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
1309 1308
1310 /* Enable 64bit DMA if possible */ 1309 /* Enable 64bit DMA if possible */
1311 iop_ops = (struct hptiop_adapter_ops *)id->driver_data; 1310 iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
1312 if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) { 1311 if (dma_set_mask(&pcidev->dev,
1313 if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) { 1312 DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) ||
1314 printk(KERN_ERR "hptiop: fail to set dma_mask\n"); 1313 dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) {
1315 goto disable_pci_device; 1314 printk(KERN_ERR "hptiop: fail to set dma_mask\n");
1316 } 1315 goto disable_pci_device;
1317 } 1316 }
1318 1317
1319 if (pci_request_regions(pcidev, driver_name)) { 1318 if (pci_request_regions(pcidev, driver_name)) {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b64ca977825d..dbaa4f131433 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3100,7 +3100,6 @@ static struct scsi_host_template driver_template = {
3100 .this_id = -1, 3100 .this_id = -1,
3101 .sg_tablesize = SG_ALL, 3101 .sg_tablesize = SG_ALL,
3102 .max_sectors = IBMVFC_MAX_SECTORS, 3102 .max_sectors = IBMVFC_MAX_SECTORS,
3103 .use_clustering = ENABLE_CLUSTERING,
3104 .shost_attrs = ibmvfc_attrs, 3103 .shost_attrs = ibmvfc_attrs,
3105 .track_queue_depth = 1, 3104 .track_queue_depth = 1,
3106}; 3105};
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 9df8a1a2299c..1135e74646e2 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2079,7 +2079,6 @@ static struct scsi_host_template driver_template = {
2079 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT, 2079 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
2080 .this_id = -1, 2080 .this_id = -1,
2081 .sg_tablesize = SG_ALL, 2081 .sg_tablesize = SG_ALL,
2082 .use_clustering = ENABLE_CLUSTERING,
2083 .shost_attrs = ibmvscsi_attrs, 2082 .shost_attrs = ibmvscsi_attrs,
2084}; 2083};
2085 2084
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index e63aadd10dfd..cc9cae469c4b 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3695,11 +3695,6 @@ static int ibmvscsis_get_system_info(void)
3695 return 0; 3695 return 0;
3696} 3696}
3697 3697
3698static char *ibmvscsis_get_fabric_name(void)
3699{
3700 return "ibmvscsis";
3701}
3702
3703static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg) 3698static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
3704{ 3699{
3705 struct ibmvscsis_tport *tport = 3700 struct ibmvscsis_tport *tport =
@@ -4044,9 +4039,8 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
4044 4039
4045static const struct target_core_fabric_ops ibmvscsis_ops = { 4040static const struct target_core_fabric_ops ibmvscsis_ops = {
4046 .module = THIS_MODULE, 4041 .module = THIS_MODULE,
4047 .name = "ibmvscsis", 4042 .fabric_name = "ibmvscsis",
4048 .max_data_sg_nents = MAX_TXU / PAGE_SIZE, 4043 .max_data_sg_nents = MAX_TXU / PAGE_SIZE,
4049 .get_fabric_name = ibmvscsis_get_fabric_name,
4050 .tpg_get_wwn = ibmvscsis_get_fabric_wwn, 4044 .tpg_get_wwn = ibmvscsis_get_fabric_wwn,
4051 .tpg_get_tag = ibmvscsis_get_tag, 4045 .tpg_get_tag = ibmvscsis_get_tag,
4052 .tpg_get_default_depth = ibmvscsis_get_default_depth, 4046 .tpg_get_default_depth = ibmvscsis_get_default_depth,
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 8c6627bc8a39..cea7f502e8ca 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1110,7 +1110,6 @@ static struct scsi_host_template imm_template = {
1110 .bios_param = imm_biosparam, 1110 .bios_param = imm_biosparam,
1111 .this_id = 7, 1111 .this_id = 7,
1112 .sg_tablesize = SG_ALL, 1112 .sg_tablesize = SG_ALL,
1113 .use_clustering = ENABLE_CLUSTERING,
1114 .can_queue = 1, 1113 .can_queue = 1,
1115 .slave_alloc = imm_adjust_queue, 1114 .slave_alloc = imm_adjust_queue,
1116}; 1115};
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 7a91cf3ff173..eb2778b5c81b 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2817,7 +2817,6 @@ static struct scsi_host_template initio_template = {
2817 .can_queue = MAX_TARGETS * i91u_MAXQUEUE, 2817 .can_queue = MAX_TARGETS * i91u_MAXQUEUE,
2818 .this_id = 1, 2818 .this_id = 1,
2819 .sg_tablesize = SG_ALL, 2819 .sg_tablesize = SG_ALL,
2820 .use_clustering = ENABLE_CLUSTERING,
2821}; 2820};
2822 2821
2823static int initio_probe_one(struct pci_dev *pdev, 2822static int initio_probe_one(struct pci_dev *pdev,
@@ -2840,7 +2839,7 @@ static int initio_probe_one(struct pci_dev *pdev,
2840 reg = 0; 2839 reg = 0;
2841 bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8)); 2840 bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
2842 2841
2843 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 2842 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
2844 printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n"); 2843 printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n");
2845 error = -ENODEV; 2844 error = -ENODEV;
2846 goto out_disable_device; 2845 goto out_disable_device;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 271990bc065b..d1b4025a4503 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6754,7 +6754,6 @@ static struct scsi_host_template driver_template = {
6754 .sg_tablesize = IPR_MAX_SGLIST, 6754 .sg_tablesize = IPR_MAX_SGLIST,
6755 .max_sectors = IPR_IOA_MAX_SECTORS, 6755 .max_sectors = IPR_IOA_MAX_SECTORS,
6756 .cmd_per_lun = IPR_MAX_CMD_PER_LUN, 6756 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6757 .use_clustering = ENABLE_CLUSTERING,
6758 .shost_attrs = ipr_ioa_attrs, 6757 .shost_attrs = ipr_ioa_attrs,
6759 .sdev_attrs = ipr_dev_attrs, 6758 .sdev_attrs = ipr_dev_attrs,
6760 .proc_name = IPR_NAME, 6759 .proc_name = IPR_NAME,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index ee8a1ecd58fd..e8bc8d328bab 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -365,7 +365,6 @@ static struct scsi_host_template ips_driver_template = {
365 .this_id = -1, 365 .this_id = -1,
366 .sg_tablesize = IPS_MAX_SG, 366 .sg_tablesize = IPS_MAX_SG,
367 .cmd_per_lun = 3, 367 .cmd_per_lun = 3,
368 .use_clustering = ENABLE_CLUSTERING,
369 .no_write_same = 1, 368 .no_write_same = 1,
370}; 369};
371 370
@@ -1801,13 +1800,13 @@ ips_fill_scb_sg_single(ips_ha_t * ha, dma_addr_t busaddr,
1801 } 1800 }
1802 if (IPS_USE_ENH_SGLIST(ha)) { 1801 if (IPS_USE_ENH_SGLIST(ha)) {
1803 scb->sg_list.enh_list[indx].address_lo = 1802 scb->sg_list.enh_list[indx].address_lo =
1804 cpu_to_le32(pci_dma_lo32(busaddr)); 1803 cpu_to_le32(lower_32_bits(busaddr));
1805 scb->sg_list.enh_list[indx].address_hi = 1804 scb->sg_list.enh_list[indx].address_hi =
1806 cpu_to_le32(pci_dma_hi32(busaddr)); 1805 cpu_to_le32(upper_32_bits(busaddr));
1807 scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len); 1806 scb->sg_list.enh_list[indx].length = cpu_to_le32(e_len);
1808 } else { 1807 } else {
1809 scb->sg_list.std_list[indx].address = 1808 scb->sg_list.std_list[indx].address =
1810 cpu_to_le32(pci_dma_lo32(busaddr)); 1809 cpu_to_le32(lower_32_bits(busaddr));
1811 scb->sg_list.std_list[indx].length = cpu_to_le32(e_len); 1810 scb->sg_list.std_list[indx].length = cpu_to_le32(e_len);
1812 } 1811 }
1813 1812
@@ -6678,7 +6677,6 @@ ips_register_scsi(int index)
6678 sh->sg_tablesize = sh->hostt->sg_tablesize; 6677 sh->sg_tablesize = sh->hostt->sg_tablesize;
6679 sh->can_queue = sh->hostt->can_queue; 6678 sh->can_queue = sh->hostt->can_queue;
6680 sh->cmd_per_lun = sh->hostt->cmd_per_lun; 6679 sh->cmd_per_lun = sh->hostt->cmd_per_lun;
6681 sh->use_clustering = sh->hostt->use_clustering;
6682 sh->max_sectors = 128; 6680 sh->max_sectors = 128;
6683 6681
6684 sh->max_id = ha->ntargets; 6682 sh->max_id = ha->ntargets;
@@ -6926,7 +6924,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
6926 * it! Also, don't use 64bit addressing if dma addresses 6924 * it! Also, don't use 64bit addressing if dma addresses
6927 * are guaranteed to be < 4G. 6925 * are guaranteed to be < 4G.
6928 */ 6926 */
6929 if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) && 6927 if (sizeof(dma_addr_t) > 4 && IPS_HAS_ENH_SGLIST(ha) &&
6930 !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) { 6928 !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
6931 (ha)->flags |= IPS_HA_ENH_SG; 6929 (ha)->flags |= IPS_HA_ENH_SG;
6932 } else { 6930 } else {
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index db546171e97f..6c0678fb9a67 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -96,15 +96,6 @@
96 #define __iomem 96 #define __iomem
97 #endif 97 #endif
98 98
99 #define pci_dma_hi32(a) ((a >> 16) >> 16)
100 #define pci_dma_lo32(a) (a & 0xffffffff)
101
102 #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
103 #define IPS_ENABLE_DMA64 (1)
104 #else
105 #define IPS_ENABLE_DMA64 (0)
106 #endif
107
108 /* 99 /*
109 * Adapter address map equates 100 * Adapter address map equates
110 */ 101 */
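
The ips hunks above retire the driver-local pci_dma_hi32()/pci_dma_lo32() macros in favor of the generic kernel helpers. Userspace equivalents, for reference:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        uint64_t busaddr = 0x123456789abcdef0ull;  /* sample bus address */

        printf("hi=%#x lo=%#x\n", upper_32_bits(busaddr),
               lower_32_bits(busaddr));
        return 0;
}
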
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 08c7b1e25fe4..68b90c4f79a3 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -163,7 +163,6 @@ static struct scsi_host_template isci_sht = {
163 .this_id = -1, 163 .this_id = -1,
164 .sg_tablesize = SG_ALL, 164 .sg_tablesize = SG_ALL,
165 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 165 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
166 .use_clustering = ENABLE_CLUSTERING,
167 .eh_abort_handler = sas_eh_abort_handler, 166 .eh_abort_handler = sas_eh_abort_handler,
168 .eh_device_reset_handler = sas_eh_device_reset_handler, 167 .eh_device_reset_handler = sas_eh_device_reset_handler,
169 .eh_target_reset_handler = sas_eh_target_reset_handler, 168 .eh_target_reset_handler = sas_eh_target_reset_handler,
@@ -304,21 +303,10 @@ static int isci_pci_init(struct pci_dev *pdev)
304 303
305 pci_set_master(pdev); 304 pci_set_master(pdev);
306 305
307 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 306 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
308 if (err) { 307 if (err)
309 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 308 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
310 if (err) 309 return err;
311 return err;
312 }
313
314 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
315 if (err) {
316 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
317 if (err)
318 return err;
319 }
320
321 return 0;
322} 310}
323 311
324static int num_controllers(struct pci_dev *pdev) 312static int num_controllers(struct pci_dev *pdev)
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 1deca8c5a94f..7f9b3f20e5e4 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -778,6 +778,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
778 break; 778 break;
779 case SCU_EVENT_LINK_FAILURE: 779 case SCU_EVENT_LINK_FAILURE:
780 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); 780 scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
781 /* fall through */
781 case SCU_EVENT_HARD_RESET_RECEIVED: 782 case SCU_EVENT_HARD_RESET_RECEIVED:
782 /* Start the oob/sn state machine over again */ 783 /* Start the oob/sn state machine over again */
783 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 784 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index cc51f38b116d..9d29edb9f590 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -310,7 +310,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
310 /* Kill all outstanding requests for the device. */ 310 /* Kill all outstanding requests for the device. */
311 sci_remote_device_terminate_requests(idev); 311 sci_remote_device_terminate_requests(idev);
312 312
313 /* Fall through into the default case... */ 313 /* Fall through - into the default case... */
314 default: 314 default:
315 clear_bit(IDEV_IO_READY, &idev->flags); 315 clear_bit(IDEV_IO_READY, &idev->flags);
316 break; 316 break;
@@ -593,7 +593,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
593 593
594 break; 594 break;
595 } 595 }
596 /* Else, fall through and treat as unhandled... */ 596 /* fall through - and treat as unhandled... */
597 default: 597 default:
598 dev_dbg(scirdev_to_dev(idev), 598 dev_dbg(scirdev_to_dev(idev),
599 "%s: device: %p event code: %x: %s\n", 599 "%s: device: %p event code: %x: %s\n",
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index e3f2a5359d71..474a43460963 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -601,9 +601,9 @@ enum sci_status sci_remote_node_context_suspend(
601 __func__, sci_rnc); 601 __func__, sci_rnc);
602 return SCI_FAILURE_INVALID_STATE; 602 return SCI_FAILURE_INVALID_STATE;
603 } 603 }
604 /* Fall through and handle like SCI_RNC_POSTING */ 604 /* Fall through - and handle like SCI_RNC_POSTING */
605 case SCI_RNC_RESUMING: 605 case SCI_RNC_RESUMING:
606 /* Fall through and handle like SCI_RNC_POSTING */ 606 /* Fall through - and handle like SCI_RNC_POSTING */
607 case SCI_RNC_POSTING: 607 case SCI_RNC_POSTING:
608 /* Set the destination state to AWAIT - this signals the 608 /* Set the destination state to AWAIT - this signals the
609 * entry into the SCI_RNC_READY state that a suspension 609 * entry into the SCI_RNC_READY state that a suspension
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 2f151708b59a..1b18cf55167e 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -894,7 +894,7 @@ sci_io_request_terminate(struct isci_request *ireq)
894 * and don't wait for the task response. 894 * and don't wait for the task response.
895 */ 895 */
896 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 896 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
897 /* Fall through and handle like ABORTING... */ 897 /* Fall through - and handle like ABORTING... */
898 case SCI_REQ_ABORTING: 898 case SCI_REQ_ABORTING:
899 if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) 899 if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
900 set_bit(IREQ_PENDING_ABORT, &ireq->flags); 900 set_bit(IREQ_PENDING_ABORT, &ireq->flags);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 23354f206533..cae6368ebb98 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -44,6 +44,7 @@
44#include <scsi/scsi_host.h> 44#include <scsi/scsi_host.h>
45#include <scsi/scsi.h> 45#include <scsi/scsi.h>
46#include <scsi/scsi_transport_iscsi.h> 46#include <scsi/scsi_transport_iscsi.h>
47#include <trace/events/iscsi.h>
47 48
48#include "iscsi_tcp.h" 49#include "iscsi_tcp.h"
49 50
@@ -72,6 +73,9 @@ MODULE_PARM_DESC(debug_iscsi_tcp, "Turn on debugging for iscsi_tcp module "
72 iscsi_conn_printk(KERN_INFO, _conn, \ 73 iscsi_conn_printk(KERN_INFO, _conn, \
73 "%s " dbg_fmt, \ 74 "%s " dbg_fmt, \
74 __func__, ##arg); \ 75 __func__, ##arg); \
76 iscsi_dbg_trace(trace_iscsi_dbg_sw_tcp, \
77 &(_conn)->cls_conn->dev, \
78 "%s " dbg_fmt, __func__, ##arg);\
75 } while (0); 79 } while (0);
76 80
77 81
@@ -980,7 +984,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
980 .eh_abort_handler = iscsi_eh_abort, 984 .eh_abort_handler = iscsi_eh_abort,
981 .eh_device_reset_handler= iscsi_eh_device_reset, 985 .eh_device_reset_handler= iscsi_eh_device_reset,
982 .eh_target_reset_handler = iscsi_eh_recover_target, 986 .eh_target_reset_handler = iscsi_eh_recover_target,
983 .use_clustering = DISABLE_CLUSTERING, 987 .dma_boundary = PAGE_SIZE - 1,
984 .slave_alloc = iscsi_sw_tcp_slave_alloc, 988 .slave_alloc = iscsi_sw_tcp_slave_alloc,
985 .slave_configure = iscsi_sw_tcp_slave_configure, 989 .slave_configure = iscsi_sw_tcp_slave_configure,
986 .target_alloc = iscsi_target_alloc, 990 .target_alloc = iscsi_target_alloc,
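
The iscsi_tcp line above is the idiomatic replacement for disabling clustering: a dma_boundary of PAGE_SIZE - 1 tells the block layer never to merge segments across a page, which is what DISABLE_CLUSTERING used to guarantee. A sketch of the boundary predicate the mask expresses (4096 assumed as the page size):

#include <stdint.h>
#include <stdio.h>

/* a merged segment is legal only if it stays inside one boundary window */
static int crosses_boundary(uint64_t start, uint64_t len, uint64_t mask)
{
        return (start & ~mask) != ((start + len - 1) & ~mask);
}

int main(void)
{
        uint64_t page_mask = 4096 - 1;

        printf("%d %d\n",
               crosses_boundary(0x1000, 4096, page_mask),   /* 0: one page  */
               crosses_boundary(0x1800, 4096, page_mask));  /* 1: straddles */
        return 0;
}
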
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 1e1c0f1b9e69..9192a1d9dec6 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -860,7 +860,6 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
860static void fc_rport_recv_flogi_req(struct fc_lport *lport, 860static void fc_rport_recv_flogi_req(struct fc_lport *lport,
861 struct fc_frame *rx_fp) 861 struct fc_frame *rx_fp)
862{ 862{
863 struct fc_disc *disc;
864 struct fc_els_flogi *flp; 863 struct fc_els_flogi *flp;
865 struct fc_rport_priv *rdata; 864 struct fc_rport_priv *rdata;
866 struct fc_frame *fp = rx_fp; 865 struct fc_frame *fp = rx_fp;
@@ -871,7 +870,6 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
871 870
872 FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n"); 871 FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");
873 872
874 disc = &lport->disc;
875 if (!lport->point_to_multipoint) { 873 if (!lport->point_to_multipoint) {
876 rjt_data.reason = ELS_RJT_UNSUP; 874 rjt_data.reason = ELS_RJT_UNSUP;
877 rjt_data.explan = ELS_EXPL_NONE; 875 rjt_data.explan = ELS_EXPL_NONE;
@@ -1724,6 +1722,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
1724 kref_put(&rdata->kref, fc_rport_destroy); 1722 kref_put(&rdata->kref, fc_rport_destroy);
1725 goto busy; 1723 goto busy;
1726 } 1724 }
1725 /* fall through */
1727 default: 1726 default:
1728 FC_RPORT_DBG(rdata, 1727 FC_RPORT_DBG(rdata,
1729 "Reject ELS 0x%02x while in state %s\n", 1728 "Reject ELS 0x%02x while in state %s\n",
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f78d2e5c1471..b8d325ce8754 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -40,6 +40,7 @@
40#include <scsi/scsi_transport.h> 40#include <scsi/scsi_transport.h>
41#include <scsi/scsi_transport_iscsi.h> 41#include <scsi/scsi_transport_iscsi.h>
42#include <scsi/libiscsi.h> 42#include <scsi/libiscsi.h>
43#include <trace/events/iscsi.h>
43 44
44static int iscsi_dbg_lib_conn; 45static int iscsi_dbg_lib_conn;
45module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int, 46module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
@@ -68,6 +69,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
68 iscsi_conn_printk(KERN_INFO, _conn, \ 69 iscsi_conn_printk(KERN_INFO, _conn, \
69 "%s " dbg_fmt, \ 70 "%s " dbg_fmt, \
70 __func__, ##arg); \ 71 __func__, ##arg); \
72 iscsi_dbg_trace(trace_iscsi_dbg_conn, \
73 &(_conn)->cls_conn->dev, \
74 "%s " dbg_fmt, __func__, ##arg);\
71 } while (0); 75 } while (0);
72 76
73#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ 77#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
@@ -76,6 +80,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
76 iscsi_session_printk(KERN_INFO, _session, \ 80 iscsi_session_printk(KERN_INFO, _session, \
77 "%s " dbg_fmt, \ 81 "%s " dbg_fmt, \
78 __func__, ##arg); \ 82 __func__, ##arg); \
83 iscsi_dbg_trace(trace_iscsi_dbg_session, \
84 &(_session)->cls_session->dev, \
85 "%s " dbg_fmt, __func__, ##arg); \
79 } while (0); 86 } while (0);
80 87
81#define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \ 88#define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \
@@ -84,6 +91,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
84 iscsi_session_printk(KERN_INFO, _session, \ 91 iscsi_session_printk(KERN_INFO, _session, \
85 "%s " dbg_fmt, \ 92 "%s " dbg_fmt, \
86 __func__, ##arg); \ 93 __func__, ##arg); \
94 iscsi_dbg_trace(trace_iscsi_dbg_eh, \
95 &(_session)->cls_session->dev, \
96 "%s " dbg_fmt, __func__, ##arg); \
87 } while (0); 97 } while (0);
88 98
89inline void iscsi_conn_queue_work(struct iscsi_conn *conn) 99inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
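
All three libiscsi macros above gain the same second sink: the conditional printk stays for dmesg users, and every message is now also mirrored to a tracepoint regardless of the debug flags. The dual-sink shape, with a stderr stub standing in for the kernel tracepoint:

#include <stdio.h>

#define TRACE_SINK(fmt, ...) fprintf(stderr, "trace: " fmt, ##__VA_ARGS__)

static int dbg_enabled;   /* analogous to the module debug parameters */

#define ISCSI_DBG(fmt, ...)                                         \
        do {                                                        \
                if (dbg_enabled)                                    \
                        printf("%s " fmt, __func__, ##__VA_ARGS__); \
                TRACE_SINK("%s " fmt, __func__, ##__VA_ARGS__);     \
        } while (0)

int main(void)
{
        ISCSI_DBG("conn %d up\n", 3);   /* traced even with dbg_enabled=0 */
        return 0;
}
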
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 4fcb9e65be57..8a6b1b3f8277 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -43,6 +43,7 @@
43#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
44#include <scsi/scsi.h> 44#include <scsi/scsi.h>
45#include <scsi/scsi_transport_iscsi.h> 45#include <scsi/scsi_transport_iscsi.h>
46#include <trace/events/iscsi.h>
46 47
47#include "iscsi_tcp.h" 48#include "iscsi_tcp.h"
48 49
@@ -65,6 +66,9 @@ MODULE_PARM_DESC(debug_libiscsi_tcp, "Turn on debugging for libiscsi_tcp "
65 iscsi_conn_printk(KERN_INFO, _conn, \ 66 iscsi_conn_printk(KERN_INFO, _conn, \
66 "%s " dbg_fmt, \ 67 "%s " dbg_fmt, \
67 __func__, ##arg); \ 68 __func__, ##arg); \
69 iscsi_dbg_trace(trace_iscsi_dbg_tcp, \
70 &(_conn)->cls_conn->dev, \
71 "%s " dbg_fmt, __func__, ##arg);\
68 } while (0); 72 } while (0);
69 73
70static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn, 74static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index 2e70140f70c3..5d51520c6f23 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -26,10 +26,11 @@ libsas-y += sas_init.o \
26 sas_phy.o \ 26 sas_phy.o \
27 sas_port.o \ 27 sas_port.o \
28 sas_event.o \ 28 sas_event.o \
29 sas_dump.o \
30 sas_discover.o \ 29 sas_discover.o \
31 sas_expander.o \ 30 sas_expander.o \
32 sas_scsi_host.o \ 31 sas_scsi_host.o \
33 sas_task.o 32 sas_task.o
34libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o 33libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
35libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o 34libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
35
36ccflags-y := -DDEBUG
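
A note on the new ccflags-y := -DDEBUG line: pr_debug() only produces output when DEBUG is defined (or CONFIG_DYNAMIC_DEBUG is in play), so defining it for the whole of libsas keeps the converted SAS_DPRINTK call sites printing unconditionally at KERN_DEBUG, as they did before. Simplified sketch of the definition in <linux/printk.h>, ignoring the dynamic-debug variant:

/* Simplified: with -DDEBUG the call prints at KERN_DEBUG; without it,
 * no_printk() type-checks the arguments but generates no output. */
#ifdef DEBUG
#define pr_debug(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif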
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index c90b278cc28c..6f93fee2b21b 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -75,8 +75,8 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
75 75
76 case SAS_OPEN_TO: 76 case SAS_OPEN_TO:
77 case SAS_OPEN_REJECT: 77 case SAS_OPEN_REJECT:
78 SAS_DPRINTK("%s: Saw error %d. What to do?\n", 78 pr_warn("%s: Saw error %d. What to do?\n",
79 __func__, ts->stat); 79 __func__, ts->stat);
80 return AC_ERR_OTHER; 80 return AC_ERR_OTHER;
81 81
82 case SAM_STAT_CHECK_CONDITION: 82 case SAM_STAT_CHECK_CONDITION:
@@ -151,8 +151,7 @@ static void sas_ata_task_done(struct sas_task *task)
151 } else { 151 } else {
152 ac = sas_to_ata_err(stat); 152 ac = sas_to_ata_err(stat);
153 if (ac) { 153 if (ac) {
154 SAS_DPRINTK("%s: SAS error %x\n", __func__, 154 pr_warn("%s: SAS error %x\n", __func__, stat->stat);
155 stat->stat);
156 /* We saw a SAS error. Send a vague error. */ 155 /* We saw a SAS error. Send a vague error. */
157 if (!link->sactive) { 156 if (!link->sactive) {
158 qc->err_mask = ac; 157 qc->err_mask = ac;
@@ -237,7 +236,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
237 236
238 ret = i->dft->lldd_execute_task(task, GFP_ATOMIC); 237 ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
239 if (ret) { 238 if (ret) {
240 SAS_DPRINTK("lldd_execute_task returned: %d\n", ret); 239 pr_debug("lldd_execute_task returned: %d\n", ret);
241 240
242 if (qc->scsicmd) 241 if (qc->scsicmd)
243 ASSIGN_SAS_TASK(qc->scsicmd, NULL); 242 ASSIGN_SAS_TASK(qc->scsicmd, NULL);
@@ -282,9 +281,9 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
282 res = sas_get_report_phy_sata(dev->parent, phy->phy_id, 281 res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
283 &dev->sata_dev.rps_resp); 282 &dev->sata_dev.rps_resp);
284 if (res) { 283 if (res) {
285 SAS_DPRINTK("report phy sata to %016llx:0x%x returned " 284 pr_debug("report phy sata to %016llx:0x%x returned 0x%x\n",
286 "0x%x\n", SAS_ADDR(dev->parent->sas_addr), 285 SAS_ADDR(dev->parent->sas_addr),
287 phy->phy_id, res); 286 phy->phy_id, res);
288 return res; 287 return res;
289 } 288 }
290 memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis, 289 memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
@@ -375,7 +374,7 @@ static int sas_ata_printk(const char *level, const struct domain_device *ddev,
375 vaf.fmt = fmt; 374 vaf.fmt = fmt;
376 vaf.va = &args; 375 vaf.va = &args;
377 376
378 r = printk("%ssas: ata%u: %s: %pV", 377 r = printk("%s" SAS_FMT "ata%u: %s: %pV",
379 level, ap->print_id, dev_name(dev), &vaf); 378 level, ap->print_id, dev_name(dev), &vaf);
380 379
381 va_end(args); 380 va_end(args);
@@ -431,8 +430,7 @@ static void sas_ata_internal_abort(struct sas_task *task)
431 if (task->task_state_flags & SAS_TASK_STATE_ABORTED || 430 if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
432 task->task_state_flags & SAS_TASK_STATE_DONE) { 431 task->task_state_flags & SAS_TASK_STATE_DONE) {
433 spin_unlock_irqrestore(&task->task_state_lock, flags); 432 spin_unlock_irqrestore(&task->task_state_lock, flags);
434 SAS_DPRINTK("%s: Task %p already finished.\n", __func__, 433 pr_debug("%s: Task %p already finished.\n", __func__, task);
435 task);
436 goto out; 434 goto out;
437 } 435 }
438 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 436 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
@@ -452,7 +450,7 @@ static void sas_ata_internal_abort(struct sas_task *task)
452 * aborted ata tasks, otherwise we (likely) leak the sas task 450 * aborted ata tasks, otherwise we (likely) leak the sas task
453 * here 451 * here
454 */ 452 */
455 SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task); 453 pr_warn("%s: Task %p leaked.\n", __func__, task);
456 454
457 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) 455 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
458 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; 456 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
@@ -558,7 +556,7 @@ int sas_ata_init(struct domain_device *found_dev)
558 556
559 ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL); 557 ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
560 if (!ata_host) { 558 if (!ata_host) {
561 SAS_DPRINTK("ata host alloc failed.\n"); 559 pr_err("ata host alloc failed.\n");
562 return -ENOMEM; 560 return -ENOMEM;
563 } 561 }
564 562
@@ -566,7 +564,7 @@ int sas_ata_init(struct domain_device *found_dev)
566 564
567 ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost); 565 ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
568 if (!ap) { 566 if (!ap) {
569 SAS_DPRINTK("ata_sas_port_alloc failed.\n"); 567 pr_err("ata_sas_port_alloc failed.\n");
570 rc = -ENODEV; 568 rc = -ENODEV;
571 goto free_host; 569 goto free_host;
572 } 570 }
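
On the sas_ata_printk() hunk above: a raw printk() bypasses pr_fmt(), so the "sas: " prefix is re-attached by concatenating the SAS_FMT literal into the format string, and the caller's message arrives pre-packaged through the %pV specifier. A self-contained sketch of the same technique, with a hypothetical prefix:

#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical wrapper: forward variadic args through %pV so a fixed
 * prefix and the caller's format print as one atomic message. */
static int example_prefixed_printk(const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	r = printk("%s" "example: " "%pV", level, &vaf);
	va_end(args);
	return r;
}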
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index dde433aa59c2..726ada9b8c79 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -128,7 +128,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
128 SAS_FANOUT_EXPANDER_DEVICE); 128 SAS_FANOUT_EXPANDER_DEVICE);
129 break; 129 break;
130 default: 130 default:
131 printk("ERROR: Unidentified device type %d\n", dev->dev_type); 131 pr_warn("ERROR: Unidentified device type %d\n", dev->dev_type);
132 rphy = NULL; 132 rphy = NULL;
133 break; 133 break;
134 } 134 }
@@ -186,10 +186,9 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
186 186
187 res = i->dft->lldd_dev_found(dev); 187 res = i->dft->lldd_dev_found(dev);
188 if (res) { 188 if (res) {
189 printk("sas: driver on pcidev %s cannot handle " 189 pr_warn("driver on host %s cannot handle device %llx, error:%d\n",
190 "device %llx, error:%d\n", 190 dev_name(sas_ha->dev),
191 dev_name(sas_ha->dev), 191 SAS_ADDR(dev->sas_addr), res);
192 SAS_ADDR(dev->sas_addr), res);
193 } 192 }
194 set_bit(SAS_DEV_FOUND, &dev->state); 193 set_bit(SAS_DEV_FOUND, &dev->state);
195 kref_get(&dev->kref); 194 kref_get(&dev->kref);
@@ -456,8 +455,8 @@ static void sas_discover_domain(struct work_struct *work)
456 return; 455 return;
457 dev = port->port_dev; 456 dev = port->port_dev;
458 457
459 SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id, 458 pr_debug("DOING DISCOVERY on port %d, pid:%d\n", port->id,
460 task_pid_nr(current)); 459 task_pid_nr(current));
461 460
462 switch (dev->dev_type) { 461 switch (dev->dev_type) {
463 case SAS_END_DEVICE: 462 case SAS_END_DEVICE:
@@ -473,12 +472,12 @@ static void sas_discover_domain(struct work_struct *work)
473 error = sas_discover_sata(dev); 472 error = sas_discover_sata(dev);
474 break; 473 break;
475#else 474#else
476 SAS_DPRINTK("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); 475 pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
477 /* Fall through */ 476 /* Fall through */
478#endif 477#endif
479 default: 478 default:
480 error = -ENXIO; 479 error = -ENXIO;
481 SAS_DPRINTK("unhandled device %d\n", dev->dev_type); 480 pr_err("unhandled device %d\n", dev->dev_type);
482 break; 481 break;
483 } 482 }
484 483
@@ -495,8 +494,8 @@ static void sas_discover_domain(struct work_struct *work)
495 494
496 sas_probe_devices(port); 495 sas_probe_devices(port);
497 496
498 SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id, 497 pr_debug("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
499 task_pid_nr(current), error); 498 task_pid_nr(current), error);
500} 499}
501 500
502static void sas_revalidate_domain(struct work_struct *work) 501static void sas_revalidate_domain(struct work_struct *work)
@@ -510,22 +509,22 @@ static void sas_revalidate_domain(struct work_struct *work)
510 /* prevent revalidation from finding sata links in recovery */ 509 /* prevent revalidation from finding sata links in recovery */
511 mutex_lock(&ha->disco_mutex); 510 mutex_lock(&ha->disco_mutex);
512 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { 511 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
513 SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n", 512 pr_debug("REVALIDATION DEFERRED on port %d, pid:%d\n",
514 port->id, task_pid_nr(current)); 513 port->id, task_pid_nr(current));
515 goto out; 514 goto out;
516 } 515 }
517 516
518 clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending); 517 clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
519 518
520 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, 519 pr_debug("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
521 task_pid_nr(current)); 520 task_pid_nr(current));
522 521
523 if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || 522 if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
524 ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) 523 ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
525 res = sas_ex_revalidate_domain(ddev); 524 res = sas_ex_revalidate_domain(ddev);
526 525
527 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", 526 pr_debug("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
528 port->id, task_pid_nr(current), res); 527 port->id, task_pid_nr(current), res);
529 out: 528 out:
530 mutex_unlock(&ha->disco_mutex); 529 mutex_unlock(&ha->disco_mutex);
531 530
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
deleted file mode 100644
index 7e5d262e7a7d..000000000000
--- a/drivers/scsi/libsas/sas_dump.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Serial Attached SCSI (SAS) Dump/Debugging routines
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_dump.h"
26
27static const char *sas_porte_str[] = {
28 [0] = "PORTE_BYTES_DMAED",
29 [1] = "PORTE_BROADCAST_RCVD",
30 [2] = "PORTE_LINK_RESET_ERR",
31 [3] = "PORTE_TIMER_EVENT",
32 [4] = "PORTE_HARD_RESET",
33};
34
35static const char *sas_phye_str[] = {
36 [0] = "PHYE_LOSS_OF_SIGNAL",
37 [1] = "PHYE_OOB_DONE",
38 [2] = "PHYE_OOB_ERROR",
39 [3] = "PHYE_SPINUP_HOLD",
40 [4] = "PHYE_RESUME_TIMEOUT",
41};
42
43void sas_dprint_porte(int phyid, enum port_event pe)
44{
45 SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]);
46}
47void sas_dprint_phye(int phyid, enum phy_event pe)
48{
49 SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
50}
51
52void sas_dump_port(struct asd_sas_port *port)
53{
54 SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
55 SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id,
56 SAS_ADDR(port->sas_addr));
57 SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id,
58 SAS_ADDR(port->attached_sas_addr));
59 SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto);
60 SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto);
61 SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
62 SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
63}
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
deleted file mode 100644
index 6aaee6b0fcdb..000000000000
--- a/drivers/scsi/libsas/sas_dump.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Serial Attached SCSI (SAS) Dump/Debugging routines header file
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26
27void sas_dprint_porte(int phyid, enum port_event pe);
28void sas_dprint_phye(int phyid, enum phy_event pe);
29void sas_dump_port(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index ae923eb6de95..b1e0f7d2b396 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -25,7 +25,6 @@
25#include <linux/export.h> 25#include <linux/export.h>
26#include <scsi/scsi_host.h> 26#include <scsi/scsi_host.h>
27#include "sas_internal.h" 27#include "sas_internal.h"
28#include "sas_dump.h"
29 28
30int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw) 29int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
31{ 30{
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 0d1f72752ca2..17eb4185f29d 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -99,17 +99,17 @@ static int smp_execute_task_sg(struct domain_device *dev,
99 99
100 if (res) { 100 if (res) {
101 del_timer(&task->slow_task->timer); 101 del_timer(&task->slow_task->timer);
102 SAS_DPRINTK("executing SMP task failed:%d\n", res); 102 pr_notice("executing SMP task failed:%d\n", res);
103 break; 103 break;
104 } 104 }
105 105
106 wait_for_completion(&task->slow_task->completion); 106 wait_for_completion(&task->slow_task->completion);
107 res = -ECOMM; 107 res = -ECOMM;
108 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 108 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
109 SAS_DPRINTK("smp task timed out or aborted\n"); 109 pr_notice("smp task timed out or aborted\n");
110 i->dft->lldd_abort_task(task); 110 i->dft->lldd_abort_task(task);
111 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 111 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
112 SAS_DPRINTK("SMP task aborted and not done\n"); 112 pr_notice("SMP task aborted and not done\n");
113 break; 113 break;
114 } 114 }
115 } 115 }
@@ -134,11 +134,11 @@ static int smp_execute_task_sg(struct domain_device *dev,
134 task->task_status.stat == SAS_DEVICE_UNKNOWN) 134 task->task_status.stat == SAS_DEVICE_UNKNOWN)
135 break; 135 break;
136 else { 136 else {
137 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " 137 pr_notice("%s: task to dev %016llx response: 0x%x status 0x%x\n",
138 "status 0x%x\n", __func__, 138 __func__,
139 SAS_ADDR(dev->sas_addr), 139 SAS_ADDR(dev->sas_addr),
140 task->task_status.resp, 140 task->task_status.resp,
141 task->task_status.stat); 141 task->task_status.stat);
142 sas_free_task(task); 142 sas_free_task(task);
143 task = NULL; 143 task = NULL;
144 } 144 }
@@ -347,11 +347,11 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
347 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) 347 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
348 set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); 348 set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending);
349 349
350 SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", 350 pr_debug("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
351 test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", 351 test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
352 SAS_ADDR(dev->sas_addr), phy->phy_id, 352 SAS_ADDR(dev->sas_addr), phy->phy_id,
353 sas_route_char(dev, phy), phy->linkrate, 353 sas_route_char(dev, phy), phy->linkrate,
354 SAS_ADDR(phy->attached_sas_addr), type); 354 SAS_ADDR(phy->attached_sas_addr), type);
355} 355}
356 356
357/* check if we have an existing attached ata device on this expander phy */ 357/* check if we have an existing attached ata device on this expander phy */
@@ -393,7 +393,7 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
393 return res; 393 return res;
394 dr = &((struct smp_resp *)disc_resp)->disc; 394 dr = &((struct smp_resp *)disc_resp)->disc;
395 if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) { 395 if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
396 sas_printk("Found loopback topology, just ignore it!\n"); 396 pr_notice("Found loopback topology, just ignore it!\n");
397 return 0; 397 return 0;
398 } 398 }
399 sas_set_ex_phy(dev, single, disc_resp); 399 sas_set_ex_phy(dev, single, disc_resp);
@@ -500,12 +500,12 @@ static int sas_ex_general(struct domain_device *dev)
500 RG_RESP_SIZE); 500 RG_RESP_SIZE);
501 501
502 if (res) { 502 if (res) {
503 SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", 503 pr_notice("RG to ex %016llx failed:0x%x\n",
504 SAS_ADDR(dev->sas_addr), res); 504 SAS_ADDR(dev->sas_addr), res);
505 goto out; 505 goto out;
506 } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { 506 } else if (rg_resp->result != SMP_RESP_FUNC_ACC) {
507 SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n", 507 pr_debug("RG:ex %016llx returned SMP result:0x%x\n",
508 SAS_ADDR(dev->sas_addr), rg_resp->result); 508 SAS_ADDR(dev->sas_addr), rg_resp->result);
509 res = rg_resp->result; 509 res = rg_resp->result;
510 goto out; 510 goto out;
511 } 511 }
@@ -513,8 +513,8 @@ static int sas_ex_general(struct domain_device *dev)
513 ex_assign_report_general(dev, rg_resp); 513 ex_assign_report_general(dev, rg_resp);
514 514
515 if (dev->ex_dev.configuring) { 515 if (dev->ex_dev.configuring) {
516 SAS_DPRINTK("RG: ex %llx self-configuring...\n", 516 pr_debug("RG: ex %llx self-configuring...\n",
517 SAS_ADDR(dev->sas_addr)); 517 SAS_ADDR(dev->sas_addr));
518 schedule_timeout_interruptible(5*HZ); 518 schedule_timeout_interruptible(5*HZ);
519 } else 519 } else
520 break; 520 break;
@@ -568,12 +568,12 @@ static int sas_ex_manuf_info(struct domain_device *dev)
568 568
569 res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE); 569 res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
570 if (res) { 570 if (res) {
571 SAS_DPRINTK("MI: ex %016llx failed:0x%x\n", 571 pr_notice("MI: ex %016llx failed:0x%x\n",
572 SAS_ADDR(dev->sas_addr), res); 572 SAS_ADDR(dev->sas_addr), res);
573 goto out; 573 goto out;
574 } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { 574 } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) {
575 SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n", 575 pr_debug("MI ex %016llx returned SMP result:0x%x\n",
576 SAS_ADDR(dev->sas_addr), mi_resp[2]); 576 SAS_ADDR(dev->sas_addr), mi_resp[2]);
577 goto out; 577 goto out;
578 } 578 }
579 579
@@ -836,10 +836,9 @@ static struct domain_device *sas_ex_discover_end_dev(
836 836
837 res = sas_discover_sata(child); 837 res = sas_discover_sata(child);
838 if (res) { 838 if (res) {
839 SAS_DPRINTK("sas_discover_sata() for device %16llx at " 839 pr_notice("sas_discover_sata() for device %16llx at %016llx:0x%x returned 0x%x\n",
840 "%016llx:0x%x returned 0x%x\n", 840 SAS_ADDR(child->sas_addr),
841 SAS_ADDR(child->sas_addr), 841 SAS_ADDR(parent->sas_addr), phy_id, res);
842 SAS_ADDR(parent->sas_addr), phy_id, res);
843 goto out_list_del; 842 goto out_list_del;
844 } 843 }
845 } else 844 } else
@@ -861,16 +860,15 @@ static struct domain_device *sas_ex_discover_end_dev(
861 860
862 res = sas_discover_end_dev(child); 861 res = sas_discover_end_dev(child);
863 if (res) { 862 if (res) {
864 SAS_DPRINTK("sas_discover_end_dev() for device %16llx " 863 pr_notice("sas_discover_end_dev() for device %16llx at %016llx:0x%x returned 0x%x\n",
865 "at %016llx:0x%x returned 0x%x\n", 864 SAS_ADDR(child->sas_addr),
866 SAS_ADDR(child->sas_addr), 865 SAS_ADDR(parent->sas_addr), phy_id, res);
867 SAS_ADDR(parent->sas_addr), phy_id, res);
868 goto out_list_del; 866 goto out_list_del;
869 } 867 }
870 } else { 868 } else {
871 SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", 869 pr_notice("target proto 0x%x at %016llx:0x%x not handled\n",
872 phy->attached_tproto, SAS_ADDR(parent->sas_addr), 870 phy->attached_tproto, SAS_ADDR(parent->sas_addr),
873 phy_id); 871 phy_id);
874 goto out_free; 872 goto out_free;
875 } 873 }
876 874
@@ -927,11 +925,10 @@ static struct domain_device *sas_ex_discover_expander(
927 int res; 925 int res;
928 926
929 if (phy->routing_attr == DIRECT_ROUTING) { 927 if (phy->routing_attr == DIRECT_ROUTING) {
930 SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not " 928 pr_warn("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not allowed\n",
931 "allowed\n", 929 SAS_ADDR(parent->sas_addr), phy_id,
932 SAS_ADDR(parent->sas_addr), phy_id, 930 SAS_ADDR(phy->attached_sas_addr),
933 SAS_ADDR(phy->attached_sas_addr), 931 phy->attached_phy_id);
934 phy->attached_phy_id);
935 return NULL; 932 return NULL;
936 } 933 }
937 child = sas_alloc_device(); 934 child = sas_alloc_device();
@@ -1038,25 +1035,24 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1038 ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && 1035 ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
1039 ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && 1036 ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
1040 ex_phy->attached_dev_type != SAS_SATA_PENDING) { 1037 ex_phy->attached_dev_type != SAS_SATA_PENDING) {
1041 SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " 1038 pr_warn("unknown device type(0x%x) attached to ex %016llx phy 0x%x\n",
1042 "phy 0x%x\n", ex_phy->attached_dev_type, 1039 ex_phy->attached_dev_type,
1043 SAS_ADDR(dev->sas_addr), 1040 SAS_ADDR(dev->sas_addr),
1044 phy_id); 1041 phy_id);
1045 return 0; 1042 return 0;
1046 } 1043 }
1047 1044
1048 res = sas_configure_routing(dev, ex_phy->attached_sas_addr); 1045 res = sas_configure_routing(dev, ex_phy->attached_sas_addr);
1049 if (res) { 1046 if (res) {
1050 SAS_DPRINTK("configure routing for dev %016llx " 1047 pr_notice("configure routing for dev %016llx reported 0x%x. Forgotten\n",
1051 "reported 0x%x. Forgotten\n", 1048 SAS_ADDR(ex_phy->attached_sas_addr), res);
1052 SAS_ADDR(ex_phy->attached_sas_addr), res);
1053 sas_disable_routing(dev, ex_phy->attached_sas_addr); 1049 sas_disable_routing(dev, ex_phy->attached_sas_addr);
1054 return res; 1050 return res;
1055 } 1051 }
1056 1052
1057 if (sas_ex_join_wide_port(dev, phy_id)) { 1053 if (sas_ex_join_wide_port(dev, phy_id)) {
1058 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", 1054 pr_debug("Attaching ex phy%d to wide port %016llx\n",
1059 phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); 1055 phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
1060 return res; 1056 return res;
1061 } 1057 }
1062 1058
@@ -1067,12 +1063,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1067 break; 1063 break;
1068 case SAS_FANOUT_EXPANDER_DEVICE: 1064 case SAS_FANOUT_EXPANDER_DEVICE:
1069 if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { 1065 if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
1070 SAS_DPRINTK("second fanout expander %016llx phy 0x%x " 1066 pr_debug("second fanout expander %016llx phy 0x%x attached to ex %016llx phy 0x%x\n",
1071 "attached to ex %016llx phy 0x%x\n", 1067 SAS_ADDR(ex_phy->attached_sas_addr),
1072 SAS_ADDR(ex_phy->attached_sas_addr), 1068 ex_phy->attached_phy_id,
1073 ex_phy->attached_phy_id, 1069 SAS_ADDR(dev->sas_addr),
1074 SAS_ADDR(dev->sas_addr), 1070 phy_id);
1075 phy_id);
1076 sas_ex_disable_phy(dev, phy_id); 1071 sas_ex_disable_phy(dev, phy_id);
1077 break; 1072 break;
1078 } else 1073 } else
@@ -1101,9 +1096,8 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
1101 SAS_ADDR(child->sas_addr)) { 1096 SAS_ADDR(child->sas_addr)) {
1102 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; 1097 ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
1103 if (sas_ex_join_wide_port(dev, i)) 1098 if (sas_ex_join_wide_port(dev, i))
1104 SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", 1099 pr_debug("Attaching ex phy%d to wide port %016llx\n",
1105 i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); 1100 i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
1106
1107 } 1101 }
1108 } 1102 }
1109 } 1103 }
@@ -1154,13 +1148,11 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
1154 if (sas_find_sub_addr(child, s2) && 1148 if (sas_find_sub_addr(child, s2) &&
1155 (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { 1149 (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
1156 1150
1157 SAS_DPRINTK("ex %016llx->%016llx-?->%016llx " 1151 pr_notice("ex %016llx->%016llx-?->%016llx diverges from subtractive boundary %016llx\n",
1158 "diverges from subtractive " 1152 SAS_ADDR(dev->sas_addr),
1159 "boundary %016llx\n", 1153 SAS_ADDR(child->sas_addr),
1160 SAS_ADDR(dev->sas_addr), 1154 SAS_ADDR(s2),
1161 SAS_ADDR(child->sas_addr), 1155 SAS_ADDR(sub_addr));
1162 SAS_ADDR(s2),
1163 SAS_ADDR(sub_addr));
1164 1156
1165 sas_ex_disable_port(child, s2); 1157 sas_ex_disable_port(child, s2);
1166 } 1158 }
@@ -1239,12 +1231,10 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
1239 else if (SAS_ADDR(sub_sas_addr) != 1231 else if (SAS_ADDR(sub_sas_addr) !=
1240 SAS_ADDR(phy->attached_sas_addr)) { 1232 SAS_ADDR(phy->attached_sas_addr)) {
1241 1233
1242 SAS_DPRINTK("ex %016llx phy 0x%x " 1234 pr_notice("ex %016llx phy 0x%x diverges(%016llx) on subtractive boundary(%016llx). Disabled\n",
1243 "diverges(%016llx) on subtractive " 1235 SAS_ADDR(dev->sas_addr), i,
1244 "boundary(%016llx). Disabled\n", 1236 SAS_ADDR(phy->attached_sas_addr),
1245 SAS_ADDR(dev->sas_addr), i, 1237 SAS_ADDR(sub_sas_addr));
1246 SAS_ADDR(phy->attached_sas_addr),
1247 SAS_ADDR(sub_sas_addr));
1248 sas_ex_disable_phy(dev, i); 1238 sas_ex_disable_phy(dev, i);
1249 } 1239 }
1250 } 1240 }
@@ -1262,19 +1252,17 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
1262 }; 1252 };
1263 struct domain_device *parent = child->parent; 1253 struct domain_device *parent = child->parent;
1264 1254
1265 sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx " 1255 pr_notice("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x has %c:%c routing link!\n",
1266 "phy 0x%x has %c:%c routing link!\n", 1256 ex_type[parent->dev_type],
1267 1257 SAS_ADDR(parent->sas_addr),
1268 ex_type[parent->dev_type], 1258 parent_phy->phy_id,
1269 SAS_ADDR(parent->sas_addr),
1270 parent_phy->phy_id,
1271 1259
1272 ex_type[child->dev_type], 1260 ex_type[child->dev_type],
1273 SAS_ADDR(child->sas_addr), 1261 SAS_ADDR(child->sas_addr),
1274 child_phy->phy_id, 1262 child_phy->phy_id,
1275 1263
1276 sas_route_char(parent, parent_phy), 1264 sas_route_char(parent, parent_phy),
1277 sas_route_char(child, child_phy)); 1265 sas_route_char(child, child_phy));
1278} 1266}
1279 1267
1280static int sas_check_eeds(struct domain_device *child, 1268static int sas_check_eeds(struct domain_device *child,
@@ -1286,13 +1274,12 @@ static int sas_check_eeds(struct domain_device *child,
1286 1274
1287 if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { 1275 if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
1288 res = -ENODEV; 1276 res = -ENODEV;
1289 SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx " 1277 pr_warn("edge ex %016llx phy S:0x%x <--> edge ex %016llx phy S:0x%x, while there is a fanout ex %016llx\n",
1290 "phy S:0x%x, while there is a fanout ex %016llx\n", 1278 SAS_ADDR(parent->sas_addr),
1291 SAS_ADDR(parent->sas_addr), 1279 parent_phy->phy_id,
1292 parent_phy->phy_id, 1280 SAS_ADDR(child->sas_addr),
1293 SAS_ADDR(child->sas_addr), 1281 child_phy->phy_id,
1294 child_phy->phy_id, 1282 SAS_ADDR(parent->port->disc.fanout_sas_addr));
1295 SAS_ADDR(parent->port->disc.fanout_sas_addr));
1296 } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { 1283 } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
1297 memcpy(parent->port->disc.eeds_a, parent->sas_addr, 1284 memcpy(parent->port->disc.eeds_a, parent->sas_addr,
1298 SAS_ADDR_SIZE); 1285 SAS_ADDR_SIZE);
@@ -1310,12 +1297,11 @@ static int sas_check_eeds(struct domain_device *child,
1310 ; 1297 ;
1311 else { 1298 else {
1312 res = -ENODEV; 1299 res = -ENODEV;
1313 SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx " 1300 pr_warn("edge ex %016llx phy 0x%x <--> edge ex %016llx phy 0x%x link forms a third EEDS!\n",
1314 "phy 0x%x link forms a third EEDS!\n", 1301 SAS_ADDR(parent->sas_addr),
1315 SAS_ADDR(parent->sas_addr), 1302 parent_phy->phy_id,
1316 parent_phy->phy_id, 1303 SAS_ADDR(child->sas_addr),
1317 SAS_ADDR(child->sas_addr), 1304 child_phy->phy_id);
1318 child_phy->phy_id);
1319 } 1305 }
1320 1306
1321 return res; 1307 return res;
@@ -1429,14 +1415,13 @@ static int sas_configure_present(struct domain_device *dev, int phy_id,
1429 goto out; 1415 goto out;
1430 res = rri_resp[2]; 1416 res = rri_resp[2];
1431 if (res == SMP_RESP_NO_INDEX) { 1417 if (res == SMP_RESP_NO_INDEX) {
1432 SAS_DPRINTK("overflow of indexes: dev %016llx " 1418 pr_warn("overflow of indexes: dev %016llx phy 0x%x index 0x%x\n",
1433 "phy 0x%x index 0x%x\n", 1419 SAS_ADDR(dev->sas_addr), phy_id, i);
1434 SAS_ADDR(dev->sas_addr), phy_id, i);
1435 goto out; 1420 goto out;
1436 } else if (res != SMP_RESP_FUNC_ACC) { 1421 } else if (res != SMP_RESP_FUNC_ACC) {
1437 SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " 1422 pr_notice("%s: dev %016llx phy 0x%x index 0x%x result 0x%x\n",
1438 "result 0x%x\n", __func__, 1423 __func__, SAS_ADDR(dev->sas_addr), phy_id,
1439 SAS_ADDR(dev->sas_addr), phy_id, i, res); 1424 i, res);
1440 goto out; 1425 goto out;
1441 } 1426 }
1442 if (SAS_ADDR(sas_addr) != 0) { 1427 if (SAS_ADDR(sas_addr) != 0) {
@@ -1500,9 +1485,8 @@ static int sas_configure_set(struct domain_device *dev, int phy_id,
1500 goto out; 1485 goto out;
1501 res = cri_resp[2]; 1486 res = cri_resp[2];
1502 if (res == SMP_RESP_NO_INDEX) { 1487 if (res == SMP_RESP_NO_INDEX) {
1503 SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x " 1488 pr_warn("overflow of indexes: dev %016llx phy 0x%x index 0x%x\n",
1504 "index 0x%x\n", 1489 SAS_ADDR(dev->sas_addr), phy_id, index);
1505 SAS_ADDR(dev->sas_addr), phy_id, index);
1506 } 1490 }
1507out: 1491out:
1508 kfree(cri_req); 1492 kfree(cri_req);
@@ -1549,8 +1533,8 @@ static int sas_configure_parent(struct domain_device *parent,
1549 } 1533 }
1550 1534
1551 if (ex_parent->conf_route_table == 0) { 1535 if (ex_parent->conf_route_table == 0) {
1552 SAS_DPRINTK("ex %016llx has self-configuring routing table\n", 1536 pr_debug("ex %016llx has self-configuring routing table\n",
1553 SAS_ADDR(parent->sas_addr)); 1537 SAS_ADDR(parent->sas_addr));
1554 return 0; 1538 return 0;
1555 } 1539 }
1556 1540
@@ -1611,8 +1595,8 @@ static int sas_discover_expander(struct domain_device *dev)
1611 1595
1612 res = sas_expander_discover(dev); 1596 res = sas_expander_discover(dev);
1613 if (res) { 1597 if (res) {
1614 SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n", 1598 pr_warn("expander %016llx discovery failed(0x%x)\n",
1615 SAS_ADDR(dev->sas_addr), res); 1599 SAS_ADDR(dev->sas_addr), res);
1616 goto out_err; 1600 goto out_err;
1617 } 1601 }
1618 1602
@@ -1856,10 +1840,10 @@ static int sas_find_bcast_dev(struct domain_device *dev,
1856 if (phy_id != -1) { 1840 if (phy_id != -1) {
1857 *src_dev = dev; 1841 *src_dev = dev;
1858 ex->ex_change_count = ex_change_count; 1842 ex->ex_change_count = ex_change_count;
1859 SAS_DPRINTK("Expander phy change count has changed\n"); 1843 pr_info("Expander phy change count has changed\n");
1860 return res; 1844 return res;
1861 } else 1845 } else
1862 SAS_DPRINTK("Expander phys DID NOT change\n"); 1846 pr_info("Expander phys DID NOT change\n");
1863 } 1847 }
1864 list_for_each_entry(ch, &ex->children, siblings) { 1848 list_for_each_entry(ch, &ex->children, siblings) {
1865 if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { 1849 if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
@@ -1969,8 +1953,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
1969 struct domain_device *child; 1953 struct domain_device *child;
1970 int res; 1954 int res;
1971 1955
1972 SAS_DPRINTK("ex %016llx phy%d new device attached\n", 1956 pr_debug("ex %016llx phy%d new device attached\n",
1973 SAS_ADDR(dev->sas_addr), phy_id); 1957 SAS_ADDR(dev->sas_addr), phy_id);
1974 res = sas_ex_phy_discover(dev, phy_id); 1958 res = sas_ex_phy_discover(dev, phy_id);
1975 if (res) 1959 if (res)
1976 return res; 1960 return res;
@@ -2048,15 +2032,15 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
2048 2032
2049 if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) 2033 if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
2050 action = ", needs recovery"; 2034 action = ", needs recovery";
2051 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", 2035 pr_debug("ex %016llx phy 0x%x broadcast flutter%s\n",
2052 SAS_ADDR(dev->sas_addr), phy_id, action); 2036 SAS_ADDR(dev->sas_addr), phy_id, action);
2053 return res; 2037 return res;
2054 } 2038 }
2055 2039
2056 /* we always have to delete the old device when we went here */ 2040 /* we always have to delete the old device when we went here */
2057 SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n", 2041 pr_info("ex %016llx phy 0x%x replace %016llx\n",
2058 SAS_ADDR(dev->sas_addr), phy_id, 2042 SAS_ADDR(dev->sas_addr), phy_id,
2059 SAS_ADDR(phy->attached_sas_addr)); 2043 SAS_ADDR(phy->attached_sas_addr));
2060 sas_unregister_devs_sas_addr(dev, phy_id, last); 2044 sas_unregister_devs_sas_addr(dev, phy_id, last);
2061 2045
2062 return sas_discover_new(dev, phy_id); 2046 return sas_discover_new(dev, phy_id);
@@ -2084,8 +2068,8 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
2084 int i; 2068 int i;
2085 bool last = true; /* is this the last phy of the port */ 2069 bool last = true; /* is this the last phy of the port */
2086 2070
2087 SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", 2071 pr_debug("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
2088 SAS_ADDR(dev->sas_addr), phy_id); 2072 SAS_ADDR(dev->sas_addr), phy_id);
2089 2073
2090 if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { 2074 if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
2091 for (i = 0; i < ex->num_phys; i++) { 2075 for (i = 0; i < ex->num_phys; i++) {
@@ -2095,8 +2079,8 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id)
2095 continue; 2079 continue;
2096 if (SAS_ADDR(phy->attached_sas_addr) == 2080 if (SAS_ADDR(phy->attached_sas_addr) ==
2097 SAS_ADDR(changed_phy->attached_sas_addr)) { 2081 SAS_ADDR(changed_phy->attached_sas_addr)) {
2098 SAS_DPRINTK("phy%d part of wide port with " 2082 pr_debug("phy%d part of wide port with phy%d\n",
2099 "phy%d\n", phy_id, i); 2083 phy_id, i);
2100 last = false; 2084 last = false;
2101 break; 2085 break;
2102 } 2086 }
@@ -2154,23 +2138,23 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2154 case SAS_FANOUT_EXPANDER_DEVICE: 2138 case SAS_FANOUT_EXPANDER_DEVICE:
2155 break; 2139 break;
2156 default: 2140 default:
2157 printk("%s: can we send a smp request to a device?\n", 2141 pr_err("%s: can we send a smp request to a device?\n",
2158 __func__); 2142 __func__);
2159 goto out; 2143 goto out;
2160 } 2144 }
2161 2145
2162 dev = sas_find_dev_by_rphy(rphy); 2146 dev = sas_find_dev_by_rphy(rphy);
2163 if (!dev) { 2147 if (!dev) {
2164 printk("%s: fail to find a domain_device?\n", __func__); 2148 pr_err("%s: fail to find a domain_device?\n", __func__);
2165 goto out; 2149 goto out;
2166 } 2150 }
2167 2151
2168 /* do we need to support multiple segments? */ 2152 /* do we need to support multiple segments? */
2169 if (job->request_payload.sg_cnt > 1 || 2153 if (job->request_payload.sg_cnt > 1 ||
2170 job->reply_payload.sg_cnt > 1) { 2154 job->reply_payload.sg_cnt > 1) {
2171 printk("%s: multiple segments req %u, rsp %u\n", 2155 pr_info("%s: multiple segments req %u, rsp %u\n",
2172 __func__, job->request_payload.payload_len, 2156 __func__, job->request_payload.payload_len,
2173 job->reply_payload.payload_len); 2157 job->reply_payload.payload_len);
2174 goto out; 2158 goto out;
2175 } 2159 }
2176 2160
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index ede0af78144f..221340ee8651 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -128,19 +128,19 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
128 128
129 error = sas_register_phys(sas_ha); 129 error = sas_register_phys(sas_ha);
130 if (error) { 130 if (error) {
131 printk(KERN_NOTICE "couldn't register sas phys:%d\n", error); 131 pr_notice("couldn't register sas phys:%d\n", error);
132 return error; 132 return error;
133 } 133 }
134 134
135 error = sas_register_ports(sas_ha); 135 error = sas_register_ports(sas_ha);
136 if (error) { 136 if (error) {
137 printk(KERN_NOTICE "couldn't register sas ports:%d\n", error); 137 pr_notice("couldn't register sas ports:%d\n", error);
138 goto Undo_phys; 138 goto Undo_phys;
139 } 139 }
140 140
141 error = sas_init_events(sas_ha); 141 error = sas_init_events(sas_ha);
142 if (error) { 142 if (error) {
143 printk(KERN_NOTICE "couldn't start event thread:%d\n", error); 143 pr_notice("couldn't start event thread:%d\n", error);
144 goto Undo_ports; 144 goto Undo_ports;
145 } 145 }
146 146
@@ -623,8 +623,8 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
623 if (atomic_read(&phy->event_nr) > phy->ha->event_thres) { 623 if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
624 if (i->dft->lldd_control_phy) { 624 if (i->dft->lldd_control_phy) {
625 if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) { 625 if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
626 sas_printk("The phy%02d bursting events, shut it down.\n", 626 pr_notice("The phy%02d bursting events, shut it down.\n",
627 phy->id); 627 phy->id);
628 sas_notify_phy_event(phy, PHYE_SHUTDOWN); 628 sas_notify_phy_event(phy, PHYE_SHUTDOWN);
629 } 629 }
630 } else { 630 } else {
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 50e12d662ffe..2cdb981cf476 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -32,9 +32,13 @@
32#include <scsi/libsas.h> 32#include <scsi/libsas.h>
33#include <scsi/sas_ata.h> 33#include <scsi/sas_ata.h>
34 34
35#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) 35#ifdef pr_fmt
36#undef pr_fmt
37#endif
38
39#define SAS_FMT "sas: "
36 40
37#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__) 41#define pr_fmt(fmt) SAS_FMT fmt
38 42
39#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble) 43#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
40#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0) 44#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
@@ -120,10 +124,10 @@ static inline void sas_smp_host_handler(struct bsg_job *job,
120 124
121static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err) 125static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
122{ 126{
123 SAS_DPRINTK("%s: for %s device %16llx returned %d\n", 127 pr_warn("%s: for %s device %16llx returned %d\n",
124 func, dev->parent ? "exp-attached" : 128 func, dev->parent ? "exp-attached" :
125 "direct-attached", 129 "direct-attached",
126 SAS_ADDR(dev->sas_addr), err); 130 SAS_ADDR(dev->sas_addr), err);
127 sas_unregister_dev(dev->port, dev); 131 sas_unregister_dev(dev->port, dev);
128} 132}
129 133
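
The sas_internal.h hunk above is the pivot of the libsas conversion: rather than hand-rolled sas_printk()/SAS_DPRINTK() wrappers, the header defines pr_fmt() so every pr_*() call in libsas gains the "sas: " prefix at compile time; the #undef guards against the identity default that <linux/printk.h> installs when no pr_fmt() is defined first. An illustrative expansion:

#define pr_fmt(fmt) "sas: " fmt	/* define before pulling in printk.h */
#include <linux/printk.h>

static void example(void)
{
	/* Expands to printk(KERN_NOTICE "sas: " "no task to abort\n"),
	 * since pr_notice(fmt, ...) is defined as
	 * printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__). */
	pr_notice("no task to abort\n");
}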
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index bf3e1b979ca6..0374243c85d0 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -122,11 +122,11 @@ static void sas_phye_shutdown(struct work_struct *work)
122 phy->enabled = 0; 122 phy->enabled = 0;
123 ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL); 123 ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
124 if (ret) 124 if (ret)
125 sas_printk("lldd disable phy%02d returned %d\n", 125 pr_notice("lldd disable phy%02d returned %d\n",
126 phy->id, ret); 126 phy->id, ret);
127 } else 127 } else
128 sas_printk("phy%02d is not enabled, cannot shutdown\n", 128 pr_notice("phy%02d is not enabled, cannot shutdown\n",
129 phy->id); 129 phy->id);
130} 130}
131 131
132/* ---------- Phy class registration ---------- */ 132/* ---------- Phy class registration ---------- */
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index fad23dd39114..03fe479359b6 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -110,9 +110,9 @@ static void sas_form_port(struct asd_sas_phy *phy)
110 wake_up(&sas_ha->eh_wait_q); 110 wake_up(&sas_ha->eh_wait_q);
111 return; 111 return;
112 } else { 112 } else {
113 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", 113 pr_info("%s: phy%d belongs to port%d already(%d)!\n",
114 __func__, phy->id, phy->port->id, 114 __func__, phy->id, phy->port->id,
115 phy->port->num_phys); 115 phy->port->num_phys);
116 return; 116 return;
117 } 117 }
118 } 118 }
@@ -125,8 +125,8 @@ static void sas_form_port(struct asd_sas_phy *phy)
125 if (*(u64 *) port->sas_addr && 125 if (*(u64 *) port->sas_addr &&
126 phy_is_wideport_member(port, phy) && port->num_phys > 0) { 126 phy_is_wideport_member(port, phy) && port->num_phys > 0) {
127 /* wide port */ 127 /* wide port */
128 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, 128 pr_debug("phy%d matched wide port%d\n", phy->id,
129 port->id); 129 port->id);
130 break; 130 break;
131 } 131 }
132 spin_unlock(&port->phy_list_lock); 132 spin_unlock(&port->phy_list_lock);
@@ -147,8 +147,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
147 } 147 }
148 148
149 if (i >= sas_ha->num_phys) { 149 if (i >= sas_ha->num_phys) {
150 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", 150 pr_err("%s: couldn't find a free port, bug?\n", __func__);
151 __func__);
152 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); 151 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
153 return; 152 return;
154 } 153 }
@@ -180,10 +179,10 @@ static void sas_form_port(struct asd_sas_phy *phy)
180 } 179 }
181 sas_port_add_phy(port->port, phy->phy); 180 sas_port_add_phy(port->port, phy->phy);
182 181
183 SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n", 182 pr_debug("%s added to %s, phy_mask:0x%x (%16llx)\n",
184 dev_name(&phy->phy->dev), dev_name(&port->port->dev), 183 dev_name(&phy->phy->dev), dev_name(&port->port->dev),
185 port->phy_mask, 184 port->phy_mask,
186 SAS_ADDR(port->attached_sas_addr)); 185 SAS_ADDR(port->attached_sas_addr));
187 186
188 if (port->port_dev) 187 if (port->port_dev)
189 port->port_dev->pathways = port->num_phys; 188 port->port_dev->pathways = port->num_phys;
@@ -279,7 +278,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
279 prim = phy->sas_prim; 278 prim = phy->sas_prim;
280 spin_unlock_irqrestore(&phy->sas_prim_lock, flags); 279 spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
281 280
282 SAS_DPRINTK("broadcast received: %d\n", prim); 281 pr_debug("broadcast received: %d\n", prim);
283 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); 282 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
284 283
285 if (phy->port) 284 if (phy->port)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index af085432c5fe..c43a00a9d819 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -93,9 +93,8 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
93 hs = DID_ERROR; 93 hs = DID_ERROR;
94 break; 94 break;
95 case SAS_PROTO_RESPONSE: 95 case SAS_PROTO_RESPONSE:
96 SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP " 96 pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
97 "task; please report this\n", 97 task->dev->port->ha->sas_ha_name);
98 task->dev->port->ha->sas_ha_name);
99 break; 98 break;
100 case SAS_ABORTED_TASK: 99 case SAS_ABORTED_TASK:
101 hs = DID_ABORT; 100 hs = DID_ABORT;
@@ -132,12 +131,12 @@ static void sas_scsi_task_done(struct sas_task *task)
132 131
133 if (unlikely(!task)) { 132 if (unlikely(!task)) {
134 /* task will be completed by the error handler */ 133 /* task will be completed by the error handler */
135 SAS_DPRINTK("task done but aborted\n"); 134 pr_debug("task done but aborted\n");
136 return; 135 return;
137 } 136 }
138 137
139 if (unlikely(!sc)) { 138 if (unlikely(!sc)) {
140 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); 139 pr_debug("task_done called with non existing SCSI cmnd!\n");
141 sas_free_task(task); 140 sas_free_task(task);
142 return; 141 return;
143 } 142 }
@@ -208,7 +207,7 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
208 return 0; 207 return 0;
209 208
210out_free_task: 209out_free_task:
211 SAS_DPRINTK("lldd_execute_task returned: %d\n", res); 210 pr_debug("lldd_execute_task returned: %d\n", res);
212 ASSIGN_SAS_TASK(cmd, NULL); 211 ASSIGN_SAS_TASK(cmd, NULL);
213 sas_free_task(task); 212 sas_free_task(task);
214 if (res == -SAS_QUEUE_FULL) 213 if (res == -SAS_QUEUE_FULL)
@@ -301,40 +300,38 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
301 to_sas_internal(task->dev->port->ha->core.shost->transportt); 300 to_sas_internal(task->dev->port->ha->core.shost->transportt);
302 301
303 for (i = 0; i < 5; i++) { 302 for (i = 0; i < 5; i++) {
304 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task); 303 pr_notice("%s: aborting task 0x%p\n", __func__, task);
305 res = si->dft->lldd_abort_task(task); 304 res = si->dft->lldd_abort_task(task);
306 305
307 spin_lock_irqsave(&task->task_state_lock, flags); 306 spin_lock_irqsave(&task->task_state_lock, flags);
308 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 307 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
309 spin_unlock_irqrestore(&task->task_state_lock, flags); 308 spin_unlock_irqrestore(&task->task_state_lock, flags);
310 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 309 pr_debug("%s: task 0x%p is done\n", __func__, task);
311 task);
312 return TASK_IS_DONE; 310 return TASK_IS_DONE;
313 } 311 }
314 spin_unlock_irqrestore(&task->task_state_lock, flags); 312 spin_unlock_irqrestore(&task->task_state_lock, flags);
315 313
316 if (res == TMF_RESP_FUNC_COMPLETE) { 314 if (res == TMF_RESP_FUNC_COMPLETE) {
317 SAS_DPRINTK("%s: task 0x%p is aborted\n", 315 pr_notice("%s: task 0x%p is aborted\n",
318 __func__, task); 316 __func__, task);
319 return TASK_IS_ABORTED; 317 return TASK_IS_ABORTED;
320 } else if (si->dft->lldd_query_task) { 318 } else if (si->dft->lldd_query_task) {
321 SAS_DPRINTK("%s: querying task 0x%p\n", 319 pr_notice("%s: querying task 0x%p\n", __func__, task);
322 __func__, task);
323 res = si->dft->lldd_query_task(task); 320 res = si->dft->lldd_query_task(task);
324 switch (res) { 321 switch (res) {
325 case TMF_RESP_FUNC_SUCC: 322 case TMF_RESP_FUNC_SUCC:
326 SAS_DPRINTK("%s: task 0x%p at LU\n", 323 pr_notice("%s: task 0x%p at LU\n", __func__,
327 __func__, task); 324 task);
328 return TASK_IS_AT_LU; 325 return TASK_IS_AT_LU;
329 case TMF_RESP_FUNC_COMPLETE: 326 case TMF_RESP_FUNC_COMPLETE:
330 SAS_DPRINTK("%s: task 0x%p not at LU\n", 327 pr_notice("%s: task 0x%p not at LU\n",
331 __func__, task); 328 __func__, task);
332 return TASK_IS_NOT_AT_LU; 329 return TASK_IS_NOT_AT_LU;
333 case TMF_RESP_FUNC_FAILED: 330 case TMF_RESP_FUNC_FAILED:
334 SAS_DPRINTK("%s: task 0x%p failed to abort\n", 331 pr_notice("%s: task 0x%p failed to abort\n",
335 __func__, task); 332 __func__, task);
336 return TASK_ABORT_FAILED; 333 return TASK_ABORT_FAILED;
337 } 334 }
338 335
339 } 336 }
340 } 337 }
@@ -350,9 +347,9 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
350 347
351 int_to_scsilun(cmd->device->lun, &lun); 348 int_to_scsilun(cmd->device->lun, &lun);
352 349
353 SAS_DPRINTK("eh: device %llx LUN %llx has the task\n", 350 pr_notice("eh: device %llx LUN %llx has the task\n",
354 SAS_ADDR(dev->sas_addr), 351 SAS_ADDR(dev->sas_addr),
355 cmd->device->lun); 352 cmd->device->lun);
356 353
357 if (i->dft->lldd_abort_task_set) 354 if (i->dft->lldd_abort_task_set)
358 res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun); 355 res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
@@ -376,8 +373,8 @@ static int sas_recover_I_T(struct domain_device *dev)
376 struct sas_internal *i = 373 struct sas_internal *i =
377 to_sas_internal(dev->port->ha->core.shost->transportt); 374 to_sas_internal(dev->port->ha->core.shost->transportt);
378 375
379 SAS_DPRINTK("I_T nexus reset for dev %016llx\n", 376 pr_notice("I_T nexus reset for dev %016llx\n",
380 SAS_ADDR(dev->sas_addr)); 377 SAS_ADDR(dev->sas_addr));
381 378
382 if (i->dft->lldd_I_T_nexus_reset) 379 if (i->dft->lldd_I_T_nexus_reset)
383 res = i->dft->lldd_I_T_nexus_reset(dev); 380 res = i->dft->lldd_I_T_nexus_reset(dev);
@@ -471,9 +468,9 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
471 return SUCCESS; 468 return SUCCESS;
472 } 469 }
473 470
474 SAS_DPRINTK("%s reset of %s failed\n", 471 pr_warn("%s reset of %s failed\n",
475 reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus", 472 reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
476 dev_name(&dev->rphy->dev)); 473 dev_name(&dev->rphy->dev));
477 474
478 return FAILED; 475 return FAILED;
479} 476}
@@ -501,7 +498,7 @@ int sas_eh_abort_handler(struct scsi_cmnd *cmd)
501 if (task) 498 if (task)
502 res = i->dft->lldd_abort_task(task); 499 res = i->dft->lldd_abort_task(task);
503 else 500 else
504 SAS_DPRINTK("no task to abort\n"); 501 pr_notice("no task to abort\n");
505 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 502 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
506 return SUCCESS; 503 return SUCCESS;
507 504
@@ -612,34 +609,33 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
612 spin_unlock_irqrestore(&task->task_state_lock, flags); 609 spin_unlock_irqrestore(&task->task_state_lock, flags);
613 610
614 if (need_reset) { 611 if (need_reset) {
615 SAS_DPRINTK("%s: task 0x%p requests reset\n", 612 pr_notice("%s: task 0x%p requests reset\n",
616 __func__, task); 613 __func__, task);
617 goto reset; 614 goto reset;
618 } 615 }
619 616
620 SAS_DPRINTK("trying to find task 0x%p\n", task); 617 pr_debug("trying to find task 0x%p\n", task);
621 res = sas_scsi_find_task(task); 618 res = sas_scsi_find_task(task);
622 619
623 switch (res) { 620 switch (res) {
624 case TASK_IS_DONE: 621 case TASK_IS_DONE:
625 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 622 pr_notice("%s: task 0x%p is done\n", __func__,
626 task); 623 task);
627 sas_eh_finish_cmd(cmd); 624 sas_eh_finish_cmd(cmd);
628 continue; 625 continue;
629 case TASK_IS_ABORTED: 626 case TASK_IS_ABORTED:
630 SAS_DPRINTK("%s: task 0x%p is aborted\n", 627 pr_notice("%s: task 0x%p is aborted\n",
631 __func__, task); 628 __func__, task);
632 sas_eh_finish_cmd(cmd); 629 sas_eh_finish_cmd(cmd);
633 continue; 630 continue;
634 case TASK_IS_AT_LU: 631 case TASK_IS_AT_LU:
635 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); 632 pr_info("task 0x%p is at LU: lu recover\n", task);
636 reset: 633 reset:
637 tmf_resp = sas_recover_lu(task->dev, cmd); 634 tmf_resp = sas_recover_lu(task->dev, cmd);
638 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { 635 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
639 SAS_DPRINTK("dev %016llx LU %llx is " 636 pr_notice("dev %016llx LU %llx is recovered\n",
640 "recovered\n", 637 SAS_ADDR(task->dev),
641 SAS_ADDR(task->dev), 638 cmd->device->lun);
642 cmd->device->lun);
643 sas_eh_finish_cmd(cmd); 639 sas_eh_finish_cmd(cmd);
644 sas_scsi_clear_queue_lu(work_q, cmd); 640 sas_scsi_clear_queue_lu(work_q, cmd);
645 goto Again; 641 goto Again;
@@ -647,14 +643,14 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
647 /* fallthrough */ 643 /* fallthrough */
648 case TASK_IS_NOT_AT_LU: 644 case TASK_IS_NOT_AT_LU:
649 case TASK_ABORT_FAILED: 645 case TASK_ABORT_FAILED:
650 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", 646 pr_notice("task 0x%p is not at LU: I_T recover\n",
651 task); 647 task);
652 tmf_resp = sas_recover_I_T(task->dev); 648 tmf_resp = sas_recover_I_T(task->dev);
653 if (tmf_resp == TMF_RESP_FUNC_COMPLETE || 649 if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
654 tmf_resp == -ENODEV) { 650 tmf_resp == -ENODEV) {
655 struct domain_device *dev = task->dev; 651 struct domain_device *dev = task->dev;
656 SAS_DPRINTK("I_T %016llx recovered\n", 652 pr_notice("I_T %016llx recovered\n",
657 SAS_ADDR(task->dev->sas_addr)); 653 SAS_ADDR(task->dev->sas_addr));
658 sas_eh_finish_cmd(cmd); 654 sas_eh_finish_cmd(cmd);
659 sas_scsi_clear_queue_I_T(work_q, dev); 655 sas_scsi_clear_queue_I_T(work_q, dev);
660 goto Again; 656 goto Again;
@@ -663,12 +659,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
663 try_to_reset_cmd_device(cmd); 659 try_to_reset_cmd_device(cmd);
664 if (i->dft->lldd_clear_nexus_port) { 660 if (i->dft->lldd_clear_nexus_port) {
665 struct asd_sas_port *port = task->dev->port; 661 struct asd_sas_port *port = task->dev->port;
666 SAS_DPRINTK("clearing nexus for port:%d\n", 662 pr_debug("clearing nexus for port:%d\n",
667 port->id); 663 port->id);
668 res = i->dft->lldd_clear_nexus_port(port); 664 res = i->dft->lldd_clear_nexus_port(port);
669 if (res == TMF_RESP_FUNC_COMPLETE) { 665 if (res == TMF_RESP_FUNC_COMPLETE) {
670 SAS_DPRINTK("clear nexus port:%d " 666 pr_notice("clear nexus port:%d succeeded\n",
671 "succeeded\n", port->id); 667 port->id);
672 sas_eh_finish_cmd(cmd); 668 sas_eh_finish_cmd(cmd);
673 sas_scsi_clear_queue_port(work_q, 669 sas_scsi_clear_queue_port(work_q,
674 port); 670 port);
@@ -676,11 +672,10 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
676 } 672 }
677 } 673 }
678 if (i->dft->lldd_clear_nexus_ha) { 674 if (i->dft->lldd_clear_nexus_ha) {
679 SAS_DPRINTK("clear nexus ha\n"); 675 pr_debug("clear nexus ha\n");
680 res = i->dft->lldd_clear_nexus_ha(ha); 676 res = i->dft->lldd_clear_nexus_ha(ha);
681 if (res == TMF_RESP_FUNC_COMPLETE) { 677 if (res == TMF_RESP_FUNC_COMPLETE) {
682 SAS_DPRINTK("clear nexus ha " 678 pr_notice("clear nexus ha succeeded\n");
683 "succeeded\n");
684 sas_eh_finish_cmd(cmd); 679 sas_eh_finish_cmd(cmd);
685 goto clear_q; 680 goto clear_q;
686 } 681 }
@@ -689,10 +684,9 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
689 * of effort could recover from errors. Quite 684 * of effort could recover from errors. Quite
690 * possibly the HA just disappeared. 685 * possibly the HA just disappeared.
691 */ 686 */
692 SAS_DPRINTK("error from device %llx, LUN %llx " 687 pr_err("error from device %llx, LUN %llx couldn't be recovered in any way\n",
693 "couldn't be recovered in any way\n", 688 SAS_ADDR(task->dev->sas_addr),
694 SAS_ADDR(task->dev->sas_addr), 689 cmd->device->lun);
695 cmd->device->lun);
696 690
697 sas_eh_finish_cmd(cmd); 691 sas_eh_finish_cmd(cmd);
698 goto clear_q; 692 goto clear_q;
@@ -704,7 +698,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
704 return; 698 return;
705 699
706 clear_q: 700 clear_q:
707 SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__); 701 pr_debug("--- Exit %s -- clear_q\n", __func__);
708 list_for_each_entry_safe(cmd, n, work_q, eh_entry) 702 list_for_each_entry_safe(cmd, n, work_q, eh_entry)
709 sas_eh_finish_cmd(cmd); 703 sas_eh_finish_cmd(cmd);
710 goto out; 704 goto out;
@@ -758,8 +752,8 @@ retry:
758 list_splice_init(&shost->eh_cmd_q, &eh_work_q); 752 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
759 spin_unlock_irq(shost->host_lock); 753 spin_unlock_irq(shost->host_lock);
760 754
761 SAS_DPRINTK("Enter %s busy: %d failed: %d\n", 755 pr_notice("Enter %s busy: %d failed: %d\n",
762 __func__, scsi_host_busy(shost), shost->host_failed); 756 __func__, scsi_host_busy(shost), shost->host_failed);
763 /* 757 /*
764 * Deal with commands that still have SAS tasks (i.e. they didn't 758 * Deal with commands that still have SAS tasks (i.e. they didn't
765 * complete via the normal sas_task completion mechanism), 759 * complete via the normal sas_task completion mechanism),
@@ -800,9 +794,9 @@ out:
800 if (retry) 794 if (retry)
801 goto retry; 795 goto retry;
802 796
803 SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n", 797 pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
804 __func__, scsi_host_busy(shost), 798 __func__, scsi_host_busy(shost),
805 shost->host_failed, tries); 799 shost->host_failed, tries);
806} 800}
807 801
808int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 802int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -875,9 +869,8 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
875 if (scsi_dev->tagged_supported) { 869 if (scsi_dev->tagged_supported) {
876 scsi_change_queue_depth(scsi_dev, SAS_DEF_QD); 870 scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
877 } else { 871 } else {
878 SAS_DPRINTK("device %llx, LUN %llx doesn't support " 872 pr_notice("device %llx, LUN %llx doesn't support TCQ\n",
879 "TCQ\n", SAS_ADDR(dev->sas_addr), 873 SAS_ADDR(dev->sas_addr), scsi_dev->lun);
880 scsi_dev->lun);
881 scsi_change_queue_depth(scsi_dev, 1); 874 scsi_change_queue_depth(scsi_dev, 1);
882 } 875 }
883 876
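The libsas hunks above are part of a tree-wide change from the driver-private SAS_DPRINTK() wrapper to plain pr_notice()/pr_debug()/pr_err() calls with unbroken format strings. A minimal sketch of the resulting convention, assuming sas_internal.h supplies the pr_fmt prefix (which is why sas_task.c gains that #include below):

#define pr_fmt(fmt) "sas: " fmt	/* assumed; must come before printk.h */

#include <linux/printk.h>

static void log_lu_recovered(unsigned long long addr, unsigned long long lun)
{
	/* one complete format string per call, no private wrapper */
	pr_notice("dev %016llx LU %llx is recovered\n", addr, lun);
}

Keeping each format string on one line also makes the messages greppable, which is the usual motivation for un-splitting them.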
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index a78e5bd3e514..c3b9befad4e6 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -1,3 +1,6 @@
1
2#include "sas_internal.h"
3
1#include <linux/kernel.h> 4#include <linux/kernel.h>
2#include <linux/export.h> 5#include <linux/export.h>
3#include <scsi/sas.h> 6#include <scsi/sas.h>
@@ -23,11 +26,8 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
23 memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size); 26 memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
24 27
25 if (iu->status != SAM_STAT_CHECK_CONDITION) 28 if (iu->status != SAM_STAT_CHECK_CONDITION)
26 dev_printk(KERN_WARNING, dev, 29 dev_warn(dev, "dev %llx sent sense data, but stat(%x) is not CHECK CONDITION\n",
27 "dev %llx sent sense data, but " 30 SAS_ADDR(task->dev->sas_addr), iu->status);
28 "stat(%x) is not CHECK CONDITION\n",
29 SAS_ADDR(task->dev->sas_addr),
30 iu->status);
31 } 31 }
32 else 32 else
33 /* when datapres contains corrupt/unknown value... */ 33 /* when datapres contains corrupt/unknown value... */
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index c1eb2b00ca7f..ebdfe5b26937 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -335,6 +335,18 @@ enum hba_state {
335 LPFC_HBA_ERROR = -1 335 LPFC_HBA_ERROR = -1
336}; 336};
337 337
338struct lpfc_trunk_link_state {
339 enum hba_state state;
340 uint8_t fault;
341};
342
343struct lpfc_trunk_link {
344 struct lpfc_trunk_link_state link0,
345 link1,
346 link2,
347 link3;
348};
349
338struct lpfc_vport { 350struct lpfc_vport {
339 struct lpfc_hba *phba; 351 struct lpfc_hba *phba;
340 struct list_head listentry; 352 struct list_head listentry;
@@ -490,6 +502,7 @@ struct lpfc_vport {
490 struct nvme_fc_local_port *localport; 502 struct nvme_fc_local_port *localport;
491 uint8_t nvmei_support; /* driver supports NVME Initiator */ 503 uint8_t nvmei_support; /* driver supports NVME Initiator */
492 uint32_t last_fcp_wqidx; 504 uint32_t last_fcp_wqidx;
505 uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
493}; 506};
494 507
495struct hbq_s { 508struct hbq_s {
@@ -683,6 +696,7 @@ struct lpfc_hba {
683 uint32_t iocb_cmd_size; 696 uint32_t iocb_cmd_size;
684 uint32_t iocb_rsp_size; 697 uint32_t iocb_rsp_size;
685 698
699 struct lpfc_trunk_link trunk_link;
686 enum hba_state link_state; 700 enum hba_state link_state;
687 uint32_t link_flag; /* link state flags */ 701 uint32_t link_flag; /* link state flags */
688#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */ 702#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
@@ -717,6 +731,7 @@ struct lpfc_hba {
717 * capability 731 * capability
718 */ 732 */
719#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */ 733#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
734#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
720 735
721 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ 736 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
722 struct lpfc_dmabuf slim2p; 737 struct lpfc_dmabuf slim2p;
@@ -783,6 +798,7 @@ struct lpfc_hba {
783#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */ 798#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
784 uint32_t cfg_fcf_failover_policy; 799 uint32_t cfg_fcf_failover_policy;
785 uint32_t cfg_fcp_io_sched; 800 uint32_t cfg_fcp_io_sched;
801 uint32_t cfg_ns_query;
786 uint32_t cfg_fcp2_no_tgt_reset; 802 uint32_t cfg_fcp2_no_tgt_reset;
787 uint32_t cfg_cr_delay; 803 uint32_t cfg_cr_delay;
788 uint32_t cfg_cr_count; 804 uint32_t cfg_cr_count;
@@ -989,7 +1005,8 @@ struct lpfc_hba {
989 spinlock_t port_list_lock; /* lock for port_list mutations */ 1005 spinlock_t port_list_lock; /* lock for port_list mutations */
990 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 1006 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
991 uint16_t max_vpi; /* Maximum virtual nports */ 1007 uint16_t max_vpi; /* Maximum virtual nports */
992#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ 1008#define LPFC_MAX_VPI 0xFF /* Max number VPI supported 0 - 0xff */
1009#define LPFC_MAX_VPORTS 0x100 /* Max vports per port, with pport */
993 uint16_t max_vports; /* 1010 uint16_t max_vports; /*
994 * For IOV HBAs max_vpi can change 1011 * For IOV HBAs max_vpi can change
995 * after a reset. max_vports is max 1012 * after a reset. max_vports is max
@@ -1111,6 +1128,10 @@ struct lpfc_hba {
1111 uint16_t vlan_id; 1128 uint16_t vlan_id;
1112 struct list_head fcf_conn_rec_list; 1129 struct list_head fcf_conn_rec_list;
1113 1130
1131 bool defer_flogi_acc_flag;
1132 uint16_t defer_flogi_acc_rx_id;
1133 uint16_t defer_flogi_acc_ox_id;
1134
1114 spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */ 1135 spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
1115 struct list_head ct_ev_waiters; 1136 struct list_head ct_ev_waiters;
1116 struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX]; 1137 struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
@@ -1262,6 +1283,12 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
1262static inline struct lpfc_sli_ring * 1283static inline struct lpfc_sli_ring *
1263lpfc_phba_elsring(struct lpfc_hba *phba) 1284lpfc_phba_elsring(struct lpfc_hba *phba)
1264{ 1285{
1286 /* Return NULL if sli_rev has become invalid due to bad fw */
1287 if (phba->sli_rev != LPFC_SLI_REV4 &&
1288 phba->sli_rev != LPFC_SLI_REV3 &&
1289 phba->sli_rev != LPFC_SLI_REV2)
1290 return NULL;
1291
1265 if (phba->sli_rev == LPFC_SLI_REV4) { 1292 if (phba->sli_rev == LPFC_SLI_REV4) {
1266 if (phba->sli4_hba.els_wq) 1293 if (phba->sli4_hba.els_wq)
1267 return phba->sli4_hba.els_wq->pring; 1294 return phba->sli4_hba.els_wq->pring;
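With the guard just added, lpfc_phba_elsring() can return NULL when bad firmware has corrupted sli_rev, so callers are expected to check the result before touching the ring. A hypothetical call site, for illustration only:

static void els_ring_example(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);

	if (!pring)
		return;	/* sli_rev invalid: no ELS ring to operate on */
	/* ... use pring ... */
}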
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index dda7f450b96d..4bae72cbf3f6 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -883,6 +883,42 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
883 } 883 }
884 } 884 }
885 885
886 if ((phba->sli_rev == LPFC_SLI_REV4) &&
887 ((bf_get(lpfc_sli_intf_if_type,
888 &phba->sli4_hba.sli_intf) ==
889 LPFC_SLI_INTF_IF_TYPE_6))) {
890 struct lpfc_trunk_link link = phba->trunk_link;
891
892 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
893 len += snprintf(buf + len, PAGE_SIZE - len,
894 "Trunk port 0: Link %s %s\n",
895 (link.link0.state == LPFC_LINK_UP) ?
896 "Up" : "Down. ",
897 trunk_errmsg[link.link0.fault]);
898
899 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
900 len += snprintf(buf + len, PAGE_SIZE - len,
901 "Trunk port 1: Link %s %s\n",
902 (link.link1.state == LPFC_LINK_UP) ?
903 "Up" : "Down. ",
904 trunk_errmsg[link.link1.fault]);
905
906 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
907 len += snprintf(buf + len, PAGE_SIZE - len,
908 "Trunk port 2: Link %s %s\n",
909 (link.link2.state == LPFC_LINK_UP) ?
910 "Up" : "Down. ",
911 trunk_errmsg[link.link2.fault]);
912
913 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
914 len += snprintf(buf + len, PAGE_SIZE - len,
915 "Trunk port 3: Link %s %s\n",
916 (link.link3.state == LPFC_LINK_UP) ?
917 "Up" : "Down. ",
918 trunk_errmsg[link.link3.fault]);
919
920 }
921
886 return len; 922 return len;
887} 923}
888 924
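The trunk reporting added above uses the standard sysfs show() append idiom: each snprintf() writes at buf + len and is bounded by the space left in the single PAGE_SIZE buffer, so skipping unconfigured ports cannot overrun it. A self-contained sketch (names illustrative):

static ssize_t trunk_state_show_sketch(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < 4; i++)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"Trunk port %d: Link %s\n", i, "Up");
	return len;
}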
@@ -1156,6 +1192,82 @@ out:
1156} 1192}
1157 1193
1158/** 1194/**
1195 * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
1196 * @phba: lpfc_hba pointer.
1197 *
1198 * Description:
1199 * Issues a PCI secondary bus reset for the phba->pcidev.
1200 *
1201 * Notes:
 1202 * First walks the bus_list to verify that every PCI device has the Emulex
 1203 * vendor id and a device id that supports hot reset, that function 0
 1204 * occurs only once, and that all ports on the bus are offline, so the
 1205 * hot reset affects exactly one valid HBA.
1206 *
1207 * Returns:
 1208 * -ENOTSUPP, cfg_enable_hba_reset must be set to 2
1209 * -ENODEV, NULL ptr to pcidev
1210 * -EBADSLT, detected invalid device
1211 * -EBUSY, port is not in offline state
1212 * 0, successful
1213 */
1214int
1215lpfc_reset_pci_bus(struct lpfc_hba *phba)
1216{
1217 struct pci_dev *pdev = phba->pcidev;
1218 struct Scsi_Host *shost = NULL;
1219 struct lpfc_hba *phba_other = NULL;
1220 struct pci_dev *ptr = NULL;
1221 int res;
1222
1223 if (phba->cfg_enable_hba_reset != 2)
1224 return -ENOTSUPP;
1225
1226 if (!pdev) {
1227 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1228 return -ENODEV;
1229 }
1230
1231 res = lpfc_check_pci_resettable(phba);
1232 if (res)
1233 return res;
1234
1235 /* Walk the list of devices on the pci_dev's bus */
1236 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1237 /* Check port is offline */
1238 shost = pci_get_drvdata(ptr);
1239 if (shost) {
1240 phba_other =
1241 ((struct lpfc_vport *)shost->hostdata)->phba;
1242 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1243 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1244 "8349 WWPN = 0x%02x%02x%02x%02x"
1245 "%02x%02x%02x%02x is not "
1246 "offline!\n",
1247 phba_other->wwpn[0],
1248 phba_other->wwpn[1],
1249 phba_other->wwpn[2],
1250 phba_other->wwpn[3],
1251 phba_other->wwpn[4],
1252 phba_other->wwpn[5],
1253 phba_other->wwpn[6],
1254 phba_other->wwpn[7]);
1255 return -EBUSY;
1256 }
1257 }
1258 }
1259
1260 /* Issue PCI bus reset */
1261 res = pci_reset_bus(pdev);
1262 if (res) {
1263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1264 "8350 PCI reset bus failed: %d\n", res);
1265 }
1266
1267 return res;
1268}
1269
1270/**
1159 * lpfc_selective_reset - Offline then onlines the port 1271 * lpfc_selective_reset - Offline then onlines the port
1160 * @phba: lpfc_hba pointer. 1272 * @phba: lpfc_hba pointer.
1161 * 1273 *
@@ -1322,7 +1434,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1322 return -EACCES; 1434 return -EACCES;
1323 1435
1324 if ((phba->sli_rev < LPFC_SLI_REV4) || 1436 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1325 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 1437 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1326 LPFC_SLI_INTF_IF_TYPE_2)) 1438 LPFC_SLI_INTF_IF_TYPE_2))
1327 return -EPERM; 1439 return -EPERM;
1328 1440
@@ -1430,6 +1542,66 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
1430 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 1542 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
1431} 1543}
1432 1544
1545int
1546lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
1547{
1548 LPFC_MBOXQ_t *mbox = NULL;
1549 unsigned long val = 0;
1550 char *pval = 0;
1551 int rc = 0;
1552
1553 if (!strncmp("enable", buff_out,
1554 strlen("enable"))) {
1555 pval = buff_out + strlen("enable") + 1;
1556 rc = kstrtoul(pval, 0, &val);
1557 if (rc)
1558 return rc; /* Invalid number */
1559 } else if (!strncmp("disable", buff_out,
1560 strlen("disable"))) {
1561 val = 0;
1562 } else {
1563 return -EINVAL; /* Invalid command */
1564 }
1565
1566 switch (val) {
1567 case 0:
1568 val = 0x0; /* Disable */
1569 break;
1570 case 2:
1571 val = 0x1; /* Enable two port trunk */
1572 break;
1573 case 4:
1574 val = 0x2; /* Enable four port trunk */
1575 break;
1576 default:
1577 return -EINVAL;
1578 }
1579
1580 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1581 "0070 Set trunk mode with val %ld ", val);
1582
1583 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1584 if (!mbox)
1585 return -ENOMEM;
1586
1587 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1588 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
1589 12, LPFC_SLI4_MBX_EMBED);
1590
1591 bf_set(lpfc_mbx_set_trunk_mode,
1592 &mbox->u.mqe.un.set_trunk_mode,
1593 val);
1594 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1595 if (rc)
1596 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1597 "0071 Set trunk mode failed with status: %d",
1598 rc);
1599 if (rc != MBX_TIMEOUT)
1600 mempool_free(mbox, phba->mbox_mem_pool);
1601
1602 return 0;
1603}
1604
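lpfc_set_trunking() expects "enable <n>" (n = 2 or 4 ports) or "disable" once lpfc_board_mode_store() below has stripped the leading "trunk" keyword, then maps 0/2/4 onto the firmware encoding 0x0/0x1/0x2 before issuing the SET_TRUNK_MODE mailbox. The prefix-plus-argument parse reduces to this sketch:

static int parse_trunk_arg(const char *s, unsigned long *ports)
{
	if (!strncmp(s, "enable", strlen("enable")))
		return kstrtoul(s + strlen("enable") + 1, 0, ports);
	if (!strncmp(s, "disable", strlen("disable"))) {
		*ports = 0;
		return 0;
	}
	return -EINVAL;	/* unrecognized command */
}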
1433/** 1605/**
1434 * lpfc_board_mode_show - Return the state of the board 1606 * lpfc_board_mode_show - Return the state of the board
1435 * @dev: class device that is converted into a Scsi_host. 1607 * @dev: class device that is converted into a Scsi_host.
@@ -1522,6 +1694,11 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1522 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET); 1694 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1523 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0) 1695 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1524 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET); 1696 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1697 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1698 == 0)
1699 status = lpfc_reset_pci_bus(phba);
1700 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1701 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1525 else 1702 else
1526 status = -EINVAL; 1703 status = -EINVAL;
1527 1704
@@ -1590,7 +1767,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
1590 pmb = &pmboxq->u.mb; 1767 pmb = &pmboxq->u.mb;
1591 pmb->mbxCommand = MBX_READ_CONFIG; 1768 pmb->mbxCommand = MBX_READ_CONFIG;
1592 pmb->mbxOwner = OWN_HOST; 1769 pmb->mbxOwner = OWN_HOST;
1593 pmboxq->context1 = NULL; 1770 pmboxq->ctx_buf = NULL;
1594 1771
1595 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 1772 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1596 rc = MBX_NOT_FINISHED; 1773 rc = MBX_NOT_FINISHED;
@@ -1622,6 +1799,9 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
1622 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ? 1799 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
1623 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0; 1800 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
1624 1801
1802 /* Limit the max we support */
1803 if (max_vpi > LPFC_MAX_VPI)
1804 max_vpi = LPFC_MAX_VPI;
1625 if (mvpi) 1805 if (mvpi)
1626 *mvpi = max_vpi; 1806 *mvpi = max_vpi;
1627 if (avpi) 1807 if (avpi)
@@ -1637,8 +1817,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
1637 *axri = pmb->un.varRdConfig.avail_xri; 1817 *axri = pmb->un.varRdConfig.avail_xri;
1638 if (mvpi) 1818 if (mvpi)
1639 *mvpi = pmb->un.varRdConfig.max_vpi; 1819 *mvpi = pmb->un.varRdConfig.max_vpi;
1640 if (avpi) 1820 if (avpi) {
1641 *avpi = pmb->un.varRdConfig.avail_vpi; 1821 /* avail_vpi is only valid if link is up and ready */
1822 if (phba->link_state == LPFC_HBA_READY)
1823 *avpi = pmb->un.varRdConfig.avail_vpi;
1824 else
1825 *avpi = pmb->un.varRdConfig.max_vpi;
1826 }
1642 } 1827 }
1643 1828
1644 mempool_free(pmboxq, phba->mbox_mem_pool); 1829 mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -3831,8 +4016,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
3831 val); 4016 val);
3832 return -EINVAL; 4017 return -EINVAL;
3833 } 4018 }
3834 if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && 4019 if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
3835 val == 4) { 4020 phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
4021 val == 4) {
3836 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, 4022 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3837 "3114 Loop mode not supported\n"); 4023 "3114 Loop mode not supported\n");
3838 return -EINVAL; 4024 return -EINVAL;
@@ -4254,7 +4440,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
4254 uint32_t prev_val, if_type; 4440 uint32_t prev_val, if_type;
4255 4441
4256 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 4442 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4257 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 && 4443 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4258 phba->hba_flag & HBA_FORCED_LINK_SPEED) 4444 phba->hba_flag & HBA_FORCED_LINK_SPEED)
4259 return -EPERM; 4445 return -EPERM;
4260 4446
@@ -5070,6 +5256,18 @@ LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN,
5070 "issuing commands [0] - Round Robin, [1] - Current CPU"); 5256 "issuing commands [0] - Round Robin, [1] - Current CPU");
5071 5257
5072/* 5258/*
 5259 * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
5260 * range is [0,1]. Default value is 0.
5261 * For [0], GID_FT is used for NameServer queries after RSCN (default)
5262 * For [1], GID_PT is used for NameServer queries after RSCN
5263 *
5264 */
5265LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5266 LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5267 "Determine algorithm NameServer queries after RSCN "
5268 "[0] - GID_FT, [1] - GID_PT");
5269
5270/*
5073# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior 5271# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
5074# range is [0,1]. Default value is 0. 5272# range is [0,1]. Default value is 0.
5075# For [0], bus reset issues target reset to ALL devices 5273# For [0], bus reset issues target reset to ALL devices
@@ -5257,9 +5455,10 @@ LPFC_ATTR_R(nvme_io_channel,
5257# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 5455# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
5258# 0 = HBA resets disabled 5456# 0 = HBA resets disabled
5259# 1 = HBA resets enabled (default) 5457# 1 = HBA resets enabled (default)
5260# Value range is [0,1]. Default value is 1. 5458# 2 = HBA reset via PCI bus reset enabled
5459# Value range is [0,2]. Default value is 1.
5261*/ 5460*/
5262LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); 5461LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
5263 5462
5264/* 5463/*
5265# lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer. 5464# lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer.
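enable_hba_reset moves from LPFC_ATTR_R to LPFC_ATTR_RW and widens its range to admit the new PCI-bus-reset mode (2). The LPFC_ATTR_* macros generate a module parameter plus a range-checked init helper; roughly this shape (an illustrative reconstruction, not the exact macros from lpfc_attr.c):

#define SKETCH_ATTR_RW(name, defval, minval, maxval, desc)		\
static uint lpfc_##name = defval;					\
module_param(lpfc_##name, uint, 0444);					\
MODULE_PARM_DESC(lpfc_##name, desc);					\
static int lpfc_##name##_init(struct lpfc_hba *phba, uint val)		\
{									\
	if (val >= minval && val <= maxval) {				\
		phba->cfg_##name = val;					\
		return 0;						\
	}								\
	phba->cfg_##name = defval;	/* out of range: use default */	\
	return -EINVAL;							\
}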
@@ -5514,6 +5713,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
5514 &dev_attr_lpfc_scan_down, 5713 &dev_attr_lpfc_scan_down,
5515 &dev_attr_lpfc_link_speed, 5714 &dev_attr_lpfc_link_speed,
5516 &dev_attr_lpfc_fcp_io_sched, 5715 &dev_attr_lpfc_fcp_io_sched,
5716 &dev_attr_lpfc_ns_query,
5517 &dev_attr_lpfc_fcp2_no_tgt_reset, 5717 &dev_attr_lpfc_fcp2_no_tgt_reset,
5518 &dev_attr_lpfc_cr_delay, 5718 &dev_attr_lpfc_cr_delay,
5519 &dev_attr_lpfc_cr_count, 5719 &dev_attr_lpfc_cr_count,
@@ -6006,6 +6206,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
6006 case LPFC_LINK_SPEED_64GHZ: 6206 case LPFC_LINK_SPEED_64GHZ:
6007 fc_host_speed(shost) = FC_PORTSPEED_64GBIT; 6207 fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6008 break; 6208 break;
6209 case LPFC_LINK_SPEED_128GHZ:
6210 fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6211 break;
6009 default: 6212 default:
6010 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 6213 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6011 break; 6214 break;
@@ -6105,7 +6308,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
6105 pmb = &pmboxq->u.mb; 6308 pmb = &pmboxq->u.mb;
6106 pmb->mbxCommand = MBX_READ_STATUS; 6309 pmb->mbxCommand = MBX_READ_STATUS;
6107 pmb->mbxOwner = OWN_HOST; 6310 pmb->mbxOwner = OWN_HOST;
6108 pmboxq->context1 = NULL; 6311 pmboxq->ctx_buf = NULL;
6109 pmboxq->vport = vport; 6312 pmboxq->vport = vport;
6110 6313
6111 if (vport->fc_flag & FC_OFFLINE_MODE) 6314 if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -6137,7 +6340,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
6137 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 6340 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6138 pmb->mbxCommand = MBX_READ_LNK_STAT; 6341 pmb->mbxCommand = MBX_READ_LNK_STAT;
6139 pmb->mbxOwner = OWN_HOST; 6342 pmb->mbxOwner = OWN_HOST;
6140 pmboxq->context1 = NULL; 6343 pmboxq->ctx_buf = NULL;
6141 pmboxq->vport = vport; 6344 pmboxq->vport = vport;
6142 6345
6143 if (vport->fc_flag & FC_OFFLINE_MODE) 6346 if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -6217,7 +6420,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
6217 pmb->mbxCommand = MBX_READ_STATUS; 6420 pmb->mbxCommand = MBX_READ_STATUS;
6218 pmb->mbxOwner = OWN_HOST; 6421 pmb->mbxOwner = OWN_HOST;
6219 pmb->un.varWords[0] = 0x1; /* reset request */ 6422 pmb->un.varWords[0] = 0x1; /* reset request */
6220 pmboxq->context1 = NULL; 6423 pmboxq->ctx_buf = NULL;
6221 pmboxq->vport = vport; 6424 pmboxq->vport = vport;
6222 6425
6223 if ((vport->fc_flag & FC_OFFLINE_MODE) || 6426 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
@@ -6235,7 +6438,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
6235 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 6438 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6236 pmb->mbxCommand = MBX_READ_LNK_STAT; 6439 pmb->mbxCommand = MBX_READ_LNK_STAT;
6237 pmb->mbxOwner = OWN_HOST; 6440 pmb->mbxOwner = OWN_HOST;
6238 pmboxq->context1 = NULL; 6441 pmboxq->ctx_buf = NULL;
6239 pmboxq->vport = vport; 6442 pmboxq->vport = vport;
6240 6443
6241 if ((vport->fc_flag & FC_OFFLINE_MODE) || 6444 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
@@ -6564,6 +6767,7 @@ void
6564lpfc_get_cfgparam(struct lpfc_hba *phba) 6767lpfc_get_cfgparam(struct lpfc_hba *phba)
6565{ 6768{
6566 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched); 6769 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
6770 lpfc_ns_query_init(phba, lpfc_ns_query);
6567 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset); 6771 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
6568 lpfc_cr_delay_init(phba, lpfc_cr_delay); 6772 lpfc_cr_delay_init(phba, lpfc_cr_delay);
6569 lpfc_cr_count_init(phba, lpfc_cr_count); 6773 lpfc_cr_count_init(phba, lpfc_cr_count);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7bd7ae86bed5..8698af86485d 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2222,7 +2222,7 @@ lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2222 2222
2223 if (phba->sli_rev < LPFC_SLI_REV4) 2223 if (phba->sli_rev < LPFC_SLI_REV4)
2224 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job); 2224 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2225 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 2225 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2226 LPFC_SLI_INTF_IF_TYPE_2) 2226 LPFC_SLI_INTF_IF_TYPE_2)
2227 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job); 2227 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2228 else 2228 else
@@ -2262,7 +2262,7 @@ lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2262 2262
2263 if (phba->sli_rev < LPFC_SLI_REV4) 2263 if (phba->sli_rev < LPFC_SLI_REV4)
2264 return -ENODEV; 2264 return -ENODEV;
2265 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2265 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2266 LPFC_SLI_INTF_IF_TYPE_2) 2266 LPFC_SLI_INTF_IF_TYPE_2)
2267 return -ENODEV; 2267 return -ENODEV;
2268 2268
@@ -2354,7 +2354,7 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2354 rc = -ENODEV; 2354 rc = -ENODEV;
2355 goto job_error; 2355 goto job_error;
2356 } 2356 }
2357 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2357 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2358 LPFC_SLI_INTF_IF_TYPE_2) { 2358 LPFC_SLI_INTF_IF_TYPE_2) {
2359 rc = -ENODEV; 2359 rc = -ENODEV;
2360 goto job_error; 2360 goto job_error;
@@ -2501,9 +2501,9 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2501 return -ENOMEM; 2501 return -ENOMEM;
2502 } 2502 }
2503 2503
2504 dmabuff = (struct lpfc_dmabuf *) mbox->context1; 2504 dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
2505 mbox->context1 = NULL; 2505 mbox->ctx_buf = NULL;
2506 mbox->context2 = NULL; 2506 mbox->ctx_ndlp = NULL;
2507 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2507 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2508 2508
2509 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 2509 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -3388,7 +3388,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3388 unsigned long flags; 3388 unsigned long flags;
3389 uint8_t *pmb, *pmb_buf; 3389 uint8_t *pmb, *pmb_buf;
3390 3390
3391 dd_data = pmboxq->context1; 3391 dd_data = pmboxq->ctx_ndlp;
3392 3392
3393 /* 3393 /*
3394 * The outgoing buffer is readily referred from the dma buffer, 3394 * The outgoing buffer is readily referred from the dma buffer,
@@ -3573,7 +3573,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3573 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3573 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3574 uint8_t *pmbx; 3574 uint8_t *pmbx;
3575 3575
3576 dd_data = pmboxq->context1; 3576 dd_data = pmboxq->ctx_buf;
3577 3577
3578 /* Determine if job has been aborted */ 3578 /* Determine if job has been aborted */
3579 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3579 spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -3960,7 +3960,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3960 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl; 3960 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3961 3961
3962 /* context fields to callback function */ 3962 /* context fields to callback function */
3963 pmboxq->context1 = dd_data; 3963 pmboxq->ctx_buf = dd_data;
3964 dd_data->type = TYPE_MBOX; 3964 dd_data->type = TYPE_MBOX;
3965 dd_data->set_job = job; 3965 dd_data->set_job = job;
3966 dd_data->context_un.mbox.pmboxq = pmboxq; 3966 dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4131,7 +4131,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4131 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; 4131 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4132 4132
4133 /* context fields to callback function */ 4133 /* context fields to callback function */
4134 pmboxq->context1 = dd_data; 4134 pmboxq->ctx_buf = dd_data;
4135 dd_data->type = TYPE_MBOX; 4135 dd_data->type = TYPE_MBOX;
4136 dd_data->set_job = job; 4136 dd_data->set_job = job;
4137 dd_data->context_un.mbox.pmboxq = pmboxq; 4137 dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4476,7 +4476,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4476 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; 4476 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4477 4477
4478 /* context fields to callback function */ 4478 /* context fields to callback function */
4479 pmboxq->context1 = dd_data; 4479 pmboxq->ctx_buf = dd_data;
4480 dd_data->type = TYPE_MBOX; 4480 dd_data->type = TYPE_MBOX;
4481 dd_data->set_job = job; 4481 dd_data->set_job = job;
4482 dd_data->context_un.mbox.pmboxq = pmboxq; 4482 dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4761,7 +4761,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4761 if (mbox_req->inExtWLen || mbox_req->outExtWLen) { 4761 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4762 from = pmbx; 4762 from = pmbx;
4763 ext = from + sizeof(MAILBOX_t); 4763 ext = from + sizeof(MAILBOX_t);
4764 pmboxq->context2 = ext; 4764 pmboxq->ctx_buf = ext;
4765 pmboxq->in_ext_byte_len = 4765 pmboxq->in_ext_byte_len =
4766 mbox_req->inExtWLen * sizeof(uint32_t); 4766 mbox_req->inExtWLen * sizeof(uint32_t);
4767 pmboxq->out_ext_byte_len = 4767 pmboxq->out_ext_byte_len =
@@ -4889,7 +4889,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4889 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl; 4889 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4890 4890
4891 /* setup context field to pass wait_queue pointer to wake function */ 4891 /* setup context field to pass wait_queue pointer to wake function */
4892 pmboxq->context1 = dd_data; 4892 pmboxq->ctx_ndlp = dd_data;
4893 dd_data->type = TYPE_MBOX; 4893 dd_data->type = TYPE_MBOX;
4894 dd_data->set_job = job; 4894 dd_data->set_job = job;
4895 dd_data->context_un.mbox.pmboxq = pmboxq; 4895 dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -5348,7 +5348,7 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
5348 sizeof(struct fc_bsg_request) + 5348 sizeof(struct fc_bsg_request) +
5349 sizeof(struct lpfc_bsg_ras_req)) { 5349 sizeof(struct lpfc_bsg_ras_req)) {
5350 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 5350 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5351 "6181 Received RAS_LOG request " 5351 "6192 FW_LOG request received "
5352 "below minimum size\n"); 5352 "below minimum size\n");
5353 rc = -EINVAL; 5353 rc = -EINVAL;
5354 goto ras_job_error; 5354 goto ras_job_error;
@@ -5356,7 +5356,7 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
5356 5356
5357 /* Check FW log status */ 5357 /* Check FW log status */
5358 rc = lpfc_check_fwlog_support(phba); 5358 rc = lpfc_check_fwlog_support(phba);
5359 if (rc == -EACCES || rc == -EPERM) 5359 if (rc)
5360 goto ras_job_error; 5360 goto ras_job_error;
5361 5361
5362 ras_reply = (struct lpfc_bsg_get_ras_config_reply *) 5362 ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
@@ -5381,25 +5381,6 @@ ras_job_error:
5381} 5381}
5382 5382
5383/** 5383/**
5384 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
5385 * @phba: Pointer to HBA context object.
5386 *
5387 * Disable FW logging into host memory on the adapter. To
5388 * be done before reading logs from the host memory.
5389 **/
5390static void
5391lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
5392{
5393 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5394
5395 ras_fwlog->ras_active = false;
5396
5397 /* Disable FW logging to host memory */
5398 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
5399 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
5400}
5401
5402/**
5403 * lpfc_bsg_set_ras_config: Set FW logging parameters 5384 * lpfc_bsg_set_ras_config: Set FW logging parameters
5404 * @job: fc_bsg_job to handle 5385 * @job: fc_bsg_job to handle
5405 * 5386 *
@@ -5416,7 +5397,7 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
5416 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 5397 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5417 struct fc_bsg_reply *bsg_reply = job->reply; 5398 struct fc_bsg_reply *bsg_reply = job->reply;
5418 uint8_t action = 0, log_level = 0; 5399 uint8_t action = 0, log_level = 0;
5419 int rc = 0; 5400 int rc = 0, action_status = 0;
5420 5401
5421 if (job->request_len < 5402 if (job->request_len <
5422 sizeof(struct fc_bsg_request) + 5403 sizeof(struct fc_bsg_request) +
@@ -5430,7 +5411,7 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
5430 5411
5431 /* Check FW log status */ 5412 /* Check FW log status */
5432 rc = lpfc_check_fwlog_support(phba); 5413 rc = lpfc_check_fwlog_support(phba);
5433 if (rc == -EACCES || rc == -EPERM) 5414 if (rc)
5434 goto ras_job_error; 5415 goto ras_job_error;
5435 5416
5436 ras_req = (struct lpfc_bsg_set_ras_config_req *) 5417 ras_req = (struct lpfc_bsg_set_ras_config_req *)
@@ -5449,16 +5430,25 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
5449 lpfc_ras_stop_fwlog(phba); 5430 lpfc_ras_stop_fwlog(phba);
5450 } else { 5431 } else {
5451 /*action = LPFC_RASACTION_START_LOGGING*/ 5432 /*action = LPFC_RASACTION_START_LOGGING*/
5452 if (ras_fwlog->ras_active == true) { 5433
5453 rc = -EINPROGRESS; 5434 /* Even though FW-logging is active re-initialize
5454 goto ras_job_error; 5435 * FW-logging with new log-level. Return status
5455 } 5436 * "Logging already Running" to caller.
5437 **/
5438 if (ras_fwlog->ras_active)
5439 action_status = -EINPROGRESS;
5456 5440
5457 /* Enable logging */ 5441 /* Enable logging */
5458 rc = lpfc_sli4_ras_fwlog_init(phba, log_level, 5442 rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
5459 LPFC_RAS_ENABLE_LOGGING); 5443 LPFC_RAS_ENABLE_LOGGING);
5460 if (rc) 5444 if (rc) {
5461 rc = -EINVAL; 5445 rc = -EINVAL;
5446 goto ras_job_error;
5447 }
5448
5449 /* Check if FW-logging is re-initialized */
5450 if (action_status == -EINPROGRESS)
5451 rc = action_status;
5462 } 5452 }
5463ras_job_error: 5453ras_job_error:
5464 /* make error code available to userspace */ 5454 /* make error code available to userspace */
@@ -5487,12 +5477,11 @@ lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5487 struct lpfc_hba *phba = vport->phba; 5477 struct lpfc_hba *phba = vport->phba;
5488 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 5478 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5489 struct fc_bsg_reply *bsg_reply = job->reply; 5479 struct fc_bsg_reply *bsg_reply = job->reply;
5490 uint32_t lwpd_offset = 0; 5480 u32 *lwpd_ptr = NULL;
5491 uint64_t wrap_value = 0;
5492 int rc = 0; 5481 int rc = 0;
5493 5482
5494 rc = lpfc_check_fwlog_support(phba); 5483 rc = lpfc_check_fwlog_support(phba);
5495 if (rc == -EACCES || rc == -EPERM) 5484 if (rc)
5496 goto ras_job_error; 5485 goto ras_job_error;
5497 5486
5498 if (job->request_len < 5487 if (job->request_len <
@@ -5508,11 +5497,19 @@ lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5508 ras_reply = (struct lpfc_bsg_get_ras_lwpd *) 5497 ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
5509 bsg_reply->reply_data.vendor_reply.vendor_rsp; 5498 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5510 5499
5511 lwpd_offset = *((uint32_t *)ras_fwlog->lwpd.virt) & 0xffffffff; 5500 if (!ras_fwlog->lwpd.virt) {
5512 ras_reply->offset = be32_to_cpu(lwpd_offset); 5501 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5502 "6193 Restart FW Logging\n");
5503 rc = -EINVAL;
5504 goto ras_job_error;
5505 }
5506
5507 /* Get lwpd offset */
5508 lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
5509 ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
5513 5510
5514 wrap_value = *((uint64_t *)ras_fwlog->lwpd.virt); 5511 /* Get wrap count */
5515 ras_reply->wrap_count = be32_to_cpu((wrap_value >> 32) & 0xffffffff); 5512 ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
5516 5513
5517ras_job_error: 5514ras_job_error:
5518 /* make error code available to userspace */ 5515 /* make error code available to userspace */
@@ -5539,9 +5536,8 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5539 struct fc_bsg_request *bsg_request = job->request; 5536 struct fc_bsg_request *bsg_request = job->request;
5540 struct fc_bsg_reply *bsg_reply = job->reply; 5537 struct fc_bsg_reply *bsg_reply = job->reply;
5541 struct lpfc_bsg_get_fwlog_req *ras_req; 5538 struct lpfc_bsg_get_fwlog_req *ras_req;
5542 uint32_t rd_offset, rd_index, offset, pending_wlen; 5539 u32 rd_offset, rd_index, offset;
5543 uint32_t boundary = 0, align_len = 0, write_len = 0; 5540 void *src, *fwlog_buff;
5544 void *dest, *src, *fwlog_buff;
5545 struct lpfc_ras_fwlog *ras_fwlog = NULL; 5541 struct lpfc_ras_fwlog *ras_fwlog = NULL;
5546 struct lpfc_dmabuf *dmabuf, *next; 5542 struct lpfc_dmabuf *dmabuf, *next;
5547 int rc = 0; 5543 int rc = 0;
@@ -5549,7 +5545,7 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5549 ras_fwlog = &phba->ras_fwlog; 5545 ras_fwlog = &phba->ras_fwlog;
5550 5546
5551 rc = lpfc_check_fwlog_support(phba); 5547 rc = lpfc_check_fwlog_support(phba);
5552 if (rc == -EACCES || rc == -EPERM) 5548 if (rc)
5553 goto ras_job_error; 5549 goto ras_job_error;
5554 5550
5555 /* Logging to be stopped before reading */ 5551 /* Logging to be stopped before reading */
@@ -5581,8 +5577,6 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5581 5577
5582 rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE); 5578 rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
5583 offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE); 5579 offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
5584 pending_wlen = ras_req->read_size;
5585 dest = fwlog_buff;
5586 5580
5587 list_for_each_entry_safe(dmabuf, next, 5581 list_for_each_entry_safe(dmabuf, next,
5588 &ras_fwlog->fwlog_buff_list, list) { 5582 &ras_fwlog->fwlog_buff_list, list) {
@@ -5590,29 +5584,9 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5590 if (dmabuf->buffer_tag < rd_index) 5584 if (dmabuf->buffer_tag < rd_index)
5591 continue; 5585 continue;
5592 5586
5593 /* Align read to buffer size */
5594 if (offset) {
5595 boundary = ((dmabuf->buffer_tag + 1) *
5596 LPFC_RAS_MAX_ENTRY_SIZE);
5597
5598 align_len = (boundary - offset);
5599 write_len = min_t(u32, align_len,
5600 LPFC_RAS_MAX_ENTRY_SIZE);
5601 } else {
5602 write_len = min_t(u32, pending_wlen,
5603 LPFC_RAS_MAX_ENTRY_SIZE);
5604 align_len = 0;
5605 boundary = 0;
5606 }
5607 src = dmabuf->virt + offset; 5587 src = dmabuf->virt + offset;
5608 memcpy(dest, src, write_len); 5588 memcpy(fwlog_buff, src, ras_req->read_size);
5609 5589 break;
5610 pending_wlen -= write_len;
5611 if (!pending_wlen)
5612 break;
5613
5614 dest += write_len;
5615 offset = (offset + write_len) % LPFC_RAS_MAX_ENTRY_SIZE;
5616 } 5590 }
5617 5591
5618 bsg_reply->reply_payload_rcv_len = 5592 bsg_reply->reply_payload_rcv_len =
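The simplified read above drops the chunked copy and assumes the requested window fits inside a single LPFC_RAS_MAX_ENTRY_SIZE dmabuf. Locating that buffer is plain division and modulo; an illustrative helper (not part of the patch):

static void ras_locate(u32 rd_offset, u32 *rd_index, u32 *offset)
{
	*rd_index = rd_offset / LPFC_RAS_MAX_ENTRY_SIZE;	/* which dmabuf */
	*offset = rd_offset % LPFC_RAS_MAX_ENTRY_SIZE;		/* byte within */
}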
@@ -5629,6 +5603,77 @@ ras_job_error:
5629 return rc; 5603 return rc;
5630} 5604}
5631 5605
5606static int
5607lpfc_get_trunk_info(struct bsg_job *job)
5608{
5609 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5610 struct lpfc_hba *phba = vport->phba;
5611 struct fc_bsg_reply *bsg_reply = job->reply;
5612 struct lpfc_trunk_info *event_reply;
5613 int rc = 0;
5614
5615 if (job->request_len <
5616 sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
5617 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5618 "2744 Received GET TRUNK _INFO request below "
5619 "minimum size\n");
5620 rc = -EINVAL;
5621 goto job_error;
5622 }
5623
5624 event_reply = (struct lpfc_trunk_info *)
5625 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5626
5627 if (job->reply_len <
5628 sizeof(struct fc_bsg_request) + sizeof(struct lpfc_trunk_info)) {
5629 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5630 "2728 Received GET TRUNK _INFO reply below "
5631 "minimum size\n");
5632 rc = -EINVAL;
5633 goto job_error;
5634 }
5635 if (event_reply == NULL) {
5636 rc = -EINVAL;
5637 goto job_error;
5638 }
5639
5640 bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
5641 (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
5642
5643 bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
5644 (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
5645
5646 bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
5647 (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
5648
5649 bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
5650 (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
5651
5652 bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
5653 (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
5654
5655 bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
5656 bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
5657
5658 bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
5659 bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
5660
5661 bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
5662 bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
5663
5664 bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
5665 bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
5666
5667 event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
5668 event_reply->logical_speed =
5669 phba->sli4_hba.link_state.logical_speed / 100;
5670job_error:
5671 bsg_reply->result = rc;
5672 bsg_job_done(job, bsg_reply->result,
5673 bsg_reply->reply_payload_rcv_len);
5674 return rc;
5675
5676}
5632 5677
5633/** 5678/**
5634 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 5679 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
@@ -5689,6 +5734,9 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
5689 case LPFC_BSG_VENDOR_RAS_SET_CONFIG: 5734 case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
5690 rc = lpfc_bsg_set_ras_config(job); 5735 rc = lpfc_bsg_set_ras_config(job);
5691 break; 5736 break;
5737 case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
5738 rc = lpfc_get_trunk_info(job);
5739 break;
5692 default: 5740 default:
5693 rc = -EINVAL; 5741 rc = -EINVAL;
5694 bsg_reply->reply_payload_rcv_len = 0; 5742 bsg_reply->reply_payload_rcv_len = 0;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 820323f1139b..9151824beea4 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -42,6 +42,7 @@
42#define LPFC_BSG_VENDOR_RAS_GET_FWLOG 17 42#define LPFC_BSG_VENDOR_RAS_GET_FWLOG 17
43#define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18 43#define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18
44#define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19 44#define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19
45#define LPFC_BSG_VENDOR_GET_TRUNK_INFO 20
45 46
46struct set_ct_event { 47struct set_ct_event {
47 uint32_t command; 48 uint32_t command;
@@ -331,6 +332,43 @@ struct lpfc_bsg_get_ras_config_reply {
331 uint32_t log_buff_sz; 332 uint32_t log_buff_sz;
332}; 333};
333 334
335struct lpfc_trunk_info {
336 uint32_t word0;
337#define lpfc_trunk_info_link_status_SHIFT 0
338#define lpfc_trunk_info_link_status_MASK 1
339#define lpfc_trunk_info_link_status_WORD word0
340#define lpfc_trunk_info_trunk_active0_SHIFT 8
341#define lpfc_trunk_info_trunk_active0_MASK 1
342#define lpfc_trunk_info_trunk_active0_WORD word0
343#define lpfc_trunk_info_trunk_active1_SHIFT 9
344#define lpfc_trunk_info_trunk_active1_MASK 1
345#define lpfc_trunk_info_trunk_active1_WORD word0
346#define lpfc_trunk_info_trunk_active2_SHIFT 10
347#define lpfc_trunk_info_trunk_active2_MASK 1
348#define lpfc_trunk_info_trunk_active2_WORD word0
349#define lpfc_trunk_info_trunk_active3_SHIFT 11
350#define lpfc_trunk_info_trunk_active3_MASK 1
351#define lpfc_trunk_info_trunk_active3_WORD word0
352#define lpfc_trunk_info_trunk_config0_SHIFT 12
353#define lpfc_trunk_info_trunk_config0_MASK 1
354#define lpfc_trunk_info_trunk_config0_WORD word0
355#define lpfc_trunk_info_trunk_config1_SHIFT 13
356#define lpfc_trunk_info_trunk_config1_MASK 1
357#define lpfc_trunk_info_trunk_config1_WORD word0
358#define lpfc_trunk_info_trunk_config2_SHIFT 14
359#define lpfc_trunk_info_trunk_config2_MASK 1
360#define lpfc_trunk_info_trunk_config2_WORD word0
361#define lpfc_trunk_info_trunk_config3_SHIFT 15
362#define lpfc_trunk_info_trunk_config3_MASK 1
363#define lpfc_trunk_info_trunk_config3_WORD word0
364 uint16_t port_speed;
365 uint16_t logical_speed;
366 uint32_t reserved3;
367};
368
369struct get_trunk_info_req {
370 uint32_t command;
371};
334 372
335/* driver only */ 373/* driver only */
336#define SLI_CONFIG_NOT_HANDLED 0 374#define SLI_CONFIG_NOT_HANDLED 0
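The _SHIFT/_MASK/_WORD triplets above are consumed by lpfc's bf_get()/bf_set()-style accessors (bsg_bf_set() in lpfc_get_trunk_info() follows the same shape). A generic reconstruction, assuming each field fits within its 32-bit word:

#define sketch_bf_get(name, ptr)					\
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define sketch_bf_set(name, ptr, value)					\
	((ptr)->name##_WORD =						\
		(((value) & name##_MASK) << name##_SHIFT) |		\
		((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))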
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index e01136507780..39f3fa988732 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -74,7 +74,6 @@ void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *);
74void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 74void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
75void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); 75void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
76void lpfc_retry_pport_discovery(struct lpfc_hba *); 76void lpfc_retry_pport_discovery(struct lpfc_hba *);
77void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
78int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt); 77int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
79void lpfc_free_iocb_list(struct lpfc_hba *phba); 78void lpfc_free_iocb_list(struct lpfc_hba *phba);
80int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 79int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
@@ -175,6 +174,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
175void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 174void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
176 struct lpfc_iocbq *); 175 struct lpfc_iocbq *);
177int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *); 176int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
177int lpfc_issue_gidpt(struct lpfc_vport *vport);
178int lpfc_issue_gidft(struct lpfc_vport *vport); 178int lpfc_issue_gidft(struct lpfc_vport *vport);
179int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq); 179int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
180int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); 180int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
@@ -380,8 +380,10 @@ void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);
380 380
381void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); 381void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
382void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp); 382void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
383int lpfc_link_reset(struct lpfc_vport *vport);
383 384
384/* Function prototypes. */ 385/* Function prototypes. */
386int lpfc_check_pci_resettable(const struct lpfc_hba *phba);
385const char* lpfc_info(struct Scsi_Host *); 387const char* lpfc_info(struct Scsi_Host *);
386int lpfc_scan_finished(struct Scsi_Host *, unsigned long); 388int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
387 389
@@ -550,6 +552,7 @@ void lpfc_sli4_ras_init(struct lpfc_hba *phba);
550void lpfc_sli4_ras_setup(struct lpfc_hba *phba); 552void lpfc_sli4_ras_setup(struct lpfc_hba *phba);
551int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level, 553int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level,
552 uint32_t fwlog_enable); 554 uint32_t fwlog_enable);
555void lpfc_ras_stop_fwlog(struct lpfc_hba *phba);
553int lpfc_check_fwlog_support(struct lpfc_hba *phba); 556int lpfc_check_fwlog_support(struct lpfc_hba *phba);
554 557
555/* NVME interfaces. */ 558/* NVME interfaces. */
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 789ad1502534..552da8bf43e4 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -540,7 +540,17 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
540 struct lpfc_hba *phba = vport->phba; 540 struct lpfc_hba *phba = vport->phba;
541 struct lpfc_nodelist *ndlp = NULL; 541 struct lpfc_nodelist *ndlp = NULL;
542 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 542 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
543 char *str;
543 544
545 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
546 str = "GID_FT";
547 else
548 str = "GID_PT";
549 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
550 "6430 Process %s rsp for %08x type %x %s %s\n",
551 str, Did, fc4_type,
552 (fc4_type == FC_TYPE_FCP) ? "FCP" : " ",
553 (fc4_type == FC_TYPE_NVME) ? "NVME" : " ");
544 /* 554 /*
545 * To conserve rpi's, filter out addresses for other 555 * To conserve rpi's, filter out addresses for other
546 * vports on the same physical HBAs. 556 * vports on the same physical HBAs.
@@ -832,6 +842,198 @@ out:
832} 842}
833 843
834static void 844static void
845lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
846 struct lpfc_iocbq *rspiocb)
847{
848 struct lpfc_vport *vport = cmdiocb->vport;
849 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
850 IOCB_t *irsp;
851 struct lpfc_dmabuf *outp;
852 struct lpfc_dmabuf *inp;
853 struct lpfc_sli_ct_request *CTrsp;
854 struct lpfc_sli_ct_request *CTreq;
855 struct lpfc_nodelist *ndlp;
856 int rc;
857
858 /* First save ndlp, before we overwrite it */
859 ndlp = cmdiocb->context_un.ndlp;
860
861 /* we pass cmdiocb to state machine which needs rspiocb as well */
862 cmdiocb->context_un.rsp_iocb = rspiocb;
863 inp = (struct lpfc_dmabuf *)cmdiocb->context1;
864 outp = (struct lpfc_dmabuf *)cmdiocb->context2;
865 irsp = &rspiocb->iocb;
866
867 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
868 "GID_PT cmpl: status:x%x/x%x rtry:%d",
869 irsp->ulpStatus, irsp->un.ulpWord[4],
870 vport->fc_ns_retry);
871
872 /* Don't bother processing response if vport is being torn down. */
873 if (vport->load_flag & FC_UNLOADING) {
874 if (vport->fc_flag & FC_RSCN_MODE)
875 lpfc_els_flush_rscn(vport);
876 goto out;
877 }
878
879 if (lpfc_els_chk_latt(vport)) {
880 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
881 "4108 Link event during NS query\n");
882 if (vport->fc_flag & FC_RSCN_MODE)
883 lpfc_els_flush_rscn(vport);
884 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
885 goto out;
886 }
887 if (lpfc_error_lost_link(irsp)) {
888 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
889 "4101 NS query failed due to link event\n");
890 if (vport->fc_flag & FC_RSCN_MODE)
891 lpfc_els_flush_rscn(vport);
892 goto out;
893 }
894
895 spin_lock_irq(shost->host_lock);
896 if (vport->fc_flag & FC_RSCN_DEFERRED) {
897 vport->fc_flag &= ~FC_RSCN_DEFERRED;
898 spin_unlock_irq(shost->host_lock);
899
900 /* This is a GID_PT completing so the gidft_inp counter was
901 * incremented before the GID_PT was issued to the wire.
902 */
903 vport->gidft_inp--;
904
905 /*
906 * Skip processing the NS response
907 * Re-issue the NS cmd
908 */
909 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
910 "4102 Process Deferred RSCN Data: x%x x%x\n",
911 vport->fc_flag, vport->fc_rscn_id_cnt);
912 lpfc_els_handle_rscn(vport);
913
914 goto out;
915 }
916 spin_unlock_irq(shost->host_lock);
917
918 if (irsp->ulpStatus) {
919 /* Check for retry */
920 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
921 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
922 (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
923 IOERR_NO_RESOURCES)
924 vport->fc_ns_retry++;
925
926 /* CT command is being retried */
927 vport->gidft_inp--;
928 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT,
929 vport->fc_ns_retry, GID_PT_N_PORT);
930 if (rc == 0)
931 goto out;
932 }
933 if (vport->fc_flag & FC_RSCN_MODE)
934 lpfc_els_flush_rscn(vport);
935 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
936 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
937 "4103 GID_FT Query error: 0x%x 0x%x\n",
938 irsp->ulpStatus, vport->fc_ns_retry);
939 } else {
940 /* Good status, continue checking */
941 CTreq = (struct lpfc_sli_ct_request *)inp->virt;
942 CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
943 if (CTrsp->CommandResponse.bits.CmdRsp ==
944 cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
945 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
946 "4105 NameServer Rsp Data: x%x x%x\n",
947 vport->fc_flag,
948 CTreq->un.gid.Fc4Type);
949
950 lpfc_ns_rsp(vport,
951 outp,
952 CTreq->un.gid.Fc4Type,
953 (uint32_t)(irsp->un.genreq64.bdl.bdeSize));
954 } else if (CTrsp->CommandResponse.bits.CmdRsp ==
955 be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
956 /* NameServer Rsp Error */
957 if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
958 && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
959 lpfc_printf_vlog(
960 vport, KERN_INFO, LOG_DISCOVERY,
961 "4106 No NameServer Entries "
962 "Data: x%x x%x x%x x%x\n",
963 CTrsp->CommandResponse.bits.CmdRsp,
964 (uint32_t)CTrsp->ReasonCode,
965 (uint32_t)CTrsp->Explanation,
966 vport->fc_flag);
967
968 lpfc_debugfs_disc_trc(
969 vport, LPFC_DISC_TRC_CT,
970 "GID_PT no entry cmd:x%x rsn:x%x exp:x%x",
971 (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
972 (uint32_t)CTrsp->ReasonCode,
973 (uint32_t)CTrsp->Explanation);
974 } else {
975 lpfc_printf_vlog(
976 vport, KERN_INFO, LOG_DISCOVERY,
977 "4107 NameServer Rsp Error "
978 "Data: x%x x%x x%x x%x\n",
979 CTrsp->CommandResponse.bits.CmdRsp,
980 (uint32_t)CTrsp->ReasonCode,
981 (uint32_t)CTrsp->Explanation,
982 vport->fc_flag);
983
984 lpfc_debugfs_disc_trc(
985 vport, LPFC_DISC_TRC_CT,
986 "GID_PT rsp err1 cmd:x%x rsn:x%x exp:x%x",
987 (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
988 (uint32_t)CTrsp->ReasonCode,
989 (uint32_t)CTrsp->Explanation);
990 }
991 } else {
992 /* NameServer Rsp Error */
993 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
994 "4109 NameServer Rsp Error "
995 "Data: x%x x%x x%x x%x\n",
996 CTrsp->CommandResponse.bits.CmdRsp,
997 (uint32_t)CTrsp->ReasonCode,
998 (uint32_t)CTrsp->Explanation,
999 vport->fc_flag);
1000
1001 lpfc_debugfs_disc_trc(
1002 vport, LPFC_DISC_TRC_CT,
1003 "GID_PT rsp err2 cmd:x%x rsn:x%x exp:x%x",
1004 (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
1005 (uint32_t)CTrsp->ReasonCode,
1006 (uint32_t)CTrsp->Explanation);
1007 }
1008 vport->gidft_inp--;
1009 }
1010 /* Link up / RSCN discovery */
1011 if ((vport->num_disc_nodes == 0) &&
1012 (vport->gidft_inp == 0)) {
1013 /*
1014 * The driver has cycled through all Nports in the RSCN payload.
1015 * Complete the handling by cleaning up and marking the
1016 * current driver state.
1017 */
1018 if (vport->port_state >= LPFC_DISC_AUTH) {
1019 if (vport->fc_flag & FC_RSCN_MODE) {
1020 lpfc_els_flush_rscn(vport);
1021 spin_lock_irq(shost->host_lock);
1022 vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
1023 spin_unlock_irq(shost->host_lock);
1024 } else {
1025 lpfc_els_flush_rscn(vport);
1026 }
1027 }
1028
1029 lpfc_disc_start(vport);
1030 }
1031out:
1032 cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
1033 lpfc_ct_free_iocb(phba, cmdiocb);
1034}
1035
1036static void
835lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1037lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
836 struct lpfc_iocbq *rspiocb) 1038 struct lpfc_iocbq *rspiocb)
837{ 1039{
@@ -857,6 +1059,13 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
857 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 1059 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
858 fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET]; 1060 fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
859 1061
1062 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1063 "6431 Process GFF_ID rsp for %08x "
1064 "fbits %02x %s %s\n",
1065 did, fbits,
1066 (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ",
1067 (fbits & FC4_FEATURE_TARGET) ? "Target" : " ");
1068
860 if (CTrsp->CommandResponse.bits.CmdRsp == 1069 if (CTrsp->CommandResponse.bits.CmdRsp ==
861 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { 1070 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
862 if ((fbits & FC4_FEATURE_INIT) && 1071 if ((fbits & FC4_FEATURE_INIT) &&
@@ -979,9 +1188,15 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
979 CTrsp = (struct lpfc_sli_ct_request *)outp->virt; 1188 CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
980 fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]); 1189 fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
981 fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]); 1190 fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
1191
982 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1192 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
983 "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n", 1193 "6432 Process GFT_ID rsp for %08x "
984 did, fc4_data_0, fc4_data_1); 1194 "Data %08x %08x %s %s\n",
1195 did, fc4_data_0, fc4_data_1,
1196 (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ?
1197 "FCP" : " ",
1198 (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ?
1199 "NVME" : " ");
985 1200
986 ndlp = lpfc_findnode_did(vport, did); 1201 ndlp = lpfc_findnode_did(vport, did);
987 if (ndlp) { 1202 if (ndlp) {
@@ -1312,6 +1527,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1312 struct ulp_bde64 *bpl; 1527 struct ulp_bde64 *bpl;
1313 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 1528 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
1314 struct lpfc_iocbq *) = NULL; 1529 struct lpfc_iocbq *) = NULL;
1530 uint32_t *ptr;
1315 uint32_t rsp_size = 1024; 1531 uint32_t rsp_size = 1024;
1316 size_t size; 1532 size_t size;
1317 int rc = 0; 1533 int rc = 0;
@@ -1365,6 +1581,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1365 bpl->tus.f.bdeFlags = 0; 1581 bpl->tus.f.bdeFlags = 0;
1366 if (cmdcode == SLI_CTNS_GID_FT) 1582 if (cmdcode == SLI_CTNS_GID_FT)
1367 bpl->tus.f.bdeSize = GID_REQUEST_SZ; 1583 bpl->tus.f.bdeSize = GID_REQUEST_SZ;
1584 else if (cmdcode == SLI_CTNS_GID_PT)
1585 bpl->tus.f.bdeSize = GID_REQUEST_SZ;
1368 else if (cmdcode == SLI_CTNS_GFF_ID) 1586 else if (cmdcode == SLI_CTNS_GFF_ID)
1369 bpl->tus.f.bdeSize = GFF_REQUEST_SZ; 1587 bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
1370 else if (cmdcode == SLI_CTNS_GFT_ID) 1588 else if (cmdcode == SLI_CTNS_GFT_ID)
@@ -1405,6 +1623,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1405 rsp_size = FC_MAX_NS_RSP; 1623 rsp_size = FC_MAX_NS_RSP;
1406 break; 1624 break;
1407 1625
1626 case SLI_CTNS_GID_PT:
1627 CtReq->CommandResponse.bits.CmdRsp =
1628 cpu_to_be16(SLI_CTNS_GID_PT);
1629 CtReq->un.gid.PortType = context;
1630
1631 if (vport->port_state < LPFC_NS_QRY)
1632 vport->port_state = LPFC_NS_QRY;
1633 lpfc_set_disctmo(vport);
1634 cmpl = lpfc_cmpl_ct_cmd_gid_pt;
1635 rsp_size = FC_MAX_NS_RSP;
1636 break;
1637
1408 case SLI_CTNS_GFF_ID: 1638 case SLI_CTNS_GFF_ID:
1409 CtReq->CommandResponse.bits.CmdRsp = 1639 CtReq->CommandResponse.bits.CmdRsp =
1410 cpu_to_be16(SLI_CTNS_GFF_ID); 1640 cpu_to_be16(SLI_CTNS_GFF_ID);
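The new SLI_CTNS_GID_PT case reuses the GID_FT request size and carries the desired port type in the existing context argument. A hedged sketch of how a discovery-side caller might drive it; GID_PT_N_PORT is an assumed name for the FC-GS Nx_Port type code, and the real caller (lpfc_issue_gidpt, referenced later in this series) lives outside this hunk:

	/* Sketch only: issue a GID_PT (get port IDs by port type) NS query. */
	static int lpfc_issue_gidpt_sketch(struct lpfc_vport *vport)
	{
		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
			/* iocb/buffer setup failed; caller can retry or
			 * fall back to a GID_FT query.
			 */
			return 0;
		}
		vport->gidft_inp++;	/* one more NS query outstanding */
		return 1;
	}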
@@ -1436,8 +1666,18 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1436 */ 1666 */
1437 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 1667 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
1438 (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) 1668 (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
1439 CtReq->un.rft.rsvd[0] = cpu_to_be32(0x00000100); 1669 CtReq->un.rft.rsvd[0] =
1670 cpu_to_be32(LPFC_FC4_TYPE_BITMASK);
1440 1671
1672 ptr = (uint32_t *)CtReq;
1673 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1674 "6433 Issue RFT (%s %s): %08x %08x %08x %08x "
1675 "%08x %08x %08x %08x\n",
1676 CtReq->un.rft.fcpReg ? "FCP" : " ",
1677 CtReq->un.rft.rsvd[0] ? "NVME" : " ",
1678 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
1679 *(ptr + 4), *(ptr + 5),
1680 *(ptr + 6), *(ptr + 7));
1441 cmpl = lpfc_cmpl_ct_cmd_rft_id; 1681 cmpl = lpfc_cmpl_ct_cmd_rft_id;
1442 break; 1682 break;
1443 1683
@@ -1512,6 +1752,14 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1512 else 1752 else
1513 goto ns_cmd_free_bmpvirt; 1753 goto ns_cmd_free_bmpvirt;
1514 1754
1755 ptr = (uint32_t *)CtReq;
1756 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1757 "6434 Issue RFF (%s): %08x %08x %08x %08x "
1758 "%08x %08x %08x %08x\n",
1759 (context == FC_TYPE_NVME) ? "NVME" : "FCP",
1760 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
1761 *(ptr + 4), *(ptr + 5),
1762 *(ptr + 6), *(ptr + 7));
1515 cmpl = lpfc_cmpl_ct_cmd_rff_id; 1763 cmpl = lpfc_cmpl_ct_cmd_rff_id;
1516 break; 1764 break;
1517 } 1765 }
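Both the RFT and RFF paths now trace the first eight 32-bit words of the outgoing CT request, i.e. the CT_IU preamble plus the start of the payload. A self-contained model of what that dump covers; the struct below only approximates struct lpfc_sli_ct_request, and the 0x217 command code is the standard RFT_ID value, not taken from this hunk:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* Rough stand-in for the CT_IU preamble; 32 bytes = 8 words. */
	struct ct_iu_model {
		uint8_t  rev, in_id[3];
		uint8_t  gs_type, gs_subtype, options, rsvd;
		uint16_t cmd_rsp;	/* 0x217 = RFT_ID */
		uint16_t size;
		uint32_t payload[5];	/* start of command-specific data */
	};

	int main(void)
	{
		struct ct_iu_model req;
		uint32_t w[8];

		memset(&req, 0, sizeof(req));
		req.rev = 1;		/* CT revision */
		req.gs_type = 0xFC;	/* directory service */
		req.gs_subtype = 0x02;	/* name server */
		req.cmd_rsp = 0x217;	/* host order; the driver byte-swaps */

		memcpy(w, &req, sizeof(w));
		printf("Issue RFT: %08x %08x %08x %08x %08x %08x %08x %08x\n",
		       w[0], w[1], w[2], w[3], w[4], w[5], w[6], w[7]);
		return 0;
	}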
@@ -1758,7 +2006,7 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
1758 memset(ae, 0, 256); 2006 memset(ae, 0, 256);
1759 2007
1760 strncpy(ae->un.AttrString, 2008 strncpy(ae->un.AttrString,
1761 "Emulex Corporation", 2009 "Broadcom Inc.",
1762 sizeof(ae->un.AttrString)); 2010 sizeof(ae->un.AttrString));
1763 len = strnlen(ae->un.AttrString, 2011 len = strnlen(ae->un.AttrString,
1764 sizeof(ae->un.AttrString)); 2012 sizeof(ae->un.AttrString));
@@ -2134,6 +2382,8 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
2134 2382
2135 ae->un.AttrInt = 0; 2383 ae->un.AttrInt = 0;
2136 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 2384 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2385 if (phba->lmt & LMT_128Gb)
2386 ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
2137 if (phba->lmt & LMT_64Gb) 2387 if (phba->lmt & LMT_64Gb)
2138 ae->un.AttrInt |= HBA_PORTSPEED_64GFC; 2388 ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
2139 if (phba->lmt & LMT_32Gb) 2389 if (phba->lmt & LMT_32Gb)
@@ -2210,6 +2460,9 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
2210 case LPFC_LINK_SPEED_64GHZ: 2460 case LPFC_LINK_SPEED_64GHZ:
2211 ae->un.AttrInt = HBA_PORTSPEED_64GFC; 2461 ae->un.AttrInt = HBA_PORTSPEED_64GFC;
2212 break; 2462 break;
2463 case LPFC_LINK_SPEED_128GHZ:
2464 ae->un.AttrInt = HBA_PORTSPEED_128GFC;
2465 break;
2213 default: 2466 default:
2214 ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN; 2467 ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
2215 break; 2468 break;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 34d311a7dbef..a58f0b3f03a9 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -645,6 +645,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
645 i, ndlp->cmd_qdepth); 645 i, ndlp->cmd_qdepth);
646 outio += i; 646 outio += i;
647 } 647 }
648 len += snprintf(buf + len, size - len, "defer:%x ",
649 ndlp->nlp_defer_did);
648 len += snprintf(buf+len, size-len, "\n"); 650 len += snprintf(buf+len, size-len, "\n");
649 } 651 }
650 spin_unlock_irq(shost->host_lock); 652 spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 28e2b60fc5c0..1c89c9f314fa 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -138,6 +138,7 @@ struct lpfc_nodelist {
138 138
139 uint32_t nvme_fb_size; /* NVME target's supported byte cnt */ 139 uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
140#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */ 140#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
141 uint32_t nlp_defer_did;
141}; 142};
142struct lpfc_node_rrq { 143struct lpfc_node_rrq {
143 struct list_head list; 144 struct list_head list;
@@ -165,6 +166,7 @@ struct lpfc_node_rrq {
165#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ 166#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
166#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */ 167#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
167#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */ 168#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
169#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */
168#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ 170#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
169#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ 171#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
170#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ 172#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
@@ -293,4 +295,4 @@ struct lpfc_node_rrq {
293#define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */ 295#define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */
294#define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */ 296#define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */
295#define NLP_EVT_MAX_EVENT 0xd 297#define NLP_EVT_MAX_EVENT 0xd
296 298#define NLP_EVT_NOTHING_PENDING 0xff
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f1c1faa74b46..b3a4789468c3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -242,6 +242,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
242 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 242 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
243 if (elscmd == ELS_CMD_FLOGI) 243 if (elscmd == ELS_CMD_FLOGI)
244 icmd->ulpTimeout = FF_DEF_RATOV * 2; 244 icmd->ulpTimeout = FF_DEF_RATOV * 2;
245 else if (elscmd == ELS_CMD_LOGO)
246 icmd->ulpTimeout = phba->fc_ratov;
245 else 247 else
246 icmd->ulpTimeout = phba->fc_ratov * 2; 248 icmd->ulpTimeout = phba->fc_ratov * 2;
247 } else { 249 } else {
@@ -313,20 +315,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
313 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 315 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
314 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 316 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
315 "0116 Xmit ELS command x%x to remote " 317 "0116 Xmit ELS command x%x to remote "
316 "NPORT x%x I/O tag: x%x, port state:x%x" 318 "NPORT x%x I/O tag: x%x, port state:x%x "
317 " fc_flag:x%x\n", 319 "rpi x%x fc_flag:x%x\n",
318 elscmd, did, elsiocb->iotag, 320 elscmd, did, elsiocb->iotag,
319 vport->port_state, 321 vport->port_state, ndlp->nlp_rpi,
320 vport->fc_flag); 322 vport->fc_flag);
321 } else { 323 } else {
322 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 324 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
323 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 325 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
324 "0117 Xmit ELS response x%x to remote " 326 "0117 Xmit ELS response x%x to remote "
325 "NPORT x%x I/O tag: x%x, size: x%x " 327 "NPORT x%x I/O tag: x%x, size: x%x "
326 "port_state x%x fc_flag x%x\n", 328 "port_state x%x rpi x%x fc_flag x%x\n",
327 elscmd, ndlp->nlp_DID, elsiocb->iotag, 329 elscmd, ndlp->nlp_DID, elsiocb->iotag,
328 cmdSize, vport->port_state, 330 cmdSize, vport->port_state,
329 vport->fc_flag); 331 ndlp->nlp_rpi, vport->fc_flag);
330 } 332 }
331 return elsiocb; 333 return elsiocb;
332 334
@@ -413,7 +415,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
413 /* increment the reference count on ndlp to hold reference 415 /* increment the reference count on ndlp to hold reference
414 * for the callback routine. 416 * for the callback routine.
415 */ 417 */
416 mbox->context2 = lpfc_nlp_get(ndlp); 418 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
417 419
418 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 420 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
419 if (rc == MBX_NOT_FINISHED) { 421 if (rc == MBX_NOT_FINISHED) {
@@ -428,7 +430,7 @@ fail_issue_reg_login:
428 * for the failed mbox command. 430 * for the failed mbox command.
429 */ 431 */
430 lpfc_nlp_put(ndlp); 432 lpfc_nlp_put(ndlp);
431 mp = (struct lpfc_dmabuf *) mbox->context1; 433 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
432 lpfc_mbuf_free(phba, mp->virt, mp->phys); 434 lpfc_mbuf_free(phba, mp->virt, mp->phys);
433 kfree(mp); 435 kfree(mp);
434fail_free_mbox: 436fail_free_mbox:
@@ -502,7 +504,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
502 504
503 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 505 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
504 mboxq->vport = vport; 506 mboxq->vport = vport;
505 mboxq->context1 = dmabuf; 507 mboxq->ctx_buf = dmabuf;
506 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 508 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
507 if (rc == MBX_NOT_FINISHED) { 509 if (rc == MBX_NOT_FINISHED) {
508 rc = -ENXIO; 510 rc = -ENXIO;
@@ -1055,9 +1057,9 @@ stop_rr_fcf_flogi:
1055 goto flogifail; 1057 goto flogifail;
1056 1058
1057 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 1059 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
1058 "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n", 1060 "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
1059 irsp->ulpStatus, irsp->un.ulpWord[4], 1061 irsp->ulpStatus, irsp->un.ulpWord[4],
1060 irsp->ulpTimeout); 1062 cmdiocb->sli4_xritag, irsp->ulpTimeout);
1061 1063
1062 /* FLOGI failed, so there is no fabric */ 1064 /* FLOGI failed, so there is no fabric */
1063 spin_lock_irq(shost->host_lock); 1065 spin_lock_irq(shost->host_lock);
@@ -1111,7 +1113,8 @@ stop_rr_fcf_flogi:
1111 /* FLOGI completes successfully */ 1113 /* FLOGI completes successfully */
1112 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1114 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1113 "0101 FLOGI completes successfully, I/O tag:x%x, " 1115 "0101 FLOGI completes successfully, I/O tag:x%x, "
1114 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, 1116 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
1117 cmdiocb->iotag, cmdiocb->sli4_xritag,
1115 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 1118 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1116 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, 1119 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1117 vport->port_state, vport->fc_flag); 1120 vport->port_state, vport->fc_flag);
@@ -1155,6 +1158,7 @@ stop_rr_fcf_flogi:
1155 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1158 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1156 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1159 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1157 spin_unlock_irq(&phba->hbalock); 1160 spin_unlock_irq(&phba->hbalock);
1161 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1158 goto out; 1162 goto out;
1159 } 1163 }
1160 if (!rc) { 1164 if (!rc) {
@@ -1169,6 +1173,7 @@ stop_rr_fcf_flogi:
1169 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1173 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1170 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1174 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1171 spin_unlock_irq(&phba->hbalock); 1175 spin_unlock_irq(&phba->hbalock);
1176 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1172 goto out; 1177 goto out;
1173 } 1178 }
1174 } 1179 }
@@ -1229,9 +1234,10 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1229 struct serv_parm *sp; 1234 struct serv_parm *sp;
1230 IOCB_t *icmd; 1235 IOCB_t *icmd;
1231 struct lpfc_iocbq *elsiocb; 1236 struct lpfc_iocbq *elsiocb;
1237 struct lpfc_iocbq defer_flogi_acc;
1232 uint8_t *pcmd; 1238 uint8_t *pcmd;
1233 uint16_t cmdsize; 1239 uint16_t cmdsize;
1234 uint32_t tmo; 1240 uint32_t tmo, did;
1235 int rc; 1241 int rc;
1236 1242
1237 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1243 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
@@ -1303,6 +1309,35 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1303 phba->sli3_options, 0, 0); 1309 phba->sli3_options, 0, 0);
1304 1310
1305 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1311 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1312
1313 phba->hba_flag |= HBA_FLOGI_ISSUED;
1314
1315 /* Check for a deferred FLOGI ACC condition */
1316 if (phba->defer_flogi_acc_flag) {
1317 did = vport->fc_myDID;
1318 vport->fc_myDID = Fabric_DID;
1319
1320 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
1321
1322 defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
1323 defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
1324 phba->defer_flogi_acc_ox_id;
1325
1326 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1327 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
1328 " ox_id: x%x, hba_flag x%x\n",
1329 phba->defer_flogi_acc_rx_id,
1330 phba->defer_flogi_acc_ox_id, phba->hba_flag);
1331
1332 /* Send deferred FLOGI ACC */
1333 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
1334 ndlp, NULL);
1335
1336 phba->defer_flogi_acc_flag = false;
1337
1338 vport->fc_myDID = did;
1339 }
1340
1306 if (rc == IOCB_ERROR) { 1341 if (rc == IOCB_ERROR) {
1307 lpfc_els_free_iocb(phba, elsiocb); 1342 lpfc_els_free_iocb(phba, elsiocb);
1308 return 1; 1343 return 1;
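The deferred-ACC dance is easy to lose in the diff: the receive path (further down, in lpfc_els_rcv_flogi) parks the incoming exchange IDs instead of ACCing immediately, and this transmit path replays them once the local FLOGI is on the wire. A self-contained model of that bookkeeping, with field names borrowed from the patch:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Model only; the real state lives in struct lpfc_hba. */
	struct hba_model {
		bool     flogi_issued;
		bool     defer_flogi_acc_flag;
		uint16_t defer_flogi_acc_rx_id;
		uint16_t defer_flogi_acc_ox_id;
	};

	/* Rx side: FLOGI arrived before we sent ours - park the exchange. */
	static void rcv_flogi(struct hba_model *h, uint16_t rx_id, uint16_t ox_id)
	{
		if (!h->flogi_issued) {
			h->defer_flogi_acc_rx_id = rx_id;
			h->defer_flogi_acc_ox_id = ox_id;
			h->defer_flogi_acc_flag = true;
			return;			/* no ACC yet */
		}
		printf("ACC immediately rx_id %#x ox_id %#x\n", rx_id, ox_id);
	}

	/* Tx side: our FLOGI just went out - replay any parked exchange. */
	static void issue_flogi(struct hba_model *h)
	{
		h->flogi_issued = true;
		if (h->defer_flogi_acc_flag) {
			printf("ACC deferred rx_id %#x ox_id %#x\n",
			       h->defer_flogi_acc_rx_id,
			       h->defer_flogi_acc_ox_id);
			h->defer_flogi_acc_flag = false;
		}
	}

	int main(void)
	{
		struct hba_model h = {0};

		rcv_flogi(&h, 0x10, 0x20);	/* parked */
		issue_flogi(&h);		/* parked ACC goes out now */
		return 0;
	}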
@@ -1338,6 +1373,8 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
1338 Fabric_DID); 1373 Fabric_DID);
1339 1374
1340 pring = lpfc_phba_elsring(phba); 1375 pring = lpfc_phba_elsring(phba);
1376 if (unlikely(!pring))
1377 return -EIO;
1341 1378
1342 /* 1379 /*
1343 * Check the txcmplq for an iocb that matches the nport the driver is 1380 * Check the txcmplq for an iocb that matches the nport the driver is
@@ -1531,7 +1568,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1531 struct serv_parm *sp; 1568 struct serv_parm *sp;
1532 uint8_t name[sizeof(struct lpfc_name)]; 1569 uint8_t name[sizeof(struct lpfc_name)];
1533 uint32_t rc, keepDID = 0, keep_nlp_flag = 0; 1570 uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
1571 uint32_t keep_new_nlp_flag = 0;
1534 uint16_t keep_nlp_state; 1572 uint16_t keep_nlp_state;
1573 u32 keep_nlp_fc4_type = 0;
1535 struct lpfc_nvme_rport *keep_nrport = NULL; 1574 struct lpfc_nvme_rport *keep_nrport = NULL;
1536 int put_node; 1575 int put_node;
1537 int put_rport; 1576 int put_rport;
@@ -1551,8 +1590,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1551 */ 1590 */
1552 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1591 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1553 1592
1593 /* return immediately if the WWPN matches ndlp */
1554 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) 1594 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1555 return ndlp; 1595 return ndlp;
1596
1556 if (phba->sli_rev == LPFC_SLI_REV4) { 1597 if (phba->sli_rev == LPFC_SLI_REV4) {
1557 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1598 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1558 GFP_KERNEL); 1599 GFP_KERNEL);
@@ -1561,9 +1602,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1561 phba->cfg_rrq_xri_bitmap_sz); 1602 phba->cfg_rrq_xri_bitmap_sz);
1562 } 1603 }
1563 1604
1564 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1605 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1565 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n", 1606 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
1566 ndlp, ndlp->nlp_DID, new_ndlp); 1607 "new_ndlp x%x x%x x%x\n",
1608 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
1609 (new_ndlp ? new_ndlp->nlp_DID : 0),
1610 (new_ndlp ? new_ndlp->nlp_flag : 0),
1611 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
1567 1612
1568 if (!new_ndlp) { 1613 if (!new_ndlp) {
1569 rc = memcmp(&ndlp->nlp_portname, name, 1614 rc = memcmp(&ndlp->nlp_portname, name,
@@ -1612,6 +1657,16 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1612 phba->cfg_rrq_xri_bitmap_sz); 1657 phba->cfg_rrq_xri_bitmap_sz);
1613 } 1658 }
1614 1659
1660 /* At this point in this routine, we know new_ndlp will be
 1661 * returned. However, any previous GID_FTs that were done
1662 * would have updated nlp_fc4_type in ndlp, so we must ensure
1663 * new_ndlp has the right value.
1664 */
1665 if (vport->fc_flag & FC_FABRIC) {
1666 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
1667 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
1668 }
1669
1615 lpfc_unreg_rpi(vport, new_ndlp); 1670 lpfc_unreg_rpi(vport, new_ndlp);
1616 new_ndlp->nlp_DID = ndlp->nlp_DID; 1671 new_ndlp->nlp_DID = ndlp->nlp_DID;
1617 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1672 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
@@ -1621,9 +1676,36 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1621 phba->cfg_rrq_xri_bitmap_sz); 1676 phba->cfg_rrq_xri_bitmap_sz);
1622 1677
1623 spin_lock_irq(shost->host_lock); 1678 spin_lock_irq(shost->host_lock);
1624 keep_nlp_flag = new_ndlp->nlp_flag; 1679 keep_new_nlp_flag = new_ndlp->nlp_flag;
1680 keep_nlp_flag = ndlp->nlp_flag;
1625 new_ndlp->nlp_flag = ndlp->nlp_flag; 1681 new_ndlp->nlp_flag = ndlp->nlp_flag;
1626 ndlp->nlp_flag = keep_nlp_flag; 1682
1683 /* if new_ndlp had NLP_UNREG_INP set, keep it */
1684 if (keep_new_nlp_flag & NLP_UNREG_INP)
1685 new_ndlp->nlp_flag |= NLP_UNREG_INP;
1686 else
1687 new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
1688
1689 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
1690 if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
1691 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1692 else
1693 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1694
1695 ndlp->nlp_flag = keep_new_nlp_flag;
1696
1697 /* if ndlp had NLP_UNREG_INP set, keep it */
1698 if (keep_nlp_flag & NLP_UNREG_INP)
1699 ndlp->nlp_flag |= NLP_UNREG_INP;
1700 else
1701 ndlp->nlp_flag &= ~NLP_UNREG_INP;
1702
1703 /* if ndlp had NLP_RPI_REGISTERED set, keep it */
1704 if (keep_nlp_flag & NLP_RPI_REGISTERED)
1705 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1706 else
1707 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1708
1627 spin_unlock_irq(shost->host_lock); 1709 spin_unlock_irq(shost->host_lock);
1628 1710
1629 /* Set nlp_states accordingly */ 1711 /* Set nlp_states accordingly */
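The swap above is subtle: nlp_flag moves wholesale between the two nodes, but NLP_UNREG_INP and NLP_RPI_REGISTERED must stay with the node they were set on, since they track per-node mailbox state. A self-contained distillation of that rule (the NLP_RPI_REGISTERED value is assumed for the model):

	#include <stdint.h>
	#include <stdio.h>

	#define NLP_UNREG_INP       0x00008000u
	#define NLP_RPI_REGISTERED  0x00000004u	/* value assumed */
	#define NLP_STICKY          (NLP_UNREG_INP | NLP_RPI_REGISTERED)

	static void swap_nlp_flags(uint32_t *a, uint32_t *b)
	{
		uint32_t keep_a = *a, keep_b = *b;

		/* exchange all bits ... */
		*a = keep_b;
		*b = keep_a;
		/* ... then pin the sticky bits back to their owners */
		*a = (*a & ~NLP_STICKY) | (keep_a & NLP_STICKY);
		*b = (*b & ~NLP_STICKY) | (keep_b & NLP_STICKY);
	}

	int main(void)
	{
		uint32_t ndlp = 0x00040000u | NLP_UNREG_INP;
		uint32_t new_ndlp = NLP_RPI_REGISTERED;

		swap_nlp_flags(&ndlp, &new_ndlp);
		printf("ndlp %#x new_ndlp %#x\n", ndlp, new_ndlp);
		return 0;
	}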
@@ -1661,7 +1743,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1661 if (ndlp->nrport) { 1743 if (ndlp->nrport) {
1662 ndlp->nrport = NULL; 1744 ndlp->nrport = NULL;
1663 lpfc_nlp_put(ndlp); 1745 lpfc_nlp_put(ndlp);
1664 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
1665 } 1746 }
1666 1747
1667 /* We shall actually free the ndlp with both nlp_DID and 1748 /* We shall actually free the ndlp with both nlp_DID and
@@ -1674,7 +1755,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1674 spin_unlock_irq(&phba->ndlp_lock); 1755 spin_unlock_irq(&phba->ndlp_lock);
1675 } 1756 }
1676 1757
1677 /* Two ndlps cannot have the same did on the nodelist */ 1758 /* Two ndlps cannot have the same did on the nodelist.
1759 * Note: for this case, ndlp has a NULL WWPN so setting
1760 * the nlp_fc4_type isn't required.
1761 */
1678 ndlp->nlp_DID = keepDID; 1762 ndlp->nlp_DID = keepDID;
1679 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1763 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1680 if (phba->sli_rev == LPFC_SLI_REV4 && 1764 if (phba->sli_rev == LPFC_SLI_REV4 &&
@@ -1693,8 +1777,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1693 1777
1694 lpfc_unreg_rpi(vport, ndlp); 1778 lpfc_unreg_rpi(vport, ndlp);
1695 1779
1696 /* Two ndlps cannot have the same did */ 1780 /* Two ndlps cannot have the same did and the fc4
1781 * type must be transferred because the ndlp is in
1782 * flight.
1783 */
1697 ndlp->nlp_DID = keepDID; 1784 ndlp->nlp_DID = keepDID;
1785 ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1786
1698 if (phba->sli_rev == LPFC_SLI_REV4 && 1787 if (phba->sli_rev == LPFC_SLI_REV4 &&
1699 active_rrqs_xri_bitmap) 1788 active_rrqs_xri_bitmap)
1700 memcpy(ndlp->active_rrqs_xri_bitmap, 1789 memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -1735,6 +1824,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1735 active_rrqs_xri_bitmap) 1824 active_rrqs_xri_bitmap)
1736 mempool_free(active_rrqs_xri_bitmap, 1825 mempool_free(active_rrqs_xri_bitmap,
1737 phba->active_rrq_pool); 1826 phba->active_rrq_pool);
1827
1828 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1829 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
1830 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
1831 new_ndlp->nlp_fc4_type);
1832
1738 return new_ndlp; 1833 return new_ndlp;
1739} 1834}
1740 1835
@@ -1895,7 +1990,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1895 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1990 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1896 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1991 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1897 spin_unlock_irq(shost->host_lock); 1992 spin_unlock_irq(shost->host_lock);
1898 rc = 0; 1993 rc = 0;
1899 1994
1900 /* PLOGI completes to NPort <nlp_DID> */ 1995 /* PLOGI completes to NPort <nlp_DID> */
1901 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1996 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -2002,8 +2097,29 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2002 int ret; 2097 int ret;
2003 2098
2004 ndlp = lpfc_findnode_did(vport, did); 2099 ndlp = lpfc_findnode_did(vport, did);
2005 if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) 2100
2006 ndlp = NULL; 2101 if (ndlp) {
2102 /* Defer the processing of the issue PLOGI until after the
2103 * outstanding UNREG_RPI mbox command completes, unless we
2104 * are going offline. This logic does not apply for Fabric DIDs
2105 */
2106 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2107 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
2108 !(vport->fc_flag & FC_OFFLINE_MODE)) {
2109 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2110 "4110 Issue PLOGI x%x deferred "
2111 "on NPort x%x rpi x%x Data: %p\n",
2112 ndlp->nlp_defer_did, ndlp->nlp_DID,
2113 ndlp->nlp_rpi, ndlp);
2114
2115 /* We can only defer 1st PLOGI */
2116 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
2117 ndlp->nlp_defer_did = did;
2118 return 0;
2119 }
2120 if (!NLP_CHK_NODE_ACT(ndlp))
2121 ndlp = NULL;
2122 }
2007 2123
2008 /* If ndlp is not NULL, we will bump the reference count on it */ 2124 /* If ndlp is not NULL, we will bump the reference count on it */
2009 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2125 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
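With NLP_UNREG_INP set, a PLOGI issued now would race the outstanding UNREG_RPI mailbox, so the DID is parked in nlp_defer_did and only the first deferral is remembered. A self-contained model of that single-slot deferral; the replay on UNREG_RPI completion is a plausible reading of how nlp_defer_did is consumed, as the actual completion hook is outside this excerpt:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NLP_EVT_NOTHING_PENDING 0xffu	/* empty slot marker */

	struct node_model {
		bool     unreg_inp;	/* UNREG_RPI mbox outstanding */
		uint32_t nlp_defer_did;	/* parked PLOGI target, if any */
	};

	static int issue_plogi(struct node_model *n, uint32_t did)
	{
		if (n->unreg_inp) {
			/* only the 1st PLOGI is deferred; later ones drop */
			if (n->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
				n->nlp_defer_did = did;
			return 0;	/* deferred, nothing sent */
		}
		printf("PLOGI sent to %06x\n", did);
		return 1;
	}

	/* UNREG_RPI completion: replay the parked PLOGI, if any. */
	static void unreg_rpi_done(struct node_model *n)
	{
		n->unreg_inp = false;
		if (n->nlp_defer_did != NLP_EVT_NOTHING_PENDING) {
			uint32_t did = n->nlp_defer_did;

			n->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
			issue_plogi(n, did);
		}
	}

	int main(void)
	{
		struct node_model n = { true, NLP_EVT_NOTHING_PENDING };

		issue_plogi(&n, 0x010203);	/* parked */
		unreg_rpi_done(&n);		/* replayed now */
		return 0;
	}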
@@ -2137,7 +2253,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2137 else 2253 else
2138 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2254 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2139 NLP_EVT_CMPL_PRLI); 2255 NLP_EVT_CMPL_PRLI);
2140 } else 2256 } else {
2141 /* Good status, call state machine. However, if another 2257 /* Good status, call state machine. However, if another
2142 * PRLI is outstanding, don't call the state machine 2258 * PRLI is outstanding, don't call the state machine
2143 * because final disposition to Mapped or Unmapped is 2259 * because final disposition to Mapped or Unmapped is
@@ -2145,6 +2261,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2145 */ 2261 */
2146 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2262 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2147 NLP_EVT_CMPL_PRLI); 2263 NLP_EVT_CMPL_PRLI);
2264 }
2148 2265
2149out: 2266out:
2150 lpfc_els_free_iocb(phba, cmdiocb); 2267 lpfc_els_free_iocb(phba, cmdiocb);
@@ -2203,7 +2320,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2203 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2320 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2204 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2321 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2205 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2322 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2206 ndlp->nlp_flag &= ~NLP_FIRSTBURST; 2323 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
2207 ndlp->nvme_fb_size = 0; 2324 ndlp->nvme_fb_size = 0;
2208 2325
2209 send_next_prli: 2326 send_next_prli:
@@ -2682,16 +2799,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2682 goto out; 2799 goto out;
2683 } 2800 }
2684 2801
2802 /* The LOGO will not be retried on failure. A LOGO was
 2803 * issued to the remote rport and an ACC, an RJT, or no answer is
 2804 * acceptable. Note the failure and move forward with
2805 * discovery. The PLOGI will retry.
2806 */
2685 if (irsp->ulpStatus) { 2807 if (irsp->ulpStatus) {
2686 /* Check for retry */
2687 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2688 /* ELS command is being retried */
2689 skip_recovery = 1;
2690 goto out;
2691 }
2692 /* LOGO failed */ 2808 /* LOGO failed */
2693 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2809 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2694 "2756 LOGO failure DID:%06X Status:x%x/x%x\n", 2810 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
2695 ndlp->nlp_DID, irsp->ulpStatus, 2811 ndlp->nlp_DID, irsp->ulpStatus,
2696 irsp->un.ulpWord[4]); 2812 irsp->un.ulpWord[4]);
2697 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2813 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
@@ -2737,7 +2853,8 @@ out:
2737 * For any other port type, the rpi is unregistered as an implicit 2853 * For any other port type, the rpi is unregistered as an implicit
2738 * LOGO. 2854 * LOGO.
2739 */ 2855 */
2740 if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) { 2856 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
2857 skip_recovery == 0) {
2741 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2858 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2742 spin_lock_irqsave(shost->host_lock, flags); 2859 spin_lock_irqsave(shost->host_lock, flags);
2743 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2860 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -2770,6 +2887,8 @@ out:
2770 * will be stored into the context1 field of the IOCB for the completion 2887 * will be stored into the context1 field of the IOCB for the completion
2771 * callback function to the LOGO ELS command. 2888 * callback function to the LOGO ELS command.
2772 * 2889 *
 2890 * Callers of this routine are expected to unregister the RPI first.
2891 *
2773 * Return code 2892 * Return code
2774 * 0 - successfully issued logo 2893 * 0 - successfully issued logo
2775 * 1 - failed to issue logo 2894 * 1 - failed to issue logo
@@ -2811,22 +2930,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2811 "Issue LOGO: did:x%x", 2930 "Issue LOGO: did:x%x",
2812 ndlp->nlp_DID, 0, 0); 2931 ndlp->nlp_DID, 0, 0);
2813 2932
2814 /*
2815 * If we are issuing a LOGO, we may try to recover the remote NPort
2816 * by issuing a PLOGI later. Even though we issue ELS cmds by the
2817 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
2818 * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
2819 * for that ELS cmd. To avoid this situation, lets get rid of the
2820 * RPI right now, before any ELS cmds are sent.
2821 */
2822 spin_lock_irq(shost->host_lock);
2823 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
2824 spin_unlock_irq(shost->host_lock);
2825 if (lpfc_unreg_rpi(vport, ndlp)) {
2826 lpfc_els_free_iocb(phba, elsiocb);
2827 return 0;
2828 }
2829
2830 phba->fc_stat.elsXmitLOGO++; 2933 phba->fc_stat.elsXmitLOGO++;
2831 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2934 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2832 spin_lock_irq(shost->host_lock); 2935 spin_lock_irq(shost->host_lock);
@@ -2834,7 +2937,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2834 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 2937 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2835 spin_unlock_irq(shost->host_lock); 2938 spin_unlock_irq(shost->host_lock);
2836 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2939 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2837
2838 if (rc == IOCB_ERROR) { 2940 if (rc == IOCB_ERROR) {
2839 spin_lock_irq(shost->host_lock); 2941 spin_lock_irq(shost->host_lock);
2840 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2942 ndlp->nlp_flag &= ~NLP_LOGO_SND;
@@ -2842,6 +2944,11 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2842 lpfc_els_free_iocb(phba, elsiocb); 2944 lpfc_els_free_iocb(phba, elsiocb);
2843 return 1; 2945 return 1;
2844 } 2946 }
2947
2948 spin_lock_irq(shost->host_lock);
2949 ndlp->nlp_prev_state = ndlp->nlp_state;
2950 spin_unlock_irq(shost->host_lock);
2951 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
2845 return 0; 2952 return 0;
2846} 2953}
2847 2954
@@ -3250,6 +3357,62 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
3250} 3357}
3251 3358
3252/** 3359/**
3360 * lpfc_link_reset - Issue link reset
3361 * @vport: pointer to a virtual N_Port data structure.
3362 *
3363 * This routine performs link reset by sending INIT_LINK mailbox command.
3364 * For SLI-3 adapter, link attention interrupt is enabled before issuing
3365 * INIT_LINK mailbox command.
3366 *
3367 * Return code
3368 * 0 - Link reset initiated successfully
3369 * 1 - Failed to initiate link reset
3370 **/
3371int
3372lpfc_link_reset(struct lpfc_vport *vport)
3373{
3374 struct lpfc_hba *phba = vport->phba;
3375 LPFC_MBOXQ_t *mbox;
3376 uint32_t control;
3377 int rc;
3378
3379 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3380 "2851 Attempt link reset\n");
3381 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3382 if (!mbox) {
3383 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
3384 "2852 Failed to allocate mbox memory");
3385 return 1;
3386 }
3387
3388 /* Enable Link attention interrupts */
3389 if (phba->sli_rev <= LPFC_SLI_REV3) {
3390 spin_lock_irq(&phba->hbalock);
3391 phba->sli.sli_flag |= LPFC_PROCESS_LA;
3392 control = readl(phba->HCregaddr);
3393 control |= HC_LAINT_ENA;
3394 writel(control, phba->HCregaddr);
3395 readl(phba->HCregaddr); /* flush */
3396 spin_unlock_irq(&phba->hbalock);
3397 }
3398
3399 lpfc_init_link(phba, mbox, phba->cfg_topology,
3400 phba->cfg_link_speed);
3401 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3402 mbox->vport = vport;
3403 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3404 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3405 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
3406 "2853 Failed to issue INIT_LINK "
3407 "mbox command, rc:x%x\n", rc);
3408 mempool_free(mbox, phba->mbox_mem_pool);
3409 return 1;
3410 }
3411
3412 return 0;
3413}
3414
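lpfc_link_reset() is consumed by lpfc_els_retry() further down: when a NameServer PLOGI hits IOERR_SEQUENCE_TIMEOUT on its final retry, the driver bounces the link instead of giving up. The call pattern, condensed from the hunks below (not new logic):

	if (link_reset) {
		if (lpfc_link_reset(vport)) {
			/* INIT_LINK not issued: retry the PLOGI once
			 * more and attempt the link reset again if it
			 * fails again.
			 */
			retry = 1;
			delay = 100;
			goto out_retry;
		}
		return 1;	/* link bounce in progress */
	}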
3415/**
3253 * lpfc_els_retry - Make retry decision on an els command iocb 3416 * lpfc_els_retry - Make retry decision on an els command iocb
3254 * @phba: pointer to lpfc hba data structure. 3417 * @phba: pointer to lpfc hba data structure.
3255 * @cmdiocb: pointer to lpfc command iocb data structure. 3418 * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -3285,6 +3448,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3285 int logerr = 0; 3448 int logerr = 0;
3286 uint32_t cmd = 0; 3449 uint32_t cmd = 0;
3287 uint32_t did; 3450 uint32_t did;
3451 int link_reset = 0, rc;
3288 3452
3289 3453
3290 /* Note: context2 may be 0 for internal driver abort 3454 /* Note: context2 may be 0 for internal driver abort
@@ -3366,7 +3530,6 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3366 retry = 1; 3530 retry = 1;
3367 break; 3531 break;
3368 3532
3369 case IOERR_SEQUENCE_TIMEOUT:
3370 case IOERR_INVALID_RPI: 3533 case IOERR_INVALID_RPI:
3371 if (cmd == ELS_CMD_PLOGI && 3534 if (cmd == ELS_CMD_PLOGI &&
3372 did == NameServer_DID) { 3535 did == NameServer_DID) {
@@ -3377,6 +3540,18 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3377 } 3540 }
3378 retry = 1; 3541 retry = 1;
3379 break; 3542 break;
3543
3544 case IOERR_SEQUENCE_TIMEOUT:
3545 if (cmd == ELS_CMD_PLOGI &&
3546 did == NameServer_DID &&
3547 (cmdiocb->retry + 1) == maxretry) {
3548 /* Reset the Link */
3549 link_reset = 1;
3550 break;
3551 }
3552 retry = 1;
3553 delay = 100;
3554 break;
3380 } 3555 }
3381 break; 3556 break;
3382 3557
@@ -3533,6 +3708,19 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3533 break; 3708 break;
3534 } 3709 }
3535 3710
3711 if (link_reset) {
3712 rc = lpfc_link_reset(vport);
3713 if (rc) {
3714 /* Do not give up. Retry PLOGI one more time and attempt
3715 * link reset if PLOGI fails again.
3716 */
3717 retry = 1;
3718 delay = 100;
3719 goto out_retry;
3720 }
3721 return 1;
3722 }
3723
3536 if (did == FDMI_DID) 3724 if (did == FDMI_DID)
3537 retry = 1; 3725 retry = 1;
3538 3726
@@ -3895,11 +4083,11 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3895void 4083void
3896lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4084lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3897{ 4085{
3898 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 4086 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3899 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 4087 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3900 4088
3901 pmb->context1 = NULL; 4089 pmb->ctx_buf = NULL;
3902 pmb->context2 = NULL; 4090 pmb->ctx_ndlp = NULL;
3903 4091
3904 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4092 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3905 kfree(mp); 4093 kfree(mp);
@@ -3975,7 +4163,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3975 /* Check to see if link went down during discovery */ 4163 /* Check to see if link went down during discovery */
3976 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) { 4164 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
3977 if (mbox) { 4165 if (mbox) {
3978 mp = (struct lpfc_dmabuf *) mbox->context1; 4166 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
3979 if (mp) { 4167 if (mp) {
3980 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4168 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3981 kfree(mp); 4169 kfree(mp);
@@ -4019,7 +4207,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4019 "Data: x%x x%x x%x\n", 4207 "Data: x%x x%x x%x\n",
4020 ndlp->nlp_DID, ndlp->nlp_state, 4208 ndlp->nlp_DID, ndlp->nlp_state,
4021 ndlp->nlp_rpi, ndlp->nlp_flag); 4209 ndlp->nlp_rpi, ndlp->nlp_flag);
4022 mp = mbox->context1; 4210 mp = mbox->ctx_buf;
4023 if (mp) { 4211 if (mp) {
4024 lpfc_mbuf_free(phba, mp->virt, 4212 lpfc_mbuf_free(phba, mp->virt,
4025 mp->phys); 4213 mp->phys);
@@ -4032,7 +4220,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4032 /* Increment reference count to ndlp to hold the 4220 /* Increment reference count to ndlp to hold the
4033 * reference to ndlp for the callback function. 4221 * reference to ndlp for the callback function.
4034 */ 4222 */
4035 mbox->context2 = lpfc_nlp_get(ndlp); 4223 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
4036 mbox->vport = vport; 4224 mbox->vport = vport;
4037 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 4225 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
4038 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 4226 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
@@ -4086,7 +4274,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4086 } 4274 }
4087 } 4275 }
4088 } 4276 }
4089 mp = (struct lpfc_dmabuf *) mbox->context1; 4277 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
4090 if (mp) { 4278 if (mp) {
4091 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4279 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4092 kfree(mp); 4280 kfree(mp);
@@ -4272,14 +4460,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
4272 default: 4460 default:
4273 return 1; 4461 return 1;
4274 } 4462 }
4275 /* Xmit ELS ACC response tag <ulpIoTag> */
4276 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4277 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
4278 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
4279 "fc_flag x%x\n",
4280 elsiocb->iotag, elsiocb->iocb.ulpContext,
4281 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4282 ndlp->nlp_rpi, vport->fc_flag);
4283 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 4463 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
4284 spin_lock_irq(shost->host_lock); 4464 spin_lock_irq(shost->host_lock);
4285 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 4465 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
@@ -4448,6 +4628,15 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4448 lpfc_els_free_iocb(phba, elsiocb); 4628 lpfc_els_free_iocb(phba, elsiocb);
4449 return 1; 4629 return 1;
4450 } 4630 }
4631
4632 /* Xmit ELS ACC response tag <ulpIoTag> */
4633 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4634 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
4635 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
4636 "RPI: x%x, fc_flag x%x\n",
4637 rc, elsiocb->iotag, elsiocb->sli4_xritag,
4638 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4639 ndlp->nlp_rpi, vport->fc_flag);
4451 return 0; 4640 return 0;
4452} 4641}
4453 4642
@@ -5281,6 +5470,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
5281 5470
5282 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 5471 desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
5283 5472
5473 if (phba->lmt & LMT_128Gb)
5474 rdp_cap |= RDP_PS_128GB;
5284 if (phba->lmt & LMT_64Gb) 5475 if (phba->lmt & LMT_64Gb)
5285 rdp_cap |= RDP_PS_64GB; 5476 rdp_cap |= RDP_PS_64GB;
5286 if (phba->lmt & LMT_32Gb) 5477 if (phba->lmt & LMT_32Gb)
@@ -5499,7 +5690,7 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
5499 goto prep_mbox_fail; 5690 goto prep_mbox_fail;
5500 mbox->vport = rdp_context->ndlp->vport; 5691 mbox->vport = rdp_context->ndlp->vport;
5501 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 5692 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
5502 mbox->context2 = (struct lpfc_rdp_context *) rdp_context; 5693 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
5503 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5694 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5504 if (rc == MBX_NOT_FINISHED) 5695 if (rc == MBX_NOT_FINISHED)
5505 goto issue_mbox_fail; 5696 goto issue_mbox_fail;
@@ -5542,7 +5733,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5542 struct ls_rjt stat; 5733 struct ls_rjt stat;
5543 5734
5544 if (phba->sli_rev < LPFC_SLI_REV4 || 5735 if (phba->sli_rev < LPFC_SLI_REV4 ||
5545 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 5736 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
5546 LPFC_SLI_INTF_IF_TYPE_2) { 5737 LPFC_SLI_INTF_IF_TYPE_2) {
5547 rjt_err = LSRJT_UNABLE_TPC; 5738 rjt_err = LSRJT_UNABLE_TPC;
5548 rjt_expl = LSEXP_REQ_UNSUPPORTED; 5739 rjt_expl = LSEXP_REQ_UNSUPPORTED;
@@ -5624,10 +5815,10 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5624 int rc; 5815 int rc;
5625 5816
5626 mb = &pmb->u.mb; 5817 mb = &pmb->u.mb;
5627 lcb_context = (struct lpfc_lcb_context *)pmb->context1; 5818 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
5628 ndlp = lcb_context->ndlp; 5819 ndlp = lcb_context->ndlp;
5629 pmb->context1 = NULL; 5820 pmb->ctx_ndlp = NULL;
5630 pmb->context2 = NULL; 5821 pmb->ctx_buf = NULL;
5631 5822
5632 shdr = (union lpfc_sli4_cfg_shdr *) 5823 shdr = (union lpfc_sli4_cfg_shdr *)
5633 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 5824 &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
@@ -5701,6 +5892,9 @@ error:
5701 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 5892 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
5702 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5893 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5703 5894
5895 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
5896 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
5897
5704 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5898 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5705 phba->fc_stat.elsXmitLSRJT++; 5899 phba->fc_stat.elsXmitLSRJT++;
5706 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5900 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -5731,7 +5925,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
5731 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5925 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5732 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 5926 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
5733 LPFC_SLI4_MBX_EMBED); 5927 LPFC_SLI4_MBX_EMBED);
5734 mbox->context1 = (void *)lcb_context; 5928 mbox->ctx_ndlp = (void *)lcb_context;
5735 mbox->vport = phba->pport; 5929 mbox->vport = phba->pport;
5736 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 5930 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
5737 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 5931 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
@@ -6011,6 +6205,19 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
6011 if (vport->phba->nvmet_support) 6205 if (vport->phba->nvmet_support)
6012 continue; 6206 continue;
6013 6207
6208 /* If we are in the process of doing discovery on this
6209 * NPort, let it continue on its own.
6210 */
6211 switch (ndlp->nlp_state) {
6212 case NLP_STE_PLOGI_ISSUE:
6213 case NLP_STE_ADISC_ISSUE:
6214 case NLP_STE_REG_LOGIN_ISSUE:
6215 case NLP_STE_PRLI_ISSUE:
6216 case NLP_STE_LOGO_ISSUE:
6217 continue;
6218 }
6219
6220
6014 lpfc_disc_state_machine(vport, ndlp, NULL, 6221 lpfc_disc_state_machine(vport, ndlp, NULL,
6015 NLP_EVT_DEVICE_RECOVERY); 6222 NLP_EVT_DEVICE_RECOVERY);
6016 lpfc_cancel_retry_delay_tmo(vport, ndlp); 6223 lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -6272,6 +6479,7 @@ int
6272lpfc_els_handle_rscn(struct lpfc_vport *vport) 6479lpfc_els_handle_rscn(struct lpfc_vport *vport)
6273{ 6480{
6274 struct lpfc_nodelist *ndlp; 6481 struct lpfc_nodelist *ndlp;
6482 struct lpfc_hba *phba = vport->phba;
6275 6483
6276 /* Ignore RSCN if the port is being torn down. */ 6484 /* Ignore RSCN if the port is being torn down. */
6277 if (vport->load_flag & FC_UNLOADING) { 6485 if (vport->load_flag & FC_UNLOADING) {
@@ -6300,8 +6508,15 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
6300 * flush the RSCN. Otherwise, the outstanding requests 6508 * flush the RSCN. Otherwise, the outstanding requests
6301 * need to complete. 6509 * need to complete.
6302 */ 6510 */
6303 if (lpfc_issue_gidft(vport) > 0) 6511 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
6512 if (lpfc_issue_gidft(vport) > 0)
6513 return 1;
6514 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
6515 if (lpfc_issue_gidpt(vport) > 0)
6516 return 1;
6517 } else {
6304 return 1; 6518 return 1;
6519 }
6305 } else { 6520 } else {
6306 /* Nameserver login in question. Revalidate. */ 6521 /* Nameserver login in question. Revalidate. */
6307 if (ndlp) { 6522 if (ndlp) {
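The RSCN completion path now dispatches on cfg_ns_query rather than hard-wiring GID_FT. A minimal sketch of the dispatch, assuming LPFC_NS_QUERY_GID_FT and LPFC_NS_QUERY_GID_PT are the only defined modes:

	static int lpfc_rscn_ns_query_sketch(struct lpfc_vport *vport)
	{
		struct lpfc_hba *phba = vport->phba;

		switch (phba->cfg_ns_query) {
		case LPFC_NS_QUERY_GID_FT:
			return lpfc_issue_gidft(vport) > 0;
		case LPFC_NS_QUERY_GID_PT:
			return lpfc_issue_gidpt(vport) > 0;
		default:
			return 1;	/* preserve the prior behavior */
		}
	}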
@@ -6455,6 +6670,11 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6455 port_state = vport->port_state; 6670 port_state = vport->port_state;
6456 vport->fc_flag |= FC_PT2PT; 6671 vport->fc_flag |= FC_PT2PT;
6457 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 6672 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6673
6674 /* Acking an unsol FLOGI. Count 1 for link bounce
6675 * work-around.
6676 */
6677 vport->rcv_flogi_cnt++;
6458 spin_unlock_irq(shost->host_lock); 6678 spin_unlock_irq(shost->host_lock);
6459 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6679 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6460 "3311 Rcv Flogi PS x%x new PS x%x " 6680 "3311 Rcv Flogi PS x%x new PS x%x "
@@ -6472,6 +6692,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6472 6692
6473 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 6693 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
6474 6694
6695 /* Defer ACC response until AFTER we issue a FLOGI */
6696 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
6697 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
6698 phba->defer_flogi_acc_ox_id =
6699 cmdiocb->iocb.unsli3.rcvsli3.ox_id;
6700
6701 vport->fc_myDID = did;
6702
6703 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6704 "3344 Deferring FLOGI ACC: rx_id: x%x,"
6705 " ox_id: x%x, hba_flag x%x\n",
6706 phba->defer_flogi_acc_rx_id,
6707 phba->defer_flogi_acc_ox_id, phba->hba_flag);
6708
6709 phba->defer_flogi_acc_flag = true;
6710
6711 return 0;
6712 }
6713
6475 /* Send back ACC */ 6714 /* Send back ACC */
6476 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 6715 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
6477 6716
@@ -6644,11 +6883,11 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6644 6883
6645 mb = &pmb->u.mb; 6884 mb = &pmb->u.mb;
6646 6885
6647 ndlp = (struct lpfc_nodelist *) pmb->context2; 6886 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
6648 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); 6887 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
6649 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); 6888 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
6650 pmb->context1 = NULL; 6889 pmb->ctx_buf = NULL;
6651 pmb->context2 = NULL; 6890 pmb->ctx_ndlp = NULL;
6652 6891
6653 if (mb->mbxStatus) { 6892 if (mb->mbxStatus) {
6654 mempool_free(pmb, phba->mbox_mem_pool); 6893 mempool_free(pmb, phba->mbox_mem_pool);
@@ -6732,11 +6971,11 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6732 6971
6733 mb = &pmb->u.mb; 6972 mb = &pmb->u.mb;
6734 6973
6735 ndlp = (struct lpfc_nodelist *) pmb->context2; 6974 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
6736 rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff); 6975 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
6737 oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff); 6976 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
6738 pmb->context1 = NULL; 6977 pmb->ctx_ndlp = NULL;
6739 pmb->context2 = NULL; 6978 pmb->ctx_buf = NULL;
6740 6979
6741 if (mb->mbxStatus) { 6980 if (mb->mbxStatus) {
6742 mempool_free(pmb, phba->mbox_mem_pool); 6981 mempool_free(pmb, phba->mbox_mem_pool);
@@ -6827,10 +7066,10 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6827 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 7066 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6828 if (mbox) { 7067 if (mbox) {
6829 lpfc_read_lnk_stat(phba, mbox); 7068 lpfc_read_lnk_stat(phba, mbox);
6830 mbox->context1 = (void *)((unsigned long) 7069 mbox->ctx_buf = (void *)((unsigned long)
6831 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 7070 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6832 cmdiocb->iocb.ulpContext)); /* rx_id */ 7071 cmdiocb->iocb.ulpContext)); /* rx_id */
6833 mbox->context2 = lpfc_nlp_get(ndlp); 7072 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
6834 mbox->vport = vport; 7073 mbox->vport = vport;
6835 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 7074 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
6836 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 7075 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
@@ -6990,10 +7229,10 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6990 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 7229 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6991 if (mbox) { 7230 if (mbox) {
6992 lpfc_read_lnk_stat(phba, mbox); 7231 lpfc_read_lnk_stat(phba, mbox);
6993 mbox->context1 = (void *)((unsigned long) 7232 mbox->ctx_buf = (void *)((unsigned long)
6994 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 7233 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6995 cmdiocb->iocb.ulpContext)); /* rx_id */ 7234 cmdiocb->iocb.ulpContext)); /* rx_id */
6996 mbox->context2 = lpfc_nlp_get(ndlp); 7235 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
6997 mbox->vport = vport; 7236 mbox->vport = vport;
6998 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 7237 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
6999 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 7238 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
@@ -7852,8 +8091,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7852 struct ls_rjt stat; 8091 struct ls_rjt stat;
7853 uint32_t *payload; 8092 uint32_t *payload;
7854 uint32_t cmd, did, newnode; 8093 uint32_t cmd, did, newnode;
7855 uint8_t rjt_exp, rjt_err = 0; 8094 uint8_t rjt_exp, rjt_err = 0, init_link = 0;
7856 IOCB_t *icmd = &elsiocb->iocb; 8095 IOCB_t *icmd = &elsiocb->iocb;
8096 LPFC_MBOXQ_t *mbox;
7857 8097
7858 if (!vport || !(elsiocb->context2)) 8098 if (!vport || !(elsiocb->context2))
7859 goto dropit; 8099 goto dropit;
@@ -7940,9 +8180,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7940 cmd, did, vport->port_state, vport->fc_flag, 8180 cmd, did, vport->port_state, vport->fc_flag,
7941 vport->fc_myDID, vport->fc_prevDID); 8181 vport->fc_myDID, vport->fc_prevDID);
7942 8182
7943 /* reject till our FLOGI completes */ 8183 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
7944 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 8184 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
7945 (cmd != ELS_CMD_FLOGI)) { 8185 (cmd != ELS_CMD_FLOGI) &&
8186 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
7946 rjt_err = LSRJT_LOGICAL_BSY; 8187 rjt_err = LSRJT_LOGICAL_BSY;
7947 rjt_exp = LSEXP_NOTHING_MORE; 8188 rjt_exp = LSEXP_NOTHING_MORE;
7948 goto lsrjt; 8189 goto lsrjt;
@@ -8002,6 +8243,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8002 did, vport->port_state, ndlp->nlp_flag); 8243 did, vport->port_state, ndlp->nlp_flag);
8003 8244
8004 phba->fc_stat.elsRcvFLOGI++; 8245 phba->fc_stat.elsRcvFLOGI++;
8246
8247 /* If the driver believes fabric discovery is done and is ready,
8248 * bounce the link. There is some descrepancy.
8249 */
8250 if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
8251 vport->fc_flag & FC_PT2PT &&
8252 vport->rcv_flogi_cnt >= 1) {
8253 rjt_err = LSRJT_LOGICAL_BSY;
8254 rjt_exp = LSEXP_NOTHING_MORE;
8255 init_link++;
8256 goto lsrjt;
8257 }
8258
8005 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 8259 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
8006 if (newnode) 8260 if (newnode)
8007 lpfc_nlp_put(ndlp); 8261 lpfc_nlp_put(ndlp);
@@ -8230,6 +8484,27 @@ lsrjt:
8230 8484
8231 lpfc_nlp_put(elsiocb->context1); 8485 lpfc_nlp_put(elsiocb->context1);
8232 elsiocb->context1 = NULL; 8486 elsiocb->context1 = NULL;
8487
8488 /* Special case. Driver received an unsolicited command that
 8489 * is unsupportable given the driver's current state. Reset the
8490 * link and start over.
8491 */
8492 if (init_link) {
8493 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8494 if (!mbox)
8495 return;
8496 lpfc_linkdown(phba);
8497 lpfc_init_link(phba, mbox,
8498 phba->cfg_topology,
8499 phba->cfg_link_speed);
8500 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
8501 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
8502 mbox->vport = vport;
8503 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
8504 MBX_NOT_FINISHED)
8505 mempool_free(mbox, phba->mbox_mem_pool);
8506 }
8507
8233 return; 8508 return;
8234 8509
8235dropit: 8510dropit:
@@ -8453,7 +8728,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8453{ 8728{
8454 struct lpfc_vport *vport = pmb->vport; 8729 struct lpfc_vport *vport = pmb->vport;
8455 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8730 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8456 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 8731 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
8457 MAILBOX_t *mb = &pmb->u.mb; 8732 MAILBOX_t *mb = &pmb->u.mb;
8458 int rc; 8733 int rc;
8459 8734
@@ -8571,7 +8846,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
8571 if (mbox) { 8846 if (mbox) {
8572 lpfc_reg_vpi(vport, mbox); 8847 lpfc_reg_vpi(vport, mbox);
8573 mbox->vport = vport; 8848 mbox->vport = vport;
8574 mbox->context2 = lpfc_nlp_get(ndlp); 8849 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8575 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 8850 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
8576 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8851 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8577 == MBX_NOT_FINISHED) { 8852 == MBX_NOT_FINISHED) {
@@ -9505,7 +9780,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
9505 "rport in state 0x%x\n", ndlp->nlp_state); 9780 "rport in state 0x%x\n", ndlp->nlp_state);
9506 return; 9781 return;
9507 } 9782 }
9508 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9783 lpfc_printf_log(phba, KERN_ERR,
9784 LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
9509 "3094 Start rport recovery on shost id 0x%x " 9785 "3094 Start rport recovery on shost id 0x%x "
9510 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 9786 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
9511 "flags 0x%x\n", 9787 "flags 0x%x\n",
@@ -9518,8 +9794,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
9518 */ 9794 */
9519 spin_lock_irqsave(shost->host_lock, flags); 9795 spin_lock_irqsave(shost->host_lock, flags);
9520 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 9796 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
9797 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
9521 spin_unlock_irqrestore(shost->host_lock, flags); 9798 spin_unlock_irqrestore(shost->host_lock, flags);
9522 lpfc_issue_els_logo(vport, ndlp, 0); 9799 lpfc_unreg_rpi(vport, ndlp);
9523 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
9524} 9800}
9525 9801
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f4deb862efc6..b183b882d506 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -888,17 +888,31 @@ lpfc_linkdown(struct lpfc_hba *phba)
 	LPFC_MBOXQ_t *mb;
 	int i;
 
-	if (phba->link_state == LPFC_LINK_DOWN)
+	if (phba->link_state == LPFC_LINK_DOWN) {
+		if (phba->sli4_hba.conf_trunk) {
+			phba->trunk_link.link0.state = 0;
+			phba->trunk_link.link1.state = 0;
+			phba->trunk_link.link2.state = 0;
+			phba->trunk_link.link3.state = 0;
+		}
 		return 0;
-
+	}
 	/* Block all SCSI stack I/Os */
 	lpfc_scsi_dev_block(phba);
 
+	phba->defer_flogi_acc_flag = false;
+
 	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
 	spin_unlock_irq(&phba->hbalock);
 	if (phba->link_state > LPFC_LINK_DOWN) {
 		phba->link_state = LPFC_LINK_DOWN;
+		if (phba->sli4_hba.conf_trunk) {
+			phba->trunk_link.link0.state = 0;
+			phba->trunk_link.link1.state = 0;
+			phba->trunk_link.link2.state = 0;
+			phba->trunk_link.link3.state = 0;
+		}
 		spin_lock_irq(shost->host_lock);
 		phba->pport->fc_flag &= ~FC_LBIT;
 		spin_unlock_irq(shost->host_lock);
@@ -947,6 +961,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
 	}
 	spin_lock_irq(shost->host_lock);
 	phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+	phba->pport->rcv_flogi_cnt = 0;
 	spin_unlock_irq(shost->host_lock);
 	}
 	return 0;
@@ -1018,6 +1033,7 @@ lpfc_linkup(struct lpfc_hba *phba)
 {
 	struct lpfc_vport **vports;
 	int i;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
 
 	phba->link_state = LPFC_LINK_UP;
 
@@ -1031,6 +1047,18 @@ lpfc_linkup(struct lpfc_hba *phba)
 		lpfc_linkup_port(vports[i]);
 	lpfc_destroy_vport_work_array(phba, vports);
 
+	/* Clear the pport flogi counter in case the link down was
+	 * absorbed without an ACQE. No lock here - in worker thread
+	 * and discovery is synchronized.
+	 */
+	spin_lock_irq(shost->host_lock);
+	phba->pport->rcv_flogi_cnt = 0;
+	spin_unlock_irq(shost->host_lock);
+
+	/* reinitialize initial FLOGI flag */
+	phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
+	phba->defer_flogi_acc_flag = false;
+
 	return 0;
 }
 
@@ -1992,6 +2020,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
 				"failover and change port state:x%x/x%x\n",
 				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+		if (!phba->fcf.fcf_redisc_attempted) {
+			lpfc_unregister_fcf(phba);
+
+			rc = lpfc_sli4_redisc_fcf_table(phba);
+			if (!rc) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+						"3195 Rediscover FCF table\n");
+				phba->fcf.fcf_redisc_attempted = 1;
+				lpfc_sli4_clear_fcf_rr_bmask(phba);
+			} else {
+				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+						"3196 Rediscover FCF table "
+						"failed. Status:x%x\n", rc);
+			}
+		} else {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+					"3197 Already rediscover FCF table "
+					"attempted. No more retry\n");
+		}
 		goto stop_flogi_current_fcf;
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
@@ -2915,7 +2963,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
 void
 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
-	struct lpfc_dmabuf *dmabuf = mboxq->context1;
+	struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
 	struct lpfc_vport *vport = mboxq->vport;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
@@ -3008,7 +3056,7 @@ static void
 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct serv_parm *sp = &vport->fc_sparam;
@@ -3052,7 +3100,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		return;
 
 out:
-	pmb->context1 = NULL;
+	pmb->ctx_buf = NULL;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	lpfc_issue_clear_la(phba, vport);
@@ -3085,6 +3133,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 	case LPFC_LINK_SPEED_16GHZ:
 	case LPFC_LINK_SPEED_32GHZ:
 	case LPFC_LINK_SPEED_64GHZ:
+	case LPFC_LINK_SPEED_128GHZ:
 		break;
 	default:
 		phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
@@ -3190,7 +3239,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
 		rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
 		if (rc == MBX_NOT_FINISHED) {
-			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+			mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
 			mempool_free(sparam_mbox, phba->mbox_mem_pool);
@@ -3319,7 +3368,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_mbx_read_top *la;
 	struct lpfc_sli_ring *pring;
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
 	uint8_t attn_type;
 
 	/* Unblock ELS traffic */
@@ -3476,12 +3525,12 @@ void
 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
-	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	pmb->ctx_buf = NULL;
+	pmb->ctx_ndlp = NULL;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
 			 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
@@ -3689,8 +3738,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 	vport_buff = (uint8_t *) vport_info;
 	do {
 		/* free dma buffer from previous round */
-		if (pmb->context1) {
-			mp = (struct lpfc_dmabuf *)pmb->context1;
+		if (pmb->ctx_buf) {
+			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
 		}
@@ -3712,7 +3761,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 
 		if (phba->sli_rev == LPFC_SLI_REV4) {
 			byte_count = pmb->u.mqe.un.mb_words[5];
-			mp = (struct lpfc_dmabuf *)pmb->context1;
+			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 			if (byte_count > sizeof(struct static_vport_info) -
 					offset)
 				byte_count = sizeof(struct static_vport_info)
@@ -3777,8 +3826,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 out:
 	kfree(vport_info);
 	if (mbx_wait_rc != MBX_TIMEOUT) {
-		if (pmb->context1) {
-			mp = (struct lpfc_dmabuf *)pmb->context1;
+		if (pmb->ctx_buf) {
+			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
 		}
@@ -3799,13 +3848,13 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
 	struct lpfc_nodelist *ndlp;
 	struct Scsi_Host *shost;
 
-	ndlp = (struct lpfc_nodelist *) pmb->context2;
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+	pmb->ctx_ndlp = NULL;
+	pmb->ctx_buf = NULL;
 
 	if (mb->mbxStatus) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -3913,6 +3962,35 @@ lpfc_issue_gidft(struct lpfc_vport *vport)
 	return vport->gidft_inp;
 }
 
+/**
+ * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * This routine will issue a GID_PT to get a list of all N_Ports
+ *
+ * Return value :
+ *   0 - Failure to issue a GID_PT
+ *   1 - GID_PT issued
+ **/
+int
+lpfc_issue_gidpt(struct lpfc_vport *vport)
+{
+	/* Good status, issue CT Request to NameServer */
+	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
+		/* Cannot issue NameServer FCP Query, so finish up
+		 * discovery
+		 */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+				 "0606 %s Port TYPE %x %s\n",
+				 "Failed to issue GID_PT to ",
+				 GID_PT_N_PORT,
+				 "Finishing discovery.");
+		return 0;
+	}
+	vport->gidft_inp++;
+	return 1;
+}
+
 /*
  * This routine handles processing a NameServer REG_LOGIN mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -3923,12 +4001,12 @@ void
 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
-	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 	struct lpfc_vport *vport = pmb->vport;
 
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	pmb->ctx_buf = NULL;
+	pmb->ctx_ndlp = NULL;
 	vport->gidft_inp = 0;
 
 	if (mb->mbxStatus) {
@@ -4385,6 +4463,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	NLP_INT_NODE_ACT(ndlp);
 	atomic_set(&ndlp->cmd_pending, 0);
 	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+	ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
 }
 
 struct lpfc_nodelist *
@@ -4392,10 +4471,11 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		 int state)
 {
 	struct lpfc_hba *phba = vport->phba;
-	uint32_t did;
+	uint32_t did, flag;
 	unsigned long flags;
 	unsigned long *active_rrqs_xri_bitmap = NULL;
 	int rpi = LPFC_RPI_ALLOC_ERROR;
+	uint32_t defer_did = 0;
 
 	if (!ndlp)
 		return NULL;
@@ -4428,16 +4508,23 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			goto free_rpi;
 	}
 
-	/* Keep the original DID */
+	/* First preserve the orginal DID, xri_bitmap and some flags */
 	did = ndlp->nlp_DID;
+	flag = (ndlp->nlp_flag & NLP_UNREG_INP);
+	if (flag & NLP_UNREG_INP)
+		defer_did = ndlp->nlp_defer_did;
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
 
-	/* re-initialize ndlp except of ndlp linked list pointer */
+	/* Zero ndlp except of ndlp linked list pointer */
 	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
 		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
-	lpfc_initialize_node(vport, ndlp, did);
 
+	/* Next reinitialize and restore saved objects */
+	lpfc_initialize_node(vport, ndlp, did);
+	ndlp->nlp_flag |= flag;
+	if (flag & NLP_UNREG_INP)
+		ndlp->nlp_defer_did = defer_did;
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
 
@@ -4697,11 +4784,27 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_vport *vport = pmb->vport;
 	struct lpfc_nodelist *ndlp;
 
-	ndlp = (struct lpfc_nodelist *)(pmb->context1);
+	ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
 	if (!ndlp)
 		return;
 	lpfc_issue_els_logo(vport, ndlp, 0);
 	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* Check to see if there are any deferred events to process */
+	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+	    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "1434 UNREG cmpl deferred logo x%x "
+				 "on NPort x%x Data: x%x %p\n",
+				 ndlp->nlp_rpi, ndlp->nlp_DID,
+				 ndlp->nlp_defer_did, ndlp);
+
+		ndlp->nlp_flag &= ~NLP_UNREG_INP;
+		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+	} else {
+		ndlp->nlp_flag &= ~NLP_UNREG_INP;
+	}
 }
 
 /*
@@ -4730,6 +4833,21 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 				 "did x%x\n",
 				 ndlp->nlp_rpi, ndlp->nlp_flag,
 				 ndlp->nlp_DID);
+
+		/* If there is already an UNREG in progress for this ndlp,
+		 * no need to queue up another one.
+		 */
+		if (ndlp->nlp_flag & NLP_UNREG_INP) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "1436 unreg_rpi SKIP UNREG x%x on "
+					 "NPort x%x deferred x%x flg x%x "
+					 "Data: %p\n",
+					 ndlp->nlp_rpi, ndlp->nlp_DID,
+					 ndlp->nlp_defer_did,
+					 ndlp->nlp_flag, ndlp);
+			goto out;
+		}
+
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
 			/* SLI4 ports require the physical rpi value. */
@@ -4740,26 +4858,38 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
 			mbox->vport = vport;
 			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
-				mbox->context1 = ndlp;
+				mbox->ctx_ndlp = ndlp;
 				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
 			} else {
 				if (phba->sli_rev == LPFC_SLI_REV4 &&
 				    (!(vport->load_flag & FC_UNLOADING)) &&
 				    (bf_get(lpfc_sli_intf_if_type,
-				     &phba->sli4_hba.sli_intf) ==
+				     &phba->sli4_hba.sli_intf) >=
 				     LPFC_SLI_INTF_IF_TYPE_2) &&
 				    (kref_read(&ndlp->kref) > 0)) {
-					mbox->context1 = lpfc_nlp_get(ndlp);
+					mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 					mbox->mbox_cmpl =
 						lpfc_sli4_unreg_rpi_cmpl_clr;
 					/*
 					 * accept PLOGIs after unreg_rpi_cmpl
 					 */
 					acc_plogi = 0;
-				} else
+				} else {
+					mbox->ctx_ndlp = ndlp;
 					mbox->mbox_cmpl =
 						lpfc_sli_def_mbox_cmpl;
+				}
 			}
+			if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
+			    Fabric_DID_MASK) &&
+			    (!(vport->fc_flag & FC_OFFLINE_MODE)))
+				ndlp->nlp_flag |= NLP_UNREG_INP;
+
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "1433 unreg_rpi UNREG x%x on "
+					 "NPort x%x deferred flg x%x Data:%p\n",
+					 ndlp->nlp_rpi, ndlp->nlp_DID,
+					 ndlp->nlp_flag, ndlp);
 
 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 			if (rc == MBX_NOT_FINISHED) {
@@ -4768,7 +4898,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			}
 		}
 		lpfc_no_rpi(phba, ndlp);
-
+out:
 		if (phba->sli_rev != LPFC_SLI_REV4)
 			ndlp->nlp_rpi = 0;
 		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
@@ -4836,7 +4966,7 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
 			 mbox);
 	mbox->vport = vport;
 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	mbox->context1 = NULL;
+	mbox->ctx_ndlp = NULL;
 	rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 	if (rc != MBX_TIMEOUT)
 		mempool_free(mbox, phba->mbox_mem_pool);
@@ -4861,7 +4991,7 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
 			 mbox);
 	mbox->vport = vport;
 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	mbox->context1 = NULL;
+	mbox->ctx_ndlp = NULL;
 	rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 	if (rc != MBX_TIMEOUT)
 		mempool_free(mbox, phba->mbox_mem_pool);
@@ -4915,8 +5045,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if ((mb = phba->sli.mbox_active)) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
-		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-			mb->context2 = NULL;
+		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+			mb->ctx_ndlp = NULL;
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		}
 	}
@@ -4926,18 +5056,18 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
 		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
 			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
-			(ndlp != (struct lpfc_nodelist *) mb->context2))
+			(ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
 			continue;
 
-		mb->context2 = NULL;
+		mb->ctx_ndlp = NULL;
 		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 	}
 
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
-		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-			mp = (struct lpfc_dmabuf *) (mb->context1);
+		    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
@@ -5007,7 +5137,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
 			mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
 			mbox->vport = vport;
-			mbox->context2 = ndlp;
+			mbox->ctx_ndlp = ndlp;
 			rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 			if (rc == MBX_NOT_FINISHED) {
 				mempool_free(mbox, phba->mbox_mem_pool);
@@ -5772,12 +5902,12 @@ void
 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	MAILBOX_t *mb = &pmb->u.mb;
-	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
-	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 	struct lpfc_vport *vport = pmb->vport;
 
-	pmb->context1 = NULL;
-	pmb->context2 = NULL;
+	pmb->ctx_buf = NULL;
+	pmb->ctx_ndlp = NULL;
 
 	if (phba->sli_rev < LPFC_SLI_REV4)
 		ndlp->nlp_rpi = mb->un.varWords[0];
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 009aa0eee040..ec1227018913 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -115,6 +115,7 @@ struct lpfc_sli_ct_request {
 		uint32_t PortID;
 		struct gid {
 			uint8_t PortType;	/* for GID_PT requests */
+#define GID_PT_N_PORT	1
 			uint8_t DomainScope;
 			uint8_t AreaScope;
 			uint8_t Fc4Type;	/* for GID_FT requests */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bbd0a57e953f..c15b9b6fb840 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -197,6 +197,10 @@ struct lpfc_sli_intf {
 #define LPFC_FCP_SCHED_ROUND_ROBIN	0
 #define LPFC_FCP_SCHED_BY_CPU		1
 
+/* Algrithmns for NameServer Query after RSCN */
+#define LPFC_NS_QUERY_GID_FT	0
+#define LPFC_NS_QUERY_GID_PT	1
+
 /* Delay Multiplier constant */
 #define LPFC_DMULT_CONST	651042
 #define LPFC_DMULT_MAX		1023
@@ -1031,6 +1035,7 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS	0x21
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE		0x22
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK	0x23
+#define LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE		0x42
 
 /* Low level Opcodes */
 #define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION		0x37
@@ -2777,6 +2782,9 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_lnk_ldv_SHIFT		8
 #define lpfc_mbx_rd_conf_lnk_ldv_MASK		0x00000001
 #define lpfc_mbx_rd_conf_lnk_ldv_WORD		word2
+#define lpfc_mbx_rd_conf_trunk_SHIFT		12
+#define lpfc_mbx_rd_conf_trunk_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_trunk_WORD		word2
 #define lpfc_mbx_rd_conf_topology_SHIFT		24
 #define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
 #define lpfc_mbx_rd_conf_topology_WORD		word2
@@ -3512,6 +3520,15 @@ struct lpfc_mbx_set_host_data {
 	uint8_t data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
 };
 
+struct lpfc_mbx_set_trunk_mode {
+	struct mbox_header header;
+	uint32_t word0;
+#define lpfc_mbx_set_trunk_mode_WORD	word0
+#define lpfc_mbx_set_trunk_mode_SHIFT	0
+#define lpfc_mbx_set_trunk_mode_MASK	0xFF
+	uint32_t word1;
+	uint32_t word2;
+};
 
 struct lpfc_mbx_get_sli4_parameters {
 	struct mbox_header header;
@@ -3833,6 +3850,9 @@ struct lpfc_mbx_wr_object {
 #define lpfc_wr_object_eof_SHIFT		31
 #define lpfc_wr_object_eof_MASK			0x00000001
 #define lpfc_wr_object_eof_WORD			word4
+#define lpfc_wr_object_eas_SHIFT		29
+#define lpfc_wr_object_eas_MASK			0x00000001
+#define lpfc_wr_object_eas_WORD			word4
 #define lpfc_wr_object_write_length_SHIFT	0
 #define lpfc_wr_object_write_length_MASK	0x00FFFFFF
 #define lpfc_wr_object_write_length_WORD	word4
@@ -3843,6 +3863,15 @@ struct lpfc_mbx_wr_object {
 		} request;
 		struct {
 			uint32_t actual_write_length;
+			uint32_t word5;
+#define lpfc_wr_object_change_status_SHIFT	0
+#define lpfc_wr_object_change_status_MASK	0x000000FF
+#define lpfc_wr_object_change_status_WORD	word5
+#define LPFC_CHANGE_STATUS_NO_RESET_NEEDED	0x00
+#define LPFC_CHANGE_STATUS_PHYS_DEV_RESET	0x01
+#define LPFC_CHANGE_STATUS_FW_RESET		0x02
+#define LPFC_CHANGE_STATUS_PORT_MIGRATION	0x04
+#define LPFC_CHANGE_STATUS_PCI_RESET		0x05
 		} response;
 	} u;
 };
@@ -3911,6 +3940,7 @@ struct lpfc_mqe {
 		struct lpfc_mbx_set_feature set_feature;
 		struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
 		struct lpfc_mbx_set_host_data set_host_data;
+		struct lpfc_mbx_set_trunk_mode set_trunk_mode;
 		struct lpfc_mbx_nop nop;
 		struct lpfc_mbx_set_ras_fwlog ras_fwlog;
 	} un;
@@ -4047,6 +4077,23 @@ struct lpfc_acqe_grp5 {
 	uint32_t trailer;
 };
 
+static char *const trunk_errmsg[] = {	/* map errcode */
+	"",	/* There is no such error code at index 0*/
+	"link negotiated speed does not match existing"
+		" trunk - link was \"low\" speed",
+	"link negotiated speed does not match"
+		" existing trunk - link was \"middle\" speed",
+	"link negotiated speed does not match existing"
+		" trunk - link was \"high\" speed",
+	"Attached to non-trunking port - F_Port",
+	"Attached to non-trunking port - N_Port",
+	"FLOGI response timeout",
+	"non-FLOGI frame received",
+	"Invalid FLOGI response",
+	"Trunking initialization protocol",
+	"Trunk peer device mismatch",
+};
+
 struct lpfc_acqe_fc_la {
 	uint32_t word0;
 #define lpfc_acqe_fc_la_speed_SHIFT		24
@@ -4080,6 +4127,7 @@ struct lpfc_acqe_fc_la {
 #define LPFC_FC_LA_TYPE_MDS_LINK_DOWN	0x4
 #define LPFC_FC_LA_TYPE_MDS_LOOPBACK	0x5
 #define LPFC_FC_LA_TYPE_UNEXP_WWPN	0x6
+#define LPFC_FC_LA_TYPE_TRUNKING_EVENT	0x7
 #define lpfc_acqe_fc_la_port_type_SHIFT		6
 #define lpfc_acqe_fc_la_port_type_MASK		0x00000003
 #define lpfc_acqe_fc_la_port_type_WORD		word0
@@ -4088,6 +4136,32 @@ struct lpfc_acqe_fc_la {
 #define lpfc_acqe_fc_la_port_number_SHIFT	0
 #define lpfc_acqe_fc_la_port_number_MASK	0x0000003F
 #define lpfc_acqe_fc_la_port_number_WORD	word0
+
+/* Attention Type is 0x07 (Trunking Event) word0 */
+#define lpfc_acqe_fc_la_trunk_link_status_port0_SHIFT	16
+#define lpfc_acqe_fc_la_trunk_link_status_port0_MASK	0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port0_WORD	word0
+#define lpfc_acqe_fc_la_trunk_link_status_port1_SHIFT	17
+#define lpfc_acqe_fc_la_trunk_link_status_port1_MASK	0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port1_WORD	word0
+#define lpfc_acqe_fc_la_trunk_link_status_port2_SHIFT	18
+#define lpfc_acqe_fc_la_trunk_link_status_port2_MASK	0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port2_WORD	word0
+#define lpfc_acqe_fc_la_trunk_link_status_port3_SHIFT	19
+#define lpfc_acqe_fc_la_trunk_link_status_port3_MASK	0x0000001
+#define lpfc_acqe_fc_la_trunk_link_status_port3_WORD	word0
+#define lpfc_acqe_fc_la_trunk_config_port0_SHIFT	20
+#define lpfc_acqe_fc_la_trunk_config_port0_MASK		0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port0_WORD		word0
+#define lpfc_acqe_fc_la_trunk_config_port1_SHIFT	21
+#define lpfc_acqe_fc_la_trunk_config_port1_MASK		0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port1_WORD		word0
+#define lpfc_acqe_fc_la_trunk_config_port2_SHIFT	22
+#define lpfc_acqe_fc_la_trunk_config_port2_MASK		0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port2_WORD		word0
+#define lpfc_acqe_fc_la_trunk_config_port3_SHIFT	23
+#define lpfc_acqe_fc_la_trunk_config_port3_MASK		0x0000001
+#define lpfc_acqe_fc_la_trunk_config_port3_WORD		word0
 	uint32_t word1;
 #define lpfc_acqe_fc_la_llink_spd_SHIFT		16
 #define lpfc_acqe_fc_la_llink_spd_MASK		0x0000FFFF
@@ -4095,6 +4169,12 @@ struct lpfc_acqe_fc_la {
 #define lpfc_acqe_fc_la_fault_SHIFT		0
 #define lpfc_acqe_fc_la_fault_MASK		0x000000FF
 #define lpfc_acqe_fc_la_fault_WORD		word1
+#define lpfc_acqe_fc_la_trunk_fault_SHIFT	0
+#define lpfc_acqe_fc_la_trunk_fault_MASK	0x0000000F
+#define lpfc_acqe_fc_la_trunk_fault_WORD	word1
+#define lpfc_acqe_fc_la_trunk_linkmask_SHIFT	4
+#define lpfc_acqe_fc_la_trunk_linkmask_MASK	0x000000F
+#define lpfc_acqe_fc_la_trunk_linkmask_WORD	word1
 #define LPFC_FC_LA_FAULT_NONE		0x0
 #define LPFC_FC_LA_FAULT_LOCAL		0x1
 #define LPFC_FC_LA_FAULT_REMOTE		0x2
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 68d62d55a3a5..c1c36812c3d2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -447,19 +447,19 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 				"READ_SPARM mbxStatus x%x\n",
 				mb->mbxCommand, mb->mbxStatus);
 		phba->link_state = LPFC_HBA_ERROR;
-		mp = (struct lpfc_dmabuf *) pmb->context1;
+		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 		mempool_free(pmb, phba->mbox_mem_pool);
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		return -EIO;
 	}
 
-	mp = (struct lpfc_dmabuf *) pmb->context1;
+	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 
 	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
-	pmb->context1 = NULL;
+	pmb->ctx_buf = NULL;
 	lpfc_update_vport_wwn(vport);
 
 	/* Update the fc_host data structures with new wwn. */
@@ -1801,7 +1801,12 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
 	lpfc_offline(phba);
 	/* release interrupt for possible resource change */
 	lpfc_sli4_disable_intr(phba);
-	lpfc_sli_brdrestart(phba);
+	rc = lpfc_sli_brdrestart(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6309 Failed to restart board\n");
+		return rc;
+	}
 	/* request and enable interrupt */
 	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
 	if (intr_mode == LPFC_INTR_ERROR) {
@@ -4106,6 +4111,32 @@ finished:
 	return stat;
 }
 
+void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	fc_host_supported_speeds(shost) = 0;
+	if (phba->lmt & LMT_128Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
+	if (phba->lmt & LMT_64Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
+	if (phba->lmt & LMT_32Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
+	if (phba->lmt & LMT_16Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
+	if (phba->lmt & LMT_10Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
+	if (phba->lmt & LMT_8Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
+	if (phba->lmt & LMT_4Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
+	if (phba->lmt & LMT_2Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
+	if (phba->lmt & LMT_1Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
+}
+
 /**
  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
  * @shost: pointer to SCSI host data structure.
@@ -4133,23 +4164,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
 				      sizeof fc_host_symbolic_name(shost));
 
-	fc_host_supported_speeds(shost) = 0;
-	if (phba->lmt & LMT_64Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
-	if (phba->lmt & LMT_32Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
-	if (phba->lmt & LMT_16Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
-	if (phba->lmt & LMT_10Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
-	if (phba->lmt & LMT_8Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
-	if (phba->lmt & LMT_4Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
-	if (phba->lmt & LMT_2Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
-	if (phba->lmt & LMT_1Gb)
-		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
+	lpfc_host_supported_speeds_set(shost);
 
 	fc_host_maxframe_size(shost) =
 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
@@ -4467,6 +4482,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
 	case LPFC_FC_LA_SPEED_64G:
 		port_speed = 64000;
 		break;
+	case LPFC_FC_LA_SPEED_128G:
+		port_speed = 128000;
+		break;
 	default:
 		port_speed = 0;
 	}
@@ -4609,6 +4627,136 @@ out_free_pmb:
 }
 
 /**
+ * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
+ * topology.
+ * @phba: pointer to lpfc hba data structure.
+ * @evt_code: asynchronous event code.
+ * @speed_code: asynchronous event link speed code.
+ *
+ * This routine is to parse the giving SLI4 async event link speed code into
+ * value of Read topology link speed.
+ *
+ * Return: link speed in terms of Read topology.
+ **/
+static uint8_t
+lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
+{
+	uint8_t port_speed;
+
+	switch (speed_code) {
+	case LPFC_FC_LA_SPEED_1G:
+		port_speed = LPFC_LINK_SPEED_1GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_2G:
+		port_speed = LPFC_LINK_SPEED_2GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_4G:
+		port_speed = LPFC_LINK_SPEED_4GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_8G:
+		port_speed = LPFC_LINK_SPEED_8GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_16G:
+		port_speed = LPFC_LINK_SPEED_16GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_32G:
+		port_speed = LPFC_LINK_SPEED_32GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_64G:
+		port_speed = LPFC_LINK_SPEED_64GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_128G:
+		port_speed = LPFC_LINK_SPEED_128GHZ;
+		break;
+	case LPFC_FC_LA_SPEED_256G:
+		port_speed = LPFC_LINK_SPEED_256GHZ;
+		break;
+	default:
+		port_speed = 0;
+		break;
+	}
+
+	return port_speed;
+}
+
+#define trunk_link_status(__idx)\
+	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
+	       ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
+		 "Link up" : "Link down") : "NA"
+/* Did port __idx reported an error */
+#define trunk_port_fault(__idx)\
+	bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
+	       (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
+
+static void
+lpfc_update_trunk_link_status(struct lpfc_hba *phba,
+			      struct lpfc_acqe_fc_la *acqe_fc)
+{
+	uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
+	uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
+
+	phba->sli4_hba.link_state.speed =
+		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
+				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
+
+	phba->sli4_hba.link_state.logical_speed =
+				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
+	/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
+	phba->fc_linkspeed =
+		 lpfc_async_link_speed_to_read_top(
+				phba,
+				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
+
+	if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
+		phba->trunk_link.link0.state =
+			bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
+			? LPFC_LINK_UP : LPFC_LINK_DOWN;
+		phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
+	}
+	if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
+		phba->trunk_link.link1.state =
+			bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
+			? LPFC_LINK_UP : LPFC_LINK_DOWN;
+		phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
+	}
+	if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
+		phba->trunk_link.link2.state =
+			bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
+			? LPFC_LINK_UP : LPFC_LINK_DOWN;
+		phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
+	}
+	if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
+		phba->trunk_link.link3.state =
+			bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
+			? LPFC_LINK_UP : LPFC_LINK_DOWN;
+		phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2910 Async FC Trunking Event - Speed:%d\n"
+			"\tLogical speed:%d "
+			"port0: %s port1: %s port2: %s port3: %s\n",
+			phba->sli4_hba.link_state.speed,
+			phba->sli4_hba.link_state.logical_speed,
+			trunk_link_status(0), trunk_link_status(1),
+			trunk_link_status(2), trunk_link_status(3));
+
+	if (port_fault)
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3202 trunk error:0x%x (%s) seen on port0:%s "
+				/*
+				 * SLI-4: We have only 0xA error codes
+				 * defined as of now. print an appropriate
+				 * message in case driver needs to be updated.
+				 */
+				"port1:%s port2:%s port3:%s\n", err, err > 0xA ?
+				"UNDEFINED. update driver." : trunk_errmsg[err],
+				trunk_port_fault(0), trunk_port_fault(1),
+				trunk_port_fault(2), trunk_port_fault(3));
+}
+
+
+/**
  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_fc: pointer to the async fc completion queue entry.
@@ -4633,6 +4781,13 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
 			bf_get(lpfc_trailer_type, acqe_fc));
 		return;
 	}
+
+	if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
+	    LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
+		lpfc_update_trunk_link_status(phba, acqe_fc);
+		return;
+	}
+
 	/* Keep the link status for extra SLI4 state machine reference */
 	phba->sli4_hba.link_state.speed =
 		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
@@ -4762,6 +4917,8 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
 	struct temp_event temp_event_data;
 	struct lpfc_acqe_misconfigured_event *misconfigured;
 	struct Scsi_Host *shost;
+	struct lpfc_vport **vports;
+	int rc, i;
 
 	evt_type = bf_get(lpfc_trailer_type, acqe_sli);
 
@@ -4887,6 +5044,25 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
 		sprintf(message, "Unknown event status x%02x", status);
 		break;
 	}
+
+	/* Issue READ_CONFIG mbox command to refresh supported speeds */
+	rc = lpfc_sli4_read_config(phba);
+	if (rc) {
+		phba->lmt = 0;
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3194 Unable to retrieve supported "
+				"speeds, rc = 0x%x\n", rc);
+	}
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+				i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			lpfc_host_supported_speeds_set(shost);
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+
 	phba->sli4_hba.lnk_info.optic_state = status;
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"3176 Port Name %c %s\n", port_name, message);
@@ -5044,7 +5220,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 			break;
 		}
 		/* If fast FCF failover rescan event is pending, do nothing */
-		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
@@ -7181,26 +7357,19 @@ lpfc_post_init_setup(struct lpfc_hba *phba)
 static int
 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
 {
-	struct pci_dev *pdev;
+	struct pci_dev *pdev = phba->pcidev;
 	unsigned long bar0map_len, bar2map_len;
 	int i, hbq_count;
 	void *ptr;
 	int error = -ENODEV;
 
-	/* Obtain PCI device reference */
-	if (!phba->pcidev)
+	if (!pdev)
 		return error;
-	else
-		pdev = phba->pcidev;
 
 	/* Set the device DMA mask size */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
-	    || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
-		    || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
-			return error;
-		}
-	}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+		return error;
 
 	/* Get the bus address of Bar0 and Bar2 and the number of bytes
 	 * required by each mapping.
@@ -7779,6 +7948,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 		phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
 	}
 
+	phba->sli4_hba.conf_trunk =
+			bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
 	phba->sli4_hba.extents_in_use =
 		bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
 	phba->sli4_hba.max_cfg_param.max_xri =
@@ -7787,6 +7958,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 		bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
 	phba->sli4_hba.max_cfg_param.max_vpi =
 		bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+	/* Limit the max we support */
+	if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
+		phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
 	phba->sli4_hba.max_cfg_param.vpi_base =
 		bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
 	phba->sli4_hba.max_cfg_param.max_rpi =
@@ -9562,25 +9736,18 @@ out:
 static int
 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 {
-	struct pci_dev *pdev;
+	struct pci_dev *pdev = phba->pcidev;
 	unsigned long bar0map_len, bar1map_len, bar2map_len;
 	int error = -ENODEV;
 	uint32_t if_type;
 
-	/* Obtain PCI device reference */
-	if (!phba->pcidev)
+	if (!pdev)
 		return error;
-	else
-		pdev = phba->pcidev;
 
 	/* Set the device DMA mask size */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
-	    || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
-		    || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
-			return error;
-		}
-	}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+		return error;
 
 	/*
 	 * The BARs and register set definitions and offset locations are
@@ -10523,12 +10690,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 	kthread_stop(phba->worker_thread);
 
 	/* Disable FW logging to host memory */
-	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
-	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
-
-	/* Free RAS DMA memory */
-	if (phba->ras_fwlog.ras_enabled == true)
-		lpfc_sli4_ras_dma_free(phba);
+	lpfc_ras_stop_fwlog(phba);
 
 	/* Unset the queues shared with the hardware then release all
 	 * allocated resources.
@@ -10539,6 +10701,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 	/* Reset SLI4 HBA FCoE function */
 	lpfc_pci_function_reset(phba);
 
+	/* Free RAS DMA memory */
+	if (phba->ras_fwlog.ras_enabled)
+		lpfc_sli4_ras_dma_free(phba);
+
 	/* Stop the SLI4 device port */
 	phba->pport->work_port_events = 0;
 }
@@ -12476,7 +12642,8 @@ lpfc_sli4_ras_init(struct lpfc_hba *phba)
 	case PCI_DEVICE_ID_LANCER_G6_FC:
 	case PCI_DEVICE_ID_LANCER_G7_FC:
 		phba->ras_fwlog.ras_hwsupport = true;
-		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn))
+		if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
+		    phba->cfg_ras_fwlog_buffsize)
 			phba->ras_fwlog.ras_enabled = true;
 		else
 			phba->ras_fwlog.ras_enabled = false;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index deb094fdbb79..f6a5083a621e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -94,7 +94,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
 	memset(mp->virt, 0, LPFC_BPL_SIZE);
 	INIT_LIST_HEAD(&mp->list);
 	/* save address for completion */
-	pmb->context1 = (uint8_t *)mp;
+	pmb->ctx_buf = (uint8_t *)mp;
 	mb->un.varWords[3] = putPaddrLow(mp->phys);
 	mb->un.varWords[4] = putPaddrHigh(mp->phys);
 	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -139,7 +139,7 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
 	void *ctx;
 
 	mb = &pmb->u.mb;
-	ctx = pmb->context2;
+	ctx = pmb->ctx_buf;
 
 	/* Setup to dump VPD region */
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -151,7 +151,7 @@ lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
 	mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
 	mb->un.varDmp.co = 0;
 	mb->un.varDmp.resp_offset = 0;
-	pmb->context2 = ctx;
+	pmb->ctx_buf = ctx;
 	mb->mbxOwner = OWN_HOST;
 	return;
 }
@@ -172,7 +172,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	mb = &pmb->u.mb;
 	/* Save context so that we can restore after memset */
-	ctx = pmb->context2;
+	ctx = pmb->ctx_buf;
 
 	/* Setup to dump VPD region */
 	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
@@ -186,7 +186,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
 	mb->un.varDmp.co = 0;
 	mb->un.varDmp.resp_offset = 0;
-	pmb->context2 = ctx;
+	pmb->ctx_buf = ctx;
 	return;
 }
 
@@ -304,7 +304,7 @@ lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
 	/* Save address for later completion and set the owner to host so that
 	 * the FW knows this mailbox is available for processing.
 	 */
-	pmb->context1 = (uint8_t *)mp;
+	pmb->ctx_buf = (uint8_t *)mp;
 	mb->mbxOwner = OWN_HOST;
 	return (0);
 }
@@ -513,9 +513,9 @@ lpfc_init_link(struct lpfc_hba * phba,
 		break;
 	}
 
-	if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
-	    mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
-		/* Failover is not tried for Lancer G6 */
+	if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+	     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+	    mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
 		phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
 	}
@@ -631,7 +631,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
631 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi]; 631 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
632 632
633 /* save address for completion */ 633 /* save address for completion */
634 pmb->context1 = mp; 634 pmb->ctx_buf = mp;
635 635
636 return (0); 636 return (0);
637} 637}
@@ -783,7 +783,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
783 memcpy(sparam, param, sizeof (struct serv_parm)); 783 memcpy(sparam, param, sizeof (struct serv_parm));
784 784
785 /* save address for completion */ 785 /* save address for completion */
786 pmb->context1 = (uint8_t *) mp; 786 pmb->ctx_buf = (uint8_t *)mp;
787 787
788 mb->mbxCommand = MBX_REG_LOGIN64; 788 mb->mbxCommand = MBX_REG_LOGIN64;
789 mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 789 mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
@@ -858,7 +858,7 @@ lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
858 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000; 858 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
859 mbox->vport = vport; 859 mbox->vport = vport;
860 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 860 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
861 mbox->context1 = NULL; 861 mbox->ctx_ndlp = NULL;
862 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 862 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
863 if (rc == MBX_NOT_FINISHED) 863 if (rc == MBX_NOT_FINISHED)
864 mempool_free(mbox, phba->mbox_mem_pool); 864 mempool_free(mbox, phba->mbox_mem_pool);
@@ -2288,7 +2288,7 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2288 INIT_LIST_HEAD(&mp->list); 2288 INIT_LIST_HEAD(&mp->list);
2289 2289
2290 /* save address for completion */ 2290 /* save address for completion */
2291 mbox->context1 = (uint8_t *) mp; 2291 mbox->ctx_buf = (uint8_t *)mp;
2292 2292
2293 mb->mbxCommand = MBX_DUMP_MEMORY; 2293 mb->mbxCommand = MBX_DUMP_MEMORY;
2294 mb->un.varDmp.type = DMP_NV_PARAMS; 2294 mb->un.varDmp.type = DMP_NV_PARAMS;
@@ -2305,7 +2305,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2305 MAILBOX_t *mb; 2305 MAILBOX_t *mb;
2306 int rc = FAILURE; 2306 int rc = FAILURE;
2307 struct lpfc_rdp_context *rdp_context = 2307 struct lpfc_rdp_context *rdp_context =
2308 (struct lpfc_rdp_context *)(mboxq->context2); 2308 (struct lpfc_rdp_context *)(mboxq->ctx_ndlp);
2309 2309
2310 mb = &mboxq->u.mb; 2310 mb = &mboxq->u.mb;
2311 if (mb->mbxStatus) 2311 if (mb->mbxStatus)
@@ -2323,9 +2323,9 @@ mbx_failed:
2323static void 2323static void
2324lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 2324lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2325{ 2325{
2326 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1; 2326 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
2327 struct lpfc_rdp_context *rdp_context = 2327 struct lpfc_rdp_context *rdp_context =
2328 (struct lpfc_rdp_context *)(mbox->context2); 2328 (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
2329 2329
2330 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) 2330 if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
2331 goto error_mbuf_free; 2331 goto error_mbuf_free;
@@ -2341,7 +2341,7 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2341 lpfc_read_lnk_stat(phba, mbox); 2341 lpfc_read_lnk_stat(phba, mbox);
2342 mbox->vport = rdp_context->ndlp->vport; 2342 mbox->vport = rdp_context->ndlp->vport;
2343 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat; 2343 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
2344 mbox->context2 = (struct lpfc_rdp_context *) rdp_context; 2344 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
2345 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) 2345 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
2346 goto error_cmd_free; 2346 goto error_cmd_free;
2347 2347
@@ -2359,9 +2359,9 @@ void
2359lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 2359lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2360{ 2360{
2361 int rc; 2361 int rc;
2362 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (mbox->context1); 2362 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
2363 struct lpfc_rdp_context *rdp_context = 2363 struct lpfc_rdp_context *rdp_context =
2364 (struct lpfc_rdp_context *)(mbox->context2); 2364 (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
2365 2365
2366 if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) 2366 if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
2367 goto error; 2367 goto error;
@@ -2375,7 +2375,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2375 INIT_LIST_HEAD(&mp->list); 2375 INIT_LIST_HEAD(&mp->list);
2376 2376
2377 /* save address for completion */ 2377 /* save address for completion */
2378 mbox->context1 = mp; 2378 mbox->ctx_buf = mp;
2379 mbox->vport = rdp_context->ndlp->vport; 2379 mbox->vport = rdp_context->ndlp->vport;
2380 2380
2381 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); 2381 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
@@ -2391,7 +2391,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2391 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); 2391 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
2392 2392
2393 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2; 2393 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
2394 mbox->context2 = (struct lpfc_rdp_context *) rdp_context; 2394 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
2395 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 2395 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2396 if (rc == MBX_NOT_FINISHED) 2396 if (rc == MBX_NOT_FINISHED)
2397 goto error; 2397 goto error;
@@ -2436,7 +2436,7 @@ lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2436 2436
2437 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); 2437 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
2438 /* save address for completion */ 2438 /* save address for completion */
2439 mbox->context1 = mp; 2439 mbox->ctx_buf = mp;
2440 2440
2441 bf_set(lpfc_mbx_memory_dump_type3_type, 2441 bf_set(lpfc_mbx_memory_dump_type3_type,
2442 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); 2442 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
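
The dominant change in this file is mechanical: the opaque context1/context2
mailbox pointers become named fields, so callers no longer have to remember
which slot holds the DMA buffer and which holds the node or caller context.
A minimal sketch of the idea, assuming simplified types (the real
LPFC_MBOXQ_t carries many more members, and both fields stay void * so the
RDP paths can park an lpfc_rdp_context in ctx_ndlp):

	/* Sketch only -- not the actual lpfc definition. */
	struct lpfc_mboxq_sketch {
		void *ctx_buf;	/* DMA buffer to free on completion */
		void *ctx_ndlp;	/* node (or other caller context) */
	};

	/* Before: pmb->context1 = mp;  pmb->context2 = ndlp; */
	/* After:  pmb->ctx_buf = mp;   pmb->ctx_ndlp = ndlp; */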
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 9c22a2c93462..66191fa35f63 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -330,7 +330,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
 
 	/* Free memory used in mailbox queue back to mailbox memory pool */
 	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
-		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
 		if (mp) {
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
@@ -340,7 +340,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
 	}
 	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
 	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
-		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
 		if (mp) {
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
@@ -354,7 +354,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 	if (psli->mbox_active) {
 		mbox = psli->mbox_active;
-		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
 		if (mp) {
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 269808e8480f..96bc3789a166 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -467,7 +467,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	 */
 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
 	/*
-	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
+	 * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
 	 * command issued in lpfc_cmpl_els_acc().
 	 */
 	mbox->vport = vport;
@@ -535,8 +535,8 @@ lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	struct lpfc_nodelist *ndlp;
 	uint32_t cmd;
 
-	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
-	ndlp = (struct lpfc_nodelist *) mboxq->context2;
+	elsiocb = (struct lpfc_iocbq *)mboxq->ctx_buf;
+	ndlp = (struct lpfc_nodelist *)mboxq->ctx_ndlp;
 	vport = mboxq->vport;
 	cmd = elsiocb->drvrTimeout;
 
@@ -836,7 +836,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		spin_unlock_irq(shost->host_lock);
 		return 0;
 	}
 
@@ -851,7 +853,10 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			return 1;
 		}
 	}
+
+	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+	spin_unlock_irq(shost->host_lock);
 	lpfc_unreg_rpi(vport, ndlp);
 	return 0;
 }
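
The two hunks above close a small race: clearing a bit in nlp_flag is a
read-modify-write, so it must run under the same Scsi_Host lock that the
other nlp_flag writers take. The pattern, as a minimal sketch (lpfc types
assumed, surrounding driver context elided):

	/* Sketch: without the lock, two CPUs doing flags &= ~A and
	 * flags |= B can lose one of the updates; under it, each
	 * read-modify-write is atomic with respect to the other.
	 */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);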
@@ -866,13 +871,26 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  * to release a rpi.
  **/
 void
-lpfc_release_rpi(struct lpfc_hba *phba,
-		 struct lpfc_vport *vport,
-		 uint16_t rpi)
+lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
+		 struct lpfc_nodelist *ndlp, uint16_t rpi)
 {
 	LPFC_MBOXQ_t *pmb;
 	int rc;
 
+	/* If there is already an UNREG in progress for this ndlp,
+	 * no need to queue up another one.
+	 */
+	if (ndlp->nlp_flag & NLP_UNREG_INP) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "1435 release_rpi SKIP UNREG x%x on "
+				 "NPort x%x deferred x%x flg x%x "
+				 "Data: %p\n",
+				 ndlp->nlp_rpi, ndlp->nlp_DID,
+				 ndlp->nlp_defer_did,
+				 ndlp->nlp_flag, ndlp);
+		return;
+	}
+
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
 			GFP_KERNEL);
 	if (!pmb)
@@ -881,6 +899,18 @@ lpfc_release_rpi(struct lpfc_hba *phba,
 	else {
 		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		pmb->vport = vport;
+		pmb->ctx_ndlp = ndlp;
+
+		if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+		    (!(vport->fc_flag & FC_OFFLINE_MODE)))
+			ndlp->nlp_flag |= NLP_UNREG_INP;
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "1437 release_rpi UNREG x%x "
+				 "on NPort x%x flg x%x\n",
+				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
+
 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 		if (rc == MBX_NOT_FINISHED)
 			mempool_free(pmb, phba->mbox_mem_pool);
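
lpfc_release_rpi() now takes the node so it can serialize unregistration:
NLP_UNREG_INP marks "an UNREG_LOGIN mailbox is in flight for this RPI", and
a second request while the flag is set is simply dropped. A control-flow
sketch of the idea (not the full function; the completion side that clears
the flag appears later, in the lpfc_sli.c hunks):

	/* Sketch: at most one UNREG_LOGIN outstanding per node. */
	if (ndlp->nlp_flag & NLP_UNREG_INP)
		return;				/* already queued */

	ndlp->nlp_flag |= NLP_UNREG_INP;	/* set before issuing */
	/* ... issue MBX_UNREG_LOGIN; the mailbox completion clears
	 * NLP_UNREG_INP and, if a DID was deferred meanwhile, kicks
	 * off a fresh PLOGI to it.
	 */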
@@ -901,7 +931,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	    (evt == NLP_EVT_CMPL_REG_LOGIN) &&
 	    (!pmb->u.mb.mbxStatus)) {
 		rpi = pmb->u.mb.un.varWords[0];
-		lpfc_release_rpi(phba, vport, rpi);
+		lpfc_release_rpi(phba, vport, ndlp, rpi);
 	}
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
 			 "0271 Illegal State Transition: node x%x "
@@ -1253,7 +1283,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 		ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
 		mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
 	}
-	mbox->context2 = lpfc_nlp_get(ndlp);
+	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
 	mbox->vport = vport;
 	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
 	    != MBX_NOT_FINISHED) {
@@ -1267,7 +1297,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 	 * command
 	 */
 	lpfc_nlp_put(ndlp);
-	mp = (struct lpfc_dmabuf *) mbox->context1;
+	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	mempool_free(mbox, phba->mbox_mem_pool);
@@ -1329,7 +1359,7 @@ lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
 	    !mb->mbxStatus) {
 		rpi = pmb->u.mb.un.varWords[0];
-		lpfc_release_rpi(phba, vport, rpi);
+		lpfc_release_rpi(phba, vport, ndlp, rpi);
 	}
 	return ndlp->nlp_state;
 }
@@ -1636,10 +1666,10 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
 	if ((mb = phba->sli.mbox_active)) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
-		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
 			lpfc_nlp_put(ndlp);
-			mb->context2 = NULL;
+			mb->ctx_ndlp = NULL;
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 		}
 	}
@@ -1647,8 +1677,8 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
-		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-			mp = (struct lpfc_dmabuf *) (mb->context1);
+		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
+			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
@@ -1770,9 +1800,16 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
 
 	} else if (ndlp->nlp_fc4_type == 0) {
-		rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
-				 0, ndlp->nlp_DID);
-		return ndlp->nlp_state;
+		/* If we are only configured for FCP, the driver
+		 * should just issue PRLI for FCP. Otherwise issue
+		 * GFT_ID to determine if remote port supports NVME.
+		 */
+		if (phba->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
+			rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
+					 0, ndlp->nlp_DID);
+			return ndlp->nlp_state;
+		}
+		ndlp->nlp_fc4_type = NLP_FC4_FCP;
 	}
 
 	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
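
The GFT_ID change above is a fast path, not new discovery logic: a name
server query can only tell the driver whether the remote port also speaks
NVME, so when the HBA is configured FCP-only the answer cannot matter and
the node is typed FCP on the spot. A decision sketch (issue_gft_id is a
hypothetical stand-in for the lpfc_ns_cmd() call):

	/* Sketch of the fc4-type decision after REG_LOGIN completes. */
	if (ndlp->nlp_fc4_type == 0) {
		if (phba->cfg_enable_fc4_type != LPFC_ENABLE_FCP)
			return issue_gft_id(vport, ndlp); /* may find NVME */
		ndlp->nlp_fc4_type = NLP_FC4_FCP; /* FCP-only: skip query */
	}
	/* fall through to PRLI with the type(s) now known */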
@@ -2863,8 +2900,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 			 "0211 DSM in event x%x on NPort x%x in "
-			 "state %d Data: x%x\n",
-			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+			 "state %d rpi x%x Data: x%x x%x\n",
+			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
+			 ndlp->nlp_flag, ndlp->nlp_fc4_type);
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
 		 "DSM in: evt:%d ste:%d did:x%x",
@@ -2876,8 +2914,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	/* DSM out state <rc> on NPort <nlp_DID> */
 	if (got_ndlp) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
-			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
+			 "0212 DSM out state %d on NPort x%x "
+			 "rpi x%x Data: x%x\n",
+			 rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag);
 
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
 			"DSM out: ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index ba831def9301..4c66b19e6199 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1855,7 +1855,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
 
 	/* word 7 */
-	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
 	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
 	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
 	       nvmereq_wqe->iocb.ulpClass);
@@ -1870,7 +1869,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 			 abts_buf->iotag);
 
 	/* word 10 */
-	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx);
 	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
 	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
 
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index baed2b891efb..b4f1a840b3b4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2734,6 +2734,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
 	int prot_group_type = 0;
 	int fcpdl;
+	struct lpfc_vport *vport = phba->pport;
 
 	/*
 	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2839,6 +2840,14 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
 	 */
 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
 
+	/*
+	 * For First burst, we may need to adjust the initial transfer
+	 * length for DIF
+	 */
+	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+	    (fcpdl < vport->cfg_first_burst_size))
+		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
 	return 0;
 err:
 	if (lpfc_cmd->seg_cnt)
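
The clamp matters because fcpdl is the DIF-adjusted wire length, which can
sit below the configured first-burst size: with 512-byte sectors and 8 bytes
of protection data per block, a 4 KB write becomes 4160 bytes on the wire,
so an 8 KB first-burst setting would otherwise advertise more data than the
command will ever send. A sketch of the adjustment with illustrative
numbers:

	/* Sketch: fcpi_XRdy != 0 means first burst is in use. */
	u32 fcpdl = 4096 + (4096 / 512) * 8;	/* 4160: data + DIF */

	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
	    fcpdl < vport->cfg_first_burst_size)	/* e.g. 8192 */
		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;	/* never overshoot */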
@@ -3403,6 +3412,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
 	int prot_group_type = 0;
 	int fcpdl;
+	struct lpfc_vport *vport = phba->pport;
 
 	/*
 	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
@@ -3519,6 +3529,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
 
 	/*
+	 * For First burst, we may need to adjust the initial transfer
+	 * length for DIF
+	 */
+	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+	    (fcpdl < vport->cfg_first_burst_size))
+		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
+	/*
 	 * If the OAS driver feature is enabled and the lun is enabled for
 	 * OAS, set the oas iocb related flags.
 	 */
@@ -4163,7 +4181,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	/* If pCmd was set to NULL from abort path, do not call scsi_done */
 	if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
-				 "0711 FCP cmd already NULL, sid: 0x%06x, "
+				 "5688 FCP cmd already NULL, sid: 0x%06x, "
 				 "did: 0x%06x, oxid: 0x%04x\n",
 				 vport->fc_myDID,
 				 (pnode) ? pnode->nlp_DID : 0,
@@ -4442,6 +4460,66 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
 }
 
 /**
+ * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
+ *			       if issuing a pci_bus_reset is possibly unsafe
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Walks the bus_list to ensure only PCI devices with Emulex
+ * vendor id, device ids that support hot reset, and only one occurrence
+ * of function 0.
+ *
+ * Returns:
+ * -EBADSLT, detected invalid device
+ * 0, successful
+ */
+int
+lpfc_check_pci_resettable(const struct lpfc_hba *phba)
+{
+	const struct pci_dev *pdev = phba->pcidev;
+	struct pci_dev *ptr = NULL;
+	u8 counter = 0;
+
+	/* Walk the list of devices on the pci_dev's bus */
+	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
+		/* Check for Emulex Vendor ID */
+		if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"8346 Non-Emulex vendor found: "
+					"0x%04x\n", ptr->vendor);
+			return -EBADSLT;
+		}
+
+		/* Check for valid Emulex Device ID */
+		switch (ptr->device) {
+		case PCI_DEVICE_ID_LANCER_FC:
+		case PCI_DEVICE_ID_LANCER_G6_FC:
+		case PCI_DEVICE_ID_LANCER_G7_FC:
+			break;
+		default:
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"8347 Invalid device found: "
+					"0x%04x\n", ptr->device);
+			return -EBADSLT;
+		}
+
+		/* Check for only one function 0 ID to ensure only one HBA on
+		 * secondary bus
+		 */
+		if (ptr->devfn == 0) {
+			if (++counter > 1) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+						"8348 More than one device on "
+						"secondary bus found\n");
+				return -EBADSLT;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
  * lpfc_info - Info entry point of scsi_host_template data structure
  * @host: The scsi host for which this call is being executed.
  *
@@ -4455,32 +4533,53 @@ lpfc_info(struct Scsi_Host *host)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
 	struct lpfc_hba *phba = vport->phba;
-	int len, link_speed = 0;
+	int link_speed = 0;
 	static char lpfcinfobuf[384];
+	char tmp[384] = {0};
 
-	memset(lpfcinfobuf,0,384);
+	memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
 	if (phba && phba->pcidev){
-		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
-		len = strlen(lpfcinfobuf);
-		snprintf(lpfcinfobuf + len,
-			384-len,
-			" on PCI bus %02x device %02x irq %d",
-			phba->pcidev->bus->number,
-			phba->pcidev->devfn,
-			phba->pcidev->irq);
-		len = strlen(lpfcinfobuf);
+		/* Model Description */
+		scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
+		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+		    sizeof(lpfcinfobuf))
+			goto buffer_done;
+
+		/* PCI Info */
+		scnprintf(tmp, sizeof(tmp),
+			  " on PCI bus %02x device %02x irq %d",
+			  phba->pcidev->bus->number, phba->pcidev->devfn,
+			  phba->pcidev->irq);
+		if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+		    sizeof(lpfcinfobuf))
+			goto buffer_done;
+
+		/* Port Number */
 		if (phba->Port[0]) {
-			snprintf(lpfcinfobuf + len,
-				 384-len,
-				 " port %s",
-				 phba->Port);
+			scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
+			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+			    sizeof(lpfcinfobuf))
+				goto buffer_done;
 		}
-		len = strlen(lpfcinfobuf);
+
+		/* Link Speed */
 		link_speed = lpfc_sli_port_speed_get(phba);
-		if (link_speed != 0)
-			snprintf(lpfcinfobuf + len, 384-len,
-				 " Logical Link Speed: %d Mbps", link_speed);
+		if (link_speed != 0) {
+			scnprintf(tmp, sizeof(tmp),
+				  " Logical Link Speed: %d Mbps", link_speed);
+			if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+			    sizeof(lpfcinfobuf))
+				goto buffer_done;
+		}
+
+		/* PCI resettable */
+		if (!lpfc_check_pci_resettable(phba)) {
+			scnprintf(tmp, sizeof(tmp), " PCI resettable");
+			strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
+		}
 	}
+
+buffer_done:
 	return lpfcinfobuf;
 }
 
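
Two things are worth noting in the lpfc_info() rework. First, the pattern:
each piece is formatted into a scratch buffer with scnprintf() and appended
with strlcat(), whose return value is the length it tried to create, so a
result >= the destination size cleanly signals truncation without any manual
length bookkeeping. A standalone sketch of the idiom, under those
assumptions:

	/* Sketch: bounded append; bail out once the buffer is full. */
	static void append_speed(char *buf, size_t len, int speed)
	{
		char tmp[64];

		scnprintf(tmp, sizeof(tmp),
			  " Logical Link Speed: %d Mbps", speed);
		if (strlcat(buf, tmp, len) >= len)
			return;	/* truncated; skip further pieces */
	}

Second, the Model Description line passes phba->ModelDesc directly as the
scnprintf() format string; since the string is driver-generated this works,
but scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc) would be the more
defensive spelling.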
@@ -6036,7 +6135,6 @@ struct scsi_host_template lpfc_template_nvme = {
 	.this_id		= -1,
 	.sg_tablesize		= 1,
 	.cmd_per_lun		= 1,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_hba_attrs,
 	.max_sectors		= 0xFFFF,
 	.vendor_id		= LPFC_NL_VENDOR_ID,
@@ -6061,7 +6159,6 @@ struct scsi_host_template lpfc_template_no_hr = {
 	.this_id		= -1,
 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_hba_attrs,
 	.max_sectors		= 0xFFFF,
 	.vendor_id		= LPFC_NL_VENDOR_ID,
@@ -6088,7 +6185,6 @@ struct scsi_host_template lpfc_template = {
 	.this_id		= -1,
 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_hba_attrs,
 	.max_sectors		= 0xFFFF,
 	.vendor_id		= LPFC_NL_VENDOR_ID,
@@ -6113,7 +6209,6 @@ struct scsi_host_template lpfc_vport_template = {
 	.this_id		= -1,
 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
-	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_vport_attrs,
 	.max_sectors		= 0xFFFF,
 	.change_queue_depth	= scsi_change_queue_depth,
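
The .use_clustering removals fall out of the DMA-layer rework: the
segment-merging policy moved out of scsi_host_template, with clustering as
the default, so templates simply stop mentioning it. A hedged sketch of what
a template looks like after the change (illustrative fields only; drivers
that must forbid merging now express that through queue/DMA limits rather
than a template flag):

	/* Sketch: no .use_clustering member anymore. */
	static struct scsi_host_template example_tmpl = {
		.module		= THIS_MODULE,
		.name		= "example",
		.this_id	= -1,
		.sg_tablesize	= 64,
		.cmd_per_lun	= 3,
		.max_sectors	= 0xFFFF,
	};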
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index cc99859774ff..b759b089432c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -194,6 +194,10 @@ struct lpfc_scsi_buf {
 #define NO_MORE_OAS_LUN		-1
 #define NOT_OAS_ENABLED_LUN	NO_MORE_OAS_LUN
 
+#ifndef FC_PORTSPEED_128GBIT
+#define FC_PORTSPEED_128GBIT	0x2000
+#endif
+
 #define TXRDY_PAYLOAD_LEN	12
 
 int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index b9e5cd79931a..30734caf77e1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2456,7 +2456,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	uint16_t rpi, vpi;
 	int rc;
 
-	mp = (struct lpfc_dmabuf *) (pmb->context1);
+	mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
 
 	if (mp) {
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -2491,9 +2491,35 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	}
 
 	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
-		ndlp = (struct lpfc_nodelist *)pmb->context2;
+		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 		lpfc_nlp_put(ndlp);
-		pmb->context2 = NULL;
+		pmb->ctx_buf = NULL;
+		pmb->ctx_ndlp = NULL;
+	}
+
+	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
+		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+
+		/* Check to see if there are any deferred events to process */
+		if (ndlp) {
+			lpfc_printf_vlog(
+				vport,
+				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
+				"1438 UNREG cmpl deferred mbox x%x "
+				"on NPort x%x Data: x%x x%x %p\n",
+				ndlp->nlp_rpi, ndlp->nlp_DID,
+				ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
+
+			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
+				ndlp->nlp_flag &= ~NLP_UNREG_INP;
+				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+			} else {
+				ndlp->nlp_flag &= ~NLP_UNREG_INP;
+			}
+		}
+		pmb->ctx_ndlp = NULL;
 	}
 
 	/* Check security permission status on INIT_LINK mailbox command */
@@ -2527,21 +2553,46 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	struct lpfc_vport *vport = pmb->vport;
 	struct lpfc_nodelist *ndlp;
 
-	ndlp = pmb->context1;
+	ndlp = pmb->ctx_ndlp;
 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
 		if (phba->sli_rev == LPFC_SLI_REV4 &&
 		    (bf_get(lpfc_sli_intf_if_type,
 			    &phba->sli4_hba.sli_intf) >=
 		     LPFC_SLI_INTF_IF_TYPE_2)) {
 			if (ndlp) {
-				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
-						 "0010 UNREG_LOGIN vpi:%x "
-						 "rpi:%x DID:%x map:%x %p\n",
-						 vport->vpi, ndlp->nlp_rpi,
-						 ndlp->nlp_DID,
-						 ndlp->nlp_usg_map, ndlp);
+				lpfc_printf_vlog(
+					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
+					"0010 UNREG_LOGIN vpi:%x "
+					"rpi:%x DID:%x defer x%x flg x%x "
+					"map:%x %p\n",
+					vport->vpi, ndlp->nlp_rpi,
+					ndlp->nlp_DID, ndlp->nlp_defer_did,
+					ndlp->nlp_flag,
+					ndlp->nlp_usg_map, ndlp);
 				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
 				lpfc_nlp_put(ndlp);
+
+				/* Check to see if there are any deferred
+				 * events to process
+				 */
+				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
+				    (ndlp->nlp_defer_did !=
+				     NLP_EVT_NOTHING_PENDING)) {
+					lpfc_printf_vlog(
+						vport, KERN_INFO, LOG_DISCOVERY,
+						"4111 UNREG cmpl deferred "
+						"clr x%x on "
+						"NPort x%x Data: x%x %p\n",
+						ndlp->nlp_rpi, ndlp->nlp_DID,
+						ndlp->nlp_defer_did, ndlp);
+					ndlp->nlp_flag &= ~NLP_UNREG_INP;
+					ndlp->nlp_defer_did =
+						NLP_EVT_NOTHING_PENDING;
+					lpfc_issue_els_plogi(
+						vport, ndlp->nlp_DID, 0);
+				} else {
+					ndlp->nlp_flag &= ~NLP_UNREG_INP;
+				}
 			}
 		}
 	}
@@ -4640,6 +4691,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
 	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
 
 	rc = lpfc_sli4_brdreset(phba);
+	if (rc)
+		return rc;
 
 	spin_lock_irq(&phba->hbalock);
 	phba->pport->stopped = 0;
@@ -5228,7 +5281,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
 		goto out_free_mboxq;
 	}
 
-	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
@@ -6148,6 +6201,25 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
 }
 
 /**
+ * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Disable FW logging into host memory on the adapter. To
+ * be done before reading logs from the host memory.
+ **/
+void
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+{
+	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+	ras_fwlog->ras_active = false;
+
+	/* Disable FW logging to host memory */
+	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+}
+
+/**
  * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
  * @phba: Pointer to HBA context object.
  *
@@ -6211,7 +6283,7 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
 					  &ras_fwlog->lwpd.phys,
 					  GFP_KERNEL);
 	if (!ras_fwlog->lwpd.virt) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"6185 LWPD Memory Alloc Failed\n");
 
 		return -ENOMEM;
@@ -6228,7 +6300,7 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
 			goto free_mem;
 		}
 
-		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
 						  LPFC_RAS_MAX_ENTRY_SIZE,
 						  &dmabuf->phys,
 						  GFP_KERNEL);
@@ -6239,7 +6311,6 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
 					"6187 DMA Alloc Failed FW logging");
 			goto free_mem;
 		}
-		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
 		dmabuf->buffer_tag = i;
 		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
 	}
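
dma_zalloc_coherent() folds the zeroing into the allocation, which is why
the explicit memset() disappears in the hunk just above. The equivalence, as
a sketch (note this API was short-lived: later kernels made
dma_alloc_coherent() return zeroed memory itself and removed
dma_zalloc_coherent() again):

	/* Before: allocate coherent DMA memory, then clear it by hand. */
	buf = dma_alloc_coherent(dev, size, &phys, GFP_KERNEL);
	if (buf)
		memset(buf, 0, size);

	/* After: one call returns already-zeroed coherent memory. */
	buf = dma_zalloc_coherent(dev, size, &phys, GFP_KERNEL);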
@@ -6274,11 +6345,13 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 
 	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 				"6188 FW LOG mailbox "
 				"completed with status x%x add_status x%x,"
 				" mbx status x%x\n",
 				shdr_status, shdr_add_status, mb->mbxStatus);
+
+		ras_fwlog->ras_hwsupport = false;
 		goto disable_ras;
 	}
 
@@ -6326,7 +6399,7 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
 		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-					"6189 RAS FW Log Support Not Enabled");
+					"6189 FW Log Memory Allocation Failed");
 			return rc;
 		}
 	}
@@ -6334,7 +6407,7 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
 	/* Setup Mailbox command */
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"6190 RAS MBX Alloc Failed");
 		rc = -ENOMEM;
 		goto mem_free;
@@ -6379,8 +6452,8 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 
 	if (rc == MBX_NOT_FINISHED) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"6191 RAS Mailbox failed. "
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6191 FW-Log Mailbox failed. "
 				"status %d mbxStatus : x%x", rc,
 				bf_get(lpfc_mqe_status, &mbox->u.mqe));
 		mempool_free(mbox, phba->mbox_mem_pool);
@@ -7348,7 +7421,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 
 	mboxq->vport = vport;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
 	if (rc == MBX_SUCCESS) {
 		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
 		rc = 0;
@@ -7360,7 +7433,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	 */
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
-	mboxq->context1 = NULL;
+	mboxq->ctx_buf = NULL;
 	if (unlikely(rc)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 				"0382 READ_SPARAM command failed "
@@ -7635,7 +7708,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 	 */
 	spin_lock_irq(&phba->hbalock);
 	phba->link_state = LPFC_LINK_DOWN;
+
+	/* Check if physical ports are trunked */
+	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
+		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
+	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
+		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
+	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
+		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
+	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
+		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
 	spin_unlock_irq(&phba->hbalock);
+
 	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
 	    (phba->hba_flag & LINK_DISABLED)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
@@ -8119,10 +8203,10 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 	}
 
 	/* Copy the mailbox extension data */
-	if (pmbox->in_ext_byte_len && pmbox->context2) {
-		lpfc_sli_pcimem_bcopy(pmbox->context2,
+	if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
+		lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
 			(uint8_t *)phba->mbox_ext,
 			pmbox->in_ext_byte_len);
 	}
 	/* Copy command data to host SLIM area */
 	lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
@@ -8133,10 +8217,10 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 			= MAILBOX_HBA_EXT_OFFSET;
 
 		/* Copy the mailbox extension data */
-		if (pmbox->in_ext_byte_len && pmbox->context2)
+		if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
 			lpfc_memcpy_to_slim(phba->MBslimaddr +
 				MAILBOX_HBA_EXT_OFFSET,
-				pmbox->context2, pmbox->in_ext_byte_len);
+				pmbox->ctx_buf, pmbox->in_ext_byte_len);
 
 		if (mbx->mbxCommand == MBX_CONFIG_PORT)
 			/* copy command data into host mbox for cmpl */
@@ -8259,9 +8343,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
 					      MAILBOX_CMD_SIZE);
 			/* Copy the mailbox extension data */
-			if (pmbox->out_ext_byte_len && pmbox->context2) {
+			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
 				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
-						      pmbox->context2,
+						      pmbox->ctx_buf,
 						      pmbox->out_ext_byte_len);
 			}
 		} else {
@@ -8269,8 +8353,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
 					      MAILBOX_CMD_SIZE);
 			/* Copy the mailbox extension data */
-			if (pmbox->out_ext_byte_len && pmbox->context2) {
-				lpfc_memcpy_from_slim(pmbox->context2,
+			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
+				lpfc_memcpy_from_slim(
+					pmbox->ctx_buf,
 					phba->MBslimaddr +
 					MAILBOX_HBA_EXT_OFFSET,
 					pmbox->out_ext_byte_len);
@@ -11265,19 +11350,12 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 	/* Complete prepping the abort wqe and issue to the FW. */
 	abts_wqe = &abtsiocbp->wqe;
-	bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
-	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
-	/* Explicitly set reserved fields to zero.*/
-	abts_wqe->abort_cmd.rsrvd4 = 0;
-	abts_wqe->abort_cmd.rsrvd5 = 0;
 
-	/* WQE Common - word 6. Context is XRI tag. Set 0. */
-	bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
-	bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
+	/* Clear any stale WQE contents */
+	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
 
 	/* word 7 */
-	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
 	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
 	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
 	       cmdiocb->iocb.ulpClass);
@@ -11292,7 +11370,6 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			 abtsiocbp->iotag);
 
 	/* word 10 */
-	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
 	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
 	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
 
@@ -12545,10 +12622,10 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 				lpfc_sli_pcimem_bcopy(mbox, pmbox,
 						      MAILBOX_CMD_SIZE);
 				if (pmb->out_ext_byte_len &&
-				    pmb->context2)
+				    pmb->ctx_buf)
 					lpfc_sli_pcimem_bcopy(
 						phba->mbox_ext,
-						pmb->context2,
+						pmb->ctx_buf,
 						pmb->out_ext_byte_len);
 			}
 			if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
@@ -12563,9 +12640,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 
 			if (!pmbox->mbxStatus) {
 				mp = (struct lpfc_dmabuf *)
-					(pmb->context1);
+					(pmb->ctx_buf);
 				ndlp = (struct lpfc_nodelist *)
-					pmb->context2;
+					pmb->ctx_ndlp;
 
 				/* Reg_LOGIN of dflt RPI was
 				 * successful. new lets get
@@ -12578,8 +12655,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 						 pmb);
 				pmb->mbox_cmpl =
 					lpfc_mbx_cmpl_dflt_rpi;
-				pmb->context1 = mp;
-				pmb->context2 = ndlp;
+				pmb->ctx_buf = mp;
+				pmb->ctx_ndlp = ndlp;
 				pmb->vport = vport;
 				rc = lpfc_sli_issue_mbox(phba,
 						pmb,
@@ -13185,16 +13262,16 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
 				      mcqe_status,
 				      pmbox->un.varWords[0], 0);
 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
-			mp = (struct lpfc_dmabuf *)(pmb->context1);
-			ndlp = (struct lpfc_nodelist *)pmb->context2;
+			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
 			/* Reg_LOGIN of dflt RPI was successful. Now lets get
 			 * RID of the PPI using the same mbox buffer.
 			 */
 			lpfc_unreg_login(phba, vport->vpi,
 					 pmbox->un.varWords[0], pmb);
 			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
-			pmb->context1 = mp;
-			pmb->context2 = ndlp;
+			pmb->ctx_buf = mp;
+			pmb->ctx_ndlp = ndlp;
 			pmb->vport = vport;
 			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 			if (rc != MBX_BUSY)
@@ -13413,6 +13490,8 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 	return workposted;
 }
 
+#define FC_RCTL_MDS_DIAGS	0xF4
+
 /**
  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
  * @phba: Pointer to HBA context object.
@@ -13426,6 +13505,7 @@ static bool
 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 {
 	bool workposted = false;
+	struct fc_frame_header *fc_hdr;
 	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
 	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
 	struct lpfc_nvmet_tgtport *tgtp;
@@ -13462,7 +13542,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 		hrq->RQ_buf_posted--;
 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 
-		/* save off the frame for the word thread to process */
+		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
+
+		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
+		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			/* Handle MDS Loopback frames */
+			lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
+			break;
+		}
+
+		/* save off the frame for the work thread to process */
 		list_add_tail(&dma_buf->cq_event.list,
 			      &phba->sli4_hba.sp_queue_event);
 		/* Frame received */
@@ -14501,7 +14591,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
 			hw_page_size))/hw_page_size;
 
 	/* If needed, Adjust page count to match the max the adapter supports */
-	if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
+	if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
+	    (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
 		queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
 
 	INIT_LIST_HEAD(&queue->list);
@@ -14669,7 +14760,8 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
 
 	mbox->vport = phba->pport;
 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	mbox->context1 = NULL;
+	mbox->ctx_buf = NULL;
+	mbox->ctx_ndlp = NULL;
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -14789,7 +14881,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
 	}
 	mbox->vport = phba->pport;
 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	mbox->context1 = NULL;
+	mbox->ctx_buf = NULL;
+	mbox->ctx_ndlp = NULL;
 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
@@ -16863,8 +16956,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
 	struct fc_vft_header *fc_vft_hdr;
 	uint32_t *header = (uint32_t *) fc_hdr;
 
-#define FC_RCTL_MDS_DIAGS	0xF4
-
 	switch (fc_hdr->fh_r_ctl) {
 	case FC_RCTL_DD_UNCAT:	/* uncategorized information */
 	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
@@ -16903,15 +16994,12 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16903 goto drop; 16994 goto drop;
16904 } 16995 }
16905 16996
16906#define FC_TYPE_VENDOR_UNIQUE 0xFF
16907
16908 switch (fc_hdr->fh_type) { 16997 switch (fc_hdr->fh_type) {
16909 case FC_TYPE_BLS: 16998 case FC_TYPE_BLS:
16910 case FC_TYPE_ELS: 16999 case FC_TYPE_ELS:
16911 case FC_TYPE_FCP: 17000 case FC_TYPE_FCP:
16912 case FC_TYPE_CT: 17001 case FC_TYPE_CT:
16913 case FC_TYPE_NVME: 17002 case FC_TYPE_NVME:
16914 case FC_TYPE_VENDOR_UNIQUE:
16915 break; 17003 break;
16916 case FC_TYPE_IP: 17004 case FC_TYPE_IP:
16917 case FC_TYPE_ILS: 17005 case FC_TYPE_ILS:
@@ -17741,6 +17829,7 @@ lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17741 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17829 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17742 kfree(pcmd); 17830 kfree(pcmd);
17743 lpfc_sli_release_iocbq(phba, cmdiocb); 17831 lpfc_sli_release_iocbq(phba, cmdiocb);
17832 lpfc_drain_txq(phba);
17744} 17833}
17745 17834
17746static void 17835static void
@@ -17754,14 +17843,23 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17754 struct lpfc_dmabuf *pcmd = NULL; 17843 struct lpfc_dmabuf *pcmd = NULL;
17755 uint32_t frame_len; 17844 uint32_t frame_len;
17756 int rc; 17845 int rc;
17846 unsigned long iflags;
17757 17847
17758 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17848 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17759 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17849 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17760 17850
17761 /* Send the received frame back */ 17851 /* Send the received frame back */
17762 iocbq = lpfc_sli_get_iocbq(phba); 17852 iocbq = lpfc_sli_get_iocbq(phba);
17763 if (!iocbq) 17853 if (!iocbq) {
17764 goto exit; 17854 /* Queue cq event and wakeup worker thread to process it */
17855 spin_lock_irqsave(&phba->hbalock, iflags);
17856 list_add_tail(&dmabuf->cq_event.list,
17857 &phba->sli4_hba.sp_queue_event);
17858 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17859 spin_unlock_irqrestore(&phba->hbalock, iflags);
17860 lpfc_worker_wake_up(phba);
17861 return;
17862 }
17765 17863
17766 /* Allocate buffer for command payload */ 17864 /* Allocate buffer for command payload */
17767 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17865 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
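[Note: when lpfc_sli_get_iocbq() fails here, the frame is no longer dropped via the exit path; the cq event is queued under hbalock and the worker thread is woken to retry later. A generic user-space sketch of that defer-to-worker pattern, with illustrative names rather than the driver's:

    #include <pthread.h>
    #include <stdio.h>

    struct event { struct event *next; int payload; };

    static struct event *queue_head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;

    /* Producer path: resource allocation failed, so defer instead of drop. */
    static void defer_event(struct event *ev)
    {
        pthread_mutex_lock(&lock);
        ev->next = queue_head;          /* enqueue under the lock */
        queue_head = ev;
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&wake);     /* analogous to lpfc_worker_wake_up() */
    }

    /* Worker thread: drain deferred events once resources may be free again. */
    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            while (!queue_head)
                pthread_cond_wait(&wake, &lock);
            struct event *ev = queue_head;
            queue_head = ev->next;
            pthread_mutex_unlock(&lock);
            printf("retrying event %d\n", ev->payload);
            pthread_mutex_lock(&lock);
        }
        return NULL;
    }
]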
@@ -17846,6 +17944,14 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17846 /* Process each received buffer */ 17944 /* Process each received buffer */
17847 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17945 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17848 17946
17947 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
17948 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
17949 vport = phba->pport;
17950 /* Handle MDS Loopback frames */
17951 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17952 return;
17953 }
17954
17849 /* check to see if this a valid type of frame */ 17955 /* check to see if this a valid type of frame */
17850 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17956 if (lpfc_fc_frame_check(phba, fc_hdr)) {
17851 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17957 lpfc_in_buf_free(phba, &dmabuf->dbuf);
@@ -17860,13 +17966,6 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17860 fcfi = bf_get(lpfc_rcqe_fcf_id, 17966 fcfi = bf_get(lpfc_rcqe_fcf_id,
17861 &dmabuf->cq_event.cqe.rcqe_cmpl); 17967 &dmabuf->cq_event.cqe.rcqe_cmpl);
17862 17968
17863 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17864 vport = phba->pport;
17865 /* Handle MDS Loopback frames */
17866 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17867 return;
17868 }
17869
17870 /* d_id this frame is directed to */ 17969 /* d_id this frame is directed to */
17871 did = sli4_did_from_fc_hdr(fc_hdr); 17970 did = sli4_did_from_fc_hdr(fc_hdr);
17872 17971
@@ -18207,8 +18306,8 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18207 lpfc_resume_rpi(mboxq, ndlp); 18306 lpfc_resume_rpi(mboxq, ndlp);
18208 if (cmpl) { 18307 if (cmpl) {
18209 mboxq->mbox_cmpl = cmpl; 18308 mboxq->mbox_cmpl = cmpl;
18210 mboxq->context1 = arg; 18309 mboxq->ctx_buf = arg;
18211 mboxq->context2 = ndlp; 18310 mboxq->ctx_ndlp = ndlp;
18212 } else 18311 } else
18213 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 18312 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18214 mboxq->vport = ndlp->vport; 18313 mboxq->vport = ndlp->vport;
@@ -18711,15 +18810,8 @@ next_priority:
18711 goto initial_priority; 18810 goto initial_priority;
18712 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18811 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18713 "2844 No roundrobin failover FCF available\n"); 18812 "2844 No roundrobin failover FCF available\n");
18714 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 18813
18715 return LPFC_FCOE_FCF_NEXT_NONE; 18814 return LPFC_FCOE_FCF_NEXT_NONE;
18716 else {
18717 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18718 "3063 Only FCF available idx %d, flag %x\n",
18719 next_fcf_index,
18720 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
18721 return next_fcf_index;
18722 }
18723 } 18815 }
18724 18816
18725 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 18817 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
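[Note: the deleted branch used to hand back the sole remaining FCF index as a last resort even after logging that no roundrobin failover FCF was available; the new code returns LPFC_FCOE_FCF_NEXT_NONE unconditionally, apparently leaving retry policy to the caller (the fcf_redisc_attempted field added in lpfc_sli4.h below looks related). An illustrative sketch of a round-robin scan that reports NONE rather than falling back to a possibly failed record; all names here are stand-ins:

    #include <stdint.h>

    #define FCF_TBL_MAX   32
    #define FCF_NEXT_NONE 0xFFFF

    static uint16_t next_fcf_roundrobin(const uint8_t eligible[FCF_TBL_MAX],
                                        uint16_t last_index)
    {
        for (uint16_t i = 1; i <= FCF_TBL_MAX; i++) {
            uint16_t idx = (last_index + i) % FCF_TBL_MAX;
            if (eligible[idx])
                return idx;
        }
        return FCF_NEXT_NONE;  /* no eligible entry: do not reuse current */
    }
]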
@@ -19026,7 +19118,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19026 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 19118 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19027 goto out; 19119 goto out;
19028 mqe = &mboxq->u.mqe; 19120 mqe = &mboxq->u.mqe;
19029 mp = (struct lpfc_dmabuf *) mboxq->context1; 19121 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19030 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 19122 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19031 if (rc) 19123 if (rc)
19032 goto out; 19124 goto out;
@@ -19171,11 +19263,11 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19171 struct lpfc_mbx_wr_object *wr_object; 19263 struct lpfc_mbx_wr_object *wr_object;
19172 LPFC_MBOXQ_t *mbox; 19264 LPFC_MBOXQ_t *mbox;
19173 int rc = 0, i = 0; 19265 int rc = 0, i = 0;
19174 uint32_t shdr_status, shdr_add_status; 19266 uint32_t shdr_status, shdr_add_status, shdr_change_status;
19175 uint32_t mbox_tmo; 19267 uint32_t mbox_tmo;
19176 union lpfc_sli4_cfg_shdr *shdr;
19177 struct lpfc_dmabuf *dmabuf; 19268 struct lpfc_dmabuf *dmabuf;
19178 uint32_t written = 0; 19269 uint32_t written = 0;
19270 bool check_change_status = false;
19179 19271
19180 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19272 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19181 if (!mbox) 19273 if (!mbox)
@@ -19203,6 +19295,8 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19203 (size - written); 19295 (size - written);
19204 written += (size - written); 19296 written += (size - written);
19205 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 19297 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19298 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19299 check_change_status = true;
19206 } else { 19300 } else {
19207 wr_object->u.request.bde[i].tus.f.bdeSize = 19301 wr_object->u.request.bde[i].tus.f.bdeSize =
19208 SLI4_PAGE_SIZE; 19302 SLI4_PAGE_SIZE;
@@ -19219,9 +19313,39 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19219 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 19313 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19220 } 19314 }
19221 /* The IOCTL status is embedded in the mailbox subheader. */ 19315 /* The IOCTL status is embedded in the mailbox subheader. */
19222 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr; 19316 shdr_status = bf_get(lpfc_mbox_hdr_status,
19223 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 19317 &wr_object->header.cfg_shdr.response);
19224 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 19318 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19319 &wr_object->header.cfg_shdr.response);
19320 if (check_change_status) {
19321 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19322 &wr_object->u.response);
19323 switch (shdr_change_status) {
19324 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19325 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19326 "3198 Firmware write complete: System "
19327 "reboot required to instantiate\n");
19328 break;
19329 case (LPFC_CHANGE_STATUS_FW_RESET):
19330 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19331 "3199 Firmware write complete: Firmware"
19332 " reset required to instantiate\n");
19333 break;
19334 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19336 "3200 Firmware write complete: Port "
19337 "Migration or PCI Reset required to "
19338 "instantiate\n");
19339 break;
19340 case (LPFC_CHANGE_STATUS_PCI_RESET):
19341 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19342 "3201 Firmware write complete: PCI "
19343 "Reset required to instantiate\n");
19344 break;
19345 default:
19346 break;
19347 }
19348 }
19225 if (rc != MBX_TIMEOUT) 19349 if (rc != MBX_TIMEOUT)
19226 mempool_free(mbox, phba->mbox_mem_pool); 19350 mempool_free(mbox, phba->mbox_mem_pool);
19227 if (shdr_status || shdr_add_status || rc) { 19351 if (shdr_status || shdr_add_status || rc) {
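[Note: on the final BDE the request now sets the EAS bit, and after the mailbox completes the driver decodes lpfc_wr_object_change_status to tell the operator what kind of reset the freshly written image needs before it takes effect. The decode itself is a plain status-to-message switch; a condensed sketch, with enum values standing in for the LPFC_CHANGE_STATUS_* constants:

    #include <stdio.h>

    enum change_status {            /* stand-ins for LPFC_CHANGE_STATUS_* */
        CHG_NO_RESET = 0,
        CHG_PHYS_DEV_RESET,
        CHG_FW_RESET,
        CHG_PORT_MIGRATION,
        CHG_PCI_RESET,
    };

    static const char *reset_needed(enum change_status s)
    {
        switch (s) {
        case CHG_PHYS_DEV_RESET:  return "system reboot";
        case CHG_FW_RESET:        return "firmware reset";
        case CHG_PORT_MIGRATION:  return "port migration or PCI reset";
        case CHG_PCI_RESET:       return "PCI reset";
        default:                  return "none";
        }
    }

    int main(void)
    {
        printf("firmware write complete: %s required to instantiate\n",
               reset_needed(CHG_FW_RESET));
        return 0;
    }
]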
@@ -19277,7 +19401,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19277 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 19401 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19278 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19402 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19279 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19403 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19280 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 19404 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19281 /* Put reference count for delayed processing */ 19405 /* Put reference count for delayed processing */
19282 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 19406 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19283 /* Unregister the RPI when mailbox complete */ 19407 /* Unregister the RPI when mailbox complete */
@@ -19302,7 +19426,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19302 19426
19303 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19427 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19304 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19428 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19305 ndlp = (struct lpfc_nodelist *)mb->context2; 19429 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19306 /* Unregister the RPI when mailbox complete */ 19430 /* Unregister the RPI when mailbox complete */
19307 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19431 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19308 restart_loop = 1; 19432 restart_loop = 1;
@@ -19322,13 +19446,14 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19322 while (!list_empty(&mbox_cmd_list)) { 19446 while (!list_empty(&mbox_cmd_list)) {
19323 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 19447 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19324 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19448 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19325 mp = (struct lpfc_dmabuf *) (mb->context1); 19449 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19326 if (mp) { 19450 if (mp) {
19327 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 19451 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19328 kfree(mp); 19452 kfree(mp);
19329 } 19453 }
19330 ndlp = (struct lpfc_nodelist *) mb->context2; 19454 mb->ctx_buf = NULL;
19331 mb->context2 = NULL; 19455 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19456 mb->ctx_ndlp = NULL;
19332 if (ndlp) { 19457 if (ndlp) {
19333 spin_lock(shost->host_lock); 19458 spin_lock(shost->host_lock);
19334 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19459 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 34b7ab69b9b4..7abb395bb64a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -144,9 +144,9 @@ typedef struct lpfcMboxq {
144 MAILBOX_t mb; /* Mailbox cmd */ 144 MAILBOX_t mb; /* Mailbox cmd */
145 struct lpfc_mqe mqe; 145 struct lpfc_mqe mqe;
146 } u; 146 } u;
147 struct lpfc_vport *vport;/* virtual port pointer */ 147 struct lpfc_vport *vport; /* virtual port pointer */
148 void *context1; /* caller context information */ 148 void *ctx_ndlp; /* caller ndlp information */
149 void *context2; /* caller context information */ 149 void *ctx_buf; /* caller buffer information */
150 void *context3; 150 void *context3;
151 151
152 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); 152 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
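[Note: this is the struct change behind every context1/context2 conversion in the patch. The two untyped caller-context slots become purpose-named fields, so call sites read mb->ctx_ndlp and mb->ctx_buf instead of having to remember which void pointer holds the node list entry and which holds the DMA buffer. Reduced to its shape:

    /* Before: meaning of each slot depends on the call site. */
    struct mboxq_old {
        void *context1;   /* caller context information */
        void *context2;   /* caller context information */
    };

    /* After: the field name documents the expected payload, even though
     * the members stay void * for layering reasons. */
    struct mboxq_new {
        void *ctx_ndlp;   /* struct lpfc_nodelist *, when set */
        void *ctx_buf;    /* struct lpfc_dmabuf *, when set */
    };
]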
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index e76c380e1a84..6b2d2350e2c6 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -279,6 +279,7 @@ struct lpfc_fcf {
279#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ 279#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
280#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ 280#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
281#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) 281#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
282 uint16_t fcf_redisc_attempted;
282 uint32_t addr_mode; 283 uint32_t addr_mode;
283 uint32_t eligible_fcf_cnt; 284 uint32_t eligible_fcf_cnt;
284 struct lpfc_fcf_rec current_rec; 285 struct lpfc_fcf_rec current_rec;
@@ -717,6 +718,19 @@ struct lpfc_sli4_hba {
717 uint16_t num_online_cpu; 718 uint16_t num_online_cpu;
718 uint16_t num_present_cpu; 719 uint16_t num_present_cpu;
719 uint16_t curr_disp_cpu; 720 uint16_t curr_disp_cpu;
721 uint32_t conf_trunk;
722#define lpfc_conf_trunk_port0_WORD conf_trunk
723#define lpfc_conf_trunk_port0_SHIFT 0
724#define lpfc_conf_trunk_port0_MASK 0x1
725#define lpfc_conf_trunk_port1_WORD conf_trunk
726#define lpfc_conf_trunk_port1_SHIFT 1
727#define lpfc_conf_trunk_port1_MASK 0x1
728#define lpfc_conf_trunk_port2_WORD conf_trunk
729#define lpfc_conf_trunk_port2_SHIFT 2
730#define lpfc_conf_trunk_port2_MASK 0x1
731#define lpfc_conf_trunk_port3_WORD conf_trunk
732#define lpfc_conf_trunk_port3_SHIFT 3
733#define lpfc_conf_trunk_port3_MASK 0x1
720}; 734};
721 735
722enum lpfc_sge_type { 736enum lpfc_sge_type {
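[Note: the conf_trunk macros follow lpfc's usual register-field convention: each logical field is described by a _WORD/_SHIFT/_MASK triple that the driver's bf_get()/bf_set() helpers consume via token pasting. A self-contained sketch of that mechanism; the helper bodies below are a reconstruction of the common pattern, not copied from lpfc_hw4.h:

    #include <stdint.h>
    #include <stdio.h>

    #define bf_get(name, ptr) \
        (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
    #define bf_set(name, ptr, value) \
        ((ptr)->name##_WORD = (((ptr)->name##_WORD & \
            ~(name##_MASK << name##_SHIFT)) | \
            (((value) & name##_MASK) << name##_SHIFT)))

    struct sli4_hba_sketch {
        uint32_t conf_trunk;
    };
    #define lpfc_conf_trunk_port1_WORD  conf_trunk
    #define lpfc_conf_trunk_port1_SHIFT 1
    #define lpfc_conf_trunk_port1_MASK  0x1

    int main(void)
    {
        struct sli4_hba_sketch hba = { 0 };
        bf_set(lpfc_conf_trunk_port1, &hba, 1);
        printf("port1 trunked: %u (word 0x%x)\n",
               (unsigned)bf_get(lpfc_conf_trunk_port1, &hba),
               hba.conf_trunk);   /* prints: port1 trunked: 1 (word 0x2) */
        return 0;
    }
]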
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 5a0d512ff497..3f4398ffb567 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
20 * included with this package. * 20 * included with this package. *
21 *******************************************************************/ 21 *******************************************************************/
22 22
23#define LPFC_DRIVER_VERSION "12.0.0.7" 23#define LPFC_DRIVER_VERSION "12.0.0.10"
24#define LPFC_DRIVER_NAME "lpfc" 24#define LPFC_DRIVER_NAME "lpfc"
25 25
26/* Used for SLI 2/3 */ 26/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index c340e0e47473..102a011ff6d4 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -138,8 +138,8 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
138 * Grab buffer pointer and clear context1 so we can use 138 * Grab buffer pointer and clear context1 so we can use
139 * lpfc_sli_issue_box_wait 139 * lpfc_sli_issue_box_wait
140 */ 140 */
141 mp = (struct lpfc_dmabuf *) pmb->context1; 141 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
142 pmb->context1 = NULL; 142 pmb->ctx_buf = NULL;
143 143
144 pmb->vport = vport; 144 pmb->vport = vport;
145 rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); 145 rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 177701dfdfcb..c8e6ae98a4a6 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -403,7 +403,7 @@ static struct scsi_host_template mac53c94_template = {
403 .can_queue = 1, 403 .can_queue = 1,
404 .this_id = 7, 404 .this_id = 7,
405 .sg_tablesize = SG_ALL, 405 .sg_tablesize = SG_ALL,
406 .use_clustering = DISABLE_CLUSTERING, 406 .max_segment_size = 65535,
407}; 407};
408 408
409static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match) 409static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
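[Note: these host-template conversions are part of the DMA rework named in the merge summary. The boolean use_clustering flag is gone; what DISABLE_CLUSTERING actually enforced is now stated as an explicit queue limit, either max_segment_size (no segment larger than N bytes) or dma_boundary (no segment crossing an (N+1)-aligned address), while former ENABLE_CLUSTERING users simply drop the line and take the defaults. A minimal model of the boundary check, under the assumption of a 4K page:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* dma_boundary semantics: a segment must not cross a (boundary + 1)-
     * aligned address; PAGE_SIZE - 1 therefore means "never merge across
     * a page". */
    static bool segment_ok(uint64_t addr, uint64_t len, uint64_t boundary)
    {
        return (addr & ~boundary) == ((addr + len - 1) & ~boundary);
    }

    int main(void)
    {
        uint64_t page_mask = 4096 - 1;  /* PAGE_SIZE - 1 on a 4K system */
        printf("%d\n", segment_ok(0x1F00, 0x100, page_mask)); /* 1: in page */
        printf("%d\n", segment_ok(0x1F00, 0x200, page_mask)); /* 0: crosses */
        return 0;
    }
]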
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 764d320bb2ca..ee741207fd4e 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -307,7 +307,7 @@ static int esp_mac_probe(struct platform_device *dev)
307 goto fail; 307 goto fail;
308 308
309 host->max_id = 8; 309 host->max_id = 8;
310 host->use_clustering = DISABLE_CLUSTERING; 310 host->dma_boundary = PAGE_SIZE - 1;
311 esp = shost_priv(host); 311 esp = shost_priv(host);
312 312
313 esp->host = host; 313 esp->host = host;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index dd6057359d7c..8b4b5b1a13d7 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -333,7 +333,7 @@ static struct scsi_host_template mac_scsi_template = {
333 .this_id = 7, 333 .this_id = 7,
334 .sg_tablesize = 1, 334 .sg_tablesize = 1,
335 .cmd_per_lun = 2, 335 .cmd_per_lun = 2,
336 .use_clustering = DISABLE_CLUSTERING, 336 .dma_boundary = PAGE_SIZE - 1,
337 .cmd_size = NCR5380_CMD_SIZE, 337 .cmd_size = NCR5380_CMD_SIZE,
338 .max_sectors = 128, 338 .max_sectors = 128,
339}; 339};
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 8c7154143a4e..4862f65ec3e8 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4148,7 +4148,6 @@ static struct scsi_host_template megaraid_template = {
4148 .this_id = DEFAULT_INITIATOR_ID, 4148 .this_id = DEFAULT_INITIATOR_ID,
4149 .sg_tablesize = MAX_SGLIST, 4149 .sg_tablesize = MAX_SGLIST,
4150 .cmd_per_lun = DEF_CMD_PER_LUN, 4150 .cmd_per_lun = DEF_CMD_PER_LUN,
4151 .use_clustering = ENABLE_CLUSTERING,
4152 .eh_abort_handler = megaraid_abort, 4151 .eh_abort_handler = megaraid_abort,
4153 .eh_device_reset_handler = megaraid_reset, 4152 .eh_device_reset_handler = megaraid_reset,
4154 .eh_bus_reset_handler = megaraid_reset, 4153 .eh_bus_reset_handler = megaraid_reset,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 3b7abe5ca7f5..e836392b75e8 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -336,7 +336,6 @@ static struct scsi_host_template megaraid_template_g = {
336 .eh_abort_handler = megaraid_abort_handler, 336 .eh_abort_handler = megaraid_abort_handler,
337 .eh_host_reset_handler = megaraid_reset_handler, 337 .eh_host_reset_handler = megaraid_reset_handler,
338 .change_queue_depth = scsi_change_queue_depth, 338 .change_queue_depth = scsi_change_queue_depth,
339 .use_clustering = ENABLE_CLUSTERING,
340 .no_write_same = 1, 339 .no_write_same = 1,
341 .sdev_attrs = megaraid_sdev_attrs, 340 .sdev_attrs = megaraid_sdev_attrs,
342 .shost_attrs = megaraid_shost_attrs, 341 .shost_attrs = megaraid_shost_attrs,
@@ -1243,8 +1242,7 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
1243 dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr, 1242 dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
1244 sg_pci_blk[i].dma_addr); 1243 sg_pci_blk[i].dma_addr);
1245 } 1244 }
1246 if (raid_dev->sg_pool_handle) 1245 dma_pool_destroy(raid_dev->sg_pool_handle);
1247 dma_pool_destroy(raid_dev->sg_pool_handle);
1248 1246
1249 1247
1250 epthru_pci_blk = raid_dev->epthru_pool; 1248 epthru_pci_blk = raid_dev->epthru_pool;
@@ -1252,8 +1250,7 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
1252 dma_pool_free(raid_dev->epthru_pool_handle, 1250 dma_pool_free(raid_dev->epthru_pool_handle,
1253 epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr); 1251 epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
1254 } 1252 }
1255 if (raid_dev->epthru_pool_handle) 1253 dma_pool_destroy(raid_dev->epthru_pool_handle);
1256 dma_pool_destroy(raid_dev->epthru_pool_handle);
1257 1254
1258 1255
1259 mbox_pci_blk = raid_dev->mbox_pool; 1256 mbox_pci_blk = raid_dev->mbox_pool;
@@ -1261,8 +1258,7 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
1261 dma_pool_free(raid_dev->mbox_pool_handle, 1258 dma_pool_free(raid_dev->mbox_pool_handle,
1262 mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr); 1259 mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
1263 } 1260 }
1264 if (raid_dev->mbox_pool_handle) 1261 dma_pool_destroy(raid_dev->mbox_pool_handle);
1265 dma_pool_destroy(raid_dev->mbox_pool_handle);
1266 1262
1267 return; 1263 return;
1268} 1264}
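[Note: the removed guards in these three teardown hunks (and the megaraid_mm one below) are redundant because dma_pool_destroy(), like kfree(), is a no-op when passed NULL, so cleanup paths may call it unconditionally. The general shape of such a NULL-tolerant destructor:

    #include <stdlib.h>

    struct pool { void *mem; };

    /* NULL-tolerant by contract, mirroring kfree()/dma_pool_destroy():
     * callers never need an "if (p)" guard before destroying. */
    static void pool_destroy(struct pool *p)
    {
        if (!p)
            return;
        free(p->mem);
        free(p);
    }
]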
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8428247015db..3ce837e4b24c 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -1017,8 +1017,7 @@ memalloc_error:
1017 kfree(adapter->kioc_list); 1017 kfree(adapter->kioc_list);
1018 kfree(adapter->mbox_list); 1018 kfree(adapter->mbox_list);
1019 1019
1020 if (adapter->pthru_dma_pool) 1020 dma_pool_destroy(adapter->pthru_dma_pool);
1021 dma_pool_destroy(adapter->pthru_dma_pool);
1022 1021
1023 kfree(adapter); 1022 kfree(adapter);
1024 1023
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 67d356d84717..16536c41f0c5 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2,7 +2,8 @@
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2003-2013 LSI Corporation 4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies 5 * Copyright (c) 2013-2016 Avago Technologies
6 * Copyright (c) 2016-2018 Broadcom Inc.
6 * 7 *
7 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -19,14 +20,11 @@
19 * 20 *
20 * FILE: megaraid_sas.h 21 * FILE: megaraid_sas.h
21 * 22 *
22 * Authors: Avago Technologies 23 * Authors: Broadcom Inc.
23 * Kashyap Desai <kashyap.desai@avagotech.com> 24 * Kashyap Desai <kashyap.desai@broadcom.com>
24 * Sumit Saxena <sumit.saxena@avagotech.com> 25 * Sumit Saxena <sumit.saxena@broadcom.com>
25 * 26 *
26 * Send feedback to: megaraidlinux.pdl@avagotech.com 27 * Send feedback to: megaraidlinux.pdl@broadcom.com
27 *
28 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
29 * San Jose, California 95131
30 */ 28 */
31 29
32#ifndef LSI_MEGARAID_SAS_H 30#ifndef LSI_MEGARAID_SAS_H
@@ -35,8 +33,8 @@
35/* 33/*
36 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
37 */ 35 */
38#define MEGASAS_VERSION "07.706.03.00-rc1" 36#define MEGASAS_VERSION "07.707.50.00-rc1"
39#define MEGASAS_RELDATE "May 21, 2018" 37#define MEGASAS_RELDATE "December 18, 2018"
40 38
41/* 39/*
42 * Device IDs 40 * Device IDs
@@ -62,6 +60,10 @@
62#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017 60#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017
63#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B 61#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B
64#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT 0x001C 62#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT 0x001C
63#define PCI_DEVICE_ID_LSI_AERO_10E1 0x10e1
64#define PCI_DEVICE_ID_LSI_AERO_10E2 0x10e2
65#define PCI_DEVICE_ID_LSI_AERO_10E5 0x10e5
66#define PCI_DEVICE_ID_LSI_AERO_10E6 0x10e6
65 67
66/* 68/*
67 * Intel HBA SSDIDs 69 * Intel HBA SSDIDs
@@ -142,6 +144,7 @@
142 * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver 144 * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
143 * HOTPLUG : Resume from Hotplug 145 * HOTPLUG : Resume from Hotplug
144 * MFI_STOP_ADP : Send signal to FW to stop processing 146 * MFI_STOP_ADP : Send signal to FW to stop processing
147 * MFI_ADP_TRIGGER_SNAP_DUMP: Inform firmware to initiate snap dump
145 */ 148 */
146#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */ 149#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */
147#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */ 150#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */
@@ -158,6 +161,7 @@
158#define MFI_RESET_FLAGS MFI_INIT_READY| \ 161#define MFI_RESET_FLAGS MFI_INIT_READY| \
159 MFI_INIT_MFIMODE| \ 162 MFI_INIT_MFIMODE| \
160 MFI_INIT_ABORT 163 MFI_INIT_ABORT
164#define MFI_ADP_TRIGGER_SNAP_DUMP 0x00000100
161#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) 165#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
162 166
163/* 167/*
@@ -860,8 +864,22 @@ struct megasas_ctrl_prop {
860 u32 reserved:18; 864 u32 reserved:18;
861#endif 865#endif
862 } OnOffProperties; 866 } OnOffProperties;
863 u8 autoSnapVDSpace; 867
864 u8 viewSpace; 868 union {
869 u8 autoSnapVDSpace;
870 u8 viewSpace;
871 struct {
872#if defined(__BIG_ENDIAN_BITFIELD)
873 u16 reserved2:11;
874 u16 enable_snap_dump:1;
875 u16 reserved1:4;
876#else
877 u16 reserved1:4;
878 u16 enable_snap_dump:1;
879 u16 reserved2:11;
880#endif
881 } on_off_properties2;
882 };
865 __le16 spinDownTime; 883 __le16 spinDownTime;
866 u8 reserved[24]; 884 u8 reserved[24];
867} __packed; 885} __packed;
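[Note: the new on_off_properties2 follows the firmware-interface convention used elsewhere in this header: the same u16 is declared twice, once per bit order, so enable_snap_dump lands on the same wire bit whether the host compiler allocates bitfields LSB-first or MSB-first, and the union overlays the legacy autoSnapVDSpace/viewSpace bytes on the same storage. A compilable miniature (on a typical little-endian user-space build the #else arm is taken; __BIG_ENDIAN_BITFIELD is the kernel's guard):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef uint16_t u16;

    struct props2 {
    #if defined(__BIG_ENDIAN_BITFIELD)
        u16 reserved2:11;
        u16 enable_snap_dump:1;
        u16 reserved1:4;
    #else
        u16 reserved1:4;
        u16 enable_snap_dump:1;
        u16 reserved2:11;
    #endif
    };

    int main(void)
    {
        struct props2 p;
        u16 wire;

        memset(&p, 0, sizeof(p));
        p.enable_snap_dump = 1;
        memcpy(&wire, &p, sizeof(wire));
        /* On a little-endian host this prints 0x10: bit 4 of the word,
         * just above the 4-bit reserved1 field. */
        printf("wire image: 0x%x\n", wire);
        return 0;
    }
]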
@@ -1485,7 +1503,6 @@ enum FW_BOOT_CONTEXT {
1485#define MEGASAS_IOCTL_CMD 0 1503#define MEGASAS_IOCTL_CMD 0
1486#define MEGASAS_DEFAULT_CMD_TIMEOUT 90 1504#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
1487#define MEGASAS_THROTTLE_QUEUE_DEPTH 16 1505#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
1488#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
1489#define MEGASAS_DEFAULT_TM_TIMEOUT 50 1506#define MEGASAS_DEFAULT_TM_TIMEOUT 50
1490/* 1507/*
1491 * FW reports the maximum of number of commands that it can accept (maximum 1508 * FW reports the maximum of number of commands that it can accept (maximum
@@ -1544,11 +1561,16 @@ enum FW_BOOT_CONTEXT {
1544 1561
1545#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25) 1562#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25)
1546 1563
1564#define MEGASAS_WATCHDOG_THREAD_INTERVAL 1000
1565#define MEGASAS_WAIT_FOR_NEXT_DMA_MSECS 20
1566#define MEGASAS_WATCHDOG_WAIT_COUNT 50
1567
1547enum MR_ADAPTER_TYPE { 1568enum MR_ADAPTER_TYPE {
1548 MFI_SERIES = 1, 1569 MFI_SERIES = 1,
1549 THUNDERBOLT_SERIES = 2, 1570 THUNDERBOLT_SERIES = 2,
1550 INVADER_SERIES = 3, 1571 INVADER_SERIES = 3,
1551 VENTURA_SERIES = 4, 1572 VENTURA_SERIES = 4,
1573 AERO_SERIES = 5,
1552}; 1574};
1553 1575
1554/* 1576/*
@@ -1588,11 +1610,10 @@ struct megasas_register_set {
1588 1610
1589 u32 reserved_3[3]; /*00A4h*/ 1611 u32 reserved_3[3]; /*00A4h*/
1590 1612
1591 u32 outbound_scratch_pad ; /*00B0h*/ 1613 u32 outbound_scratch_pad_0; /*00B0h*/
1592 u32 outbound_scratch_pad_2; /*00B4h*/ 1614 u32 outbound_scratch_pad_1; /*00B4h*/
1593 u32 outbound_scratch_pad_3; /*00B8h*/ 1615 u32 outbound_scratch_pad_2; /*00B8h*/
1594 u32 outbound_scratch_pad_4; /*00BCh*/ 1616 u32 outbound_scratch_pad_3; /*00BCh*/
1595
1596 1617
1597 u32 inbound_low_queue_port ; /*00C0h*/ 1618 u32 inbound_low_queue_port ; /*00C0h*/
1598 1619
@@ -2181,6 +2202,9 @@ struct megasas_instance {
2181 struct MR_LD_TARGETID_LIST *ld_targetid_list_buf; 2202 struct MR_LD_TARGETID_LIST *ld_targetid_list_buf;
2182 dma_addr_t ld_targetid_list_buf_h; 2203 dma_addr_t ld_targetid_list_buf_h;
2183 2204
2205 struct MR_SNAPDUMP_PROPERTIES *snapdump_prop;
2206 dma_addr_t snapdump_prop_h;
2207
2184 void *crash_buf[MAX_CRASH_DUMP_SIZE]; 2208 void *crash_buf[MAX_CRASH_DUMP_SIZE];
2185 unsigned int fw_crash_buffer_size; 2209 unsigned int fw_crash_buffer_size;
2186 unsigned int fw_crash_state; 2210 unsigned int fw_crash_state;
@@ -2250,7 +2274,9 @@ struct megasas_instance {
2250 struct megasas_instance_template *instancet; 2274 struct megasas_instance_template *instancet;
2251 struct tasklet_struct isr_tasklet; 2275 struct tasklet_struct isr_tasklet;
2252 struct work_struct work_init; 2276 struct work_struct work_init;
2253 struct work_struct crash_init; 2277 struct delayed_work fw_fault_work;
2278 struct workqueue_struct *fw_fault_work_q;
2279 char fault_handler_work_q_name[48];
2254 2280
2255 u8 flag; 2281 u8 flag;
2256 u8 unload; 2282 u8 unload;
@@ -2310,6 +2336,7 @@ struct megasas_instance {
2310 bool support_nvme_passthru; 2336 bool support_nvme_passthru;
2311 u8 task_abort_tmo; 2337 u8 task_abort_tmo;
2312 u8 max_reset_tmo; 2338 u8 max_reset_tmo;
2339 u8 snapdump_wait_time;
2313}; 2340};
2314struct MR_LD_VF_MAP { 2341struct MR_LD_VF_MAP {
2315 u32 size; 2342 u32 size;
@@ -2386,9 +2413,9 @@ struct megasas_instance_template {
2386 void (*enable_intr)(struct megasas_instance *); 2413 void (*enable_intr)(struct megasas_instance *);
2387 void (*disable_intr)(struct megasas_instance *); 2414 void (*disable_intr)(struct megasas_instance *);
2388 2415
2389 int (*clear_intr)(struct megasas_register_set __iomem *); 2416 int (*clear_intr)(struct megasas_instance *);
2390 2417
2391 u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *); 2418 u32 (*read_fw_status_reg)(struct megasas_instance *);
2392 int (*adp_reset)(struct megasas_instance *, \ 2419 int (*adp_reset)(struct megasas_instance *, \
2393 struct megasas_register_set __iomem *); 2420 struct megasas_register_set __iomem *);
2394 int (*check_reset)(struct megasas_instance *, \ 2421 int (*check_reset)(struct megasas_instance *, \
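[Note: this instance-template change is what makes the Aero readl workaround (see megasas_readl() below) reachable from every implementation: clear_intr and read_fw_status_reg now take the adapter soft state rather than a bare register pointer, so each variant can consult instance->adapter_type and other per-adapter data, not just raw MMIO. The general refactoring pattern, sketched with placeholder types:

    struct regs { unsigned int status; };

    struct adapter {
        struct regs *reg_set;
        int adapter_type;
    };

    struct adapter_ops {
        /* before: unsigned int (*read_fw_status)(struct regs *); */
        unsigned int (*read_fw_status)(struct adapter *);
    };

    static unsigned int read_fw_status_generic(struct adapter *a)
    {
        /* implementations derive the registers from the context and can
         * also branch on a->adapter_type for errata handling */
        return a->reg_set->status;
    }
]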
@@ -2535,11 +2562,11 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
2535 bool is_target_prop); 2562 bool is_target_prop);
2536int megasas_get_target_prop(struct megasas_instance *instance, 2563int megasas_get_target_prop(struct megasas_instance *instance,
2537 struct scsi_device *sdev); 2564 struct scsi_device *sdev);
2565void megasas_get_snapdump_properties(struct megasas_instance *instance);
2538 2566
2539int megasas_set_crash_dump_params(struct megasas_instance *instance, 2567int megasas_set_crash_dump_params(struct megasas_instance *instance,
2540 u8 crash_buf_state); 2568 u8 crash_buf_state);
2541void megasas_free_host_crash_buffer(struct megasas_instance *instance); 2569void megasas_free_host_crash_buffer(struct megasas_instance *instance);
2542void megasas_fusion_crash_dump_wq(struct work_struct *work);
2543 2570
2544void megasas_return_cmd_fusion(struct megasas_instance *instance, 2571void megasas_return_cmd_fusion(struct megasas_instance *instance,
2545 struct megasas_cmd_fusion *cmd); 2572 struct megasas_cmd_fusion *cmd);
@@ -2560,6 +2587,9 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
2560u32 mega_mod64(u64 dividend, u32 divisor); 2587u32 mega_mod64(u64 dividend, u32 divisor);
2561int megasas_alloc_fusion_context(struct megasas_instance *instance); 2588int megasas_alloc_fusion_context(struct megasas_instance *instance);
2562void megasas_free_fusion_context(struct megasas_instance *instance); 2589void megasas_free_fusion_context(struct megasas_instance *instance);
2590int megasas_fusion_start_watchdog(struct megasas_instance *instance);
2591void megasas_fusion_stop_watchdog(struct megasas_instance *instance);
2592
2563void megasas_set_dma_settings(struct megasas_instance *instance, 2593void megasas_set_dma_settings(struct megasas_instance *instance,
2564 struct megasas_dcmd_frame *dcmd, 2594 struct megasas_dcmd_frame *dcmd,
2565 dma_addr_t dma_addr, u32 dma_len); 2595 dma_addr_t dma_addr, u32 dma_len);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9b90c716f06d..f7bdd783360a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2,7 +2,8 @@
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2003-2013 LSI Corporation 4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies 5 * Copyright (c) 2013-2016 Avago Technologies
6 * Copyright (c) 2016-2018 Broadcom Inc.
6 * 7 *
7 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -17,18 +18,15 @@
17 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>. 19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * 20 *
20 * Authors: Avago Technologies 21 * Authors: Broadcom Inc.
21 * Sreenivas Bagalkote 22 * Sreenivas Bagalkote
22 * Sumant Patro 23 * Sumant Patro
23 * Bo Yang 24 * Bo Yang
24 * Adam Radford 25 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com> 26 * Kashyap Desai <kashyap.desai@broadcom.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com> 27 * Sumit Saxena <sumit.saxena@broadcom.com>
27 * 28 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com 29 * Send feedback to: megaraidlinux.pdl@broadcom.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */ 30 */
33 31
34#include <linux/kernel.h> 32#include <linux/kernel.h>
@@ -87,8 +85,7 @@ MODULE_PARM_DESC(throttlequeuedepth,
87 85
88unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME; 86unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
89module_param(resetwaittime, int, S_IRUGO); 87module_param(resetwaittime, int, S_IRUGO);
90MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout " 88MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
91 "before resetting adapter. Default: 180");
92 89
93int smp_affinity_enable = 1; 90int smp_affinity_enable = 1;
94module_param(smp_affinity_enable, int, S_IRUGO); 91module_param(smp_affinity_enable, int, S_IRUGO);
@@ -96,7 +93,7 @@ MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Defau
96 93
97int rdpq_enable = 1; 94int rdpq_enable = 1;
98module_param(rdpq_enable, int, S_IRUGO); 95module_param(rdpq_enable, int, S_IRUGO);
99MODULE_PARM_DESC(rdpq_enable, " Allocate reply queue in chunks for large queue depth enable/disable Default: disable(0)"); 96MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
100 97
101unsigned int dual_qdepth_disable; 98unsigned int dual_qdepth_disable;
102module_param(dual_qdepth_disable, int, S_IRUGO); 99module_param(dual_qdepth_disable, int, S_IRUGO);
@@ -108,8 +105,8 @@ MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See
108 105
109MODULE_LICENSE("GPL"); 106MODULE_LICENSE("GPL");
110MODULE_VERSION(MEGASAS_VERSION); 107MODULE_VERSION(MEGASAS_VERSION);
111MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com"); 108MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
112MODULE_DESCRIPTION("Avago MegaRAID SAS Driver"); 109MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
113 110
114int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 111int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
115static int megasas_get_pd_list(struct megasas_instance *instance); 112static int megasas_get_pd_list(struct megasas_instance *instance);
@@ -165,6 +162,10 @@ static struct pci_device_id megasas_pci_table[] = {
165 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, 162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, 163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
167 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, 164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
165 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
167 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
168 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
168 {} 169 {}
169}; 170};
170 171
@@ -189,7 +190,7 @@ void
189megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, 190megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
190 u8 alt_status); 191 u8 alt_status);
191static u32 192static u32
192megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs); 193megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
193static int 194static int
194megasas_adp_reset_gen2(struct megasas_instance *instance, 195megasas_adp_reset_gen2(struct megasas_instance *instance,
195 struct megasas_register_set __iomem *reg_set); 196 struct megasas_register_set __iomem *reg_set);
@@ -219,6 +220,28 @@ megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
219static inline void 220static inline void
220megasas_init_ctrl_params(struct megasas_instance *instance); 221megasas_init_ctrl_params(struct megasas_instance *instance);
221 222
223u32 megasas_readl(struct megasas_instance *instance,
224 const volatile void __iomem *addr)
225{
226 u32 i = 0, ret_val;
227 /*
228 * Due to a HW errata in Aero controllers, reads to certain
229 * Fusion registers could intermittently return all zeroes.
230 * This behavior is transient in nature and subsequent reads will
231 * return valid value. As a workaround in driver, retry readl for
232 * upto three times until a non-zero value is read.
233 */
234 if (instance->adapter_type == AERO_SERIES) {
235 do {
236 ret_val = readl(addr);
237 i++;
238 } while (ret_val == 0 && i < 3);
239 return ret_val;
240 } else {
241 return readl(addr);
242 }
243}
244
222/** 245/**
223 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs 246 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
224 * @instance: Adapter soft state 247 * @instance: Adapter soft state
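[Note: megasas_readl() above is the workaround the rest of this patch plumbs through. On Aero controllers some register reads can transiently return all zeroes, so the read is retried up to three times before a zero is believed; a genuinely zero register still comes back after the third attempt. A user-space model of the same retry loop, with mmio_read() standing in for readl():

    #include <stdint.h>

    static uint32_t mmio_read(const volatile void *addr)
    {
        return *(const volatile uint32_t *)addr;  /* stand-in for readl() */
    }

    /* Retry a read that may transiently return 0 (HW errata): up to 3
     * attempts, returning whatever the last read produced. */
    static uint32_t read_retry(const volatile void *addr, int has_erratum)
    {
        uint32_t val;
        int i = 0;

        if (!has_erratum)
            return mmio_read(addr);
        do {
            val = mmio_read(addr);
        } while (val == 0 && ++i < 3);
        return val;
    }
]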
@@ -419,19 +442,21 @@ megasas_disable_intr_xscale(struct megasas_instance *instance)
419 * @regs: MFI register set 442 * @regs: MFI register set
420 */ 443 */
421static u32 444static u32
422megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs) 445megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
423{ 446{
424 return readl(&(regs)->outbound_msg_0); 447 return readl(&instance->reg_set->outbound_msg_0);
425} 448}
426/** 449/**
427 * megasas_clear_interrupt_xscale - Check & clear interrupt 450 * megasas_clear_interrupt_xscale - Check & clear interrupt
428 * @regs: MFI register set 451 * @regs: MFI register set
429 */ 452 */
430static int 453static int
431megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) 454megasas_clear_intr_xscale(struct megasas_instance *instance)
432{ 455{
433 u32 status; 456 u32 status;
434 u32 mfiStatus = 0; 457 u32 mfiStatus = 0;
458 struct megasas_register_set __iomem *regs;
459 regs = instance->reg_set;
435 460
436 /* 461 /*
437 * Check if it is our interrupt 462 * Check if it is our interrupt
@@ -596,9 +621,9 @@ megasas_disable_intr_ppc(struct megasas_instance *instance)
596 * @regs: MFI register set 621 * @regs: MFI register set
597 */ 622 */
598static u32 623static u32
599megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs) 624megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
600{ 625{
601 return readl(&(regs)->outbound_scratch_pad); 626 return readl(&instance->reg_set->outbound_scratch_pad_0);
602} 627}
603 628
604/** 629/**
@@ -606,9 +631,11 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
606 * @regs: MFI register set 631 * @regs: MFI register set
607 */ 632 */
608static int 633static int
609megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) 634megasas_clear_intr_ppc(struct megasas_instance *instance)
610{ 635{
611 u32 status, mfiStatus = 0; 636 u32 status, mfiStatus = 0;
637 struct megasas_register_set __iomem *regs;
638 regs = instance->reg_set;
612 639
613 /* 640 /*
614 * Check if it is our interrupt 641 * Check if it is our interrupt
@@ -721,9 +748,9 @@ megasas_disable_intr_skinny(struct megasas_instance *instance)
721 * @regs: MFI register set 748 * @regs: MFI register set
722 */ 749 */
723static u32 750static u32
724megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs) 751megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
725{ 752{
726 return readl(&(regs)->outbound_scratch_pad); 753 return readl(&instance->reg_set->outbound_scratch_pad_0);
727} 754}
728 755
729/** 756/**
@@ -731,10 +758,12 @@ megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
731 * @regs: MFI register set 758 * @regs: MFI register set
732 */ 759 */
733static int 760static int
734megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) 761megasas_clear_intr_skinny(struct megasas_instance *instance)
735{ 762{
736 u32 status; 763 u32 status;
737 u32 mfiStatus = 0; 764 u32 mfiStatus = 0;
765 struct megasas_register_set __iomem *regs;
766 regs = instance->reg_set;
738 767
739 /* 768 /*
740 * Check if it is our interrupt 769 * Check if it is our interrupt
@@ -748,7 +777,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
748 /* 777 /*
749 * Check if it is our interrupt 778 * Check if it is our interrupt
750 */ 779 */
751 if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) == 780 if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
752 MFI_STATE_FAULT) { 781 MFI_STATE_FAULT) {
753 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; 782 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
754 } else 783 } else
@@ -866,9 +895,9 @@ megasas_disable_intr_gen2(struct megasas_instance *instance)
866 * @regs: MFI register set 895 * @regs: MFI register set
867 */ 896 */
868static u32 897static u32
869megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs) 898megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
870{ 899{
871 return readl(&(regs)->outbound_scratch_pad); 900 return readl(&instance->reg_set->outbound_scratch_pad_0);
872} 901}
873 902
874/** 903/**
@@ -876,10 +905,12 @@ megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
876 * @regs: MFI register set 905 * @regs: MFI register set
877 */ 906 */
878static int 907static int
879megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) 908megasas_clear_intr_gen2(struct megasas_instance *instance)
880{ 909{
881 u32 status; 910 u32 status;
882 u32 mfiStatus = 0; 911 u32 mfiStatus = 0;
912 struct megasas_register_set __iomem *regs;
913 regs = instance->reg_set;
883 914
884 /* 915 /*
885 * Check if it is our interrupt 916 * Check if it is our interrupt
@@ -2079,9 +2110,11 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
2079 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2110 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2080 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2111 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2081 (instance->adapter_type != MFI_SERIES)) { 2112 (instance->adapter_type != MFI_SERIES)) {
2082 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2113 if (!instance->requestorId) {
2083 /* Flush */ 2114 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2084 readl(&instance->reg_set->doorbell); 2115 /* Flush */
2116 readl(&instance->reg_set->doorbell);
2117 }
2085 if (instance->requestorId && instance->peerIsPresent) 2118 if (instance->requestorId && instance->peerIsPresent)
2086 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 2119 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2087 } else { 2120 } else {
@@ -2682,7 +2715,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2682 2715
2683 i = 0; 2716 i = 0;
2684 outstanding = atomic_read(&instance->fw_outstanding); 2717 outstanding = atomic_read(&instance->fw_outstanding);
2685 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2718 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2686 2719
2687 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2720 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2688 goto no_outstanding; 2721 goto no_outstanding;
@@ -2711,7 +2744,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2711 2744
2712 outstanding = atomic_read(&instance->fw_outstanding); 2745 outstanding = atomic_read(&instance->fw_outstanding);
2713 2746
2714 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 2747 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2715 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL))) 2748 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2716 goto no_outstanding; 2749 goto no_outstanding;
2717 } 2750 }
@@ -3186,7 +3219,6 @@ static struct scsi_host_template megasas_template = {
3186 .eh_timed_out = megasas_reset_timer, 3219 .eh_timed_out = megasas_reset_timer,
3187 .shost_attrs = megaraid_host_attrs, 3220 .shost_attrs = megaraid_host_attrs,
3188 .bios_param = megasas_bios_param, 3221 .bios_param = megasas_bios_param,
3189 .use_clustering = ENABLE_CLUSTERING,
3190 .change_queue_depth = scsi_change_queue_depth, 3222 .change_queue_depth = scsi_change_queue_depth,
3191 .no_write_same = 1, 3223 .no_write_same = 1,
3192}; 3224};
@@ -3278,6 +3310,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3278 megasas_complete_int_cmd(instance, cmd); 3310 megasas_complete_int_cmd(instance, cmd);
3279 break; 3311 break;
3280 } 3312 }
3313 /* fall through */
3281 3314
3282 case MFI_CMD_LD_READ: 3315 case MFI_CMD_LD_READ:
3283 case MFI_CMD_LD_WRITE: 3316 case MFI_CMD_LD_WRITE:
@@ -3665,9 +3698,8 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
3665 return IRQ_HANDLED; 3698 return IRQ_HANDLED;
3666 } 3699 }
3667 3700
3668 if ((mfiStatus = instance->instancet->clear_intr( 3701 mfiStatus = instance->instancet->clear_intr(instance);
3669 instance->reg_set) 3702 if (mfiStatus == 0) {
3670 ) == 0) {
3671 /* Hardware may not set outbound_intr_status in MSI-X mode */ 3703 /* Hardware may not set outbound_intr_status in MSI-X mode */
3672 if (!instance->msix_vectors) 3704 if (!instance->msix_vectors)
3673 return IRQ_NONE; 3705 return IRQ_NONE;
@@ -3677,7 +3709,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
3677 3709
3678 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { 3710 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3679 fw_state = instance->instancet->read_fw_status_reg( 3711 fw_state = instance->instancet->read_fw_status_reg(
3680 instance->reg_set) & MFI_STATE_MASK; 3712 instance) & MFI_STATE_MASK;
3681 3713
3682 if (fw_state != MFI_STATE_FAULT) { 3714 if (fw_state != MFI_STATE_FAULT) {
3683 dev_notice(&instance->pdev->dev, "fw state:%x\n", 3715 dev_notice(&instance->pdev->dev, "fw state:%x\n",
@@ -3760,7 +3792,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3760 u32 cur_state; 3792 u32 cur_state;
3761 u32 abs_state, curr_abs_state; 3793 u32 abs_state, curr_abs_state;
3762 3794
3763 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set); 3795 abs_state = instance->instancet->read_fw_status_reg(instance);
3764 fw_state = abs_state & MFI_STATE_MASK; 3796 fw_state = abs_state & MFI_STATE_MASK;
3765 3797
3766 if (fw_state != MFI_STATE_READY) 3798 if (fw_state != MFI_STATE_READY)
@@ -3832,7 +3864,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3832 3864
3833 if (instance->adapter_type != MFI_SERIES) { 3865 if (instance->adapter_type != MFI_SERIES) {
3834 for (i = 0; i < (10 * 1000); i += 20) { 3866 for (i = 0; i < (10 * 1000); i += 20) {
3835 if (readl( 3867 if (megasas_readl(
3868 instance,
3836 &instance-> 3869 &instance->
3837 reg_set-> 3870 reg_set->
3838 doorbell) & 1) 3871 doorbell) & 1)
@@ -3891,12 +3924,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3891 /* 3924 /*
3892 * The cur_state should not last for more than max_wait secs 3925 * The cur_state should not last for more than max_wait secs
3893 */ 3926 */
3894 for (i = 0; i < (max_wait * 1000); i++) { 3927 for (i = 0; i < max_wait; i++) {
3895 curr_abs_state = instance->instancet-> 3928 curr_abs_state = instance->instancet->
3896 read_fw_status_reg(instance->reg_set); 3929 read_fw_status_reg(instance);
3897 3930
3898 if (abs_state == curr_abs_state) { 3931 if (abs_state == curr_abs_state) {
3899 msleep(1); 3932 msleep(1000);
3900 } else 3933 } else
3901 break; 3934 break;
3902 } 3935 }
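[Note: the loop change keeps the overall timeout at max_wait seconds but polls the firmware state once per second instead of once per millisecond (the old bound was max_wait * 1000 iterations of msleep(1)), cutting register reads during long state transitions by three orders of magnitude. The resulting shape, with a stubbed-out state accessor:

    #include <stdint.h>
    #include <unistd.h>

    static int polls;
    static uint32_t read_fw_state(void)
    {
        /* stub: pretend the state changes after three polls */
        return polls++ < 3 ? 0xB0 : 0xC0;
    }

    /* Poll once per second for up to max_wait seconds. */
    static int wait_for_state_change(uint32_t old_state, int max_wait)
    {
        for (int i = 0; i < max_wait; i++) {
            if (read_fw_state() != old_state)
                return 0;      /* state moved on */
            sleep(1);          /* analogous to msleep(1000) */
        }
        return -1;             /* still stuck after max_wait seconds */
    }

    int main(void)
    {
        return wait_for_state_change(0xB0, 180);
    }
]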
@@ -4634,9 +4667,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4634 } 4667 }
4635 4668
4636 dev_info(&instance->pdev->dev, 4669 dev_info(&instance->pdev->dev,
4637 "firmware type\t: %s\n", 4670 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
4638 instance->supportmax256vd ? "Extended VD(240 VD)firmware" : 4671 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
4639 "Legacy(64 VD) firmware"); 4672 instance->ctrl_info_buf->max_lds);
4640 4673
4641 if (instance->max_raid_mapsize) { 4674 if (instance->max_raid_mapsize) {
4642 ventura_map_sz = instance->max_raid_mapsize * 4675 ventura_map_sz = instance->max_raid_mapsize *
@@ -4661,6 +4694,87 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4661 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); 4694 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4662} 4695}
4663 4696
4697/*
4698 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
4699 * dcmd.hdr.length - number of bytes to read
4700 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES
4701 * Desc: Fill in snapdump properties
4702 * Status: MFI_STAT_OK- Command successful
4703 */
4704void megasas_get_snapdump_properties(struct megasas_instance *instance)
4705{
4706 int ret = 0;
4707 struct megasas_cmd *cmd;
4708 struct megasas_dcmd_frame *dcmd;
4709 struct MR_SNAPDUMP_PROPERTIES *ci;
4710 dma_addr_t ci_h = 0;
4711
4712 ci = instance->snapdump_prop;
4713 ci_h = instance->snapdump_prop_h;
4714
4715 if (!ci)
4716 return;
4717
4718 cmd = megasas_get_cmd(instance);
4719
4720 if (!cmd) {
4721 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
4722 return;
4723 }
4724
4725 dcmd = &cmd->frame->dcmd;
4726
4727 memset(ci, 0, sizeof(*ci));
4728 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4729
4730 dcmd->cmd = MFI_CMD_DCMD;
4731 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4732 dcmd->sge_count = 1;
4733 dcmd->flags = MFI_FRAME_DIR_READ;
4734 dcmd->timeout = 0;
4735 dcmd->pad_0 = 0;
4736 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
4737 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
4738
4739 megasas_set_dma_settings(instance, dcmd, ci_h,
4740 sizeof(struct MR_SNAPDUMP_PROPERTIES));
4741
4742 if (!instance->mask_interrupts) {
4743 ret = megasas_issue_blocked_cmd(instance, cmd,
4744 MFI_IO_TIMEOUT_SECS);
4745 } else {
4746 ret = megasas_issue_polled(instance, cmd);
4747 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4748 }
4749
4750 switch (ret) {
4751 case DCMD_SUCCESS:
4752 instance->snapdump_wait_time =
4753 min_t(u8, ci->trigger_min_num_sec_before_ocr,
4754 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
4755 break;
4756
4757 case DCMD_TIMEOUT:
4758 switch (dcmd_timeout_ocr_possible(instance)) {
4759 case INITIATE_OCR:
4760 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4761 megasas_reset_fusion(instance->host,
4762 MFI_IO_TIMEOUT_OCR);
4763 break;
4764 case KILL_ADAPTER:
4765 megaraid_sas_kill_hba(instance);
4766 break;
4767 case IGNORE_TIMEOUT:
4768 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4769 __func__, __LINE__);
4770 break;
4771 }
4772 }
4773
4774 if (ret != DCMD_TIMEOUT)
4775 megasas_return_cmd(instance, cmd);
4776}
4777
4664/** 4778/**
4665 * megasas_get_controller_info - Returns FW's controller structure 4779 * megasas_get_controller_info - Returns FW's controller structure
4666 * @instance: Adapter soft state 4780 * @instance: Adapter soft state
@@ -4720,6 +4834,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4720 * CPU endianness format. 4834 * CPU endianness format.
4721 */ 4835 */
4722 le32_to_cpus((u32 *)&ci->properties.OnOffProperties); 4836 le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
4837 le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
4723 le32_to_cpus((u32 *)&ci->adapterOperations2); 4838 le32_to_cpus((u32 *)&ci->adapterOperations2);
4724 le32_to_cpus((u32 *)&ci->adapterOperations3); 4839 le32_to_cpus((u32 *)&ci->adapterOperations3);
4725 le16_to_cpus((u16 *)&ci->adapter_operations4); 4840 le16_to_cpus((u16 *)&ci->adapter_operations4);
@@ -4741,6 +4856,11 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4741 4856
4742 /*Check whether controller is iMR or MR */ 4857 /*Check whether controller is iMR or MR */
4743 instance->is_imr = (ci->memory_size ? 0 : 1); 4858 instance->is_imr = (ci->memory_size ? 0 : 1);
4859
4860 instance->snapdump_wait_time =
4861 (ci->properties.on_off_properties2.enable_snap_dump ?
4862 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
4863
4744 dev_info(&instance->pdev->dev, 4864 dev_info(&instance->pdev->dev,
4745 "controller type\t: %s(%dMB)\n", 4865 "controller type\t: %s(%dMB)\n",
4746 instance->is_imr ? "iMR" : "MR", 4866 instance->is_imr ? "iMR" : "MR",
@@ -4942,16 +5062,13 @@ fail_fw_init:
4942static u32 5062static u32
4943megasas_init_adapter_mfi(struct megasas_instance *instance) 5063megasas_init_adapter_mfi(struct megasas_instance *instance)
4944{ 5064{
4945 struct megasas_register_set __iomem *reg_set;
4946 u32 context_sz; 5065 u32 context_sz;
4947 u32 reply_q_sz; 5066 u32 reply_q_sz;
4948 5067
4949 reg_set = instance->reg_set;
4950
4951 /* 5068 /*
4952 * Get various operational parameters from status register 5069 * Get various operational parameters from status register
4953 */ 5070 */
4954 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 5071 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
4955 /* 5072 /*
4956 * Reduce the max supported cmds by 1. This is to ensure that the 5073 * Reduce the max supported cmds by 1. This is to ensure that the
4957 * reply_q_sz (1 more than the max cmd that driver may send) 5074 * reply_q_sz (1 more than the max cmd that driver may send)
@@ -4959,7 +5076,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
4959 */ 5076 */
4960 instance->max_fw_cmds = instance->max_fw_cmds-1; 5077 instance->max_fw_cmds = instance->max_fw_cmds-1;
4961 instance->max_mfi_cmds = instance->max_fw_cmds; 5078 instance->max_mfi_cmds = instance->max_fw_cmds;
4962 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 5079 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
4963 0x10; 5080 0x10;
4964 /* 5081 /*
4965 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands 5082 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
@@ -5015,7 +5132,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
5015 5132
5016 instance->fw_support_ieee = 0; 5133 instance->fw_support_ieee = 0;
5017 instance->fw_support_ieee = 5134 instance->fw_support_ieee =
5018 (instance->instancet->read_fw_status_reg(reg_set) & 5135 (instance->instancet->read_fw_status_reg(instance) &
5019 0x04000000); 5136 0x04000000);
5020 5137
5021 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d", 5138 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
@@ -5213,14 +5330,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
5213{ 5330{
5214 u32 max_sectors_1; 5331 u32 max_sectors_1;
5215 u32 max_sectors_2, tmp_sectors, msix_enable; 5332 u32 max_sectors_2, tmp_sectors, msix_enable;
5216 u32 scratch_pad_2, scratch_pad_3, scratch_pad_4; 5333 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5217 resource_size_t base_addr; 5334 resource_size_t base_addr;
5218 struct megasas_register_set __iomem *reg_set;
5219 struct megasas_ctrl_info *ctrl_info = NULL; 5335 struct megasas_ctrl_info *ctrl_info = NULL;
5220 unsigned long bar_list; 5336 unsigned long bar_list;
5221 int i, j, loop, fw_msix_count = 0; 5337 int i, j, loop, fw_msix_count = 0;
5222 struct IOV_111 *iovPtr; 5338 struct IOV_111 *iovPtr;
5223 struct fusion_context *fusion; 5339 struct fusion_context *fusion;
5340 bool do_adp_reset = true;
5224 5341
5225 fusion = instance->ctrl_context; 5342 fusion = instance->ctrl_context;
5226 5343
@@ -5241,8 +5358,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
5241 goto fail_ioremap; 5358 goto fail_ioremap;
5242 } 5359 }
5243 5360
5244 reg_set = instance->reg_set;
5245
5246 if (instance->adapter_type != MFI_SERIES) 5361 if (instance->adapter_type != MFI_SERIES)
5247 instance->instancet = &megasas_instance_template_fusion; 5362 instance->instancet = &megasas_instance_template_fusion;
5248 else { 5363 else {
@@ -5269,19 +5384,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
5269 } 5384 }
5270 5385
5271 if (megasas_transition_to_ready(instance, 0)) { 5386 if (megasas_transition_to_ready(instance, 0)) {
5272 atomic_set(&instance->fw_reset_no_pci_access, 1); 5387 if (instance->adapter_type >= INVADER_SERIES) {
5273 instance->instancet->adp_reset 5388 status_reg = instance->instancet->read_fw_status_reg(
5274 (instance, instance->reg_set); 5389 instance);
5275 atomic_set(&instance->fw_reset_no_pci_access, 0); 5390 do_adp_reset = status_reg & MFI_RESET_ADAPTER;
5276 dev_info(&instance->pdev->dev, 5391 }
5277 "FW restarted successfully from %s!\n",
5278 __func__);
5279 5392
5280 /*waitting for about 30 second before retry*/ 5393 if (do_adp_reset) {
5281 ssleep(30); 5394 atomic_set(&instance->fw_reset_no_pci_access, 1);
5395 instance->instancet->adp_reset
5396 (instance, instance->reg_set);
5397 atomic_set(&instance->fw_reset_no_pci_access, 0);
5398 dev_info(&instance->pdev->dev,
5399 "FW restarted successfully from %s!\n",
5400 __func__);
5401
5402 /*waiting for about 30 second before retry*/
5403 ssleep(30);
5282 5404
5283 if (megasas_transition_to_ready(instance, 0)) 5405 if (megasas_transition_to_ready(instance, 0))
5406 goto fail_ready_state;
5407 } else {
5284 goto fail_ready_state; 5408 goto fail_ready_state;
5409 }
5285 } 5410 }
5286 5411
5287 megasas_init_ctrl_params(instance); 5412 megasas_init_ctrl_params(instance);
@@ -5297,38 +5422,57 @@ static int megasas_init_fw(struct megasas_instance *instance)
5297 5422
5298 fusion = instance->ctrl_context; 5423 fusion = instance->ctrl_context;
5299 5424
5300 if (instance->adapter_type == VENTURA_SERIES) { 5425 if (instance->adapter_type >= VENTURA_SERIES) {
5301 scratch_pad_3 = 5426 scratch_pad_2 =
5302 readl(&instance->reg_set->outbound_scratch_pad_3); 5427 megasas_readl(instance,
5303 instance->max_raid_mapsize = ((scratch_pad_3 >> 5428 &instance->reg_set->outbound_scratch_pad_2);
5429 instance->max_raid_mapsize = ((scratch_pad_2 >>
5304 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & 5430 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5305 MR_MAX_RAID_MAP_SIZE_MASK); 5431 MR_MAX_RAID_MAP_SIZE_MASK);
5306 } 5432 }
5307 5433
5308 /* Check if MSI-X is supported while in ready state */ 5434 /* Check if MSI-X is supported while in ready state */
5309 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 5435 msix_enable = (instance->instancet->read_fw_status_reg(instance) &
5310 0x4000000) >> 0x1a; 5436 0x4000000) >> 0x1a;
5311 if (msix_enable && !msix_disable) { 5437 if (msix_enable && !msix_disable) {
5312 int irq_flags = PCI_IRQ_MSIX; 5438 int irq_flags = PCI_IRQ_MSIX;
5313 5439
5314 scratch_pad_2 = readl 5440 scratch_pad_1 = megasas_readl
5315 (&instance->reg_set->outbound_scratch_pad_2); 5441 (instance, &instance->reg_set->outbound_scratch_pad_1);
5316 /* Check max MSI-X vectors */ 5442 /* Check max MSI-X vectors */
5317 if (fusion) { 5443 if (fusion) {
5318 if (instance->adapter_type == THUNDERBOLT_SERIES) { 5444 if (instance->adapter_type == THUNDERBOLT_SERIES) {
5319 /* Thunderbolt Series*/ 5445 /* Thunderbolt Series*/
5320 instance->msix_vectors = (scratch_pad_2 5446 instance->msix_vectors = (scratch_pad_1
5321 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5447 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5322 fw_msix_count = instance->msix_vectors; 5448 fw_msix_count = instance->msix_vectors;
5323 } else { /* Invader series supports more than 8 MSI-x vectors*/ 5449 } else {
5324 instance->msix_vectors = ((scratch_pad_2 5450 instance->msix_vectors = ((scratch_pad_1
5325 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 5451 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5326 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 5452 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5327 if (instance->msix_vectors > 16) 5453
5328 instance->msix_combined = true; 5454 /*
5455 * For Invader series, > 8 MSI-x vectors
5456 * supported by FW/HW implies combined
5457 * reply queue mode is enabled.
5458 * For Ventura series, > 16 MSI-x vectors
5459 * supported by FW/HW implies combined
5460 * reply queue mode is enabled.
5461 */
5462 switch (instance->adapter_type) {
5463 case INVADER_SERIES:
5464 if (instance->msix_vectors > 8)
5465 instance->msix_combined = true;
5466 break;
5467 case AERO_SERIES:
5468 case VENTURA_SERIES:
5469 if (instance->msix_vectors > 16)
5470 instance->msix_combined = true;
5471 break;
5472 }
5329 5473
5330 if (rdpq_enable) 5474 if (rdpq_enable)
5331 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 5475 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
5332 1 : 0; 5476 1 : 0;
5333 fw_msix_count = instance->msix_vectors; 5477 fw_msix_count = instance->msix_vectors;
5334 /* Save 1-15 reply post index address to local memory 5478 /* Save 1-15 reply post index address to local memory
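[editor's note] The switch added above encodes the combined reply queue detection this series fixes: more than 8 firmware-advertised MSI-x vectors implies combined mode on Invader, while Ventura and Aero require more than 16. A compilable sketch of the same decision table (enum values simplified):

#include <stdbool.h>

enum series { INVADER, VENTURA, AERO };	/* simplified stand-ins */

static bool msix_combined(enum series s, int msix_vectors)
{
	switch (s) {
	case INVADER:
		return msix_vectors > 8;	/* > 8 => combined reply queues */
	case VENTURA:
	case AERO:
		return msix_vectors > 16;	/* > 16 => combined reply queues */
	}
	return false;
}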
@@ -5377,7 +5521,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5377 if (!instance->msix_vectors) { 5521 if (!instance->msix_vectors) {
5378 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5522 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5379 if (i < 0) 5523 if (i < 0)
5380 goto fail_setup_irqs; 5524 goto fail_init_adapter;
5381 } 5525 }
5382 5526
5383 megasas_setup_reply_map(instance); 5527 megasas_setup_reply_map(instance);
@@ -5403,13 +5547,14 @@ static int megasas_init_fw(struct megasas_instance *instance)
5403 if (instance->instancet->init_adapter(instance)) 5547 if (instance->instancet->init_adapter(instance))
5404 goto fail_init_adapter; 5548 goto fail_init_adapter;
5405 5549
5406 if (instance->adapter_type == VENTURA_SERIES) { 5550 if (instance->adapter_type >= VENTURA_SERIES) {
5407 scratch_pad_4 = 5551 scratch_pad_3 =
5408 readl(&instance->reg_set->outbound_scratch_pad_4); 5552 megasas_readl(instance,
5409 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= 5553 &instance->reg_set->outbound_scratch_pad_3);
5554 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
5410 MR_DEFAULT_NVME_PAGE_SHIFT) 5555 MR_DEFAULT_NVME_PAGE_SHIFT)
5411 instance->nvme_page_size = 5556 instance->nvme_page_size =
5412 (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK)); 5557 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
5413 5558
5414 dev_info(&instance->pdev->dev, 5559 dev_info(&instance->pdev->dev,
5415 "NVME page size\t: (%d)\n", instance->nvme_page_size); 5560 "NVME page size\t: (%d)\n", instance->nvme_page_size);
@@ -5439,7 +5584,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5439 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5584 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5440 5585
5441 /* stream detection initialization */ 5586 /* stream detection initialization */
5442 if (instance->adapter_type == VENTURA_SERIES) { 5587 if (instance->adapter_type >= VENTURA_SERIES) {
5443 fusion->stream_detect_by_ld = 5588 fusion->stream_detect_by_ld =
5444 kcalloc(MAX_LOGICAL_DRIVES_EXT, 5589 kcalloc(MAX_LOGICAL_DRIVES_EXT,
5445 sizeof(struct LD_STREAM_DETECT *), 5590 sizeof(struct LD_STREAM_DETECT *),
@@ -5539,6 +5684,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
5539 instance->crash_dump_buf = NULL; 5684 instance->crash_dump_buf = NULL;
5540 } 5685 }
5541 5686
5687 if (instance->snapdump_wait_time) {
5688 megasas_get_snapdump_properties(instance);
5689 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
5690 instance->snapdump_wait_time);
5691 }
5542 5692
5543 dev_info(&instance->pdev->dev, 5693 dev_info(&instance->pdev->dev,
5544 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", 5694 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
@@ -5553,7 +5703,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
5553 dev_info(&instance->pdev->dev, "jbod sync map : %s\n", 5703 dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
5554 instance->use_seqnum_jbod_fp ? "yes" : "no"); 5704 instance->use_seqnum_jbod_fp ? "yes" : "no");
5555 5705
5556
5557 instance->max_sectors_per_req = instance->max_num_sge * 5706 instance->max_sectors_per_req = instance->max_num_sge *
5558 SGE_BUFFER_SIZE / 512; 5707 SGE_BUFFER_SIZE / 512;
5559 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) 5708 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
@@ -5576,19 +5725,32 @@ static int megasas_init_fw(struct megasas_instance *instance)
5576 5725
5577 /* Launch SR-IOV heartbeat timer */ 5726 /* Launch SR-IOV heartbeat timer */
5578 if (instance->requestorId) { 5727 if (instance->requestorId) {
5579 if (!megasas_sriov_start_heartbeat(instance, 1)) 5728 if (!megasas_sriov_start_heartbeat(instance, 1)) {
5580 megasas_start_timer(instance); 5729 megasas_start_timer(instance);
5581 else 5730 } else {
5582 instance->skip_heartbeat_timer_del = 1; 5731 instance->skip_heartbeat_timer_del = 1;
5732 goto fail_get_ld_pd_list;
5733 }
5583 } 5734 }
5584 5735
5736 /*
5737 * Create and start watchdog thread which will monitor
5738 * controller state every 1 sec and trigger OCR when
5739 * it enters fault state
5740 */
5741 if (instance->adapter_type != MFI_SERIES)
5742 if (megasas_fusion_start_watchdog(instance) != SUCCESS)
5743 goto fail_start_watchdog;
5744
5585 return 0; 5745 return 0;
5586 5746
5747fail_start_watchdog:
5748 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
5749 del_timer_sync(&instance->sriov_heartbeat_timer);
5587fail_get_ld_pd_list: 5750fail_get_ld_pd_list:
5588 instance->instancet->disable_intr(instance); 5751 instance->instancet->disable_intr(instance);
5589fail_init_adapter:
5590 megasas_destroy_irqs(instance); 5752 megasas_destroy_irqs(instance);
5591fail_setup_irqs: 5753fail_init_adapter:
5592 if (instance->msix_vectors) 5754 if (instance->msix_vectors)
5593 pci_free_irq_vectors(instance->pdev); 5755 pci_free_irq_vectors(instance->pdev);
5594 instance->msix_vectors = 0; 5756 instance->msix_vectors = 0;
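[editor's note] Note how the error labels are reordered above: fail_start_watchdog tears down the SR-IOV heartbeat timer before falling into fail_get_ld_pd_list, and fail_init_adapter now sits below megasas_destroy_irqs() so IRQ teardown also runs for failures past IRQ setup. A generic sketch of this reverse-order unwind idiom (all names hypothetical):

static int setup_irqs(void)     { return 0; }	/* hypothetical helpers */
static int init_adapter(void)   { return 0; }
static int start_watchdog(void) { return 0; }
static void stop_watchdog_timer(void) { }
static void destroy_irqs(void)  { }

static int init_example(void)
{
	if (setup_irqs() < 0)
		goto fail;		/* nothing to undo yet */
	if (init_adapter() < 0)
		goto fail_irqs;		/* undo IRQs only */
	if (start_watchdog() < 0)
		goto fail_watchdog;	/* undo timer, then IRQs */
	return 0;

fail_watchdog:
	stop_watchdog_timer();
fail_irqs:
	destroy_irqs();
fail:
	return -1;
}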
@@ -6022,13 +6184,13 @@ static int megasas_io_attach(struct megasas_instance *instance)
6022 * @instance: Adapter soft state 6184 * @instance: Adapter soft state
6023 * Description: 6185 * Description:
6024 * 6186 *
6025 * For Ventura, driver/FW will operate in 64bit DMA addresses. 6187 * For Ventura, driver/FW will operate in 64bit DMA addresses.
6026 * 6188 *
6027 * For invader- 6189 * For invader-
6028 * By default, driver/FW will operate in 32bit DMA addresses 6190 * By default, driver/FW will operate in 32bit DMA addresses
6029 * for consistent DMA mapping but if 32 bit consistent 6191 * for consistent DMA mapping but if 32 bit consistent
6030 * DMA mask fails, driver will try with 64 bit consistent 6192 * DMA mask fails, driver will try with 64 bit consistent
6031 * mask provided FW is true 64bit DMA capable 6193 * mask provided FW is true 64bit DMA capable
6032 * 6194 *
6033 * For older controllers(Thunderbolt and MFI based adapters)- 6195 * For older controllers(Thunderbolt and MFI based adapters)-
6034 * driver/FW will operate in 32 bit consistent DMA addresses. 6196 * driver/FW will operate in 32 bit consistent DMA addresses.
@@ -6038,31 +6200,31 @@ megasas_set_dma_mask(struct megasas_instance *instance)
6038{ 6200{
6039 u64 consistent_mask; 6201 u64 consistent_mask;
6040 struct pci_dev *pdev; 6202 struct pci_dev *pdev;
6041 u32 scratch_pad_2; 6203 u32 scratch_pad_1;
6042 6204
6043 pdev = instance->pdev; 6205 pdev = instance->pdev;
6044 consistent_mask = (instance->adapter_type == VENTURA_SERIES) ? 6206 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6045 DMA_BIT_MASK(64) : DMA_BIT_MASK(32); 6207 DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
6046 6208
6047 if (IS_DMA64) { 6209 if (IS_DMA64) {
6048 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 6210 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6049 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6211 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6050 goto fail_set_dma_mask; 6212 goto fail_set_dma_mask;
6051 6213
6052 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) && 6214 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
6053 (dma_set_coherent_mask(&pdev->dev, consistent_mask) && 6215 (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6054 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) { 6216 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6055 /* 6217 /*
6056 * If 32 bit DMA mask fails, then try for 64 bit mask 6218 * If 32 bit DMA mask fails, then try for 64 bit mask
6057 * for FW capable of handling 64 bit DMA. 6219 * for FW capable of handling 64 bit DMA.
6058 */ 6220 */
6059 scratch_pad_2 = readl 6221 scratch_pad_1 = megasas_readl
6060 (&instance->reg_set->outbound_scratch_pad_2); 6222 (instance, &instance->reg_set->outbound_scratch_pad_1);
6061 6223
6062 if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET)) 6224 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6063 goto fail_set_dma_mask; 6225 goto fail_set_dma_mask;
6064 else if (dma_set_mask_and_coherent(&pdev->dev, 6226 else if (dma_set_mask_and_coherent(&pdev->dev,
6065 DMA_BIT_MASK(64))) 6227 DMA_BIT_MASK(64)))
6066 goto fail_set_dma_mask; 6228 goto fail_set_dma_mask;
6067 } 6229 }
6068 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) 6230 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
@@ -6074,8 +6236,8 @@ megasas_set_dma_mask(struct megasas_instance *instance)
6074 instance->consistent_mask_64bit = true; 6236 instance->consistent_mask_64bit = true;
6075 6237
6076 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6238 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6077 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"), 6239 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
6078 (instance->consistent_mask_64bit ? "64" : "32")); 6240 (instance->consistent_mask_64bit ? "64" : "32"));
6079 6241
6080 return 0; 6242 return 0;
6081 6243
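[editor's note] Taken together, megasas_set_dma_mask() now works as a fallback ladder: try a 64-bit streaming mask, pick the coherent mask by series (64-bit for Ventura and newer, 32-bit otherwise), drop everything to 32-bit if that coherent mask fails, and climb back to a 64-bit coherent mask only when the firmware sets MR_CAN_HANDLE_64_BIT_DMA_OFFSET. A compressed userland sketch of the ladder, where try_mask() and the capability probe stand in for the kernel DMA API and the scratch-pad read:

#include <stdbool.h>
#include <stdint.h>

#define MASK(n) (((n) >= 64) ? ~0ull : ((1ull << (n)) - 1))

static uint64_t hw_limit = MASK(64);	/* pretend the platform is 64-bit capable */
static bool try_mask(uint64_t m)       { return m <= hw_limit; }
static bool fw_handles_64bit_dma(void) { return true; }	/* models the scratch-pad bit */

static int set_dma_mask_sketch(bool ventura_or_newer)
{
	uint64_t coherent = ventura_or_newer ? MASK(64) : MASK(32);

	if (!try_mask(MASK(64)) && !try_mask(MASK(32)))
		return -1;			/* no usable mask at all */

	if (!try_mask(coherent)) {
		/* Preferred coherent mask failed: a 64-bit retry is only
		 * legal if the firmware advertises 64-bit DMA support. */
		if (!fw_handles_64bit_dma() || !try_mask(MASK(64)))
			return -1;
	}
	return 0;
}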
@@ -6088,12 +6250,14 @@ fail_set_dma_mask:
6088/* 6250/*
6089 * megasas_set_adapter_type - Set adapter type. 6251 * megasas_set_adapter_type - Set adapter type.
6090 * Supported controllers can be divided in 6252 * Supported controllers can be divided in
6091 * 4 categories- enum MR_ADAPTER_TYPE { 6253 * different categories-
6092 * MFI_SERIES = 1, 6254 * enum MR_ADAPTER_TYPE {
6093 * THUNDERBOLT_SERIES = 2, 6255 * MFI_SERIES = 1,
6094 * INVADER_SERIES = 3, 6256 * THUNDERBOLT_SERIES = 2,
6095 * VENTURA_SERIES = 4, 6257 * INVADER_SERIES = 3,
6096 * }; 6258 * VENTURA_SERIES = 4,
6259 * AERO_SERIES = 5,
6260 * };
6097 * @instance: Adapter soft state 6261 * @instance: Adapter soft state
6098 * return: void 6262 * return: void
6099 */ 6263 */
@@ -6104,6 +6268,12 @@ static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6104 instance->adapter_type = MFI_SERIES; 6268 instance->adapter_type = MFI_SERIES;
6105 } else { 6269 } else {
6106 switch (instance->pdev->device) { 6270 switch (instance->pdev->device) {
6271 case PCI_DEVICE_ID_LSI_AERO_10E1:
6272 case PCI_DEVICE_ID_LSI_AERO_10E2:
6273 case PCI_DEVICE_ID_LSI_AERO_10E5:
6274 case PCI_DEVICE_ID_LSI_AERO_10E6:
6275 instance->adapter_type = AERO_SERIES;
6276 break;
6107 case PCI_DEVICE_ID_LSI_VENTURA: 6277 case PCI_DEVICE_ID_LSI_VENTURA:
6108 case PCI_DEVICE_ID_LSI_CRUSADER: 6278 case PCI_DEVICE_ID_LSI_CRUSADER:
6109 case PCI_DEVICE_ID_LSI_HARPOON: 6279 case PCI_DEVICE_ID_LSI_HARPOON:
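[editor's note] megasas_set_adapter_type() gains the Aero entries above; the 0x10E1/0x10E2/0x10E5/0x10E6 device IDs fall into the new AERO_SERIES bucket while the existing Ventura IDs keep their mapping. A trimmed illustration of the classification (only the new cases shown; the fallthrough is simplified):

#include <stdint.h>

enum adapter_series { MFI = 1, THUNDERBOLT, INVADER, VENTURA, AERO };

static enum adapter_series classify(uint16_t pci_device_id)
{
	switch (pci_device_id) {
	case 0x10e1:	/* PCI_DEVICE_ID_LSI_AERO_10E1 */
	case 0x10e2:	/* PCI_DEVICE_ID_LSI_AERO_10E2 */
	case 0x10e5:	/* PCI_DEVICE_ID_LSI_AERO_10E5 */
	case 0x10e6:	/* PCI_DEVICE_ID_LSI_AERO_10E6 */
		return AERO;
	default:	/* Ventura and older IDs elided for brevity */
		return VENTURA;
	}
}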
@@ -6171,6 +6341,7 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6171 if (megasas_alloc_mfi_ctrl_mem(instance)) 6341 if (megasas_alloc_mfi_ctrl_mem(instance))
6172 goto fail; 6342 goto fail;
6173 break; 6343 break;
6344 case AERO_SERIES:
6174 case VENTURA_SERIES: 6345 case VENTURA_SERIES:
6175 case THUNDERBOLT_SERIES: 6346 case THUNDERBOLT_SERIES:
6176 case INVADER_SERIES: 6347 case INVADER_SERIES:
@@ -6245,6 +6416,14 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6245 "Failed to allocate PD list buffer\n"); 6416 "Failed to allocate PD list buffer\n");
6246 return -ENOMEM; 6417 return -ENOMEM;
6247 } 6418 }
6419
6420 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
6421 sizeof(struct MR_SNAPDUMP_PROPERTIES),
6422 &instance->snapdump_prop_h, GFP_KERNEL);
6423
6424 if (!instance->snapdump_prop)
6425 dev_err(&pdev->dev,
6426 "Failed to allocate snapdump properties buffer\n");
6248 } 6427 }
6249 6428
6250 instance->pd_list_buf = 6429 instance->pd_list_buf =
@@ -6388,6 +6567,12 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
6388 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE, 6567 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
6389 instance->crash_dump_buf, 6568 instance->crash_dump_buf,
6390 instance->crash_dump_h); 6569 instance->crash_dump_h);
6570
6571 if (instance->snapdump_prop)
6572 dma_free_coherent(&pdev->dev,
6573 sizeof(struct MR_SNAPDUMP_PROPERTIES),
6574 instance->snapdump_prop,
6575 instance->snapdump_prop_h);
6391} 6576}
6392 6577
6393/* 6578/*
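[editor's note] These two hunks pair up: megasas_alloc_ctrl_dma_buffers() obtains a coherent MR_SNAPDUMP_PROPERTIES buffer and only warns on failure, and megasas_free_ctrl_dma_buffers() frees it behind a NULL check. A small sketch of that optional-buffer lifecycle, with plain heap allocation standing in for dma_alloc_coherent()/dma_free_coherent() and a placeholder struct layout:

#include <stdio.h>
#include <stdlib.h>

struct snapdump_properties { unsigned wait_time_sec; };	/* placeholder field */

/* Allocation failure is reported but deliberately non-fatal. */
static struct snapdump_properties *alloc_snapdump_prop(void)
{
	struct snapdump_properties *p = calloc(1, sizeof(*p));

	if (!p)
		fprintf(stderr, "Failed to allocate snapdump properties buffer\n");
	return p;	/* may be NULL; callers must tolerate that */
}

static void free_snapdump_prop(struct snapdump_properties *p)
{
	if (p)		/* freed only when the optional buffer exists */
		free(p);
}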
@@ -6434,12 +6619,10 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
6434 instance->disableOnlineCtrlReset = 1; 6619 instance->disableOnlineCtrlReset = 1;
6435 instance->UnevenSpanSupport = 0; 6620 instance->UnevenSpanSupport = 0;
6436 6621
6437 if (instance->adapter_type != MFI_SERIES) { 6622 if (instance->adapter_type != MFI_SERIES)
6438 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 6623 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6439 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq); 6624 else
6440 } else {
6441 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 6625 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6442 }
6443} 6626}
6444 6627
6445/** 6628/**
@@ -6455,6 +6638,13 @@ static int megasas_probe_one(struct pci_dev *pdev,
6455 struct megasas_instance *instance; 6638 struct megasas_instance *instance;
6456 u16 control = 0; 6639 u16 control = 0;
6457 6640
6641 switch (pdev->device) {
6642 case PCI_DEVICE_ID_LSI_AERO_10E1:
6643 case PCI_DEVICE_ID_LSI_AERO_10E5:
6644 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
6645 break;
6646 }
6647
6458 /* Reset MSI-X in the kdump kernel */ 6648 /* Reset MSI-X in the kdump kernel */
6459 if (reset_devices) { 6649 if (reset_devices) {
6460 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 6650 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
@@ -6708,6 +6898,10 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6708 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6898 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6709 del_timer_sync(&instance->sriov_heartbeat_timer); 6899 del_timer_sync(&instance->sriov_heartbeat_timer);
6710 6900
6901 /* Stop the FW fault detection watchdog */
6902 if (instance->adapter_type != MFI_SERIES)
6903 megasas_fusion_stop_watchdog(instance);
6904
6711 megasas_flush_cache(instance); 6905 megasas_flush_cache(instance);
6712 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); 6906 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6713 6907
@@ -6843,8 +7037,16 @@ megasas_resume(struct pci_dev *pdev)
6843 if (megasas_start_aen(instance)) 7037 if (megasas_start_aen(instance))
6844 dev_err(&instance->pdev->dev, "Start AEN failed\n"); 7038 dev_err(&instance->pdev->dev, "Start AEN failed\n");
6845 7039
7040 /* Re-launch FW fault watchdog */
7041 if (instance->adapter_type != MFI_SERIES)
7042 if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7043 goto fail_start_watchdog;
7044
6846 return 0; 7045 return 0;
6847 7046
7047fail_start_watchdog:
7048 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7049 del_timer_sync(&instance->sriov_heartbeat_timer);
6848fail_init_mfi: 7050fail_init_mfi:
6849 megasas_free_ctrl_dma_buffers(instance); 7051 megasas_free_ctrl_dma_buffers(instance);
6850 megasas_free_ctrl_mem(instance); 7052 megasas_free_ctrl_mem(instance);
@@ -6912,6 +7114,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
6912 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7114 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6913 del_timer_sync(&instance->sriov_heartbeat_timer); 7115 del_timer_sync(&instance->sriov_heartbeat_timer);
6914 7116
7117 /* Stop the FW fault detection watchdog */
7118 if (instance->adapter_type != MFI_SERIES)
7119 megasas_fusion_stop_watchdog(instance);
7120
6915 if (instance->fw_crash_state != UNAVAILABLE) 7121 if (instance->fw_crash_state != UNAVAILABLE)
6916 megasas_free_host_crash_buffer(instance); 7122 megasas_free_host_crash_buffer(instance);
6917 scsi_remove_host(instance->host); 7123 scsi_remove_host(instance->host);
@@ -6956,7 +7162,7 @@ skip_firing_dcmds:
6956 if (instance->msix_vectors) 7162 if (instance->msix_vectors)
6957 pci_free_irq_vectors(instance->pdev); 7163 pci_free_irq_vectors(instance->pdev);
6958 7164
6959 if (instance->adapter_type == VENTURA_SERIES) { 7165 if (instance->adapter_type >= VENTURA_SERIES) {
6960 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 7166 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6961 kfree(fusion->stream_detect_by_ld[i]); 7167 kfree(fusion->stream_detect_by_ld[i]);
6962 kfree(fusion->stream_detect_by_ld); 7168 kfree(fusion->stream_detect_by_ld);
@@ -7737,8 +7943,15 @@ megasas_aen_polling(struct work_struct *work)
7737 break; 7943 break;
7738 7944
7739 case MR_EVT_CTRL_PROP_CHANGED: 7945 case MR_EVT_CTRL_PROP_CHANGED:
7740 dcmd_ret = megasas_get_ctrl_info(instance); 7946 dcmd_ret = megasas_get_ctrl_info(instance);
7741 break; 7947 if (dcmd_ret == DCMD_SUCCESS &&
7948 instance->snapdump_wait_time) {
7949 megasas_get_snapdump_properties(instance);
7950 dev_info(&instance->pdev->dev,
7951 "Snap dump wait time\t: %d\n",
7952 instance->snapdump_wait_time);
7953 }
7954 break;
7742 default: 7955 default:
7743 doscan = 0; 7956 doscan = 0;
7744 break; 7957 break;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 59ecbb3b53b5..87c2c0472c8f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -2,7 +2,8 @@
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2013 LSI Corporation 4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies 5 * Copyright (c) 2013-2016 Avago Technologies
6 * Copyright (c) 2016-2018 Broadcom Inc.
6 * 7 *
7 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -19,17 +20,14 @@
19 * 20 *
20 * FILE: megaraid_sas_fp.c 21 * FILE: megaraid_sas_fp.c
21 * 22 *
22 * Authors: Avago Technologies 23 * Authors: Broadcom Inc.
23 * Sumant Patro 24 * Sumant Patro
24 * Varad Talamacki 25 * Varad Talamacki
25 * Manoj Jose 26 * Manoj Jose
26 * Kashyap Desai <kashyap.desai@avagotech.com> 27 * Kashyap Desai <kashyap.desai@broadcom.com>
27 * Sumit Saxena <sumit.saxena@avagotech.com> 28 * Sumit Saxena <sumit.saxena@broadcom.com>
28 * 29 *
29 * Send feedback to: megaraidlinux.pdl@avagotech.com 30 * Send feedback to: megaraidlinux.pdl@broadcom.com
30 *
31 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
32 * San Jose, California 95131
33 */ 31 */
34 32
35#include <linux/kernel.h> 33#include <linux/kernel.h>
@@ -745,7 +743,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
745 *pDevHandle = MR_PdDevHandleGet(pd, map); 743 *pDevHandle = MR_PdDevHandleGet(pd, map);
746 *pPdInterface = MR_PdInterfaceTypeGet(pd, map); 744 *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
747 /* get second pd also for raid 1/10 fast path writes*/ 745 /* get second pd also for raid 1/10 fast path writes*/
748 if ((instance->adapter_type == VENTURA_SERIES) && 746 if ((instance->adapter_type >= VENTURA_SERIES) &&
749 (raid->level == 1) && 747 (raid->level == 1) &&
750 !io_info->isRead) { 748 !io_info->isRead) {
751 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); 749 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -770,7 +768,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
770 } 768 }
771 769
772 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 770 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
773 if (instance->adapter_type == VENTURA_SERIES) { 771 if (instance->adapter_type >= VENTURA_SERIES) {
774 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = 772 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
775 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; 773 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
776 io_info->span_arm = 774 io_info->span_arm =
@@ -861,7 +859,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
861 *pDevHandle = MR_PdDevHandleGet(pd, map); 859 *pDevHandle = MR_PdDevHandleGet(pd, map);
862 *pPdInterface = MR_PdInterfaceTypeGet(pd, map); 860 *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
863 /* get second pd also for raid 1/10 fast path writes*/ 861 /* get second pd also for raid 1/10 fast path writes*/
864 if ((instance->adapter_type == VENTURA_SERIES) && 862 if ((instance->adapter_type >= VENTURA_SERIES) &&
865 (raid->level == 1) && 863 (raid->level == 1) &&
866 !io_info->isRead) { 864 !io_info->isRead) {
867 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); 865 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -888,7 +886,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
888 } 886 }
889 887
890 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 888 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
891 if (instance->adapter_type == VENTURA_SERIES) { 889 if (instance->adapter_type >= VENTURA_SERIES) {
892 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = 890 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
893 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; 891 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
894 io_info->span_arm = 892 io_info->span_arm =
@@ -1266,7 +1264,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
1266 1264
1267 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { 1265 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
1268 ld = MR_TargetIdToLdGet(ldCount, drv_map); 1266 ld = MR_TargetIdToLdGet(ldCount, drv_map);
1269 if (ld >= MAX_LOGICAL_DRIVES_EXT) { 1267 if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
1270 lbInfo[ldCount].loadBalanceFlag = 0; 1268 lbInfo[ldCount].loadBalanceFlag = 0;
1271 continue; 1269 continue;
1272 } 1270 }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f74b5ea24f0f..211c17c33aa0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2,7 +2,8 @@
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2013 LSI Corporation 4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies 5 * Copyright (c) 2013-2016 Avago Technologies
6 * Copyright (c) 2016-2018 Broadcom Inc.
6 * 7 *
7 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -19,16 +20,13 @@
19 * 20 *
20 * FILE: megaraid_sas_fusion.c 21 * FILE: megaraid_sas_fusion.c
21 * 22 *
22 * Authors: Avago Technologies 23 * Authors: Broadcom Inc.
23 * Sumant Patro 24 * Sumant Patro
24 * Adam Radford 25 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com> 26 * Kashyap Desai <kashyap.desai@broadcom.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com> 27 * Sumit Saxena <sumit.saxena@broadcom.com>
27 * 28 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com 29 * Send feedback to: megaraidlinux.pdl@broadcom.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */ 30 */
33 31
34#include <linux/kernel.h> 32#include <linux/kernel.h>
@@ -48,6 +46,7 @@
48#include <linux/mutex.h> 46#include <linux/mutex.h>
49#include <linux/poll.h> 47#include <linux/poll.h>
50#include <linux/vmalloc.h> 48#include <linux/vmalloc.h>
49#include <linux/workqueue.h>
51 50
52#include <scsi/scsi.h> 51#include <scsi/scsi.h>
53#include <scsi/scsi_cmnd.h> 52#include <scsi/scsi_cmnd.h>
@@ -74,7 +73,7 @@ void
74megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd); 73megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
75int megasas_alloc_cmds(struct megasas_instance *instance); 74int megasas_alloc_cmds(struct megasas_instance *instance);
76int 75int
77megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs); 76megasas_clear_intr_fusion(struct megasas_instance *instance);
78int 77int
79megasas_issue_polled(struct megasas_instance *instance, 78megasas_issue_polled(struct megasas_instance *instance,
80 struct megasas_cmd *cmd); 79 struct megasas_cmd *cmd);
@@ -95,6 +94,9 @@ static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
95static void megasas_free_reply_fusion(struct megasas_instance *instance); 94static void megasas_free_reply_fusion(struct megasas_instance *instance);
96static inline 95static inline
97void megasas_configure_queue_sizes(struct megasas_instance *instance); 96void megasas_configure_queue_sizes(struct megasas_instance *instance);
97static void megasas_fusion_crash_dump(struct megasas_instance *instance);
98extern u32 megasas_readl(struct megasas_instance *instance,
99 const volatile void __iomem *addr);
98 100
99/** 101/**
100 * megasas_check_same_4gb_region - check if allocation 102 * megasas_check_same_4gb_region - check if allocation
@@ -165,9 +167,11 @@ megasas_disable_intr_fusion(struct megasas_instance *instance)
165} 167}
166 168
167int 169int
168megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs) 170megasas_clear_intr_fusion(struct megasas_instance *instance)
169{ 171{
170 u32 status; 172 u32 status;
173 struct megasas_register_set __iomem *regs;
174 regs = instance->reg_set;
171 /* 175 /*
172 * Check if it is our interrupt 176 * Check if it is our interrupt
173 */ 177 */
@@ -262,16 +266,17 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
262 266
263 reg_set = instance->reg_set; 267 reg_set = instance->reg_set;
264 268
265 /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */ 269 /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
266 if (instance->adapter_type < VENTURA_SERIES) 270 if (instance->adapter_type < VENTURA_SERIES)
267 cur_max_fw_cmds = 271 cur_max_fw_cmds =
268 readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF; 272 megasas_readl(instance,
273 &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;
269 274
270 if (dual_qdepth_disable || !cur_max_fw_cmds) 275 if (dual_qdepth_disable || !cur_max_fw_cmds)
271 cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 276 cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
272 else 277 else
273 ldio_threshold = 278 ldio_threshold =
274 (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS; 279 (instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
275 280
276 dev_info(&instance->pdev->dev, 281 dev_info(&instance->pdev->dev,
277 "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n", 282 "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
@@ -807,10 +812,8 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
807 812
808 } 813 }
809 814
810 if (fusion->reply_frames_desc_pool) 815 dma_pool_destroy(fusion->reply_frames_desc_pool);
811 dma_pool_destroy(fusion->reply_frames_desc_pool); 816 dma_pool_destroy(fusion->reply_frames_desc_pool_align);
812 if (fusion->reply_frames_desc_pool_align)
813 dma_pool_destroy(fusion->reply_frames_desc_pool_align);
814 817
815 if (fusion->rdpq_virt) 818 if (fusion->rdpq_virt)
816 dma_free_coherent(&instance->pdev->dev, 819 dma_free_coherent(&instance->pdev->dev,
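[editor's note] This cleanup, like the matching one in megasas_free_reply_fusion() just below and the later kfree(fusion->ioc_init_cmd) hunk, drops redundant NULL checks: dma_pool_destroy() and kfree() are defined to be no-ops on NULL, so guarding them only adds noise. The same convention in portable C:

#include <stdlib.h>

static void release_buffer(char *buf)
{
	/* free(NULL) is a defined no-op, so no guard is needed. */
	free(buf);
}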
@@ -830,8 +833,7 @@ megasas_free_reply_fusion(struct megasas_instance *instance) {
830 fusion->reply_frames_desc[0], 833 fusion->reply_frames_desc[0],
831 fusion->reply_frames_desc_phys[0]); 834 fusion->reply_frames_desc_phys[0]);
832 835
833 if (fusion->reply_frames_desc_pool) 836 dma_pool_destroy(fusion->reply_frames_desc_pool);
834 dma_pool_destroy(fusion->reply_frames_desc_pool);
835 837
836} 838}
837 839
@@ -974,7 +976,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
974 struct megasas_header *frame_hdr; 976 struct megasas_header *frame_hdr;
975 const char *sys_info; 977 const char *sys_info;
976 MFI_CAPABILITIES *drv_ops; 978 MFI_CAPABILITIES *drv_ops;
977 u32 scratch_pad_2; 979 u32 scratch_pad_1;
978 ktime_t time; 980 ktime_t time;
979 bool cur_fw_64bit_dma_capable; 981 bool cur_fw_64bit_dma_capable;
980 982
@@ -985,14 +987,14 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
985 987
986 cmd = fusion->ioc_init_cmd; 988 cmd = fusion->ioc_init_cmd;
987 989
988 scratch_pad_2 = readl 990 scratch_pad_1 = megasas_readl
989 (&instance->reg_set->outbound_scratch_pad_2); 991 (instance, &instance->reg_set->outbound_scratch_pad_1);
990 992
991 cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0; 993 cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
992 994
993 if (instance->adapter_type == INVADER_SERIES) { 995 if (instance->adapter_type == INVADER_SERIES) {
994 cur_fw_64bit_dma_capable = 996 cur_fw_64bit_dma_capable =
995 (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false; 997 (scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
996 998
997 if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) { 999 if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
998 dev_err(&instance->pdev->dev, "Driver was operating on 64bit " 1000 dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
@@ -1010,7 +1012,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1010 goto fail_fw_init; 1012 goto fail_fw_init;
1011 } 1013 }
1012 1014
1013 instance->fw_sync_cache_support = (scratch_pad_2 & 1015 instance->fw_sync_cache_support = (scratch_pad_1 &
1014 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0; 1016 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
1015 dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n", 1017 dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
1016 instance->fw_sync_cache_support ? "Yes" : "No"); 1018 instance->fw_sync_cache_support ? "Yes" : "No");
@@ -1043,9 +1045,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1043 1045
1044 frame_hdr = &cmd->frame->hdr; 1046 frame_hdr = &cmd->frame->hdr;
1045 frame_hdr->cmd_status = 0xFF; 1047 frame_hdr->cmd_status = 0xFF;
1046 frame_hdr->flags = cpu_to_le16( 1048 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1047 le16_to_cpu(frame_hdr->flags) |
1048 MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1049 1049
1050 init_frame->cmd = MFI_CMD_INIT; 1050 init_frame->cmd = MFI_CMD_INIT;
1051 init_frame->cmd_status = 0xFF; 1051 init_frame->cmd_status = 0xFF;
@@ -1107,7 +1107,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1107 instance->instancet->disable_intr(instance); 1107 instance->instancet->disable_intr(instance);
1108 1108
1109 for (i = 0; i < (10 * 1000); i += 20) { 1109 for (i = 0; i < (10 * 1000); i += 20) {
1110 if (readl(&instance->reg_set->doorbell) & 1) 1110 if (megasas_readl(instance, &instance->reg_set->doorbell) & 1)
1111 msleep(20); 1111 msleep(20);
1112 else 1112 else
1113 break; 1113 break;
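[editor's note] The doorbell poll above now goes through megasas_readl(), the register-read wrapper declared extern earlier in this file, and keeps its bounded wait: up to 10 seconds in 20 ms steps for bit 0 to clear. A userland sketch of the loop shape (register access and sleeping are stubbed):

#include <stdbool.h>
#include <stdint.h>

static uint32_t doorbell;	/* stand-in for the mapped register */
static uint32_t read_doorbell(void) { return doorbell; }
static void sleep_ms(int ms)        { (void)ms; }	/* stand-in for msleep() */

static bool wait_doorbell_clear(void)
{
	int i;

	for (i = 0; i < (10 * 1000); i += 20) {	/* 10 s budget, 20 ms steps */
		if (!(read_doorbell() & 1))
			return true;	/* bit 0 cleared: handshake done */
		sleep_ms(20);
	}
	return false;			/* timed out with doorbell still busy */
}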
@@ -1115,7 +1115,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1115 1115
1116 megasas_fire_cmd_fusion(instance, &req_desc); 1116 megasas_fire_cmd_fusion(instance, &req_desc);
1117 1117
1118 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); 1118 wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);
1119 1119
1120 frame_hdr = &cmd->frame->hdr; 1120 frame_hdr = &cmd->frame->hdr;
1121 if (frame_hdr->cmd_status != 0) { 1121 if (frame_hdr->cmd_status != 0) {
@@ -1559,14 +1559,12 @@ void megasas_configure_queue_sizes(struct megasas_instance *instance)
1559 fusion = instance->ctrl_context; 1559 fusion = instance->ctrl_context;
1560 max_cmd = instance->max_fw_cmds; 1560 max_cmd = instance->max_fw_cmds;
1561 1561
1562 if (instance->adapter_type == VENTURA_SERIES) 1562 if (instance->adapter_type >= VENTURA_SERIES)
1563 instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS; 1563 instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
1564 else 1564 else
1565 instance->max_mpt_cmds = instance->max_fw_cmds; 1565 instance->max_mpt_cmds = instance->max_fw_cmds;
1566 1566
1567 instance->max_scsi_cmds = instance->max_fw_cmds - 1567 instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds;
1568 (MEGASAS_FUSION_INTERNAL_CMDS +
1569 MEGASAS_FUSION_IOCTL_CMDS);
1570 instance->cur_can_queue = instance->max_scsi_cmds; 1568 instance->cur_can_queue = instance->max_scsi_cmds;
1571 instance->host->can_queue = instance->cur_can_queue; 1569 instance->host->can_queue = instance->cur_can_queue;
1572 1570
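[editor's note] The queue sizing above collapses the explicit reservation into the precomputed max_mfi_cmds term, so the SCSI budget reads max_scsi_cmds = max_fw_cmds - max_mfi_cmds. A worked example, assuming max_mfi_cmds equals the old MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS reservation and taking 5 and 3 as illustrative values for those constants:

#include <assert.h>

#define INTERNAL_CMDS 5		/* assumed stand-in for MEGASAS_FUSION_INTERNAL_CMDS */
#define IOCTL_CMDS    3		/* assumed stand-in for MEGASAS_FUSION_IOCTL_CMDS */

int main(void)
{
	int max_fw_cmds   = 1024;				/* example FW budget */
	int max_mfi_cmds  = INTERNAL_CMDS + IOCTL_CMDS;		/* 8 */
	int max_scsi_cmds = max_fw_cmds - max_mfi_cmds;

	assert(max_scsi_cmds == 1016);				/* 1024 - 8 */
	return 0;
}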
@@ -1627,8 +1625,7 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
1627 fusion->ioc_init_cmd->frame, 1625 fusion->ioc_init_cmd->frame,
1628 fusion->ioc_init_cmd->frame_phys_addr); 1626 fusion->ioc_init_cmd->frame_phys_addr);
1629 1627
1630 if (fusion->ioc_init_cmd) 1628 kfree(fusion->ioc_init_cmd);
1631 kfree(fusion->ioc_init_cmd);
1632} 1629}
1633 1630
1634/** 1631/**
@@ -1642,7 +1639,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1642{ 1639{
1643 struct megasas_register_set __iomem *reg_set; 1640 struct megasas_register_set __iomem *reg_set;
1644 struct fusion_context *fusion; 1641 struct fusion_context *fusion;
1645 u32 scratch_pad_2; 1642 u32 scratch_pad_1;
1646 int i = 0, count; 1643 int i = 0, count;
1647 1644
1648 fusion = instance->ctrl_context; 1645 fusion = instance->ctrl_context;
@@ -1659,20 +1656,21 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1659 1656
1660 megasas_configure_queue_sizes(instance); 1657 megasas_configure_queue_sizes(instance);
1661 1658
1662 scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2); 1659 scratch_pad_1 = megasas_readl(instance,
1663 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, 1660 &instance->reg_set->outbound_scratch_pad_1);
1661 /* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
1664 * Firmware support extended IO chain frame which is 4 times more than 1662 * Firmware support extended IO chain frame which is 4 times more than
1665 * legacy Firmware. 1663 * legacy Firmware.
1666 * Legacy Firmware - Frame size is (8 * 128) = 1K 1664 * Legacy Firmware - Frame size is (8 * 128) = 1K
1667 * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K 1665 * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
1668 */ 1666 */
1669 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) 1667 if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
1670 instance->max_chain_frame_sz = 1668 instance->max_chain_frame_sz =
1671 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 1669 ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1672 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO; 1670 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
1673 else 1671 else
1674 instance->max_chain_frame_sz = 1672 instance->max_chain_frame_sz =
1675 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 1673 ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1676 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO; 1674 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
1677 1675
1678 if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) { 1676 if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
@@ -1760,6 +1758,90 @@ fail_alloc_mfi_cmds:
1760} 1758}
1761 1759
1762/** 1760/**
1761 * megasas_fault_detect_work - Worker function of
1762 * FW fault handling workqueue.
1763 */
1764static void
1765megasas_fault_detect_work(struct work_struct *work)
1766{
1767 struct megasas_instance *instance =
1768 container_of(work, struct megasas_instance,
1769 fw_fault_work.work);
1770 u32 fw_state, dma_state, status;
1771
1772 /* Check the fw state */
1773 fw_state = instance->instancet->read_fw_status_reg(instance) &
1774 MFI_STATE_MASK;
1775
1776 if (fw_state == MFI_STATE_FAULT) {
1777 dma_state = instance->instancet->read_fw_status_reg(instance) &
1778 MFI_STATE_DMADONE;
1779 /* Start collecting crash, if DMA bit is done */
1780 if (instance->crash_dump_drv_support &&
1781 instance->crash_dump_app_support && dma_state) {
1782 megasas_fusion_crash_dump(instance);
1783 } else {
1784 if (instance->unload == 0) {
1785 status = megasas_reset_fusion(instance->host, 0);
1786 if (status != SUCCESS) {
1787 dev_err(&instance->pdev->dev,
1788 "Failed from %s %d, do not re-arm timer\n",
1789 __func__, __LINE__);
1790 return;
1791 }
1792 }
1793 }
1794 }
1795
1796 if (instance->fw_fault_work_q)
1797 queue_delayed_work(instance->fw_fault_work_q,
1798 &instance->fw_fault_work,
1799 msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
1800}
1801
1802int
1803megasas_fusion_start_watchdog(struct megasas_instance *instance)
1804{
1805 /* Check if the Fault WQ is already started */
1806 if (instance->fw_fault_work_q)
1807 return SUCCESS;
1808
1809 INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work);
1810
1811 snprintf(instance->fault_handler_work_q_name,
1812 sizeof(instance->fault_handler_work_q_name),
1813 "poll_megasas%d_status", instance->host->host_no);
1814
1815 instance->fw_fault_work_q =
1816 create_singlethread_workqueue(instance->fault_handler_work_q_name);
1817 if (!instance->fw_fault_work_q) {
1818 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1819 __func__, __LINE__);
1820 return FAILED;
1821 }
1822
1823 queue_delayed_work(instance->fw_fault_work_q,
1824 &instance->fw_fault_work,
1825 msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
1826
1827 return SUCCESS;
1828}
1829
1830void
1831megasas_fusion_stop_watchdog(struct megasas_instance *instance)
1832{
1833 struct workqueue_struct *wq;
1834
1835 if (instance->fw_fault_work_q) {
1836 wq = instance->fw_fault_work_q;
1837 instance->fw_fault_work_q = NULL;
1838 if (!cancel_delayed_work_sync(&instance->fw_fault_work))
1839 flush_workqueue(wq);
1840 destroy_workqueue(wq);
1841 }
1842}
1843
1844/**
1763 * map_cmd_status - Maps FW cmd status to OS cmd status 1845 * map_cmd_status - Maps FW cmd status to OS cmd status
1764 * @cmd : Pointer to cmd 1846 * @cmd : Pointer to cmd
1765 * @status : status of cmd returned by FW 1847 * @status : status of cmd returned by FW
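[editor's note] The start/stop pair added above is deliberately race-tolerant: start is idempotent (it returns early if fw_fault_work_q already exists), and stop publishes fw_fault_work_q = NULL before cancelling, so a worker that is mid-run re-arms itself only if it still observes a non-NULL queue. A compact sketch of the stop-side ordering, with single-threaded stand-ins for the workqueue API:

#include <stddef.h>

struct watchdog { void *wq; };	/* models instance->fw_fault_work_q */

static void cancel_or_flush(void *wq) { (void)wq; }	/* stand-in */
static void destroy_wq(void *wq)      { (void)wq; }	/* stand-in */

static void stop_watchdog(struct watchdog *w)
{
	void *wq = w->wq;

	if (!wq)
		return;
	w->wq = NULL;		/* workers observing NULL stop re-arming */
	cancel_or_flush(wq);	/* then cancel or flush the pending work */
	destroy_wq(wq);
}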
@@ -2543,19 +2625,22 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2543 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 2625 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
2544 u8 *raidLUN; 2626 u8 *raidLUN;
2545 unsigned long spinlock_flags; 2627 unsigned long spinlock_flags;
2546 union RAID_CONTEXT_UNION *praid_context;
2547 struct MR_LD_RAID *raid = NULL; 2628 struct MR_LD_RAID *raid = NULL;
2548 struct MR_PRIV_DEVICE *mrdev_priv; 2629 struct MR_PRIV_DEVICE *mrdev_priv;
2630 struct RAID_CONTEXT *rctx;
2631 struct RAID_CONTEXT_G35 *rctx_g35;
2549 2632
2550 device_id = MEGASAS_DEV_INDEX(scp); 2633 device_id = MEGASAS_DEV_INDEX(scp);
2551 2634
2552 fusion = instance->ctrl_context; 2635 fusion = instance->ctrl_context;
2553 2636
2554 io_request = cmd->io_request; 2637 io_request = cmd->io_request;
2555 io_request->RaidContext.raid_context.virtual_disk_tgt_id = 2638 rctx = &io_request->RaidContext.raid_context;
2556 cpu_to_le16(device_id); 2639 rctx_g35 = &io_request->RaidContext.raid_context_g35;
2557 io_request->RaidContext.raid_context.status = 0; 2640
2558 io_request->RaidContext.raid_context.ex_status = 0; 2641 rctx->virtual_disk_tgt_id = cpu_to_le16(device_id);
2642 rctx->status = 0;
2643 rctx->ex_status = 0;
2559 2644
2560 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; 2645 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2561 2646
@@ -2631,11 +2716,10 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2631 raid = MR_LdRaidGet(ld, local_map_ptr); 2716 raid = MR_LdRaidGet(ld, local_map_ptr);
2632 2717
2633 if (!raid || (!fusion->fast_path_io)) { 2718 if (!raid || (!fusion->fast_path_io)) {
2634 io_request->RaidContext.raid_context.reg_lock_flags = 0; 2719 rctx->reg_lock_flags = 0;
2635 fp_possible = false; 2720 fp_possible = false;
2636 } else { 2721 } else {
2637 if (MR_BuildRaidContext(instance, &io_info, 2722 if (MR_BuildRaidContext(instance, &io_info, rctx,
2638 &io_request->RaidContext.raid_context,
2639 local_map_ptr, &raidLUN)) 2723 local_map_ptr, &raidLUN))
2640 fp_possible = (io_info.fpOkForIo > 0) ? true : false; 2724 fp_possible = (io_info.fpOkForIo > 0) ? true : false;
2641 } 2725 }
@@ -2643,9 +2727,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2643 cmd->request_desc->SCSIIO.MSIxIndex = 2727 cmd->request_desc->SCSIIO.MSIxIndex =
2644 instance->reply_map[raw_smp_processor_id()]; 2728 instance->reply_map[raw_smp_processor_id()];
2645 2729
2646 praid_context = &io_request->RaidContext; 2730 if (instance->adapter_type >= VENTURA_SERIES) {
2647
2648 if (instance->adapter_type == VENTURA_SERIES) {
2649 /* FP for Optimal raid level 1. 2731 /* FP for Optimal raid level 1.
2650 * All large RAID-1 writes (> 32 KiB, both WT and WB modes) 2732 * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
2651 * are built by the driver as LD I/Os. 2733 * are built by the driver as LD I/Os.
@@ -2681,17 +2763,17 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2681 /* In ventura if stream detected for a read and it is 2763 /* In ventura if stream detected for a read and it is
2682 * read ahead capable make this IO as LDIO 2764 * read ahead capable make this IO as LDIO
2683 */ 2765 */
2684 if (is_stream_detected(&io_request->RaidContext.raid_context_g35)) 2766 if (is_stream_detected(rctx_g35))
2685 fp_possible = false; 2767 fp_possible = false;
2686 } 2768 }
2687 2769
2688 /* If raid is NULL, set CPU affinity to default CPU0 */ 2770 /* If raid is NULL, set CPU affinity to default CPU0 */
2689 if (raid) 2771 if (raid)
2690 megasas_set_raidflag_cpu_affinity(praid_context, 2772 megasas_set_raidflag_cpu_affinity(&io_request->RaidContext,
2691 raid, fp_possible, io_info.isRead, 2773 raid, fp_possible, io_info.isRead,
2692 scsi_buff_len); 2774 scsi_buff_len);
2693 else 2775 else
2694 praid_context->raid_context_g35.routing_flags |= 2776 rctx_g35->routing_flags |=
2695 (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); 2777 (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
2696 } 2778 }
2697 2779
@@ -2703,25 +2785,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2703 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO 2785 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
2704 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2786 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2705 if (instance->adapter_type == INVADER_SERIES) { 2787 if (instance->adapter_type == INVADER_SERIES) {
2706 if (io_request->RaidContext.raid_context.reg_lock_flags == 2788 if (rctx->reg_lock_flags == REGION_TYPE_UNUSED)
2707 REGION_TYPE_UNUSED)
2708 cmd->request_desc->SCSIIO.RequestFlags = 2789 cmd->request_desc->SCSIIO.RequestFlags =
2709 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 2790 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
2710 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2791 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2711 io_request->RaidContext.raid_context.type 2792 rctx->type = MPI2_TYPE_CUDA;
2712 = MPI2_TYPE_CUDA; 2793 rctx->nseg = 0x1;
2713 io_request->RaidContext.raid_context.nseg = 0x1;
2714 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2794 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2715 io_request->RaidContext.raid_context.reg_lock_flags |= 2795 rctx->reg_lock_flags |=
2716 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 2796 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
2717 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2797 MR_RL_FLAGS_SEQ_NUM_ENABLE);
2718 } else if (instance->adapter_type == VENTURA_SERIES) { 2798 } else if (instance->adapter_type >= VENTURA_SERIES) {
2719 io_request->RaidContext.raid_context_g35.nseg_type |= 2799 rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
2720 (1 << RAID_CONTEXT_NSEG_SHIFT); 2800 rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2721 io_request->RaidContext.raid_context_g35.nseg_type |= 2801 rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2722 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2723 io_request->RaidContext.raid_context_g35.routing_flags |=
2724 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2725 io_request->IoFlags |= 2802 io_request->IoFlags |=
2726 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 2803 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2727 } 2804 }
@@ -2734,17 +2811,15 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2734 &io_info, local_map_ptr); 2811 &io_info, local_map_ptr);
2735 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; 2812 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
2736 cmd->pd_r1_lb = io_info.pd_after_lb; 2813 cmd->pd_r1_lb = io_info.pd_after_lb;
2737 if (instance->adapter_type == VENTURA_SERIES) 2814 if (instance->adapter_type >= VENTURA_SERIES)
2738 io_request->RaidContext.raid_context_g35.span_arm 2815 rctx_g35->span_arm = io_info.span_arm;
2739 = io_info.span_arm;
2740 else 2816 else
2741 io_request->RaidContext.raid_context.span_arm 2817 rctx->span_arm = io_info.span_arm;
2742 = io_info.span_arm;
2743 2818
2744 } else 2819 } else
2745 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 2820 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
2746 2821
2747 if (instance->adapter_type == VENTURA_SERIES) 2822 if (instance->adapter_type >= VENTURA_SERIES)
2748 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; 2823 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
2749 else 2824 else
2750 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2825 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
@@ -2762,31 +2837,26 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2762 /* populate the LUN field */ 2837 /* populate the LUN field */
2763 memcpy(io_request->LUN, raidLUN, 8); 2838 memcpy(io_request->LUN, raidLUN, 8);
2764 } else { 2839 } else {
2765 io_request->RaidContext.raid_context.timeout_value = 2840 rctx->timeout_value =
2766 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); 2841 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
2767 cmd->request_desc->SCSIIO.RequestFlags = 2842 cmd->request_desc->SCSIIO.RequestFlags =
2768 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 2843 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
2769 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2844 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2770 if (instance->adapter_type == INVADER_SERIES) { 2845 if (instance->adapter_type == INVADER_SERIES) {
2771 if (io_info.do_fp_rlbypass || 2846 if (io_info.do_fp_rlbypass ||
2772 (io_request->RaidContext.raid_context.reg_lock_flags 2847 (rctx->reg_lock_flags == REGION_TYPE_UNUSED))
2773 == REGION_TYPE_UNUSED))
2774 cmd->request_desc->SCSIIO.RequestFlags = 2848 cmd->request_desc->SCSIIO.RequestFlags =
2775 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 2849 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
2776 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2850 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2777 io_request->RaidContext.raid_context.type 2851 rctx->type = MPI2_TYPE_CUDA;
2778 = MPI2_TYPE_CUDA; 2852 rctx->reg_lock_flags |=
2779 io_request->RaidContext.raid_context.reg_lock_flags |=
2780 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 2853 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
2781 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2854 MR_RL_FLAGS_SEQ_NUM_ENABLE);
2782 io_request->RaidContext.raid_context.nseg = 0x1; 2855 rctx->nseg = 0x1;
2783 } else if (instance->adapter_type == VENTURA_SERIES) { 2856 } else if (instance->adapter_type >= VENTURA_SERIES) {
2784 io_request->RaidContext.raid_context_g35.routing_flags |= 2857 rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2785 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2858 rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
2786 io_request->RaidContext.raid_context_g35.nseg_type |= 2859 rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2787 (1 << RAID_CONTEXT_NSEG_SHIFT);
2788 io_request->RaidContext.raid_context_g35.nseg_type |=
2789 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2790 } 2860 }
2791 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 2861 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2792 io_request->DevHandle = cpu_to_le16(device_id); 2862 io_request->DevHandle = cpu_to_le16(device_id);
@@ -2832,7 +2902,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
2832 device_id < instance->fw_supported_vd_count)) { 2902 device_id < instance->fw_supported_vd_count)) {
2833 2903
2834 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 2904 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
2835 if (ld >= instance->fw_supported_vd_count) 2905 if (ld >= instance->fw_supported_vd_count - 1)
2836 fp_possible = 0; 2906 fp_possible = 0;
2837 else { 2907 else {
2838 raid = MR_LdRaidGet(ld, local_map_ptr); 2908 raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -2855,7 +2925,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
2855 2925
2856 /* set RAID context values */ 2926 /* set RAID context values */
2857 pRAID_Context->config_seq_num = raid->seqNum; 2927 pRAID_Context->config_seq_num = raid->seqNum;
2858 if (instance->adapter_type != VENTURA_SERIES) 2928 if (instance->adapter_type < VENTURA_SERIES)
2859 pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; 2929 pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
2860 pRAID_Context->timeout_value = 2930 pRAID_Context->timeout_value =
2861 cpu_to_le16(raid->fpIoTimeoutForLd); 2931 cpu_to_le16(raid->fpIoTimeoutForLd);
@@ -2940,7 +3010,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
2940 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); 3010 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
2941 pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum; 3011 pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
2942 io_request->DevHandle = pd_sync->seq[pd_index].devHandle; 3012 io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
2943 if (instance->adapter_type == VENTURA_SERIES) { 3013 if (instance->adapter_type >= VENTURA_SERIES) {
2944 io_request->RaidContext.raid_context_g35.routing_flags |= 3014 io_request->RaidContext.raid_context_g35.routing_flags |=
2945 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 3015 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2946 io_request->RaidContext.raid_context_g35.nseg_type |= 3016 io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -3073,7 +3143,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
3073 return 1; 3143 return 1;
3074 } 3144 }
3075 3145
3076 if (instance->adapter_type == VENTURA_SERIES) { 3146 if (instance->adapter_type >= VENTURA_SERIES) {
3077 set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count); 3147 set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
3078 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags); 3148 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
3079 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type); 3149 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
@@ -3385,7 +3455,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
3385 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); 3455 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
3386 cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 3456 cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
3387 } 3457 }
3388 //Fall thru and complete IO 3458 /* Fall through - and complete IO */
3389 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ 3459 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
3390 atomic_dec(&instance->fw_outstanding); 3460 atomic_dec(&instance->fw_outstanding);
3391 if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { 3461 if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
@@ -3501,18 +3571,13 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
3501{ 3571{
3502 struct megasas_instance *instance = 3572 struct megasas_instance *instance =
3503 (struct megasas_instance *)instance_addr; 3573 (struct megasas_instance *)instance_addr;
3504 unsigned long flags;
3505 u32 count, MSIxIndex; 3574 u32 count, MSIxIndex;
3506 3575
3507 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 3576 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3508 3577
3509 /* If we have already declared adapter dead, donot complete cmds */ 3578 /* If we have already declared adapter dead, donot complete cmds */
3510 spin_lock_irqsave(&instance->hba_lock, flags); 3579 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
3511 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
3512 spin_unlock_irqrestore(&instance->hba_lock, flags);
3513 return; 3580 return;
3514 }
3515 spin_unlock_irqrestore(&instance->hba_lock, flags);
3516 3581
3517 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) 3582 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
3518 complete_cmd_fusion(instance, MSIxIndex); 3583 complete_cmd_fusion(instance, MSIxIndex);
@@ -3525,48 +3590,24 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
3525{ 3590{
3526 struct megasas_irq_context *irq_context = devp; 3591 struct megasas_irq_context *irq_context = devp;
3527 struct megasas_instance *instance = irq_context->instance; 3592 struct megasas_instance *instance = irq_context->instance;
3528 u32 mfiStatus, fw_state, dma_state; 3593 u32 mfiStatus;
3529 3594
3530 if (instance->mask_interrupts) 3595 if (instance->mask_interrupts)
3531 return IRQ_NONE; 3596 return IRQ_NONE;
3532 3597
3533 if (!instance->msix_vectors) { 3598 if (!instance->msix_vectors) {
3534 mfiStatus = instance->instancet->clear_intr(instance->reg_set); 3599 mfiStatus = instance->instancet->clear_intr(instance);
3535 if (!mfiStatus) 3600 if (!mfiStatus)
3536 return IRQ_NONE; 3601 return IRQ_NONE;
3537 } 3602 }
3538 3603
3539 /* If we are resetting, bail */ 3604 /* If we are resetting, bail */
3540 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) { 3605 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
3541 instance->instancet->clear_intr(instance->reg_set); 3606 instance->instancet->clear_intr(instance);
3542 return IRQ_HANDLED; 3607 return IRQ_HANDLED;
3543 } 3608 }
3544 3609
3545 if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) { 3610 return complete_cmd_fusion(instance, irq_context->MSIxIndex);
3546 instance->instancet->clear_intr(instance->reg_set);
3547 /* If we didn't complete any commands, check for FW fault */
3548 fw_state = instance->instancet->read_fw_status_reg(
3549 instance->reg_set) & MFI_STATE_MASK;
3550 dma_state = instance->instancet->read_fw_status_reg
3551 (instance->reg_set) & MFI_STATE_DMADONE;
3552 if (instance->crash_dump_drv_support &&
3553 instance->crash_dump_app_support) {
3554 /* Start collecting crash, if DMA bit is done */
3555 if ((fw_state == MFI_STATE_FAULT) && dma_state)
3556 schedule_work(&instance->crash_init);
3557 else if (fw_state == MFI_STATE_FAULT) {
3558 if (instance->unload == 0)
3559 schedule_work(&instance->work_init);
3560 }
3561 } else if (fw_state == MFI_STATE_FAULT) {
3562 dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
3563 "for scsi%d\n", instance->host->host_no);
3564 if (instance->unload == 0)
3565 schedule_work(&instance->work_init);
3566 }
3567 }
3568
3569 return IRQ_HANDLED;
3570} 3611}
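With the FW-fault and crash-dump checks deleted from the interrupt path, complete_cmd_fusion() evidently returns an irqreturn_t itself. Where the fault detection moved is not visible in this hunk; a plausible destination is a periodic watchdog polling outside interrupt context. The sketch below is hypothetical -- the timer field and function name are assumed, not taken from the driver:

	/* Hypothetical sketch only; names are assumed. */
	static void megasas_fault_watchdog(struct timer_list *t)
	{
		struct megasas_instance *instance =
			from_timer(instance, t, fw_fault_timer);
		u32 fw_state = instance->instancet->read_fw_status_reg(instance) &
			       MFI_STATE_MASK;

		if (fw_state == MFI_STATE_FAULT && !instance->unload)
			schedule_work(&instance->work_init);

		mod_timer(&instance->fw_fault_timer, jiffies + HZ);
	}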
3571 3612
3572/** 3613/**
@@ -3692,9 +3733,9 @@ megasas_release_fusion(struct megasas_instance *instance)
3692 * @regs: MFI register set 3733 * @regs: MFI register set
3693 */ 3734 */
3694static u32 3735static u32
3695megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs) 3736megasas_read_fw_status_reg_fusion(struct megasas_instance *instance)
3696{ 3737{
3697 return readl(&(regs)->outbound_scratch_pad); 3738 return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0);
3698} 3739}
3699 3740
3700/** 3741/**
@@ -3756,11 +3797,12 @@ megasas_adp_reset_fusion(struct megasas_instance *instance,
3756 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset); 3797 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3757 3798
3758 /* Check that the diag write enable (DRWE) bit is on */ 3799 /* Check that the diag write enable (DRWE) bit is on */
3759 host_diag = readl(&instance->reg_set->fusion_host_diag); 3800 host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
3760 retry = 0; 3801 retry = 0;
3761 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { 3802 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3762 msleep(100); 3803 msleep(100);
3763 host_diag = readl(&instance->reg_set->fusion_host_diag); 3804 host_diag = megasas_readl(instance,
3805 &instance->reg_set->fusion_host_diag);
3764 if (retry++ == 100) { 3806 if (retry++ == 100) {
3765 dev_warn(&instance->pdev->dev, 3807 dev_warn(&instance->pdev->dev,
3766 "Host diag unlock failed from %s %d\n", 3808 "Host diag unlock failed from %s %d\n",
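Throughout these hunks, raw readl() calls on the register set are funneled through megasas_readl(), which takes the instance instead of a bare __iomem pointer. The wrapper's body is not shown in this diff; a minimal sketch of its likely shape (the retry remark in the comment is an assumption, not something this diff states):

	static u32 megasas_readl(struct megasas_instance *instance,
				 const volatile void __iomem *addr)
	{
		/* Keying MMIO reads off the instance lets the driver
		 * special-case controllers whose reads may need a retry;
		 * this trivial body is just the baseline behaviour. */
		return readl(addr);
	}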
@@ -3777,11 +3819,12 @@ megasas_adp_reset_fusion(struct megasas_instance *instance,
3777 msleep(3000); 3819 msleep(3000);
3778 3820
3779 /* Make sure reset adapter bit is cleared */ 3821 /* Make sure reset adapter bit is cleared */
3780 host_diag = readl(&instance->reg_set->fusion_host_diag); 3822 host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
3781 retry = 0; 3823 retry = 0;
3782 while (host_diag & HOST_DIAG_RESET_ADAPTER) { 3824 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3783 msleep(100); 3825 msleep(100);
3784 host_diag = readl(&instance->reg_set->fusion_host_diag); 3826 host_diag = megasas_readl(instance,
3827 &instance->reg_set->fusion_host_diag);
3785 if (retry++ == 1000) { 3828 if (retry++ == 1000) {
3786 dev_warn(&instance->pdev->dev, 3829 dev_warn(&instance->pdev->dev,
3787 "Diag reset adapter never cleared %s %d\n", 3830 "Diag reset adapter never cleared %s %d\n",
@@ -3792,14 +3835,14 @@ megasas_adp_reset_fusion(struct megasas_instance *instance,
3792 if (host_diag & HOST_DIAG_RESET_ADAPTER) 3835 if (host_diag & HOST_DIAG_RESET_ADAPTER)
3793 return -1; 3836 return -1;
3794 3837
3795 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set) 3838 abs_state = instance->instancet->read_fw_status_reg(instance)
3796 & MFI_STATE_MASK; 3839 & MFI_STATE_MASK;
3797 retry = 0; 3840 retry = 0;
3798 3841
3799 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { 3842 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3800 msleep(100); 3843 msleep(100);
3801 abs_state = instance->instancet-> 3844 abs_state = instance->instancet->
3802 read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; 3845 read_fw_status_reg(instance) & MFI_STATE_MASK;
3803 } 3846 }
3804 if (abs_state <= MFI_STATE_FW_INIT) { 3847 if (abs_state <= MFI_STATE_FW_INIT) {
3805 dev_warn(&instance->pdev->dev, 3848 dev_warn(&instance->pdev->dev,
@@ -3822,17 +3865,60 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
3822 return 0; 3865 return 0;
3823} 3866}
3824 3867
3868/**
3869 * megasas_trigger_snap_dump - Trigger snap dump in FW
3870 * @instance: Soft instance of adapter
3871 */
3872static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
3873{
3874 int j;
3875 u32 fw_state;
3876
3877 if (!instance->disableOnlineCtrlReset) {
3878 dev_info(&instance->pdev->dev, "Trigger snap dump\n");
3879 writel(MFI_ADP_TRIGGER_SNAP_DUMP,
3880 &instance->reg_set->doorbell);
3881 readl(&instance->reg_set->doorbell);
3882 }
3883
3884 for (j = 0; j < instance->snapdump_wait_time; j++) {
3885 fw_state = instance->instancet->read_fw_status_reg(instance) &
3886 MFI_STATE_MASK;
3887 if (fw_state == MFI_STATE_FAULT) {
3888 dev_err(&instance->pdev->dev,
3889 "Found FW in FAULT state, after snap dump trigger\n");
3890 return;
3891 }
3892 msleep(1000);
3893 }
3894}
3895
3825/* This function waits for outstanding commands on fusion to complete */ 3896/* This function waits for outstanding commands on fusion to complete */
3826int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, 3897int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
3827 int reason, int *convert) 3898 int reason, int *convert)
3828{ 3899{
3829 int i, outstanding, retval = 0, hb_seconds_missed = 0; 3900 int i, outstanding, retval = 0, hb_seconds_missed = 0;
3830 u32 fw_state; 3901 u32 fw_state;
3902 u32 waittime_for_io_completion;
3903
3904 waittime_for_io_completion =
3905 min_t(u32, resetwaittime,
3906 (resetwaittime - instance->snapdump_wait_time));
3831 3907
3832 for (i = 0; i < resetwaittime; i++) { 3908 if (reason == MFI_IO_TIMEOUT_OCR) {
3909 dev_info(&instance->pdev->dev,
3910 "MFI command is timed out\n");
3911 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
3912 if (instance->snapdump_wait_time)
3913 megasas_trigger_snap_dump(instance);
3914 retval = 1;
3915 goto out;
3916 }
3917
3918 for (i = 0; i < waittime_for_io_completion; i++) {
3833 /* Check if firmware is in fault state */ 3919 /* Check if firmware is in fault state */
3834 fw_state = instance->instancet->read_fw_status_reg( 3920 fw_state = instance->instancet->read_fw_status_reg(instance) &
3835 instance->reg_set) & MFI_STATE_MASK; 3921 MFI_STATE_MASK;
3836 if (fw_state == MFI_STATE_FAULT) { 3922 if (fw_state == MFI_STATE_FAULT) {
3837 dev_warn(&instance->pdev->dev, "Found FW in FAULT state," 3923 dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
3838 " will reset adapter scsi%d.\n", 3924 " will reset adapter scsi%d.\n",
@@ -3850,13 +3936,6 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
3850 goto out; 3936 goto out;
3851 } 3937 }
3852 3938
3853 if (reason == MFI_IO_TIMEOUT_OCR) {
3854 dev_info(&instance->pdev->dev,
3855 "MFI IO is timed out, initiating OCR\n");
3856 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
3857 retval = 1;
3858 goto out;
3859 }
3860 3939
3861 /* If SR-IOV VF mode & heartbeat timeout, don't wait */ 3940 /* If SR-IOV VF mode & heartbeat timeout, don't wait */
3862 if (instance->requestorId && !reason) { 3941 if (instance->requestorId && !reason) {
@@ -3901,6 +3980,12 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
3901 msleep(1000); 3980 msleep(1000);
3902 } 3981 }
3903 3982
3983 if (instance->snapdump_wait_time) {
3984 megasas_trigger_snap_dump(instance);
3985 retval = 1;
3986 goto out;
3987 }
3988
3904 if (atomic_read(&instance->fw_outstanding)) { 3989 if (atomic_read(&instance->fw_outstanding)) {
3905 dev_err(&instance->pdev->dev, "pending commands remain after waiting, " 3990 dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
3906 "will reset adapter scsi%d.\n", 3991 "will reset adapter scsi%d.\n",
@@ -3908,6 +3993,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
3908 *convert = 1; 3993 *convert = 1;
3909 retval = 1; 3994 retval = 1;
3910 } 3995 }
3996
3911out: 3997out:
3912 return retval; 3998 return retval;
3913} 3999}
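Worked example of the new wait-budget split, assuming the driver's usual 180-second resetwaittime default and the MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME of 15 seconds defined in the header hunk below:

	waittime_for_io_completion = min(180, 180 - 15) = 165 seconds

So up to 165 seconds are spent polling for IO completion, reserving the final 15 seconds for megasas_trigger_snap_dump() before the OCR proceeds. If snapdump_wait_time is 0, the min() degenerates to the full resetwaittime and the snap-dump branch at the end is skipped.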
@@ -4518,7 +4604,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
4518 mutex_unlock(&instance->reset_mutex); 4604 mutex_unlock(&instance->reset_mutex);
4519 return FAILED; 4605 return FAILED;
4520 } 4606 }
4521 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set); 4607 status_reg = instance->instancet->read_fw_status_reg(instance);
4522 abs_state = status_reg & MFI_STATE_MASK; 4608 abs_state = status_reg & MFI_STATE_MASK;
4523 4609
4524 /* IO timeout detected, forcibly put FW in FAULT state */ 4610 /* IO timeout detected, forcibly put FW in FAULT state */
@@ -4527,7 +4613,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
4527 dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, " 4613 dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
4528 "forcibly FAULT Firmware\n"); 4614 "forcibly FAULT Firmware\n");
4529 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT); 4615 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4530 status_reg = readl(&instance->reg_set->doorbell); 4616 status_reg = megasas_readl(instance, &instance->reg_set->doorbell);
4531 writel(status_reg | MFI_STATE_FORCE_OCR, 4617 writel(status_reg | MFI_STATE_FORCE_OCR,
4532 &instance->reg_set->doorbell); 4618 &instance->reg_set->doorbell);
4533 readl(&instance->reg_set->doorbell); 4619 readl(&instance->reg_set->doorbell);
@@ -4578,7 +4664,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
4578 for (i = 0 ; i < instance->max_scsi_cmds; i++) { 4664 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
4579 cmd_fusion = fusion->cmd_list[i]; 4665 cmd_fusion = fusion->cmd_list[i];
4580 /*check for extra commands issued by driver*/ 4666 /*check for extra commands issued by driver*/
4581 if (instance->adapter_type == VENTURA_SERIES) { 4667 if (instance->adapter_type >= VENTURA_SERIES) {
4582 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds]; 4668 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
4583 megasas_return_cmd_fusion(instance, r1_cmd); 4669 megasas_return_cmd_fusion(instance, r1_cmd);
4584 } 4670 }
@@ -4605,8 +4691,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
4605 4691
4606 atomic_set(&instance->fw_outstanding, 0); 4692 atomic_set(&instance->fw_outstanding, 0);
4607 4693
4608 status_reg = instance->instancet->read_fw_status_reg( 4694 status_reg = instance->instancet->read_fw_status_reg(instance);
4609 instance->reg_set);
4610 abs_state = status_reg & MFI_STATE_MASK; 4695 abs_state = status_reg & MFI_STATE_MASK;
4611 reset_adapter = status_reg & MFI_RESET_ADAPTER; 4696 reset_adapter = status_reg & MFI_RESET_ADAPTER;
4612 if (instance->disableOnlineCtrlReset || 4697 if (instance->disableOnlineCtrlReset ||
@@ -4677,7 +4762,7 @@ transition_to_ready:
4677 megasas_setup_jbod_map(instance); 4762 megasas_setup_jbod_map(instance);
4678 4763
4679 /* reset stream detection array */ 4764 /* reset stream detection array */
4680 if (instance->adapter_type == VENTURA_SERIES) { 4765 if (instance->adapter_type >= VENTURA_SERIES) {
4681 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { 4766 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
4682 memset(fusion->stream_detect_by_ld[j], 4767 memset(fusion->stream_detect_by_ld[j],
4683 0, sizeof(struct LD_STREAM_DETECT)); 4768 0, sizeof(struct LD_STREAM_DETECT));
@@ -4721,6 +4806,13 @@ transition_to_ready:
4721 megasas_set_crash_dump_params(instance, 4806 megasas_set_crash_dump_params(instance,
4722 MR_CRASH_BUF_TURN_OFF); 4807 MR_CRASH_BUF_TURN_OFF);
4723 4808
4809 if (instance->snapdump_wait_time) {
4810 megasas_get_snapdump_properties(instance);
4811 dev_info(&instance->pdev->dev,
4812 "Snap dump wait time\t: %d\n",
4813 instance->snapdump_wait_time);
4814 }
4815
4724 retval = SUCCESS; 4816 retval = SUCCESS;
4725 4817
4726 /* Adapter reset completed successfully */ 4818 /* Adapter reset completed successfully */
@@ -4752,16 +4844,15 @@ out:
4752 return retval; 4844 return retval;
4753} 4845}
4754 4846
4755/* Fusion crash dump collection work queue */	4847/* Fusion crash dump collection */
4756void megasas_fusion_crash_dump_wq(struct work_struct *work) 4848void megasas_fusion_crash_dump(struct megasas_instance *instance)
4757{ 4849{
4758 struct megasas_instance *instance =
4759 container_of(work, struct megasas_instance, crash_init);
4760 u32 status_reg; 4850 u32 status_reg;
4761 u8 partial_copy = 0; 4851 u8 partial_copy = 0;
4852 int wait = 0;
4762 4853
4763 4854
4764 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set); 4855 status_reg = instance->instancet->read_fw_status_reg(instance);
4765 4856
4766 /* 4857 /*
4767 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer 4858 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
@@ -4777,8 +4868,8 @@ void megasas_fusion_crash_dump_wq(struct work_struct *work)
4777 "crash dump and initiating OCR\n"); 4868 "crash dump and initiating OCR\n");
4778 status_reg |= MFI_STATE_CRASH_DUMP_DONE; 4869 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
4779 writel(status_reg, 4870 writel(status_reg,
4780 &instance->reg_set->outbound_scratch_pad); 4871 &instance->reg_set->outbound_scratch_pad_0);
4781 readl(&instance->reg_set->outbound_scratch_pad); 4872 readl(&instance->reg_set->outbound_scratch_pad_0);
4782 return; 4873 return;
4783 } 4874 }
4784 megasas_alloc_host_crash_buffer(instance); 4875 megasas_alloc_host_crash_buffer(instance);
@@ -4786,21 +4877,41 @@ void megasas_fusion_crash_dump_wq(struct work_struct *work)
4786 "allocated: %d\n", instance->drv_buf_alloc); 4877 "allocated: %d\n", instance->drv_buf_alloc);
4787 } 4878 }
4788 4879
4789 /* 4880 while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) &&
4790 * Driver has allocated max buffers, which can be allocated 4881 (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) {
4791 * and FW has more crash dump data, then driver will 4882 if (!(status_reg & MFI_STATE_DMADONE)) {
4792 * ignore the data. 4883 /*
4793 */ 4884 * Next crash dump buffer is not yet DMA'd by FW
4794 if (instance->drv_buf_index >= (instance->drv_buf_alloc)) { 4885 * Check after 10ms. Wait for 1 second for FW to
4795 dev_info(&instance->pdev->dev, "Driver is done copying " 4886 * post the next buffer. If not bail out.
4796 "the buffer: %d\n", instance->drv_buf_alloc); 4887 */
4797 status_reg |= MFI_STATE_CRASH_DUMP_DONE; 4888 wait++;
4798 partial_copy = 1; 4889 msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
4799 } else { 4890 status_reg = instance->instancet->read_fw_status_reg(
4800 memcpy(instance->crash_buf[instance->drv_buf_index], 4891 instance);
4801 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE); 4892 continue;
4802 instance->drv_buf_index++; 4893 }
4803 status_reg &= ~MFI_STATE_DMADONE; 4894
4895 wait = 0;
4896 if (instance->drv_buf_index >= instance->drv_buf_alloc) {
4897 dev_info(&instance->pdev->dev,
4898 "Driver is done copying the buffer: %d\n",
4899 instance->drv_buf_alloc);
4900 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
4901 partial_copy = 1;
4902 break;
4903 } else {
4904 memcpy(instance->crash_buf[instance->drv_buf_index],
4905 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
4906 instance->drv_buf_index++;
4907 status_reg &= ~MFI_STATE_DMADONE;
4908 }
4909
4910 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
4911 readl(&instance->reg_set->outbound_scratch_pad_0);
4912
4913 msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
4914 status_reg = instance->instancet->read_fw_status_reg(instance);
4804 } 4915 }
4805 4916
4806 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) { 4917 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
@@ -4809,13 +4920,10 @@ void megasas_fusion_crash_dump_wq(struct work_struct *work)
4809 instance->fw_crash_buffer_size = instance->drv_buf_index; 4920 instance->fw_crash_buffer_size = instance->drv_buf_index;
4810 instance->fw_crash_state = AVAILABLE; 4921 instance->fw_crash_state = AVAILABLE;
4811 instance->drv_buf_index = 0; 4922 instance->drv_buf_index = 0;
4812 writel(status_reg, &instance->reg_set->outbound_scratch_pad); 4923 writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
4813 readl(&instance->reg_set->outbound_scratch_pad); 4924 readl(&instance->reg_set->outbound_scratch_pad_0);
4814 if (!partial_copy) 4925 if (!partial_copy)
4815 megasas_reset_fusion(instance->host, 0); 4926 megasas_reset_fusion(instance->host, 0);
4816 } else {
4817 writel(status_reg, &instance->reg_set->outbound_scratch_pad);
4818 readl(&instance->reg_set->outbound_scratch_pad);
4819 } 4927 }
4820} 4928}
4821 4929
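For reference, the chunked handshake the rewritten loop implements, summarized from the code above:

	/*
	 * FW -> driver crash-dump flow, one CRASH_DMA_BUF_SIZE chunk at a time:
	 *  1. FW DMAs a chunk into crash_dump_buf and sets MFI_STATE_DMADONE
	 *     in outbound_scratch_pad_0.
	 *  2. Driver copies it into crash_buf[drv_buf_index++], clears
	 *     MFI_STATE_DMADONE and writes the status back as an ACK.
	 *  3. FW either posts the next chunk or sets MFI_STATE_CRASH_DUMP_DONE.
	 * The loop gives up if no new chunk arrives within
	 * MEGASAS_WATCHDOG_WAIT_COUNT polls spaced MEGASAS_WAIT_FOR_NEXT_DMA_MSECS
	 * apart, or once all drv_buf_alloc host buffers are full (partial_copy).
	 */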
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 8e5ebee6517f..ca73c50fe723 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -2,7 +2,8 @@
2 * Linux MegaRAID driver for SAS based RAID controllers 2 * Linux MegaRAID driver for SAS based RAID controllers
3 * 3 *
4 * Copyright (c) 2009-2013 LSI Corporation 4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies 5 * Copyright (c) 2013-2016 Avago Technologies
6 * Copyright (c) 2016-2018 Broadcom Inc.
6 * 7 *
7 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -19,16 +20,13 @@
19 * 20 *
20 * FILE: megaraid_sas_fusion.h 21 * FILE: megaraid_sas_fusion.h
21 * 22 *
22 * Authors: Avago Technologies 23 * Authors: Broadcom Inc.
23 * Manoj Jose 24 * Manoj Jose
24 * Sumant Patro 25 * Sumant Patro
25 * Kashyap Desai <kashyap.desai@avagotech.com> 26 * Kashyap Desai <kashyap.desai@broadcom.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com> 27 * Sumit Saxena <sumit.saxena@broadcom.com>
27 * 28 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com 29 * Send feedback to: megaraidlinux.pdl@broadcom.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */ 30 */
33 31
34#ifndef _MEGARAID_SAS_FUSION_H_ 32#ifndef _MEGARAID_SAS_FUSION_H_
@@ -725,6 +723,7 @@ struct MPI2_IOC_INIT_REQUEST {
725#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/ 723#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
726#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200 724#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
727#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200 725#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
726#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100
728 727
729struct MR_DEV_HANDLE_INFO { 728struct MR_DEV_HANDLE_INFO {
730 __le16 curDevHdl; 729 __le16 curDevHdl;
@@ -1063,6 +1062,9 @@ struct MR_FW_RAID_MAP_DYNAMIC {
1063#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) 1062#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
1064#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) 1063#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
1065 1064
1065#define MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME 15
1066#define MEGASAS_MAX_SNAP_DUMP_WAIT_TIME 60
1067
1066struct megasas_register_set; 1068struct megasas_register_set;
1067struct megasas_instance; 1069struct megasas_instance;
1068 1070
@@ -1350,6 +1352,14 @@ enum CMD_RET_VALUES {
1350 RETURN_CMD = 3, 1352 RETURN_CMD = 3,
1351}; 1353};
1352 1354
1355struct MR_SNAPDUMP_PROPERTIES {
1356 u8 offload_num;
1357 u8 max_num_supported;
1358 u8 cur_num_supported;
1359 u8 trigger_min_num_sec_before_ocr;
1360 u8 reserved[12];
1361};
1362
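megasas_get_snapdump_properties(), called from the reset path in the .c hunk above, presumably issues the new MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES opcode and clamps the firmware's answer. A hedged sketch of the clamping step only -- the DCMD plumbing is omitted and the exact logic is assumed:

	/* Assumed post-processing of a fetched MR_SNAPDUMP_PROPERTIES. */
	static void update_snapdump_wait_time(struct megasas_instance *instance,
					      struct MR_SNAPDUMP_PROPERTIES *prop)
	{
		instance->snapdump_wait_time =
			min_t(u8, prop->trigger_min_num_sec_before_ocr,
			      MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
	}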
1353void megasas_free_cmds_fusion(struct megasas_instance *instance); 1363void megasas_free_cmds_fusion(struct megasas_instance *instance);
1354int megasas_ioc_init_fusion(struct megasas_instance *instance); 1364int megasas_ioc_init_fusion(struct megasas_instance *instance);
1355u8 megasas_get_map_info(struct megasas_instance *instance); 1365u8 megasas_get_map_info(struct megasas_instance *instance);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index ec6940f2fcb3..f3e182eb0970 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1838,7 +1838,7 @@ static struct scsi_host_template mesh_template = {
1838 .this_id = 7, 1838 .this_id = 7,
1839 .sg_tablesize = SG_ALL, 1839 .sg_tablesize = SG_ALL,
1840 .cmd_per_lun = 2, 1840 .cmd_per_lun = 2,
1841 .use_clustering = DISABLE_CLUSTERING, 1841 .max_segment_size = 65535,
1842}; 1842};
1843 1843
1844static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) 1844static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
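The mesh change above shows the replacement idiom for the removed DISABLE_CLUSTERING flag: instead of forbidding scatter-gather merging outright, the host template now states the largest segment the hardware can take, and the block layer merges only up to that cap. A sketch of the idiom, with all other template fields elided:

	static struct scsi_host_template example_template = {
		/* ... */
		/* replaces .use_clustering = DISABLE_CLUSTERING */
		.max_segment_size = 65535,
	};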
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 1e45268a78fc..7efd17a3c25b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright 2000-2015 Avago Technologies. All rights reserved. 3 * Copyright 2000-2020 Broadcom Inc. All rights reserved.
4 * 4 *
5 * 5 *
6 * Name: mpi2.h 6 * Name: mpi2.h
@@ -9,7 +9,7 @@
9 * scatter/gather formats. 9 * scatter/gather formats.
10 * Creation Date: June 21, 2006 10 * Creation Date: June 21, 2006
11 * 11 *
12 * mpi2.h Version: 02.00.50 12 * mpi2.h Version: 02.00.53
13 * 13 *
14 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 14 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
15 * prefix are for use only on MPI v2.5 products, and must not be used 15 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -116,7 +116,12 @@
116 * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. 116 * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
117 * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT. 117 * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT.
118 * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT. 118 * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT.
119 * -------------------------------------------------------------------------- 119 * 07-22-18 02.00.51 Added SECURE_BOOT define.
120 * Bumped MPI2_HEADER_VERSION_UNIT
121 * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT.
122 * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT.
123 * Added MPI2_IOCSTATUS_FAILURE
124 * --------------------------------------------------------------------------
120 */ 125 */
121 126
122#ifndef MPI2_H 127#ifndef MPI2_H
@@ -156,7 +161,7 @@
156 161
157 162
158/* Unit and Dev versioning for this MPI header set */ 163/* Unit and Dev versioning for this MPI header set */
159#define MPI2_HEADER_VERSION_UNIT (0x32) 164#define MPI2_HEADER_VERSION_UNIT (0x35)
160#define MPI2_HEADER_VERSION_DEV (0x00) 165#define MPI2_HEADER_VERSION_DEV (0x00)
161#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 166#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
162#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 167#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -257,6 +262,8 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
257 */ 262 */
258#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008) 263#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
259 264
265#define MPI26_DIAG_SECURE_BOOT (0x80000000)
266
260#define MPI2_DIAG_SBR_RELOAD (0x00002000) 267#define MPI2_DIAG_SBR_RELOAD (0x00002000)
261 268
262#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800) 269#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
@@ -687,7 +694,9 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
687#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007) 694#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
688#define MPI2_IOCSTATUS_INVALID_STATE (0x0008) 695#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
689#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) 696#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
697/*MPI v2.6 and later */
690#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A) 698#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A)
699#define MPI2_IOCSTATUS_FAILURE (0x000F)
691 700
692/**************************************************************************** 701/****************************************************************************
693* Config IOCStatus values 702* Config IOCStatus values
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 5122920a961a..398fa6fde960 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1,13 +1,13 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright 2000-2015 Avago Technologies. All rights reserved. 3 * Copyright 2000-2020 Broadcom Inc. All rights reserved.
4 * 4 *
5 * 5 *
6 * Name: mpi2_cnfg.h 6 * Name: mpi2_cnfg.h
7 * Title: MPI Configuration messages and pages 7 * Title: MPI Configuration messages and pages
8 * Creation Date: November 10, 2006 8 * Creation Date: November 10, 2006
9 * 9 *
10 * mpi2_cnfg.h Version: 02.00.42 10 * mpi2_cnfg.h Version: 02.00.46
11 * 11 *
12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
13 * prefix are for use only on MPI v2.5 products, and must not be used 13 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -231,6 +231,18 @@
231 * Added NOIOB field to PCIe Device Page 2. 231 * Added NOIOB field to PCIe Device Page 2.
232 * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to 232 * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
233 * the Capabilities field of PCIe Device Page 2. 233 * the Capabilities field of PCIe Device Page 2.
234 * 07-22-18 02.00.43 Added defines for SAS3916 and SAS3816.
	235 * Added WriteCache defines to IO Unit Page 1.
236 * Added MaxEnclosureLevel to BIOS Page 1.
237 * Added OEMRD to SAS Enclosure Page 1.
238 * Added DMDReportPCIe to PCIe IO Unit Page 1.
239 * Added Flags field and flags for Retimers to
240 * PCIe Switch Page 1.
241 * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7.
242 * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1
	243 * 08-28-18 02.00.46 Added NVMe Write Cache flag to IOUnitPage1
244 * Added DMDReport Delay Time defines to
245 * PCIeIOUnitPage1
234 * -------------------------------------------------------------------------- 246 * --------------------------------------------------------------------------
235 */ 247 */
236 248
@@ -568,8 +580,17 @@ typedef struct _MPI2_CONFIG_REPLY {
568#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1) 580#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1)
569#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2) 581#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2)
570 582
571#define MPI26_MFGPAGE_DEVID_SAS3816 (0x00A1) 583#define MPI26_MFGPAGE_DEVID_SEC_MASK_3916 (0x0003)
572#define MPI26_MFGPAGE_DEVID_SAS3916 (0x00A0) 584#define MPI26_MFGPAGE_DEVID_INVALID0_3916 (0x00E0)
585#define MPI26_MFGPAGE_DEVID_CFG_SEC_3916 (0x00E1)
586#define MPI26_MFGPAGE_DEVID_HARD_SEC_3916 (0x00E2)
587#define MPI26_MFGPAGE_DEVID_INVALID1_3916 (0x00E3)
588
589#define MPI26_MFGPAGE_DEVID_SEC_MASK_3816 (0x0003)
590#define MPI26_MFGPAGE_DEVID_INVALID0_3816 (0x00E4)
591#define MPI26_MFGPAGE_DEVID_CFG_SEC_3816 (0x00E5)
592#define MPI26_MFGPAGE_DEVID_HARD_SEC_3816 (0x00E6)
593#define MPI26_MFGPAGE_DEVID_INVALID1_3816 (0x00E7)
573 594
574 595
575/*Manufacturing Page 0 */ 596/*Manufacturing Page 0 */
@@ -932,7 +953,11 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
932 953
933#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) 954#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
934 955
935/*IO Unit Page 1 Flags defines */ 956/* IO Unit Page 1 Flags defines */
957#define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000)
958#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00000000)
959#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00010000)
960#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00020000)
936#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) 961#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
937#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) 962#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
938#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) 963#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
@@ -1511,7 +1536,7 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
1511 U32 BiosOptions; /*0x04 */ 1536 U32 BiosOptions; /*0x04 */
1512 U32 IOCSettings; /*0x08 */ 1537 U32 IOCSettings; /*0x08 */
1513 U8 SSUTimeout; /*0x0C */ 1538 U8 SSUTimeout; /*0x0C */
1514 U8 Reserved1; /*0x0D */ 1539 U8 MaxEnclosureLevel; /*0x0D */
1515 U16 Reserved2; /*0x0E */ 1540 U16 Reserved2; /*0x0E */
1516 U32 DeviceSettings; /*0x10 */ 1541 U32 DeviceSettings; /*0x10 */
1517 U16 NumberOfDevices; /*0x14 */ 1542 U16 NumberOfDevices; /*0x14 */
@@ -1531,7 +1556,6 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
1531#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000) 1556#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000)
1532 1557
1533#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) 1558#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
1534#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
1535#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) 1559#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000)
1536#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) 1560#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800)
1537#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000) 1561#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000)
@@ -3271,10 +3295,12 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
3271 U16 NumSlots; /*0x18 */ 3295 U16 NumSlots; /*0x18 */
3272 U16 StartSlot; /*0x1A */ 3296 U16 StartSlot; /*0x1A */
3273 U8 ChassisSlot; /*0x1C */ 3297 U8 ChassisSlot; /*0x1C */
3274 U8 EnclosureLeve; /*0x1D */ 3298 U8 EnclosureLevel; /*0x1D */
3275 U16 SEPDevHandle; /*0x1E */ 3299 U16 SEPDevHandle; /*0x1E */
3276 U32 Reserved3; /*0x20 */ 3300 U8 OEMRD; /*0x20 */
3277 U32 Reserved4; /*0x24 */ 3301 U8 Reserved1a; /*0x21 */
3302 U16 Reserved2; /*0x22 */
3303 U32 Reserved3; /*0x24 */
3278} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, 3304} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
3279 *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, 3305 *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
3280 Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t, 3306 Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t,
@@ -3285,6 +3311,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
3285#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) 3311#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
3286 3312
3287/*values for SAS Enclosure Page 0 Flags field */ 3313/*values for SAS Enclosure Page 0 Flags field */
3314#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_VALID (0x0080)
3315#define MPI26_SAS_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040)
3288#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) 3316#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
3289#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) 3317#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
3290#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) 3318#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
@@ -3298,6 +3326,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
3298#define MPI26_ENCLOSURE0_PAGEVERSION (0x04) 3326#define MPI26_ENCLOSURE0_PAGEVERSION (0x04)
3299 3327
3300/*Values for Enclosure Page 0 Flags field */ 3328/*Values for Enclosure Page 0 Flags field */
3329#define MPI26_ENCLS0_FLAGS_OEMRD_VALID (0x0080)
3330#define MPI26_ENCLS0_FLAGS_OEMRD_COLLECTING (0x0040)
3301#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020) 3331#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
3302#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) 3332#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
3303#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F) 3333#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F)
@@ -3696,8 +3726,9 @@ typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA {
3696 Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t; 3726 Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t;
3697 3727
3698/*values for LinkFlags */ 3728/*values for LinkFlags */
3699#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS (0x00) 3729#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK (0x00)
3700#define MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS (0x01) 3730#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN (0x01)
3731#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN (0x02)
3701 3732
3702/* 3733/*
3703 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to 3734 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
@@ -3714,7 +3745,7 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
3714 U16 AdditionalControlFlags; /*0x0C */ 3745 U16 AdditionalControlFlags; /*0x0C */
3715 U16 NVMeMaxQueueDepth; /*0x0E */ 3746 U16 NVMeMaxQueueDepth; /*0x0E */
3716 U8 NumPhys; /*0x10 */ 3747 U8 NumPhys; /*0x10 */
3717 U8 Reserved1; /*0x11 */ 3748 U8 DMDReportPCIe; /*0x11 */
3718 U16 Reserved2; /*0x12 */ 3749 U16 Reserved2; /*0x12 */
3719 MPI26_PCIE_IO_UNIT1_PHY_DATA 3750 MPI26_PCIE_IO_UNIT1_PHY_DATA
3720 PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */ 3751 PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */
@@ -3736,6 +3767,12 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
3736#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40) 3767#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
3737#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50) 3768#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
3738 3769
3770/*values for PCIe IO Unit Page 1 DMDReportPCIe */
3771#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80)
3772#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_1_SEC (0x00)
3773#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC (0x80)
3774#define MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK (0x7F)
3775
3739/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo 3776/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
3740 *values 3777 *values
3741 */ 3778 */
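Illustrative decode of the new DMDReportPCIe byte using the masks defined above, assuming the delay field counts in the selected unit (the helper name is hypothetical):

	static unsigned int dmd_report_delay_seconds(U8 dmd)
	{
		unsigned int unit =
			((dmd & MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK) ==
			 MPI26_PCIEIOUNIT1_DMDRPT_UNIT_16_SEC) ? 16 : 1;

		return unit * (dmd & MPI26_PCIEIOUNIT1_DMDRPT_DELAY_TIME_MASK);
	}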
@@ -3788,6 +3825,9 @@ typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 {
3788 3825
3789/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ 3826/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
3790 3827
3828/* defines for the Flags field */
3829#define MPI26_PCIESWITCH1_2_RETIMER_PRESENCE (0x0002)
3830#define MPI26_PCIESWITCH1_RETIMER_PRESENCE (0x0001)
3791 3831
3792/**************************************************************************** 3832/****************************************************************************
3793* PCIe Device Config Pages (MPI v2.6 and later) 3833* PCIe Device Config Pages (MPI v2.6 and later)
@@ -3849,19 +3889,21 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 {
3849 *field 3889 *field
3850 */ 3890 */
3851 3891
3852/*values for PCIe Device Page 0 Flags field */ 3892/*values for PCIe Device Page 0 Flags field*/
3853#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x8000) 3893#define MPI26_PCIEDEV0_FLAGS_2_RETIMER_PRESENCE (0x00020000)
3854#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x4000) 3894#define MPI26_PCIEDEV0_FLAGS_RETIMER_PRESENCE (0x00010000)
3855#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x2000) 3895#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x00008000)
3856#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x0400) 3896#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x00004000)
3857#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x0200) 3897#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x00002000)
3858#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x0100) 3898#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x00000400)
3859#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x0080) 3899#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x00000200)
3860#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x0040) 3900#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x00000100)
3861#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x0020) 3901#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x00000080)
3862#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x0010) 3902#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x00000040)
3863#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x0002) 3903#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x00000020)
3864#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x0001) 3904#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x00000010)
3905#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x00000002)
3906#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x00000001)
3865 3907
3866/* values for PCIe Device Page 0 SupportedLinkRates field */ 3908/* values for PCIe Device Page 0 SupportedLinkRates field */
3867#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08) 3909#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
new file mode 100644
index 000000000000..4959585f029d
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -0,0 +1,506 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright 2016-2020 Broadcom Limited. All rights reserved.
4 *
5 * Name: mpi2_image.h
6 * Description: Contains definitions for firmware and other component images
7 * Creation Date: 04/02/2018
8 * Version: 02.06.03
9 *
10 *
11 * Version History
12 * ---------------
13 *
14 * Date Version Description
15 * -------- -------- ------------------------------------------------------
16 * 08-01-18 02.06.00 Initial version for MPI 2.6.5.
17 * 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26
18 * 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE
19 * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
20 */
21#ifndef MPI2_IMAGE_H
22#define MPI2_IMAGE_H
23
24
25/*FW Image Header */
26typedef struct _MPI2_FW_IMAGE_HEADER {
27 U32 Signature; /*0x00 */
28 U32 Signature0; /*0x04 */
29 U32 Signature1; /*0x08 */
30 U32 Signature2; /*0x0C */
31 MPI2_VERSION_UNION MPIVersion; /*0x10 */
32 MPI2_VERSION_UNION FWVersion; /*0x14 */
33 MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
34 MPI2_VERSION_UNION PackageVersion; /*0x1C */
35 U16 VendorID; /*0x20 */
36 U16 ProductID; /*0x22 */
37 U16 ProtocolFlags; /*0x24 */
38 U16 Reserved26; /*0x26 */
39 U32 IOCCapabilities; /*0x28 */
40 U32 ImageSize; /*0x2C */
41 U32 NextImageHeaderOffset; /*0x30 */
42 U32 Checksum; /*0x34 */
43 U32 Reserved38; /*0x38 */
44 U32 Reserved3C; /*0x3C */
45 U32 Reserved40; /*0x40 */
46 U32 Reserved44; /*0x44 */
47 U32 Reserved48; /*0x48 */
48 U32 Reserved4C; /*0x4C */
49 U32 Reserved50; /*0x50 */
50 U32 Reserved54; /*0x54 */
51 U32 Reserved58; /*0x58 */
52 U32 Reserved5C; /*0x5C */
53 U32 BootFlags; /*0x60 */
54 U32 FirmwareVersionNameWhat; /*0x64 */
55 U8 FirmwareVersionName[32]; /*0x68 */
56 U32 VendorNameWhat; /*0x88 */
57 U8 VendorName[32]; /*0x8C */
58 U32 PackageNameWhat; /*0x88 */
59 U8 PackageName[32]; /*0x8C */
60 U32 ReservedD0; /*0xD0 */
61 U32 ReservedD4; /*0xD4 */
62 U32 ReservedD8; /*0xD8 */
63 U32 ReservedDC; /*0xDC */
64 U32 ReservedE0; /*0xE0 */
65 U32 ReservedE4; /*0xE4 */
66 U32 ReservedE8; /*0xE8 */
67 U32 ReservedEC; /*0xEC */
68 U32 ReservedF0; /*0xF0 */
69 U32 ReservedF4; /*0xF4 */
70 U32 ReservedF8; /*0xF8 */
71 U32 ReservedFC; /*0xFC */
72} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
73 Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
74
75/*Signature field */
76#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
77#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
78#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
79#define MPI26_FW_HEADER_SIGNATURE (0xEB000000)
80
81/*Signature0 field */
82#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
83#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
84/*Last byte is defined by architecture */
85#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500)
86#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A)
87#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
88#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
89/*legacy (0x5AEAA55A) */
90#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
91#define MPI26_FW_HEADER_SIGNATURE0 \
92 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
93#define MPI26_FW_HEADER_SIGNATURE0_3516 \
94 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
95#define MPI26_FW_HEADER_SIGNATURE0_4008 \
96 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
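Worked out, the ARC_0 variant reproduces the legacy constant: 0x5AEAA500 + 0x5A = 0x5AEAA55A, matching the /*legacy (0x5AEAA55A) */ note above, while ARC_1 and ARC_3 yield 0x5AEAA500 and 0x5AEAA502 for the 3516 and 4008 parts respectively.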
97
98/*Signature1 field */
99#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
100#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
101#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5)
102
103/*Signature2 field */
104#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
105#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
106#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA)
107
108/*defines for using the ProductID field */
109#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
110#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
111
112#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
113#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
114#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
115#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
116
117#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
118/*SAS ProductID Family bits */
119#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
120#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
121#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
122#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028)
123#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031)
124
125/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
126
127/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
128
129#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
130#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
131
132#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60)
133#define MPI2_FW_HEADER_BOOTFLAGS_ISSI32M_FLAG (0x00000001)
134#define MPI2_FW_HEADER_BOOTFLAGS_W25Q256JW_FLAG (0x00000002)
 135/*This image has an auto-discovery version of SPI */
136#define MPI2_FW_HEADER_BOOTFLAGS_AUTO_SPI_FLAG (0x00000004)
137
138
139#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
140
141#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
142
143#define MPI2_FW_HEADER_SIZE (0x100)
144
145
146/****************************************************************************
147 * Component Image Format and related defines *
148 ****************************************************************************/
149
150/*Maximum number of Hash Exclusion entries in a Component Image Header */
151#define MPI26_COMP_IMG_HDR_NUM_HASH_EXCL (4)
152
153/*Hash Exclusion Format */
154typedef struct _MPI26_HASH_EXCLUSION_FORMAT {
155 U32 Offset; /*0x00 */
156 U32 Size; /*0x04 */
157} MPI26_HASH_EXCLUSION_FORMAT,
158 *PTR_MPI26_HASH_EXCLUSION_FORMAT,
159 Mpi26HashSxclusionFormat_t,
160 *pMpi26HashExclusionFormat_t;
161
162/*FW Image Header */
163typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
164 U32 Signature0; /*0x00 */
165 U32 LoadAddress; /*0x04 */
166 U32 DataSize; /*0x08 */
167 U32 StartAddress; /*0x0C */
168 U32 Signature1; /*0x10 */
169 U32 FlashOffset; /*0x14 */
170 U32 FlashSize; /*0x18 */
171 U32 VersionStringOffset; /*0x1C */
172 U32 BuildDateStringOffset; /*0x20 */
173 U32 BuildTimeStringOffset; /*0x24 */
174 U32 EnvironmentVariableOffset; /*0x28 */
175 U32 ApplicationSpecific; /*0x2C */
176 U32 Signature2; /*0x30 */
177 U32 HeaderSize; /*0x34 */
178 U32 Crc; /*0x38 */
179 U8 NotFlashImage; /*0x3C */
180 U8 Compressed; /*0x3D */
181 U16 Reserved3E; /*0x3E */
182 U32 SecondaryFlashOffset; /*0x40 */
183 U32 Reserved44; /*0x44 */
184 U32 Reserved48; /*0x48 */
185 MPI2_VERSION_UNION RMCInterfaceVersion; /*0x4C */
186 MPI2_VERSION_UNION Reserved50; /*0x50 */
187 MPI2_VERSION_UNION FWVersion; /*0x54 */
188 MPI2_VERSION_UNION NvdataVersion; /*0x58 */
189 MPI26_HASH_EXCLUSION_FORMAT
190 HashExclusion[MPI26_COMP_IMG_HDR_NUM_HASH_EXCL];/*0x5C */
191 U32 NextImageHeaderOffset; /*0x7C */
192 U32 Reserved80[32]; /*0x80 -- 0xFC */
193} MPI26_COMPONENT_IMAGE_HEADER,
194 *PTR_MPI26_COMPONENT_IMAGE_HEADER,
195 Mpi26ComponentImageHeader_t,
196 *pMpi26ComponentImageHeader_t;
197
198
199/**** Definitions for Signature0 field ****/
200#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042)
201
202/**** Definitions for Signature1 field ****/
203#define MPI26_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041)
204#define MPI26_IMAGE_HEADER_SIGNATURE1_CBB (0x20424243)
205#define MPI26_IMAGE_HEADER_SIGNATURE1_MFG (0x2047464D)
206#define MPI26_IMAGE_HEADER_SIGNATURE1_BIOS (0x534F4942)
207#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIM (0x4D494948)
208#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIA (0x41494948)
209#define MPI26_IMAGE_HEADER_SIGNATURE1_CPLD (0x444C5043)
210#define MPI26_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
211#define MPI26_IMAGE_HEADER_SIGNATURE1_NVDATA (0x5444564E)
212#define MPI26_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
213#define MPI26_IMAGE_HEADER_SIGNATURE1_PBLP (0x50424C50)
214
215/**** Definitions for Signature2 field ****/
216#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
217
218/**** Offsets for Image Header Fields ****/
219#define MPI26_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00)
220#define MPI26_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04)
221#define MPI26_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08)
222#define MPI26_IMAGE_HEADER_START_ADDRESS_OFFSET (0x0C)
223#define MPI26_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10)
224#define MPI26_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14)
225#define MPI26_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18)
226#define MPI26_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1C)
227#define MPI26_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20)
228#define MPI26_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24)
229#define MPI26_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28)
230#define MPI26_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2C)
231#define MPI26_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30)
232#define MPI26_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34)
233#define MPI26_IMAGE_HEADER_CRC_OFFSET (0x38)
234#define MPI26_IMAGE_HEADER_NOT_FLASH_IMAGE_OFFSET (0x3C)
235#define MPI26_IMAGE_HEADER_COMPRESSED_OFFSET (0x3D)
236#define MPI26_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40)
237#define MPI26_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4C)
238#define MPI26_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54)
239#define MPI26_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5C)
240#define MPI26_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7C)
241
242
243#define MPI26_IMAGE_HEADER_SIZE (0x100)
244
245
246/*Extended Image Header */
247typedef struct _MPI2_EXT_IMAGE_HEADER {
248 U8 ImageType; /*0x00 */
249 U8 Reserved1; /*0x01 */
250 U16 Reserved2; /*0x02 */
251 U32 Checksum; /*0x04 */
252 U32 ImageSize; /*0x08 */
253 U32 NextImageHeaderOffset; /*0x0C */
254 U32 PackageVersion; /*0x10 */
255 U32 Reserved3; /*0x14 */
256 U32 Reserved4; /*0x18 */
257 U32 Reserved5; /*0x1C */
258 U8 IdentifyString[32]; /*0x20 */
259} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
260 Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
261
262/*useful offsets */
263#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
264#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
265#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
266#define MPI2_EXT_IMAGE_PACKAGEVERSION_OFFSET (0x10)
267
268#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
269
270/*defines for the ImageType field */
271#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
272#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
273#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
274#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
275#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
276#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
277#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
278#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
279#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
280#define MPI2_EXT_IMAGE_TYPE_RDE (0x0A)
281#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
282#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
283
284#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
285
286/*FLASH Layout Extended Image Data */
287
288/*
289 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
290 *one and check RegionsPerLayout at runtime.
291 */
292#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
293#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
294#endif
295
296/*
297 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
298 *one and check NumberOfLayouts at runtime.
299 */
300#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
301#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
302#endif
303
304typedef struct _MPI2_FLASH_REGION {
305 U8 RegionType; /*0x00 */
306 U8 Reserved1; /*0x01 */
307 U16 Reserved2; /*0x02 */
308 U32 RegionOffset; /*0x04 */
309 U32 RegionSize; /*0x08 */
310 U32 Reserved3; /*0x0C */
311} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
312 Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
313
314typedef struct _MPI2_FLASH_LAYOUT {
315 U32 FlashSize; /*0x00 */
316 U32 Reserved1; /*0x04 */
317 U32 Reserved2; /*0x08 */
318 U32 Reserved3; /*0x0C */
319 MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
320} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
321 Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
322
323typedef struct _MPI2_FLASH_LAYOUT_DATA {
324 U8 ImageRevision; /*0x00 */
325 U8 Reserved1; /*0x01 */
326 U8 SizeOfRegion; /*0x02 */
327 U8 Reserved2; /*0x03 */
328 U16 NumberOfLayouts; /*0x04 */
329 U16 RegionsPerLayout; /*0x06 */
330 U16 MinimumSectorAlignment; /*0x08 */
331 U16 Reserved3; /*0x0A */
332 U32 Reserved4; /*0x0C */
333 MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
334} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
335 Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
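Per the comments above, both array lengths are compiled as 1 and the real bounds come from NumberOfLayouts, RegionsPerLayout and SizeOfRegion at runtime. A hedged sketch of a conforming walk -- endianness handling omitted, and SizeOfRegion assumed to be the per-region stride in bytes:

	static void walk_flash_layouts(const MPI2_FLASH_LAYOUT_DATA *data)
	{
		const U8 *p = (const U8 *)data->Layout;
		U16 i, j;

		for (i = 0; i < data->NumberOfLayouts; i++) {
			const MPI2_FLASH_LAYOUT *layout = (const void *)p;
			const U8 *r = (const U8 *)layout->Region;

			for (j = 0; j < data->RegionsPerLayout; j++) {
				const MPI2_FLASH_REGION *region = (const void *)r;
				/* inspect region->RegionType, ->RegionOffset,
				 * ->RegionSize here */
				r += data->SizeOfRegion;
			}
			p = r;	/* next layout starts after the last region */
		}
	}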
336
337/*defines for the RegionType field */
338#define MPI2_FLASH_REGION_UNUSED (0x00)
339#define MPI2_FLASH_REGION_FIRMWARE (0x01)
340#define MPI2_FLASH_REGION_BIOS (0x02)
341#define MPI2_FLASH_REGION_NVDATA (0x03)
342#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
343#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
344#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
345#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
346#define MPI2_FLASH_REGION_MEGARAID (0x09)
347#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
348#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
349#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
350#define MPI2_FLASH_REGION_SBR (0x0E)
351#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F)
352#define MPI2_FLASH_REGION_HIIM (0x10)
353#define MPI2_FLASH_REGION_HIIA (0x11)
354#define MPI2_FLASH_REGION_CTLR (0x12)
355#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13)
356#define MPI2_FLASH_REGION_MR_NVDATA (0x14)
357#define MPI2_FLASH_REGION_CPLD (0x15)
358#define MPI2_FLASH_REGION_PSOC (0x16)
359
360/*ImageRevision */
361#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
362
363/*Supported Devices Extended Image Data */
364
365/*
366 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
367 *one and check NumberOfDevices at runtime.
368 */
369#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
370#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
371#endif
372
373typedef struct _MPI2_SUPPORTED_DEVICE {
374 U16 DeviceID; /*0x00 */
375 U16 VendorID; /*0x02 */
376 U16 DeviceIDMask; /*0x04 */
377 U16 Reserved1; /*0x06 */
378 U8 LowPCIRev; /*0x08 */
379 U8 HighPCIRev; /*0x09 */
380 U16 Reserved2; /*0x0A */
381 U32 Reserved3; /*0x0C */
382} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
383 Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
384
385typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
386 U8 ImageRevision; /*0x00 */
387 U8 Reserved1; /*0x01 */
388 U8 NumberOfDevices; /*0x02 */
389 U8 Reserved2; /*0x03 */
390 U32 Reserved3; /*0x04 */
391 MPI2_SUPPORTED_DEVICE
392 SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
393} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
394 Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
395
396/*ImageRevision */
397#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
398
399/*Init Extended Image Data */
400
401typedef struct _MPI2_INIT_IMAGE_FOOTER {
402 U32 BootFlags; /*0x00 */
403 U32 ImageSize; /*0x04 */
404 U32 Signature0; /*0x08 */
405 U32 Signature1; /*0x0C */
406 U32 Signature2; /*0x10 */
407 U32 ResetVector; /*0x14 */
408} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
409 Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
410
411/*defines for the BootFlags field */
412#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
413
414/*defines for the ImageSize field */
415#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
416
417/*defines for the Signature0 field */
418#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
419#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
420
421/*defines for the Signature1 field */
422#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
423#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
424
425/*defines for the Signature2 field */
426#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
427#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
428
429/*Signature fields as individual bytes */
430#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
431#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
432#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
433#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
434
435#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
436#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
437#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
438#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
439
440#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
441#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
442#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
443#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
444
445/*defines for the ResetVector field */
446#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
447
448
449/* Encrypted Hash Extended Image Data */
450
451typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
452 U8 HashImageType; /*0x00 */
453 U8 HashAlgorithm; /*0x01 */
454 U8 EncryptionAlgorithm; /*0x02 */
455 U8 Reserved1; /*0x03 */
456 U32 Reserved2; /*0x04 */
457 U32 EncryptedHash[1]; /*0x08 */ /* variable length */
458} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
459Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
460
461/* values for HashImageType */
462#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
463#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
464#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
465
466#define MPI26_HASH_IMAGE_TYPE_UNUSED (0x00)
467#define MPI26_HASH_IMAGE_TYPE_FIRMWARE (0x01)
468#define MPI26_HASH_IMAGE_TYPE_BIOS (0x02)
469#define MPI26_HASH_IMAGE_TYPE_KEY_HASH (0x03)
470
471/* values for HashAlgorithm */
472#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
473#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
474
475#define MPI26_HASH_ALGORITHM_VERSION_MASK (0xE0)
476#define MPI26_HASH_ALGORITHM_VERSION_NONE (0x00)
477#define MPI26_HASH_ALGORITHM_VERSION_SHA1 (0x20)
478#define MPI26_HASH_ALGORITHM_VERSION_SHA2 (0x40)
479#define MPI26_HASH_ALGORITHM_VERSION_SHA3 (0x60)
480#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F)
481#define MPI26_HASH_ALGORITHM_SIZE_256 (0x01)
482#define MPI26_HASH_ALGORITHM_SIZE_512 (0x02)
483
484
485/* values for EncryptionAlgorithm */
486#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
487#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
488
489#define MPI26_ENCRYPTION_ALG_UNUSED (0x00)
490#define MPI26_ENCRYPTION_ALG_RSA256 (0x01)
491#define MPI26_ENCRYPTION_ALG_RSA512 (0x02)
492#define MPI26_ENCRYPTION_ALG_RSA1024 (0x03)
493#define MPI26_ENCRYPTION_ALG_RSA2048 (0x04)
494#define MPI26_ENCRYPTION_ALG_RSA4096 (0x05)
495
496typedef struct _MPI25_ENCRYPTED_HASH_DATA {
497 U8 ImageVersion; /*0x00 */
498 U8 NumHash; /*0x01 */
499 U16 Reserved1; /*0x02 */
500 U32 Reserved2; /*0x04 */
501 MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /*0x08 */
502} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
503	Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
504
505
506#endif /* MPI2_IMAGE_H */
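Since EncryptedHash[1] is declared with one element but is variable length, MPI25_ENCRYPTED_HASH_DATA cannot be walked with simple array indexing: the stride to the next entry depends on the (algorithm-dependent) digest size. A sketch that therefore only inspects the first entry, assuming the header is included and <stdio.h> is available:

#include <stdio.h>

static void dump_first_hash_entry(const MPI25_ENCRYPTED_HASH_DATA *d)
{
        const MPI25_ENCRYPTED_HASH_ENTRY *e = &d->EncryptedHashEntry[0];

        if (!d->NumHash)
                return;
        printf("image type %u, hash alg 0x%02x, encryption alg %u\n",
               e->HashImageType, e->HashAlgorithm, e->EncryptionAlgorithm);
}
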
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 6213ce6791ac..8f1b903fe0a9 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright 2000-2015 Avago Technologies. All rights reserved. 3 * Copyright 2000-2020 Broadcom Inc. All rights reserved.
4 * 4 *
5 * 5 *
6 * Name: mpi2_init.h 6 * Name: mpi2_init.h
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 1faec3a93e69..68ea408cd5c5 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,13 +1,13 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright 2000-2015 Avago Technologies. All rights reserved. 3 * Copyright 2000-2020 Broadcom Inc. All rights reserved.
4 * 4 *
5 * 5 *
6 * Name: mpi2_ioc.h 6 * Name: mpi2_ioc.h
7 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 7 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
8 * Creation Date: October 11, 2006 8 * Creation Date: October 11, 2006
9 * 9 *
10 * mpi2_ioc.h Version: 02.00.34 10 * mpi2_ioc.h Version: 02.00.37
11 * 11 *
12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
13 * prefix are for use only on MPI v2.5 products, and must not be used 13 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -171,6 +171,10 @@
171 * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED 171 * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
172 * to the ReasonCode field in PCIe Device Status Change 172 * to the ReasonCode field in PCIe Device Status Change
173 * Event Data. 173 * Event Data.
174 * 07-22-18 02.00.35 Added FW_DOWNLOAD_ITYPE_CPLD and _PSOC.
175 * Moved FW image definitions into new mpi2_image.h
176 * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
177 * 09-07-18 02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
174 * -------------------------------------------------------------------------- 178 * --------------------------------------------------------------------------
175 */ 179 */
176 180
@@ -1255,6 +1259,7 @@ typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY {
1255#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20) 1259#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20)
1256#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30) 1260#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30)
1257#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40) 1261#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40)
1262#define MPI26_EVENT_PCIE_TOPO_PI_16_LANES (0x50)
1258 1263
1259#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F) 1264#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F)
1260#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00) 1265#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
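The new 16-lane value slots into the existing PortInfo lane-width encoding. A decode sketch; using 0xF0 as the lane-width mask is an assumption (only the individual lane values appear in this hunk), and the helper name is invented:

static unsigned int pcie_port_lanes(U8 port_info)
{
        switch (port_info & 0xF0) {
        case MPI26_EVENT_PCIE_TOPO_PI_2_LANES:  return 2;
        case MPI26_EVENT_PCIE_TOPO_PI_4_LANES:  return 4;
        case MPI26_EVENT_PCIE_TOPO_PI_8_LANES:  return 8;
        case MPI26_EVENT_PCIE_TOPO_PI_16_LANES: return 16;
        default:                                return 0;
        }
}
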
@@ -1450,7 +1455,11 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
1450#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12) 1455#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12)
1451#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13) 1456#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13)
1452#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14) 1457#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14)
1458/*MPI v2.6 and newer */
1459#define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15)
1460#define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
1453#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) 1461#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
1462#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE (0xFF)
1454 1463
1455/*MPI v2.0 FWDownload TransactionContext Element */ 1464/*MPI v2.0 FWDownload TransactionContext Element */
1456typedef struct _MPI2_FW_DOWNLOAD_TCSGE { 1465typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
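Per the "MPI v2.6 and newer" comment, the CPLD and PSOC image types must not be used on older parts, so a downloader would gate them on the controller generation. A hypothetical validity check under that assumption:

static int fw_download_itype_supported(U8 itype, int is_mpi26_or_newer)
{
        /* CPLD and PSOC images exist only on MPI v2.6 and newer parts */
        if (itype == MPI2_FW_DOWNLOAD_ITYPE_CPLD ||
            itype == MPI2_FW_DOWNLOAD_ITYPE_PSOC)
                return is_mpi26_or_newer;
        return 1;
}
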
@@ -1597,352 +1606,6 @@ typedef struct _MPI2_FW_UPLOAD_REPLY {
1597} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY, 1606} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY,
1598 Mpi2FWUploadReply_t, *pMPi2FWUploadReply_t; 1607 Mpi2FWUploadReply_t, *pMPi2FWUploadReply_t;
1599 1608
1600/*FW Image Header */
1601typedef struct _MPI2_FW_IMAGE_HEADER {
1602 U32 Signature; /*0x00 */
1603 U32 Signature0; /*0x04 */
1604 U32 Signature1; /*0x08 */
1605 U32 Signature2; /*0x0C */
1606 MPI2_VERSION_UNION MPIVersion; /*0x10 */
1607 MPI2_VERSION_UNION FWVersion; /*0x14 */
1608 MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
1609 MPI2_VERSION_UNION PackageVersion; /*0x1C */
1610 U16 VendorID; /*0x20 */
1611 U16 ProductID; /*0x22 */
1612 U16 ProtocolFlags; /*0x24 */
1613 U16 Reserved26; /*0x26 */
1614 U32 IOCCapabilities; /*0x28 */
1615 U32 ImageSize; /*0x2C */
1616 U32 NextImageHeaderOffset; /*0x30 */
1617 U32 Checksum; /*0x34 */
1618 U32 Reserved38; /*0x38 */
1619 U32 Reserved3C; /*0x3C */
1620 U32 Reserved40; /*0x40 */
1621 U32 Reserved44; /*0x44 */
1622 U32 Reserved48; /*0x48 */
1623 U32 Reserved4C; /*0x4C */
1624 U32 Reserved50; /*0x50 */
1625 U32 Reserved54; /*0x54 */
1626 U32 Reserved58; /*0x58 */
1627 U32 Reserved5C; /*0x5C */
1628 U32 BootFlags; /*0x60 */
1629 U32 FirmwareVersionNameWhat; /*0x64 */
1630 U8 FirmwareVersionName[32]; /*0x68 */
1631 U32 VendorNameWhat; /*0x88 */
1632 U8 VendorName[32]; /*0x8C */
1633 U32 PackageNameWhat; /*0xAC */
1634 U8 PackageName[32]; /*0xB0 */
1635 U32 ReservedD0; /*0xD0 */
1636 U32 ReservedD4; /*0xD4 */
1637 U32 ReservedD8; /*0xD8 */
1638 U32 ReservedDC; /*0xDC */
1639 U32 ReservedE0; /*0xE0 */
1640 U32 ReservedE4; /*0xE4 */
1641 U32 ReservedE8; /*0xE8 */
1642 U32 ReservedEC; /*0xEC */
1643 U32 ReservedF0; /*0xF0 */
1644 U32 ReservedF4; /*0xF4 */
1645 U32 ReservedF8; /*0xF8 */
1646 U32 ReservedFC; /*0xFC */
1647} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
1648 Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
1649
1650/*Signature field */
1651#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
1652#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
1653#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
1654#define MPI26_FW_HEADER_SIGNATURE (0xEB000000)
1655
1656/*Signature0 field */
1657#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
1658#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
1659/* Last byte is defined by architecture */
1660#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500)
1661#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A)
1662#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
1663#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
1664/* legacy (0x5AEAA55A) */
1665#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
1666#define MPI26_FW_HEADER_SIGNATURE0 \
1667 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
1668#define MPI26_FW_HEADER_SIGNATURE0_3516 \
1669 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
1670#define MPI26_FW_HEADER_SIGNATURE0_4008 \
1671 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
1672
1673/*Signature1 field */
1674#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
1675#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
1676#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5)
1677
1678/*Signature2 field */
1679#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
1680#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
1681#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA)
1682
1683/*defines for using the ProductID field */
1684#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
1685#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
1686
1687#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
1688#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
1689#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
1690#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
1691
1692#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
1693/*SAS ProductID Family bits */
1694#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
1695#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
1696#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
1697#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028)
1698#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031)
1699
1700/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
1701
1702/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
1703
1704#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
1705#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
1706#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60)
1707#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
1708
1709#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
1710
1711#define MPI2_FW_HEADER_SIZE (0x100)
1712
1713/*Extended Image Header */
1714typedef struct _MPI2_EXT_IMAGE_HEADER {
1715 U8 ImageType; /*0x00 */
1716 U8 Reserved1; /*0x01 */
1717 U16 Reserved2; /*0x02 */
1718 U32 Checksum; /*0x04 */
1719 U32 ImageSize; /*0x08 */
1720 U32 NextImageHeaderOffset; /*0x0C */
1721 U32 PackageVersion; /*0x10 */
1722 U32 Reserved3; /*0x14 */
1723 U32 Reserved4; /*0x18 */
1724 U32 Reserved5; /*0x1C */
1725 U8 IdentifyString[32]; /*0x20 */
1726} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
1727 Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
1728
1729/*useful offsets */
1730#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
1731#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
1732#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
1733
1734#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
1735
1736/*defines for the ImageType field */
1737#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
1738#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
1739#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
1740#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
1741#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
1742#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
1743#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
1744#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
1745#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
1746#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
1747#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
1748
1749#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
1750
1751/*FLASH Layout Extended Image Data */
1752
1753/*
1754 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1755 *one and check RegionsPerLayout at runtime.
1756 */
1757#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
1758#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
1759#endif
1760
1761/*
1762 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1763 *one and check NumberOfLayouts at runtime.
1764 */
1765#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
1766#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
1767#endif
1768
1769typedef struct _MPI2_FLASH_REGION {
1770 U8 RegionType; /*0x00 */
1771 U8 Reserved1; /*0x01 */
1772 U16 Reserved2; /*0x02 */
1773 U32 RegionOffset; /*0x04 */
1774 U32 RegionSize; /*0x08 */
1775 U32 Reserved3; /*0x0C */
1776} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
1777 Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
1778
1779typedef struct _MPI2_FLASH_LAYOUT {
1780 U32 FlashSize; /*0x00 */
1781 U32 Reserved1; /*0x04 */
1782 U32 Reserved2; /*0x08 */
1783 U32 Reserved3; /*0x0C */
1784 MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
1785} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
1786 Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
1787
1788typedef struct _MPI2_FLASH_LAYOUT_DATA {
1789 U8 ImageRevision; /*0x00 */
1790 U8 Reserved1; /*0x01 */
1791 U8 SizeOfRegion; /*0x02 */
1792 U8 Reserved2; /*0x03 */
1793 U16 NumberOfLayouts; /*0x04 */
1794 U16 RegionsPerLayout; /*0x06 */
1795 U16 MinimumSectorAlignment; /*0x08 */
1796 U16 Reserved3; /*0x0A */
1797 U32 Reserved4; /*0x0C */
1798 MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
1799} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
1800 Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
1801
1802/*defines for the RegionType field */
1803#define MPI2_FLASH_REGION_UNUSED (0x00)
1804#define MPI2_FLASH_REGION_FIRMWARE (0x01)
1805#define MPI2_FLASH_REGION_BIOS (0x02)
1806#define MPI2_FLASH_REGION_NVDATA (0x03)
1807#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
1808#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
1809#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
1810#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
1811#define MPI2_FLASH_REGION_MEGARAID (0x09)
1812#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
1813#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
1814#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
1815#define MPI2_FLASH_REGION_SBR (0x0E)
1816#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F)
1817#define MPI2_FLASH_REGION_HIIM (0x10)
1818#define MPI2_FLASH_REGION_HIIA (0x11)
1819#define MPI2_FLASH_REGION_CTLR (0x12)
1820#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13)
1821#define MPI2_FLASH_REGION_MR_NVDATA (0x14)
1822
1823/*ImageRevision */
1824#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
1825
1826/*Supported Devices Extended Image Data */
1827
1828/*
1829 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1830 *one and check NumberOfDevices at runtime.
1831 */
1832#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
1833#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
1834#endif
1835
1836typedef struct _MPI2_SUPPORTED_DEVICE {
1837 U16 DeviceID; /*0x00 */
1838 U16 VendorID; /*0x02 */
1839 U16 DeviceIDMask; /*0x04 */
1840 U16 Reserved1; /*0x06 */
1841 U8 LowPCIRev; /*0x08 */
1842 U8 HighPCIRev; /*0x09 */
1843 U16 Reserved2; /*0x0A */
1844 U32 Reserved3; /*0x0C */
1845} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
1846 Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
1847
1848typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
1849 U8 ImageRevision; /*0x00 */
1850 U8 Reserved1; /*0x01 */
1851 U8 NumberOfDevices; /*0x02 */
1852 U8 Reserved2; /*0x03 */
1853 U32 Reserved3; /*0x04 */
1854 MPI2_SUPPORTED_DEVICE
1855 SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
1856} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
1857 Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
1858
1859/*ImageRevision */
1860#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
1861
1862/*Init Extended Image Data */
1863
1864typedef struct _MPI2_INIT_IMAGE_FOOTER {
1865 U32 BootFlags; /*0x00 */
1866 U32 ImageSize; /*0x04 */
1867 U32 Signature0; /*0x08 */
1868 U32 Signature1; /*0x0C */
1869 U32 Signature2; /*0x10 */
1870 U32 ResetVector; /*0x14 */
1871} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
1872 Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
1873
1874/*defines for the BootFlags field */
1875#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
1876
1877/*defines for the ImageSize field */
1878#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
1879
1880/*defines for the Signature0 field */
1881#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
1882#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
1883
1884/*defines for the Signature1 field */
1885#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
1886#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
1887
1888/*defines for the Signature2 field */
1889#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
1890#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
1891
1892/*Signature fields as individual bytes */
1893#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
1894#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
1895#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
1896#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
1897
1898#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
1899#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
1900#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
1901#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
1902
1903#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
1904#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
1905#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
1906#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
1907
1908/*defines for the ResetVector field */
1909#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
1910
1911
1912/* Encrypted Hash Extended Image Data */
1913
1914typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
1915 U8 HashImageType; /* 0x00 */
1916 U8 HashAlgorithm; /* 0x01 */
1917 U8 EncryptionAlgorithm; /* 0x02 */
1918 U8 Reserved1; /* 0x03 */
1919 U32 Reserved2; /* 0x04 */
1920 U32 EncryptedHash[1]; /* 0x08 */ /* variable length */
1921} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
1922Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
1923
1924/* values for HashImageType */
1925#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
1926#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
1927#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
1928
1929/* values for HashAlgorithm */
1930#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
1931#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
1932
1933/* values for EncryptionAlgorithm */
1934#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
1935#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
1936
1937typedef struct _MPI25_ENCRYPTED_HASH_DATA {
1938 U8 ImageVersion; /* 0x00 */
1939 U8 NumHash; /* 0x01 */
1940 U16 Reserved1; /* 0x02 */
1941 U32 Reserved2; /* 0x04 */
1942 MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */
1943} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
1944Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
1945
1946 1609
1947/**************************************************************************** 1610/****************************************************************************
1948* PowerManagementControl message 1611* PowerManagementControl message
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
index f0281f943ec9..63a09509d7d1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright 2012-2015 Avago Technologies. All rights reserved. 2 * Copyright 2000-2020 Broadcom Inc. All rights reserved.
3 * 3 *
4 * 4 *
5 * Name: mpi2_pci.h 5 * Name: mpi2_pci.h
6 * Title: MPI PCIe Attached Devices structures and definitions. 6 * Title: MPI PCIe Attached Devices structures and definitions.
7 * Creation Date: October 9, 2012 7 * Creation Date: October 9, 2012
8 * 8 *
9 * mpi2_pci.h Version: 02.00.02 9 * mpi2_pci.h Version: 02.00.03
10 * 10 *
11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
12 * prefix are for use only on MPI v2.5 products, and must not be used 12 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -23,6 +23,7 @@
23 * Removed SOP support. 23 * Removed SOP support.
24 * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to 24 * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
25 * NVME Encapsulated Request. 25 * NVME Encapsulated Request.
26 * 07-22-18 02.00.03 Updated flags field for NVME Encapsulated request
26 * -------------------------------------------------------------------------- 27 * --------------------------------------------------------------------------
27 */ 28 */
28 29
@@ -75,10 +76,10 @@ typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST {
75#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010) 76#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010)
76/*Error Response Address Space */ 77/*Error Response Address Space */
77#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR (0x000C) 78#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR (0x000C)
79#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR_MASK (0x000C)
78#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR (0x0000) 80#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR (0x0000)
79#define MPI26_NVME_FLAGS_IOCPLB_RSP_ADDR (0x0008) 81#define MPI26_NVME_FLAGS_IOCCTL_RSP_ADDR (0x0008)
80#define MPI26_NVME_FLAGS_IOCPLBNTA_RSP_ADDR (0x000C) 82/* Data Direction*/
81/*Data Direction*/
82#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003) 83#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003)
83#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000) 84#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000)
84#define MPI26_NVME_FLAGS_WRITE (0x0001) 85#define MPI26_NVME_FLAGS_WRITE (0x0001)
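After this rename (IOCPLB becomes IOCCTL, and the IOCPLBNTA value is dropped), only two error-response address spaces remain. A small decode sketch built entirely from the defines in this hunk; the helper name is invented:

static int nvme_err_rsp_in_host_memory(U16 flags)
{
        /* SYSTEM means the error response buffer lives in host memory,
         * IOCCTL means it lives inside the IOC */
        return (flags & MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR) ==
                MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR;
}
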
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
index b9bb1c178f12..b770eb516c14 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright 2000-2014 Avago Technologies. All rights reserved. 3 * Copyright 2000-2020 Broadcom Inc. All rights reserved.
4 * 4 *
5 * 5 *
6 * Name: mpi2_raid.h 6 * Name: mpi2_raid.h
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
index afa17ff246b4..16c922a8a02b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright 2000-2015 Avago Technologies. All rights reserved. 3 * Copyright 2000-2020 Broadcom Inc. All rights reserved.
4 * 4 *
5 * 5 *
6 * Name: mpi2_sas.h 6 * Name: mpi2_sas.h
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 629296ee9236..3f966b6796b3 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -1,13 +1,13 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright 2000-2014 Avago Technologies. All rights reserved. 3 * Copyright 2000-2020 Broadcom Inc. All rights reserved.
4 * 4 *
5 * 5 *
6 * Name: mpi2_tool.h 6 * Name: mpi2_tool.h
7 * Title: MPI diagnostic tool structures and definitions 7 * Title: MPI diagnostic tool structures and definitions
8 * Creation Date: March 26, 2007 8 * Creation Date: March 26, 2007
9 * 9 *
10 * mpi2_tool.h Version: 02.00.14 10 * mpi2_tool.h Version: 02.00.15
11 * 11 *
12 * Version History 12 * Version History
13 * --------------- 13 * ---------------
@@ -38,6 +38,8 @@
38 * 11-18-14 02.00.13 Updated copyright information. 38 * 11-18-14 02.00.13 Updated copyright information.
39 * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean 39 * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean
40 * Tool Request Message. 40 * Tool Request Message.
41 * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool.
42 * Added option for DeviceInfo field in ISTWI tool.
41 * -------------------------------------------------------------------------- 43 * --------------------------------------------------------------------------
42 */ 44 */
43 45
@@ -58,6 +60,7 @@
58#define MPI2_TOOLBOX_BEACON_TOOL (0x05) 60#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
59#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) 61#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
60#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07) 62#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07)
63#define MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN (0x08)
61 64
62/**************************************************************************** 65/****************************************************************************
63* Toolbox reply 66* Toolbox reply
@@ -226,6 +229,13 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
226#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80) 229#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
227#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) 230#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
228 231
232/*MPI26 TOOLBOX Request MsgFlags defines */
233#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_MASK (0x01)
234/*Request uses Man Page 43 device index addressing */
235#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINDEX (0x00)
236/*Request uses Man Page 43 device info struct addressing */
237#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINFO (0x01)
238
229/*Toolbox ISTWI Read Write Tool reply message */ 239/*Toolbox ISTWI Read Write Tool reply message */
230typedef struct _MPI2_TOOLBOX_ISTWI_REPLY { 240typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
231 U8 Tool; /*0x00 */ 241 U8 Tool; /*0x00 */
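With the new MsgFlags bit, an ISTWI request can select Manufacturing Page 43 device-info addressing instead of device-index addressing. A sketch; the typedef name is assumed from the file's naming convention:

static void istwi_use_devinfo_addressing(
        Mpi2ToolboxIstwiReadWriteRequest_t *req)
{
        req->MsgFlags &= ~MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_MASK;
        req->MsgFlags |= MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINFO;
}
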
@@ -387,6 +397,64 @@ Mpi2ToolboxTextDisplayRequest_t,
387#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01) 397#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01)
388 398
389 399
400/***************************************************************************
401 * Toolbox Backend Lane Margining Tool
402 ***************************************************************************
403 */
404
405/*Toolbox Backend Lane Margining Tool request message */
406typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REQUEST {
407 U8 Tool; /*0x00 */
408 U8 Reserved1; /*0x01 */
409 U8 ChainOffset; /*0x02 */
410 U8 Function; /*0x03 */
411 U16 Reserved2; /*0x04 */
412 U8 Reserved3; /*0x06 */
413 U8 MsgFlags; /*0x07 */
414 U8 VP_ID; /*0x08 */
415 U8 VF_ID; /*0x09 */
416 U16 Reserved4; /*0x0A */
417 U8 Command; /*0x0C */
418 U8 SwitchPort; /*0x0D */
419 U16 DevHandle; /*0x0E */
420 U8 RegisterOffset; /*0x10 */
421 U8 Reserved5; /*0x11 */
422 U16 DataLength; /*0x12 */
423 MPI25_SGE_IO_UNION SGL; /*0x14 */
424} MPI26_TOOLBOX_LANE_MARGINING_REQUEST,
425 *PTR_MPI2_TOOLBOX_LANE_MARGINING_REQUEST,
426 Mpi26ToolboxLaneMarginingRequest_t,
427 *pMpi2ToolboxLaneMarginingRequest_t;
428
429/* defines for the Command field */
430#define MPI26_TOOL_MARGIN_COMMAND_ENTER_MARGIN_MODE (0x01)
431#define MPI26_TOOL_MARGIN_COMMAND_READ_REGISTER_DATA (0x02)
432#define MPI26_TOOL_MARGIN_COMMAND_WRITE_REGISTER_DATA (0x03)
433#define MPI26_TOOL_MARGIN_COMMAND_EXIT_MARGIN_MODE (0x04)
434
435
436/*Toolbox Backend Lane Margining Tool reply message */
437typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REPLY {
438 U8 Tool; /*0x00 */
439 U8 Reserved1; /*0x01 */
440 U8 MsgLength; /*0x02 */
441 U8 Function; /*0x03 */
442 U16 Reserved2; /*0x04 */
443 U8 Reserved3; /*0x06 */
444 U8 MsgFlags; /*0x07 */
445 U8 VP_ID; /*0x08 */
446 U8 VF_ID; /*0x09 */
447 U16 Reserved4; /*0x0A */
448 U16 Reserved5; /*0x0C */
449 U16 IOCStatus; /*0x0E */
450 U32 IOCLogInfo; /*0x10 */
451 U16 ReturnedDataLength; /*0x14 */
452 U16 Reserved6; /*0x16 */
453} MPI26_TOOLBOX_LANE_MARGINING_REPLY,
454 *PTR_MPI26_TOOLBOX_LANE_MARGINING_REPLY,
455 Mpi26ToolboxLaneMarginingReply_t,
456 *pMpi26ToolboxLaneMarginingReply_t;
457
390 458
391/***************************************************************************** 459/*****************************************************************************
392* 460*
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 2500377d0723..0a6cb8f0680c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -157,6 +157,32 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
157 param_get_int, &mpt3sas_fwfault_debug, 0644); 157 param_get_int, &mpt3sas_fwfault_debug, 0644);
158 158
159/** 159/**
 160 * _base_readl_aero - retry readl() up to three times
 161 * @addr: MPT Fusion system interface register address
 162 *
 163 * Retry the readl() up to three times if it returns zero while
 164 * reading the system interface register.
 165 */
166static inline u32
167_base_readl_aero(const volatile void __iomem *addr)
168{
169 u32 i = 0, ret_val;
170
171 do {
172 ret_val = readl(addr);
173 i++;
174 } while (ret_val == 0 && i < 3);
175
176 return ret_val;
177}
178
179static inline u32
180_base_readl(const volatile void __iomem *addr)
181{
182 return readl(addr);
183}
184
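The retry variant is only wired in for Aero controllers; as a later hunk in this patch shows, the attach path selects the accessor once, so the register helpers pay only an indirect call:

        ioc->base_readl = ioc->is_aero_ioc ? &_base_readl_aero : &_base_readl;
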
185/**
160 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem 186 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
161 * in BAR0 space. 187 * in BAR0 space.
162 * 188 *
@@ -716,7 +742,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
716 742
717 dump_stack(); 743 dump_stack();
718 744
719 doorbell = readl(&ioc->chip->Doorbell); 745 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
720 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) 746 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
721 mpt3sas_base_fault_info(ioc , doorbell); 747 mpt3sas_base_fault_info(ioc , doorbell);
722 else { 748 else {
@@ -1325,10 +1351,10 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1325 u32 him_register; 1351 u32 him_register;
1326 1352
1327 ioc->mask_interrupts = 1; 1353 ioc->mask_interrupts = 1;
1328 him_register = readl(&ioc->chip->HostInterruptMask); 1354 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1329 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK; 1355 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
1330 writel(him_register, &ioc->chip->HostInterruptMask); 1356 writel(him_register, &ioc->chip->HostInterruptMask);
1331 readl(&ioc->chip->HostInterruptMask); 1357 ioc->base_readl(&ioc->chip->HostInterruptMask);
1332} 1358}
1333 1359
1334/** 1360/**
@@ -1342,7 +1368,7 @@ _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1342{ 1368{
1343 u32 him_register; 1369 u32 him_register;
1344 1370
1345 him_register = readl(&ioc->chip->HostInterruptMask); 1371 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1346 him_register &= ~MPI2_HIM_RIM; 1372 him_register &= ~MPI2_HIM_RIM;
1347 writel(him_register, &ioc->chip->HostInterruptMask); 1373 writel(him_register, &ioc->chip->HostInterruptMask);
1348 ioc->mask_interrupts = 0; 1374 ioc->mask_interrupts = 0;
@@ -3319,8 +3345,9 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3319static inline void 3345static inline void
3320_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) 3346_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3321{ 3347{
3348 wmb();
3322 __raw_writeq(b, addr); 3349 __raw_writeq(b, addr);
3323 mmiowb(); 3350 barrier();
3324} 3351}
3325#else 3352#else
3326static inline void 3353static inline void
@@ -4060,7 +4087,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4060 * flag unset in NVDATA. 4087 * flag unset in NVDATA.
4061 */ 4088 */
4062 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11); 4089 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4063 if (ioc->manu_pg11.EEDPTagMode == 0) { 4090 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
4064 pr_err("%s: overriding NVDATA EEDPTagMode setting\n", 4091 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4065 ioc->name); 4092 ioc->name);
4066 ioc->manu_pg11.EEDPTagMode &= ~0x3; 4093 ioc->manu_pg11.EEDPTagMode &= ~0x3;
@@ -4854,7 +4881,7 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
4854{ 4881{
4855 u32 s, sc; 4882 u32 s, sc;
4856 4883
4857 s = readl(&ioc->chip->Doorbell); 4884 s = ioc->base_readl(&ioc->chip->Doorbell);
4858 sc = s & MPI2_IOC_STATE_MASK; 4885 sc = s & MPI2_IOC_STATE_MASK;
4859 return cooked ? sc : s; 4886 return cooked ? sc : s;
4860} 4887}
@@ -4910,7 +4937,7 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
4910 count = 0; 4937 count = 0;
4911 cntdn = 1000 * timeout; 4938 cntdn = 1000 * timeout;
4912 do { 4939 do {
4913 int_status = readl(&ioc->chip->HostInterruptStatus); 4940 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
4914 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 4941 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
4915 dhsprintk(ioc, 4942 dhsprintk(ioc,
4916 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", 4943 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -4936,7 +4963,7 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
4936 count = 0; 4963 count = 0;
4937 cntdn = 2000 * timeout; 4964 cntdn = 2000 * timeout;
4938 do { 4965 do {
4939 int_status = readl(&ioc->chip->HostInterruptStatus); 4966 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
4940 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 4967 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
4941 dhsprintk(ioc, 4968 dhsprintk(ioc,
4942 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", 4969 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -4974,14 +5001,14 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
4974 count = 0; 5001 count = 0;
4975 cntdn = 1000 * timeout; 5002 cntdn = 1000 * timeout;
4976 do { 5003 do {
4977 int_status = readl(&ioc->chip->HostInterruptStatus); 5004 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
4978 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { 5005 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
4979 dhsprintk(ioc, 5006 dhsprintk(ioc,
4980 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", 5007 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
4981 __func__, count, timeout)); 5008 __func__, count, timeout));
4982 return 0; 5009 return 0;
4983 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 5010 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
4984 doorbell = readl(&ioc->chip->Doorbell); 5011 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
4985 if ((doorbell & MPI2_IOC_STATE_MASK) == 5012 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4986 MPI2_IOC_STATE_FAULT) { 5013 MPI2_IOC_STATE_FAULT) {
4987 mpt3sas_base_fault_info(ioc , doorbell); 5014 mpt3sas_base_fault_info(ioc , doorbell);
@@ -5016,7 +5043,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5016 count = 0; 5043 count = 0;
5017 cntdn = 1000 * timeout; 5044 cntdn = 1000 * timeout;
5018 do { 5045 do {
5019 doorbell_reg = readl(&ioc->chip->Doorbell); 5046 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
5020 if (!(doorbell_reg & MPI2_DOORBELL_USED)) { 5047 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5021 dhsprintk(ioc, 5048 dhsprintk(ioc,
5022 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n", 5049 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
@@ -5078,6 +5105,39 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5078} 5105}
5079 5106
5080/** 5107/**
 5108 * mpt3sas_wait_for_ioc - wait for the IOC to become operational
 5109 * @ioc: per adapter object
 5110 * @timeout: timeout in seconds
 5111 *
 5112 * Waits up to @timeout seconds for the IOC to become operational.
 5113 *
 5114 * Return: 0 if the IOC is present and operational; otherwise -EFAULT.
5115 */
5116
5117int
5118mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
5119{
5120 int wait_state_count = 0;
5121 u32 ioc_state;
5122
5123 do {
5124 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5125 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
5126 break;
5127 ssleep(1);
5128 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5129 __func__, ++wait_state_count);
5130 } while (--timeout);
5131 if (!timeout) {
5132 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
5133 return -EFAULT;
5134 }
5135 if (wait_state_count)
5136 ioc_info(ioc, "ioc is operational\n");
5137 return 0;
5138}
5139
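Every call site converted later in this patch then collapses its open-coded polling loop to the same three lines:

        rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
        if (rc)
                goto out;
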
5140/**
5081 * _base_handshake_req_reply_wait - send request thru doorbell interface 5141 * _base_handshake_req_reply_wait - send request thru doorbell interface
5082 * @ioc: per adapter object 5142 * @ioc: per adapter object
5083 * @request_bytes: request length 5143 * @request_bytes: request length
@@ -5098,13 +5158,13 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5098 __le32 *mfp; 5158 __le32 *mfp;
5099 5159
5100 /* make sure doorbell is not in use */ 5160 /* make sure doorbell is not in use */
5101 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { 5161 if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5102 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__); 5162 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
5103 return -EFAULT; 5163 return -EFAULT;
5104 } 5164 }
5105 5165
5106 /* clear pending doorbell interrupts from previous state changes */ 5166 /* clear pending doorbell interrupts from previous state changes */
5107 if (readl(&ioc->chip->HostInterruptStatus) & 5167 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
5108 MPI2_HIS_IOC2SYS_DB_STATUS) 5168 MPI2_HIS_IOC2SYS_DB_STATUS)
5109 writel(0, &ioc->chip->HostInterruptStatus); 5169 writel(0, &ioc->chip->HostInterruptStatus);
5110 5170
@@ -5147,7 +5207,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5147 } 5207 }
5148 5208
5149 /* read the first two 16-bits, it gives the total length of the reply */ 5209 /* read the first two 16-bits, it gives the total length of the reply */
5150 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) 5210 reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5151 & MPI2_DOORBELL_DATA_MASK); 5211 & MPI2_DOORBELL_DATA_MASK);
5152 writel(0, &ioc->chip->HostInterruptStatus); 5212 writel(0, &ioc->chip->HostInterruptStatus);
5153 if ((_base_wait_for_doorbell_int(ioc, 5))) { 5213 if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5155,7 +5215,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5155 __LINE__); 5215 __LINE__);
5156 return -EFAULT; 5216 return -EFAULT;
5157 } 5217 }
5158 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) 5218 reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5159 & MPI2_DOORBELL_DATA_MASK); 5219 & MPI2_DOORBELL_DATA_MASK);
5160 writel(0, &ioc->chip->HostInterruptStatus); 5220 writel(0, &ioc->chip->HostInterruptStatus);
5161 5221
@@ -5166,9 +5226,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5166 return -EFAULT; 5226 return -EFAULT;
5167 } 5227 }
5168 if (i >= reply_bytes/2) /* overflow case */ 5228 if (i >= reply_bytes/2) /* overflow case */
5169 readl(&ioc->chip->Doorbell); 5229 ioc->base_readl(&ioc->chip->Doorbell);
5170 else 5230 else
5171 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) 5231 reply[i] = le16_to_cpu(
5232 ioc->base_readl(&ioc->chip->Doorbell)
5172 & MPI2_DOORBELL_DATA_MASK); 5233 & MPI2_DOORBELL_DATA_MASK);
5173 writel(0, &ioc->chip->HostInterruptStatus); 5234 writel(0, &ioc->chip->HostInterruptStatus);
5174 } 5235 }
@@ -5211,11 +5272,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
5211 Mpi2SasIoUnitControlRequest_t *mpi_request) 5272 Mpi2SasIoUnitControlRequest_t *mpi_request)
5212{ 5273{
5213 u16 smid; 5274 u16 smid;
5214 u32 ioc_state;
5215 u8 issue_reset = 0; 5275 u8 issue_reset = 0;
5216 int rc; 5276 int rc;
5217 void *request; 5277 void *request;
5218 u16 wait_state_count;
5219 5278
5220 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 5279 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5221 5280
@@ -5227,20 +5286,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
5227 goto out; 5286 goto out;
5228 } 5287 }
5229 5288
5230 wait_state_count = 0; 5289 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
5231 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 5290 if (rc)
5232 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 5291 goto out;
5233 if (wait_state_count++ == 10) {
5234 ioc_err(ioc, "%s: failed due to ioc not operational\n",
5235 __func__);
5236 rc = -EFAULT;
5237 goto out;
5238 }
5239 ssleep(1);
5240 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5241 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5242 __func__, wait_state_count);
5243 }
5244 5292
5245 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 5293 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5246 if (!smid) { 5294 if (!smid) {
@@ -5306,11 +5354,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
5306 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) 5354 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
5307{ 5355{
5308 u16 smid; 5356 u16 smid;
5309 u32 ioc_state;
5310 u8 issue_reset = 0; 5357 u8 issue_reset = 0;
5311 int rc; 5358 int rc;
5312 void *request; 5359 void *request;
5313 u16 wait_state_count;
5314 5360
5315 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 5361 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5316 5362
@@ -5322,20 +5368,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
5322 goto out; 5368 goto out;
5323 } 5369 }
5324 5370
5325 wait_state_count = 0; 5371 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
5326 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 5372 if (rc)
5327 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 5373 goto out;
5328 if (wait_state_count++ == 10) {
5329 ioc_err(ioc, "%s: failed due to ioc not operational\n",
5330 __func__);
5331 rc = -EFAULT;
5332 goto out;
5333 }
5334 ssleep(1);
5335 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5336 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5337 __func__, wait_state_count);
5338 }
5339 5374
5340 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 5375 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5341 if (!smid) { 5376 if (!smid) {
@@ -6020,14 +6055,14 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
6020 if (count++ > 20) 6055 if (count++ > 20)
6021 goto out; 6056 goto out;
6022 6057
6023 host_diagnostic = readl(&ioc->chip->HostDiagnostic); 6058 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6024 drsprintk(ioc, 6059 drsprintk(ioc,
6025 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n", 6060 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
6026 count, host_diagnostic)); 6061 count, host_diagnostic));
6027 6062
6028 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0); 6063 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
6029 6064
6030 hcb_size = readl(&ioc->chip->HCBSize); 6065 hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
6031 6066
6032 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n")); 6067 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
6033 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, 6068 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
@@ -6040,7 +6075,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
6040 for (count = 0; count < (300000000 / 6075 for (count = 0; count < (300000000 /
6041 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) { 6076 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
6042 6077
6043 host_diagnostic = readl(&ioc->chip->HostDiagnostic); 6078 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6044 6079
6045 if (host_diagnostic == 0xFFFFFFFF) 6080 if (host_diagnostic == 0xFFFFFFFF)
6046 goto out; 6081 goto out;
@@ -6391,6 +6426,10 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6391 6426
6392 ioc->rdpq_array_enable_assigned = 0; 6427 ioc->rdpq_array_enable_assigned = 0;
6393 ioc->dma_mask = 0; 6428 ioc->dma_mask = 0;
6429 if (ioc->is_aero_ioc)
6430 ioc->base_readl = &_base_readl_aero;
6431 else
6432 ioc->base_readl = &_base_readl;
6394 r = mpt3sas_base_map_resources(ioc); 6433 r = mpt3sas_base_map_resources(ioc);
6395 if (r) 6434 if (r)
6396 goto out_free_resources; 6435 goto out_free_resources;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 8f1d6b071b39..800351932cc3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -55,6 +55,7 @@
55#include "mpi/mpi2_tool.h" 55#include "mpi/mpi2_tool.h"
56#include "mpi/mpi2_sas.h" 56#include "mpi/mpi2_sas.h"
57#include "mpi/mpi2_pci.h" 57#include "mpi/mpi2_pci.h"
58#include "mpi/mpi2_image.h"
58 59
59#include <scsi/scsi.h> 60#include <scsi/scsi.h>
60#include <scsi/scsi_cmnd.h> 61#include <scsi/scsi_cmnd.h>
@@ -74,9 +75,9 @@
74#define MPT3SAS_DRIVER_NAME "mpt3sas" 75#define MPT3SAS_DRIVER_NAME "mpt3sas"
75#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 76#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
76#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 77#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
77#define MPT3SAS_DRIVER_VERSION "26.100.00.00" 78#define MPT3SAS_DRIVER_VERSION "27.101.00.00"
78#define MPT3SAS_MAJOR_VERSION 26 79#define MPT3SAS_MAJOR_VERSION 27
79#define MPT3SAS_MINOR_VERSION 100 80#define MPT3SAS_MINOR_VERSION 101
80#define MPT3SAS_BUILD_VERSION 0 81#define MPT3SAS_BUILD_VERSION 0
81#define MPT3SAS_RELEASE_VERSION 00 82#define MPT3SAS_RELEASE_VERSION 00
82 83
@@ -139,6 +140,9 @@
139#define DEFAULT_NUM_FWCHAIN_ELEMTS 8 140#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
140 141
141#define FW_IMG_HDR_READ_TIMEOUT 15 142#define FW_IMG_HDR_READ_TIMEOUT 15
143
144#define IOC_OPERATIONAL_WAIT_COUNT 10
145
142/* 146/*
143 * NVMe defines 147 * NVMe defines
144 */ 148 */
@@ -908,6 +912,7 @@ typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
908typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid, 912typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
909 u16 funcdep); 913 u16 funcdep);
910typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid); 914typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
915typedef u32 (*BASE_READ_REG) (const volatile void __iomem *addr);
911 916
912/* IOC Facts and Port Facts converted from little endian to cpu */ 917/* IOC Facts and Port Facts converted from little endian to cpu */
913union mpi3_version_union { 918union mpi3_version_union {
@@ -1388,6 +1393,7 @@ struct MPT3SAS_ADAPTER {
1388 u8 hide_drives; 1393 u8 hide_drives;
1389 spinlock_t diag_trigger_lock; 1394 spinlock_t diag_trigger_lock;
1390 u8 diag_trigger_active; 1395 u8 diag_trigger_active;
1396 BASE_READ_REG base_readl;
1391 struct SL_WH_MASTER_TRIGGER_T diag_trigger_master; 1397 struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
1392 struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event; 1398 struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
1393 struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi; 1399 struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
@@ -1395,6 +1401,7 @@ struct MPT3SAS_ADAPTER {
1395 void *device_remove_in_progress; 1401 void *device_remove_in_progress;
1396 u16 device_remove_in_progress_sz; 1402 u16 device_remove_in_progress_sz;
1397 u8 is_gen35_ioc; 1403 u8 is_gen35_ioc;
1404 u8 is_aero_ioc;
1398 PUT_SMID_IO_FP_HIP put_smid_scsi_io; 1405 PUT_SMID_IO_FP_HIP put_smid_scsi_io;
1399 1406
1400}; 1407};
@@ -1487,6 +1494,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
1487 1494
1488u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, 1495u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
1489 u8 status, void *mpi_request, int sz); 1496 u8 status, void *mpi_request, int sz);
 1497int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout);
1490 1498
1491/* scsih shared API */ 1499/* scsih shared API */
1492struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, 1500struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 02209447f4ef..fb0a17252f86 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -119,7 +119,7 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
119 desc = "raid_volume"; 119 desc = "raid_volume";
120 break; 120 break;
121 case MPI2_CONFIG_PAGETYPE_MANUFACTURING: 121 case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
122 desc = "manufaucturing"; 122 desc = "manufacturing";
123 break; 123 break;
124 case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK: 124 case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
125 desc = "physdisk"; 125 desc = "physdisk";
@@ -300,11 +300,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
300 void *config_page, u16 config_page_sz) 300 void *config_page, u16 config_page_sz)
301{ 301{
302 u16 smid; 302 u16 smid;
303 u32 ioc_state;
304 Mpi2ConfigRequest_t *config_request; 303 Mpi2ConfigRequest_t *config_request;
305 int r; 304 int r;
306 u8 retry_count, issue_host_reset = 0; 305 u8 retry_count, issue_host_reset = 0;
307 u16 wait_state_count;
308 struct config_request mem; 306 struct config_request mem;
309 u32 ioc_status = UINT_MAX; 307 u32 ioc_status = UINT_MAX;
310 308
@@ -361,23 +359,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
361 ioc_info(ioc, "%s: attempting retry (%d)\n", 359 ioc_info(ioc, "%s: attempting retry (%d)\n",
362 __func__, retry_count); 360 __func__, retry_count);
363 } 361 }
364 wait_state_count = 0; 362
365 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 363 r = mpt3sas_wait_for_ioc(ioc, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT);
366 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 364 if (r)
367 if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) { 365 goto free_mem;
368 ioc_err(ioc, "%s: failed due to ioc not operational\n",
369 __func__);
370 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
371 r = -EFAULT;
372 goto free_mem;
373 }
374 ssleep(1);
375 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
376 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
377 __func__, wait_state_count);
378 }
379 if (wait_state_count)
380 ioc_info(ioc, "%s: ioc is operational\n", __func__);
381 366
382 smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx); 367 smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
383 if (!smid) { 368 if (!smid) {
@@ -673,10 +658,6 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
673 r = _config_request(ioc, &mpi_request, mpi_reply, 658 r = _config_request(ioc, &mpi_request, mpi_reply,
674 MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, 659 MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
675 sizeof(*config_page)); 660 sizeof(*config_page));
676 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
677 r = _config_request(ioc, &mpi_request, mpi_reply,
678 MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
679 sizeof(*config_page));
680 out: 661 out:
681 return r; 662 return r;
682} 663}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 4afa597cbfba..b2bb47c14d35 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -641,7 +641,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
641 MPI2DefaultReply_t *mpi_reply; 641 MPI2DefaultReply_t *mpi_reply;
642 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL; 642 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
643 struct _pcie_device *pcie_device = NULL; 643 struct _pcie_device *pcie_device = NULL;
644 u32 ioc_state;
645 u16 smid; 644 u16 smid;
646 u8 timeout; 645 u8 timeout;
647 u8 issue_reset; 646 u8 issue_reset;
@@ -654,7 +653,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
654 dma_addr_t data_in_dma = 0; 653 dma_addr_t data_in_dma = 0;
655 size_t data_in_sz = 0; 654 size_t data_in_sz = 0;
656 long ret; 655 long ret;
657 u16 wait_state_count;
658 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; 656 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
659 u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 657 u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
660 658
@@ -666,22 +664,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
666 goto out; 664 goto out;
667 } 665 }
668 666
669 wait_state_count = 0; 667 ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
670 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 668 if (ret)
671 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 669 goto out;
672 if (wait_state_count++ == 10) {
673 ioc_err(ioc, "%s: failed due to ioc not operational\n",
674 __func__);
675 ret = -EFAULT;
676 goto out;
677 }
678 ssleep(1);
679 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
680 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
681 __func__, wait_state_count);
682 }
683 if (wait_state_count)
684 ioc_info(ioc, "%s: ioc is operational\n", __func__);
685 670
686 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); 671 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
687 if (!mpi_request) { 672 if (!mpi_request) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 03c52847ed07..6be39dc27103 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3748,6 +3748,40 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3748 return _scsih_check_for_pending_tm(ioc, smid); 3748 return _scsih_check_for_pending_tm(ioc, smid);
3749} 3749}
3750 3750
 3751/** _scsih_allow_scmd_to_device - check whether scmd can be
 3752 * issued to the IOC or not.
3753 * @ioc: per adapter object
3754 * @scmd: pointer to scsi command object
3755 *
 3756 * Returns true if scmd can be issued to the IOC, otherwise false.
3757 */
3758inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
3759 struct scsi_cmnd *scmd)
3760{
3761
3762 if (ioc->pci_error_recovery)
3763 return false;
3764
3765 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
3766 if (ioc->remove_host)
3767 return false;
3768
3769 return true;
3770 }
3771
3772 if (ioc->remove_host) {
3773
3774 switch (scmd->cmnd[0]) {
3775 case SYNCHRONIZE_CACHE:
3776 case START_STOP:
3777 return true;
3778 default:
3779 return false;
3780 }
3781 }
3782
3783 return true;
3784}
3751 3785
3752/** 3786/**
3753 * _scsih_sas_control_complete - completion routine 3787 * _scsih_sas_control_complete - completion routine
@@ -4571,7 +4605,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4571 return 0; 4605 return 0;
4572 } 4606 }
4573 4607
4574 if (ioc->pci_error_recovery || ioc->remove_host) { 4608 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4575 scmd->result = DID_NO_CONNECT << 16; 4609 scmd->result = DID_NO_CONNECT << 16;
4576 scmd->scsi_done(scmd); 4610 scmd->scsi_done(scmd);
4577 return 0; 4611 return 0;
@@ -9641,6 +9675,7 @@ static void scsih_remove(struct pci_dev *pdev)
9641 9675
9642 /* release all the volumes */ 9676 /* release all the volumes */
9643 _scsih_ir_shutdown(ioc); 9677 _scsih_ir_shutdown(ioc);
9678 sas_remove_host(shost);
9644 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, 9679 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
9645 list) { 9680 list) {
9646 if (raid_device->starget) { 9681 if (raid_device->starget) {
@@ -9682,7 +9717,6 @@ static void scsih_remove(struct pci_dev *pdev)
9682 ioc->sas_hba.num_phys = 0; 9717 ioc->sas_hba.num_phys = 0;
9683 } 9718 }
9684 9719
9685 sas_remove_host(shost);
9686 mpt3sas_base_detach(ioc); 9720 mpt3sas_base_detach(ioc);
9687 spin_lock(&gioc_lock); 9721 spin_lock(&gioc_lock);
9688 list_del(&ioc->list); 9722 list_del(&ioc->list);
@@ -10139,7 +10173,6 @@ static struct scsi_host_template mpt2sas_driver_template = {
10139 .sg_tablesize = MPT2SAS_SG_DEPTH, 10173 .sg_tablesize = MPT2SAS_SG_DEPTH,
10140 .max_sectors = 32767, 10174 .max_sectors = 32767,
10141 .cmd_per_lun = 7, 10175 .cmd_per_lun = 7,
10142 .use_clustering = ENABLE_CLUSTERING,
10143 .shost_attrs = mpt3sas_host_attrs, 10176 .shost_attrs = mpt3sas_host_attrs,
10144 .sdev_attrs = mpt3sas_dev_attrs, 10177 .sdev_attrs = mpt3sas_dev_attrs,
10145 .track_queue_depth = 1, 10178 .track_queue_depth = 1,
@@ -10178,7 +10211,6 @@ static struct scsi_host_template mpt3sas_driver_template = {
10178 .sg_tablesize = MPT3SAS_SG_DEPTH, 10211 .sg_tablesize = MPT3SAS_SG_DEPTH,
10179 .max_sectors = 32767, 10212 .max_sectors = 32767,
10180 .cmd_per_lun = 7, 10213 .cmd_per_lun = 7,
10181 .use_clustering = ENABLE_CLUSTERING,
10182 .shost_attrs = mpt3sas_host_attrs, 10214 .shost_attrs = mpt3sas_host_attrs,
10183 .sdev_attrs = mpt3sas_dev_attrs, 10215 .sdev_attrs = mpt3sas_dev_attrs,
10184 .track_queue_depth = 1, 10216 .track_queue_depth = 1,
@@ -10250,6 +10282,10 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10250 case MPI26_MFGPAGE_DEVID_SAS3516_1: 10282 case MPI26_MFGPAGE_DEVID_SAS3516_1:
10251 case MPI26_MFGPAGE_DEVID_SAS3416: 10283 case MPI26_MFGPAGE_DEVID_SAS3416:
10252 case MPI26_MFGPAGE_DEVID_SAS3616: 10284 case MPI26_MFGPAGE_DEVID_SAS3616:
10285 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10286 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10287 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10288 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10253 return MPI26_VERSION; 10289 return MPI26_VERSION;
10254 } 10290 }
10255 return 0; 10291 return 0;
@@ -10337,8 +10373,17 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10337 case MPI26_MFGPAGE_DEVID_SAS3616: 10373 case MPI26_MFGPAGE_DEVID_SAS3616:
10338 ioc->is_gen35_ioc = 1; 10374 ioc->is_gen35_ioc = 1;
10339 break; 10375 break;
10376 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10377 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10378 dev_info(&pdev->dev,
10379 "HBA is in Configurable Secure mode\n");
10380 /* fall through */
10381 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10382 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10383 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10384 break;
10340 default: 10385 default:
10341 ioc->is_gen35_ioc = 0; 10386 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10342 } 10387 }
10343 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && 10388 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10344 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || 10389 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
@@ -10795,6 +10840,23 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
10795 /* Mercator ~ 3616*/ 10840 /* Mercator ~ 3616*/
10796 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616, 10841 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
10797 PCI_ANY_ID, PCI_ANY_ID }, 10842 PCI_ANY_ID, PCI_ANY_ID },
10843
10844 /* Aero SI 0x00E1 Configurable Secure
10845 * 0x00E2 Hard Secure
10846 */
10847 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
10848 PCI_ANY_ID, PCI_ANY_ID },
10849 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
10850 PCI_ANY_ID, PCI_ANY_ID },
10851
10852 /* Sea SI 0x00E5 Configurable Secure
10853 * 0x00E6 Hard Secure
10854 */
10855 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
10856 PCI_ANY_ID, PCI_ANY_ID },
10857 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
10858 PCI_ANY_ID, PCI_ANY_ID },
10859
10798 {0} /* Terminating entry */ 10860 {0} /* Terminating entry */
10799}; 10861};
10800MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table); 10862MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
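
The four new IDs referenced above come from the MPI 2.6 headers; per the comments in the table itself, the corresponding defines are (values taken from those comments, Aero = 3916, Sea = 3816):

        #define MPI26_MFGPAGE_DEVID_CFG_SEC_3916        (0x00E1)
        #define MPI26_MFGPAGE_DEVID_HARD_SEC_3916       (0x00E2)
        #define MPI26_MFGPAGE_DEVID_CFG_SEC_3816        (0x00E5)
        #define MPI26_MFGPAGE_DEVID_HARD_SEC_3816       (0x00E6)
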
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 6a8a3c09b4b1..60ae2d0feb2b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -296,7 +296,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
296 struct rep_manu_request *manufacture_request; 296 struct rep_manu_request *manufacture_request;
297 int rc; 297 int rc;
298 u16 smid; 298 u16 smid;
299 u32 ioc_state;
300 void *psge; 299 void *psge;
301 u8 issue_reset = 0; 300 u8 issue_reset = 0;
302 void *data_out = NULL; 301 void *data_out = NULL;
@@ -304,7 +303,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
304 dma_addr_t data_in_dma; 303 dma_addr_t data_in_dma;
305 size_t data_in_sz; 304 size_t data_in_sz;
306 size_t data_out_sz; 305 size_t data_out_sz;
307 u16 wait_state_count;
308 306
309 if (ioc->shost_recovery || ioc->pci_error_recovery) { 307 if (ioc->shost_recovery || ioc->pci_error_recovery) {
310 ioc_info(ioc, "%s: host reset in progress!\n", __func__); 308 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
@@ -320,22 +318,9 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
320 } 318 }
321 ioc->transport_cmds.status = MPT3_CMD_PENDING; 319 ioc->transport_cmds.status = MPT3_CMD_PENDING;
322 320
323 wait_state_count = 0; 321 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
324 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 322 if (rc)
325 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 323 goto out;
326 if (wait_state_count++ == 10) {
327 ioc_err(ioc, "%s: failed due to ioc not operational\n",
328 __func__);
329 rc = -EFAULT;
330 goto out;
331 }
332 ssleep(1);
333 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
334 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
335 __func__, wait_state_count);
336 }
337 if (wait_state_count)
338 ioc_info(ioc, "%s: ioc is operational\n", __func__);
339 324
340 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); 325 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
341 if (!smid) { 326 if (!smid) {
@@ -821,10 +806,13 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
821 mpt3sas_port->remote_identify.sas_address, 806 mpt3sas_port->remote_identify.sas_address,
822 mpt3sas_phy->phy_id); 807 mpt3sas_phy->phy_id);
823 mpt3sas_phy->phy_belongs_to_port = 0; 808 mpt3sas_phy->phy_belongs_to_port = 0;
824 sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy); 809 if (!ioc->remove_host)
810 sas_port_delete_phy(mpt3sas_port->port,
811 mpt3sas_phy->phy);
825 list_del(&mpt3sas_phy->port_siblings); 812 list_del(&mpt3sas_phy->port_siblings);
826 } 813 }
827 sas_port_delete(mpt3sas_port->port); 814 if (!ioc->remove_host)
815 sas_port_delete(mpt3sas_port->port);
828 kfree(mpt3sas_port); 816 kfree(mpt3sas_port);
829} 817}
830 818
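
These guards pair with the scsih_remove() reordering above: once sas_remove_host() has already torn down the transport ports and phys during unload, port_remove must not hand the same objects to sas_port_delete_phy()/sas_port_delete() a second time.
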
@@ -1076,13 +1064,11 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
1076 struct phy_error_log_reply *phy_error_log_reply; 1064 struct phy_error_log_reply *phy_error_log_reply;
1077 int rc; 1065 int rc;
1078 u16 smid; 1066 u16 smid;
1079 u32 ioc_state;
1080 void *psge; 1067 void *psge;
1081 u8 issue_reset = 0; 1068 u8 issue_reset = 0;
1082 void *data_out = NULL; 1069 void *data_out = NULL;
1083 dma_addr_t data_out_dma; 1070 dma_addr_t data_out_dma;
1084 u32 sz; 1071 u32 sz;
1085 u16 wait_state_count;
1086 1072
1087 if (ioc->shost_recovery || ioc->pci_error_recovery) { 1073 if (ioc->shost_recovery || ioc->pci_error_recovery) {
1088 ioc_info(ioc, "%s: host reset in progress!\n", __func__); 1074 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
@@ -1098,22 +1084,9 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
1098 } 1084 }
1099 ioc->transport_cmds.status = MPT3_CMD_PENDING; 1085 ioc->transport_cmds.status = MPT3_CMD_PENDING;
1100 1086
1101 wait_state_count = 0; 1087 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
1102 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 1088 if (rc)
1103 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1089 goto out;
1104 if (wait_state_count++ == 10) {
1105 ioc_err(ioc, "%s: failed due to ioc not operational\n",
1106 __func__);
1107 rc = -EFAULT;
1108 goto out;
1109 }
1110 ssleep(1);
1111 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1112 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
1113 __func__, wait_state_count);
1114 }
1115 if (wait_state_count)
1116 ioc_info(ioc, "%s: ioc is operational\n", __func__);
1117 1090
1118 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); 1091 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
1119 if (!smid) { 1092 if (!smid) {
@@ -1381,13 +1354,11 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
1381 struct phy_control_reply *phy_control_reply; 1354 struct phy_control_reply *phy_control_reply;
1382 int rc; 1355 int rc;
1383 u16 smid; 1356 u16 smid;
1384 u32 ioc_state;
1385 void *psge; 1357 void *psge;
1386 u8 issue_reset = 0; 1358 u8 issue_reset = 0;
1387 void *data_out = NULL; 1359 void *data_out = NULL;
1388 dma_addr_t data_out_dma; 1360 dma_addr_t data_out_dma;
1389 u32 sz; 1361 u32 sz;
1390 u16 wait_state_count;
1391 1362
1392 if (ioc->shost_recovery || ioc->pci_error_recovery) { 1363 if (ioc->shost_recovery || ioc->pci_error_recovery) {
1393 ioc_info(ioc, "%s: host reset in progress!\n", __func__); 1364 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
@@ -1403,22 +1374,9 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
1403 } 1374 }
1404 ioc->transport_cmds.status = MPT3_CMD_PENDING; 1375 ioc->transport_cmds.status = MPT3_CMD_PENDING;
1405 1376
1406 wait_state_count = 0; 1377 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
1407 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 1378 if (rc)
1408 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1379 goto out;
1409 if (wait_state_count++ == 10) {
1410 ioc_err(ioc, "%s: failed due to ioc not operational\n",
1411 __func__);
1412 rc = -EFAULT;
1413 goto out;
1414 }
1415 ssleep(1);
1416 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1417 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
1418 __func__, wait_state_count);
1419 }
1420 if (wait_state_count)
1421 ioc_info(ioc, "%s: ioc is operational\n", __func__);
1422 1380
1423 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); 1381 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
1424 if (!smid) { 1382 if (!smid) {
@@ -1880,7 +1838,6 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
1880 Mpi2SmpPassthroughReply_t *mpi_reply; 1838 Mpi2SmpPassthroughReply_t *mpi_reply;
1881 int rc; 1839 int rc;
1882 u16 smid; 1840 u16 smid;
1883 u32 ioc_state;
1884 void *psge; 1841 void *psge;
1885 dma_addr_t dma_addr_in; 1842 dma_addr_t dma_addr_in;
1886 dma_addr_t dma_addr_out; 1843 dma_addr_t dma_addr_out;
@@ -1888,7 +1845,6 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
1888 void *addr_out = NULL; 1845 void *addr_out = NULL;
1889 size_t dma_len_in; 1846 size_t dma_len_in;
1890 size_t dma_len_out; 1847 size_t dma_len_out;
1891 u16 wait_state_count;
1892 unsigned int reslen = 0; 1848 unsigned int reslen = 0;
1893 1849
1894 if (ioc->shost_recovery || ioc->pci_error_recovery) { 1850 if (ioc->shost_recovery || ioc->pci_error_recovery) {
@@ -1924,22 +1880,9 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
1924 if (rc) 1880 if (rc)
1925 goto unmap_out; 1881 goto unmap_out;
1926 1882
1927 wait_state_count = 0; 1883 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
1928 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 1884 if (rc)
1929 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1885 goto unmap_in;
1930 if (wait_state_count++ == 10) {
1931 ioc_err(ioc, "%s: failed due to ioc not operational\n",
1932 __func__);
1933 rc = -EFAULT;
1934 goto unmap_in;
1935 }
1936 ssleep(1);
1937 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1938 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
1939 __func__, wait_state_count);
1940 }
1941 if (wait_state_count)
1942 ioc_info(ioc, "%s: ioc is operational\n", __func__);
1943 1886
1944 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); 1887 smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
1945 if (!smid) { 1888 if (!smid) {
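
All four call sites above now share one helper in mpt3sas_base.c. A sketch reconstructed from the deleted loops, assuming IOC_OPERATIONAL_WAIT_COUNT carries the retry budget that was hard-coded as ten one-second retries:

        int
        mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
        {
                int wait_state_count = 0;
                u32 ioc_state;

                do {
                        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
                        if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
                                break;
                        ssleep(1);
                        ioc_info(ioc,
                                 "%s: waiting for operational state(count=%d)\n",
                                 __func__, ++wait_state_count);
                } while (--timeout);

                if (!timeout) {
                        ioc_err(ioc, "%s: failed due to ioc not operational\n",
                                __func__);
                        return -EFAULT;
                }
                if (wait_state_count)
                        ioc_info(ioc, "ioc is operational\n");
                return 0;
        }
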
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index 7d1ab414b78f..ca96d6d9c350 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -78,7 +78,6 @@ static struct scsi_host_template mvme147_host_template = {
78 .this_id = 7, 78 .this_id = 7,
79 .sg_tablesize = SG_ALL, 79 .sg_tablesize = SG_ALL,
80 .cmd_per_lun = CMD_PER_LUN, 80 .cmd_per_lun = CMD_PER_LUN,
81 .use_clustering = ENABLE_CLUSTERING
82}; 81};
83 82
84static struct Scsi_Host *mvme147_shost; 83static struct Scsi_Host *mvme147_shost;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 3ac34373746c..030d911ee374 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -59,7 +59,6 @@ static struct scsi_host_template mvs_sht = {
59 .this_id = -1, 59 .this_id = -1,
60 .sg_tablesize = SG_ALL, 60 .sg_tablesize = SG_ALL,
61 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 61 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
62 .use_clustering = ENABLE_CLUSTERING,
63 .eh_device_reset_handler = sas_eh_device_reset_handler, 62 .eh_device_reset_handler = sas_eh_device_reset_handler,
64 .eh_target_reset_handler = sas_eh_target_reset_handler, 63 .eh_target_reset_handler = sas_eh_target_reset_handler,
65 .target_destroy = sas_target_destroy, 64 .target_destroy = sas_target_destroy,
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 2458974d1af6..dbe753fba486 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2197,6 +2197,7 @@ static struct scsi_host_template mvumi_template = {
2197 .eh_timed_out = mvumi_timed_out, 2197 .eh_timed_out = mvumi_timed_out,
2198 .eh_host_reset_handler = mvumi_host_reset, 2198 .eh_host_reset_handler = mvumi_host_reset,
2199 .bios_param = mvumi_bios_param, 2199 .bios_param = mvumi_bios_param,
2200 .dma_boundary = PAGE_SIZE - 1,
2200 .this_id = -1, 2201 .this_id = -1,
2201}; 2202};
2202 2203
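
mvumi never set use_clustering, so it relied on the old implicit default of no segment merging; with the flag removed from the midlayer (merging is now always the default), that constraint must be spelled out as a DMA segment boundary. The same substitution appears below for the explicit DISABLE_CLUSTERING users (nsp32, nsp_cs, qlogic_stub), while ENABLE_CLUSTERING users simply lose the line. What dma_boundary = PAGE_SIZE - 1 guarantees, as an illustrative sketch of the block layer's boundary test:

        /* A segment may not cross a (boundary_mask + 1)-aligned address;
         * with boundary_mask = PAGE_SIZE - 1 no segment spans a page,
         * which is what DISABLE_CLUSTERING used to ensure.
         */
        static bool crosses_boundary(dma_addr_t addr, unsigned int len,
                                     unsigned long boundary_mask)
        {
                return (addr | boundary_mask) !=
                       ((addr + len - 1) | boundary_mask);
        }
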
@@ -2620,7 +2621,7 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2620 } 2621 }
2621 2622
2622 ret = mvumi_pci_set_master(pdev); 2623 ret = mvumi_pci_set_master(pdev);
2623 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2624 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2624 if (ret) 2625 if (ret)
2625 goto fail; 2626 goto fail;
2626 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME); 2627 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 0642f2d0a3bb..539ac8ce4fcd 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1528,6 +1528,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1528 scmd->scsi_done(scmd); 1528 scmd->scsi_done(scmd);
1529 return 0; 1529 return 0;
1530 } 1530 }
1531 /* fall through */
1531 case WRITE_6: 1532 case WRITE_6:
1532 lba = (((scmd->cmnd[1] & 0x1F) << 16) | 1533 lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1533 (scmd->cmnd[2] << 8) | 1534 (scmd->cmnd[2] << 8) |
@@ -1544,6 +1545,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1544 scmd->scsi_done(scmd); 1545 scmd->scsi_done(scmd);
1545 return 0; 1546 return 0;
1546 } 1547 }
1548 /* fall through */
1547 case WRITE_10: 1549 case WRITE_10:
1548 case VERIFY: /* 0x2F */ 1550 case VERIFY: /* 0x2F */
1549 case WRITE_VERIFY: /* 0x2E */ 1551 case WRITE_VERIFY: /* 0x2E */
@@ -1560,6 +1562,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1560 scmd->scsi_done(scmd); 1562 scmd->scsi_done(scmd);
1561 return 0; 1563 return 0;
1562 } 1564 }
1565 /* fall through */
1563 case WRITE_12: 1566 case WRITE_12:
1564 case VERIFY_12: /* 0xAF */ 1567 case VERIFY_12: /* 0xAF */
1565 case WRITE_VERIFY_12: /* 0xAE */ 1568 case WRITE_VERIFY_12: /* 0xAE */
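
The comments silence GCC's -Wimplicit-fallthrough, which myrb's opcode decoding trips because the failure branch returns early and control otherwise falls into the write case by design. A minimal illustration of the pattern (not driver code; opcodes from scsi/scsi_proto.h):

        #include <scsi/scsi_proto.h>

        static int cdb_lba_bytes(unsigned char opcode)
        {
                switch (opcode) {
                case READ_6:
                        /* READ_6 and WRITE_6 share the 21-bit LBA layout */
                        /* fall through */
                case WRITE_6:
                        return 3;
                default:
                        return 4;
                }
        }
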
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 6cd3e289ef99..1a236a3dfd51 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8313,7 +8313,6 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
8313 tpnt->this_id = 7; 8313 tpnt->this_id = 7;
8314 tpnt->sg_tablesize = SCSI_NCR_SG_TABLESIZE; 8314 tpnt->sg_tablesize = SCSI_NCR_SG_TABLESIZE;
8315 tpnt->cmd_per_lun = SCSI_NCR_CMD_PER_LUN; 8315 tpnt->cmd_per_lun = SCSI_NCR_CMD_PER_LUN;
8316 tpnt->use_clustering = ENABLE_CLUSTERING;
8317 8316
8318 if (device->differential) 8317 if (device->differential)
8319 driver_setup.diff_support = device->differential; 8318 driver_setup.diff_support = device->differential;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 5aac3e801903..00e3cbee55b8 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -274,7 +274,7 @@ static struct scsi_host_template nsp32_template = {
274 .sg_tablesize = NSP32_SG_SIZE, 274 .sg_tablesize = NSP32_SG_SIZE,
275 .max_sectors = 128, 275 .max_sectors = 128,
276 .this_id = NSP32_HOST_SCSIID, 276 .this_id = NSP32_HOST_SCSIID,
277 .use_clustering = DISABLE_CLUSTERING, 277 .dma_boundary = PAGE_SIZE - 1,
278 .eh_abort_handler = nsp32_eh_abort, 278 .eh_abort_handler = nsp32_eh_abort,
279 .eh_host_reset_handler = nsp32_eh_host_reset, 279 .eh_host_reset_handler = nsp32_eh_host_reset,
280/* .highmem_io = 1, */ 280/* .highmem_io = 1, */
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index f3230494a8c9..1bd6825a4f14 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -86,7 +86,7 @@ static struct scsi_host_template nsp_driver_template = {
86 .can_queue = 1, 86 .can_queue = 1,
87 .this_id = NSP_INITIATOR_ID, 87 .this_id = NSP_INITIATOR_ID,
88 .sg_tablesize = SG_ALL, 88 .sg_tablesize = SG_ALL,
89 .use_clustering = DISABLE_CLUSTERING, 89 .dma_boundary = PAGE_SIZE - 1,
90}; 90};
91 91
92static nsp_hw_data nsp_data_base; /* attach <-> detect glue */ 92static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 173351a8554b..828d53faf09a 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -72,7 +72,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
72 .can_queue = 1, 72 .can_queue = 1,
73 .this_id = -1, 73 .this_id = -1,
74 .sg_tablesize = SG_ALL, 74 .sg_tablesize = SG_ALL,
75 .use_clustering = DISABLE_CLUSTERING, 75 .dma_boundary = PAGE_SIZE - 1,
76}; 76};
77 77
78/*====================================================================*/ 78/*====================================================================*/
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index a3b63bea0e50..d1e98a6ea28f 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -680,7 +680,6 @@ static struct scsi_host_template sym53c500_driver_template = {
680 .can_queue = 1, 680 .can_queue = 1,
681 .this_id = 7, 681 .this_id = 7,
682 .sg_tablesize = 32, 682 .sg_tablesize = 32,
683 .use_clustering = ENABLE_CLUSTERING,
684 .shost_attrs = SYM53C500_shost_attrs 683 .shost_attrs = SYM53C500_shost_attrs
685}; 684};
686 685
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index d71e7e4ec29c..a36060c23b37 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -84,7 +84,6 @@ static struct scsi_host_template pm8001_sht = {
84 .this_id = -1, 84 .this_id = -1,
85 .sg_tablesize = SG_ALL, 85 .sg_tablesize = SG_ALL,
86 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 86 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
87 .use_clustering = ENABLE_CLUSTERING,
88 .eh_device_reset_handler = sas_eh_device_reset_handler, 87 .eh_device_reset_handler = sas_eh_device_reset_handler,
89 .eh_target_reset_handler = sas_eh_target_reset_handler, 88 .eh_target_reset_handler = sas_eh_target_reset_handler,
90 .target_destroy = sas_target_destroy, 89 .target_destroy = sas_target_destroy,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4e86994e10e8..7c4673308f5b 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -846,16 +846,9 @@ static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
846 cmd->ioa_cb->ioarcb.cdb[0], ioasc); 846 cmd->ioa_cb->ioarcb.cdb[0], ioasc);
847 } 847 }
848 848
849 /* if we had allocated sense buffers for request sense, copy the sense 849 if (cmd->sense_buffer) {
850 * release the buffers 850 dma_unmap_single(&pinstance->pdev->dev, cmd->sense_buffer_dma,
851 */ 851 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
852 if (cmd->sense_buffer != NULL) {
853 memcpy(scsi_cmd->sense_buffer,
854 cmd->sense_buffer,
855 SCSI_SENSE_BUFFERSIZE);
856 pci_free_consistent(pinstance->pdev,
857 SCSI_SENSE_BUFFERSIZE,
858 cmd->sense_buffer, cmd->sense_buffer_dma);
859 cmd->sense_buffer = NULL; 852 cmd->sense_buffer = NULL;
860 cmd->sense_buffer_dma = 0; 853 cmd->sense_buffer_dma = 0;
861 } 854 }
@@ -2444,13 +2437,12 @@ static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2444{ 2437{
2445 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; 2438 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2446 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; 2439 struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
2440 struct device *dev = &cmd->drv_inst->pdev->dev;
2447 2441
2448 /* allocate DMAable memory for sense buffers */ 2442 cmd->sense_buffer = cmd->scsi_cmd->sense_buffer;
2449 cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev, 2443 cmd->sense_buffer_dma = dma_map_single(dev, cmd->sense_buffer,
2450 SCSI_SENSE_BUFFERSIZE, 2444 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
2451 &cmd->sense_buffer_dma); 2445 if (dma_mapping_error(dev, cmd->sense_buffer_dma)) {
2452
2453 if (cmd->sense_buffer == NULL) {
2454 pmcraid_err 2446 pmcraid_err
2455 ("couldn't allocate sense buffer for request sense\n"); 2447 ("couldn't allocate sense buffer for request sense\n");
2456 pmcraid_erp_done(cmd); 2448 pmcraid_erp_done(cmd);
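
request_sense now maps the midlayer's own sense buffer for device DMA instead of bouncing through a separate coherent allocation plus a memcpy in pmcraid_erp_done (see the matching unmap earlier in this file's diff). Unlike the coherent path, a streaming mapping can fail and must be checked; a minimal sketch of the idiom, with a hypothetical wrapper name:

        #include <linux/dma-mapping.h>

        /* Hypothetical helper illustrating the map-and-check idiom: */
        static int example_map_sense(struct device *dev, struct scsi_cmnd *scmd,
                                     dma_addr_t *handle)
        {
                *handle = dma_map_single(dev, scmd->sense_buffer,
                                         SCSI_SENSE_BUFFERSIZE,
                                         DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, *handle))
                        return -ENOMEM;
                /* Completion must dma_unmap_single() before the midlayer
                 * looks at scmd->sense_buffer.
                 */
                return 0;
        }
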
@@ -2491,17 +2483,15 @@ static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2491/** 2483/**
2492 * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery 2484 * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
2493 * @cmd: command that failed 2485 * @cmd: command that failed
2494 * @sense: true if request_sense is required after cancel all 2486 * @need_sense: true if request_sense is required after cancel all
2495 * 2487 *
2496 * This function sends a cancel all to a device to clear the queue. 2488 * This function sends a cancel all to a device to clear the queue.
2497 */ 2489 */
2498static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense) 2490static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, bool need_sense)
2499{ 2491{
2500 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd; 2492 struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
2501 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; 2493 struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
2502 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata; 2494 struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
2503 void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
2504 : pmcraid_request_sense;
2505 2495
2506 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); 2496 memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
2507 ioarcb->request_flags0 = SYNC_OVERRIDE; 2497 ioarcb->request_flags0 = SYNC_OVERRIDE;
@@ -2519,7 +2509,8 @@ static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
2519 /* writing to IOARRIN must be protected by host_lock, as mid-layer 2509 /* writing to IOARRIN must be protected by host_lock, as mid-layer
2520 * schedule queuecommand while we are doing this 2510 * schedule queuecommand while we are doing this
2521 */ 2511 */
2522 pmcraid_send_cmd(cmd, cmd_done, 2512 pmcraid_send_cmd(cmd, need_sense ?
2513 pmcraid_erp_done : pmcraid_request_sense,
2523 PMCRAID_REQUEST_SENSE_TIMEOUT, 2514 PMCRAID_REQUEST_SENSE_TIMEOUT,
2524 pmcraid_timeout_handler); 2515 pmcraid_timeout_handler);
2525} 2516}
@@ -2612,7 +2603,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2612 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa; 2603 struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
2613 u32 ioasc = le32_to_cpu(ioasa->ioasc); 2604 u32 ioasc = le32_to_cpu(ioasa->ioasc);
2614 u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK; 2605 u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
2615 u32 sense_copied = 0; 2606 bool sense_copied = false;
2616 2607
2617 if (!res) { 2608 if (!res) {
2618 pmcraid_info("resource pointer is NULL\n"); 2609 pmcraid_info("resource pointer is NULL\n");
@@ -2684,7 +2675,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
2684 memcpy(scsi_cmd->sense_buffer, 2675 memcpy(scsi_cmd->sense_buffer,
2685 ioasa->sense_data, 2676 ioasa->sense_data,
2686 data_size); 2677 data_size);
2687 sense_copied = 1; 2678 sense_copied = true;
2688 } 2679 }
2689 2680
2690 if (RES_IS_GSCSI(res->cfg_entry)) 2681 if (RES_IS_GSCSI(res->cfg_entry))
@@ -3523,7 +3514,7 @@ static int pmcraid_build_passthrough_ioadls(
3523 return -ENOMEM; 3514 return -ENOMEM;
3524 } 3515 }
3525 3516
3526 sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev, 3517 sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
3527 sglist->scatterlist, 3518 sglist->scatterlist,
3528 sglist->num_sg, direction); 3519 sglist->num_sg, direction);
3529 3520
@@ -3572,7 +3563,7 @@ static void pmcraid_release_passthrough_ioadls(
3572 struct pmcraid_sglist *sglist = cmd->sglist; 3563 struct pmcraid_sglist *sglist = cmd->sglist;
3573 3564
3574 if (buflen > 0) { 3565 if (buflen > 0) {
3575 pci_unmap_sg(cmd->drv_inst->pdev, 3566 dma_unmap_sg(&cmd->drv_inst->pdev->dev,
3576 sglist->scatterlist, 3567 sglist->scatterlist,
3577 sglist->num_sg, 3568 sglist->num_sg,
3578 direction); 3569 direction);
@@ -4158,7 +4149,6 @@ static struct scsi_host_template pmcraid_host_template = {
4158 .max_sectors = PMCRAID_IOA_MAX_SECTORS, 4149 .max_sectors = PMCRAID_IOA_MAX_SECTORS,
4159 .no_write_same = 1, 4150 .no_write_same = 1,
4160 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, 4151 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
4161 .use_clustering = ENABLE_CLUSTERING,
4162 .shost_attrs = pmcraid_host_attrs, 4152 .shost_attrs = pmcraid_host_attrs,
4163 .proc_name = PMCRAID_DRIVER_NAME, 4153 .proc_name = PMCRAID_DRIVER_NAME,
4164}; 4154};
@@ -4708,9 +4698,9 @@ static void
4708pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex) 4698pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
4709{ 4699{
4710 int i; 4700 int i;
4711 for (i = 0; i < maxindex; i++) {
4712 4701
4713 pci_free_consistent(pinstance->pdev, 4702 for (i = 0; i < maxindex; i++) {
4703 dma_free_coherent(&pinstance->pdev->dev,
4714 HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD, 4704 HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
4715 pinstance->hrrq_start[i], 4705 pinstance->hrrq_start[i],
4716 pinstance->hrrq_start_bus_addr[i]); 4706 pinstance->hrrq_start_bus_addr[i]);
@@ -4737,11 +4727,9 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4737 4727
4738 for (i = 0; i < pinstance->num_hrrq; i++) { 4728 for (i = 0; i < pinstance->num_hrrq; i++) {
4739 pinstance->hrrq_start[i] = 4729 pinstance->hrrq_start[i] =
4740 pci_alloc_consistent( 4730 dma_alloc_coherent(&pinstance->pdev->dev, buffer_size,
4741 pinstance->pdev, 4731 &pinstance->hrrq_start_bus_addr[i],
4742 buffer_size, 4732 GFP_KERNEL);
4743 &(pinstance->hrrq_start_bus_addr[i]));
4744
4745 if (!pinstance->hrrq_start[i]) { 4733 if (!pinstance->hrrq_start[i]) {
4746 pmcraid_err("pci_alloc failed for hrrq vector : %d\n", 4734 pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
4747 i); 4735 i);
@@ -4770,7 +4758,7 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
4770static void pmcraid_release_hcams(struct pmcraid_instance *pinstance) 4758static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4771{ 4759{
4772 if (pinstance->ccn.msg != NULL) { 4760 if (pinstance->ccn.msg != NULL) {
4773 pci_free_consistent(pinstance->pdev, 4761 dma_free_coherent(&pinstance->pdev->dev,
4774 PMCRAID_AEN_HDR_SIZE + 4762 PMCRAID_AEN_HDR_SIZE +
4775 sizeof(struct pmcraid_hcam_ccn_ext), 4763 sizeof(struct pmcraid_hcam_ccn_ext),
4776 pinstance->ccn.msg, 4764 pinstance->ccn.msg,
@@ -4782,7 +4770,7 @@ static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4782 } 4770 }
4783 4771
4784 if (pinstance->ldn.msg != NULL) { 4772 if (pinstance->ldn.msg != NULL) {
4785 pci_free_consistent(pinstance->pdev, 4773 dma_free_coherent(&pinstance->pdev->dev,
4786 PMCRAID_AEN_HDR_SIZE + 4774 PMCRAID_AEN_HDR_SIZE +
4787 sizeof(struct pmcraid_hcam_ldn), 4775 sizeof(struct pmcraid_hcam_ldn),
4788 pinstance->ldn.msg, 4776 pinstance->ldn.msg,
@@ -4803,17 +4791,15 @@ static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
4803 */ 4791 */
4804static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance) 4792static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
4805{ 4793{
4806 pinstance->ccn.msg = pci_alloc_consistent( 4794 pinstance->ccn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
4807 pinstance->pdev,
4808 PMCRAID_AEN_HDR_SIZE + 4795 PMCRAID_AEN_HDR_SIZE +
4809 sizeof(struct pmcraid_hcam_ccn_ext), 4796 sizeof(struct pmcraid_hcam_ccn_ext),
4810 &(pinstance->ccn.baddr)); 4797 &pinstance->ccn.baddr, GFP_KERNEL);
4811 4798
4812 pinstance->ldn.msg = pci_alloc_consistent( 4799 pinstance->ldn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
4813 pinstance->pdev,
4814 PMCRAID_AEN_HDR_SIZE + 4800 PMCRAID_AEN_HDR_SIZE +
4815 sizeof(struct pmcraid_hcam_ldn), 4801 sizeof(struct pmcraid_hcam_ldn),
4816 &(pinstance->ldn.baddr)); 4802 &pinstance->ldn.baddr, GFP_KERNEL);
4817 4803
4818 if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) { 4804 if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
4819 pmcraid_release_hcams(pinstance); 4805 pmcraid_release_hcams(pinstance);
@@ -4841,7 +4827,7 @@ static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
4841{ 4827{
4842 if (pinstance->cfg_table != NULL && 4828 if (pinstance->cfg_table != NULL &&
4843 pinstance->cfg_table_bus_addr != 0) { 4829 pinstance->cfg_table_bus_addr != 0) {
4844 pci_free_consistent(pinstance->pdev, 4830 dma_free_coherent(&pinstance->pdev->dev,
4845 sizeof(struct pmcraid_config_table), 4831 sizeof(struct pmcraid_config_table),
4846 pinstance->cfg_table, 4832 pinstance->cfg_table,
4847 pinstance->cfg_table_bus_addr); 4833 pinstance->cfg_table_bus_addr);
@@ -4886,10 +4872,10 @@ static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
4886 list_add_tail(&pinstance->res_entries[i].queue, 4872 list_add_tail(&pinstance->res_entries[i].queue,
4887 &pinstance->free_res_q); 4873 &pinstance->free_res_q);
4888 4874
4889 pinstance->cfg_table = 4875 pinstance->cfg_table = dma_alloc_coherent(&pinstance->pdev->dev,
4890 pci_alloc_consistent(pinstance->pdev,
4891 sizeof(struct pmcraid_config_table), 4876 sizeof(struct pmcraid_config_table),
4892 &pinstance->cfg_table_bus_addr); 4877 &pinstance->cfg_table_bus_addr,
4878 GFP_KERNEL);
4893 4879
4894 if (NULL == pinstance->cfg_table) { 4880 if (NULL == pinstance->cfg_table) {
4895 pmcraid_err("couldn't alloc DMA memory for config table\n"); 4881 pmcraid_err("couldn't alloc DMA memory for config table\n");
@@ -4954,7 +4940,7 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4954 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq); 4940 pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
4955 4941
4956 if (pinstance->inq_data != NULL) { 4942 if (pinstance->inq_data != NULL) {
4957 pci_free_consistent(pinstance->pdev, 4943 dma_free_coherent(&pinstance->pdev->dev,
4958 sizeof(struct pmcraid_inquiry_data), 4944 sizeof(struct pmcraid_inquiry_data),
4959 pinstance->inq_data, 4945 pinstance->inq_data,
4960 pinstance->inq_data_baddr); 4946 pinstance->inq_data_baddr);
@@ -4964,7 +4950,7 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4964 } 4950 }
4965 4951
4966 if (pinstance->timestamp_data != NULL) { 4952 if (pinstance->timestamp_data != NULL) {
4967 pci_free_consistent(pinstance->pdev, 4953 dma_free_coherent(&pinstance->pdev->dev,
4968 sizeof(struct pmcraid_timestamp_data), 4954 sizeof(struct pmcraid_timestamp_data),
4969 pinstance->timestamp_data, 4955 pinstance->timestamp_data,
4970 pinstance->timestamp_data_baddr); 4956 pinstance->timestamp_data_baddr);
@@ -4981,8 +4967,8 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
4981 * This routine pre-allocates memory based on the type of block as below: 4967 * This routine pre-allocates memory based on the type of block as below:
4982 * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator, 4968 * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
4983 * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator 4969 * IOARCBs(PMCRAID_MAX_CMD) : DMAable memory, using pci pool allocator
4984 * config-table entries : DMAable memory using pci_alloc_consistent 4970 * config-table entries : DMAable memory using dma_alloc_coherent
4985 * HostRRQs : DMAable memory, using pci_alloc_consistent 4971 * HostRRQs : DMAable memory, using dma_alloc_coherent
4986 * 4972 *
4987 * Return Value 4973 * Return Value
4988 * 0 in case all of the blocks are allocated, -ENOMEM otherwise. 4974 * 0 in case all of the blocks are allocated, -ENOMEM otherwise.
@@ -5019,11 +5005,9 @@ static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
5019 } 5005 }
5020 5006
5021 /* allocate DMAable memory for page D0 INQUIRY buffer */ 5007 /* allocate DMAable memory for page D0 INQUIRY buffer */
5022 pinstance->inq_data = pci_alloc_consistent( 5008 pinstance->inq_data = dma_alloc_coherent(&pinstance->pdev->dev,
5023 pinstance->pdev,
5024 sizeof(struct pmcraid_inquiry_data), 5009 sizeof(struct pmcraid_inquiry_data),
5025 &pinstance->inq_data_baddr); 5010 &pinstance->inq_data_baddr, GFP_KERNEL);
5026
5027 if (pinstance->inq_data == NULL) { 5011 if (pinstance->inq_data == NULL) {
5028 pmcraid_err("couldn't allocate DMA memory for INQUIRY\n"); 5012 pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
5029 pmcraid_release_buffers(pinstance); 5013 pmcraid_release_buffers(pinstance);
@@ -5031,11 +5015,10 @@ static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
5031 } 5015 }
5032 5016
5033 /* allocate DMAable memory for set timestamp data buffer */ 5017 /* allocate DMAable memory for set timestamp data buffer */
5034 pinstance->timestamp_data = pci_alloc_consistent( 5018 pinstance->timestamp_data = dma_alloc_coherent(&pinstance->pdev->dev,
5035 pinstance->pdev,
5036 sizeof(struct pmcraid_timestamp_data), 5019 sizeof(struct pmcraid_timestamp_data),
5037 &pinstance->timestamp_data_baddr); 5020 &pinstance->timestamp_data_baddr,
5038 5021 GFP_KERNEL);
5039 if (pinstance->timestamp_data == NULL) { 5022 if (pinstance->timestamp_data == NULL) {
5040 pmcraid_err("couldn't allocate DMA memory for \ 5023 pmcraid_err("couldn't allocate DMA memory for \
5041 set time_stamp \n"); 5024 set time_stamp \n");
@@ -5324,12 +5307,12 @@ static int pmcraid_resume(struct pci_dev *pdev)
5324 5307
5325 pci_set_master(pdev); 5308 pci_set_master(pdev);
5326 5309
5327 if ((sizeof(dma_addr_t) == 4) || 5310 if (sizeof(dma_addr_t) == 4 ||
5328 pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 5311 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
5329 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 5312 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
5330 5313
5331 if (rc == 0) 5314 if (rc == 0)
5332 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5315 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5333 5316
5334 if (rc != 0) { 5317 if (rc != 0) {
5335 dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n"); 5318 dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
@@ -5733,19 +5716,19 @@ static int pmcraid_probe(struct pci_dev *pdev,
5733 /* Firmware requires the system bus address of IOARCB to be within 5716 /* Firmware requires the system bus address of IOARCB to be within
5734 * 32-bit addressable range though it has 64-bit IOARRIN register. 5717 * 32-bit addressable range though it has 64-bit IOARRIN register.
5735 * However, firmware supports 64-bit streaming DMA buffers, whereas 5718 * However, firmware supports 64-bit streaming DMA buffers, whereas
5736 * coherent buffers are to be 32-bit. Since pci_alloc_consistent always 5719 * coherent buffers are to be 32-bit. Since dma_alloc_coherent always
5737 * returns memory within 4GB (if not, change this logic), coherent 5720 * returns memory within 4GB (if not, change this logic), coherent
5738 * buffers are within firmware acceptable address ranges. 5721 * buffers are within firmware acceptable address ranges.
5739 */ 5722 */
5740 if ((sizeof(dma_addr_t) == 4) || 5723 if (sizeof(dma_addr_t) == 4 ||
5741 pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 5724 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
5742 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 5725 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
5743 5726
5744 /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32 5727 /* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
5745 * bit mask for pci_alloc_consistent to return addresses within 4GB 5728 * bit mask for dma_alloc_coherent to return addresses within 4GB
5746 */ 5729 */
5747 if (rc == 0) 5730 if (rc == 0)
5748 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 5731 rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5749 5732
5750 if (rc != 0) { 5733 if (rc != 0) {
5751 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); 5734 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
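
The mask negotiation above is a mechanical conversion from the pci_* wrappers; the resulting pattern — try 64-bit streaming DMA, fall back to 32-bit, and pin coherent buffers below 4 GB for the IOARRIN register — distilled into one sketch:

        static int example_set_dma_masks(struct pci_dev *pdev)
        {
                int rc = 0;

                /* 64-bit streaming DMA when dma_addr_t allows it, else 32-bit. */
                if (sizeof(dma_addr_t) == 4 ||
                    dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
                        rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                /* Coherent (IOARCB) memory must stay 32-bit addressable. */
                if (rc == 0)
                        rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
                return rc;
        }
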
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index ee86a0c62dbf..c182b5458f98 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -978,7 +978,6 @@ static struct scsi_host_template ppa_template = {
978 .bios_param = ppa_biosparam, 978 .bios_param = ppa_biosparam,
979 .this_id = -1, 979 .this_id = -1,
980 .sg_tablesize = SG_ALL, 980 .sg_tablesize = SG_ALL,
981 .use_clustering = ENABLE_CLUSTERING,
982 .can_queue = 1, 981 .can_queue = 1,
983 .slave_alloc = ppa_adjust_queue, 982 .slave_alloc = ppa_adjust_queue,
984}; 983};
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 4924424d20fe..8d769138c01c 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -349,7 +349,6 @@ static struct scsi_host_template ps3rom_host_template = {
349 .sg_tablesize = SG_ALL, 349 .sg_tablesize = SG_ALL,
350 .emulated = 1, /* only sg driver uses this */ 350 .emulated = 1, /* only sg driver uses this */
351 .max_sectors = PS3ROM_MAX_SECTORS, 351 .max_sectors = PS3ROM_MAX_SECTORS,
352 .use_clustering = ENABLE_CLUSTERING,
353 .module = THIS_MODULE, 352 .module = THIS_MODULE,
354}; 353};
355 354
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d5a4f17fce51..edcaf4b0cb0b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -785,7 +785,6 @@ static struct scsi_host_template qedf_host_template = {
785 .name = QEDF_MODULE_NAME, 785 .name = QEDF_MODULE_NAME,
786 .this_id = -1, 786 .this_id = -1,
787 .cmd_per_lun = 32, 787 .cmd_per_lun = 32,
788 .use_clustering = ENABLE_CLUSTERING,
789 .max_sectors = 0xffff, 788 .max_sectors = 0xffff,
790 .queuecommand = qedf_queuecommand, 789 .queuecommand = qedf_queuecommand,
791 .shost_attrs = qedf_host_attrs, 790 .shost_attrs = qedf_host_attrs,
@@ -2935,8 +2934,7 @@ static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
2935 2934
2936 qedf_free_global_queues(qedf); 2935 qedf_free_global_queues(qedf);
2937 2936
2938 if (qedf->global_queues) 2937 kfree(qedf->global_queues);
2939 kfree(qedf->global_queues);
2940} 2938}
2941 2939
2942/* 2940/*
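
The dropped NULL check is redundant by contract: kfree(NULL) is a no-op, so "if (p) kfree(p);" reduces to plain "kfree(p);".
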
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index a6f96b35e971..a26bb5066b90 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -45,7 +45,7 @@ struct qedi_endpoint;
45#define QEDI_MAX_TASK_NUM 0x0FFF 45#define QEDI_MAX_TASK_NUM 0x0FFF
46#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 46#define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024
47#define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */ 47#define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */
48#define MAX_OUSTANDING_TASKS_PER_CON 1024 48#define MAX_OUTSTANDING_TASKS_PER_CON 1024
49 49
50#define QEDI_MAX_BD_LEN 0xffff 50#define QEDI_MAX_BD_LEN 0xffff
51#define QEDI_BD_SPLIT_SZ 0x1000 51#define QEDI_BD_SPLIT_SZ 0x1000
@@ -63,12 +63,9 @@ struct qedi_endpoint;
63#define QEDI_LOCAL_PORT_INVALID 0xffff 63#define QEDI_LOCAL_PORT_INVALID 0xffff
64#define TX_RX_RING 16 64#define TX_RX_RING 16
65#define RX_RING (TX_RX_RING - 1) 65#define RX_RING (TX_RX_RING - 1)
66#define LL2_SINGLE_BUF_SIZE 0x400
67#define QEDI_PAGE_SIZE 4096
68#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE) 66#define QEDI_PAGE_ALIGN(addr) ALIGN(addr, QEDI_PAGE_SIZE)
69#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) 67#define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1))
70 68
71#define QEDI_PAGE_SIZE 4096
72#define QEDI_HW_DMA_BOUNDARY 0xfff 69#define QEDI_HW_DMA_BOUNDARY 0xfff
73#define QEDI_PATH_HANDLE 0xFE0000000UL 70#define QEDI_PATH_HANDLE 0xFE0000000UL
74 71
@@ -146,7 +143,7 @@ struct skb_work_list {
146}; 143};
147 144
148/* Queue sizes in number of elements */ 145/* Queue sizes in number of elements */
149#define QEDI_SQ_SIZE MAX_OUSTANDING_TASKS_PER_CON 146#define QEDI_SQ_SIZE MAX_OUTSTANDING_TASKS_PER_CON
150#define QEDI_CQ_SIZE 2048 147#define QEDI_CQ_SIZE 2048
151#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK 148#define QEDI_CMDQ_SIZE QEDI_MAX_ISCSI_TASK
152#define QEDI_PROTO_CQ_PROD_IDX 0 149#define QEDI_PROTO_CQ_PROD_IDX 0
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 2f0a4f2c5ff8..4da660c1c431 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -61,7 +61,6 @@ struct scsi_host_template qedi_host_template = {
61 .max_sectors = 0xffff, 61 .max_sectors = 0xffff,
62 .dma_boundary = QEDI_HW_DMA_BOUNDARY, 62 .dma_boundary = QEDI_HW_DMA_BOUNDARY,
63 .cmd_per_lun = 128, 63 .cmd_per_lun = 128,
64 .use_clustering = ENABLE_CLUSTERING,
65 .shost_attrs = qedi_shost_attrs, 64 .shost_attrs = qedi_shost_attrs,
66}; 65};
67 66
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 311eb22068e1..5c53409a8cea 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -44,6 +44,11 @@ module_param(qedi_io_tracing, uint, 0644);
44MODULE_PARM_DESC(qedi_io_tracing, 44MODULE_PARM_DESC(qedi_io_tracing,
45 " Enable logging of SCSI requests/completions into trace buffer. (default off)."); 45 " Enable logging of SCSI requests/completions into trace buffer. (default off).");
46 46
47uint qedi_ll2_buf_size = 0x400;
48module_param(qedi_ll2_buf_size, uint, 0644);
49MODULE_PARM_DESC(qedi_ll2_buf_size,
50 "parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
51
47const struct qed_iscsi_ops *qedi_ops; 52const struct qed_iscsi_ops *qedi_ops;
48static struct scsi_transport_template *qedi_scsi_transport; 53static struct scsi_transport_template *qedi_scsi_transport;
49static struct pci_driver qedi_pci_driver; 54static struct pci_driver qedi_pci_driver;
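
The fixed LL2_SINGLE_BUF_SIZE of 0x400 becomes a module parameter so the iscsiuio ping path can carry jumbo frames (qedi_ll2_buf_size=0x2400 per the parameter description); the value feeds the ring sizing below directly. A hypothetical hardening sketch, not part of the patch, that bounds and page-aligns the knob first:

        static u32 qedi_ll2_buf_size_checked(void)
        {
                /* Hypothetical: clamp to the documented range, then align
                 * the way __qedi_alloc_uio_rings() aligns the total.
                 */
                u32 sz = clamp_t(u32, qedi_ll2_buf_size, 0x400, 0x2400);

                return QEDI_PAGE_ALIGN(sz);
        }
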
@@ -228,7 +233,7 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
228 } 233 }
229 234
230 /* Allocating memory for Tx/Rx pkt buffer */ 235 /* Allocating memory for Tx/Rx pkt buffer */
231 udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE; 236 udev->ll2_buf_size = TX_RX_RING * qedi_ll2_buf_size;
232 udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size); 237 udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
233 udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP | 238 udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
234 __GFP_ZERO, 2); 239 __GFP_ZERO, 2);
@@ -283,7 +288,7 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
283 qedi->udev = udev; 288 qedi->udev = udev;
284 289
285 udev->tx_pkt = udev->ll2_buf; 290 udev->tx_pkt = udev->ll2_buf;
286 udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; 291 udev->rx_pkt = udev->ll2_buf + qedi_ll2_buf_size;
287 return 0; 292 return 0;
288 293
289 err_uctrl: 294 err_uctrl:
@@ -658,7 +663,7 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
658 struct qedi_uio_dev *udev; 663 struct qedi_uio_dev *udev;
659 struct qedi_uio_ctrl *uctrl; 664 struct qedi_uio_ctrl *uctrl;
660 struct skb_work_list *work; 665 struct skb_work_list *work;
661 u32 prod; 666 struct ethhdr *eh;
662 667
663 if (!qedi) { 668 if (!qedi) {
664 QEDI_ERR(NULL, "qedi is NULL\n"); 669 QEDI_ERR(NULL, "qedi is NULL\n");
@@ -672,6 +677,29 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
672 return 0; 677 return 0;
673 } 678 }
674 679
680 eh = (struct ethhdr *)skb->data;
681 /* Undo VLAN encapsulation */
682 if (eh->h_proto == htons(ETH_P_8021Q)) {
683 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
684 eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
685 skb_reset_mac_header(skb);
686 }
687
688 /* Filter out non FIP/FCoE frames here to free them faster */
689 if (eh->h_proto != htons(ETH_P_ARP) &&
690 eh->h_proto != htons(ETH_P_IP) &&
691 eh->h_proto != htons(ETH_P_IPV6)) {
692 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
693 "Dropping frame ethertype [0x%x] len [0x%x].\n",
694 eh->h_proto, skb->len);
695 kfree_skb(skb);
696 return 0;
697 }
698
699 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
700 "Allowed frame ethertype [0x%x] len [0x%x].\n",
701 eh->h_proto, skb->len);
702
675 udev = qedi->udev; 703 udev = qedi->udev;
676 uctrl = udev->uctrl; 704 uctrl = udev->uctrl;
677 705
@@ -694,17 +722,10 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
694 722
695 spin_lock_bh(&qedi->ll2_lock); 723 spin_lock_bh(&qedi->ll2_lock);
696 list_add_tail(&work->list, &qedi->ll2_skb_list); 724 list_add_tail(&work->list, &qedi->ll2_skb_list);
725 spin_unlock_bh(&qedi->ll2_lock);
697 726
698 ++uctrl->hw_rx_prod_cnt; 727 wake_up_process(qedi->ll2_recv_thread);
699 prod = (uctrl->hw_rx_prod + 1) % RX_RING;
700 if (prod != uctrl->host_rx_cons) {
701 uctrl->hw_rx_prod = prod;
702 spin_unlock_bh(&qedi->ll2_lock);
703 wake_up_process(qedi->ll2_recv_thread);
704 return 0;
705 }
706 728
707 spin_unlock_bh(&qedi->ll2_lock);
708 return 0; 729 return 0;
709} 730}
710 731
@@ -719,6 +740,7 @@ static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
719 u32 rx_bd_prod; 740 u32 rx_bd_prod;
720 void *pkt; 741 void *pkt;
721 int len = 0; 742 int len = 0;
743 u32 prod;
722 744
723 if (!qedi) { 745 if (!qedi) {
724 QEDI_ERR(NULL, "qedi is NULL\n"); 746 QEDI_ERR(NULL, "qedi is NULL\n");
@@ -727,12 +749,16 @@ static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
727 749
728 udev = qedi->udev; 750 udev = qedi->udev;
729 uctrl = udev->uctrl; 751 uctrl = udev->uctrl;
730 pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE); 752
731 len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE); 753 ++uctrl->hw_rx_prod_cnt;
754 prod = (uctrl->hw_rx_prod + 1) % RX_RING;
755
756 pkt = udev->rx_pkt + (prod * qedi_ll2_buf_size);
757 len = min_t(u32, skb->len, (u32)qedi_ll2_buf_size);
732 memcpy(pkt, skb->data, len); 758 memcpy(pkt, skb->data, len);
733 759
734 memset(&rxbd, 0, sizeof(rxbd)); 760 memset(&rxbd, 0, sizeof(rxbd));
735 rxbd.rx_pkt_index = uctrl->hw_rx_prod; 761 rxbd.rx_pkt_index = prod;
736 rxbd.rx_pkt_len = len; 762 rxbd.rx_pkt_len = len;
737 rxbd.vlan_id = vlan_id; 763 rxbd.vlan_id = vlan_id;
738 764
@@ -743,6 +769,16 @@ static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
743 769
744 memcpy(p_rxbd, &rxbd, sizeof(rxbd)); 770 memcpy(p_rxbd, &rxbd, sizeof(rxbd));
745 771
772 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
773 "hw_rx_prod [%d] prod [%d] hw_rx_bd_prod [%d] rx_pkt_idx [%d] rx_len [%d].\n",
774 uctrl->hw_rx_prod, prod, uctrl->hw_rx_bd_prod,
775 rxbd.rx_pkt_index, rxbd.rx_pkt_len);
776 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_LL2,
777 "host_rx_cons [%d] hw_rx_bd_cons [%d].\n",
778 uctrl->host_rx_cons, uctrl->host_rx_bd_cons);
779
780 uctrl->hw_rx_prod = prod;
781
746 /* notify the iscsiuio about new packet */ 782 /* notify the iscsiuio about new packet */
747 uio_event_notify(&udev->qedi_uinfo); 783 uio_event_notify(&udev->qedi_uinfo);
748 784
@@ -795,7 +831,7 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
795 int rval = 0; 831 int rval = 0;
796 832
797 833
798 num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE; 834 num_sq_pages = (MAX_OUTSTANDING_TASKS_PER_CON * 8) / QEDI_PAGE_SIZE;
799 835
800 qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi); 836 qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
801 837
@@ -833,7 +869,7 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
833 qedi->pf_params.iscsi_pf_params.max_fin_rt = 2; 869 qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
834 870
835 for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { 871 for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
836 if ((1 << log_page_size) == PAGE_SIZE) 872 if ((1 << log_page_size) == QEDI_PAGE_SIZE)
837 break; 873 break;
838 } 874 }
839 qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size; 875 qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
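
QEDI_PAGE_SIZE (4096, from qedi.h) replaces PAGE_SIZE throughout because the firmware's page size is fixed at 4K while the host's PAGE_SIZE may be larger (64K kernels, for instance); sizing queues by the host page would misprogram the device. Incidentally, the surviving bit-probing loop computes log2(QEDI_PAGE_SIZE), which the kernel's log2 helper expresses directly (sketch, not part of the patch):

        #include <linux/log2.h>

        /* Equivalent to the loop above for any power-of-two page size: */
        qedi->pf_params.iscsi_pf_params.log_page_size = ilog2(QEDI_PAGE_SIZE);
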
@@ -951,6 +987,9 @@ static int qedi_find_boot_info(struct qedi_ctx *qedi,
951 cls_sess = iscsi_conn_to_session(cls_conn); 987 cls_sess = iscsi_conn_to_session(cls_conn);
952 sess = cls_sess->dd_data; 988 sess = cls_sess->dd_data;
953 989
990 if (!iscsi_is_session_online(cls_sess))
991 continue;
992
954 if (pri_ctrl_flags) { 993 if (pri_ctrl_flags) {
955 if (!strcmp(pri_tgt->iscsi_name, sess->targetname) && 994 if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
956 !strcmp(pri_tgt->ip_addr, ep_ip_addr)) { 995 !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
@@ -1297,7 +1336,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
1297 int i, rc, cpu; 1336 int i, rc, cpu;
1298 1337
1299 cpu = cpumask_first(cpu_online_mask); 1338 cpu = cpumask_first(cpu_online_mask);
1300 for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) { 1339 for (i = 0; i < qedi->int_info.msix_cnt; i++) {
1301 rc = request_irq(qedi->int_info.msix[i].vector, 1340 rc = request_irq(qedi->int_info.msix[i].vector,
1302 qedi_msix_handler, 0, "qedi", 1341 qedi_msix_handler, 0, "qedi",
1303 &qedi->fp_array[i]); 1342 &qedi->fp_array[i]);
@@ -1375,7 +1414,7 @@ static void qedi_free_bdq(struct qedi_ctx *qedi)
1375 int i; 1414 int i;
1376 1415
1377 if (qedi->bdq_pbl_list) 1416 if (qedi->bdq_pbl_list)
1378 dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE, 1417 dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
1379 qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma); 1418 qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
1380 1419
1381 if (qedi->bdq_pbl) 1420 if (qedi->bdq_pbl)
@@ -1436,7 +1475,7 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
1436 1475
1437 /* Alloc dma memory for BDQ page buffer list */ 1476 /* Alloc dma memory for BDQ page buffer list */
1438 qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd); 1477 qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
1439 qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE); 1478 qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, QEDI_PAGE_SIZE);
1440 qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd); 1479 qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
1441 1480
1442 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n", 1481 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
@@ -1471,7 +1510,8 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
1471 } 1510 }
1472 1511
1473 /* Allocate list of PBL pages */ 1512 /* Allocate list of PBL pages */
1474 qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE, 1513 qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev,
1514 QEDI_PAGE_SIZE,
1475 &qedi->bdq_pbl_list_dma, 1515 &qedi->bdq_pbl_list_dma,
1476 GFP_KERNEL); 1516 GFP_KERNEL);
1477 if (!qedi->bdq_pbl_list) { 1517 if (!qedi->bdq_pbl_list) {
@@ -1484,13 +1524,14 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
1484 * Now populate PBL list with pages that contain pointers to the 1524 * Now populate PBL list with pages that contain pointers to the
1485 * individual buffers. 1525 * individual buffers.
1486 */ 1526 */
1487 qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE; 1527 qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size /
1528 QEDI_PAGE_SIZE;
1488 list = (u64 *)qedi->bdq_pbl_list; 1529 list = (u64 *)qedi->bdq_pbl_list;
1489 page = qedi->bdq_pbl_list_dma; 1530 page = qedi->bdq_pbl_list_dma;
1490 for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) { 1531 for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
1491 *list = qedi->bdq_pbl_dma; 1532 *list = qedi->bdq_pbl_dma;
1492 list++; 1533 list++;
1493 page += PAGE_SIZE; 1534 page += QEDI_PAGE_SIZE;
1494 } 1535 }
1495 1536
1496 return 0; 1537 return 0;
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index 8a0e523fc089..41bcbbafebd4 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
7 * this source tree. 7 * this source tree.
8 */ 8 */
9 9
10#define QEDI_MODULE_VERSION "8.33.0.20" 10#define QEDI_MODULE_VERSION "8.33.0.21"
11#define QEDI_DRIVER_MAJOR_VER 8 11#define QEDI_DRIVER_MAJOR_VER 8
12#define QEDI_DRIVER_MINOR_VER 33 12#define QEDI_DRIVER_MINOR_VER 33
13#define QEDI_DRIVER_REV_VER 0 13#define QEDI_DRIVER_REV_VER 0
14#define QEDI_DRIVER_ENG_VER 20 14#define QEDI_DRIVER_ENG_VER 21
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 15a50cc7e4b3..a414f51302b7 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -383,20 +383,10 @@
383 383
384#include "qla1280.h" 384#include "qla1280.h"
385 385
386#ifndef BITS_PER_LONG 386#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
387#error "BITS_PER_LONG not defined!"
388#endif
389#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
390#define QLA_64BIT_PTR 1 387#define QLA_64BIT_PTR 1
391#endif 388#endif
392 389
393#ifdef QLA_64BIT_PTR
394#define pci_dma_hi32(a) ((a >> 16) >> 16)
395#else
396#define pci_dma_hi32(a) 0
397#endif
398#define pci_dma_lo32(a) (a & 0xffffffff)
399
400#define NVRAM_DELAY() udelay(500) /* 2 microseconds */ 390#define NVRAM_DELAY() udelay(500) /* 2 microseconds */
401 391
402#if defined(__ia64__) && !defined(ia64_platform_is) 392#if defined(__ia64__) && !defined(ia64_platform_is)
@@ -1790,8 +1780,8 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1790 mb[4] = cnt; 1780 mb[4] = cnt;
1791 mb[3] = ha->request_dma & 0xffff; 1781 mb[3] = ha->request_dma & 0xffff;
1792 mb[2] = (ha->request_dma >> 16) & 0xffff; 1782 mb[2] = (ha->request_dma >> 16) & 0xffff;
1793 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1783 mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
1794 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1784 mb[6] = upper_32_bits(ha->request_dma) >> 16;
1795 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n", 1785 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1796 __func__, mb[0], 1786 __func__, mb[0],
1797 (void *)(long)ha->request_dma, 1787 (void *)(long)ha->request_dma,
@@ -1810,8 +1800,8 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1810 mb[4] = cnt; 1800 mb[4] = cnt;
1811 mb[3] = p_tbuf & 0xffff; 1801 mb[3] = p_tbuf & 0xffff;
1812 mb[2] = (p_tbuf >> 16) & 0xffff; 1802 mb[2] = (p_tbuf >> 16) & 0xffff;
1813 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff; 1803 mb[7] = upper_32_bits(p_tbuf) & 0xffff;
1814 mb[6] = pci_dma_hi32(p_tbuf) >> 16; 1804 mb[6] = upper_32_bits(p_tbuf) >> 16;
1815 1805
1816 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1806 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1817 BIT_1 | BIT_0, mb); 1807 BIT_1 | BIT_0, mb);
@@ -1933,8 +1923,8 @@ qla1280_init_rings(struct scsi_qla_host *ha)
1933 mb[3] = ha->request_dma & 0xffff; 1923 mb[3] = ha->request_dma & 0xffff;
1934 mb[2] = (ha->request_dma >> 16) & 0xffff; 1924 mb[2] = (ha->request_dma >> 16) & 0xffff;
1935 mb[4] = 0; 1925 mb[4] = 0;
1936 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1926 mb[7] = upper_32_bits(ha->request_dma) & 0xffff;
1937 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1927 mb[6] = upper_32_bits(ha->request_dma) >> 16;
1938 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 | 1928 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1939 BIT_3 | BIT_2 | BIT_1 | BIT_0, 1929 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1940 &mb[0]))) { 1930 &mb[0]))) {
@@ -1947,8 +1937,8 @@ qla1280_init_rings(struct scsi_qla_host *ha)
1947 mb[3] = ha->response_dma & 0xffff; 1937 mb[3] = ha->response_dma & 0xffff;
1948 mb[2] = (ha->response_dma >> 16) & 0xffff; 1938 mb[2] = (ha->response_dma >> 16) & 0xffff;
1949 mb[5] = 0; 1939 mb[5] = 0;
1950 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff; 1940 mb[7] = upper_32_bits(ha->response_dma) & 0xffff;
1951 mb[6] = pci_dma_hi32(ha->response_dma) >> 16; 1941 mb[6] = upper_32_bits(ha->response_dma) >> 16;
1952 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 | 1942 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1953 BIT_3 | BIT_2 | BIT_1 | BIT_0, 1943 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1954 &mb[0]); 1944 &mb[0]);
@@ -2914,13 +2904,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2914 SCSI_BUS_32(cmd)); 2904 SCSI_BUS_32(cmd));
2915#endif 2905#endif
2916 *dword_ptr++ = 2906 *dword_ptr++ =
2917 cpu_to_le32(pci_dma_lo32(dma_handle)); 2907 cpu_to_le32(lower_32_bits(dma_handle));
2918 *dword_ptr++ = 2908 *dword_ptr++ =
2919 cpu_to_le32(pci_dma_hi32(dma_handle)); 2909 cpu_to_le32(upper_32_bits(dma_handle));
2920 *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); 2910 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2921 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", 2911 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2922 cpu_to_le32(pci_dma_hi32(dma_handle)), 2912 cpu_to_le32(upper_32_bits(dma_handle)),
2923 cpu_to_le32(pci_dma_lo32(dma_handle)), 2913 cpu_to_le32(lower_32_bits(dma_handle)),
2924 cpu_to_le32(sg_dma_len(sg_next(s)))); 2914 cpu_to_le32(sg_dma_len(sg_next(s))));
2925 remseg--; 2915 remseg--;
2926 } 2916 }
@@ -2976,14 +2966,14 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2976 SCSI_BUS_32(cmd)); 2966 SCSI_BUS_32(cmd));
2977#endif 2967#endif
2978 *dword_ptr++ = 2968 *dword_ptr++ =
2979 cpu_to_le32(pci_dma_lo32(dma_handle)); 2969 cpu_to_le32(lower_32_bits(dma_handle));
2980 *dword_ptr++ = 2970 *dword_ptr++ =
2981 cpu_to_le32(pci_dma_hi32(dma_handle)); 2971 cpu_to_le32(upper_32_bits(dma_handle));
2982 *dword_ptr++ = 2972 *dword_ptr++ =
2983 cpu_to_le32(sg_dma_len(s)); 2973 cpu_to_le32(sg_dma_len(s));
2984 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", 2974 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2985 cpu_to_le32(pci_dma_hi32(dma_handle)), 2975 cpu_to_le32(upper_32_bits(dma_handle)),
2986 cpu_to_le32(pci_dma_lo32(dma_handle)), 2976 cpu_to_le32(lower_32_bits(dma_handle)),
2987 cpu_to_le32(sg_dma_len(s))); 2977 cpu_to_le32(sg_dma_len(s)));
2988 } 2978 }
2989 remseg -= cnt; 2979 remseg -= cnt;
@@ -3178,10 +3168,10 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3178 if (cnt == 4) 3168 if (cnt == 4)
3179 break; 3169 break;
3180 *dword_ptr++ = 3170 *dword_ptr++ =
3181 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); 3171 cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3182 *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); 3172 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3183 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", 3173 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3184 (pci_dma_lo32(sg_dma_address(s))), 3174 (lower_32_bits(sg_dma_address(s))),
3185 (sg_dma_len(s))); 3175 (sg_dma_len(s)));
3186 remseg--; 3176 remseg--;
3187 } 3177 }
@@ -3224,13 +3214,13 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3224 if (cnt == 7) 3214 if (cnt == 7)
3225 break; 3215 break;
3226 *dword_ptr++ = 3216 *dword_ptr++ =
3227 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); 3217 cpu_to_le32(lower_32_bits(sg_dma_address(s)));
3228 *dword_ptr++ = 3218 *dword_ptr++ =
3229 cpu_to_le32(sg_dma_len(s)); 3219 cpu_to_le32(sg_dma_len(s));
3230 dprintk(1, 3220 dprintk(1,
3231 "S/G Segment Cont. phys_addr=0x%x, " 3221 "S/G Segment Cont. phys_addr=0x%x, "
3232 "len=0x%x\n", 3222 "len=0x%x\n",
3233 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))), 3223 cpu_to_le32(lower_32_bits(sg_dma_address(s))),
3234 cpu_to_le32(sg_dma_len(s))); 3224 cpu_to_le32(sg_dma_len(s)));
3235 } 3225 }
3236 remseg -= cnt; 3226 remseg -= cnt;
@@ -4213,7 +4203,6 @@ static struct scsi_host_template qla1280_driver_template = {
4213 .can_queue = MAX_OUTSTANDING_COMMANDS, 4203 .can_queue = MAX_OUTSTANDING_COMMANDS,
4214 .this_id = -1, 4204 .this_id = -1,
4215 .sg_tablesize = SG_ALL, 4205 .sg_tablesize = SG_ALL,
4216 .use_clustering = ENABLE_CLUSTERING,
4217}; 4206};
4218 4207
4219 4208
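The qla1280 hunks above drop the driver-private pci_dma_hi32()/pci_dma_lo32() macros in favor of the generic upper_32_bits()/lower_32_bits() helpers from <linux/kernel.h>. A minimal sketch of the resulting mailbox packing, assuming a 64-bit dma_addr_t; fill_mailbox_addr() is a hypothetical helper, not a driver function:

#include <linux/kernel.h>	/* upper_32_bits(), lower_32_bits() */
#include <linux/types.h>	/* dma_addr_t, u16 */

/* Hypothetical helper mirroring the mailbox packing in the hunks above. */
static void fill_mailbox_addr(u16 *mb, dma_addr_t addr)
{
	mb[3] = lower_32_bits(addr) & 0xffff;		/* bits 15:0  */
	mb[2] = (lower_32_bits(addr) >> 16) & 0xffff;	/* bits 31:16 */
	mb[7] = upper_32_bits(addr) & 0xffff;		/* bits 47:32 */
	mb[6] = upper_32_bits(addr) >> 16;		/* bits 63:48 */
}

upper_32_bits() shifts by 16 twice, so the expression stays well-defined even when dma_addr_t is only 32 bits wide, the same trick the removed ((a) >> 16) >> 16 macro relied on.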
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 0bb9ac6ece92..00444dc79756 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2712,6 +2712,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
2712 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) 2712 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
2713 msleep(1000); 2713 msleep(1000);
2714 2714
2715 qla_nvme_delete(vha);
2716
2715 qla24xx_disable_vp(vha); 2717 qla24xx_disable_vp(vha);
2716 qla2x00_wait_for_sess_deletion(vha); 2718 qla2x00_wait_for_sess_deletion(vha);
2717 2719
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index eb59c796a795..364bb52ed2a6 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -237,15 +237,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
237 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 237 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
238 238
239 sp->done = qla2x00_async_login_sp_done; 239 sp->done = qla2x00_async_login_sp_done;
240 if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) { 240 if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
241 lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; 241 lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
242 } else { 242 else
243 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; 243 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
244 244
245 if (fcport->fc4f_nvme) 245 if (fcport->fc4f_nvme)
246 lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; 246 lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
247
248 }
249 247
250 ql_dbg(ql_dbg_disc, vha, 0x2072, 248 ql_dbg(ql_dbg_disc, vha, 0x2072,
251 "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x " 249 "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index d620f4bebcd0..099d8e9851cb 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -507,6 +507,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
507 qla2x00_start_timer(vha, WATCH_INTERVAL); 507 qla2x00_start_timer(vha, WATCH_INTERVAL);
508 508
509 vha->req = base_vha->req; 509 vha->req = base_vha->req;
510 vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
510 host->can_queue = base_vha->req->length + 128; 511 host->can_queue = base_vha->req->length + 128;
511 host->cmd_per_lun = 3; 512 host->cmd_per_lun = 3;
512 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 513 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index fccc733145fc..39d892bbd219 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -463,21 +463,10 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
463 int rval = -ENODEV; 463 int rval = -ENODEV;
464 srb_t *sp; 464 srb_t *sp;
465 struct qla_qpair *qpair = hw_queue_handle; 465 struct qla_qpair *qpair = hw_queue_handle;
466 struct nvme_private *priv; 466 struct nvme_private *priv = fd->private;
467 struct qla_nvme_rport *qla_rport = rport->private; 467 struct qla_nvme_rport *qla_rport = rport->private;
468 468
469 if (!fd || !qpair) {
470 ql_log(ql_log_warn, NULL, 0x2134,
471 "NO NVMe request or Queue Handle\n");
472 return rval;
473 }
474
475 priv = fd->private;
476 fcport = qla_rport->fcport; 469 fcport = qla_rport->fcport;
477 if (!fcport) {
478 ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
479 return rval;
480 }
481 470
482 vha = fcport->vha; 471 vha = fcport->vha;
483 472
@@ -506,6 +495,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
506 sp->name = "nvme_cmd"; 495 sp->name = "nvme_cmd";
507 sp->done = qla_nvme_sp_done; 496 sp->done = qla_nvme_sp_done;
508 sp->qpair = qpair; 497 sp->qpair = qpair;
498 sp->vha = vha;
509 nvme = &sp->u.iocb_cmd; 499 nvme = &sp->u.iocb_cmd;
510 nvme->u.nvme.desc = fd; 500 nvme->u.nvme.desc = fd;
511 501
@@ -553,7 +543,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
553 schedule_work(&fcport->free_work); 543 schedule_work(&fcport->free_work);
554 } 544 }
555 545
556 fcport->nvme_flag &= ~(NVME_FLAG_REGISTERED | NVME_FLAG_DELETING); 546 fcport->nvme_flag &= ~NVME_FLAG_DELETING;
557 ql_log(ql_log_info, fcport->vha, 0x2110, 547 ql_log(ql_log_info, fcport->vha, 0x2110,
558 "remoteport_delete of %p completed.\n", fcport); 548 "remoteport_delete of %p completed.\n", fcport);
559} 549}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f92196ec5489..ea69dafc9774 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -328,7 +328,6 @@ struct scsi_host_template qla2xxx_driver_template = {
328 .map_queues = qla2xxx_map_queues, 328 .map_queues = qla2xxx_map_queues,
329 .this_id = -1, 329 .this_id = -1,
330 .cmd_per_lun = 3, 330 .cmd_per_lun = 3,
331 .use_clustering = ENABLE_CLUSTERING,
332 .sg_tablesize = SG_ALL, 331 .sg_tablesize = SG_ALL,
333 332
334 .max_sectors = 0xFFFF, 333 .max_sectors = 0xFFFF,
@@ -1742,10 +1741,53 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1742 return QLA_SUCCESS; 1741 return QLA_SUCCESS;
1743} 1742}
1744 1743
1744static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
1745 unsigned long *flags)
1746 __releases(qp->qp_lock_ptr)
1747 __acquires(qp->qp_lock_ptr)
1748{
1749 scsi_qla_host_t *vha = qp->vha;
1750 struct qla_hw_data *ha = vha->hw;
1751
1752 if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS) {
1753 if (!sp_get(sp)) {
1754 /* got sp */
1755 spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
1756 qla_nvme_abort(ha, sp, res);
1757 spin_lock_irqsave(qp->qp_lock_ptr, *flags);
1758 }
1759 } else if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
1760 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
1761 !qla2x00_isp_reg_stat(ha) && sp->type == SRB_SCSI_CMD) {
1762 /*
1763 * Don't abort commands in adapter during EEH recovery as it's
1764 * not accessible/responding.
1765 *
1766 * Get a reference to the sp and drop the lock. The reference
1767 * ensures this sp->done() call and not the call in
1768 * qla2xxx_eh_abort() ends the SCSI cmd (with result 'res').
1769 */
1770 if (!sp_get(sp)) {
1771 int status;
1772
1773 spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
1774 status = qla2xxx_eh_abort(GET_CMD_SP(sp));
1775 spin_lock_irqsave(qp->qp_lock_ptr, *flags);
1776 /*
1777 * Get rid of extra reference caused
1778 * by early exit from qla2xxx_eh_abort
1779 */
1780 if (status == FAST_IO_FAIL)
1781 atomic_dec(&sp->ref_count);
1782 }
1783 }
1784 sp->done(sp, res);
1785}
1786
1745static void 1787static void
1746__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) 1788__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1747{ 1789{
1748 int cnt, status; 1790 int cnt;
1749 unsigned long flags; 1791 unsigned long flags;
1750 srb_t *sp; 1792 srb_t *sp;
1751 scsi_qla_host_t *vha = qp->vha; 1793 scsi_qla_host_t *vha = qp->vha;
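The new qla2x00_abort_srb() helper keeps the original drop-and-reacquire locking, now annotated with __releases()/__acquires() so sparse knows the function temporarily exits its caller's lock. A generic sketch of the pattern, with illustrative names:

#include <linux/spinlock.h>

/* Illustrative only: work that must run without the caller's spinlock. */
static void do_unlocked_work(spinlock_t *lock, unsigned long *flags)
	__releases(lock)
	__acquires(lock)
{
	spin_unlock_irqrestore(lock, *flags);
	/* ... slow work, possibly taking other locks ... */
	spin_lock_irqsave(lock, *flags);
}

Passing the caller's flags by pointer lets the helper restore and re-save the interrupt state on the caller's behalf, exactly as the qla2xxx code does with *flags.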
@@ -1764,50 +1806,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1764 req->outstanding_cmds[cnt] = NULL; 1806 req->outstanding_cmds[cnt] = NULL;
1765 switch (sp->cmd_type) { 1807 switch (sp->cmd_type) {
1766 case TYPE_SRB: 1808 case TYPE_SRB:
1767 if (sp->type == SRB_NVME_CMD || 1809 qla2x00_abort_srb(qp, sp, res, &flags);
1768 sp->type == SRB_NVME_LS) {
1769 if (!sp_get(sp)) {
1770 /* got sp */
1771 spin_unlock_irqrestore
1772 (qp->qp_lock_ptr,
1773 flags);
1774 qla_nvme_abort(ha, sp, res);
1775 spin_lock_irqsave
1776 (qp->qp_lock_ptr, flags);
1777 }
1778 } else if (GET_CMD_SP(sp) &&
1779 !ha->flags.eeh_busy &&
1780 (!test_bit(ABORT_ISP_ACTIVE,
1781 &vha->dpc_flags)) &&
1782 !qla2x00_isp_reg_stat(ha) &&
1783 (sp->type == SRB_SCSI_CMD)) {
1784 /*
1785 * Don't abort commands in adapter
1786 * during EEH recovery as it's not
1787 * accessible/responding.
1788 *
1789 * Get a reference to the sp and drop
1790 * the lock. The reference ensures this
1791 * sp->done() call and not the call in
1792 * qla2xxx_eh_abort() ends the SCSI cmd
1793 * (with result 'res').
1794 */
1795 if (!sp_get(sp)) {
1796 spin_unlock_irqrestore
1797 (qp->qp_lock_ptr, flags);
1798 status = qla2xxx_eh_abort(
1799 GET_CMD_SP(sp));
1800 spin_lock_irqsave
1801 (qp->qp_lock_ptr, flags);
1802 /*
1803 * Get rid of extra reference caused
1804 * by early exit from qla2xxx_eh_abort
1805 */
1806 if (status == FAST_IO_FAIL)
1807 atomic_dec(&sp->ref_count);
1808 }
1809 }
1810 sp->done(sp, res);
1811 break; 1810 break;
1812 case TYPE_TGT_CMD: 1811 case TYPE_TGT_CMD:
1813 if (!vha->hw->tgt.tgt_ops || !tgt || 1812 if (!vha->hw->tgt.tgt_ops || !tgt ||
@@ -3558,6 +3557,8 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
3558 spin_unlock_irqrestore(&ha->vport_slock, flags); 3557 spin_unlock_irqrestore(&ha->vport_slock, flags);
3559 mutex_unlock(&ha->vport_lock); 3558 mutex_unlock(&ha->vport_lock);
3560 3559
3560 qla_nvme_delete(vha);
3561
3561 fc_vport_terminate(vha->fc_vport); 3562 fc_vport_terminate(vha->fc_vport);
3562 scsi_host_put(vha->host); 3563 scsi_host_put(vha->host);
3563 3564
@@ -4179,12 +4180,10 @@ fail_free_nvram:
4179 kfree(ha->nvram); 4180 kfree(ha->nvram);
4180 ha->nvram = NULL; 4181 ha->nvram = NULL;
4181fail_free_ctx_mempool: 4182fail_free_ctx_mempool:
4182 if (ha->ctx_mempool) 4183 mempool_destroy(ha->ctx_mempool);
4183 mempool_destroy(ha->ctx_mempool);
4184 ha->ctx_mempool = NULL; 4184 ha->ctx_mempool = NULL;
4185fail_free_srb_mempool: 4185fail_free_srb_mempool:
4186 if (ha->srb_mempool) 4186 mempool_destroy(ha->srb_mempool);
4187 mempool_destroy(ha->srb_mempool);
4188 ha->srb_mempool = NULL; 4187 ha->srb_mempool = NULL;
4189fail_free_gid_list: 4188fail_free_gid_list:
4190 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4189 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
@@ -4486,8 +4485,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
4486 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump, 4485 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
4487 ha->mctp_dump_dma); 4486 ha->mctp_dump_dma);
4488 4487
4489 if (ha->srb_mempool) 4488 mempool_destroy(ha->srb_mempool);
4490 mempool_destroy(ha->srb_mempool);
4491 4489
4492 if (ha->dcbx_tlv) 4490 if (ha->dcbx_tlv)
4493 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, 4491 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
@@ -4519,8 +4517,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
4519 if (ha->async_pd) 4517 if (ha->async_pd)
4520 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); 4518 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
4521 4519
4522 if (ha->s_dma_pool) 4520 dma_pool_destroy(ha->s_dma_pool);
4523 dma_pool_destroy(ha->s_dma_pool);
4524 4521
4525 if (ha->gid_list) 4522 if (ha->gid_list)
4526 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 4523 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
@@ -4541,14 +4538,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
4541 } 4538 }
4542 } 4539 }
4543 4540
4544 if (ha->dl_dma_pool) 4541 dma_pool_destroy(ha->dl_dma_pool);
4545 dma_pool_destroy(ha->dl_dma_pool);
4546 4542
4547 if (ha->fcp_cmnd_dma_pool) 4543 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4548 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4549 4544
4550 if (ha->ctx_mempool) 4545 mempool_destroy(ha->ctx_mempool);
4551 mempool_destroy(ha->ctx_mempool);
4552 4546
4553 qlt_mem_free(ha); 4547 qlt_mem_free(ha);
4554 4548
@@ -7095,8 +7089,7 @@ qla2x00_module_exit(void)
7095 qla2x00_release_firmware(); 7089 qla2x00_release_firmware();
7096 kmem_cache_destroy(srb_cachep); 7090 kmem_cache_destroy(srb_cachep);
7097 qlt_exit(); 7091 qlt_exit();
7098 if (ctx_cachep) 7092 kmem_cache_destroy(ctx_cachep);
7099 kmem_cache_destroy(ctx_cachep);
7100 fc_release_transport(qla2xxx_transport_template); 7093 fc_release_transport(qla2xxx_transport_template);
7101 fc_release_transport(qla2xxx_transport_vport_template); 7094 fc_release_transport(qla2xxx_transport_vport_template);
7102} 7095}
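Several qla2xxx unwind paths above lose their if (ptr) guards because mempool_destroy(), dma_pool_destroy() and kmem_cache_destroy(), like kfree(), are defined to be no-ops on a NULL pointer. A sketch of the simplified teardown shape, with placeholder parameters:

#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static void example_mem_free(mempool_t *pool, struct dma_pool *dpool,
			     struct kmem_cache *cache)
{
	mempool_destroy(pool);		/* no-op when pool == NULL */
	dma_pool_destroy(dpool);	/* no-op when dpool == NULL */
	kmem_cache_destroy(cache);	/* no-op when cache == NULL */
}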
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index c4504740f0e2..510337eac106 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2379,20 +2379,20 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2379 } 2379 }
2380 2380
2381 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { 2381 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
2382 if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == 2382 switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
2383 ELS_LOGO || 2383 case ELS_LOGO:
2384 mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == 2384 case ELS_PRLO:
2385 ELS_PRLO || 2385 case ELS_TPRLO:
2386 mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
2387 ELS_TPRLO) {
2388 ql_dbg(ql_dbg_disc, vha, 0x2106, 2386 ql_dbg(ql_dbg_disc, vha, 0x2106,
2389 "TM response logo %phC status %#x state %#x", 2387 "TM response logo %phC status %#x state %#x",
2390 mcmd->sess->port_name, mcmd->fc_tm_rsp, 2388 mcmd->sess->port_name, mcmd->fc_tm_rsp,
2391 mcmd->flags); 2389 mcmd->flags);
2392 qlt_schedule_sess_for_deletion(mcmd->sess); 2390 qlt_schedule_sess_for_deletion(mcmd->sess);
2393 } else { 2391 break;
2392 default:
2394 qlt_send_notify_ack(vha->hw->base_qpair, 2393 qlt_send_notify_ack(vha->hw->base_qpair,
2395 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2394 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2395 break;
2396 } 2396 }
2397 } else { 2397 } else {
2398 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) { 2398 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
@@ -2660,9 +2660,9 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2660 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt; 2660 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2661 cnt++, prm->seg_cnt--) { 2661 cnt++, prm->seg_cnt--) {
2662 *dword_ptr++ = 2662 *dword_ptr++ =
2663 cpu_to_le32(pci_dma_lo32 2663 cpu_to_le32(lower_32_bits
2664 (sg_dma_address(prm->sg))); 2664 (sg_dma_address(prm->sg)));
2665 *dword_ptr++ = cpu_to_le32(pci_dma_hi32 2665 *dword_ptr++ = cpu_to_le32(upper_32_bits
2666 (sg_dma_address(prm->sg))); 2666 (sg_dma_address(prm->sg)));
2667 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2667 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
2668 2668
@@ -2704,9 +2704,9 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2704 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; 2704 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2705 cnt++, prm->seg_cnt--) { 2705 cnt++, prm->seg_cnt--) {
2706 *dword_ptr++ = 2706 *dword_ptr++ =
2707 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg))); 2707 cpu_to_le32(lower_32_bits(sg_dma_address(prm->sg)));
2708 2708
2709 *dword_ptr++ = cpu_to_le32(pci_dma_hi32( 2709 *dword_ptr++ = cpu_to_le32(upper_32_bits(
2710 sg_dma_address(prm->sg))); 2710 sg_dma_address(prm->sg)));
2711 2711
2712 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2712 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 721da593b1bc..577e1786a3f1 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -771,14 +771,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
771#define FC_TM_REJECT 4 771#define FC_TM_REJECT 4
772#define FC_TM_FAILED 5 772#define FC_TM_FAILED 5
773 773
774#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
775#define pci_dma_lo32(a) (a & 0xffffffff)
776#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
777#else
778#define pci_dma_lo32(a) (a & 0xffffffff)
779#define pci_dma_hi32(a) 0
780#endif
781
782#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \ 774#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
783 (((const uint8_t *)(sense))[0] & 0x70) == 0x70) 775 (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
784 776
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 12bafff71a1a..ca7945cb959b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "10.00.00.11-k" 10#define QLA2XXX_VERSION "10.00.00.12-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 10 12#define QLA_DRIVER_MAJOR_VER 10
13#define QLA_DRIVER_MINOR_VER 0 13#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 65053c066680..283e6b80abb5 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -108,11 +108,6 @@ static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
108 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); 108 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
109} 109}
110 110
111static char *tcm_qla2xxx_get_fabric_name(void)
112{
113 return "qla2xxx";
114}
115
116/* 111/*
117 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn 112 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
118 */ 113 */
@@ -178,11 +173,6 @@ static int tcm_qla2xxx_npiv_parse_wwn(
178 return 0; 173 return 0;
179} 174}
180 175
181static char *tcm_qla2xxx_npiv_get_fabric_name(void)
182{
183 return "qla2xxx_npiv";
184}
185
186static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) 176static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
187{ 177{
188 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 178 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -964,38 +954,14 @@ static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item,
964 atomic_read(&tpg->lport_tpg_enabled)); 954 atomic_read(&tpg->lport_tpg_enabled));
965} 955}
966 956
967static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
968{
969 struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
970 struct tcm_qla2xxx_tpg, tpg_base_work);
971 struct se_portal_group *se_tpg = &base_tpg->se_tpg;
972 struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
973
974 if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
975 atomic_set(&base_tpg->lport_tpg_enabled, 1);
976 qlt_enable_vha(base_vha);
977 }
978 complete(&base_tpg->tpg_base_comp);
979}
980
981static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
982{
983 struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
984 struct tcm_qla2xxx_tpg, tpg_base_work);
985 struct se_portal_group *se_tpg = &base_tpg->se_tpg;
986 struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
987
988 if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
989 atomic_set(&base_tpg->lport_tpg_enabled, 0);
990 target_undepend_item(&se_tpg->tpg_group.cg_item);
991 }
992 complete(&base_tpg->tpg_base_comp);
993}
994
995static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, 957static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
996 const char *page, size_t count) 958 const char *page, size_t count)
997{ 959{
998 struct se_portal_group *se_tpg = to_tpg(item); 960 struct se_portal_group *se_tpg = to_tpg(item);
961 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
962 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
963 struct tcm_qla2xxx_lport, lport_wwn);
964 struct scsi_qla_host *vha = lport->qla_vha;
999 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 965 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1000 struct tcm_qla2xxx_tpg, se_tpg); 966 struct tcm_qla2xxx_tpg, se_tpg);
1001 unsigned long op; 967 unsigned long op;
@@ -1014,24 +980,16 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
1014 if (atomic_read(&tpg->lport_tpg_enabled)) 980 if (atomic_read(&tpg->lport_tpg_enabled))
1015 return -EEXIST; 981 return -EEXIST;
1016 982
1017 INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg); 983 atomic_set(&tpg->lport_tpg_enabled, 1);
984 qlt_enable_vha(vha);
1018 } else { 985 } else {
1019 if (!atomic_read(&tpg->lport_tpg_enabled)) 986 if (!atomic_read(&tpg->lport_tpg_enabled))
1020 return count; 987 return count;
1021 988
1022 INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg); 989 atomic_set(&tpg->lport_tpg_enabled, 0);
990 qlt_stop_phase1(vha->vha_tgt.qla_tgt);
1023 } 991 }
1024 init_completion(&tpg->tpg_base_comp);
1025 schedule_work(&tpg->tpg_base_work);
1026 wait_for_completion(&tpg->tpg_base_comp);
1027 992
1028 if (op) {
1029 if (!atomic_read(&tpg->lport_tpg_enabled))
1030 return -ENODEV;
1031 } else {
1032 if (atomic_read(&tpg->lport_tpg_enabled))
1033 return -EPERM;
1034 }
1035 return count; 993 return count;
1036} 994}
1037 995
@@ -1920,14 +1878,13 @@ static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
1920 1878
1921static const struct target_core_fabric_ops tcm_qla2xxx_ops = { 1879static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
1922 .module = THIS_MODULE, 1880 .module = THIS_MODULE,
1923 .name = "qla2xxx", 1881 .fabric_name = "qla2xxx",
1924 .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), 1882 .node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
1925 /* 1883 /*
1926 * XXX: Limit assumes single page per scatter-gather-list entry. 1884 * XXX: Limit assumes single page per scatter-gather-list entry.
1927 * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096 1885 * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
1928 */ 1886 */
1929 .max_data_sg_nents = 1200, 1887 .max_data_sg_nents = 1200,
1930 .get_fabric_name = tcm_qla2xxx_get_fabric_name,
1931 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, 1888 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
1932 .tpg_get_tag = tcm_qla2xxx_get_tag, 1889 .tpg_get_tag = tcm_qla2xxx_get_tag,
1933 .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, 1890 .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
@@ -1969,9 +1926,8 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
1969 1926
1970static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { 1927static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1971 .module = THIS_MODULE, 1928 .module = THIS_MODULE,
1972 .name = "qla2xxx_npiv", 1929 .fabric_name = "qla2xxx_npiv",
1973 .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), 1930 .node_acl_size = sizeof(struct tcm_qla2xxx_nacl),
1974 .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
1975 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, 1931 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
1976 .tpg_get_tag = tcm_qla2xxx_get_tag, 1932 .tpg_get_tag = tcm_qla2xxx_get_tag,
1977 .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, 1933 .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
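The tcm_qla2xxx hunks track a target-core API change: struct target_core_fabric_ops now carries a constant .fabric_name string instead of a .get_fabric_name() callback, which is why the two trivial accessor functions above are deleted. An illustrative fragment of a converted ops table (names are placeholders; see <target/target_core_fabric.h> for the real struct):

static const struct target_core_fabric_ops example_fabric_ops = {
	.module      = THIS_MODULE,
	.fabric_name = "example",	/* was: .get_fabric_name = example_get_fabric_name */
	/* remaining callbacks unchanged */
};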
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 7550ba2831c3..147cf6c90366 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -48,9 +48,6 @@ struct tcm_qla2xxx_tpg {
48 struct tcm_qla2xxx_tpg_attrib tpg_attrib; 48 struct tcm_qla2xxx_tpg_attrib tpg_attrib;
49 /* Returned by tcm_qla2xxx_make_tpg() */ 49 /* Returned by tcm_qla2xxx_make_tpg() */
50 struct se_portal_group se_tpg; 50 struct se_portal_group se_tpg;
51 /* Items for dealing with configfs_depend_item */
52 struct completion tpg_base_comp;
53 struct work_struct tpg_base_work;
54}; 51};
55 52
56struct tcm_qla2xxx_fc_loopid { 53struct tcm_qla2xxx_fc_loopid {
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 051164f755a4..949e186cc5d7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -205,7 +205,6 @@ static struct scsi_host_template qla4xxx_driver_template = {
205 205
206 .this_id = -1, 206 .this_id = -1,
207 .cmd_per_lun = 3, 207 .cmd_per_lun = 3,
208 .use_clustering = ENABLE_CLUSTERING,
209 .sg_tablesize = SG_ALL, 208 .sg_tablesize = SG_ALL,
210 209
211 .max_sectors = 0xFFFF, 210 .max_sectors = 0xFFFF,
@@ -4160,20 +4159,16 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
4160 ha->fw_dump_size = 0; 4159 ha->fw_dump_size = 0;
4161 4160
4162 /* Free srb pool. */ 4161 /* Free srb pool. */
4163 if (ha->srb_mempool) 4162 mempool_destroy(ha->srb_mempool);
4164 mempool_destroy(ha->srb_mempool);
4165
4166 ha->srb_mempool = NULL; 4163 ha->srb_mempool = NULL;
4167 4164
4168 if (ha->chap_dma_pool) 4165 dma_pool_destroy(ha->chap_dma_pool);
4169 dma_pool_destroy(ha->chap_dma_pool);
4170 4166
4171 if (ha->chap_list) 4167 if (ha->chap_list)
4172 vfree(ha->chap_list); 4168 vfree(ha->chap_list);
4173 ha->chap_list = NULL; 4169 ha->chap_list = NULL;
4174 4170
4175 if (ha->fw_ddb_dma_pool) 4171 dma_pool_destroy(ha->fw_ddb_dma_pool);
4176 dma_pool_destroy(ha->fw_ddb_dma_pool);
4177 4172
4178 /* release io space registers */ 4173 /* release io space registers */
4179 if (is_qla8022(ha)) { 4174 if (is_qla8022(ha)) {
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 95431d605c24..8f709002f746 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -193,7 +193,7 @@ static struct scsi_host_template qlogicfas_driver_template = {
193 .can_queue = 1, 193 .can_queue = 1,
194 .this_id = -1, 194 .this_id = -1,
195 .sg_tablesize = SG_ALL, 195 .sg_tablesize = SG_ALL,
196 .use_clustering = DISABLE_CLUSTERING, 196 .dma_boundary = PAGE_SIZE - 1,
197}; 197};
198 198
199static __init int qlogicfas_init(void) 199static __init int qlogicfas_init(void)
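With the clustering flag gone from struct scsi_host_template, hosts that used to set DISABLE_CLUSTERING express the same constraint as a DMA segment boundary: a mask of PAGE_SIZE - 1 keeps the block layer from merging scatter-gather segments across page boundaries. A sketch of a converted template, with placeholder names:

#include <scsi/scsi_host.h>

static struct scsi_host_template example_template = {
	.module       = THIS_MODULE,
	.name         = "example",
	.can_queue    = 1,
	.this_id      = -1,
	.sg_tablesize = SG_ALL,
	.dma_boundary = PAGE_SIZE - 1,	/* was: .use_clustering = DISABLE_CLUSTERING */
};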
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 9d09228eee28..e35ce762d454 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1287,7 +1287,6 @@ static struct scsi_host_template qpti_template = {
1287 .can_queue = QLOGICPTI_REQ_QUEUE_LEN, 1287 .can_queue = QLOGICPTI_REQ_QUEUE_LEN,
1288 .this_id = 7, 1288 .this_id = 7,
1289 .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), 1289 .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN),
1290 .use_clustering = ENABLE_CLUSTERING,
1291}; 1290};
1292 1291
1293static const struct of_device_id qpti_match[]; 1292static const struct of_device_id qpti_match[];
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4740f1e9dd17..661512bec3ac 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -3973,7 +3973,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
3973 return 1; /* no resources, will be marked offline */ 3973 return 1; /* no resources, will be marked offline */
3974 } 3974 }
3975 sdp->hostdata = devip; 3975 sdp->hostdata = devip;
3976 blk_queue_max_segment_size(sdp->request_queue, -1U);
3977 if (sdebug_no_uld) 3976 if (sdebug_no_uld)
3978 sdp->no_uld_attach = 1; 3977 sdp->no_uld_attach = 1;
3979 config_cdb_len(sdp); 3978 config_cdb_len(sdp);
@@ -5851,7 +5850,7 @@ static struct scsi_host_template sdebug_driver_template = {
5851 .sg_tablesize = SG_MAX_SEGMENTS, 5850 .sg_tablesize = SG_MAX_SEGMENTS,
5852 .cmd_per_lun = DEF_CMD_PER_LUN, 5851 .cmd_per_lun = DEF_CMD_PER_LUN,
5853 .max_sectors = -1U, 5852 .max_sectors = -1U,
5854 .use_clustering = DISABLE_CLUSTERING, 5853 .max_segment_size = -1U,
5855 .module = THIS_MODULE, 5854 .module = THIS_MODULE,
5856 .track_queue_depth = 1, 5855 .track_queue_depth = 1,
5857}; 5856};
@@ -5866,8 +5865,9 @@ static int sdebug_driver_probe(struct device *dev)
5866 sdbg_host = to_sdebug_host(dev); 5865 sdbg_host = to_sdebug_host(dev);
5867 5866
5868 sdebug_driver_template.can_queue = sdebug_max_queue; 5867 sdebug_driver_template.can_queue = sdebug_max_queue;
5869 if (sdebug_clustering) 5868 if (!sdebug_clustering)
5870 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; 5869 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
5870
5871 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 5871 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5872 if (NULL == hpnt) { 5872 if (NULL == hpnt) {
5873 pr_err("scsi_host_alloc failed\n"); 5873 pr_err("scsi_host_alloc failed\n");
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0dbf25512778..b13cc9288ba0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1842,10 +1842,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1842 blk_queue_segment_boundary(q, shost->dma_boundary); 1842 blk_queue_segment_boundary(q, shost->dma_boundary);
1843 dma_set_seg_boundary(dev, shost->dma_boundary); 1843 dma_set_seg_boundary(dev, shost->dma_boundary);
1844 1844
1845 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 1845 blk_queue_max_segment_size(q,
1846 1846 min(shost->max_segment_size, dma_get_max_seg_size(dev)));
1847 if (!shost->use_clustering)
1848 q->limits.cluster = 0;
1849 1847
1850 /* 1848 /*
1851 * Set a reasonable default alignment: The larger of 32-byte (dword), 1849 * Set a reasonable default alignment: The larger of 32-byte (dword),
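__scsi_init_queue() now clamps the queue's segment size to the smaller of the host template's new max_segment_size field and the DMA layer's per-device cap, so drivers declare the limit instead of calling blk_queue_max_segment_size() from slave_configure(), which is exactly the call dropped from scsi_debug above. A sketch of the driver-side usage, assuming the field semantics shown in the hunk:

#include <linux/scatterlist.h>	/* SG_MAX_SEGMENTS */
#include <scsi/scsi_host.h>

static struct scsi_host_template example_template = {
	.sg_tablesize     = SG_MAX_SEGMENTS,
	.max_sectors      = -1U,
	.max_segment_size = -1U,	/* no driver cap; dma_get_max_seg_size() still applies */
};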
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ff123023e5a5..0508831d6fb9 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,6 +37,18 @@
37 37
38#define ISCSI_TRANSPORT_VERSION "2.0-870" 38#define ISCSI_TRANSPORT_VERSION "2.0-870"
39 39
40#define CREATE_TRACE_POINTS
41#include <trace/events/iscsi.h>
42
43/*
44 * Export tracepoint symbols to be used by other modules.
45 */
46EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_conn);
47EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_eh);
48EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_session);
49EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_tcp);
50EXPORT_TRACEPOINT_SYMBOL_GPL(iscsi_dbg_sw_tcp);
51
40static int dbg_session; 52static int dbg_session;
41module_param_named(debug_session, dbg_session, int, 53module_param_named(debug_session, dbg_session, int,
42 S_IRUGO | S_IWUSR); 54 S_IRUGO | S_IWUSR);
@@ -59,6 +71,9 @@ MODULE_PARM_DESC(debug_conn,
59 iscsi_cls_session_printk(KERN_INFO, _session, \ 71 iscsi_cls_session_printk(KERN_INFO, _session, \
60 "%s: " dbg_fmt, \ 72 "%s: " dbg_fmt, \
61 __func__, ##arg); \ 73 __func__, ##arg); \
74 iscsi_dbg_trace(trace_iscsi_dbg_trans_session, \
75 &(_session)->dev, \
76 "%s " dbg_fmt, __func__, ##arg); \
62 } while (0); 77 } while (0);
63 78
64#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...) \ 79#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...) \
@@ -66,7 +81,10 @@ MODULE_PARM_DESC(debug_conn,
66 if (dbg_conn) \ 81 if (dbg_conn) \
67 iscsi_cls_conn_printk(KERN_INFO, _conn, \ 82 iscsi_cls_conn_printk(KERN_INFO, _conn, \
68 "%s: " dbg_fmt, \ 83 "%s: " dbg_fmt, \
69 __func__, ##arg); \ 84 __func__, ##arg); \
85 iscsi_dbg_trace(trace_iscsi_dbg_trans_conn, \
86 &(_conn)->dev, \
87 "%s " dbg_fmt, __func__, ##arg); \
70 } while (0); 88 } while (0);
71 89
72struct iscsi_internal { 90struct iscsi_internal {
@@ -4494,6 +4512,20 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
4494} 4512}
4495EXPORT_SYMBOL_GPL(iscsi_unregister_transport); 4513EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
4496 4514
4515void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *),
4516 struct device *dev, const char *fmt, ...)
4517{
4518 struct va_format vaf;
4519 va_list args;
4520
4521 va_start(args, fmt);
4522 vaf.fmt = fmt;
4523 vaf.va = &args;
4524 trace(dev, &vaf);
4525 va_end(args);
4526}
4527EXPORT_SYMBOL_GPL(iscsi_dbg_trace);
4528
4497static __init int iscsi_transport_init(void) 4529static __init int iscsi_transport_init(void)
4498{ 4530{
4499 int err; 4531 int err;
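The new iscsi_dbg_trace() packages its varargs into a struct va_format once and hands the pair to whichever tracepoint is passed in, so each ISCSI_DBG_* macro can feed both the printk path and a tracepoint without expanding its argument list twice; the trace event then records the formatted message from the va_format. A hypothetical call site, mirroring the macro bodies above (session and target_id are stand-ins):

/* The ISCSI_DBG_TRANS_SESSION() macro generates an equivalent call. */
iscsi_dbg_trace(trace_iscsi_dbg_trans_session, &session->dev,
		"%s scanning target %u\n", __func__, target_id);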
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 5ed696dc9bbd..713bce998b0e 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -208,7 +208,7 @@ static struct scsi_host_template sgiwd93_template = {
208 .this_id = 7, 208 .this_id = 7,
209 .sg_tablesize = SG_ALL, 209 .sg_tablesize = SG_ALL,
210 .cmd_per_lun = 8, 210 .cmd_per_lun = 8,
211 .use_clustering = DISABLE_CLUSTERING, 211 .dma_boundary = PAGE_SIZE - 1,
212}; 212};
213 213
214static int sgiwd93_probe(struct platform_device *pdev) 214static int sgiwd93_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e97bf2670315..af962368818b 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -21,6 +21,9 @@
21#if !defined(_SMARTPQI_H) 21#if !defined(_SMARTPQI_H)
22#define _SMARTPQI_H 22#define _SMARTPQI_H
23 23
24#include <scsi/scsi_host.h>
25#include <linux/bsg-lib.h>
26
24#pragma pack(1) 27#pragma pack(1)
25 28
26#define PQI_DEVICE_SIGNATURE "PQI DREG" 29#define PQI_DEVICE_SIGNATURE "PQI DREG"
@@ -97,6 +100,12 @@ struct pqi_ctrl_registers {
97 struct pqi_device_registers pqi_registers; /* 4000h */ 100 struct pqi_device_registers pqi_registers; /* 4000h */
98}; 101};
99 102
103#if ((HZ) < 1000)
104#define PQI_HZ 1000
105#else
106#define PQI_HZ (HZ)
107#endif
108
100#define PQI_DEVICE_REGISTERS_OFFSET 0x4000 109#define PQI_DEVICE_REGISTERS_OFFSET 0x4000
101 110
102enum pqi_io_path { 111enum pqi_io_path {
@@ -347,6 +356,10 @@ struct pqi_event_config {
347 356
348#define PQI_MAX_EVENT_DESCRIPTORS 255 357#define PQI_MAX_EVENT_DESCRIPTORS 255
349 358
359#define PQI_EVENT_OFA_MEMORY_ALLOCATION 0x0
360#define PQI_EVENT_OFA_QUIESCE 0x1
361#define PQI_EVENT_OFA_CANCELLED 0x2
362
350struct pqi_event_response { 363struct pqi_event_response {
351 struct pqi_iu_header header; 364 struct pqi_iu_header header;
352 u8 event_type; 365 u8 event_type;
@@ -354,7 +367,17 @@ struct pqi_event_response {
354 u8 request_acknowlege : 1; 367 u8 request_acknowlege : 1;
355 __le16 event_id; 368 __le16 event_id;
356 __le32 additional_event_id; 369 __le32 additional_event_id;
357 u8 data[16]; 370 union {
371 struct {
372 __le32 bytes_requested;
373 u8 reserved[12];
374 } ofa_memory_allocation;
375
376 struct {
377 __le16 reason; /* reason for cancellation */
378 u8 reserved[14];
379 } ofa_cancelled;
380 } data;
358}; 381};
359 382
360struct pqi_event_acknowledge_request { 383struct pqi_event_acknowledge_request {
@@ -389,6 +412,54 @@ struct pqi_task_management_response {
389 u8 response_code; 412 u8 response_code;
390}; 413};
391 414
415struct pqi_vendor_general_request {
416 struct pqi_iu_header header;
417 __le16 request_id;
418 __le16 function_code;
419 union {
420 struct {
421 __le16 first_section;
422 __le16 last_section;
423 u8 reserved[48];
424 } config_table_update;
425
426 struct {
427 __le64 buffer_address;
428 __le32 buffer_length;
429 u8 reserved[40];
430 } ofa_memory_allocation;
431 } data;
432};
433
434struct pqi_vendor_general_response {
435 struct pqi_iu_header header;
436 __le16 request_id;
437 __le16 function_code;
438 __le16 status;
439 u8 reserved[2];
440};
441
442#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
443#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1
444
445#define PQI_OFA_VERSION 1
446#define PQI_OFA_SIGNATURE "OFA_QRM"
447#define PQI_OFA_MAX_SG_DESCRIPTORS 64
448
449#define PQI_OFA_MEMORY_DESCRIPTOR_LENGTH \
450 (offsetof(struct pqi_ofa_memory, sg_descriptor) + \
451 (PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor)))
452
453struct pqi_ofa_memory {
454 __le64 signature; /* "OFA_QRM" */
455 __le16 version; /* version of this struct(1 = 1st version) */
456 u8 reserved[62];
457 __le32 bytes_allocated; /* total allocated memory in bytes */
458 __le16 num_memory_descriptors;
459 u8 reserved1[2];
460 struct pqi_sg_descriptor sg_descriptor[1];
461};
462
392struct pqi_aio_error_info { 463struct pqi_aio_error_info {
393 u8 status; 464 u8 status;
394 u8 service_response; 465 u8 service_response;
@@ -419,6 +490,7 @@ struct pqi_raid_error_info {
419#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60 490#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
420#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72 491#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
421#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73 492#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
493#define PQI_REQUEST_IU_VENDOR_GENERAL 0x75
422#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6 494#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
423 495
424#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81 496#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
@@ -430,6 +502,7 @@ struct pqi_raid_error_info {
430#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3 502#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3
431#define PQI_RESPONSE_IU_AIO_PATH_DISABLED 0xf4 503#define PQI_RESPONSE_IU_AIO_PATH_DISABLED 0xf4
432#define PQI_RESPONSE_IU_VENDOR_EVENT 0xf5 504#define PQI_RESPONSE_IU_VENDOR_EVENT 0xf5
505#define PQI_RESPONSE_IU_VENDOR_GENERAL 0xf7
433 506
434#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY 0x0 507#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY 0x0
435#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ 0x10 508#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ 0x10
@@ -492,6 +565,7 @@ struct pqi_raid_error_info {
492#define PQI_EVENT_TYPE_HARDWARE 0x2 565#define PQI_EVENT_TYPE_HARDWARE 0x2
493#define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4 566#define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4
494#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5 567#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
568#define PQI_EVENT_TYPE_OFA 0xfb
495#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd 569#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
496#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe 570#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
497 571
@@ -556,6 +630,7 @@ typedef u32 pqi_index_t;
556#define SOP_TASK_ATTRIBUTE_ACA 4 630#define SOP_TASK_ATTRIBUTE_ACA 4
557 631
558#define SOP_TMF_COMPLETE 0x0 632#define SOP_TMF_COMPLETE 0x0
633#define SOP_TMF_REJECTED 0x4
559#define SOP_TMF_FUNCTION_SUCCEEDED 0x8 634#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
560 635
561/* additional CDB bytes usage field codes */ 636/* additional CDB bytes usage field codes */
@@ -644,11 +719,13 @@ struct pqi_encryption_info {
644#define PQI_CONFIG_TABLE_MAX_LENGTH ((u16)~0) 719#define PQI_CONFIG_TABLE_MAX_LENGTH ((u16)~0)
645 720
646/* configuration table section IDs */ 721/* configuration table section IDs */
722#define PQI_CONFIG_TABLE_ALL_SECTIONS (-1)
647#define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO 0 723#define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO 0
648#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES 1 724#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES 1
649#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA 2 725#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA 2
650#define PQI_CONFIG_TABLE_SECTION_DEBUG 3 726#define PQI_CONFIG_TABLE_SECTION_DEBUG 3
651#define PQI_CONFIG_TABLE_SECTION_HEARTBEAT 4 727#define PQI_CONFIG_TABLE_SECTION_HEARTBEAT 4
728#define PQI_CONFIG_TABLE_SECTION_SOFT_RESET 5
652 729
653struct pqi_config_table { 730struct pqi_config_table {
654 u8 signature[8]; /* "CFGTABLE" */ 731 u8 signature[8]; /* "CFGTABLE" */
@@ -680,6 +757,18 @@ struct pqi_config_table_general_info {
680 /* command */ 757 /* command */
681}; 758};
682 759
760struct pqi_config_table_firmware_features {
761 struct pqi_config_table_section_header header;
762 __le16 num_elements;
763 u8 features_supported[];
764/* u8 features_requested_by_host[]; */
765/* u8 features_enabled[]; */
766};
767
768#define PQI_FIRMWARE_FEATURE_OFA 0
769#define PQI_FIRMWARE_FEATURE_SMP 1
770#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
771
683struct pqi_config_table_debug { 772struct pqi_config_table_debug {
684 struct pqi_config_table_section_header header; 773 struct pqi_config_table_section_header header;
685 __le32 scratchpad; 774 __le32 scratchpad;
@@ -690,6 +779,22 @@ struct pqi_config_table_heartbeat {
690 __le32 heartbeat_counter; 779 __le32 heartbeat_counter;
691}; 780};
692 781
782struct pqi_config_table_soft_reset {
783 struct pqi_config_table_section_header header;
784 u8 soft_reset_status;
785};
786
787#define PQI_SOFT_RESET_INITIATE 0x1
788#define PQI_SOFT_RESET_ABORT 0x2
789
790enum pqi_soft_reset_status {
791 RESET_INITIATE_FIRMWARE,
792 RESET_INITIATE_DRIVER,
793 RESET_ABORT,
794 RESET_NORESPONSE,
795 RESET_TIMEDOUT
796};
797
693union pqi_reset_register { 798union pqi_reset_register {
694 struct { 799 struct {
695 u32 reset_type : 3; 800 u32 reset_type : 3;
@@ -808,8 +913,10 @@ struct pqi_scsi_dev {
808 u8 scsi3addr[8]; 913 u8 scsi3addr[8];
809 __be64 wwid; 914 __be64 wwid;
810 u8 volume_id[16]; 915 u8 volume_id[16];
916 u8 unique_id[16];
811 u8 is_physical_device : 1; 917 u8 is_physical_device : 1;
812 u8 is_external_raid_device : 1; 918 u8 is_external_raid_device : 1;
919 u8 is_expander_smp_device : 1;
813 u8 target_lun_valid : 1; 920 u8 target_lun_valid : 1;
814 u8 device_gone : 1; 921 u8 device_gone : 1;
815 u8 new_device : 1; 922 u8 new_device : 1;
@@ -817,6 +924,7 @@ struct pqi_scsi_dev {
817 u8 volume_offline : 1; 924 u8 volume_offline : 1;
818 bool aio_enabled; /* only valid for physical disks */ 925 bool aio_enabled; /* only valid for physical disks */
819 bool in_reset; 926 bool in_reset;
927 bool in_remove;
820 bool device_offline; 928 bool device_offline;
821 u8 vendor[8]; /* bytes 8-15 of inquiry data */ 929 u8 vendor[8]; /* bytes 8-15 of inquiry data */
822 u8 model[16]; /* bytes 16-31 of inquiry data */ 930 u8 model[16]; /* bytes 16-31 of inquiry data */
@@ -854,6 +962,8 @@ struct pqi_scsi_dev {
854#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */ 962#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
855#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */ 963#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */
856#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */ 964#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
965#define SCSI_VPD_HEADER_SZ 4
966#define SCSI_VPD_DEVICE_ID_IDX 8 /* Index of page id in page */
857 967
858#define VPD_PAGE (1 << 8) 968#define VPD_PAGE (1 << 8)
859 969
@@ -916,6 +1026,7 @@ struct pqi_sas_node {
916struct pqi_sas_port { 1026struct pqi_sas_port {
917 struct list_head port_list_entry; 1027 struct list_head port_list_entry;
918 u64 sas_address; 1028 u64 sas_address;
1029 struct pqi_scsi_dev *device;
919 struct sas_port *port; 1030 struct sas_port *port;
920 int next_phy_index; 1031 int next_phy_index;
921 struct list_head phy_list_head; 1032 struct list_head phy_list_head;
@@ -947,13 +1058,15 @@ struct pqi_io_request {
947 struct list_head request_list_entry; 1058 struct list_head request_list_entry;
948}; 1059};
949 1060
950#define PQI_NUM_SUPPORTED_EVENTS 6 1061#define PQI_NUM_SUPPORTED_EVENTS 7
951 1062
952struct pqi_event { 1063struct pqi_event {
953 bool pending; 1064 bool pending;
954 u8 event_type; 1065 u8 event_type;
955 __le16 event_id; 1066 __le16 event_id;
956 __le32 additional_event_id; 1067 __le32 additional_event_id;
1068 __le32 ofa_bytes_requested;
1069 __le16 ofa_cancel_reason;
957}; 1070};
958 1071
959#define PQI_RESERVED_IO_SLOTS_LUN_RESET 1 1072#define PQI_RESERVED_IO_SLOTS_LUN_RESET 1
@@ -1014,12 +1127,16 @@ struct pqi_ctrl_info {
1014 1127
1015 struct mutex scan_mutex; 1128 struct mutex scan_mutex;
1016 struct mutex lun_reset_mutex; 1129 struct mutex lun_reset_mutex;
1130 struct mutex ofa_mutex; /* serialize ofa */
1017 bool controller_online; 1131 bool controller_online;
1018 bool block_requests; 1132 bool block_requests;
1133 bool in_shutdown;
1134 bool in_ofa;
1019 u8 inbound_spanning_supported : 1; 1135 u8 inbound_spanning_supported : 1;
1020 u8 outbound_spanning_supported : 1; 1136 u8 outbound_spanning_supported : 1;
1021 u8 pqi_mode_enabled : 1; 1137 u8 pqi_mode_enabled : 1;
1022 u8 pqi_reset_quiesce_supported : 1; 1138 u8 pqi_reset_quiesce_supported : 1;
1139 u8 soft_reset_handshake_supported : 1;
1023 1140
1024 struct list_head scsi_device_list; 1141 struct list_head scsi_device_list;
1025 spinlock_t scsi_device_list_lock; 1142 spinlock_t scsi_device_list_lock;
@@ -1040,6 +1157,7 @@ struct pqi_ctrl_info {
1040 int previous_num_interrupts; 1157 int previous_num_interrupts;
1041 u32 previous_heartbeat_count; 1158 u32 previous_heartbeat_count;
1042 __le32 __iomem *heartbeat_counter; 1159 __le32 __iomem *heartbeat_counter;
1160 u8 __iomem *soft_reset_status;
1043 struct timer_list heartbeat_timer; 1161 struct timer_list heartbeat_timer;
1044 struct work_struct ctrl_offline_work; 1162 struct work_struct ctrl_offline_work;
1045 1163
@@ -1051,6 +1169,10 @@ struct pqi_ctrl_info {
1051 struct list_head raid_bypass_retry_list; 1169 struct list_head raid_bypass_retry_list;
1052 spinlock_t raid_bypass_retry_list_lock; 1170 spinlock_t raid_bypass_retry_list_lock;
1053 struct work_struct raid_bypass_retry_work; 1171 struct work_struct raid_bypass_retry_work;
1172
1173 struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
1174 dma_addr_t pqi_ofa_mem_dma_handle;
1175 void **pqi_ofa_chunk_virt_addr;
1054}; 1176};
1055 1177
1056enum pqi_ctrl_mode { 1178enum pqi_ctrl_mode {
@@ -1080,8 +1202,13 @@ enum pqi_ctrl_mode {
1080#define BMIC_WRITE 0x27 1202#define BMIC_WRITE 0x27
1081#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 1203#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64
1082#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66 1204#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
1205#define BMIC_CSMI_PASSTHRU 0x68
1083#define BMIC_WRITE_HOST_WELLNESS 0xa5 1206#define BMIC_WRITE_HOST_WELLNESS 0xa5
1084#define BMIC_FLUSH_CACHE 0xc2 1207#define BMIC_FLUSH_CACHE 0xc2
1208#define BMIC_SET_DIAG_OPTIONS 0xf4
1209#define BMIC_SENSE_DIAG_OPTIONS 0xf5
1210
1211#define CSMI_CC_SAS_SMP_PASSTHRU 0X17
1085 1212
1086#define SA_FLUSH_CACHE 0x1 1213#define SA_FLUSH_CACHE 0x1
1087 1214
@@ -1109,6 +1236,10 @@ struct bmic_identify_controller {
1109 u8 reserved3[32]; 1236 u8 reserved3[32];
1110}; 1237};
1111 1238
1239#define SA_EXPANDER_SMP_DEVICE 0x05
1240/*SCSI Invalid Device Type for SAS devices*/
1241#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff
1242
1112struct bmic_identify_physical_device { 1243struct bmic_identify_physical_device {
1113 u8 scsi_bus; /* SCSI Bus number on controller */ 1244 u8 scsi_bus; /* SCSI Bus number on controller */
1114 u8 scsi_id; /* SCSI ID on this bus */ 1245 u8 scsi_id; /* SCSI ID on this bus */
@@ -1189,6 +1320,50 @@ struct bmic_identify_physical_device {
1189 u8 padding_to_multiple_of_512[9]; 1320 u8 padding_to_multiple_of_512[9];
1190}; 1321};
1191 1322
1323struct bmic_smp_request {
1324 u8 frame_type;
1325 u8 function;
1326 u8 allocated_response_length;
1327 u8 request_length;
1328 u8 additional_request_bytes[1016];
1329};
1330
1331struct bmic_smp_response {
1332 u8 frame_type;
1333 u8 function;
1334 u8 function_result;
1335 u8 response_length;
1336 u8 additional_response_bytes[1016];
1337};
1338
1339struct bmic_csmi_ioctl_header {
1340 __le32 header_length;
1341 u8 signature[8];
1342 __le32 timeout;
1343 __le32 control_code;
1344 __le32 return_code;
1345 __le32 length;
1346};
1347
1348struct bmic_csmi_smp_passthru {
1349 u8 phy_identifier;
1350 u8 port_identifier;
1351 u8 connection_rate;
1352 u8 reserved;
1353 __be64 destination_sas_address;
1354 __le32 request_length;
1355 struct bmic_smp_request request;
1356 u8 connection_status;
1357 u8 reserved1[3];
1358 __le32 response_length;
1359 struct bmic_smp_response response;
1360};
1361
1362struct bmic_csmi_smp_passthru_buffer {
1363 struct bmic_csmi_ioctl_header ioctl_header;
1364 struct bmic_csmi_smp_passthru parameters;
1365};
1366
1192struct bmic_flush_cache { 1367struct bmic_flush_cache {
1193 u8 disable_flag; 1368 u8 disable_flag;
1194 u8 system_power_action; 1369 u8 system_power_action;
@@ -1206,8 +1381,42 @@ enum bmic_flush_cache_shutdown_event {
1206 RESTART = 4 1381 RESTART = 4
1207}; 1382};
1208 1383
1384struct bmic_diag_options {
1385 __le32 options;
1386};
1387
1209#pragma pack() 1388#pragma pack()
1210 1389
1390static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
1391{
1392 void *hostdata = shost_priv(shost);
1393
1394 return *((struct pqi_ctrl_info **)hostdata);
1395}
1396
1397static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
1398{
1399 return !ctrl_info->controller_online;
1400}
1401
1402static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
1403{
1404 atomic_inc(&ctrl_info->num_busy_threads);
1405}
1406
1407static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
1408{
1409 atomic_dec(&ctrl_info->num_busy_threads);
1410}
1411
1412static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
1413{
1414 return ctrl_info->block_requests;
1415}
1416
1417void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
1418 struct sas_rphy *rphy);
1419
1211int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info); 1420int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
1212void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info); 1421void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
1213int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node, 1422int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
@@ -1216,6 +1425,9 @@ void pqi_remove_sas_device(struct pqi_scsi_dev *device);
1216struct pqi_scsi_dev *pqi_find_device_by_sas_rphy( 1425struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
1217 struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy); 1426 struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
1218void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd); 1427void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd);
1428int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
1429 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
1430 struct pqi_raid_error_info *error_info);
1219 1431
1220extern struct sas_function_template pqi_sas_transport_functions; 1432extern struct sas_function_template pqi_sas_transport_functions;
1221 1433
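One detail worth noting in the smartpqi header: struct pqi_ofa_memory ends in a one-element sg_descriptor array, so PQI_OFA_MEMORY_DESCRIPTOR_LENGTH is computed from offsetof() plus the real descriptor count; a plain sizeof() would charge for the placeholder element. A generic sketch of the same sizing idiom, with invented names:

#include <stddef.h>

struct example_hdr {
	unsigned int bytes_allocated;
	unsigned short num_descriptors;
	struct example_sg { unsigned long long addr; unsigned int len; } sg[1];
};

/* Bytes needed for a header followed by n real descriptors. */
#define EXAMPLE_HDR_LENGTH(n) \
	(offsetof(struct example_hdr, sg) + (n) * sizeof(struct example_sg))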
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index bac084260d80..e2fa3f476227 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
40#define BUILD_TIMESTAMP 40#define BUILD_TIMESTAMP
41#endif 41#endif
42 42
43#define DRIVER_VERSION "1.1.4-130" 43#define DRIVER_VERSION "1.2.4-070"
44#define DRIVER_MAJOR 1 44#define DRIVER_MAJOR 1
45#define DRIVER_MINOR 1 45#define DRIVER_MINOR 2
46#define DRIVER_RELEASE 4 46#define DRIVER_RELEASE 4
47#define DRIVER_REVISION 130 47#define DRIVER_REVISION 70
48 48
49#define DRIVER_NAME "Microsemi PQI Driver (v" \ 49#define DRIVER_NAME "Microsemi PQI Driver (v" \
50 DRIVER_VERSION BUILD_TIMESTAMP ")" 50 DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -74,6 +74,15 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
74 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 74 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
75 unsigned int cdb_length, struct pqi_queue_group *queue_group, 75 unsigned int cdb_length, struct pqi_queue_group *queue_group,
76 struct pqi_encryption_info *encryption_info, bool raid_bypass); 76 struct pqi_encryption_info *encryption_info, bool raid_bypass);
77static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
78static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
79static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
80static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
81 u32 bytes_requested);
82static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
83static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
84static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
85 struct pqi_scsi_dev *device, unsigned long timeout_secs);
77 86
78/* for flags argument to pqi_submit_raid_request_synchronous() */ 87/* for flags argument to pqi_submit_raid_request_synchronous() */
79#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 88#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -113,6 +122,7 @@ static unsigned int pqi_supported_event_types[] = {
113 PQI_EVENT_TYPE_HARDWARE, 122 PQI_EVENT_TYPE_HARDWARE,
114 PQI_EVENT_TYPE_PHYSICAL_DEVICE, 123 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
115 PQI_EVENT_TYPE_LOGICAL_DEVICE, 124 PQI_EVENT_TYPE_LOGICAL_DEVICE,
125 PQI_EVENT_TYPE_OFA,
116 PQI_EVENT_TYPE_AIO_STATE_CHANGE, 126 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
117 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, 127 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
118}; 128};
@@ -176,16 +186,14 @@ static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
176 scmd->scsi_done(scmd); 186 scmd->scsi_done(scmd);
177} 187}
178 188
179static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) 189static inline void pqi_disable_write_same(struct scsi_device *sdev)
180{ 190{
181 return memcmp(scsi3addr1, scsi3addr2, 8) == 0; 191 sdev->no_write_same = 1;
182} 192}
183 193
184static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost) 194static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
185{ 195{
186 void *hostdata = shost_priv(shost); 196 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
187
188 return *((struct pqi_ctrl_info **)hostdata);
189} 197}
190 198
191static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) 199static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
@@ -198,11 +206,6 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
198 return scsi3addr[2] != 0; 206 return scsi3addr[2] != 0;
199} 207}
200 208
201static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
202{
203 return !ctrl_info->controller_online;
204}
205
206static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) 209static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
207{ 210{
208 if (ctrl_info->controller_online) 211 if (ctrl_info->controller_online)
@@ -241,11 +244,6 @@ static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
241 scsi_unblock_requests(ctrl_info->scsi_host); 244 scsi_unblock_requests(ctrl_info->scsi_host);
242} 245}
243 246
244static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
245{
246 return ctrl_info->block_requests;
247}
248
249static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info, 247static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
250 unsigned long timeout_msecs) 248 unsigned long timeout_msecs)
251{ 249{
@@ -275,16 +273,6 @@ static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
275 return remaining_msecs; 273 return remaining_msecs;
276} 274}
277 275
278static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
279{
280 atomic_inc(&ctrl_info->num_busy_threads);
281}
282
283static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
284{
285 atomic_dec(&ctrl_info->num_busy_threads);
286}
287
288static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) 276static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
289{ 277{
290 while (atomic_read(&ctrl_info->num_busy_threads) > 278 while (atomic_read(&ctrl_info->num_busy_threads) >
@@ -312,11 +300,39 @@ static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
312 return device->in_reset; 300 return device->in_reset;
313} 301}
314 302
303static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
304{
305 ctrl_info->in_ofa = true;
306}
307
308static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
309{
310 ctrl_info->in_ofa = false;
311}
312
313static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
314{
315 return ctrl_info->in_ofa;
316}
317
318static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
319{
320 device->in_remove = true;
321}
322
323static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
324 struct pqi_scsi_dev *device)
325{
326 return device->in_remove && !ctrl_info->in_shutdown;
327}
328
315static inline void pqi_schedule_rescan_worker_with_delay( 329static inline void pqi_schedule_rescan_worker_with_delay(
316 struct pqi_ctrl_info *ctrl_info, unsigned long delay) 330 struct pqi_ctrl_info *ctrl_info, unsigned long delay)
317{ 331{
318 if (pqi_ctrl_offline(ctrl_info)) 332 if (pqi_ctrl_offline(ctrl_info))
319 return; 333 return;
334 if (pqi_ctrl_in_ofa(ctrl_info))
335 return;
320 336
321 schedule_delayed_work(&ctrl_info->rescan_work, delay); 337 schedule_delayed_work(&ctrl_info->rescan_work, delay);
322} 338}
@@ -326,7 +342,7 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
326 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); 342 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
327} 343}
328 344
329#define PQI_RESCAN_WORK_DELAY (10 * HZ) 345#define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
330 346
331static inline void pqi_schedule_rescan_worker_delayed( 347static inline void pqi_schedule_rescan_worker_delayed(
332 struct pqi_ctrl_info *ctrl_info) 348 struct pqi_ctrl_info *ctrl_info)
@@ -347,6 +363,27 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
347 return readl(ctrl_info->heartbeat_counter); 363 return readl(ctrl_info->heartbeat_counter);
348} 364}
349 365
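/*
 * The soft reset status register is optional; when the controller does
 * not provide it, soft_reset_status is NULL and reads report 0.
 */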
366static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
367{
368 if (!ctrl_info->soft_reset_status)
369 return 0;
370
371 return readb(ctrl_info->soft_reset_status);
372}
373
374static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
375 u8 clear)
376{
377 u8 status;
378
379 if (!ctrl_info->soft_reset_status)
380 return;
381
382 status = pqi_read_soft_reset_status(ctrl_info);
383 status &= ~clear;
384 writeb(status, ctrl_info->soft_reset_status);
385}
386
350static int pqi_map_single(struct pci_dev *pci_dev, 387static int pqi_map_single(struct pci_dev *pci_dev,
351 struct pqi_sg_descriptor *sg_descriptor, void *buffer, 388 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
352 size_t buffer_length, enum dma_data_direction data_direction) 389 size_t buffer_length, enum dma_data_direction data_direction)
@@ -390,6 +427,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
390 u16 vpd_page, enum dma_data_direction *dir) 427 u16 vpd_page, enum dma_data_direction *dir)
391{ 428{
392 u8 *cdb; 429 u8 *cdb;
430 size_t cdb_length = buffer_length;
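/*
 * Most commands encode the transfer length in the CDB; the BMIC
 * sense/set diagnostic options cases below zero cdb_length while the
 * data buffer itself is still mapped using buffer_length.
 */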
393 431
394 memset(request, 0, sizeof(*request)); 432 memset(request, 0, sizeof(*request));
395 433
@@ -412,7 +450,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
412 cdb[1] = 0x1; 450 cdb[1] = 0x1;
413 cdb[2] = (u8)vpd_page; 451 cdb[2] = (u8)vpd_page;
414 } 452 }
415 cdb[4] = (u8)buffer_length; 453 cdb[4] = (u8)cdb_length;
416 break; 454 break;
417 case CISS_REPORT_LOG: 455 case CISS_REPORT_LOG:
418 case CISS_REPORT_PHYS: 456 case CISS_REPORT_PHYS:
@@ -422,32 +460,45 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
422 cdb[1] = CISS_REPORT_PHYS_EXTENDED; 460 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
423 else 461 else
424 cdb[1] = CISS_REPORT_LOG_EXTENDED; 462 cdb[1] = CISS_REPORT_LOG_EXTENDED;
425 put_unaligned_be32(buffer_length, &cdb[6]); 463 put_unaligned_be32(cdb_length, &cdb[6]);
426 break; 464 break;
427 case CISS_GET_RAID_MAP: 465 case CISS_GET_RAID_MAP:
428 request->data_direction = SOP_READ_FLAG; 466 request->data_direction = SOP_READ_FLAG;
429 cdb[0] = CISS_READ; 467 cdb[0] = CISS_READ;
430 cdb[1] = CISS_GET_RAID_MAP; 468 cdb[1] = CISS_GET_RAID_MAP;
431 put_unaligned_be32(buffer_length, &cdb[6]); 469 put_unaligned_be32(cdb_length, &cdb[6]);
432 break; 470 break;
433 case SA_FLUSH_CACHE: 471 case SA_FLUSH_CACHE:
434 request->data_direction = SOP_WRITE_FLAG; 472 request->data_direction = SOP_WRITE_FLAG;
435 cdb[0] = BMIC_WRITE; 473 cdb[0] = BMIC_WRITE;
436 cdb[6] = BMIC_FLUSH_CACHE; 474 cdb[6] = BMIC_FLUSH_CACHE;
437 put_unaligned_be16(buffer_length, &cdb[7]); 475 put_unaligned_be16(cdb_length, &cdb[7]);
438 break; 476 break;
477 case BMIC_SENSE_DIAG_OPTIONS:
478 cdb_length = 0;
479 /* fall through */
439 case BMIC_IDENTIFY_CONTROLLER: 480 case BMIC_IDENTIFY_CONTROLLER:
440 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 481 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
441 request->data_direction = SOP_READ_FLAG; 482 request->data_direction = SOP_READ_FLAG;
442 cdb[0] = BMIC_READ; 483 cdb[0] = BMIC_READ;
443 cdb[6] = cmd; 484 cdb[6] = cmd;
444 put_unaligned_be16(buffer_length, &cdb[7]); 485 put_unaligned_be16(cdb_length, &cdb[7]);
445 break; 486 break;
487 case BMIC_SET_DIAG_OPTIONS:
488 cdb_length = 0;
489 /* fall through */
446 case BMIC_WRITE_HOST_WELLNESS: 490 case BMIC_WRITE_HOST_WELLNESS:
447 request->data_direction = SOP_WRITE_FLAG; 491 request->data_direction = SOP_WRITE_FLAG;
448 cdb[0] = BMIC_WRITE; 492 cdb[0] = BMIC_WRITE;
449 cdb[6] = cmd; 493 cdb[6] = cmd;
450 put_unaligned_be16(buffer_length, &cdb[7]); 494 put_unaligned_be16(cdb_length, &cdb[7]);
495 break;
496 case BMIC_CSMI_PASSTHRU:
497 request->data_direction = SOP_BIDIRECTIONAL;
498 cdb[0] = BMIC_WRITE;
499 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
500 cdb[6] = cmd;
501 put_unaligned_be16(cdb_length, &cdb[7]);
451 break; 502 break;
452 default: 503 default:
453 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", 504
@@ -509,43 +560,130 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
509 atomic_dec(&io_request->refcount); 560 atomic_dec(&io_request->refcount);
510} 561}
511 562
512static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 563static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
513 struct bmic_identify_controller *buffer) 564 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
565 struct pqi_raid_error_info *error_info,
566 unsigned long timeout_msecs)
514{ 567{
515 int rc; 568 int rc;
516 enum dma_data_direction dir; 569 enum dma_data_direction dir;
517 struct pqi_raid_path_request request; 570 struct pqi_raid_path_request request;
518 571
519 rc = pqi_build_raid_path_request(ctrl_info, &request, 572 rc = pqi_build_raid_path_request(ctrl_info, &request,
520 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer, 573 cmd, scsi3addr, buffer,
521 sizeof(*buffer), 0, &dir); 574 buffer_length, vpd_page, &dir);
522 if (rc) 575 if (rc)
523 return rc; 576 return rc;
524 577
525 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 578 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
526 NULL, NO_TIMEOUT); 579 0, error_info, timeout_msecs);
527 580
528 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 581 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
529 return rc; 582 return rc;
530} 583}
531 584
532static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 585/* Helper functions for pqi_send_scsi_raid_request */
586
587static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
588 u8 cmd, void *buffer, size_t buffer_length)
589{
590 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
591 buffer, buffer_length, 0, NULL, NO_TIMEOUT);
592}
593
594static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
595 u8 cmd, void *buffer, size_t buffer_length,
596 struct pqi_raid_error_info *error_info)
597{
598 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
599 buffer, buffer_length, 0, error_info, NO_TIMEOUT);
600}
601
603static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
604 struct bmic_identify_controller *buffer)
605{
606 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
607 buffer, sizeof(*buffer));
608}
609
610static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
533 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 611 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
534{ 612{
613 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
614 buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
615}
616
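/*
 * Consult VPD page 0x00 (supported pages) to determine whether the
 * device implements the requested VPD page before fetching it.
 */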
617static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
618 u8 *scsi3addr, u16 vpd_page)
619{
535 int rc; 620 int rc;
536 enum dma_data_direction dir; 621 int i;
537 struct pqi_raid_path_request request; 622 int pages;
623 unsigned char *buf, bufsize;
538 624
539 rc = pqi_build_raid_path_request(ctrl_info, &request, 625 buf = kzalloc(256, GFP_KERNEL);
540 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page, 626 if (!buf)
541 &dir); 627 return false;
542 if (rc)
543 return rc;
544 628
545 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 629 /* Get the size of the page list first */
546 NULL, NO_TIMEOUT); 630 rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
631 VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
632 buf, SCSI_VPD_HEADER_SZ);
633 if (rc != 0)
634 goto exit_unsupported;
635
636 pages = buf[3];
637 if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
638 bufsize = pages + SCSI_VPD_HEADER_SZ;
639 else
640 bufsize = 255;
641
642 /* Get the whole VPD page list */
643 rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
644 VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
645 buf, bufsize);
646 if (rc != 0)
647 goto exit_unsupported;
648
649 pages = buf[3];
650 for (i = 1; i <= pages; i++)
651 if (buf[3 + i] == vpd_page)
652 goto exit_supported;
653
654exit_unsupported:
655 kfree(buf);
656 return false;
657
658exit_supported:
659 kfree(buf);
660 return true;
661}
662
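/* Copy up to 16 bytes of the unique id from VPD page 0x83 (device id). */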
663static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
664 u8 *scsi3addr, u8 *device_id, int buflen)
665{
666 int rc;
667 unsigned char *buf;
668
669 if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
670 return 1; /* function not supported */
671
672 buf = kzalloc(64, GFP_KERNEL);
673 if (!buf)
674 return -ENOMEM;
675
676 rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
677 VPD_PAGE | SCSI_VPD_DEVICE_ID,
678 buf, 64);
679 if (rc == 0) {
680 if (buflen > 16)
681 buflen = 16;
682 memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
683 }
684
685 kfree(buf);
547 686
548 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
549 return rc; 687 return rc;
550} 688}
551 689
@@ -580,9 +718,7 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
580 enum bmic_flush_cache_shutdown_event shutdown_event) 718 enum bmic_flush_cache_shutdown_event shutdown_event)
581{ 719{
582 int rc; 720 int rc;
583 struct pqi_raid_path_request request;
584 struct bmic_flush_cache *flush_cache; 721 struct bmic_flush_cache *flush_cache;
585 enum dma_data_direction dir;
586 722
587 /* 723 /*
588 * Don't bother trying to flush the cache if the controller is 724 * Don't bother trying to flush the cache if the controller is
@@ -597,42 +733,55 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
597 733
598 flush_cache->shutdown_event = shutdown_event; 734 flush_cache->shutdown_event = shutdown_event;
599 735
600 rc = pqi_build_raid_path_request(ctrl_info, &request, 736 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
601 SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache, 737 sizeof(*flush_cache));
602 sizeof(*flush_cache), 0, &dir);
603 if (rc)
604 goto out;
605
606 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
607 0, NULL, NO_TIMEOUT);
608 738
609 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
610out:
611 kfree(flush_cache); 739 kfree(flush_cache);
612 740
613 return rc; 741 return rc;
614} 742}
615 743
616static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 744int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
617 void *buffer, size_t buffer_length) 745 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
746 struct pqi_raid_error_info *error_info)
747{
748 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
749 buffer, buffer_length, error_info);
750}
751
752#define PQI_FETCH_PTRAID_DATA (1UL<<31)
753
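/*
 * Read the controller's BMIC diagnostic options, set the PTRAID-data
 * fetch bit, and write the options back.
 */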
754static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
618{ 755{
619 int rc; 756 int rc;
620 struct pqi_raid_path_request request; 757 struct bmic_diag_options *diag;
621 enum dma_data_direction dir;
622 758
623 rc = pqi_build_raid_path_request(ctrl_info, &request, 759 diag = kzalloc(sizeof(*diag), GFP_KERNEL);
624 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer, 760 if (!diag)
625 buffer_length, 0, &dir); 761 return -ENOMEM;
762
763 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
764 diag, sizeof(*diag));
626 if (rc) 765 if (rc)
627 return rc; 766 goto out;
628 767
629 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 768 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
630 0, NULL, NO_TIMEOUT); 769
770 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
771 diag, sizeof(*diag));
772out:
773 kfree(diag);
631 774
632 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
633 return rc; 775 return rc;
634} 776}
635 777
778static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
779 void *buffer, size_t buffer_length)
780{
781 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
782 buffer, buffer_length);
783}
784
636#pragma pack(1) 785#pragma pack(1)
637 786
638struct bmic_host_wellness_driver_version { 787struct bmic_host_wellness_driver_version {
@@ -640,6 +789,7 @@ struct bmic_host_wellness_driver_version {
640 u8 driver_version_tag[2]; 789 u8 driver_version_tag[2];
641 __le16 driver_version_length; 790 __le16 driver_version_length;
642 char driver_version[32]; 791 char driver_version[32];
792 u8 dont_write_tag[2];
643 u8 end_tag[2]; 793 u8 end_tag[2];
644}; 794};
645 795
@@ -669,6 +819,8 @@ static int pqi_write_driver_version_to_host_wellness(
669 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, 819 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
670 sizeof(buffer->driver_version) - 1); 820 sizeof(buffer->driver_version) - 1);
671 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; 821 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
822 buffer->dont_write_tag[0] = 'D';
823 buffer->dont_write_tag[1] = 'W';
672 buffer->end_tag[0] = 'Z'; 824 buffer->end_tag[0] = 'Z';
673 buffer->end_tag[1] = 'Z'; 825 buffer->end_tag[1] = 'Z';
674 826
@@ -742,7 +894,7 @@ static int pqi_write_current_time_to_host_wellness(
742 return rc; 894 return rc;
743} 895}
744 896
745#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 897#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
746 898
747static void pqi_update_time_worker(struct work_struct *work) 899static void pqi_update_time_worker(struct work_struct *work)
748{ 900{
@@ -776,23 +928,11 @@ static inline void pqi_cancel_update_time_worker(
776 cancel_delayed_work_sync(&ctrl_info->update_time_work); 928 cancel_delayed_work_sync(&ctrl_info->update_time_work);
777} 929}
778 930
779static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 931static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
780 void *buffer, size_t buffer_length) 932 void *buffer, size_t buffer_length)
781{ 933{
782 int rc; 934 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
783 enum dma_data_direction dir; 935 buffer_length);
784 struct pqi_raid_path_request request;
785
786 rc = pqi_build_raid_path_request(ctrl_info, &request,
787 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir);
788 if (rc)
789 return rc;
790
791 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
792 NULL, NO_TIMEOUT);
793
794 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
795 return rc;
796} 936}
797 937
798static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 938static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1010,8 +1150,6 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1010 char *err_msg; 1150 char *err_msg;
1011 u32 raid_map_size; 1151 u32 raid_map_size;
1012 u32 r5or6_blocks_per_row; 1152 u32 r5or6_blocks_per_row;
1013 unsigned int num_phys_disks;
1014 unsigned int num_raid_map_entries;
1015 1153
1016 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1154 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1017 1155
@@ -1020,22 +1158,6 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1020 goto bad_raid_map; 1158 goto bad_raid_map;
1021 } 1159 }
1022 1160
1023 if (raid_map_size > sizeof(*raid_map)) {
1024 err_msg = "RAID map too large";
1025 goto bad_raid_map;
1026 }
1027
1028 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1029 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1030 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1031 num_raid_map_entries = num_phys_disks *
1032 get_unaligned_le16(&raid_map->row_cnt);
1033
1034 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
1035 err_msg = "invalid number of map entries in RAID map";
1036 goto bad_raid_map;
1037 }
1038
1039 if (device->raid_level == SA_RAID_1) { 1161 if (device->raid_level == SA_RAID_1) {
1040 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1162 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1041 err_msg = "invalid RAID-1 map"; 1163 err_msg = "invalid RAID-1 map";
@@ -1074,27 +1196,45 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1074 struct pqi_scsi_dev *device) 1196 struct pqi_scsi_dev *device)
1075{ 1197{
1076 int rc; 1198 int rc;
1077 enum dma_data_direction dir; 1199 u32 raid_map_size;
1078 struct pqi_raid_path_request request;
1079 struct raid_map *raid_map; 1200 struct raid_map *raid_map;
1080 1201
1081 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1202 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1082 if (!raid_map) 1203 if (!raid_map)
1083 return -ENOMEM; 1204 return -ENOMEM;
1084 1205
1085 rc = pqi_build_raid_path_request(ctrl_info, &request, 1206 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1086 CISS_GET_RAID_MAP, device->scsi3addr, raid_map, 1207 device->scsi3addr, raid_map, sizeof(*raid_map),
1087 sizeof(*raid_map), 0, &dir); 1208 0, NULL, NO_TIMEOUT);
1209
1088 if (rc) 1210 if (rc)
1089 goto error; 1211 goto error;
1090 1212
1091 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 1213 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1092 NULL, NO_TIMEOUT);
1093 1214
1094 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 1215 if (raid_map_size > sizeof(*raid_map)) {
1095 1216
1096 if (rc) 1217 kfree(raid_map);
1097 goto error; 1218
1219 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1220 if (!raid_map)
1221 return -ENOMEM;
1222
1223 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1224 device->scsi3addr, raid_map, raid_map_size,
1225 0, NULL, NO_TIMEOUT);
1226 if (rc)
1227 goto error;
1228
1229 if (get_unaligned_le32(&raid_map->structure_size)
1230 != raid_map_size) {
1231 dev_warn(&ctrl_info->pci_dev->dev,
1232 "Requested %d bytes, received %d bytes",
1233 raid_map_size,
1234 get_unaligned_le32(&raid_map->structure_size));
1235 rc = -EINVAL;
1236 goto error;
1236 }
1237 }
1098 1238
1099 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); 1239 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1100 if (rc) 1240 if (rc)
@@ -1165,6 +1305,9 @@ static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1165 if (rc) 1305 if (rc)
1166 goto out; 1306 goto out;
1167 1307
1308 if (vpd->page_code != CISS_VPD_LV_STATUS)
1309 goto out;
1310
1168 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1311 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1169 volume_status) + vpd->page_length; 1312 volume_status) + vpd->page_length;
1170 if (page_length < sizeof(*vpd)) 1313 if (page_length < sizeof(*vpd))
@@ -1190,6 +1333,9 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1190 u8 *buffer; 1333 u8 *buffer;
1191 unsigned int retries; 1334 unsigned int retries;
1192 1335
1336 if (device->is_expander_smp_device)
1337 return 0;
1338
1193 buffer = kmalloc(64, GFP_KERNEL); 1339 buffer = kmalloc(64, GFP_KERNEL);
1194 if (!buffer) 1340 if (!buffer)
1195 return -ENOMEM; 1341 return -ENOMEM;
@@ -1225,6 +1371,14 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1225 } 1371 }
1226 } 1372 }
1227 1373
1374 if (pqi_get_device_id(ctrl_info, device->scsi3addr,
1375 device->unique_id, sizeof(device->unique_id)) < 0)
1376 dev_warn(&ctrl_info->pci_dev->dev,
1377 "Can't get device id for scsi %d:%d:%d:%d\n",
1378 ctrl_info->scsi_host->host_no,
1379 device->bus, device->target,
1380 device->lun);
1381
1228out: 1382out:
1229 kfree(buffer); 1383 kfree(buffer);
1230 1384
@@ -1387,9 +1541,24 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1387 return rc; 1541 return rc;
1388} 1542}
1389 1543
1544#define PQI_PENDING_IO_TIMEOUT_SECS 20
1545
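/*
 * Mark the device in_remove (new commands then complete with
 * DID_NO_CONNECT) and give outstanding commands up to 20 seconds to
 * drain before removing it.
 */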
1390static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, 1546static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1391 struct pqi_scsi_dev *device) 1547 struct pqi_scsi_dev *device)
1392{ 1548{
1549 int rc;
1550
1551 pqi_device_remove_start(device);
1552
1553 rc = pqi_device_wait_for_pending_io(ctrl_info, device,
1554 PQI_PENDING_IO_TIMEOUT_SECS);
1555 if (rc)
1556 dev_err(&ctrl_info->pci_dev->dev,
1557 "scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
1558 ctrl_info->scsi_host->host_no, device->bus,
1559 device->target, device->lun,
1560 atomic_read(&device->scsi_cmds_outstanding));
1561
1393 if (pqi_is_logical_device(device)) 1562 if (pqi_is_logical_device(device))
1394 scsi_remove_device(device->sdev); 1563 scsi_remove_device(device->sdev);
1395 else 1564 else
@@ -1454,6 +1623,14 @@ static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1454 return DEVICE_NOT_FOUND; 1623 return DEVICE_NOT_FOUND;
1455} 1624}
1456 1625
1626static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1627{
1628 if (device->is_expander_smp_device)
1629 return "Enclosure SMP ";
1630
1631 return scsi_device_type(device->devtype);
1632}
1633
1457#define PQI_DEV_INFO_BUFFER_LENGTH 128 1634#define PQI_DEV_INFO_BUFFER_LENGTH 128
1458 1635
1459static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1636static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
@@ -1489,7 +1666,7 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1489 1666
1490 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 1667 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1491 " %s %.8s %.16s ", 1668 " %s %.8s %.16s ",
1492 scsi_device_type(device->devtype), 1669 pqi_device_type(device),
1493 device->vendor, 1670 device->vendor,
1494 device->model); 1671 device->model);
1495 1672
@@ -1534,6 +1711,8 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1534 existing_device->is_physical_device = new_device->is_physical_device; 1711 existing_device->is_physical_device = new_device->is_physical_device;
1535 existing_device->is_external_raid_device = 1712 existing_device->is_external_raid_device =
1536 new_device->is_external_raid_device; 1713 new_device->is_external_raid_device;
1714 existing_device->is_expander_smp_device =
1715 new_device->is_expander_smp_device;
1537 existing_device->aio_enabled = new_device->aio_enabled; 1716 existing_device->aio_enabled = new_device->aio_enabled;
1538 memcpy(existing_device->vendor, new_device->vendor, 1717 memcpy(existing_device->vendor, new_device->vendor,
1539 sizeof(existing_device->vendor)); 1718 sizeof(existing_device->vendor));
@@ -1558,6 +1737,7 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1558 new_device->raid_bypass_configured; 1737 new_device->raid_bypass_configured;
1559 existing_device->raid_bypass_enabled = 1738 existing_device->raid_bypass_enabled =
1560 new_device->raid_bypass_enabled; 1739 new_device->raid_bypass_enabled;
1740 existing_device->device_offline = false;
1561 1741
1562 /* To prevent this from being freed later. */ 1742 /* To prevent this from being freed later. */
1563 new_device->raid_map = NULL; 1743 new_device->raid_map = NULL;
@@ -1589,6 +1769,14 @@ static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1589 device->keep_device = false; 1769 device->keep_device = false;
1590} 1770}
1591 1771
1772static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
1773{
1774 if (device->is_expander_smp_device)
1775 return device->sas_port != NULL;
1776
1777 return device->sdev != NULL;
1778}
1779
1592static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 1780static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1593 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 1781 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1594{ 1782{
@@ -1674,6 +1862,9 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1674 1862
1675 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1863 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1676 1864
1865 if (pqi_ctrl_in_ofa(ctrl_info))
1866 pqi_ctrl_ofa_done(ctrl_info);
1867
1677 /* Remove all devices that have gone away. */ 1868 /* Remove all devices that have gone away. */
1678 list_for_each_entry_safe(device, next, &delete_list, 1869 list_for_each_entry_safe(device, next, &delete_list,
1679 delete_list_entry) { 1870 delete_list_entry) {
@@ -1683,7 +1874,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1683 } else { 1874 } else {
1684 pqi_dev_info(ctrl_info, "removed", device); 1875 pqi_dev_info(ctrl_info, "removed", device);
1685 } 1876 }
1686 if (device->sdev) 1877 if (pqi_is_device_added(device))
1687 pqi_remove_device(ctrl_info, device); 1878 pqi_remove_device(ctrl_info, device);
1688 list_del(&device->delete_list_entry); 1879 list_del(&device->delete_list_entry);
1689 pqi_free_device(device); 1880 pqi_free_device(device);
@@ -1705,7 +1896,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1705 1896
1706 /* Expose any new devices. */ 1897 /* Expose any new devices. */
1707 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 1898 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1708 if (!device->sdev) { 1899 if (!pqi_is_device_added(device)) {
1709 pqi_dev_info(ctrl_info, "added", device); 1900 pqi_dev_info(ctrl_info, "added", device);
1710 rc = pqi_add_device(ctrl_info, device); 1901 rc = pqi_add_device(ctrl_info, device);
1711 if (rc) { 1902 if (rc) {
@@ -1722,7 +1913,12 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1722 1913
1723static bool pqi_is_supported_device(struct pqi_scsi_dev *device) 1914static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1724{ 1915{
1725 bool is_supported = false; 1916 bool is_supported;
1917
1918 if (device->is_expander_smp_device)
1919 return true;
1920
1921 is_supported = false;
1726 1922
1727 switch (device->devtype) { 1923 switch (device->devtype) {
1728 case TYPE_DISK: 1924 case TYPE_DISK:
@@ -1756,6 +1952,30 @@ static inline bool pqi_skip_device(u8 *scsi3addr)
1756 return false; 1952 return false;
1757} 1953}
1758 1954
1955static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
1956{
1957 if (!device->is_physical_device)
1958 return false;
1959
1960 if (device->is_expander_smp_device)
1961 return true;
1962
1963 switch (device->devtype) {
1964 case TYPE_DISK:
1965 case TYPE_ZBC:
1966 case TYPE_ENCLOSURE:
1967 return true;
1968 }
1969
1970 return false;
1971}
1972
1973static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1974{
1975 return !device->is_physical_device ||
1976 !pqi_skip_device(device->scsi3addr);
1977}
1978
1759static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1979static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1760{ 1980{
1761 int i; 1981 int i;
@@ -1864,9 +2084,14 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1864 2084
1865 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2085 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1866 device->is_physical_device = is_physical_device; 2086 device->is_physical_device = is_physical_device;
1867 if (!is_physical_device) 2087 if (is_physical_device) {
2088 if (phys_lun_ext_entry->device_type ==
2089 SA_EXPANDER_SMP_DEVICE)
2090 device->is_expander_smp_device = true;
2091 } else {
1868 device->is_external_raid_device = 2092 device->is_external_raid_device =
1869 pqi_is_external_raid_addr(scsi3addr); 2093 pqi_is_external_raid_addr(scsi3addr);
2094 }
1870 2095
1871 /* Gather information about the device. */ 2096 /* Gather information about the device. */
1872 rc = pqi_get_device_info(ctrl_info, device); 2097 rc = pqi_get_device_info(ctrl_info, device);
@@ -1899,30 +2124,23 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1899 device->wwid = phys_lun_ext_entry->wwid; 2124 device->wwid = phys_lun_ext_entry->wwid;
1900 if ((phys_lun_ext_entry->device_flags & 2125 if ((phys_lun_ext_entry->device_flags &
1901 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) && 2126 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1902 phys_lun_ext_entry->aio_handle) 2127 phys_lun_ext_entry->aio_handle) {
1903 device->aio_enabled = true; 2128 device->aio_enabled = true;
2129 device->aio_handle =
2130 phys_lun_ext_entry->aio_handle;
2131 }
2132 if (device->devtype == TYPE_DISK ||
2133 device->devtype == TYPE_ZBC) {
2134 pqi_get_physical_disk_info(ctrl_info,
2135 device, id_phys);
2136 }
1904 } else { 2137 } else {
1905 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 2138 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1906 sizeof(device->volume_id)); 2139 sizeof(device->volume_id));
1907 } 2140 }
1908 2141
1909 switch (device->devtype) { 2142 if (pqi_is_device_with_sas_address(device))
1910 case TYPE_DISK: 2143 device->sas_address = get_unaligned_be64(&device->wwid);
1911 case TYPE_ZBC:
1912 case TYPE_ENCLOSURE:
1913 if (device->is_physical_device) {
1914 device->sas_address =
1915 get_unaligned_be64(&device->wwid);
1916 if (device->devtype == TYPE_DISK ||
1917 device->devtype == TYPE_ZBC) {
1918 device->aio_handle =
1919 phys_lun_ext_entry->aio_handle;
1920 pqi_get_physical_disk_info(ctrl_info,
1921 device, id_phys);
1922 }
1923 }
1924 break;
1925 }
1926 2144
1927 new_device_list[num_valid_devices++] = device; 2145 new_device_list[num_valid_devices++] = device;
1928 } 2146 }
@@ -1965,7 +2183,7 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1965 if (!device) 2183 if (!device)
1966 break; 2184 break;
1967 2185
1968 if (device->sdev) 2186 if (pqi_is_device_added(device))
1969 pqi_remove_device(ctrl_info, device); 2187 pqi_remove_device(ctrl_info, device);
1970 pqi_free_device(device); 2188 pqi_free_device(device);
1971 } 2189 }
@@ -1991,7 +2209,13 @@ static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1991 2209
1992static void pqi_scan_start(struct Scsi_Host *shost) 2210static void pqi_scan_start(struct Scsi_Host *shost)
1993{ 2211{
1994 pqi_scan_scsi_devices(shost_to_hba(shost)); 2212 struct pqi_ctrl_info *ctrl_info;
2213
2214 ctrl_info = shost_to_hba(shost);
2215 if (pqi_ctrl_in_ofa(ctrl_info))
2216 return;
2217
2218 pqi_scan_scsi_devices(ctrl_info);
1995} 2219}
1996 2220
1997/* Returns TRUE if scan is finished. */ 2221/* Returns TRUE if scan is finished. */
@@ -2018,6 +2242,12 @@ static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2018 mutex_unlock(&ctrl_info->lun_reset_mutex); 2242 mutex_unlock(&ctrl_info->lun_reset_mutex);
2019} 2243}
2020 2244
2245static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
2246{
2247 mutex_lock(&ctrl_info->ofa_mutex);
2248 mutex_unlock(&ctrl_info->ofa_mutex);
2249}
2250
2021static inline void pqi_set_encryption_info( 2251static inline void pqi_set_encryption_info(
2022 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2252 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2023 u64 first_block) 2253 u64 first_block)
@@ -2325,9 +2555,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2325 (map_row * total_disks_per_row) + first_column; 2555 (map_row * total_disks_per_row) + first_column;
2326 } 2556 }
2327 2557
2328 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2329 return PQI_RAID_BYPASS_INELIGIBLE;
2330
2331 aio_handle = raid_map->disk_data[map_index].aio_handle; 2558 aio_handle = raid_map->disk_data[map_index].aio_handle;
2332 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2559 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2333 first_row * strip_size + 2560 first_row * strip_size +
@@ -2397,7 +2624,7 @@ static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2397 u8 status; 2624 u8 status;
2398 2625
2399 pqi_registers = ctrl_info->pqi_registers; 2626 pqi_registers = ctrl_info->pqi_registers;
2400 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 2627 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2401 2628
2402 while (1) { 2629 while (1) {
2403 signature = readq(&pqi_registers->signature); 2630 signature = readq(&pqi_registers->signature);
@@ -2458,10 +2685,9 @@ static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2458 return; 2685 return;
2459 2686
2460 device->device_offline = true; 2687 device->device_offline = true;
2461 scsi_device_set_state(sdev, SDEV_OFFLINE);
2462 ctrl_info = shost_to_hba(sdev->host); 2688 ctrl_info = shost_to_hba(sdev->host);
2463 pqi_schedule_rescan_worker(ctrl_info); 2689 pqi_schedule_rescan_worker(ctrl_info);
2464 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", 2690 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2465 path, ctrl_info->scsi_host->host_no, device->bus, 2691 path, ctrl_info->scsi_host->host_no, device->bus,
2466 device->target, device->lun); 2692 device->target, device->lun);
2467} 2693}
@@ -2665,6 +2891,9 @@ static int pqi_interpret_task_management_response(
2665 case SOP_TMF_FUNCTION_SUCCEEDED: 2891 case SOP_TMF_FUNCTION_SUCCEEDED:
2666 rc = 0; 2892 rc = 0;
2667 break; 2893 break;
2894 case SOP_TMF_REJECTED:
2895 rc = -EAGAIN;
2896 break;
2668 default: 2897 default:
2669 rc = -EIO; 2898 rc = -EIO;
2670 break; 2899 break;
@@ -2704,8 +2933,17 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2704 switch (response->header.iu_type) { 2933 switch (response->header.iu_type) {
2705 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2934 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2706 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2935 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2936 if (io_request->scmd)
2937 io_request->scmd->result = 0;
2938 /* fall through */
2707 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2939 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2708 break; 2940 break;
2941 case PQI_RESPONSE_IU_VENDOR_GENERAL:
2942 io_request->status =
2943 get_unaligned_le16(
2944 &((struct pqi_vendor_general_response *)
2945 response)->status);
2946 break;
2709 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2947 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2710 io_request->status = 2948 io_request->status =
2711 pqi_interpret_task_management_response( 2949 pqi_interpret_task_management_response(
@@ -2825,6 +3063,111 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2825 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3063 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
2826} 3064}
2827 3065
3066#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3067#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3068
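/*
 * After an OFA quiesce, poll the soft reset status register until the
 * firmware asks the driver to initiate the reset, aborts, stops
 * running, or the 30-second timeout expires.
 */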
3069static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3070 struct pqi_ctrl_info *ctrl_info)
3071{
3072 unsigned long timeout;
3073 u8 status;
3074
3075 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3076
3077 while (1) {
3078 status = pqi_read_soft_reset_status(ctrl_info);
3079 if (status & PQI_SOFT_RESET_INITIATE)
3080 return RESET_INITIATE_DRIVER;
3081
3082 if (status & PQI_SOFT_RESET_ABORT)
3083 return RESET_ABORT;
3084
3085 if (time_after(jiffies, timeout)) {
3086 dev_err(&ctrl_info->pci_dev->dev,
3087 "timed out waiting for soft reset status\n");
3088 return RESET_TIMEDOUT;
3089 }
3090
3091 if (!sis_is_firmware_running(ctrl_info))
3092 return RESET_NORESPONSE;
3093
3094 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3095 }
3096}
3097
3098static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3099 enum pqi_soft_reset_status reset_status)
3100{
3101 int rc;
3102
3103 switch (reset_status) {
3104 case RESET_INITIATE_DRIVER:
3105 /* fall through */
3106 case RESET_TIMEDOUT:
3107 dev_info(&ctrl_info->pci_dev->dev,
3108 "resetting controller %u\n", ctrl_info->ctrl_id);
3109 sis_soft_reset(ctrl_info);
3110 /* fall through */
3111 case RESET_INITIATE_FIRMWARE:
3112 rc = pqi_ofa_ctrl_restart(ctrl_info);
3113 pqi_ofa_free_host_buffer(ctrl_info);
3114 dev_info(&ctrl_info->pci_dev->dev,
3115 "Online Firmware Activation for controller %u: %s\n",
3116 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3117 break;
3118 case RESET_ABORT:
3119 pqi_ofa_ctrl_unquiesce(ctrl_info);
3120 dev_info(&ctrl_info->pci_dev->dev,
3121 "Online Firmware Activation for controller %u: %s\n",
3122 ctrl_info->ctrl_id, "ABORTED");
3123 break;
3124 case RESET_NORESPONSE:
3125 pqi_ofa_free_host_buffer(ctrl_info);
3126 pqi_take_ctrl_offline(ctrl_info);
3127 break;
3128 }
3129}
3130
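/*
 * OFA event handling, serialized by ofa_mutex: quiesce and follow the
 * soft-reset handshake on QUIESCE, set up and report the host buffer
 * on MEMORY_ALLOCATION, and free it again on CANCELLED.
 */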
3131static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3132 struct pqi_event *event)
3133{
3134 u16 event_id;
3135 enum pqi_soft_reset_status status;
3136
3137 event_id = get_unaligned_le16(&event->event_id);
3138
3139 mutex_lock(&ctrl_info->ofa_mutex);
3140
3141 if (event_id == PQI_EVENT_OFA_QUIESCE) {
3142 dev_info(&ctrl_info->pci_dev->dev,
3143 "Received Online Firmware Activation quiesce event for controller %u\n",
3144 ctrl_info->ctrl_id);
3145 pqi_ofa_ctrl_quiesce(ctrl_info);
3146 pqi_acknowledge_event(ctrl_info, event);
3147 if (ctrl_info->soft_reset_handshake_supported) {
3148 status = pqi_poll_for_soft_reset_status(ctrl_info);
3149 pqi_process_soft_reset(ctrl_info, status);
3150 } else {
3151 pqi_process_soft_reset(ctrl_info,
3152 RESET_INITIATE_FIRMWARE);
3153 }
3154
3155 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3156 pqi_acknowledge_event(ctrl_info, event);
3157 pqi_ofa_setup_host_buffer(ctrl_info,
3158 le32_to_cpu(event->ofa_bytes_requested));
3159 pqi_ofa_host_memory_update(ctrl_info);
3160 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3161 pqi_ofa_free_host_buffer(ctrl_info);
3162 pqi_acknowledge_event(ctrl_info, event);
3163 dev_info(&ctrl_info->pci_dev->dev,
3164 "Online Firmware Activation(%u) cancel reason : %u\n",
3165 ctrl_info->ctrl_id, event->ofa_cancel_reason);
3166 }
3167
3168 mutex_unlock(&ctrl_info->ofa_mutex);
3169}
3170
2828static void pqi_event_worker(struct work_struct *work) 3171static void pqi_event_worker(struct work_struct *work)
2829{ 3172{
2830 unsigned int i; 3173 unsigned int i;
@@ -2844,6 +3187,11 @@ static void pqi_event_worker(struct work_struct *work)
2844 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3187 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2845 if (event->pending) { 3188 if (event->pending) {
2846 event->pending = false; 3189 event->pending = false;
3190 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3191 pqi_ctrl_unbusy(ctrl_info);
3192 pqi_ofa_process_event(ctrl_info, event);
3193 return;
3194 }
2847 pqi_acknowledge_event(ctrl_info, event); 3195 pqi_acknowledge_event(ctrl_info, event);
2848 } 3196 }
2849 event++; 3197 event++;
@@ -2853,7 +3201,7 @@ out:
2853 pqi_ctrl_unbusy(ctrl_info); 3201 pqi_ctrl_unbusy(ctrl_info);
2854} 3202}
2855 3203
2856#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 3204#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
2857 3205
2858static void pqi_heartbeat_timer_handler(struct timer_list *t) 3206static void pqi_heartbeat_timer_handler(struct timer_list *t)
2859{ 3207{
@@ -2922,6 +3270,24 @@ static inline bool pqi_is_supported_event(unsigned int event_type)
2922 return pqi_event_type_to_event_index(event_type) != -1; 3270 return pqi_event_type_to_event_index(event_type) != -1;
2923} 3271}
2924 3272
3273static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3274 struct pqi_event_response *response)
3275{
3276 u16 event_id;
3277
3278 event_id = get_unaligned_le16(&event->event_id);
3279
3280 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3281 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3282 event->ofa_bytes_requested =
3283 response->data.ofa_memory_allocation.bytes_requested;
3284 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3285 event->ofa_cancel_reason =
3286 response->data.ofa_cancelled.reason;
3287 }
3288 }
3289}
3290
2925static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3291static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2926{ 3292{
2927 unsigned int num_events; 3293 unsigned int num_events;
@@ -2956,6 +3322,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2956 event->event_id = response->event_id; 3322 event->event_id = response->event_id;
2957 event->additional_event_id = 3323 event->additional_event_id =
2958 response->additional_event_id; 3324 response->additional_event_id;
3325 pqi_ofa_capture_event_payload(event, response);
2959 } 3326 }
2960 } 3327 }
2961 3328
@@ -3389,7 +3756,7 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3389 return 0; 3756 return 0;
3390} 3757}
3391 3758
3392#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 3759#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
3393#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3760#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3394 3761
3395static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3762static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
@@ -3482,7 +3849,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3482 admin_queues = &ctrl_info->admin_queues; 3849 admin_queues = &ctrl_info->admin_queues;
3483 oq_ci = admin_queues->oq_ci_copy; 3850 oq_ci = admin_queues->oq_ci_copy;
3484 3851
3485 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 3852 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
3486 3853
3487 while (1) { 3854 while (1) {
3488 oq_pi = readl(admin_queues->oq_pi); 3855 oq_pi = readl(admin_queues->oq_pi);
@@ -3597,7 +3964,7 @@ static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3597 3964
3598 while (1) { 3965 while (1) {
3599 if (wait_for_completion_io_timeout(wait, 3966 if (wait_for_completion_io_timeout(wait,
3600 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 3967 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
3601 rc = 0; 3968 rc = 0;
3602 break; 3969 break;
3603 } 3970 }
@@ -4927,7 +5294,17 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4927{ 5294{
4928 struct pqi_scsi_dev *device; 5295 struct pqi_scsi_dev *device;
4929 5296
5297 if (!scmd->device) {
5298 set_host_byte(scmd, DID_NO_CONNECT);
5299 return;
5300 }
5301
4930 device = scmd->device->hostdata; 5302 device = scmd->device->hostdata;
5303 if (!device) {
5304 set_host_byte(scmd, DID_NO_CONNECT);
5305 return;
5306 }
5307
4931 atomic_dec(&device->scsi_cmds_outstanding); 5308 atomic_dec(&device->scsi_cmds_outstanding);
4932} 5309}
4933 5310
@@ -4944,16 +5321,24 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4944 device = scmd->device->hostdata; 5321 device = scmd->device->hostdata;
4945 ctrl_info = shost_to_hba(shost); 5322 ctrl_info = shost_to_hba(shost);
4946 5323
5324 if (!device) {
5325 set_host_byte(scmd, DID_NO_CONNECT);
5326 pqi_scsi_done(scmd);
5327 return 0;
5328 }
5329
4947 atomic_inc(&device->scsi_cmds_outstanding); 5330 atomic_inc(&device->scsi_cmds_outstanding);
4948 5331
4949 if (pqi_ctrl_offline(ctrl_info)) { 5332 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5333 device)) {
4950 set_host_byte(scmd, DID_NO_CONNECT); 5334 set_host_byte(scmd, DID_NO_CONNECT);
4951 pqi_scsi_done(scmd); 5335 pqi_scsi_done(scmd);
4952 return 0; 5336 return 0;
4953 } 5337 }
4954 5338
4955 pqi_ctrl_busy(ctrl_info); 5339 pqi_ctrl_busy(ctrl_info);
4956 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) { 5340 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5341 pqi_ctrl_in_ofa(ctrl_info)) {
4957 rc = SCSI_MLQUEUE_HOST_BUSY; 5342 rc = SCSI_MLQUEUE_HOST_BUSY;
4958 goto out; 5343 goto out;
4959 } 5344 }
@@ -5098,25 +5483,75 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5098 } 5483 }
5099} 5484}
5100 5485
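/*
 * Fail every request still sitting on a submit queue (i.e. never sent
 * to the controller) across all queue groups and both submission
 * paths, completing each with DID_RESET.
 */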
5486static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5487{
5488 unsigned int i;
5489 unsigned int path;
5490 struct pqi_queue_group *queue_group;
5491 unsigned long flags;
5492 struct pqi_io_request *io_request;
5493 struct pqi_io_request *next;
5494 struct scsi_cmnd *scmd;
5495
5496 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5497 queue_group = &ctrl_info->queue_groups[i];
5498
5499 for (path = 0; path < 2; path++) {
5500 spin_lock_irqsave(&queue_group->submit_lock[path],
5501 flags);
5502
5503 list_for_each_entry_safe(io_request, next,
5504 &queue_group->request_list[path],
5505 request_list_entry) {
5506
5507 scmd = io_request->scmd;
5508 if (!scmd)
5509 continue;
5510
5511 list_del(&io_request->request_list_entry);
5512 set_host_byte(scmd, DID_RESET);
5513 pqi_scsi_done(scmd);
5514 }
5515
5516 spin_unlock_irqrestore(
5517 &queue_group->submit_lock[path], flags);
5518 }
5519 }
5520}
5521
5101static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5522static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5102 struct pqi_scsi_dev *device) 5523 struct pqi_scsi_dev *device, unsigned long timeout_secs)
5103{ 5524{
5525 unsigned long timeout;
5526
5527 timeout = (timeout_secs * PQI_HZ) + jiffies;
5528
5104 while (atomic_read(&device->scsi_cmds_outstanding)) { 5529 while (atomic_read(&device->scsi_cmds_outstanding)) {
5105 pqi_check_ctrl_health(ctrl_info); 5530 pqi_check_ctrl_health(ctrl_info);
5106 if (pqi_ctrl_offline(ctrl_info)) 5531 if (pqi_ctrl_offline(ctrl_info))
5107 return -ENXIO; 5532 return -ENXIO;
5533 if (timeout_secs != NO_TIMEOUT) {
5534 if (time_after(jiffies, timeout)) {
5535 dev_err(&ctrl_info->pci_dev->dev,
5536 "timed out waiting for pending IO\n");
5537 return -ETIMEDOUT;
5538 }
5539 }
5108 usleep_range(1000, 2000); 5540 usleep_range(1000, 2000);
5109 } 5541 }
5110 5542
5111 return 0; 5543 return 0;
5112} 5544}
5113 5545
5114static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info) 5546static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5547 unsigned long timeout_secs)
5115{ 5548{
5116 bool io_pending; 5549 bool io_pending;
5117 unsigned long flags; 5550 unsigned long flags;
5551 unsigned long timeout;
5118 struct pqi_scsi_dev *device; 5552 struct pqi_scsi_dev *device;
5119 5553
5554 timeout = (timeout_secs * PQI_HZ) + jiffies;
5120 while (1) { 5555 while (1) {
5121 io_pending = false; 5556 io_pending = false;
5122 5557
@@ -5138,6 +5573,13 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5138 if (pqi_ctrl_offline(ctrl_info)) 5573 if (pqi_ctrl_offline(ctrl_info))
5139 return -ENXIO; 5574 return -ENXIO;
5140 5575
5576 if (timeout_secs != NO_TIMEOUT) {
5577 if (time_after(jiffies, timeout)) {
5578 dev_err(&ctrl_info->pci_dev->dev,
5579 "timed out waiting for pending IO\n");
5580 return -ETIMEDOUT;
5581 }
5582 }
5141 usleep_range(1000, 2000); 5583 usleep_range(1000, 2000);
5142 } 5584 }
5143 5585
@@ -5161,7 +5603,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5161 5603
5162 while (1) { 5604 while (1) {
5163 if (wait_for_completion_io_timeout(wait, 5605 if (wait_for_completion_io_timeout(wait,
5164 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) { 5606 PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
5165 rc = 0; 5607 rc = 0;
5166 break; 5608 break;
5167 } 5609 }
@@ -5212,20 +5654,56 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5212 return rc; 5654 return rc;
5213} 5655}
5214 5656
5657#define PQI_LUN_RESET_RETRIES 3
5658#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
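/*
 * A LUN reset rejected by the firmware (SOP_TMF_REJECTED, -EAGAIN) is
 * retried up to 3 times at 10-second intervals before giving up.
 */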
5215/* Performs a reset at the LUN level. */ 5659/* Performs a reset at the LUN level. */
5216 5660
5217static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5661static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5218 struct pqi_scsi_dev *device) 5662 struct pqi_scsi_dev *device)
5219{ 5663{
5220 int rc; 5664 int rc;
5665 unsigned int retries;
5666 unsigned long timeout_secs;
5221 5667
5222 rc = pqi_lun_reset(ctrl_info, device); 5668 for (retries = 0;;) {
5223 if (rc == 0) 5669 rc = pqi_lun_reset(ctrl_info, device);
5224 rc = pqi_device_wait_for_pending_io(ctrl_info, device); 5670 if (rc != -EAGAIN ||
5671 ++retries > PQI_LUN_RESET_RETRIES)
5672 break;
5673 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5674 }
5675 timeout_secs = rc ? PQI_LUN_RESET_TIMEOUT_SECS : NO_TIMEOUT;
5676
5677 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
5225 5678
5226 return rc == 0 ? SUCCESS : FAILED; 5679 return rc == 0 ? SUCCESS : FAILED;
5227} 5680}
5228 5681
5682static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5683 struct pqi_scsi_dev *device)
5684{
5685 int rc;
5686
5687 mutex_lock(&ctrl_info->lun_reset_mutex);
5688
5689 pqi_ctrl_block_requests(ctrl_info);
5690 pqi_ctrl_wait_until_quiesced(ctrl_info);
5691 pqi_fail_io_queued_for_device(ctrl_info, device);
5692 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5693 pqi_device_reset_start(device);
5694 pqi_ctrl_unblock_requests(ctrl_info);
5695
5696 if (rc)
5697 rc = FAILED;
5698 else
5699 rc = _pqi_device_reset(ctrl_info, device);
5700
5701 pqi_device_reset_done(device);
5702
5703 mutex_unlock(&ctrl_info->lun_reset_mutex);
5704 return rc;
5705}
5706
5229static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5707static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5230{ 5708{
5231 int rc; 5709 int rc;
@@ -5243,28 +5721,16 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5243 5721
5244 pqi_check_ctrl_health(ctrl_info); 5722 pqi_check_ctrl_health(ctrl_info);
5245 if (pqi_ctrl_offline(ctrl_info)) { 5723 if (pqi_ctrl_offline(ctrl_info)) {
5724 dev_err(&ctrl_info->pci_dev->dev,
5725 "controller %u offlined - cannot send device reset\n",
5726 ctrl_info->ctrl_id);
5246 rc = FAILED; 5727 rc = FAILED;
5247 goto out; 5728 goto out;
5248 } 5729 }
5249 5730
5250 mutex_lock(&ctrl_info->lun_reset_mutex); 5731 pqi_wait_until_ofa_finished(ctrl_info);
5251
5252 pqi_ctrl_block_requests(ctrl_info);
5253 pqi_ctrl_wait_until_quiesced(ctrl_info);
5254 pqi_fail_io_queued_for_device(ctrl_info, device);
5255 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5256 pqi_device_reset_start(device);
5257 pqi_ctrl_unblock_requests(ctrl_info);
5258
5259 if (rc)
5260 rc = FAILED;
5261 else
5262 rc = pqi_device_reset(ctrl_info, device);
5263
5264 pqi_device_reset_done(device);
5265
5266 mutex_unlock(&ctrl_info->lun_reset_mutex);
5267 5732
5733 rc = pqi_device_reset(ctrl_info, device);
5268out: 5734out:
5269 dev_err(&ctrl_info->pci_dev->dev, 5735 dev_err(&ctrl_info->pci_dev->dev,
5270 "reset of scsi %d:%d:%d:%d: %s\n", 5736 "reset of scsi %d:%d:%d:%d: %s\n",
@@ -5308,6 +5774,10 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
5308 scsi_change_queue_depth(sdev, 5774 scsi_change_queue_depth(sdev,
5309 device->advertised_queue_depth); 5775 device->advertised_queue_depth);
5310 } 5776 }
5777 if (pqi_is_logical_device(device))
5778 pqi_disable_write_same(sdev);
5779 else
5780 sdev->allow_restart = 1;
5311 } 5781 }
5312 5782
5313 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5783 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5580,6 +6050,9 @@ static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5580 6050
5581 ctrl_info = shost_to_hba(sdev->host); 6051 ctrl_info = shost_to_hba(sdev->host);
5582 6052
6053 if (pqi_ctrl_in_ofa(ctrl_info))
6054 return -EBUSY;
6055
5583 switch (cmd) { 6056 switch (cmd) {
5584 case CCISS_DEREGDISK: 6057 case CCISS_DEREGDISK:
5585 case CCISS_REGNEWDISK: 6058 case CCISS_REGNEWDISK:
@@ -5684,6 +6157,150 @@ static struct device_attribute *pqi_shost_attrs[] = {
5684 NULL 6157 NULL
5685}; 6158};
5686 6159
6160static ssize_t pqi_unique_id_show(struct device *dev,
6161 struct device_attribute *attr, char *buffer)
6162{
6163 struct pqi_ctrl_info *ctrl_info;
6164 struct scsi_device *sdev;
6165 struct pqi_scsi_dev *device;
6166 unsigned long flags;
6167 unsigned char uid[16];
6168
6169 sdev = to_scsi_device(dev);
6170 ctrl_info = shost_to_hba(sdev->host);
6171
6172 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6173
6174 device = sdev->hostdata;
6175 if (!device) {
6176 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6177 flags);
6178 return -ENODEV;
6179 }
6180 memcpy(uid, device->unique_id, sizeof(uid));
6181
6182 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6183
6184 return snprintf(buffer, PAGE_SIZE,
6185 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
6186 uid[0], uid[1], uid[2], uid[3],
6187 uid[4], uid[5], uid[6], uid[7],
6188 uid[8], uid[9], uid[10], uid[11],
6189 uid[12], uid[13], uid[14], uid[15]);
6190}
6191
6192static ssize_t pqi_lunid_show(struct device *dev,
6193 struct device_attribute *attr, char *buffer)
6194{
6195 struct pqi_ctrl_info *ctrl_info;
6196 struct scsi_device *sdev;
6197 struct pqi_scsi_dev *device;
6198 unsigned long flags;
6199 u8 lunid[8];
6200
6201 sdev = to_scsi_device(dev);
6202 ctrl_info = shost_to_hba(sdev->host);
6203
6204 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6205
6206 device = sdev->hostdata;
6207 if (!device) {
6208 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6209 flags);
6210 return -ENODEV;
6211 }
6212 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6213
6214 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6215
6216 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6217}
6218
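/*
 * path_info: one line per known path showing H:B:T:L and device type,
 * plus PORT/BOX/BAY location data for physical devices, ending with
 * Active or Inactive.
 */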
6219#define MAX_PATHS 8
6220static ssize_t pqi_path_info_show(struct device *dev,
6221 struct device_attribute *attr, char *buf)
6222{
6223 struct pqi_ctrl_info *ctrl_info;
6224 struct scsi_device *sdev;
6225 struct pqi_scsi_dev *device;
6226 unsigned long flags;
6227 int i;
6228 int output_len = 0;
6229 u8 box;
6230 u8 bay;
6231 u8 path_map_index = 0;
6232 char *active;
6233 unsigned char phys_connector[2];
6234
6235 sdev = to_scsi_device(dev);
6236 ctrl_info = shost_to_hba(sdev->host);
6237
6238 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6239
6240 device = sdev->hostdata;
6241 if (!device) {
6242 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6243 flags);
6244 return -ENODEV;
6245 }
6246
6247 bay = device->bay;
6248 for (i = 0; i < MAX_PATHS; i++) {
6249 path_map_index = 1<<i;
6250 if (i == device->active_path_index)
6251 active = "Active";
6252 else if (device->path_map & path_map_index)
6253 active = "Inactive";
6254 else
6255 continue;
6256
6257 output_len += scnprintf(buf + output_len,
6258 PAGE_SIZE - output_len,
6259 "[%d:%d:%d:%d] %20.20s ",
6260 ctrl_info->scsi_host->host_no,
6261 device->bus, device->target,
6262 device->lun,
6263 scsi_device_type(device->devtype));
6264
6265 if (device->devtype == TYPE_RAID ||
6266 pqi_is_logical_device(device))
6267 goto end_buffer;
6268
6269 memcpy(&phys_connector, &device->phys_connector[i],
6270 sizeof(phys_connector));
6271 if (phys_connector[0] < '0')
6272 phys_connector[0] = '0';
6273 if (phys_connector[1] < '0')
6274 phys_connector[1] = '0';
6275
6276 output_len += scnprintf(buf + output_len,
6277 PAGE_SIZE - output_len,
6278 "PORT: %.2s ", phys_connector);
6279
6280 box = device->box[i];
6281 if (box != 0 && box != 0xFF)
6282 output_len += scnprintf(buf + output_len,
6283 PAGE_SIZE - output_len,
6284 "BOX: %hhu ", box);
6285
6286 if ((device->devtype == TYPE_DISK ||
6287 device->devtype == TYPE_ZBC) &&
6288 pqi_expose_device(device))
6289 output_len += scnprintf(buf + output_len,
6290 PAGE_SIZE - output_len,
6291 "BAY: %hhu ", bay);
6292
6293end_buffer:
6294 output_len += scnprintf(buf + output_len,
6295 PAGE_SIZE - output_len,
6296 "%s\n", active);
6297 }
6298
6299 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6300 return output_len;
6301}
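
These three show routines back the new lunid, unique_id and path_info device attributes registered further down. What a read returns depends on the controller and topology; a hypothetical example for a dual-ported physical disk (all values invented for illustration):

/*
 * Hypothetical sysfs output:
 *
 *   lunid:     0x0040000100000000
 *   unique_id: 600508B1001C7F3E8D2A4B5C6D7E8F90
 *   path_info: [2:0:1:0]        Direct-Access PORT: 01 BOX: 1 BAY: 4 Active
 *              [2:0:1:0]        Direct-Access PORT: 02 BOX: 1 BAY: 4 Inactive
 */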
6302
6303
5687static ssize_t pqi_sas_address_show(struct device *dev, 6304static ssize_t pqi_sas_address_show(struct device *dev,
5688 struct device_attribute *attr, char *buffer) 6305 struct device_attribute *attr, char *buffer)
5689{ 6306{
@@ -5760,12 +6377,18 @@ static ssize_t pqi_raid_level_show(struct device *dev,
5760 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 6377 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
5761} 6378}
5762 6379
6380static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6381static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6382static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
5763static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 6383static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5764static DEVICE_ATTR(ssd_smart_path_enabled, 0444, 6384static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
5765 pqi_ssd_smart_path_enabled_show, NULL); 6385 pqi_ssd_smart_path_enabled_show, NULL);
5766static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 6386static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
5767 6387
5768static struct device_attribute *pqi_sdev_attrs[] = { 6388static struct device_attribute *pqi_sdev_attrs[] = {
6389 &dev_attr_lunid,
6390 &dev_attr_unique_id,
6391 &dev_attr_path_info,
5769 &dev_attr_sas_address, 6392 &dev_attr_sas_address,
5770 &dev_attr_ssd_smart_path_enabled, 6393 &dev_attr_ssd_smart_path_enabled,
5771 &dev_attr_raid_level, 6394 &dev_attr_raid_level,
@@ -5780,7 +6403,6 @@ static struct scsi_host_template pqi_driver_template = {
5780 .scan_start = pqi_scan_start, 6403 .scan_start = pqi_scan_start,
5781 .scan_finished = pqi_scan_finished, 6404 .scan_finished = pqi_scan_finished,
5782 .this_id = -1, 6405 .this_id = -1,
5783 .use_clustering = ENABLE_CLUSTERING,
5784 .eh_device_reset_handler = pqi_eh_device_reset_handler, 6406 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5785 .ioctl = pqi_ioctl, 6407 .ioctl = pqi_ioctl,
5786 .slave_alloc = pqi_slave_alloc, 6408 .slave_alloc = pqi_slave_alloc,
@@ -5948,6 +6570,244 @@ out:
5948 return rc; 6570 return rc;
5949} 6571}
5950 6572
6573struct pqi_config_table_section_info {
6574 struct pqi_ctrl_info *ctrl_info;
6575 void *section;
6576 u32 section_offset;
6577 void __iomem *section_iomem_addr;
6578};
6579
6580static inline bool pqi_is_firmware_feature_supported(
6581 struct pqi_config_table_firmware_features *firmware_features,
6582 unsigned int bit_position)
6583{
6584 unsigned int byte_index;
6585
6586 byte_index = bit_position / BITS_PER_BYTE;
6587
6588 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6589 return false;
6590
6591 return firmware_features->features_supported[byte_index] &
6592 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6593}
6594
6595static inline bool pqi_is_firmware_feature_enabled(
6596 struct pqi_config_table_firmware_features *firmware_features,
6597 void __iomem *firmware_features_iomem_addr,
6598 unsigned int bit_position)
6599{
6600 unsigned int byte_index;
6601 u8 __iomem *features_enabled_iomem_addr;
6602
6603 byte_index = (bit_position / BITS_PER_BYTE) +
6604 (le16_to_cpu(firmware_features->num_elements) * 2);
6605
6606 features_enabled_iomem_addr = firmware_features_iomem_addr +
6607 offsetof(struct pqi_config_table_firmware_features,
6608 features_supported) + byte_index;
6609
6610 return *((__force u8 *)features_enabled_iomem_addr) &
6611 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6612}
6613
6614static inline void pqi_request_firmware_feature(
6615 struct pqi_config_table_firmware_features *firmware_features,
6616 unsigned int bit_position)
6617{
6618 unsigned int byte_index;
6619
6620 byte_index = (bit_position / BITS_PER_BYTE) +
6621 le16_to_cpu(firmware_features->num_elements);
6622
6623 firmware_features->features_supported[byte_index] |=
6624 (1 << (bit_position % BITS_PER_BYTE));
6625}
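
Read together, these three helpers imply the layout of the firmware-features section payload: one byte array split into three equal regions of num_elements bytes each — supported bits first, host-requested bits second, firmware-enabled bits third. A minimal standalone sketch of that indexing (names here are illustrative, not driver API):

#include <linux/bits.h>		/* BITS_PER_BYTE */
#include <linux/types.h>

enum feature_region { FEAT_SUPPORTED, FEAT_REQUESTED, FEAT_ENABLED };

/* Test one feature bit in one of the three stacked byte regions. */
static bool feature_bit_set(const u8 *features, u16 num_elements,
	enum feature_region region, unsigned int bit)
{
	unsigned int byte_index = bit / BITS_PER_BYTE;

	if (byte_index >= num_elements)
		return false;

	byte_index += region * num_elements;

	return features[byte_index] & (1 << (bit % BITS_PER_BYTE));
}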
6626
6627static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6628 u16 first_section, u16 last_section)
6629{
6630 struct pqi_vendor_general_request request;
6631
6632 memset(&request, 0, sizeof(request));
6633
6634 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6635 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6636 &request.header.iu_length);
6637 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6638 &request.function_code);
6639 put_unaligned_le16(first_section,
6640 &request.data.config_table_update.first_section);
6641 put_unaligned_le16(last_section,
6642 &request.data.config_table_update.last_section);
6643
6644 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6645 0, NULL, NO_TIMEOUT);
6646}
6647
6648static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6649 struct pqi_config_table_firmware_features *firmware_features,
6650 void __iomem *firmware_features_iomem_addr)
6651{
6652 void *features_requested;
6653 void __iomem *features_requested_iomem_addr;
6654
6655 features_requested = firmware_features->features_supported +
6656 le16_to_cpu(firmware_features->num_elements);
6657
6658 features_requested_iomem_addr = firmware_features_iomem_addr +
6659 (features_requested - (void *)firmware_features);
6660
6661 memcpy_toio(features_requested_iomem_addr, features_requested,
6662 le16_to_cpu(firmware_features->num_elements));
6663
6664 return pqi_config_table_update(ctrl_info,
6665 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6666 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6667}
6668
6669struct pqi_firmware_feature {
6670 char *feature_name;
6671 unsigned int feature_bit;
6672 bool supported;
6673 bool enabled;
6674 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6675 struct pqi_firmware_feature *firmware_feature);
6676};
6677
6678static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6679 struct pqi_firmware_feature *firmware_feature)
6680{
6681 if (!firmware_feature->supported) {
6682 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6683 firmware_feature->feature_name);
6684 return;
6685 }
6686
6687 if (firmware_feature->enabled) {
6688 dev_info(&ctrl_info->pci_dev->dev,
6689 "%s enabled\n", firmware_feature->feature_name);
6690 return;
6691 }
6692
6693 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6694 firmware_feature->feature_name);
6695}
6696
6697static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6698 struct pqi_firmware_feature *firmware_feature)
6699{
6700 if (firmware_feature->feature_status)
6701 firmware_feature->feature_status(ctrl_info, firmware_feature);
6702}
6703
6704static DEFINE_MUTEX(pqi_firmware_features_mutex);
6705
6706static struct pqi_firmware_feature pqi_firmware_features[] = {
6707 {
6708 .feature_name = "Online Firmware Activation",
6709 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6710 .feature_status = pqi_firmware_feature_status,
6711 },
6712 {
6713 .feature_name = "Serial Management Protocol",
6714 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6715 .feature_status = pqi_firmware_feature_status,
6716 },
6717 {
6718 .feature_name = "New Soft Reset Handshake",
6719 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
6720 .feature_status = pqi_firmware_feature_status,
6721 },
6722};
6723
6724static void pqi_process_firmware_features(
6725 struct pqi_config_table_section_info *section_info)
6726{
6727 int rc;
6728 struct pqi_ctrl_info *ctrl_info;
6729 struct pqi_config_table_firmware_features *firmware_features;
6730 void __iomem *firmware_features_iomem_addr;
6731 unsigned int i;
6732 unsigned int num_features_supported;
6733
6734 ctrl_info = section_info->ctrl_info;
6735 firmware_features = section_info->section;
6736 firmware_features_iomem_addr = section_info->section_iomem_addr;
6737
6738 for (i = 0, num_features_supported = 0;
6739 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6740 if (pqi_is_firmware_feature_supported(firmware_features,
6741 pqi_firmware_features[i].feature_bit)) {
6742 pqi_firmware_features[i].supported = true;
6743 num_features_supported++;
6744 } else {
6745 pqi_firmware_feature_update(ctrl_info,
6746 &pqi_firmware_features[i]);
6747 }
6748 }
6749
6750 if (num_features_supported == 0)
6751 return;
6752
6753 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6754 if (!pqi_firmware_features[i].supported)
6755 continue;
6756 pqi_request_firmware_feature(firmware_features,
6757 pqi_firmware_features[i].feature_bit);
6758 }
6759
6760 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6761 firmware_features_iomem_addr);
6762 if (rc) {
6763 dev_err(&ctrl_info->pci_dev->dev,
6764 "failed to enable firmware features in PQI configuration table\n");
6765 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6766 if (!pqi_firmware_features[i].supported)
6767 continue;
6768 pqi_firmware_feature_update(ctrl_info,
6769 &pqi_firmware_features[i]);
6770 }
6771 return;
6772 }
6773
6774 ctrl_info->soft_reset_handshake_supported = false;
6775 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6776 if (!pqi_firmware_features[i].supported)
6777 continue;
6778 if (pqi_is_firmware_feature_enabled(firmware_features,
6779 firmware_features_iomem_addr,
6780 pqi_firmware_features[i].feature_bit)) {
6781 pqi_firmware_features[i].enabled = true;
6782 if (pqi_firmware_features[i].feature_bit ==
6783 PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
6784 ctrl_info->soft_reset_handshake_supported =
6785 true;
6786 }
6787 pqi_firmware_feature_update(ctrl_info,
6788 &pqi_firmware_features[i]);
6789 }
6790}
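
The function above is effectively a four-step handshake over the config table. Condensed (a summary of the code, not additional driver logic):

/*
 * 1. scan features_supported[]           -> mark .supported
 * 2. set bits in features_requested[]    -> pqi_request_firmware_feature()
 * 3. PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE asks the firmware to
 *    re-read the section                 -> pqi_enable_firmware_features()
 * 4. re-read features_enabled[] over MMIO -> mark .enabled, log status
 */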
6791
6792static void pqi_init_firmware_features(void)
6793{
6794 unsigned int i;
6795
6796 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6797 pqi_firmware_features[i].supported = false;
6798 pqi_firmware_features[i].enabled = false;
6799 }
6800}
6801
6802static void pqi_process_firmware_features_section(
6803 struct pqi_config_table_section_info *section_info)
6804{
6805 mutex_lock(&pqi_firmware_features_mutex);
6806 pqi_init_firmware_features();
6807 pqi_process_firmware_features(section_info);
6808 mutex_unlock(&pqi_firmware_features_mutex);
6809}
6810
5951static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 6811static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5952{ 6812{
5953 u32 table_length; 6813 u32 table_length;
@@ -5955,8 +6815,11 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5955 void __iomem *table_iomem_addr; 6815 void __iomem *table_iomem_addr;
5956 struct pqi_config_table *config_table; 6816 struct pqi_config_table *config_table;
5957 struct pqi_config_table_section_header *section; 6817 struct pqi_config_table_section_header *section;
6818 struct pqi_config_table_section_info section_info;
5958 6819
5959 table_length = ctrl_info->config_table_length; 6820 table_length = ctrl_info->config_table_length;
6821 if (table_length == 0)
6822 return 0;
5960 6823
5961 config_table = kmalloc(table_length, GFP_KERNEL); 6824 config_table = kmalloc(table_length, GFP_KERNEL);
5962 if (!config_table) { 6825 if (!config_table) {
@@ -5973,13 +6836,22 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5973 ctrl_info->config_table_offset; 6836 ctrl_info->config_table_offset;
5974 memcpy_fromio(config_table, table_iomem_addr, table_length); 6837 memcpy_fromio(config_table, table_iomem_addr, table_length);
5975 6838
6839 section_info.ctrl_info = ctrl_info;
5976 section_offset = 6840 section_offset =
5977 get_unaligned_le32(&config_table->first_section_offset); 6841 get_unaligned_le32(&config_table->first_section_offset);
5978 6842
5979 while (section_offset) { 6843 while (section_offset) {
5980 section = (void *)config_table + section_offset; 6844 section = (void *)config_table + section_offset;
5981 6845
6846 section_info.section = section;
6847 section_info.section_offset = section_offset;
6848 section_info.section_iomem_addr =
6849 table_iomem_addr + section_offset;
6850
5982 switch (get_unaligned_le16(&section->section_id)) { 6851 switch (get_unaligned_le16(&section->section_id)) {
6852 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
6853 pqi_process_firmware_features_section(&section_info);
6854 break;
5983 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 6855 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5984 if (pqi_disable_heartbeat) 6856 if (pqi_disable_heartbeat)
5985 dev_warn(&ctrl_info->pci_dev->dev, 6857 dev_warn(&ctrl_info->pci_dev->dev,
@@ -5992,6 +6864,13 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5992 struct pqi_config_table_heartbeat, 6864 struct pqi_config_table_heartbeat,
5993 heartbeat_counter); 6865 heartbeat_counter);
5994 break; 6866 break;
6867 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
6868 ctrl_info->soft_reset_status =
6869 table_iomem_addr +
6870 section_offset +
6871 offsetof(struct pqi_config_table_soft_reset,
6872 soft_reset_status);
6873 break;
5995 } 6874 }
5996 6875
5997 section_offset = 6876 section_offset =
@@ -6124,10 +7003,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6124 ctrl_info->pqi_mode_enabled = true; 7003 ctrl_info->pqi_mode_enabled = true;
6125 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7004 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6126 7005
6127 rc = pqi_process_config_table(ctrl_info);
6128 if (rc)
6129 return rc;
6130
6131 rc = pqi_alloc_admin_queues(ctrl_info); 7006 rc = pqi_alloc_admin_queues(ctrl_info);
6132 if (rc) { 7007 if (rc) {
6133 dev_err(&ctrl_info->pci_dev->dev, 7008 dev_err(&ctrl_info->pci_dev->dev,
@@ -6189,6 +7064,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6189 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7064 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6190 7065
6191 ctrl_info->controller_online = true; 7066 ctrl_info->controller_online = true;
7067
7068 rc = pqi_process_config_table(ctrl_info);
7069 if (rc)
7070 return rc;
7071
6192 pqi_start_heartbeat_timer(ctrl_info); 7072 pqi_start_heartbeat_timer(ctrl_info);
6193 7073
6194 rc = pqi_enable_events(ctrl_info); 7074 rc = pqi_enable_events(ctrl_info);
@@ -6210,6 +7090,13 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6210 return rc; 7090 return rc;
6211 } 7091 }
6212 7092
7093 rc = pqi_set_diag_rescan(ctrl_info);
7094 if (rc) {
7095 dev_err(&ctrl_info->pci_dev->dev,
7096 "error enabling multi-lun rescan\n");
7097 return rc;
7098 }
7099
6213 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7100 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6214 if (rc) { 7101 if (rc) {
6215 dev_err(&ctrl_info->pci_dev->dev, 7102 dev_err(&ctrl_info->pci_dev->dev,
@@ -6267,6 +7154,24 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6267 return rc; 7154 return rc;
6268 7155
6269 /* 7156 /*
7157 * Get the controller properties. This allows us to determine
7158 * whether or not it supports PQI mode.
7159 */
7160 rc = sis_get_ctrl_properties(ctrl_info);
7161 if (rc) {
7162 dev_err(&ctrl_info->pci_dev->dev,
7163 "error obtaining controller properties\n");
7164 return rc;
7165 }
7166
7167 rc = sis_get_pqi_capabilities(ctrl_info);
7168 if (rc) {
7169 dev_err(&ctrl_info->pci_dev->dev,
7170 "error obtaining controller capabilities\n");
7171 return rc;
7172 }
7173
7174 /*
6270 * If the function we are about to call succeeds, the 7175 * If the function we are about to call succeeds, the
6271 * controller will transition from legacy SIS mode 7176 * controller will transition from legacy SIS mode
6272 * into PQI mode. 7177 * into PQI mode.
@@ -6306,9 +7211,14 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6306 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7211 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6307 7212
6308 ctrl_info->controller_online = true; 7213 ctrl_info->controller_online = true;
6309 pqi_start_heartbeat_timer(ctrl_info);
6310 pqi_ctrl_unblock_requests(ctrl_info); 7214 pqi_ctrl_unblock_requests(ctrl_info);
6311 7215
7216 rc = pqi_process_config_table(ctrl_info);
7217 if (rc)
7218 return rc;
7219
7220 pqi_start_heartbeat_timer(ctrl_info);
7221
6312 rc = pqi_enable_events(ctrl_info); 7222 rc = pqi_enable_events(ctrl_info);
6313 if (rc) { 7223 if (rc) {
6314 dev_err(&ctrl_info->pci_dev->dev, 7224 dev_err(&ctrl_info->pci_dev->dev,
@@ -6316,6 +7226,20 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6316 return rc; 7226 return rc;
6317 } 7227 }
6318 7228
7229 rc = pqi_get_ctrl_firmware_version(ctrl_info);
7230 if (rc) {
7231 dev_err(&ctrl_info->pci_dev->dev,
7232 "error obtaining firmware version\n");
7233 return rc;
7234 }
7235
7236 rc = pqi_set_diag_rescan(ctrl_info);
7237 if (rc) {
7238 dev_err(&ctrl_info->pci_dev->dev,
7239 "error enabling multi-lun rescan\n");
7240 return rc;
7241 }
7242
6319 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7243 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6320 if (rc) { 7244 if (rc) {
6321 dev_err(&ctrl_info->pci_dev->dev, 7245 dev_err(&ctrl_info->pci_dev->dev,
@@ -6426,6 +7350,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6426 7350
6427 mutex_init(&ctrl_info->scan_mutex); 7351 mutex_init(&ctrl_info->scan_mutex);
6428 mutex_init(&ctrl_info->lun_reset_mutex); 7352 mutex_init(&ctrl_info->lun_reset_mutex);
7353 mutex_init(&ctrl_info->ofa_mutex);
6429 7354
6430 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 7355 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6431 spin_lock_init(&ctrl_info->scsi_device_list_lock); 7356 spin_lock_init(&ctrl_info->scsi_device_list_lock);
@@ -6502,6 +7427,217 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6502 pqi_free_ctrl_resources(ctrl_info); 7427 pqi_free_ctrl_resources(ctrl_info);
6503} 7428}
6504 7429
7430static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7431{
7432 pqi_cancel_update_time_worker(ctrl_info);
7433 pqi_cancel_rescan_worker(ctrl_info);
7434 pqi_wait_until_lun_reset_finished(ctrl_info);
7435 pqi_wait_until_scan_finished(ctrl_info);
7436 pqi_ctrl_ofa_start(ctrl_info);
7437 pqi_ctrl_block_requests(ctrl_info);
7438 pqi_ctrl_wait_until_quiesced(ctrl_info);
7439 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7440 pqi_fail_io_queued_for_all_devices(ctrl_info);
7441 pqi_wait_until_inbound_queues_empty(ctrl_info);
7442 pqi_stop_heartbeat_timer(ctrl_info);
7443 ctrl_info->pqi_mode_enabled = false;
7444 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7445}
7446
7447static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7448{
7449 pqi_ofa_free_host_buffer(ctrl_info);
7450 ctrl_info->pqi_mode_enabled = true;
7451 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7452 ctrl_info->controller_online = true;
7453 pqi_ctrl_unblock_requests(ctrl_info);
7454 pqi_start_heartbeat_timer(ctrl_info);
7455 pqi_schedule_update_time_worker(ctrl_info);
7456 pqi_clear_soft_reset_status(ctrl_info,
7457 PQI_SOFT_RESET_ABORT);
7458 pqi_scan_scsi_devices(ctrl_info);
7459}
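
Quiesce and unquiesce bracket an online firmware activation without ever detaching the SCSI host. The overall sequence, as far as it can be read from these routines (the OFA event handling that drives them lives elsewhere in the driver):

/*
 * quiesce:   drain scans/resets, block new I/O, fail queued I/O,
 *            stop the heartbeat, drop back to SIS mode
 * activate:  firmware takes over, using the host buffer staged via
 *            pqi_ofa_host_memory_update()
 * restart:   pqi_ofa_ctrl_restart() waits, then pqi_ctrl_init_resume()
 * unquiesce: free the host buffer, unblock I/O, restart the heartbeat
 *            and timers, clear soft-reset status, rescan devices
 */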
7460
7461static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7462 u32 total_size, u32 chunk_size)
7463{
7464 u32 sg_count;
7465 u32 size;
7466 int i;
7467 struct pqi_sg_descriptor *mem_descriptor = NULL;
7468 struct device *dev;
7469 struct pqi_ofa_memory *ofap;
7470
7471 dev = &ctrl_info->pci_dev->dev;
7472
7473 sg_count = (total_size + chunk_size - 1);
7474 sg_count /= chunk_size;
7475
7476 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7477
7478 if (sg_count*chunk_size < total_size)
7479 goto out;
7480
7481 ctrl_info->pqi_ofa_chunk_virt_addr =
7482 kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
7483 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7484 goto out;
7485
7486 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
7487 dma_addr_t dma_handle;
7488
7489 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7490 dma_zalloc_coherent(dev, chunk_size, &dma_handle,
7491 GFP_KERNEL);
7492
7493 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7494 break;
7495
7496 mem_descriptor = &ofap->sg_descriptor[i];
7497 put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address);
7498 put_unaligned_le32 (chunk_size, &mem_descriptor->length);
7499 }
7500
7501 if (!size || size < total_size)
7502 goto out_free_chunks;
7503
7504 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
7505 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
7506 put_unaligned_le32(size, &ofap->bytes_allocated);
7507
7508 return 0;
7509
7510out_free_chunks:
7511 while (--i >= 0) {
7512 mem_descriptor = &ofap->sg_descriptor[i];
7513 dma_free_coherent(dev, chunk_size,
7514 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7515 get_unaligned_le64(&mem_descriptor->address));
7516 }
7517 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7518
7519out:
7520 put_unaligned_le32 (0, &ofap->bytes_allocated);
7521 return -ENOMEM;
7522}
7523
7524static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7525{
7526 u32 total_size;
7527 u32 min_chunk_size;
7528 u32 chunk_sz;
7529
7530 total_size = le32_to_cpu(
7531 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7532 min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
7533
7534 for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
7535 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7536 return 0;
7537
7538 return -ENOMEM;
7539}
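
The retry loop halves the chunk size until the coherent allocations succeed, and min_chunk_size bounds it so the descriptor count cannot exceed PQI_OFA_MAX_SG_DESCRIPTORS. Worked through with invented numbers:

/*
 * Example (numbers invented): total_size = 1 MiB and a 64-descriptor
 * limit give min_chunk_size = 16 KiB. The loop then tries
 * 1 x 1 MiB, 2 x 512 KiB, 4 x 256 KiB, ... 64 x 16 KiB, returning at
 * the first chunk size the DMA allocator can satisfy.
 */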
7540
7541static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7542 u32 bytes_requested)
7543{
7544 struct pqi_ofa_memory *pqi_ofa_memory;
7545 struct device *dev;
7546
7547 dev = &ctrl_info->pci_dev->dev;
7548 pqi_ofa_memory = dma_zalloc_coherent(dev,
7549 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7550 &ctrl_info->pqi_ofa_mem_dma_handle,
7551 GFP_KERNEL);
7552
7553 if (!pqi_ofa_memory)
7554 return;
7555
7556 put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
7557 memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
7558 sizeof(pqi_ofa_memory->signature));
7559 pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
7560
7561 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7562
7563 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
7564 dev_err(dev, "Failed to allocate host buffer of size = %u",
7565 bytes_requested);
7566 }
7567}
7568
7569static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7570{
7571 int i;
7572 struct pqi_sg_descriptor *mem_descriptor;
7573 struct pqi_ofa_memory *ofap;
7574
7575 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7576
7577 if (!ofap)
7578 return;
7579
7580 if (!ofap->bytes_allocated)
7581 goto out;
7582
7583 mem_descriptor = ofap->sg_descriptor;
7584
7585 for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
7586 i++) {
7587 dma_free_coherent(&ctrl_info->pci_dev->dev,
7588 get_unaligned_le32(&mem_descriptor[i].length),
7589 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7590 get_unaligned_le64(&mem_descriptor[i].address));
7591 }
7592 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7593
7594out:
7595 dma_free_coherent(&ctrl_info->pci_dev->dev,
7596 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
7597 ctrl_info->pqi_ofa_mem_dma_handle);
7598 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
7599}
7600
7601static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7602{
7603 struct pqi_vendor_general_request request;
7604 size_t size;
7605 struct pqi_ofa_memory *ofap;
7606
7607 memset(&request, 0, sizeof(request));
7608
7609 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7610
7611 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7612 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7613 &request.header.iu_length);
7614 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
7615 &request.function_code);
7616
7617 if (ofap) {
7618 size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
7619 get_unaligned_le16(&ofap->num_memory_descriptors) *
7620 sizeof(struct pqi_sg_descriptor);
7621
7622 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7623 &request.data.ofa_memory_allocation.buffer_address);
7624 put_unaligned_le32(size,
7625 &request.data.ofa_memory_allocation.buffer_length);
7626
7627 }
7628
7629 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7630 0, NULL, NO_TIMEOUT);
7631}
7632
7633#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
7634
7635static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7636{
7637 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7638 return pqi_ctrl_init_resume(ctrl_info);
7639}
7640
6505static void pqi_perform_lockup_action(void) 7641static void pqi_perform_lockup_action(void)
6506{ 7642{
6507 switch (pqi_lockup_action) { 7643 switch (pqi_lockup_action) {
@@ -6600,7 +7736,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
6600 const struct pci_device_id *id) 7736 const struct pci_device_id *id)
6601{ 7737{
6602 int rc; 7738 int rc;
6603 int node;
7739 int node, cp_node;
6604 struct pqi_ctrl_info *ctrl_info; 7740 struct pqi_ctrl_info *ctrl_info;
6605 7741
6606 pqi_print_ctrl_info(pci_dev, id); 7742 pqi_print_ctrl_info(pci_dev, id);
@@ -6618,8 +7754,12 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
6618 "controller device ID matched using wildcards\n"); 7754 "controller device ID matched using wildcards\n");
6619 7755
6620 node = dev_to_node(&pci_dev->dev); 7756 node = dev_to_node(&pci_dev->dev);
6621 if (node == NUMA_NO_NODE)
6622 set_dev_node(&pci_dev->dev, 0);
7757 if (node == NUMA_NO_NODE) {
7758 cp_node = cpu_to_node(0);
7759 if (cp_node == NUMA_NO_NODE)
7760 cp_node = 0;
7761 set_dev_node(&pci_dev->dev, cp_node);
7762 }
6623 7763
6624 ctrl_info = pqi_alloc_ctrl_info(node); 7764 ctrl_info = pqi_alloc_ctrl_info(node);
6625 if (!ctrl_info) { 7765 if (!ctrl_info) {
@@ -6654,6 +7794,8 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
6654 if (!ctrl_info) 7794 if (!ctrl_info)
6655 return; 7795 return;
6656 7796
7797 ctrl_info->in_shutdown = true;
7798
6657 pqi_remove_ctrl(ctrl_info); 7799 pqi_remove_ctrl(ctrl_info);
6658} 7800}
6659 7801
@@ -6671,6 +7813,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
6671 * storage. 7813 * storage.
6672 */ 7814 */
6673 rc = pqi_flush_cache(ctrl_info, SHUTDOWN); 7815 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
7816 pqi_free_interrupts(ctrl_info);
6674 pqi_reset(ctrl_info); 7817 pqi_reset(ctrl_info);
6675 if (rc == 0) 7818 if (rc == 0)
6676 return; 7819 return;
@@ -6715,11 +7858,12 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
6715 pqi_cancel_rescan_worker(ctrl_info); 7858 pqi_cancel_rescan_worker(ctrl_info);
6716 pqi_wait_until_scan_finished(ctrl_info); 7859 pqi_wait_until_scan_finished(ctrl_info);
6717 pqi_wait_until_lun_reset_finished(ctrl_info); 7860 pqi_wait_until_lun_reset_finished(ctrl_info);
7861 pqi_wait_until_ofa_finished(ctrl_info);
6718 pqi_flush_cache(ctrl_info, SUSPEND); 7862 pqi_flush_cache(ctrl_info, SUSPEND);
6719 pqi_ctrl_block_requests(ctrl_info); 7863 pqi_ctrl_block_requests(ctrl_info);
6720 pqi_ctrl_wait_until_quiesced(ctrl_info); 7864 pqi_ctrl_wait_until_quiesced(ctrl_info);
6721 pqi_wait_until_inbound_queues_empty(ctrl_info); 7865 pqi_wait_until_inbound_queues_empty(ctrl_info);
6722 pqi_ctrl_wait_for_pending_io(ctrl_info);
7866 pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
6723 pqi_stop_heartbeat_timer(ctrl_info); 7867 pqi_stop_heartbeat_timer(ctrl_info);
6724 7868
6725 if (state.event == PM_EVENT_FREEZE) 7869 if (state.event == PM_EVENT_FREEZE)
@@ -6805,6 +7949,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
6805 }, 7949 },
6806 { 7950 {
6807 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7951 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7952 0x193d, 0xc460)
7953 },
7954 {
7955 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7956 0x193d, 0xc461)
7957 },
7958 {
7959 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6808 0x193d, 0xf460) 7960 0x193d, 0xf460)
6809 }, 7961 },
6810 { 7962 {
@@ -6841,6 +7993,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
6841 }, 7993 },
6842 { 7994 {
6843 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7995 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7996 0x19e5, 0xd227)
7997 },
7998 {
7999 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8000 0x19e5, 0xd228)
8001 },
8002 {
8003 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8004 0x19e5, 0xd229)
8005 },
8006 {
8007 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8008 0x19e5, 0xd22a)
8009 },
8010 {
8011 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8012 0x19e5, 0xd22b)
8013 },
8014 {
8015 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8016 0x19e5, 0xd22c)
8017 },
8018 {
8019 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6844 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 8020 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
6845 }, 8021 },
6846 { 8022 {
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index b209a35e482e..0e4ef215115f 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -17,9 +17,11 @@
17 */ 17 */
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/bsg-lib.h>
20#include <scsi/scsi_host.h> 21#include <scsi/scsi_host.h>
21#include <scsi/scsi_cmnd.h> 22#include <scsi/scsi_cmnd.h>
22#include <scsi/scsi_transport_sas.h> 23#include <scsi/scsi_transport_sas.h>
24#include <asm/unaligned.h>
23#include "smartpqi.h" 25#include "smartpqi.h"
24 26
25static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port) 27static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port)
@@ -97,14 +99,32 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
97 99
98 identify = &rphy->identify; 100 identify = &rphy->identify;
99 identify->sas_address = pqi_sas_port->sas_address; 101 identify->sas_address = pqi_sas_port->sas_address;
100 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
101 identify->target_port_protocols = SAS_PROTOCOL_STP;
102
103 if (pqi_sas_port->device &&
104 pqi_sas_port->device->is_expander_smp_device) {
105 identify->initiator_port_protocols = SAS_PROTOCOL_SMP;
106 identify->target_port_protocols = SAS_PROTOCOL_SMP;
107 } else {
108 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
109 identify->target_port_protocols = SAS_PROTOCOL_STP;
110 }
102 111
103 return sas_rphy_add(rphy); 112 return sas_rphy_add(rphy);
104} 113}
105 114
115static struct sas_rphy *pqi_sas_rphy_alloc(struct pqi_sas_port *pqi_sas_port)
116{
117 if (pqi_sas_port->device &&
118 pqi_sas_port->device->is_expander_smp_device)
119 return sas_expander_alloc(pqi_sas_port->port,
120 SAS_FANOUT_EXPANDER_DEVICE);
121
122 return sas_end_device_alloc(pqi_sas_port->port);
123}
124
106static struct pqi_sas_port *pqi_alloc_sas_port( 125static struct pqi_sas_port *pqi_alloc_sas_port(
107 struct pqi_sas_node *pqi_sas_node, u64 sas_address)
126 struct pqi_sas_node *pqi_sas_node, u64 sas_address,
127 struct pqi_scsi_dev *device)
108{ 128{
109 int rc; 129 int rc;
110 struct pqi_sas_port *pqi_sas_port; 130 struct pqi_sas_port *pqi_sas_port;
@@ -127,6 +147,7 @@ static struct pqi_sas_port *pqi_alloc_sas_port(
127 147
128 pqi_sas_port->port = port; 148 pqi_sas_port->port = port;
129 pqi_sas_port->sas_address = sas_address; 149 pqi_sas_port->sas_address = sas_address;
150 pqi_sas_port->device = device;
130 list_add_tail(&pqi_sas_port->port_list_entry, 151 list_add_tail(&pqi_sas_port->port_list_entry,
131 &pqi_sas_node->port_list_head); 152 &pqi_sas_node->port_list_head);
132 153
@@ -146,7 +167,7 @@ static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port)
146 struct pqi_sas_phy *next; 167 struct pqi_sas_phy *next;
147 168
148 list_for_each_entry_safe(pqi_sas_phy, next, 169 list_for_each_entry_safe(pqi_sas_phy, next,
149 &pqi_sas_port->phy_list_head, phy_list_entry) 170 &pqi_sas_port->phy_list_head, phy_list_entry)
150 pqi_free_sas_phy(pqi_sas_phy); 171 pqi_free_sas_phy(pqi_sas_phy);
151 172
152 sas_port_delete(pqi_sas_port->port); 173 sas_port_delete(pqi_sas_port->port);
@@ -176,7 +197,7 @@ static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node)
176 return; 197 return;
177 198
178 list_for_each_entry_safe(pqi_sas_port, next, 199 list_for_each_entry_safe(pqi_sas_port, next,
179 &pqi_sas_node->port_list_head, port_list_entry) 200 &pqi_sas_node->port_list_head, port_list_entry)
180 pqi_free_sas_port(pqi_sas_port); 201 pqi_free_sas_port(pqi_sas_port);
181 202
182 kfree(pqi_sas_node); 203 kfree(pqi_sas_node);
@@ -206,13 +227,14 @@ int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info)
206 struct pqi_sas_port *pqi_sas_port; 227 struct pqi_sas_port *pqi_sas_port;
207 struct pqi_sas_phy *pqi_sas_phy; 228 struct pqi_sas_phy *pqi_sas_phy;
208 229
209 parent_dev = &shost->shost_gendev;
230 parent_dev = &shost->shost_dev;
210 231
211 pqi_sas_node = pqi_alloc_sas_node(parent_dev); 232 pqi_sas_node = pqi_alloc_sas_node(parent_dev);
212 if (!pqi_sas_node) 233 if (!pqi_sas_node)
213 return -ENOMEM; 234 return -ENOMEM;
214 235
215 pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, ctrl_info->sas_address);
236 pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node,
237 ctrl_info->sas_address, NULL);
216 if (!pqi_sas_port) { 238 if (!pqi_sas_port) {
217 rc = -ENODEV; 239 rc = -ENODEV;
218 goto free_sas_node; 240 goto free_sas_node;
@@ -254,11 +276,12 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
254 struct pqi_sas_port *pqi_sas_port; 276 struct pqi_sas_port *pqi_sas_port;
255 struct sas_rphy *rphy; 277 struct sas_rphy *rphy;
256 278
257 pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, device->sas_address);
279 pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node,
280 device->sas_address, device);
258 if (!pqi_sas_port) 281 if (!pqi_sas_port)
259 return -ENOMEM; 282 return -ENOMEM;
260 283
261 rphy = sas_end_device_alloc(pqi_sas_port->port);
284 rphy = pqi_sas_rphy_alloc(pqi_sas_port);
262 if (!rphy) { 285 if (!rphy) {
263 rc = -ENODEV; 286 rc = -ENODEV;
264 goto free_sas_port; 287 goto free_sas_port;
@@ -329,6 +352,128 @@ static int pqi_sas_phy_speed(struct sas_phy *phy,
329 return -EINVAL; 352 return -EINVAL;
330} 353}
331 354
355#define CSMI_IOCTL_TIMEOUT 60
356#define SMP_CRC_FIELD_LENGTH 4
357
358static struct bmic_csmi_smp_passthru_buffer *
359pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy,
360 struct bsg_job *job)
361{
362 struct bmic_csmi_smp_passthru_buffer *smp_buf;
363 struct bmic_csmi_ioctl_header *ioctl_header;
364 struct bmic_csmi_smp_passthru *parameters;
365 u32 req_size;
366 u32 resp_size;
367
368 smp_buf = kzalloc(sizeof(*smp_buf), GFP_KERNEL);
369 if (!smp_buf)
370 return NULL;
371
372 req_size = job->request_payload.payload_len;
373 resp_size = job->reply_payload.payload_len;
374
375 ioctl_header = &smp_buf->ioctl_header;
376 put_unaligned_le32(sizeof(smp_buf->ioctl_header),
377 &ioctl_header->header_length);
378 put_unaligned_le32(CSMI_IOCTL_TIMEOUT, &ioctl_header->timeout);
379 put_unaligned_le32(CSMI_CC_SAS_SMP_PASSTHRU,
380 &ioctl_header->control_code);
381 put_unaligned_le32(sizeof(smp_buf->parameters), &ioctl_header->length);
382
383 parameters = &smp_buf->parameters;
384 parameters->phy_identifier = rphy->identify.phy_identifier;
385 parameters->port_identifier = 0;
386 parameters->connection_rate = 0;
387 put_unaligned_be64(rphy->identify.sas_address,
388 &parameters->destination_sas_address);
389
390 if (req_size > SMP_CRC_FIELD_LENGTH)
391 req_size -= SMP_CRC_FIELD_LENGTH;
392
393 put_unaligned_le32(req_size, &parameters->request_length);
394
395 put_unaligned_le32(resp_size, &parameters->response_length);
396
397 sg_copy_to_buffer(job->request_payload.sg_list,
398 job->request_payload.sg_cnt, &parameters->request,
399 req_size);
400
401 return smp_buf;
402}
403
404static unsigned int pqi_build_sas_smp_handler_reply(
405 struct bmic_csmi_smp_passthru_buffer *smp_buf, struct bsg_job *job,
406 struct pqi_raid_error_info *error_info)
407{
408 sg_copy_from_buffer(job->reply_payload.sg_list,
409 job->reply_payload.sg_cnt, &smp_buf->parameters.response,
410 le32_to_cpu(smp_buf->parameters.response_length));
411
412 job->reply_len = le16_to_cpu(error_info->sense_data_length);
413 memcpy(job->reply, error_info->data,
414 le16_to_cpu(error_info->sense_data_length));
415
416 return job->reply_payload.payload_len -
417 get_unaligned_le32(&error_info->data_in_transferred);
418}
419
420void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
421 struct sas_rphy *rphy)
422{
423 int rc;
424 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
425 struct bmic_csmi_smp_passthru_buffer *smp_buf;
426 struct pqi_raid_error_info error_info;
427 unsigned int reslen = 0;
428
429 pqi_ctrl_busy(ctrl_info);
430
431 if (job->reply_payload.payload_len == 0) {
432 rc = -ENOMEM;
433 goto out;
434 }
435
436 if (!rphy) {
437 rc = -EINVAL;
438 goto out;
439 }
440
441 if (rphy->identify.device_type != SAS_FANOUT_EXPANDER_DEVICE) {
442 rc = -EINVAL;
443 goto out;
444 }
445
446 if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1) {
447 rc = -EINVAL;
448 goto out;
449 }
450
451 if (pqi_ctrl_offline(ctrl_info)) {
452 rc = -ENXIO;
453 goto out;
454 }
455
456 if (pqi_ctrl_blocked(ctrl_info)) {
457 rc = -EBUSY;
458 goto out;
459 }
460
461 smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job);
462 if (!smp_buf) {
463 rc = -ENOMEM;
464 goto out;
465 }
466
467 rc = pqi_csmi_smp_passthru(ctrl_info, smp_buf, sizeof(*smp_buf),
468 &error_info);
469 if (rc)
470 goto out;
471
472 reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info);
473out:
474 bsg_job_done(job, rc, reslen);
475 pqi_ctrl_unbusy(ctrl_info);
476}
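
The value handed to bsg_job_done() as reslen follows the usual bsg residual convention: it is the part of the reply buffer the controller did not fill. With assumed numbers:

/*
 * Assume reply_payload.payload_len = 1024 and the controller reports
 * data_in_transferred = 64: reslen = 1024 - 64 = 960, so the bsg
 * layer knows only the first 64 reply bytes are valid.
 */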
332struct sas_function_template pqi_sas_transport_functions = { 477struct sas_function_template pqi_sas_transport_functions = {
333 .get_linkerrors = pqi_sas_get_linkerrors, 478 .get_linkerrors = pqi_sas_get_linkerrors,
334 .get_enclosure_identifier = pqi_sas_get_enclosure_identifier, 479 .get_enclosure_identifier = pqi_sas_get_enclosure_identifier,
@@ -338,4 +483,5 @@ struct sas_function_template pqi_sas_transport_functions = {
338 .phy_setup = pqi_sas_phy_setup, 483 .phy_setup = pqi_sas_phy_setup,
339 .phy_release = pqi_sas_phy_release, 484 .phy_release = pqi_sas_phy_release,
340 .set_phy_speed = pqi_sas_phy_speed, 485 .set_phy_speed = pqi_sas_phy_speed,
486 .smp_handler = pqi_sas_smp_handler,
341}; 487};
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index ea91658c7060..dcd11c6418cc 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -34,6 +34,7 @@
34#define SIS_REENABLE_SIS_MODE 0x1 34#define SIS_REENABLE_SIS_MODE 0x1
35#define SIS_ENABLE_MSIX 0x40 35#define SIS_ENABLE_MSIX 0x40
36#define SIS_ENABLE_INTX 0x80 36#define SIS_ENABLE_INTX 0x80
37#define SIS_SOFT_RESET 0x100
37#define SIS_CMD_READY 0x200 38#define SIS_CMD_READY 0x200
38#define SIS_TRIGGER_SHUTDOWN 0x800000 39#define SIS_TRIGGER_SHUTDOWN 0x800000
39#define SIS_PQI_RESET_QUIESCE 0x1000000 40#define SIS_PQI_RESET_QUIESCE 0x1000000
@@ -59,7 +60,7 @@
59 60
60#define SIS_CTRL_KERNEL_UP 0x80 61#define SIS_CTRL_KERNEL_UP 0x80
61#define SIS_CTRL_KERNEL_PANIC 0x100 62#define SIS_CTRL_KERNEL_PANIC 0x100
62#define SIS_CTRL_READY_TIMEOUT_SECS 30
63#define SIS_CTRL_READY_TIMEOUT_SECS 180
63#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90 64#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
64#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10 65#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
65 66
@@ -90,7 +91,7 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
90 unsigned long timeout; 91 unsigned long timeout;
91 u32 status; 92 u32 status;
92 93
93 timeout = (timeout_secs * HZ) + jiffies;
94 timeout = (timeout_secs * PQI_HZ) + jiffies;
94 95
95 while (1) { 96 while (1) {
96 status = readl(&ctrl_info->registers->sis_firmware_status); 97 status = readl(&ctrl_info->registers->sis_firmware_status);
@@ -202,7 +203,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
202 * the top of the loop in order to give the controller time to start 203 * the top of the loop in order to give the controller time to start
203 * processing the command before we start polling. 204 * processing the command before we start polling.
204 */ 205 */
205 timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies;
206 timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * PQI_HZ) + jiffies;
206 while (1) { 207 while (1) {
207 msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS); 208 msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS);
208 doorbell = readl(&registers->sis_ctrl_to_host_doorbell); 209 doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
@@ -348,7 +349,7 @@ static int sis_wait_for_doorbell_bit_to_clear(
348 u32 doorbell_register; 349 u32 doorbell_register;
349 unsigned long timeout; 350 unsigned long timeout;
350 351
351 timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
352 timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * PQI_HZ) + jiffies;
352 353
353 while (1) { 354 while (1) {
354 doorbell_register = 355 doorbell_register =
@@ -420,6 +421,12 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
420 return readl(&ctrl_info->registers->sis_driver_scratch); 421 return readl(&ctrl_info->registers->sis_driver_scratch);
421} 422}
422 423
424void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
425{
426 writel(SIS_SOFT_RESET,
427 &ctrl_info->registers->sis_host_to_ctrl_doorbell);
428}
429
423static void __attribute__((unused)) verify_structures(void) 430static void __attribute__((unused)) verify_structures(void)
424{ 431{
425 BUILD_BUG_ON(offsetof(struct sis_base_struct, 432 BUILD_BUG_ON(offsetof(struct sis_base_struct,
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 2bf889dbf5ab..d018cb9c3f82 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -33,5 +33,6 @@ int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info);
33int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info); 33int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
34void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value); 34void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
35u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); 35u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
36void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
36 37
37#endif /* _SMARTPQI_SIS_H */ 38#endif /* _SMARTPQI_SIS_H */
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 5295277d6325..5e824fd6047a 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -127,7 +127,6 @@ static struct scsi_host_template snic_host_template = {
127 .this_id = -1, 127 .this_id = -1,
128 .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH, 128 .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
129 .can_queue = SNIC_MAX_IO_REQ, 129 .can_queue = SNIC_MAX_IO_REQ,
130 .use_clustering = ENABLE_CLUSTERING,
131 .sg_tablesize = SNIC_MAX_SG_DESC_CNT, 130 .sg_tablesize = SNIC_MAX_SG_DESC_CNT,
132 .max_sectors = 0x800, 131 .max_sectors = 0x800,
133 .shost_attrs = snic_attrs, 132 .shost_attrs = snic_attrs,
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index fc60c933d6c0..458eaba24c78 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -126,7 +126,7 @@ snic_trc_init(void)
126 int tbuf_sz = 0, ret; 126 int tbuf_sz = 0, ret;
127 127
128 tbuf_sz = (snic_trace_max_pages * PAGE_SIZE); 128 tbuf_sz = (snic_trace_max_pages * PAGE_SIZE);
129 tbuf = vmalloc(tbuf_sz);
129 tbuf = vzalloc(tbuf_sz);
130 if (!tbuf) { 130 if (!tbuf) {
131 SNIC_ERR("Failed to Allocate Trace Buffer Size. %d\n", tbuf_sz); 131 SNIC_ERR("Failed to Allocate Trace Buffer Size. %d\n", tbuf_sz);
132 SNIC_ERR("Trace Facility not enabled.\n"); 132 SNIC_ERR("Trace Facility not enabled.\n");
@@ -135,7 +135,6 @@ snic_trc_init(void)
135 return ret; 135 return ret;
136 } 136 }
137 137
138 memset(tbuf, 0, tbuf_sz);
139 trc->buf = (struct snic_trc_data *) tbuf; 138 trc->buf = (struct snic_trc_data *) tbuf;
140 spin_lock_init(&trc->lock); 139 spin_lock_init(&trc->lock);
141 140
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 9b20643ab49d..f6bef7ad65e7 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1489,6 +1489,7 @@ static struct scsi_host_template driver_template = {
1489 .eh_abort_handler = stex_abort, 1489 .eh_abort_handler = stex_abort,
1490 .eh_host_reset_handler = stex_reset, 1490 .eh_host_reset_handler = stex_reset,
1491 .this_id = -1, 1491 .this_id = -1,
1492 .dma_boundary = PAGE_SIZE - 1,
1492}; 1493};
1493 1494
1494static struct pci_device_id stex_pci_tbl[] = { 1495static struct pci_device_id stex_pci_tbl[] = {
@@ -1617,19 +1618,6 @@ static struct st_card_info stex_card_info[] = {
1617 }, 1618 },
1618}; 1619};
1619 1620
1620static int stex_set_dma_mask(struct pci_dev * pdev)
1621{
1622 int ret;
1623
1624 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
1625 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1626 return 0;
1627 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1628 if (!ret)
1629 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1630 return ret;
1631}
1632
1633static int stex_request_irq(struct st_hba *hba) 1621static int stex_request_irq(struct st_hba *hba)
1634{ 1622{
1635 struct pci_dev *pdev = hba->pdev; 1623 struct pci_dev *pdev = hba->pdev;
@@ -1710,7 +1698,9 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1710 goto out_release_regions; 1698 goto out_release_regions;
1711 } 1699 }
1712 1700
1713 err = stex_set_dma_mask(pdev);
1701 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1702 if (err)
1703 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1714 if (err) { 1704 if (err) {
1715 printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n", 1705 printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
1716 pci_name(pdev)); 1706 pci_name(pdev));
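
The stex hunk above is the stock conversion used across this series: one dma_set_mask_and_coherent() call replaces the paired pci_set_dma_mask()/pci_set_consistent_dma_mask() calls, keeping the 64-bit-then-32-bit fallback. The idiom in isolation (a generic sketch, not code from any driver here):

#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct device *dev)
{
	/* Prefer 64-bit DMA addressing; fall back to 32-bit. */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}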
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8f88348ebe42..84380bae20f1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1698,7 +1698,6 @@ static struct scsi_host_template scsi_driver = {
1698 .slave_configure = storvsc_device_configure, 1698 .slave_configure = storvsc_device_configure,
1699 .cmd_per_lun = 2048, 1699 .cmd_per_lun = 2048,
1700 .this_id = -1, 1700 .this_id = -1,
1701 .use_clustering = ENABLE_CLUSTERING,
1702 /* Make sure we dont get a sg segment crosses a page boundary */ 1701 /* Make sure we dont get a sg segment crosses a page boundary */
1703 .dma_boundary = PAGE_SIZE-1, 1702 .dma_boundary = PAGE_SIZE-1,
1704 .no_write_same = 1, 1703 .no_write_same = 1,
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 9492638296c8..95a7ea7eefa0 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -500,7 +500,7 @@ static struct scsi_host_template sun3_scsi_template = {
500 .this_id = 7, 500 .this_id = 7,
501 .sg_tablesize = SG_NONE, 501 .sg_tablesize = SG_NONE,
502 .cmd_per_lun = 2, 502 .cmd_per_lun = 2,
503 .use_clustering = DISABLE_CLUSTERING,
503 .dma_boundary = PAGE_SIZE - 1,
504 .cmd_size = NCR5380_CMD_SIZE, 504 .cmd_size = NCR5380_CMD_SIZE,
505}; 505};
506 506
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index a11efbcb7f8b..c71bd01fef94 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -529,11 +529,10 @@ static int esp_sbus_probe(struct platform_device *op)
529 int hme = 0; 529 int hme = 0;
530 int ret; 530 int ret;
531 531
532 if (dp->parent &&
533 (!strcmp(dp->parent->name, "espdma") ||
534 !strcmp(dp->parent->name, "dma")))
532 if (of_node_name_eq(dp->parent, "espdma") ||
533 of_node_name_eq(dp->parent, "dma"))
535 dma_node = dp->parent; 534 dma_node = dp->parent;
536 else if (!strcmp(dp->name, "SUNW,fas")) {
535 else if (of_node_name_eq(dp, "SUNW,fas")) {
537 dma_node = op->dev.of_node; 536 dma_node = op->dev.of_node;
538 hme = 1; 537 hme = 1;
539 } 538 }
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 5f10aa9bad9b..57f6d63e4c40 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1312,9 +1312,9 @@ static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
1312 sprintf(np->s.inst_name, "sym%d", np->s.unit); 1312 sprintf(np->s.inst_name, "sym%d", np->s.unit);
1313 1313
1314 if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) && 1314 if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) &&
1315 !pci_set_dma_mask(pdev, DMA_DAC_MASK)) {
1315 !dma_set_mask(&pdev->dev, DMA_DAC_MASK)) {
1316 set_dac(np); 1316 set_dac(np);
1317 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
1317 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
1318 printf_warning("%s: No suitable DMA available\n", sym_name(np)); 1318 printf_warning("%s: No suitable DMA available\n", sym_name(np));
1319 goto attach_failed; 1319 goto attach_failed;
1320 } 1320 }
@@ -1660,7 +1660,6 @@ static struct scsi_host_template sym2_template = {
1660 .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler, 1660 .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
1661 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler, 1661 .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler,
1662 .this_id = 7, 1662 .this_id = 7,
1663 .use_clustering = ENABLE_CLUSTERING,
1664 .max_sectors = 0xFFFF, 1663 .max_sectors = 0xFFFF,
1665#ifdef SYM_LINUX_PROC_INFO_SUPPORT 1664#ifdef SYM_LINUX_PROC_INFO_SUPPORT
1666 .show_info = sym_show_info, 1665 .show_info = sym_show_info,
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 2ddd426323e9..2ddbb26d9c26 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -80,6 +80,14 @@ config SCSI_UFSHCD_PLATFORM
80 80
81 If unsure, say N. 81 If unsure, say N.
82 82
83config SCSI_UFS_CDNS_PLATFORM
84 tristate "Cadence UFS Controller platform driver"
85 depends on SCSI_UFSHCD_PLATFORM
86 help
87 This selects the Cadence-specific additions to UFSHCD platform driver.
88
89 If unsure, say N.
90
83config SCSI_UFS_DWC_TC_PLATFORM 91config SCSI_UFS_DWC_TC_PLATFORM
84 tristate "DesignWare platform support using a G210 Test Chip" 92 tristate "DesignWare platform support using a G210 Test Chip"
85 depends on SCSI_UFSHCD_PLATFORM 93 depends on SCSI_UFSHCD_PLATFORM
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index aca481329828..a3bd70c3652c 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -2,6 +2,7 @@
2# UFSHCD makefile 2# UFSHCD makefile
3obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o 3obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
4obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o 4obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
5obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
5obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o 6obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
6obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o 7obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
7ufshcd-core-y += ufshcd.o ufs-sysfs.o 8ufshcd-core-y += ufshcd.o ufs-sysfs.o
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
new file mode 100644
index 000000000000..4a37b4f57164
--- /dev/null
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -0,0 +1,148 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Platform UFS Host driver for Cadence controller
4 *
5 * Copyright (C) 2018 Cadence Design Systems, Inc.
6 *
7 * Authors:
8 * Jan Kotas <jank@cadence.com>
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/of.h>
16#include <linux/time.h>
17
18#include "ufshcd-pltfrm.h"
19
20#define CDNS_UFS_REG_HCLKDIV 0xFC
21
22/**
23 * Sets HCLKDIV register value based on the core_clk
24 * @hba: host controller instance
25 *
26 * Return zero for success and non-zero for failure
27 */
28static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
29{
30 struct ufs_clk_info *clki;
31 struct list_head *head = &hba->clk_list_head;
32 unsigned long core_clk_rate = 0;
33 u32 core_clk_div = 0;
34
35 if (list_empty(head))
36 return 0;
37
38 list_for_each_entry(clki, head, list) {
39 if (IS_ERR_OR_NULL(clki->clk))
40 continue;
41 if (!strcmp(clki->name, "core_clk"))
42 core_clk_rate = clk_get_rate(clki->clk);
43 }
44
45 if (!core_clk_rate) {
46 dev_err(hba->dev, "%s: unable to find core_clk rate\n",
47 __func__);
48 return -EINVAL;
49 }
50
51 core_clk_div = core_clk_rate / USEC_PER_SEC;
52
53 ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV);
54 /**
55 * Make sure the register was updated,
56 * UniPro layer will not work with an incorrect value.
57 */
58 mb();
59
60 return 0;
61}
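
core_clk_rate / USEC_PER_SEC is simply the core clock expressed in MHz, i.e. cycles per microsecond, which appears to be what HCLKDIV expects. A worked example with an assumed rate:

/*
 * Assume core_clk = 198 MHz: 198000000 / USEC_PER_SEC = 198 is
 * written to CDNS_UFS_REG_HCLKDIV. Note the integer division: a
 * 19.2 MHz clock would truncate to 19.
 */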
62
63/**
64 * Sets clocks used by the controller
65 * @hba: host controller instance
66 * @on: if true, enable clocks, otherwise disable
67 * @status: notify stage (pre, post change)
68 *
69 * Return zero for success and non-zero for failure
70 */
71static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on,
72 enum ufs_notify_change_status status)
73{
74 if ((!on) || (status == PRE_CHANGE))
75 return 0;
76
77 return cdns_ufs_set_hclkdiv(hba);
78}
79
80static struct ufs_hba_variant_ops cdns_pltfm_hba_vops = {
81 .name = "cdns-ufs-pltfm",
82 .setup_clocks = cdns_ufs_setup_clocks,
83};
84
85/**
86 * cdns_ufs_pltfrm_probe - probe routine of the driver
87 * @pdev: pointer to platform device handle
88 *
89 * Return zero for success and non-zero for failure
90 */
91static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
92{
93 int err;
94 struct device *dev = &pdev->dev;
95
96 /* Perform generic probe */
97 err = ufshcd_pltfrm_init(pdev, &cdns_pltfm_hba_vops);
98 if (err)
99 dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
100
101 return err;
102}
103
104/**
105 * cdns_ufs_pltfrm_remove - removes the ufs driver
106 * @pdev: pointer to platform device handle
107 *
108 * Always returns 0
109 */
110static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
111{
112 struct ufs_hba *hba = platform_get_drvdata(pdev);
113
114 ufshcd_remove(hba);
115 return 0;
116}
117
118static const struct of_device_id cdns_ufs_of_match[] = {
119 { .compatible = "cdns,ufshc" },
120 {},
121};
122
123MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
124
125static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
126 .suspend = ufshcd_pltfrm_suspend,
127 .resume = ufshcd_pltfrm_resume,
128 .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
129 .runtime_resume = ufshcd_pltfrm_runtime_resume,
130 .runtime_idle = ufshcd_pltfrm_runtime_idle,
131};
132
133static struct platform_driver cdns_ufs_pltfrm_driver = {
134 .probe = cdns_ufs_pltfrm_probe,
135 .remove = cdns_ufs_pltfrm_remove,
136 .driver = {
137 .name = "cdns-ufshcd",
138 .pm = &cdns_ufs_dev_pm_ops,
139 .of_match_table = cdns_ufs_of_match,
140 },
141};
142
143module_platform_driver(cdns_ufs_pltfrm_driver);
144
145MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
146MODULE_DESCRIPTION("Cadence UFS host controller platform driver");
147MODULE_LICENSE("GPL v2");
148MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 58087d3916d0..dd65fea07687 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -46,7 +46,7 @@
46#define QUERY_DESC_HDR_SIZE 2 46#define QUERY_DESC_HDR_SIZE 2
47#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ 47#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
48 (sizeof(struct utp_upiu_header))) 48 (sizeof(struct utp_upiu_header)))
49#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
49#define UFS_SENSE_SIZE 18
50 50
51#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ 51#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
52 cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ 52 cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -378,6 +378,20 @@ enum query_opcode {
378 UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, 378 UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
379}; 379};
380 380
381/* bRefClkFreq attribute values */
382enum ufs_ref_clk_freq {
383 REF_CLK_FREQ_19_2_MHZ = 0,
384 REF_CLK_FREQ_26_MHZ = 1,
385 REF_CLK_FREQ_38_4_MHZ = 2,
386 REF_CLK_FREQ_52_MHZ = 3,
387 REF_CLK_FREQ_INVAL = -1,
388};
389
390struct ufs_ref_clk {
391 unsigned long freq_hz;
392 enum ufs_ref_clk_freq val;
393};
394
381/* Query response result code */ 395/* Query response result code */
382enum { 396enum {
383 QUERY_RESULT_SUCCESS = 0x00, 397 QUERY_RESULT_SUCCESS = 0x00,
@@ -444,7 +458,7 @@ struct utp_cmd_rsp {
444 __be32 residual_transfer_count; 458 __be32 residual_transfer_count;
445 __be32 reserved[4]; 459 __be32 reserved[4];
446 __be16 sense_data_len; 460 __be16 sense_data_len;
447 u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
461 u8 sense_data[UFS_SENSE_SIZE];
448}; 462};
449 463
450/** 464/**
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f1c57cd33b5b..9ba7671b84f8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -51,8 +51,6 @@
51#define CREATE_TRACE_POINTS 51#define CREATE_TRACE_POINTS
52#include <trace/events/ufs.h> 52#include <trace/events/ufs.h>
53 53
54#define UFSHCD_REQ_SENSE_SIZE 18
55
56#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ 54#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
57 UTP_TASK_REQ_COMPL |\ 55 UTP_TASK_REQ_COMPL |\
58 UFSHCD_ERROR_MASK) 56 UFSHCD_ERROR_MASK)
@@ -1551,6 +1549,7 @@ start:
1551 * currently running. Hence, fall through to cancel gating 1549 * currently running. Hence, fall through to cancel gating
1552 * work and to enable clocks. 1550 * work and to enable clocks.
1553 */ 1551 */
1552 /* fallthrough */
1554 case CLKS_OFF: 1553 case CLKS_OFF:
1555 ufshcd_scsi_block_requests(hba); 1554 ufshcd_scsi_block_requests(hba);
1556 hba->clk_gating.state = REQ_CLKS_ON; 1555 hba->clk_gating.state = REQ_CLKS_ON;
@@ -1562,6 +1561,7 @@ start:
1562 * fall through to check if we should wait for this 1561 * fall through to check if we should wait for this
1563 * work to be done or not. 1562 * work to be done or not.
1564 */ 1563 */
1564 /* fallthrough */
1565 case REQ_CLKS_ON: 1565 case REQ_CLKS_ON:
1566 if (async) { 1566 if (async) {
1567 rc = -EAGAIN; 1567 rc = -EAGAIN;
@@ -1890,11 +1890,10 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1890 int len_to_copy; 1890 int len_to_copy;
1891 1891
1892 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); 1892 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1893 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len); 1893 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
1894 1894
1895 memcpy(lrbp->sense_buffer, 1895 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1896 lrbp->ucd_rsp_ptr->sr.sense_data, 1896 len_to_copy);
1897 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
1898 } 1897 }
1899} 1898}
1900 1899
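
The rewrite above also folds the old double clamp (min_t against
RESPONSE_UPIU_SENSE_DATA_LENGTH, then again against UFSHCD_REQ_SENSE_SIZE,
both 18) into one min_t against UFS_SENSE_SIZE, so a device-reported
sense_data_len can never size the memcpy on its own. A minimal user-space
sketch of the same defensive pattern, with invented names:

    #include <stdio.h>
    #include <string.h>

    #define UFS_SENSE_SIZE 18

    /* Copy at most UFS_SENSE_SIZE bytes, whatever length the device claims. */
    static void copy_sense(unsigned char *dst, const unsigned char *src,
                           int device_reported_len)
    {
            int len_to_copy = device_reported_len < UFS_SENSE_SIZE ?
                              device_reported_len : UFS_SENSE_SIZE;

            if (len_to_copy > 0)
                    memcpy(dst, src, len_to_copy);
    }

    int main(void)
    {
            unsigned char rsp[64] = "fixed-format sense bytes...";
            unsigned char buf[UFS_SENSE_SIZE] = { 0 };

            copy_sense(buf, rsp, 255);  /* bogus device length is clamped */
            printf("copied at most %d bytes\n", UFS_SENSE_SIZE);
            return 0;
    }
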
@@ -2456,7 +2455,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2456 2455
2457 WARN_ON(lrbp->cmd); 2456 WARN_ON(lrbp->cmd);
2458 lrbp->cmd = cmd; 2457 lrbp->cmd = cmd;
2459 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE; 2458 lrbp->sense_bufflen = UFS_SENSE_SIZE;
2460 lrbp->sense_buffer = cmd->sense_buffer; 2459 lrbp->sense_buffer = cmd->sense_buffer;
2461 lrbp->task_tag = tag; 2460 lrbp->task_tag = tag;
2462 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); 2461 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -4620,6 +4619,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4620 switch (scsi_status) { 4619 switch (scsi_status) {
4621 case SAM_STAT_CHECK_CONDITION: 4620 case SAM_STAT_CHECK_CONDITION:
4622 ufshcd_copy_sense_data(lrbp); 4621 ufshcd_copy_sense_data(lrbp);
4622 /* fallthrough */
4623 case SAM_STAT_GOOD: 4623 case SAM_STAT_GOOD:
4624 result |= DID_OK << 16 | 4624 result |= DID_OK << 16 |
4625 COMMAND_COMPLETE << 8 | 4625 COMMAND_COMPLETE << 8 |
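
The /* fallthrough */ comments added here and in the clock-gating hunks above
are the annotations GCC's -Wimplicit-fallthrough pattern matching recognizes,
so the deliberate drop from CHECK CONDITION into GOOD compiles clean. A
runnable sketch of the idiom:

    #include <stdio.h>

    static int classify(int status)
    {
            int result = 0;

            switch (status) {
            case 2:                         /* e.g. CHECK CONDITION */
                    result |= 0x100;        /* note that sense was captured */
                    /* fallthrough */       /* deliberate: share the OK path */
            case 0:                         /* e.g. GOOD */
                    result |= 0x1;
                    break;
            default:
                    result = -1;
            }
            return result;
    }

    int main(void)
    {
            printf("%d %d %d\n", classify(2), classify(0), classify(7));
            return 0;                       /* prints: 257 1 -1 */
    }
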
@@ -6701,6 +6701,74 @@ static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6701 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; 6701 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6702} 6702}
6703 6703
6704static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
6705 {19200000, REF_CLK_FREQ_19_2_MHZ},
6706 {26000000, REF_CLK_FREQ_26_MHZ},
6707 {38400000, REF_CLK_FREQ_38_4_MHZ},
6708 {52000000, REF_CLK_FREQ_52_MHZ},
6709 {0, REF_CLK_FREQ_INVAL},
6710};
6711
6712static enum ufs_ref_clk_freq
6713ufs_get_bref_clk_from_hz(unsigned long freq)
6714{
6715 int i;
6716
6717 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
6718 if (ufs_ref_clk_freqs[i].freq_hz == freq)
6719 return ufs_ref_clk_freqs[i].val;
6720
6721 return REF_CLK_FREQ_INVAL;
6722}
6723
6724void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
6725{
6726 unsigned long freq;
6727
6728 freq = clk_get_rate(refclk);
6729
6730 hba->dev_ref_clk_freq =
6731 ufs_get_bref_clk_from_hz(freq);
6732
6733 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
6734 dev_err(hba->dev,
6735 "invalid ref_clk setting = %ld\n", freq);
6736}
6737
6738static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
6739{
6740 int err;
6741 u32 ref_clk;
6742 u32 freq = hba->dev_ref_clk_freq;
6743
6744 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6745 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
6746
6747 if (err) {
6748 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
6749 err);
6750 goto out;
6751 }
6752
6753 if (ref_clk == freq)
6754 goto out; /* nothing to update */
6755
6756 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6757 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
6758
6759 if (err) {
6760 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
6761 ufs_ref_clk_freqs[freq].freq_hz);
6762 goto out;
6763 }
6764
6765 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
6766 ufs_ref_clk_freqs[freq].freq_hz);
6767
6768out:
6769 return err;
6770}
6771
6704/** 6772/**
6705 * ufshcd_probe_hba - probe hba to detect device and initialize 6773 * ufshcd_probe_hba - probe hba to detect device and initialize
6706 * @hba: per-adapter instance 6774 * @hba: per-adapter instance
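
ufs_get_bref_clk_from_hz() walks a table whose end is marked by a zero
freq_hz sentinel, keeping the loop bound in the data rather than in an
ARRAY_SIZE(). The same lookup extracted into standalone user-space C:

    #include <stdio.h>

    enum ufs_ref_clk_freq {
            REF_CLK_FREQ_19_2_MHZ = 0,
            REF_CLK_FREQ_26_MHZ   = 1,
            REF_CLK_FREQ_38_4_MHZ = 2,
            REF_CLK_FREQ_52_MHZ   = 3,
            REF_CLK_FREQ_INVAL    = -1,
    };

    struct ufs_ref_clk {
            unsigned long freq_hz;
            enum ufs_ref_clk_freq val;
    };

    /* A zero freq_hz terminates the table. */
    static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
            { 19200000, REF_CLK_FREQ_19_2_MHZ },
            { 26000000, REF_CLK_FREQ_26_MHZ },
            { 38400000, REF_CLK_FREQ_38_4_MHZ },
            { 52000000, REF_CLK_FREQ_52_MHZ },
            { 0,        REF_CLK_FREQ_INVAL },
    };

    static enum ufs_ref_clk_freq bref_clk_from_hz(unsigned long freq)
    {
            int i;

            for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
                    if (ufs_ref_clk_freqs[i].freq_hz == freq)
                            return ufs_ref_clk_freqs[i].val;
            return REF_CLK_FREQ_INVAL;
    }

    int main(void)
    {
            printf("%d %d\n", bref_clk_from_hz(26000000),
                   bref_clk_from_hz(12345));    /* prints: 1 -1 */
            return 0;
    }
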
@@ -6766,6 +6834,12 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
6766 "%s: Failed getting max supported power mode\n", 6834 "%s: Failed getting max supported power mode\n",
6767 __func__); 6835 __func__);
6768 } else { 6836 } else {
6837 /*
6838 * Set the right value to bRefClkFreq before attempting to
6839 * switch to HS gears.
6840 */
6841 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
6842 ufshcd_set_dev_ref_clk(hba);
6769 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); 6843 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6770 if (ret) { 6844 if (ret) {
6771 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", 6845 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
@@ -6910,6 +6984,7 @@ static struct scsi_host_template ufshcd_driver_template = {
6910 .max_host_blocked = 1, 6984 .max_host_blocked = 1,
6911 .track_queue_depth = 1, 6985 .track_queue_depth = 1,
6912 .sdev_groups = ufshcd_driver_groups, 6986 .sdev_groups = ufshcd_driver_groups,
6987 .dma_boundary = PAGE_SIZE - 1,
6913}; 6988};
6914 6989
6915static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, 6990static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
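
The new .dma_boundary = PAGE_SIZE - 1 entry tells the block layer that no DMA
segment handed to this host may cross a page boundary; the same idiom stands
in for the removed clustering toggle in the tcm_loop hunk further down.
Conceptually, two chunks may only merge while the combined range stays inside
one (boundary + 1)-aligned window. A user-space sketch of that mask test (the
kernel's real merge logic lives in block/blk-merge.c and handles more cases):

    #include <stdio.h>
    #include <stdbool.h>

    #define DMA_BOUNDARY 0xfffUL    /* PAGE_SIZE - 1 for 4 KiB pages */

    /* True if [addr, addr + len) crosses a (boundary + 1)-aligned window. */
    static bool crosses_boundary(unsigned long addr, unsigned long len,
                                 unsigned long boundary)
    {
            return (addr | boundary) != ((addr + len - 1) | boundary);
    }

    int main(void)
    {
            printf("%d\n", crosses_boundary(0x0f00, 0x100, DMA_BOUNDARY)); /* 0 */
            printf("%d\n", crosses_boundary(0x0f80, 0x100, DMA_BOUNDARY)); /* 1 */
            return 0;
    }
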
@@ -7252,6 +7327,14 @@ static int ufshcd_init_clocks(struct ufs_hba *hba)
7252 goto out; 7327 goto out;
7253 } 7328 }
7254 7329
7330 /*
7331 * Parse device ref clk freq as per device tree "ref_clk".
7332 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
7333 * in ufshcd_alloc_host().
7334 */
7335 if (!strcmp(clki->name, "ref_clk"))
7336 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7337
7255 if (clki->max_freq) { 7338 if (clki->max_freq) {
7256 ret = clk_set_rate(clki->clk, clki->max_freq); 7339 ret = clk_set_rate(clki->clk, clki->max_freq);
7257 if (ret) { 7340 if (ret) {
@@ -7379,19 +7462,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7379 0, 7462 0,
7380 0, 7463 0,
7381 0, 7464 0,
7382 UFSHCD_REQ_SENSE_SIZE, 7465 UFS_SENSE_SIZE,
7383 0}; 7466 0};
7384 char *buffer; 7467 char *buffer;
7385 int ret; 7468 int ret;
7386 7469
7387 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL); 7470 buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
7388 if (!buffer) { 7471 if (!buffer) {
7389 ret = -ENOMEM; 7472 ret = -ENOMEM;
7390 goto out; 7473 goto out;
7391 } 7474 }
7392 7475
7393 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer, 7476 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7394 UFSHCD_REQ_SENSE_SIZE, NULL, NULL, 7477 UFS_SENSE_SIZE, NULL, NULL,
7395 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL); 7478 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7396 if (ret) 7479 if (ret)
7397 pr_err("%s: failed with err %d\n", __func__, ret); 7480 pr_err("%s: failed with err %d\n", __func__, ret);
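
ufshcd_send_request_sense() now allocates and asks for UFS_SENSE_SIZE (18)
bytes, the same fixed-format sense length used everywhere else after this
series. For reference, byte 4 of the 6-byte REQUEST SENSE CDB built above
carries that allocation length; a standalone sketch of the CDB layout:

    #include <stdio.h>

    #define REQUEST_SENSE  0x03     /* SPC operation code */
    #define UFS_SENSE_SIZE 18

    int main(void)
    {
            unsigned char cmd[6] = {
                    REQUEST_SENSE,  /* opcode */
                    0, 0, 0,        /* DESC=0 (fixed format) / reserved */
                    UFS_SENSE_SIZE, /* allocation length */
                    0,              /* control */
            };
            int i;

            for (i = 0; i < 6; i++)
                    printf("%02x ", cmd[i]);
            printf("\n");           /* prints: 03 00 00 00 12 00 */
            return 0;
    }
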
@@ -8105,6 +8188,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
8105 hba->host = host; 8188 hba->host = host;
8106 hba->dev = dev; 8189 hba->dev = dev;
8107 *hba_handle = hba; 8190 *hba_handle = hba;
8191 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
8108 8192
8109 INIT_LIST_HEAD(&hba->clk_list_head); 8193 INIT_LIST_HEAD(&hba->clk_list_head);
8110 8194
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 1a1c2b487a4e..69ba7445d2b3 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -550,6 +550,7 @@ struct ufs_hba {
550 void *priv; 550 void *priv;
551 unsigned int irq; 551 unsigned int irq;
552 bool is_irq_enabled; 552 bool is_irq_enabled;
553 enum ufs_ref_clk_freq dev_ref_clk_freq;
553 554
554 /* Interrupt aggregation support is broken */ 555 /* Interrupt aggregation support is broken */
555 #define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1 556 #define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1
@@ -768,6 +769,7 @@ void ufshcd_remove(struct ufs_hba *);
768int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, 769int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
769 u32 val, unsigned long interval_us, 770 u32 val, unsigned long interval_us,
770 unsigned long timeout_ms, bool can_sleep); 771 unsigned long timeout_ms, bool can_sleep);
772void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
771 773
772static inline void check_upiu_size(void) 774static inline void check_upiu_size(void)
773{ 775{
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c3c95b314286..772b976e4ee4 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -68,33 +68,6 @@ struct virtio_scsi_vq {
68 struct virtqueue *vq; 68 struct virtqueue *vq;
69}; 69};
70 70
71/*
72 * Per-target queue state.
73 *
74 * This struct holds the data needed by the queue steering policy. When a
75 * target is sent multiple requests, we need to drive them to the same queue so
76 * that FIFO processing order is kept. However, if a target was idle, we can
77 * choose a queue arbitrarily. In this case the queue is chosen according to
78 * the current VCPU, so the driver expects the number of request queues to be
79 * equal to the number of VCPUs. This makes it easy and fast to select the
80 * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
81 * (each virtqueue's affinity is set to the CPU that "owns" the queue).
82 *
83 * tgt_seq is held to serialize reading and writing req_vq.
84 *
85 * Decrements of reqs are never concurrent with writes of req_vq: before the
86 * decrement reqs will be != 0; after the decrement the virtqueue completion
87 * routine will not use the req_vq so it can be changed by a new request.
88 * Thus they can happen outside the tgt_seq, provided of course we make reqs
89 * an atomic_t.
90 */
91struct virtio_scsi_target_state {
92 seqcount_t tgt_seq;
93
94 /* Currently active virtqueue for requests sent to this target. */
95 struct virtio_scsi_vq *req_vq;
96};
97
98/* Driver instance state */ 71/* Driver instance state */
99struct virtio_scsi { 72struct virtio_scsi {
100 struct virtio_device *vdev; 73 struct virtio_device *vdev;
@@ -693,29 +666,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
693 return virtscsi_tmf(vscsi, cmd); 666 return virtscsi_tmf(vscsi, cmd);
694} 667}
695 668
696static int virtscsi_target_alloc(struct scsi_target *starget)
697{
698 struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
699 struct virtio_scsi *vscsi = shost_priv(sh);
700
701 struct virtio_scsi_target_state *tgt =
702 kmalloc(sizeof(*tgt), GFP_KERNEL);
703 if (!tgt)
704 return -ENOMEM;
705
706 seqcount_init(&tgt->tgt_seq);
707 tgt->req_vq = &vscsi->req_vqs[0];
708
709 starget->hostdata = tgt;
710 return 0;
711}
712
713static void virtscsi_target_destroy(struct scsi_target *starget)
714{
715 struct virtio_scsi_target_state *tgt = starget->hostdata;
716 kfree(tgt);
717}
718
719static int virtscsi_map_queues(struct Scsi_Host *shost) 669static int virtscsi_map_queues(struct Scsi_Host *shost)
720{ 670{
721 struct virtio_scsi *vscsi = shost_priv(shost); 671 struct virtio_scsi *vscsi = shost_priv(shost);
@@ -748,9 +698,6 @@ static struct scsi_host_template virtscsi_host_template = {
748 .slave_alloc = virtscsi_device_alloc, 698 .slave_alloc = virtscsi_device_alloc,
749 699
750 .dma_boundary = UINT_MAX, 700 .dma_boundary = UINT_MAX,
751 .use_clustering = ENABLE_CLUSTERING,
752 .target_alloc = virtscsi_target_alloc,
753 .target_destroy = virtscsi_target_destroy,
754 .map_queues = virtscsi_map_queues, 701 .map_queues = virtscsi_map_queues,
755 .track_queue_depth = 1, 702 .track_queue_depth = 1,
756 .force_blk_mq = 1, 703 .force_blk_mq = 1,
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 0d6b2a88fc8e..ecee4b3ff073 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1007,7 +1007,6 @@ static struct scsi_host_template pvscsi_template = {
1007 .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT, 1007 .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
1008 .dma_boundary = UINT_MAX, 1008 .dma_boundary = UINT_MAX,
1009 .max_sectors = 0xffff, 1009 .max_sectors = 0xffff,
1010 .use_clustering = ENABLE_CLUSTERING,
1011 .change_queue_depth = pvscsi_change_queue_depth, 1010 .change_queue_depth = pvscsi_change_queue_depth,
1012 .eh_abort_handler = pvscsi_abort, 1011 .eh_abort_handler = pvscsi_abort,
1013 .eh_device_reset_handler = pvscsi_device_reset, 1012 .eh_device_reset_handler = pvscsi_device_reset,
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 974bfb3f30f4..e3310e9488d2 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -153,8 +153,6 @@ static int wd719x_direct_cmd(struct wd719x *wd, u8 opcode, u8 dev, u8 lun,
153 153
154static void wd719x_destroy(struct wd719x *wd) 154static void wd719x_destroy(struct wd719x *wd)
155{ 155{
156 struct wd719x_scb *scb;
157
158 /* stop the RISC */ 156 /* stop the RISC */
159 if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0, 157 if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0,
160 WD719X_WAIT_FOR_RISC)) 158 WD719X_WAIT_FOR_RISC))
@@ -162,37 +160,35 @@ static void wd719x_destroy(struct wd719x *wd)
162 /* disable RISC */ 160 /* disable RISC */
163 wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0); 161 wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0);
164 162
165 /* free all SCBs */ 163 WARN_ON_ONCE(!list_empty(&wd->active_scbs));
166 list_for_each_entry(scb, &wd->active_scbs, list) 164
167 pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
168 scb->phys);
169 list_for_each_entry(scb, &wd->free_scbs, list)
170 pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb,
171 scb->phys);
172 /* free internal buffers */ 165 /* free internal buffers */
173 pci_free_consistent(wd->pdev, wd->fw_size, wd->fw_virt, wd->fw_phys); 166 dma_free_coherent(&wd->pdev->dev, wd->fw_size, wd->fw_virt,
167 wd->fw_phys);
174 wd->fw_virt = NULL; 168 wd->fw_virt = NULL;
175 pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt, 169 dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
176 wd->hash_phys); 170 wd->hash_phys);
177 wd->hash_virt = NULL; 171 wd->hash_virt = NULL;
178 pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param), 172 dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param),
179 wd->params, wd->params_phys); 173 wd->params, wd->params_phys);
180 wd->params = NULL; 174 wd->params = NULL;
181 free_irq(wd->pdev->irq, wd); 175 free_irq(wd->pdev->irq, wd);
182} 176}
183 177
184/* finish a SCSI command, mark SCB (if any) as free, unmap buffers */ 178/* finish a SCSI command, unmap buffers */
185static void wd719x_finish_cmd(struct scsi_cmnd *cmd, int result) 179static void wd719x_finish_cmd(struct wd719x_scb *scb, int result)
186{ 180{
181 struct scsi_cmnd *cmd = scb->cmd;
187 struct wd719x *wd = shost_priv(cmd->device->host); 182 struct wd719x *wd = shost_priv(cmd->device->host);
188 struct wd719x_scb *scb = (struct wd719x_scb *) cmd->host_scribble;
189 183
190 if (scb) { 184 list_del(&scb->list);
191 list_move(&scb->list, &wd->free_scbs); 185
192 dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle, 186 dma_unmap_single(&wd->pdev->dev, scb->phys,
193 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); 187 sizeof(struct wd719x_scb), DMA_BIDIRECTIONAL);
194 scsi_dma_unmap(cmd); 188 scsi_dma_unmap(cmd);
195 } 189 dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
190 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
191
196 cmd->result = result << 16; 192 cmd->result = result << 16;
197 cmd->scsi_done(cmd); 193 cmd->scsi_done(cmd);
198} 194}
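
From here on wd719x takes its SCB from scsi_cmd_priv() instead of a
driver-managed free list: declaring .cmd_size in the host template (added
near the end of this file's diff) makes the SCSI midlayer reserve that many
extra bytes per command, and scsi_cmd_priv(cmd) returns a pointer to them. A
kernel-style sketch of the pattern, not standalone-buildable, with made-up
"foo" names:

    /* Per-command driver data, sized via the host template. */
    struct foo_cmd_priv {
            dma_addr_t phys;
            struct list_head list;
    };

    static int foo_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
    {
            /* points into the per-command area reserved via .cmd_size */
            struct foo_cmd_priv *priv = scsi_cmd_priv(cmd);

            INIT_LIST_HEAD(&priv->list);
            return 0;
    }

    static struct scsi_host_template foo_template = {
            .cmd_size     = sizeof(struct foo_cmd_priv),
            .queuecommand = foo_queuecommand,
            /* ... */
    };
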
@@ -202,36 +198,10 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
202{ 198{
203 int i, count_sg; 199 int i, count_sg;
204 unsigned long flags; 200 unsigned long flags;
205 struct wd719x_scb *scb; 201 struct wd719x_scb *scb = scsi_cmd_priv(cmd);
206 struct wd719x *wd = shost_priv(sh); 202 struct wd719x *wd = shost_priv(sh);
207 dma_addr_t phys;
208 203
209 cmd->host_scribble = NULL;
210
211 /* get a free SCB - either from existing ones or allocate a new one */
212 spin_lock_irqsave(wd->sh->host_lock, flags);
213 scb = list_first_entry_or_null(&wd->free_scbs, struct wd719x_scb, list);
214 if (scb) {
215 list_del(&scb->list);
216 phys = scb->phys;
217 } else {
218 spin_unlock_irqrestore(wd->sh->host_lock, flags);
219 scb = pci_alloc_consistent(wd->pdev, sizeof(struct wd719x_scb),
220 &phys);
221 spin_lock_irqsave(wd->sh->host_lock, flags);
222 if (!scb) {
223 dev_err(&wd->pdev->dev, "unable to allocate SCB\n");
224 wd719x_finish_cmd(cmd, DID_ERROR);
225 spin_unlock_irqrestore(wd->sh->host_lock, flags);
226 return 0;
227 }
228 }
229 memset(scb, 0, sizeof(struct wd719x_scb));
230 list_add(&scb->list, &wd->active_scbs);
231
232 scb->phys = phys;
233 scb->cmd = cmd; 204 scb->cmd = cmd;
234 cmd->host_scribble = (char *) scb;
235 205
236 scb->CDB_tag = 0; /* Tagged queueing not supported yet */ 206 scb->CDB_tag = 0; /* Tagged queueing not supported yet */
237 scb->devid = cmd->device->id; 207 scb->devid = cmd->device->id;
@@ -240,10 +210,19 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
240 /* copy the command */ 210 /* copy the command */
241 memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len); 211 memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len);
242 212
213 /* map SCB */
214 scb->phys = dma_map_single(&wd->pdev->dev, scb, sizeof(*scb),
215 DMA_BIDIRECTIONAL);
216
217 if (dma_mapping_error(&wd->pdev->dev, scb->phys))
218 goto out_error;
219
243 /* map sense buffer */ 220 /* map sense buffer */
244 scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE; 221 scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
245 cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer, 222 cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
246 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); 223 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
224 if (dma_mapping_error(&wd->pdev->dev, cmd->SCp.dma_handle))
225 goto out_unmap_scb;
247 scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle); 226 scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);
248 227
249 /* request autosense */ 228 /* request autosense */
@@ -258,11 +237,8 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
258 237
259 /* Scatter/gather */ 238 /* Scatter/gather */
260 count_sg = scsi_dma_map(cmd); 239 count_sg = scsi_dma_map(cmd);
261 if (count_sg < 0) { 240 if (count_sg < 0)
262 wd719x_finish_cmd(cmd, DID_ERROR); 241 goto out_unmap_sense;
263 spin_unlock_irqrestore(wd->sh->host_lock, flags);
264 return 0;
265 }
266 BUG_ON(count_sg > WD719X_SG); 242 BUG_ON(count_sg > WD719X_SG);
267 243
268 if (count_sg) { 244 if (count_sg) {
@@ -283,19 +259,33 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
283 scb->data_p = 0; 259 scb->data_p = 0;
284 } 260 }
285 261
262 spin_lock_irqsave(wd->sh->host_lock, flags);
263
286 /* check if the Command register is free */ 264 /* check if the Command register is free */
287 if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) { 265 if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) {
288 spin_unlock_irqrestore(wd->sh->host_lock, flags); 266 spin_unlock_irqrestore(wd->sh->host_lock, flags);
289 return SCSI_MLQUEUE_HOST_BUSY; 267 return SCSI_MLQUEUE_HOST_BUSY;
290 } 268 }
291 269
270 list_add(&scb->list, &wd->active_scbs);
271
292 /* write pointer to the AMR */ 272 /* write pointer to the AMR */
293 wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys); 273 wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys);
294 /* send SCB opcode */ 274 /* send SCB opcode */
295 wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB); 275 wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB);
296 276
297 spin_unlock_irqrestore(wd->sh->host_lock, flags); 277 spin_unlock_irqrestore(wd->sh->host_lock, flags);
278 return 0;
298 279
280out_unmap_sense:
281 dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
282 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
283out_unmap_scb:
284 dma_unmap_single(&wd->pdev->dev, scb->phys, sizeof(*scb),
285 DMA_BIDIRECTIONAL);
286out_error:
287 cmd->result = DID_ERROR << 16;
288 cmd->scsi_done(cmd);
299 return 0; 289 return 0;
300} 290}
301 291
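
The reworked queuecommand checks every dma_map_single() with
dma_mapping_error() and unwinds through ordered goto labels, so a failure at
any step releases exactly the resources taken before it. The same ladder in
runnable user-space form, with malloc() standing in for the mappings:

    #include <stdio.h>
    #include <stdlib.h>

    static int do_command(int fail_at)
    {
            void *scb_map = NULL, *sense_map = NULL, *sg_map = NULL;

            scb_map = (fail_at == 1) ? NULL : malloc(64);
            if (!scb_map)
                    goto out_error;
            sense_map = (fail_at == 2) ? NULL : malloc(18);
            if (!sense_map)
                    goto out_unmap_scb;
            sg_map = (fail_at == 3) ? NULL : malloc(128);
            if (!sg_map)
                    goto out_unmap_sense;

            free(sg_map);
            free(sense_map);
            free(scb_map);
            return 0;

    out_unmap_sense:                /* undo in reverse order of setup */
            free(sense_map);
    out_unmap_scb:
            free(scb_map);
    out_error:
            return -1;
    }

    int main(void)
    {
            printf("%d %d\n", do_command(0), do_command(2));  /* 0 -1 */
            return 0;
    }
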
@@ -327,8 +317,8 @@ static int wd719x_chip_init(struct wd719x *wd)
327 wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size; 317 wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size;
328 318
329 if (!wd->fw_virt) 319 if (!wd->fw_virt)
330 wd->fw_virt = pci_alloc_consistent(wd->pdev, wd->fw_size, 320 wd->fw_virt = dma_alloc_coherent(&wd->pdev->dev, wd->fw_size,
331 &wd->fw_phys); 321 &wd->fw_phys, GFP_KERNEL);
332 if (!wd->fw_virt) { 322 if (!wd->fw_virt) {
333 ret = -ENOMEM; 323 ret = -ENOMEM;
334 goto wd719x_init_end; 324 goto wd719x_init_end;
@@ -464,7 +454,7 @@ static int wd719x_abort(struct scsi_cmnd *cmd)
464{ 454{
465 int action, result; 455 int action, result;
466 unsigned long flags; 456 unsigned long flags;
467 struct wd719x_scb *scb = (struct wd719x_scb *)cmd->host_scribble; 457 struct wd719x_scb *scb = scsi_cmd_priv(cmd);
468 struct wd719x *wd = shost_priv(cmd->device->host); 458 struct wd719x *wd = shost_priv(cmd->device->host);
469 459
470 dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag); 460 dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag);
@@ -526,10 +516,8 @@ static int wd719x_host_reset(struct scsi_cmnd *cmd)
526 result = FAILED; 516 result = FAILED;
527 517
528 /* flush all SCBs */ 518 /* flush all SCBs */
529 list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) { 519 list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list)
530 struct scsi_cmnd *tmp_cmd = scb->cmd; 520 wd719x_finish_cmd(scb, result);
531 wd719x_finish_cmd(tmp_cmd, result);
532 }
533 spin_unlock_irqrestore(wd->sh->host_lock, flags); 521 spin_unlock_irqrestore(wd->sh->host_lock, flags);
534 522
535 return result; 523 return result;
@@ -555,7 +543,6 @@ static inline void wd719x_interrupt_SCB(struct wd719x *wd,
555 union wd719x_regs regs, 543 union wd719x_regs regs,
556 struct wd719x_scb *scb) 544 struct wd719x_scb *scb)
557{ 545{
558 struct scsi_cmnd *cmd;
559 int result; 546 int result;
560 547
561 /* now have to find result from card */ 548 /* now have to find result from card */
@@ -643,9 +630,8 @@ static inline void wd719x_interrupt_SCB(struct wd719x *wd,
643 result = DID_ERROR; 630 result = DID_ERROR;
644 break; 631 break;
645 } 632 }
646 cmd = scb->cmd;
647 633
648 wd719x_finish_cmd(cmd, result); 634 wd719x_finish_cmd(scb, result);
649} 635}
650 636
651static irqreturn_t wd719x_interrupt(int irq, void *dev_id) 637static irqreturn_t wd719x_interrupt(int irq, void *dev_id)
@@ -809,7 +795,6 @@ static int wd719x_board_found(struct Scsi_Host *sh)
809 int ret; 795 int ret;
810 796
811 INIT_LIST_HEAD(&wd->active_scbs); 797 INIT_LIST_HEAD(&wd->active_scbs);
812 INIT_LIST_HEAD(&wd->free_scbs);
813 798
814 sh->base = pci_resource_start(wd->pdev, 0); 799 sh->base = pci_resource_start(wd->pdev, 0);
815 800
@@ -820,17 +805,18 @@ static int wd719x_board_found(struct Scsi_Host *sh)
820 wd->fw_virt = NULL; 805 wd->fw_virt = NULL;
821 806
822 /* memory area for host (EEPROM) parameters */ 807 /* memory area for host (EEPROM) parameters */
823 wd->params = pci_alloc_consistent(wd->pdev, 808 wd->params = dma_alloc_coherent(&wd->pdev->dev,
824 sizeof(struct wd719x_host_param), 809 sizeof(struct wd719x_host_param),
825 &wd->params_phys); 810 &wd->params_phys, GFP_KERNEL);
826 if (!wd->params) { 811 if (!wd->params) {
827 dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n"); 812 dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n");
828 return -ENOMEM; 813 return -ENOMEM;
829 } 814 }
830 815
831 /* memory area for the RISC for hash table of outstanding requests */ 816 /* memory area for the RISC for hash table of outstanding requests */
832 wd->hash_virt = pci_alloc_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, 817 wd->hash_virt = dma_alloc_coherent(&wd->pdev->dev,
833 &wd->hash_phys); 818 WD719X_HASH_TABLE_SIZE,
819 &wd->hash_phys, GFP_KERNEL);
834 if (!wd->hash_virt) { 820 if (!wd->hash_virt) {
835 dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n"); 821 dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n");
836 ret = -ENOMEM; 822 ret = -ENOMEM;
@@ -862,10 +848,10 @@ static int wd719x_board_found(struct Scsi_Host *sh)
862fail_free_irq: 848fail_free_irq:
863 free_irq(wd->pdev->irq, wd); 849 free_irq(wd->pdev->irq, wd);
864fail_free_hash: 850fail_free_hash:
865 pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt, 851 dma_free_coherent(&wd->pdev->dev, WD719X_HASH_TABLE_SIZE, wd->hash_virt,
866 wd->hash_phys); 852 wd->hash_phys);
867fail_free_params: 853fail_free_params:
868 pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param), 854 dma_free_coherent(&wd->pdev->dev, sizeof(struct wd719x_host_param),
869 wd->params, wd->params_phys); 855 wd->params, wd->params_phys);
870 856
871 return ret; 857 return ret;
@@ -874,6 +860,7 @@ fail_free_params:
874static struct scsi_host_template wd719x_template = { 860static struct scsi_host_template wd719x_template = {
875 .module = THIS_MODULE, 861 .module = THIS_MODULE,
876 .name = "Western Digital 719x", 862 .name = "Western Digital 719x",
863 .cmd_size = sizeof(struct wd719x_scb),
877 .queuecommand = wd719x_queuecommand, 864 .queuecommand = wd719x_queuecommand,
878 .eh_abort_handler = wd719x_abort, 865 .eh_abort_handler = wd719x_abort,
879 .eh_device_reset_handler = wd719x_dev_reset, 866 .eh_device_reset_handler = wd719x_dev_reset,
@@ -884,7 +871,6 @@ static struct scsi_host_template wd719x_template = {
884 .can_queue = 255, 871 .can_queue = 255,
885 .this_id = 7, 872 .this_id = 7,
886 .sg_tablesize = WD719X_SG, 873 .sg_tablesize = WD719X_SG,
887 .use_clustering = ENABLE_CLUSTERING,
888}; 874};
889 875
890static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d) 876static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
@@ -897,7 +883,7 @@ static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
897 if (err) 883 if (err)
898 goto fail; 884 goto fail;
899 885
900 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 886 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
901 dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n"); 887 dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
902 goto disable_device; 888 goto disable_device;
903 } 889 }
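
pci_set_dma_mask() is a thin legacy wrapper around the generic DMA API, which
takes the underlying struct device directly. A driver that also allocates
coherent buffers, as wd719x does above, would typically set both masks at
once; a kernel-style sketch, not standalone-buildable, with a made-up "foo"
name:

    /* Illustrative probe fragment only. */
    static int foo_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
    {
            int err;

            err = pcim_enable_device(pdev);
            if (err)
                    return err;

            /* streaming and coherent masks in one call */
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            if (err) {
                    dev_warn(&pdev->dev, "no suitable DMA available\n");
                    return err;
            }
            return 0;
    }
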
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
index 0455b1633ca7..abaabd419a54 100644
--- a/drivers/scsi/wd719x.h
+++ b/drivers/scsi/wd719x.h
@@ -74,7 +74,6 @@ struct wd719x {
74 void *hash_virt; /* hash table CPU address */ 74 void *hash_virt; /* hash table CPU address */
75 dma_addr_t hash_phys; /* hash table bus address */ 75 dma_addr_t hash_phys; /* hash table bus address */
76 struct list_head active_scbs; 76 struct list_head active_scbs;
77 struct list_head free_scbs;
78}; 77};
79 78
80/* timeout delays in microsecs */ 79/* timeout delays in microsecs */
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 61389bdc7926..f0068e96a177 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -696,7 +696,6 @@ static struct scsi_host_template scsifront_sht = {
696 .this_id = -1, 696 .this_id = -1,
697 .cmd_size = sizeof(struct vscsifrnt_shadow), 697 .cmd_size = sizeof(struct vscsifrnt_shadow),
698 .sg_tablesize = VSCSIIF_SG_TABLESIZE, 698 .sg_tablesize = VSCSIIF_SG_TABLESIZE,
699 .use_clustering = DISABLE_CLUSTERING,
700 .proc_name = "scsifront", 699 .proc_name = "scsifront",
701}; 700};
702 701
@@ -1112,7 +1111,7 @@ static void scsifront_backend_changed(struct xenbus_device *dev,
1112 case XenbusStateClosed: 1111 case XenbusStateClosed:
1113 if (dev->state == XenbusStateClosed) 1112 if (dev->state == XenbusStateClosed)
1114 break; 1113 break;
1115 /* Missed the backend's Closing state -- fallthrough */ 1114 /* fall through - Missed the backend's Closing state */
1116 case XenbusStateClosing: 1115 case XenbusStateClosing:
1117 scsifront_disconnect(info); 1116 scsifront_disconnect(info);
1118 break; 1117 break;
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 69e6abe14abf..c57d66a7405f 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -237,12 +237,6 @@ static struct scsi_host_template rtsx_host_template = {
237 /* limit the total size of a transfer to 120 KB */ 237 /* limit the total size of a transfer to 120 KB */
238 .max_sectors = 240, 238 .max_sectors = 240,
239 239
240 /* merge commands... this seems to help performance, but
241 * periodically someone should test to see which setting is more
242 * optimal.
243 */
244 .use_clustering = 1,
245
246 /* emulated HBA */ 240 /* emulated HBA */
247 .emulated = 1, 241 .emulated = 1,
248 242
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 4fc521c51c0e..5cf93e8eb77c 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -645,7 +645,6 @@ static struct scsi_host_template visorhba_driver_template = {
645 .this_id = -1, 645 .this_id = -1,
646 .slave_alloc = visorhba_slave_alloc, 646 .slave_alloc = visorhba_slave_alloc,
647 .slave_destroy = visorhba_slave_destroy, 647 .slave_destroy = visorhba_slave_destroy,
648 .use_clustering = ENABLE_CLUSTERING,
649}; 648};
650 649
651/* 650/*
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index c1d5a173553d..984941e036c8 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1493,8 +1493,6 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
1493 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1493 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1494 iscsit_stop_dataout_timer(cmd); 1494 iscsit_stop_dataout_timer(cmd);
1495 1495
1496 transport_check_aborted_status(se_cmd,
1497 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
1498 return iscsit_dump_data_payload(conn, payload_length, 1); 1496 return iscsit_dump_data_payload(conn, payload_length, 1);
1499 } 1497 }
1500 } else { 1498 } else {
@@ -1509,12 +1507,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
1509 * TASK_ABORTED status. 1507 * TASK_ABORTED status.
1510 */ 1508 */
1511 if (se_cmd->transport_state & CMD_T_ABORTED) { 1509 if (se_cmd->transport_state & CMD_T_ABORTED) {
1512 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1510 if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
1513 if (--cmd->outstanding_r2ts < 1) { 1511 --cmd->outstanding_r2ts < 1)
1514 iscsit_stop_dataout_timer(cmd); 1512 iscsit_stop_dataout_timer(cmd);
1515 transport_check_aborted_status(
1516 se_cmd, 1);
1517 }
1518 1513
1519 return iscsit_dump_data_payload(conn, payload_length, 1); 1514 return iscsit_dump_data_payload(conn, payload_length, 1);
1520 } 1515 }
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 95d0a22b2ad6..a5481dfeae8d 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1343,11 +1343,6 @@ static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
1343 1343
1344/* Start functions for target_core_fabric_ops */ 1344/* Start functions for target_core_fabric_ops */
1345 1345
1346static char *iscsi_get_fabric_name(void)
1347{
1348 return "iSCSI";
1349}
1350
1351static int iscsi_get_cmd_state(struct se_cmd *se_cmd) 1346static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
1352{ 1347{
1353 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1348 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
@@ -1549,9 +1544,9 @@ static void lio_release_cmd(struct se_cmd *se_cmd)
1549 1544
1550const struct target_core_fabric_ops iscsi_ops = { 1545const struct target_core_fabric_ops iscsi_ops = {
1551 .module = THIS_MODULE, 1546 .module = THIS_MODULE,
1552 .name = "iscsi", 1547 .fabric_alias = "iscsi",
1548 .fabric_name = "iSCSI",
1553 .node_acl_size = sizeof(struct iscsi_node_acl), 1549 .node_acl_size = sizeof(struct iscsi_node_acl),
1554 .get_fabric_name = iscsi_get_fabric_name,
1555 .tpg_get_wwn = lio_tpg_get_endpoint_wwn, 1550 .tpg_get_wwn = lio_tpg_get_endpoint_wwn,
1556 .tpg_get_tag = lio_tpg_get_tag, 1551 .tpg_get_tag = lio_tpg_get_tag,
1557 .tpg_get_default_depth = lio_tpg_get_default_depth, 1552 .tpg_get_default_depth = lio_tpg_get_default_depth,
@@ -1596,4 +1591,6 @@ const struct target_core_fabric_ops iscsi_ops = {
1596 .tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs, 1591 .tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs,
1597 .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs, 1592 .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs,
1598 .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs, 1593 .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs,
1594
1595 .write_pending_must_be_called = true,
1599}; 1596};
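
Replacing the get_fabric_name() callback with a const char *fabric_name field
(plus an optional fabric_alias, which here keeps the "iscsi" configfs
directory name while logging as "iSCSI") turns every call site into a plain
field read. The alias-first match used by target_core_get_fabric() reduces to
a few lines; a runnable sketch:

    #include <stdio.h>
    #include <string.h>

    struct fabric_ops {
            const char *fabric_alias;   /* optional configfs name */
            const char *fabric_name;    /* always set */
    };

    /* Match on the alias when present, else on the canonical name. */
    static int fabric_matches(const struct fabric_ops *ops, const char *name)
    {
            const char *cmp_name = ops->fabric_alias;

            if (!cmp_name)
                    cmp_name = ops->fabric_name;
            return strcmp(cmp_name, name) == 0;
    }

    int main(void)
    {
            struct fabric_ops iscsi = { .fabric_alias = "iscsi",
                                        .fabric_name  = "iSCSI" };
            struct fabric_ops loop  = { .fabric_name  = "loopback" };

            printf("%d %d %d\n",
                   fabric_matches(&iscsi, "iscsi"),     /* 1: alias wins */
                   fabric_matches(&iscsi, "iSCSI"),     /* 0: alias shadows name */
                   fabric_matches(&loop, "loopback"));  /* 1 */
            return 0;
    }
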
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index a211e8154f4c..1b54a9c70851 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -943,20 +943,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
943 return 0; 943 return 0;
944 } 944 }
945 spin_unlock_bh(&cmd->istate_lock); 945 spin_unlock_bh(&cmd->istate_lock);
946 /* 946 if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
947 * Determine if delayed TASK_ABORTED status for WRITEs
948 * should be sent now if no unsolicited data out
949 * payloads are expected, or if the delayed status
950 * should be sent after unsolicited data out with
951 * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
952 */
953 if (transport_check_aborted_status(se_cmd,
954 (cmd->unsolicited_data == 0)) != 0)
955 return 0; 947 return 0;
956 /*
957 * Otherwise send CHECK_CONDITION and sense for
958 * exception
959 */
960 return transport_send_check_condition_and_sense(se_cmd, 948 return transport_send_check_condition_and_sense(se_cmd,
961 cmd->sense_reason, 0); 949 cmd->sense_reason, 0);
962 } 950 }
@@ -974,13 +962,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
974 962
975 if (!(cmd->cmd_flags & 963 if (!(cmd->cmd_flags &
976 ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) { 964 ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
977 /* 965 if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
978 * Send the delayed TASK_ABORTED status for
979 * WRITEs if no more unsolicited data is
980 * expected.
981 */
982 if (transport_check_aborted_status(se_cmd, 1)
983 != 0)
984 return 0; 966 return 0;
985 967
986 iscsit_set_dataout_sequence_values(cmd); 968 iscsit_set_dataout_sequence_values(cmd);
@@ -995,11 +977,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
995 977
996 if ((cmd->data_direction == DMA_TO_DEVICE) && 978 if ((cmd->data_direction == DMA_TO_DEVICE) &&
997 !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) { 979 !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
998 /* 980 if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
999 * Send the delayed TASK_ABORTED status for WRITEs if
1000 * no more unsolicited data is expected.
1001 */
1002 if (transport_check_aborted_status(se_cmd, 1) != 0)
1003 return 0; 981 return 0;
1004 982
1005 iscsit_set_unsoliticed_dataout(cmd); 983 iscsit_set_unsoliticed_dataout(cmd);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index bc8918f382e4..7bd7c0c0db6f 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -324,7 +324,7 @@ static struct scsi_host_template tcm_loop_driver_template = {
324 .sg_tablesize = 256, 324 .sg_tablesize = 256,
325 .cmd_per_lun = 1024, 325 .cmd_per_lun = 1024,
326 .max_sectors = 0xFFFF, 326 .max_sectors = 0xFFFF,
327 .use_clustering = DISABLE_CLUSTERING, 327 .dma_boundary = PAGE_SIZE - 1,
328 .slave_alloc = tcm_loop_slave_alloc, 328 .slave_alloc = tcm_loop_slave_alloc,
329 .module = THIS_MODULE, 329 .module = THIS_MODULE,
330 .track_queue_depth = 1, 330 .track_queue_depth = 1,
@@ -460,11 +460,6 @@ static void tcm_loop_release_core_bus(void)
460 pr_debug("Releasing TCM Loop Core BUS\n"); 460 pr_debug("Releasing TCM Loop Core BUS\n");
461} 461}
462 462
463static char *tcm_loop_get_fabric_name(void)
464{
465 return "loopback";
466}
467
468static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg) 463static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
469{ 464{
470 return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); 465 return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
@@ -1149,8 +1144,7 @@ static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1149 1144
1150static const struct target_core_fabric_ops loop_ops = { 1145static const struct target_core_fabric_ops loop_ops = {
1151 .module = THIS_MODULE, 1146 .module = THIS_MODULE,
1152 .name = "loopback", 1147 .fabric_name = "loopback",
1153 .get_fabric_name = tcm_loop_get_fabric_name,
1154 .tpg_get_wwn = tcm_loop_get_endpoint_wwn, 1148 .tpg_get_wwn = tcm_loop_get_endpoint_wwn,
1155 .tpg_get_tag = tcm_loop_get_tag, 1149 .tpg_get_tag = tcm_loop_get_tag,
1156 .tpg_check_demo_mode = tcm_loop_check_demo_mode, 1150 .tpg_check_demo_mode = tcm_loop_check_demo_mode,
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 3d10189ecedc..08cee13dfb9a 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1694,11 +1694,6 @@ static int sbp_check_false(struct se_portal_group *se_tpg)
1694 return 0; 1694 return 0;
1695} 1695}
1696 1696
1697static char *sbp_get_fabric_name(void)
1698{
1699 return "sbp";
1700}
1701
1702static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg) 1697static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1703{ 1698{
1704 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1699 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
@@ -2323,8 +2318,7 @@ static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2323 2318
2324static const struct target_core_fabric_ops sbp_ops = { 2319static const struct target_core_fabric_ops sbp_ops = {
2325 .module = THIS_MODULE, 2320 .module = THIS_MODULE,
2326 .name = "sbp", 2321 .fabric_name = "sbp",
2327 .get_fabric_name = sbp_get_fabric_name,
2328 .tpg_get_wwn = sbp_get_fabric_wwn, 2322 .tpg_get_wwn = sbp_get_fabric_wwn,
2329 .tpg_get_tag = sbp_get_tag, 2323 .tpg_get_tag = sbp_get_tag,
2330 .tpg_check_demo_mode = sbp_check_true, 2324 .tpg_check_demo_mode = sbp_check_true,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4f134b0c3e29..6b0d9beacf90 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -451,7 +451,7 @@ static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
451 pr_debug("[%s]: ALUA TG Port not available, " 451 pr_debug("[%s]: ALUA TG Port not available, "
452 "SenseKey: NOT_READY, ASC/ASCQ: " 452 "SenseKey: NOT_READY, ASC/ASCQ: "
453 "0x04/0x%02x\n", 453 "0x04/0x%02x\n",
454 cmd->se_tfo->get_fabric_name(), alua_ascq); 454 cmd->se_tfo->fabric_name, alua_ascq);
455 455
456 cmd->scsi_asc = 0x04; 456 cmd->scsi_asc = 0x04;
457 cmd->scsi_ascq = alua_ascq; 457 cmd->scsi_ascq = alua_ascq;
@@ -1229,13 +1229,13 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
1229 1229
1230 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) { 1230 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
1231 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu", 1231 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
1232 db_root, se_tpg->se_tpg_tfo->get_fabric_name(), 1232 db_root, se_tpg->se_tpg_tfo->fabric_name,
1233 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), 1233 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1234 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg), 1234 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
1235 lun->unpacked_lun); 1235 lun->unpacked_lun);
1236 } else { 1236 } else {
1237 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu", 1237 path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
1238 db_root, se_tpg->se_tpg_tfo->get_fabric_name(), 1238 db_root, se_tpg->se_tpg_tfo->fabric_name,
1239 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), 1239 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
1240 lun->unpacked_lun); 1240 lun->unpacked_lun);
1241 } 1241 }
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index f6b1549f4142..72016d0dfca5 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -172,7 +172,10 @@ static struct target_fabric_configfs *target_core_get_fabric(
172 172
173 mutex_lock(&g_tf_lock); 173 mutex_lock(&g_tf_lock);
174 list_for_each_entry(tf, &g_tf_list, tf_list) { 174 list_for_each_entry(tf, &g_tf_list, tf_list) {
175 if (!strcmp(tf->tf_ops->name, name)) { 175 const char *cmp_name = tf->tf_ops->fabric_alias;
176 if (!cmp_name)
177 cmp_name = tf->tf_ops->fabric_name;
178 if (!strcmp(cmp_name, name)) {
176 atomic_inc(&tf->tf_access_cnt); 179 atomic_inc(&tf->tf_access_cnt);
177 mutex_unlock(&g_tf_lock); 180 mutex_unlock(&g_tf_lock);
178 return tf; 181 return tf;
@@ -249,7 +252,7 @@ static struct config_group *target_core_register_fabric(
249 return ERR_PTR(-EINVAL); 252 return ERR_PTR(-EINVAL);
250 } 253 }
251 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" 254 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
252 " %s\n", tf->tf_ops->name); 255 " %s\n", tf->tf_ops->fabric_name);
253 /* 256 /*
254 * On a successful target_core_get_fabric() look, the returned 257 * On a successful target_core_get_fabric() look, the returned
255 * struct target_fabric_configfs *tf will contain a usage reference. 258 * struct target_fabric_configfs *tf will contain a usage reference.
@@ -282,7 +285,7 @@ static void target_core_deregister_fabric(
282 " tf list\n", config_item_name(item)); 285 " tf list\n", config_item_name(item));
283 286
284 pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" 287 pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
285 " %s\n", tf->tf_ops->name); 288 " %s\n", tf->tf_ops->fabric_name);
286 atomic_dec(&tf->tf_access_cnt); 289 atomic_dec(&tf->tf_access_cnt);
287 290
288 pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" 291 pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
@@ -342,17 +345,20 @@ EXPORT_SYMBOL(target_undepend_item);
342 345
343static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) 346static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
344{ 347{
345 if (!tfo->name) { 348 if (tfo->fabric_alias) {
346 pr_err("Missing tfo->name\n"); 349 if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
347 return -EINVAL; 350 pr_err("Passed alias: %s exceeds "
351 "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
352 return -EINVAL;
353 }
348 } 354 }
349 if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) { 355 if (!tfo->fabric_name) {
350 pr_err("Passed name: %s exceeds TARGET_FABRIC" 356 pr_err("Missing tfo->fabric_name\n");
351 "_NAME_SIZE\n", tfo->name);
352 return -EINVAL; 357 return -EINVAL;
353 } 358 }
354 if (!tfo->get_fabric_name) { 359 if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
355 pr_err("Missing tfo->get_fabric_name()\n"); 360 pr_err("Passed name: %s exceeds "
361 "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
356 return -EINVAL; 362 return -EINVAL;
357 } 363 }
358 if (!tfo->tpg_get_wwn) { 364 if (!tfo->tpg_get_wwn) {
@@ -486,7 +492,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
486 492
487 mutex_lock(&g_tf_lock); 493 mutex_lock(&g_tf_lock);
488 list_for_each_entry(t, &g_tf_list, tf_list) { 494 list_for_each_entry(t, &g_tf_list, tf_list) {
489 if (!strcmp(t->tf_ops->name, fo->name)) { 495 if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
490 BUG_ON(atomic_read(&t->tf_access_cnt)); 496 BUG_ON(atomic_read(&t->tf_access_cnt));
491 list_del(&t->tf_list); 497 list_del(&t->tf_list);
492 mutex_unlock(&g_tf_lock); 498 mutex_unlock(&g_tf_lock);
@@ -532,9 +538,9 @@ DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
532DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws); 538DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
533DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw); 539DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
534DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc); 540DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
541DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
535DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type); 542DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
536DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type); 543DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
537DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_format);
538DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify); 544DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
539DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids); 545DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
540DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot); 546DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
@@ -592,6 +598,7 @@ static ssize_t _name##_store(struct config_item *item, const char *page, \
592DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write); 598DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
593DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw); 599DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
594DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc); 600DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
601DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
595DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids); 602DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
596DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot); 603DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
597 604
@@ -613,12 +620,17 @@ static void dev_set_t10_wwn_model_alias(struct se_device *dev)
613 const char *configname; 620 const char *configname;
614 621
615 configname = config_item_name(&dev->dev_group.cg_item); 622 configname = config_item_name(&dev->dev_group.cg_item);
616 if (strlen(configname) >= 16) { 623 if (strlen(configname) >= INQUIRY_MODEL_LEN) {
617 pr_warn("dev[%p]: Backstore name '%s' is too long for " 624 pr_warn("dev[%p]: Backstore name '%s' is too long for "
618 "INQUIRY_MODEL, truncating to 16 bytes\n", dev, 625 "INQUIRY_MODEL, truncating to 15 characters\n", dev,
619 configname); 626 configname);
620 } 627 }
621 snprintf(&dev->t10_wwn.model[0], 16, "%s", configname); 628 /*
629 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
630 * here without potentially breaking existing setups, so continue to
631 * truncate one byte shorter than what can be carried in INQUIRY.
632 */
633 strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
622} 634}
623 635
624static ssize_t emulate_model_alias_store(struct config_item *item, 636static ssize_t emulate_model_alias_store(struct config_item *item,
@@ -640,11 +652,12 @@ static ssize_t emulate_model_alias_store(struct config_item *item,
640 if (ret < 0) 652 if (ret < 0)
641 return ret; 653 return ret;
642 654
655 BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
643 if (flag) { 656 if (flag) {
644 dev_set_t10_wwn_model_alias(dev); 657 dev_set_t10_wwn_model_alias(dev);
645 } else { 658 } else {
646 strncpy(&dev->t10_wwn.model[0], 659 strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
647 dev->transport->inquiry_prod, 16); 660 sizeof(dev->t10_wwn.model));
648 } 661 }
649 da->emulate_model_alias = flag; 662 da->emulate_model_alias = flag;
650 return count; 663 return count;
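
Both hunks above move from strncpy(), which does not guarantee NUL
termination, to strlcpy(), which always terminates and returns the
untruncated source length. Per the XXX comment, the copy stays capped at
INQUIRY_MODEL_LEN, so at most 15 characters plus the terminator land in the
model field. strlcpy() is a kernel/BSD routine, so this runnable sketch
carries its own minimal copy:

    #include <stdio.h>
    #include <string.h>

    #define INQUIRY_MODEL_LEN 16

    /* Minimal strlcpy: always NUL-terminates, returns strlen(src). */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (size) {
                    size_t n = len >= size ? size - 1 : len;

                    memcpy(dst, src, n);
                    dst[n] = '\0';
            }
            return len;
    }

    int main(void)
    {
            char model[INQUIRY_MODEL_LEN + 1];

            /* Capped at INQUIRY_MODEL_LEN: 15 chars + NUL at most. */
            my_strlcpy(model, "a-very-long-backstore-name", INQUIRY_MODEL_LEN);
            printf("'%s' (%zu chars)\n", model, strlen(model));
            return 0;
    }
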
@@ -1116,9 +1129,10 @@ CONFIGFS_ATTR(, emulate_tpu);
1116CONFIGFS_ATTR(, emulate_tpws); 1129CONFIGFS_ATTR(, emulate_tpws);
1117CONFIGFS_ATTR(, emulate_caw); 1130CONFIGFS_ATTR(, emulate_caw);
1118CONFIGFS_ATTR(, emulate_3pc); 1131CONFIGFS_ATTR(, emulate_3pc);
1132CONFIGFS_ATTR(, emulate_pr);
1119CONFIGFS_ATTR(, pi_prot_type); 1133CONFIGFS_ATTR(, pi_prot_type);
1120CONFIGFS_ATTR_RO(, hw_pi_prot_type); 1134CONFIGFS_ATTR_RO(, hw_pi_prot_type);
1121CONFIGFS_ATTR(, pi_prot_format); 1135CONFIGFS_ATTR_WO(, pi_prot_format);
1122CONFIGFS_ATTR(, pi_prot_verify); 1136CONFIGFS_ATTR(, pi_prot_verify);
1123CONFIGFS_ATTR(, enforce_pr_isids); 1137CONFIGFS_ATTR(, enforce_pr_isids);
1124CONFIGFS_ATTR(, is_nonrot); 1138CONFIGFS_ATTR(, is_nonrot);
@@ -1156,6 +1170,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
1156 &attr_emulate_tpws, 1170 &attr_emulate_tpws,
1157 &attr_emulate_caw, 1171 &attr_emulate_caw,
1158 &attr_emulate_3pc, 1172 &attr_emulate_3pc,
1173 &attr_emulate_pr,
1159 &attr_pi_prot_type, 1174 &attr_pi_prot_type,
1160 &attr_hw_pi_prot_type, 1175 &attr_hw_pi_prot_type,
1161 &attr_pi_prot_format, 1176 &attr_pi_prot_format,
@@ -1211,6 +1226,74 @@ static struct t10_wwn *to_t10_wwn(struct config_item *item)
1211} 1226}
1212 1227
1213/* 1228/*
1229 * STANDARD and VPD page 0x83 T10 Vendor Identification
1230 */
1231static ssize_t target_wwn_vendor_id_show(struct config_item *item,
1232 char *page)
1233{
1234 return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
1235}
1236
1237static ssize_t target_wwn_vendor_id_store(struct config_item *item,
1238 const char *page, size_t count)
1239{
1240 struct t10_wwn *t10_wwn = to_t10_wwn(item);
1241 struct se_device *dev = t10_wwn->t10_dev;
1242 /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1243 unsigned char buf[INQUIRY_VENDOR_LEN + 2];
1244 char *stripped = NULL;
1245 size_t len;
1246 int i;
1247
1248 len = strlcpy(buf, page, sizeof(buf));
1249 if (len < sizeof(buf)) {
1250 /* Strip any newline added from userspace. */
1251 stripped = strstrip(buf);
1252 len = strlen(stripped);
1253 }
1254 if (len > INQUIRY_VENDOR_LEN) {
1255 pr_err("Emulated T10 Vendor Identification exceeds"
1256 " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
1257 "\n");
1258 return -EOVERFLOW;
1259 }
1260
1261 /*
1262 * SPC 4.3.1:
1263 * ASCII data fields shall contain only ASCII printable characters (i.e.,
1264 * code values 20h to 7Eh) and may be terminated with one or more ASCII
1265 * null (00h) characters.
1266 */
1267 for (i = 0; i < len; i++) {
1268 if ((stripped[i] < 0x20) || (stripped[i] > 0x7E)) {
1269 pr_err("Emulated T10 Vendor Identification contains"
1270 " non-ASCII-printable characters\n");
1271 return -EINVAL;
1272 }
1273 }
1274
1275 /*
1276 * Check to see if any active exports exist. If they do exist, fail
1277 * here as changing this information on the fly (underneath the
1278 * initiator side OS dependent multipath code) could cause negative
1279 * effects.
1280 */
1281 if (dev->export_count) {
1282 pr_err("Unable to set T10 Vendor Identification while"
1283 " active %d exports exist\n", dev->export_count);
1284 return -EINVAL;
1285 }
1286
1287 BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
1288 strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));
1289
1290 pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
1291 " %s\n", dev->t10_wwn.vendor);
1292
1293 return count;
1294}
1295
1296/*
1214 * VPD page 0x80 Unit serial 1297 * VPD page 0x80 Unit serial
1215 */ 1298 */
1216static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item, 1299static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
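
The new vendor_id store strips a trailing newline, bounds the result against
INQUIRY_VENDOR_LEN (8), and enforces the SPC 4.3.1 printable-ASCII rule (20h
to 7Eh) before committing anything. Its validation core as runnable
user-space C, with invented names:

    #include <stdio.h>
    #include <string.h>

    #define INQUIRY_VENDOR_LEN 8

    /* Return 0 if acceptable per SPC 4.3.1, negative otherwise. */
    static int check_vendor_id(const char *s)
    {
            size_t len = strlen(s);
            size_t i;

            if (len && s[len - 1] == '\n')  /* tolerate echo's newline */
                    len--;
            if (len > INQUIRY_VENDOR_LEN)
                    return -1;              /* -EOVERFLOW in the kernel */
            for (i = 0; i < len; i++) {
                    unsigned char c = s[i];

                    if (c < 0x20 || c > 0x7E)
                            return -2;      /* non-printable ASCII */
            }
            return 0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   check_vendor_id("LIO-ORG\n"),        /*  0 */
                   check_vendor_id("WAY-TOO-LONG-ID"),  /* -1 */
                   check_vendor_id("bad\tchar"));       /* -2 */
            return 0;
    }
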
@@ -1356,6 +1439,7 @@ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
1356/* VPD page 0x83 Association: SCSI Target Device */ 1439/* VPD page 0x83 Association: SCSI Target Device */
1357DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20); 1440DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
1358 1441
1442CONFIGFS_ATTR(target_wwn_, vendor_id);
1359CONFIGFS_ATTR(target_wwn_, vpd_unit_serial); 1443CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
1360CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier); 1444CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
1361CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit); 1445CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
@@ -1363,6 +1447,7 @@ CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
1363CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device); 1447CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
1364 1448
1365static struct configfs_attribute *target_core_dev_wwn_attrs[] = { 1449static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
1450 &target_wwn_attr_vendor_id,
1366 &target_wwn_attr_vpd_unit_serial, 1451 &target_wwn_attr_vpd_unit_serial,
1367 &target_wwn_attr_vpd_protocol_identifier, 1452 &target_wwn_attr_vpd_protocol_identifier,
1368 &target_wwn_attr_vpd_assoc_logical_unit, 1453 &target_wwn_attr_vpd_assoc_logical_unit,
@@ -1400,7 +1485,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
1400 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); 1485 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
1401 1486
1402 return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n", 1487 return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
1403 se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 1488 se_nacl->se_tpg->se_tpg_tfo->fabric_name,
1404 se_nacl->initiatorname, i_buf); 1489 se_nacl->initiatorname, i_buf);
1405} 1490}
1406 1491
@@ -1414,7 +1499,7 @@ static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
1414 if (se_nacl) { 1499 if (se_nacl) {
1415 len = sprintf(page, 1500 len = sprintf(page,
1416 "SPC-2 Reservation: %s Initiator: %s\n", 1501 "SPC-2 Reservation: %s Initiator: %s\n",
1417 se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 1502 se_nacl->se_tpg->se_tpg_tfo->fabric_name,
1418 se_nacl->initiatorname); 1503 se_nacl->initiatorname);
1419 } else { 1504 } else {
1420 len = sprintf(page, "No SPC-2 Reservation holder\n"); 1505 len = sprintf(page, "No SPC-2 Reservation holder\n");
@@ -1427,6 +1512,9 @@ static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
1427 struct se_device *dev = pr_to_dev(item); 1512 struct se_device *dev = pr_to_dev(item);
1428 int ret; 1513 int ret;
1429 1514
1515 if (!dev->dev_attrib.emulate_pr)
1516 return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
1517
1430 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 1518 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
1431 return sprintf(page, "Passthrough\n"); 1519 return sprintf(page, "Passthrough\n");
1432 1520
@@ -1489,13 +1577,13 @@ static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
1489 tfo = se_tpg->se_tpg_tfo; 1577 tfo = se_tpg->se_tpg_tfo;
1490 1578
1491 len += sprintf(page+len, "SPC-3 Reservation: %s" 1579 len += sprintf(page+len, "SPC-3 Reservation: %s"
1492 " Target Node Endpoint: %s\n", tfo->get_fabric_name(), 1580 " Target Node Endpoint: %s\n", tfo->fabric_name,
1493 tfo->tpg_get_wwn(se_tpg)); 1581 tfo->tpg_get_wwn(se_tpg));
1494 len += sprintf(page+len, "SPC-3 Reservation: Relative Port" 1582 len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
1495 " Identifier Tag: %hu %s Portal Group Tag: %hu" 1583 " Identifier Tag: %hu %s Portal Group Tag: %hu"
1496 " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi, 1584 " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
1497 tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg), 1585 tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
1498 tfo->get_fabric_name(), pr_reg->pr_aptpl_target_lun); 1586 tfo->fabric_name, pr_reg->pr_aptpl_target_lun);
1499 1587
1500out_unlock: 1588out_unlock:
1501 spin_unlock(&dev->dev_reservation_lock); 1589 spin_unlock(&dev->dev_reservation_lock);
@@ -1526,7 +1614,7 @@ static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
1526 core_pr_dump_initiator_port(pr_reg, i_buf, 1614 core_pr_dump_initiator_port(pr_reg, i_buf,
1527 PR_REG_ISID_ID_LEN); 1615 PR_REG_ISID_ID_LEN);
1528 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", 1616 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
1529 tfo->get_fabric_name(), 1617 tfo->fabric_name,
1530 pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key, 1618 pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
1531 pr_reg->pr_res_generation); 1619 pr_reg->pr_res_generation);
1532 1620
@@ -1567,12 +1655,14 @@ static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
1567{ 1655{
1568 struct se_device *dev = pr_to_dev(item); 1656 struct se_device *dev = pr_to_dev(item);
1569 1657
1658 if (!dev->dev_attrib.emulate_pr)
1659 return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
1570 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 1660 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
1571 return sprintf(page, "SPC_PASSTHROUGH\n"); 1661 return sprintf(page, "SPC_PASSTHROUGH\n");
1572 else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1662 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1573 return sprintf(page, "SPC2_RESERVATIONS\n"); 1663 return sprintf(page, "SPC2_RESERVATIONS\n");
1574 else 1664
1575 return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); 1665 return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
1576} 1666}
1577 1667
1578static ssize_t target_pr_res_aptpl_active_show(struct config_item *item, 1668static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
@@ -1580,7 +1670,8 @@ static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
1580{ 1670{
1581 struct se_device *dev = pr_to_dev(item); 1671 struct se_device *dev = pr_to_dev(item);
1582 1672
1583 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 1673 if (!dev->dev_attrib.emulate_pr ||
1674 (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
1584 return 0; 1675 return 0;
1585 1676
1586 return sprintf(page, "APTPL Bit Status: %s\n", 1677 return sprintf(page, "APTPL Bit Status: %s\n",
@@ -1592,7 +1683,8 @@ static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
1592{ 1683{
1593 struct se_device *dev = pr_to_dev(item); 1684 struct se_device *dev = pr_to_dev(item);
1594 1685
1595 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 1686 if (!dev->dev_attrib.emulate_pr ||
1687 (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
1596 return 0; 1688 return 0;
1597 1689
1598 return sprintf(page, "Ready to process PR APTPL metadata..\n"); 1690 return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1638,7 +1730,8 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
1638 u16 tpgt = 0; 1730 u16 tpgt = 0;
1639 u8 type = 0; 1731 u8 type = 0;
1640 1732
1641 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 1733 if (!dev->dev_attrib.emulate_pr ||
1734 (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
1642 return count; 1735 return count;
1643 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1736 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1644 return count; 1737 return count;
@@ -2746,7 +2839,7 @@ static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
2746 struct se_portal_group *tpg = lun->lun_tpg; 2839 struct se_portal_group *tpg = lun->lun_tpg;
2747 2840
2748 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" 2841 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
2749 "/%s\n", tpg->se_tpg_tfo->get_fabric_name(), 2842 "/%s\n", tpg->se_tpg_tfo->fabric_name,
2750 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 2843 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2751 tpg->se_tpg_tfo->tpg_get_tag(tpg), 2844 tpg->se_tpg_tfo->tpg_get_tag(tpg),
2752 config_item_name(&lun->lun_group.cg_item)); 2845 config_item_name(&lun->lun_group.cg_item));
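
The long run of mechanical hunks above, and the matching ones in every target file below, are fallout from a single API change: struct target_core_fabric_ops loses its get_fabric_name() callback and gains a const char *fabric_name data member, so call sites stop making an indirect call just to fetch a constant string. A minimal standalone C sketch of the before/after shape — the struct layouts are illustrative, only the two member names come from the diff:

#include <stdio.h>

/* Old style: every fabric module supplied a callback returning its name. */
struct fabric_ops_old {
	const char *(*get_fabric_name)(void);
};

static const char *iscsi_get_fabric_name(void)
{
	return "iSCSI";
}

/* New style in this series: the name is plain data in the ops table. */
struct fabric_ops_new {
	const char *fabric_name;
};

int main(void)
{
	struct fabric_ops_old old_ops = { .get_fabric_name = iscsi_get_fabric_name };
	struct fabric_ops_new new_ops = { .fabric_name = "iSCSI" };

	/* Call sites shrink from tfo->get_fabric_name() ... */
	printf("old: %s\n", old_ops.get_fabric_name());
	/* ... to tfo->fabric_name, with no boilerplate callback. */
	printf("new: %s\n", new_ops.fabric_name);
	return 0;
}

Besides dropping the pointless indirection, a plain data member lets fabric modules fill in their ops table without a one-line callback per driver.
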
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 47b5ef153135..93c56f4a9911 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -95,7 +95,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
95 deve->lun_access_ro) { 95 deve->lun_access_ro) {
96 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" 96 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
97 " Access for 0x%08llx\n", 97 " Access for 0x%08llx\n",
98 se_cmd->se_tfo->get_fabric_name(), 98 se_cmd->se_tfo->fabric_name,
99 unpacked_lun); 99 unpacked_lun);
100 rcu_read_unlock(); 100 rcu_read_unlock();
101 ret = TCM_WRITE_PROTECTED; 101 ret = TCM_WRITE_PROTECTED;
@@ -114,7 +114,7 @@ out_unlock:
114 if (unpacked_lun != 0) { 114 if (unpacked_lun != 0) {
115 pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 115 pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
116 " Access for 0x%08llx\n", 116 " Access for 0x%08llx\n",
117 se_cmd->se_tfo->get_fabric_name(), 117 se_cmd->se_tfo->fabric_name,
118 unpacked_lun); 118 unpacked_lun);
119 return TCM_NON_EXISTENT_LUN; 119 return TCM_NON_EXISTENT_LUN;
120 } 120 }
@@ -188,7 +188,7 @@ out_unlock:
188 if (!se_lun) { 188 if (!se_lun) {
189 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 189 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
190 " Access for 0x%08llx\n", 190 " Access for 0x%08llx\n",
191 se_cmd->se_tfo->get_fabric_name(), 191 se_cmd->se_tfo->fabric_name,
192 unpacked_lun); 192 unpacked_lun);
193 return -ENODEV; 193 return -ENODEV;
194 } 194 }
@@ -237,7 +237,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
237 if (!lun) { 237 if (!lun) {
238 pr_err("%s device entries device pointer is" 238 pr_err("%s device entries device pointer is"
239 " NULL, but Initiator has access.\n", 239 " NULL, but Initiator has access.\n",
240 tpg->se_tpg_tfo->get_fabric_name()); 240 tpg->se_tpg_tfo->fabric_name);
241 continue; 241 continue;
242 } 242 }
243 if (lun->lun_rtpi != rtpi) 243 if (lun->lun_rtpi != rtpi)
@@ -571,9 +571,9 @@ int core_dev_add_lun(
571 return rc; 571 return rc;
572 572
573 pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from" 573 pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
574 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 574 " CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
575 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 575 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
576 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); 576 tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
577 /* 577 /*
578 * Update LUN maps for dynamically added initiators when 578 * Update LUN maps for dynamically added initiators when
579 * generate_node_acl is enabled. 579 * generate_node_acl is enabled.
@@ -604,9 +604,9 @@ void core_dev_del_lun(
604 struct se_lun *lun) 604 struct se_lun *lun)
605{ 605{
606 pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from" 606 pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
607 " device object\n", tpg->se_tpg_tfo->get_fabric_name(), 607 " device object\n", tpg->se_tpg_tfo->fabric_name,
608 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 608 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
609 tpg->se_tpg_tfo->get_fabric_name()); 609 tpg->se_tpg_tfo->fabric_name);
610 610
611 core_tpg_remove_lun(tpg, lun); 611 core_tpg_remove_lun(tpg, lun);
612} 612}
@@ -621,7 +621,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
621 621
622 if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) { 622 if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
623 pr_err("%s InitiatorName exceeds maximum size.\n", 623 pr_err("%s InitiatorName exceeds maximum size.\n",
624 tpg->se_tpg_tfo->get_fabric_name()); 624 tpg->se_tpg_tfo->fabric_name);
625 *ret = -EOVERFLOW; 625 *ret = -EOVERFLOW;
626 return NULL; 626 return NULL;
627 } 627 }
@@ -664,7 +664,7 @@ int core_dev_add_initiator_node_lun_acl(
664 return -EINVAL; 664 return -EINVAL;
665 665
666 pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for " 666 pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
667 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 667 " InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
668 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun, 668 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
669 lun_access_ro ? "RO" : "RW", 669 lun_access_ro ? "RO" : "RW",
670 nacl->initiatorname); 670 nacl->initiatorname);
@@ -697,7 +697,7 @@ int core_dev_del_initiator_node_lun_acl(
697 697
698 pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for" 698 pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
699 " InitiatorNode: %s Mapped LUN: %llu\n", 699 " InitiatorNode: %s Mapped LUN: %llu\n",
700 tpg->se_tpg_tfo->get_fabric_name(), 700 tpg->se_tpg_tfo->fabric_name,
701 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 701 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
702 nacl->initiatorname, lacl->mapped_lun); 702 nacl->initiatorname, lacl->mapped_lun);
703 703
@@ -709,9 +709,9 @@ void core_dev_free_initiator_node_lun_acl(
709 struct se_lun_acl *lacl) 709 struct se_lun_acl *lacl)
710{ 710{
711 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 711 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
712 " Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(), 712 " Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
713 tpg->se_tpg_tfo->tpg_get_tag(tpg), 713 tpg->se_tpg_tfo->tpg_get_tag(tpg),
714 tpg->se_tpg_tfo->get_fabric_name(), 714 tpg->se_tpg_tfo->fabric_name,
715 lacl->se_lun_nacl->initiatorname, lacl->mapped_lun); 715 lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
716 716
717 kfree(lacl); 717 kfree(lacl);
@@ -720,36 +720,17 @@ void core_dev_free_initiator_node_lun_acl(
720static void scsi_dump_inquiry(struct se_device *dev) 720static void scsi_dump_inquiry(struct se_device *dev)
721{ 721{
722 struct t10_wwn *wwn = &dev->t10_wwn; 722 struct t10_wwn *wwn = &dev->t10_wwn;
723 char buf[17]; 723 int device_type = dev->transport->get_device_type(dev);
724 int i, device_type; 724
725 /* 725 /*
726 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 726 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
727 */ 727 */
728 for (i = 0; i < 8; i++) 728 pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
729 if (wwn->vendor[i] >= 0x20) 729 wwn->vendor);
730 buf[i] = wwn->vendor[i]; 730 pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
731 else 731 wwn->model);
732 buf[i] = ' '; 732 pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
733 buf[i] = '\0'; 733 wwn->revision);
734 pr_debug(" Vendor: %s\n", buf);
735
736 for (i = 0; i < 16; i++)
737 if (wwn->model[i] >= 0x20)
738 buf[i] = wwn->model[i];
739 else
740 buf[i] = ' ';
741 buf[i] = '\0';
742 pr_debug(" Model: %s\n", buf);
743
744 for (i = 0; i < 4; i++)
745 if (wwn->revision[i] >= 0x20)
746 buf[i] = wwn->revision[i];
747 else
748 buf[i] = ' ';
749 buf[i] = '\0';
750 pr_debug(" Revision: %s\n", buf);
751
752 device_type = dev->transport->get_device_type(dev);
753 pr_debug(" Type: %s ", scsi_device_type(device_type)); 734 pr_debug(" Type: %s ", scsi_device_type(device_type));
754} 735}
755 736
@@ -805,6 +786,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
805 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 786 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
806 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; 787 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
807 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 788 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
789 dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
808 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; 790 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
809 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 791 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
810 dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL; 792 dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
@@ -822,13 +804,19 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
822 804
823 xcopy_lun = &dev->xcopy_lun; 805 xcopy_lun = &dev->xcopy_lun;
824 rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); 806 rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
825 init_completion(&xcopy_lun->lun_ref_comp);
826 init_completion(&xcopy_lun->lun_shutdown_comp); 807 init_completion(&xcopy_lun->lun_shutdown_comp);
827 INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); 808 INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
828 INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); 809 INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
829 mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); 810 mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
830 xcopy_lun->lun_tpg = &xcopy_pt_tpg; 811 xcopy_lun->lun_tpg = &xcopy_pt_tpg;
831 812
813 /* Preload the default INQUIRY const values */
814 strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
815 strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
816 sizeof(dev->t10_wwn.model));
817 strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
818 sizeof(dev->t10_wwn.revision));
819
832 return dev; 820 return dev;
833} 821}
834 822
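
This hunk also moves the default INQUIRY identity strings out of target_configure_device() (see the removal further down) and switches from strncpy() to strlcpy(). The difference matters for these fixed-width fields: strncpy() leaves the destination unterminated whenever the source fills it, while strlcpy() always NUL-terminates and truncates safely. A userspace sketch — strlcpy() is a kernel/BSD API, so the demo carries a local copy under an assumed name:

#include <stdio.h>
#include <string.h>

/* Local stand-in for the kernel's strlcpy(); glibc does not provide one. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';	/* always terminated, unlike strncpy() */
	}
	return len;
}

int main(void)
{
	char vendor[8 + 1];	/* INQUIRY_VENDOR_LEN + 1, as in struct t10_wwn */

	my_strlcpy(vendor, "LIO-ORG", sizeof(vendor));
	printf("[%s]\n", vendor);	/* [LIO-ORG] */
	return 0;
}
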
@@ -987,35 +975,10 @@ int target_configure_device(struct se_device *dev)
987 goto out_destroy_device; 975 goto out_destroy_device;
988 976
989 /* 977 /*
990 * Startup the struct se_device processing thread
991 */
992 dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
993 dev->transport->name);
994 if (!dev->tmr_wq) {
995 pr_err("Unable to create tmr workqueue for %s\n",
996 dev->transport->name);
997 ret = -ENOMEM;
998 goto out_free_alua;
999 }
1000
1001 /*
1002 * Setup work_queue for QUEUE_FULL 978 * Setup work_queue for QUEUE_FULL
1003 */ 979 */
1004 INIT_WORK(&dev->qf_work_queue, target_qf_do_work); 980 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1005 981
1006 /*
1007 * Preload the initial INQUIRY const values if we are doing
1008 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1009 * passthrough because this is being provided by the backend LLD.
1010 */
1011 if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
1012 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1013 strncpy(&dev->t10_wwn.model[0],
1014 dev->transport->inquiry_prod, 16);
1015 strncpy(&dev->t10_wwn.revision[0],
1016 dev->transport->inquiry_rev, 4);
1017 }
1018
1019 scsi_dump_inquiry(dev); 982 scsi_dump_inquiry(dev);
1020 983
1021 spin_lock(&hba->device_lock); 984 spin_lock(&hba->device_lock);
@@ -1026,8 +989,6 @@ int target_configure_device(struct se_device *dev)
1026 989
1027 return 0; 990 return 0;
1028 991
1029out_free_alua:
1030 core_alua_free_lu_gp_mem(dev);
1031out_destroy_device: 992out_destroy_device:
1032 dev->transport->destroy_device(dev); 993 dev->transport->destroy_device(dev);
1033out_free_index: 994out_free_index:
@@ -1046,8 +1007,6 @@ void target_free_device(struct se_device *dev)
1046 WARN_ON(!list_empty(&dev->dev_sep_list)); 1007 WARN_ON(!list_empty(&dev->dev_sep_list));
1047 1008
1048 if (target_dev_configured(dev)) { 1009 if (target_dev_configured(dev)) {
1049 destroy_workqueue(dev->tmr_wq);
1050
1051 dev->transport->destroy_device(dev); 1010 dev->transport->destroy_device(dev);
1052 1011
1053 mutex_lock(&device_mutex); 1012 mutex_lock(&device_mutex);
@@ -1159,6 +1118,18 @@ passthrough_parse_cdb(struct se_cmd *cmd,
1159 } 1118 }
1160 1119
1161 /* 1120 /*
1121 * With emulate_pr disabled, all reservation requests should fail,
1122 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
1123 */
1124 if (!dev->dev_attrib.emulate_pr &&
1125 ((cdb[0] == PERSISTENT_RESERVE_IN) ||
1126 (cdb[0] == PERSISTENT_RESERVE_OUT) ||
1127 (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
1128 (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
1129 return TCM_UNSUPPORTED_SCSI_OPCODE;
1130 }
1131
1132 /*
1162 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to 1133 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
1163 * emulate the response, since tcmu does not have the information 1134 * emulate the response, since tcmu does not have the information
1164 * required to process these commands. 1135 * required to process these commands.
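
The new emulate_pr gate in passthrough_parse_cdb() — duplicated in spc_parse_cdb() later in this patch — fails every SPC-2/SPC-3 reservation opcode up front once PR emulation is switched off, rather than letting the backend see them. A condensed, runnable stand-in for that check; the opcode values are the standard SCSI ones, while the scaffolding around them is invented for the demo:

#include <stdbool.h>
#include <stdio.h>

/* Opcode values from SPC; these match the standard, not a private API. */
#define RESERVE			0x16
#define RELEASE			0x17
#define RESERVE_10		0x56
#define RELEASE_10		0x57
#define PERSISTENT_RESERVE_IN	0x5e
#define PERSISTENT_RESERVE_OUT	0x5f

/* Mirrors the condition added to passthrough_parse_cdb()/spc_parse_cdb(). */
static bool reservation_opcode(unsigned char op)
{
	switch (op) {
	case RESERVE:
	case RELEASE:
	case RESERVE_10:
	case RELEASE_10:
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	bool emulate_pr = false;	/* the new device attribute, switched off */
	unsigned char cdb0 = PERSISTENT_RESERVE_OUT;

	if (!emulate_pr && reservation_opcode(cdb0))
		puts("TCM_UNSUPPORTED_SCSI_OPCODE");
	return 0;
}
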
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index aa2f4f632ebe..9a6e20a2af7d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -203,7 +203,7 @@ static ssize_t target_fabric_mappedlun_write_protect_store(
203 203
204 pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" 204 pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
205 " Mapped LUN: %llu Write Protect bit to %s\n", 205 " Mapped LUN: %llu Write Protect bit to %s\n",
206 se_tpg->se_tpg_tfo->get_fabric_name(), 206 se_tpg->se_tpg_tfo->fabric_name,
207 se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF"); 207 se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
208 208
209 return count; 209 return count;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 0c6635587930..853344415963 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -138,7 +138,6 @@ int init_se_kmem_caches(void);
138void release_se_kmem_caches(void); 138void release_se_kmem_caches(void);
139u32 scsi_get_new_index(scsi_index_t); 139u32 scsi_get_new_index(scsi_index_t);
140void transport_subsystem_check_init(void); 140void transport_subsystem_check_init(void);
141int transport_cmd_finish_abort(struct se_cmd *);
142unsigned char *transport_dump_cmd_direction(struct se_cmd *); 141unsigned char *transport_dump_cmd_direction(struct se_cmd *);
143void transport_dump_dev_state(struct se_device *, char *, int *); 142void transport_dump_dev_state(struct se_device *, char *, int *);
144void transport_dump_dev_info(struct se_device *, struct se_lun *, 143void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -148,7 +147,6 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
148int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); 147int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
149int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); 148int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
150void transport_clear_lun_ref(struct se_lun *); 149void transport_clear_lun_ref(struct se_lun *);
151void transport_send_task_abort(struct se_cmd *);
152sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 150sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
153void target_qf_do_work(struct work_struct *work); 151void target_qf_do_work(struct work_struct *work);
154bool target_check_wce(struct se_device *dev); 152bool target_check_wce(struct se_device *dev);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 10db5656fd5d..397f38cb7f4e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -235,7 +235,7 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
235 tpg = sess->se_tpg; 235 tpg = sess->se_tpg;
236 pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->" 236 pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->"
237 " MAPPED LUN: %llu for %s\n", 237 " MAPPED LUN: %llu for %s\n",
238 tpg->se_tpg_tfo->get_fabric_name(), 238 tpg->se_tpg_tfo->fabric_name,
239 cmd->se_lun->unpacked_lun, cmd->orig_fe_lun, 239 cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
240 sess->se_node_acl->initiatorname); 240 sess->se_node_acl->initiatorname);
241 241
@@ -278,7 +278,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
278 if (dev->dev_reserved_node_acl && 278 if (dev->dev_reserved_node_acl &&
279 (dev->dev_reserved_node_acl != sess->se_node_acl)) { 279 (dev->dev_reserved_node_acl != sess->se_node_acl)) {
280 pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", 280 pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
281 tpg->se_tpg_tfo->get_fabric_name()); 281 tpg->se_tpg_tfo->fabric_name);
282 pr_err("Original reserver LUN: %llu %s\n", 282 pr_err("Original reserver LUN: %llu %s\n",
283 cmd->se_lun->unpacked_lun, 283 cmd->se_lun->unpacked_lun,
284 dev->dev_reserved_node_acl->initiatorname); 284 dev->dev_reserved_node_acl->initiatorname);
@@ -297,7 +297,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
297 dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID; 297 dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
298 } 298 }
299 pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu" 299 pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu"
300 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 300 " for %s\n", tpg->se_tpg_tfo->fabric_name,
301 cmd->se_lun->unpacked_lun, cmd->orig_fe_lun, 301 cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
302 sess->se_node_acl->initiatorname); 302 sess->se_node_acl->initiatorname);
303 303
@@ -914,11 +914,11 @@ static void core_scsi3_aptpl_reserve(
914 914
915 pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created" 915 pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
916 " new reservation holder TYPE: %s ALL_TG_PT: %d\n", 916 " new reservation holder TYPE: %s ALL_TG_PT: %d\n",
917 tpg->se_tpg_tfo->get_fabric_name(), 917 tpg->se_tpg_tfo->fabric_name,
918 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 918 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
919 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 919 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
920 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", 920 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
921 tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, 921 tpg->se_tpg_tfo->fabric_name, node_acl->initiatorname,
922 i_buf); 922 i_buf);
923} 923}
924 924
@@ -1036,19 +1036,19 @@ static void __core_scsi3_dump_registration(
1036 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); 1036 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
1037 1037
1038 pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator" 1038 pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
1039 " Node: %s%s\n", tfo->get_fabric_name(), (register_type == REGISTER_AND_MOVE) ? 1039 " Node: %s%s\n", tfo->fabric_name, (register_type == REGISTER_AND_MOVE) ?
1040 "_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? 1040 "_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ?
1041 "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, 1041 "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
1042 i_buf); 1042 i_buf);
1043 pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", 1043 pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
1044 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), 1044 tfo->fabric_name, tfo->tpg_get_wwn(se_tpg),
1045 tfo->tpg_get_tag(se_tpg)); 1045 tfo->tpg_get_tag(se_tpg));
1046 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1046 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
1047 " Port(s)\n", tfo->get_fabric_name(), 1047 " Port(s)\n", tfo->fabric_name,
1048 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", 1048 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
1049 dev->transport->name); 1049 dev->transport->name);
1050 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1050 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
1051 " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), 1051 " 0x%08x APTPL: %d\n", tfo->fabric_name,
1052 pr_reg->pr_res_key, pr_reg->pr_res_generation, 1052 pr_reg->pr_res_key, pr_reg->pr_res_generation,
1053 pr_reg->pr_reg_aptpl); 1053 pr_reg->pr_reg_aptpl);
1054} 1054}
@@ -1329,7 +1329,7 @@ static void __core_scsi3_free_registration(
1329 */ 1329 */
1330 while (atomic_read(&pr_reg->pr_res_holders) != 0) { 1330 while (atomic_read(&pr_reg->pr_res_holders) != 0) {
1331 pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n", 1331 pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
1332 tfo->get_fabric_name()); 1332 tfo->fabric_name);
1333 cpu_relax(); 1333 cpu_relax();
1334 } 1334 }
1335 1335
@@ -1341,15 +1341,15 @@ static void __core_scsi3_free_registration(
1341 1341
1342 spin_lock(&pr_tmpl->registration_lock); 1342 spin_lock(&pr_tmpl->registration_lock);
1343 pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator" 1343 pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
1344 " Node: %s%s\n", tfo->get_fabric_name(), 1344 " Node: %s%s\n", tfo->fabric_name,
1345 pr_reg->pr_reg_nacl->initiatorname, 1345 pr_reg->pr_reg_nacl->initiatorname,
1346 i_buf); 1346 i_buf);
1347 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1347 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
1348 " Port(s)\n", tfo->get_fabric_name(), 1348 " Port(s)\n", tfo->fabric_name,
1349 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", 1349 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
1350 dev->transport->name); 1350 dev->transport->name);
1351 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1351 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
1352 " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, 1352 " 0x%08x\n", tfo->fabric_name, pr_reg->pr_res_key,
1353 pr_reg->pr_res_generation); 1353 pr_reg->pr_res_generation);
1354 1354
1355 if (!preempt_and_abort_list) { 1355 if (!preempt_and_abort_list) {
@@ -1645,7 +1645,7 @@ core_scsi3_decode_spec_i_port(
1645 dest_tpg = tmp_tpg; 1645 dest_tpg = tmp_tpg;
1646 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:" 1646 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
1647 " %s Port RTPI: %hu\n", 1647 " %s Port RTPI: %hu\n",
1648 dest_tpg->se_tpg_tfo->get_fabric_name(), 1648 dest_tpg->se_tpg_tfo->fabric_name,
1649 dest_node_acl->initiatorname, dest_rtpi); 1649 dest_node_acl->initiatorname, dest_rtpi);
1650 1650
1651 spin_lock(&dev->se_port_lock); 1651 spin_lock(&dev->se_port_lock);
@@ -1662,7 +1662,7 @@ core_scsi3_decode_spec_i_port(
1662 1662
1663 pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" 1663 pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
1664 " tid_len: %d for %s + %s\n", 1664 " tid_len: %d for %s + %s\n",
1665 dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, 1665 dest_tpg->se_tpg_tfo->fabric_name, cmd->data_length,
1666 tpdl, tid_len, i_str, iport_ptr); 1666 tpdl, tid_len, i_str, iport_ptr);
1667 1667
1668 if (tid_len > tpdl) { 1668 if (tid_len > tpdl) {
@@ -1683,7 +1683,7 @@ core_scsi3_decode_spec_i_port(
1683 if (!dest_se_deve) { 1683 if (!dest_se_deve) {
1684 pr_err("Unable to locate %s dest_se_deve" 1684 pr_err("Unable to locate %s dest_se_deve"
1685 " from destination RTPI: %hu\n", 1685 " from destination RTPI: %hu\n",
1686 dest_tpg->se_tpg_tfo->get_fabric_name(), 1686 dest_tpg->se_tpg_tfo->fabric_name,
1687 dest_rtpi); 1687 dest_rtpi);
1688 1688
1689 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1689 core_scsi3_nodeacl_undepend_item(dest_node_acl);
@@ -1704,7 +1704,7 @@ core_scsi3_decode_spec_i_port(
1704 1704
1705 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" 1705 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
1706 " dest_se_deve mapped_lun: %llu\n", 1706 " dest_se_deve mapped_lun: %llu\n",
1707 dest_tpg->se_tpg_tfo->get_fabric_name(), 1707 dest_tpg->se_tpg_tfo->fabric_name,
1708 dest_node_acl->initiatorname, dest_se_deve->mapped_lun); 1708 dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
1709 1709
1710 /* 1710 /*
@@ -1815,7 +1815,7 @@ core_scsi3_decode_spec_i_port(
1815 1815
1816 pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully" 1816 pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
1817 " registered Transport ID for Node: %s%s Mapped LUN:" 1817 " registered Transport ID for Node: %s%s Mapped LUN:"
1818 " %llu\n", dest_tpg->se_tpg_tfo->get_fabric_name(), 1818 " %llu\n", dest_tpg->se_tpg_tfo->fabric_name,
1819 dest_node_acl->initiatorname, i_buf, (dest_se_deve) ? 1819 dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
1820 dest_se_deve->mapped_lun : 0); 1820 dest_se_deve->mapped_lun : 0);
1821 1821
@@ -1913,7 +1913,7 @@ static int core_scsi3_update_aptpl_buf(
1913 "res_holder=1\nres_type=%02x\n" 1913 "res_holder=1\nres_type=%02x\n"
1914 "res_scope=%02x\nres_all_tg_pt=%d\n" 1914 "res_scope=%02x\nres_all_tg_pt=%d\n"
1915 "mapped_lun=%llu\n", reg_count, 1915 "mapped_lun=%llu\n", reg_count,
1916 tpg->se_tpg_tfo->get_fabric_name(), 1916 tpg->se_tpg_tfo->fabric_name,
1917 pr_reg->pr_reg_nacl->initiatorname, isid_buf, 1917 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1918 pr_reg->pr_res_key, pr_reg->pr_res_type, 1918 pr_reg->pr_res_key, pr_reg->pr_res_type,
1919 pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, 1919 pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
@@ -1923,7 +1923,7 @@ static int core_scsi3_update_aptpl_buf(
1923 "initiator_fabric=%s\ninitiator_node=%s\n%s" 1923 "initiator_fabric=%s\ninitiator_node=%s\n%s"
1924 "sa_res_key=%llu\nres_holder=0\n" 1924 "sa_res_key=%llu\nres_holder=0\n"
1925 "res_all_tg_pt=%d\nmapped_lun=%llu\n", 1925 "res_all_tg_pt=%d\nmapped_lun=%llu\n",
1926 reg_count, tpg->se_tpg_tfo->get_fabric_name(), 1926 reg_count, tpg->se_tpg_tfo->fabric_name,
1927 pr_reg->pr_reg_nacl->initiatorname, isid_buf, 1927 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1928 pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, 1928 pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
1929 pr_reg->pr_res_mapped_lun); 1929 pr_reg->pr_res_mapped_lun);
@@ -1942,7 +1942,7 @@ static int core_scsi3_update_aptpl_buf(
1942 */ 1942 */
1943 snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" 1943 snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
1944 "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:" 1944 "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:"
1945 " %d\n", tpg->se_tpg_tfo->get_fabric_name(), 1945 " %d\n", tpg->se_tpg_tfo->fabric_name,
1946 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1946 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1947 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1947 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1948 pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun, 1948 pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun,
@@ -2168,7 +2168,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2168 pr_reg->pr_res_key = sa_res_key; 2168 pr_reg->pr_res_key = sa_res_key;
2169 pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation" 2169 pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
2170 " Key for %s to: 0x%016Lx PRgeneration:" 2170 " Key for %s to: 0x%016Lx PRgeneration:"
2171 " 0x%08x\n", cmd->se_tfo->get_fabric_name(), 2171 " 0x%08x\n", cmd->se_tfo->fabric_name,
2172 (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "", 2172 (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "",
2173 pr_reg->pr_reg_nacl->initiatorname, 2173 pr_reg->pr_reg_nacl->initiatorname,
2174 pr_reg->pr_res_key, pr_reg->pr_res_generation); 2174 pr_reg->pr_res_key, pr_reg->pr_res_generation);
@@ -2356,9 +2356,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2356 pr_err("SPC-3 PR: Attempted RESERVE from" 2356 pr_err("SPC-3 PR: Attempted RESERVE from"
2357 " [%s]: %s while reservation already held by" 2357 " [%s]: %s while reservation already held by"
2358 " [%s]: %s, returning RESERVATION_CONFLICT\n", 2358 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2359 cmd->se_tfo->get_fabric_name(), 2359 cmd->se_tfo->fabric_name,
2360 se_sess->se_node_acl->initiatorname, 2360 se_sess->se_node_acl->initiatorname,
2361 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 2361 pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
2362 pr_res_holder->pr_reg_nacl->initiatorname); 2362 pr_res_holder->pr_reg_nacl->initiatorname);
2363 2363
2364 spin_unlock(&dev->dev_reservation_lock); 2364 spin_unlock(&dev->dev_reservation_lock);
@@ -2379,9 +2379,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2379 " [%s]: %s trying to change TYPE and/or SCOPE," 2379 " [%s]: %s trying to change TYPE and/or SCOPE,"
2380 " while reservation already held by [%s]: %s," 2380 " while reservation already held by [%s]: %s,"
2381 " returning RESERVATION_CONFLICT\n", 2381 " returning RESERVATION_CONFLICT\n",
2382 cmd->se_tfo->get_fabric_name(), 2382 cmd->se_tfo->fabric_name,
2383 se_sess->se_node_acl->initiatorname, 2383 se_sess->se_node_acl->initiatorname,
2384 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 2384 pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
2385 pr_res_holder->pr_reg_nacl->initiatorname); 2385 pr_res_holder->pr_reg_nacl->initiatorname);
2386 2386
2387 spin_unlock(&dev->dev_reservation_lock); 2387 spin_unlock(&dev->dev_reservation_lock);
@@ -2414,10 +2414,10 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2414 2414
2415 pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new" 2415 pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
2416 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2416 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2417 cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), 2417 cmd->se_tfo->fabric_name, core_scsi3_pr_dump_type(type),
2418 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2418 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2419 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", 2419 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
2420 cmd->se_tfo->get_fabric_name(), 2420 cmd->se_tfo->fabric_name,
2421 se_sess->se_node_acl->initiatorname, 2421 se_sess->se_node_acl->initiatorname,
2422 i_buf); 2422 i_buf);
2423 spin_unlock(&dev->dev_reservation_lock); 2423 spin_unlock(&dev->dev_reservation_lock);
@@ -2506,12 +2506,12 @@ out:
2506 if (!dev->dev_pr_res_holder) { 2506 if (!dev->dev_pr_res_holder) {
2507 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2507 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2508 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2508 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2509 tfo->get_fabric_name(), (explicit) ? "explicit" : 2509 tfo->fabric_name, (explicit) ? "explicit" :
2510 "implicit", core_scsi3_pr_dump_type(pr_res_type), 2510 "implicit", core_scsi3_pr_dump_type(pr_res_type),
2511 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2511 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2512 } 2512 }
2513 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", 2513 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
2514 tfo->get_fabric_name(), se_nacl->initiatorname, 2514 tfo->fabric_name, se_nacl->initiatorname,
2515 i_buf); 2515 i_buf);
2516 /* 2516 /*
2517 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE 2517 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
@@ -2609,9 +2609,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
2609 " reservation from [%s]: %s with different TYPE " 2609 " reservation from [%s]: %s with different TYPE "
2610 "and/or SCOPE while reservation already held by" 2610 "and/or SCOPE while reservation already held by"
2611 " [%s]: %s, returning RESERVATION_CONFLICT\n", 2611 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2612 cmd->se_tfo->get_fabric_name(), 2612 cmd->se_tfo->fabric_name,
2613 se_sess->se_node_acl->initiatorname, 2613 se_sess->se_node_acl->initiatorname,
2614 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 2614 pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
2615 pr_res_holder->pr_reg_nacl->initiatorname); 2615 pr_res_holder->pr_reg_nacl->initiatorname);
2616 2616
2617 spin_unlock(&dev->dev_reservation_lock); 2617 spin_unlock(&dev->dev_reservation_lock);
@@ -2752,7 +2752,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
2752 spin_unlock(&pr_tmpl->registration_lock); 2752 spin_unlock(&pr_tmpl->registration_lock);
2753 2753
2754 pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n", 2754 pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
2755 cmd->se_tfo->get_fabric_name()); 2755 cmd->se_tfo->fabric_name);
2756 2756
2757 core_scsi3_update_and_write_aptpl(cmd->se_dev, false); 2757 core_scsi3_update_and_write_aptpl(cmd->se_dev, false);
2758 2758
@@ -2791,11 +2791,11 @@ static void __core_scsi3_complete_pro_preempt(
2791 2791
2792 pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new" 2792 pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
2793 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2793 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2794 tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", 2794 tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
2795 core_scsi3_pr_dump_type(type), 2795 core_scsi3_pr_dump_type(type),
2796 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2796 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2797 pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", 2797 pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
2798 tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "", 2798 tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
2799 nacl->initiatorname, i_buf); 2799 nacl->initiatorname, i_buf);
2800 /* 2800 /*
2801 * For PREEMPT_AND_ABORT, add the preempting reservation's 2801 * For PREEMPT_AND_ABORT, add the preempting reservation's
@@ -3282,7 +3282,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3282 " proto_ident: 0x%02x does not match ident: 0x%02x" 3282 " proto_ident: 0x%02x does not match ident: 0x%02x"
3283 " from fabric: %s\n", proto_ident, 3283 " from fabric: %s\n", proto_ident,
3284 dest_se_tpg->proto_id, 3284 dest_se_tpg->proto_id,
3285 dest_tf_ops->get_fabric_name()); 3285 dest_tf_ops->fabric_name);
3286 ret = TCM_INVALID_PARAMETER_LIST; 3286 ret = TCM_INVALID_PARAMETER_LIST;
3287 goto out; 3287 goto out;
3288 } 3288 }
@@ -3299,7 +3299,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3299 buf = NULL; 3299 buf = NULL;
3300 3300
3301 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" 3301 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
3302 " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? 3302 " %s\n", dest_tf_ops->fabric_name, (iport_ptr != NULL) ?
3303 "port" : "device", initiator_str, (iport_ptr != NULL) ? 3303 "port" : "device", initiator_str, (iport_ptr != NULL) ?
3304 iport_ptr : ""); 3304 iport_ptr : "");
3305 /* 3305 /*
@@ -3344,7 +3344,7 @@ after_iport_check:
3344 3344
3345 if (!dest_node_acl) { 3345 if (!dest_node_acl) {
3346 pr_err("Unable to locate %s dest_node_acl for" 3346 pr_err("Unable to locate %s dest_node_acl for"
3347 " TransportID%s\n", dest_tf_ops->get_fabric_name(), 3347 " TransportID%s\n", dest_tf_ops->fabric_name,
3348 initiator_str); 3348 initiator_str);
3349 ret = TCM_INVALID_PARAMETER_LIST; 3349 ret = TCM_INVALID_PARAMETER_LIST;
3350 goto out; 3350 goto out;
@@ -3360,7 +3360,7 @@ after_iport_check:
3360 } 3360 }
3361 3361
3362 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" 3362 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
3363 " %s from TransportID\n", dest_tf_ops->get_fabric_name(), 3363 " %s from TransportID\n", dest_tf_ops->fabric_name,
3364 dest_node_acl->initiatorname); 3364 dest_node_acl->initiatorname);
3365 3365
3366 /* 3366 /*
@@ -3370,7 +3370,7 @@ after_iport_check:
3370 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); 3370 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
3371 if (!dest_se_deve) { 3371 if (!dest_se_deve) {
3372 pr_err("Unable to locate %s dest_se_deve from RTPI:" 3372 pr_err("Unable to locate %s dest_se_deve from RTPI:"
3373 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); 3373 " %hu\n", dest_tf_ops->fabric_name, rtpi);
3374 ret = TCM_INVALID_PARAMETER_LIST; 3374 ret = TCM_INVALID_PARAMETER_LIST;
3375 goto out; 3375 goto out;
3376 } 3376 }
@@ -3385,7 +3385,7 @@ after_iport_check:
3385 3385
3386 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" 3386 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
3387 " ACL for dest_se_deve->mapped_lun: %llu\n", 3387 " ACL for dest_se_deve->mapped_lun: %llu\n",
3388 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, 3388 dest_tf_ops->fabric_name, dest_node_acl->initiatorname,
3389 dest_se_deve->mapped_lun); 3389 dest_se_deve->mapped_lun);
3390 3390
3391 /* 3391 /*
@@ -3501,13 +3501,13 @@ after_iport_check:
3501 3501
3502 pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" 3502 pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
3503 " created new reservation holder TYPE: %s on object RTPI:" 3503 " created new reservation holder TYPE: %s on object RTPI:"
3504 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), 3504 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->fabric_name,
3505 core_scsi3_pr_dump_type(type), rtpi, 3505 core_scsi3_pr_dump_type(type), rtpi,
3506 dest_pr_reg->pr_res_generation); 3506 dest_pr_reg->pr_res_generation);
3507 pr_debug("SPC-3 PR Successfully moved reservation from" 3507 pr_debug("SPC-3 PR Successfully moved reservation from"
3508 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", 3508 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
3509 tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, 3509 tf_ops->fabric_name, pr_reg_nacl->initiatorname,
3510 i_buf, dest_tf_ops->get_fabric_name(), 3510 i_buf, dest_tf_ops->fabric_name,
3511 dest_node_acl->initiatorname, (iport_ptr != NULL) ? 3511 dest_node_acl->initiatorname, (iport_ptr != NULL) ?
3512 iport_ptr : ""); 3512 iport_ptr : "");
3513 /* 3513 /*
@@ -4095,6 +4095,8 @@ target_check_reservation(struct se_cmd *cmd)
4095 return 0; 4095 return 0;
4096 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) 4096 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
4097 return 0; 4097 return 0;
4098 if (!dev->dev_attrib.emulate_pr)
4099 return 0;
4098 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 4100 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
4099 return 0; 4101 return 0;
4100 4102
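
Note the ordering of the new early return in target_check_reservation(): emulate_pr is tested before TRANSPORT_FLAG_PASSTHROUGH_PGR, so a device with PR emulation disabled skips reservation conflict checking regardless of what the backend advertises. A condensed sketch of the resulting guard ladder, with a hypothetical cut-down stand-in for struct se_device:

#include <stdio.h>
#include <stdbool.h>

struct se_device_lite {		/* hypothetical stand-in, not the kernel struct */
	bool internal_hba;
	bool emulate_pr;
	bool passthrough_pgr;
	bool spc2_reservations;
};

static const char *reservation_path(const struct se_device_lite *dev)
{
	if (dev->internal_hba)
		return "skip: internal-use HBA";
	if (!dev->emulate_pr)		/* new early return in this patch */
		return "skip: PR emulation disabled";
	if (dev->passthrough_pgr)
		return "skip: backend handles PGR itself";
	return dev->spc2_reservations ? "run SPC-2 conflict check"
				      : "run SPC-3 conflict check";
}

int main(void)
{
	struct se_device_lite dev = { .passthrough_pgr = true };

	/* emulate_pr=false wins over the passthrough flag, per the ordering. */
	puts(reservation_path(&dev));
	return 0;
}
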
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index c062d363dce3..b5388a106567 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -179,20 +179,20 @@ out_free:
179static void 179static void
180pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) 180pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
181{ 181{
182 unsigned char *buf;
183
184 if (sdev->inquiry_len < INQUIRY_LEN) 182 if (sdev->inquiry_len < INQUIRY_LEN)
185 return; 183 return;
186
187 buf = sdev->inquiry;
188 if (!buf)
189 return;
190 /* 184 /*
191 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev() 185 * Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun()
192 */ 186 */
193 memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor)); 187 BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1);
194 memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model)); 188 snprintf(wwn->vendor, sizeof(wwn->vendor),
195 memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision)); 189 "%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor);
190 BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1);
191 snprintf(wwn->model, sizeof(wwn->model),
192 "%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model);
193 BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1);
194 snprintf(wwn->revision, sizeof(wwn->revision),
195 "%." __stringify(INQUIRY_REVISION_LEN) "s", sdev->rev);
196} 196}
197 197
198static int 198static int
@@ -811,7 +811,6 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
811 struct scsi_device *sd = pdv->pdv_sd; 811 struct scsi_device *sd = pdv->pdv_sd;
812 unsigned char host_id[16]; 812 unsigned char host_id[16];
813 ssize_t bl; 813 ssize_t bl;
814 int i;
815 814
816 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) 815 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
817 snprintf(host_id, 16, "%d", pdv->pdv_host_id); 816 snprintf(host_id, 16, "%d", pdv->pdv_host_id);
@@ -824,29 +823,12 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
824 host_id); 823 host_id);
825 824
826 if (sd) { 825 if (sd) {
827 bl += sprintf(b + bl, " "); 826 bl += sprintf(b + bl, " Vendor: %."
828 bl += sprintf(b + bl, "Vendor: "); 827 __stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor);
829 for (i = 0; i < 8; i++) { 828 bl += sprintf(b + bl, " Model: %."
830 if (ISPRINT(sd->vendor[i])) /* printable character? */ 829 __stringify(INQUIRY_MODEL_LEN) "s", sd->model);
831 bl += sprintf(b + bl, "%c", sd->vendor[i]); 830 bl += sprintf(b + bl, " Rev: %."
832 else 831 __stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev);
833 bl += sprintf(b + bl, " ");
834 }
835 bl += sprintf(b + bl, " Model: ");
836 for (i = 0; i < 16; i++) {
837 if (ISPRINT(sd->model[i])) /* printable character ? */
838 bl += sprintf(b + bl, "%c", sd->model[i]);
839 else
840 bl += sprintf(b + bl, " ");
841 }
842 bl += sprintf(b + bl, " Rev: ");
843 for (i = 0; i < 4; i++) {
844 if (ISPRINT(sd->rev[i])) /* printable character ? */
845 bl += sprintf(b + bl, "%c", sd->rev[i]);
846 else
847 bl += sprintf(b + bl, " ");
848 }
849 bl += sprintf(b + bl, "\n");
850 } 832 }
851 return bl; 833 return bl;
852} 834}
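
pscsi_set_inquiry_info() used to memcpy() out of the raw sdev->inquiry buffer; it now formats sdev->vendor/model/rev through snprintf() with a "%.<N>s" precision built via __stringify(). The precision is what makes this safe: those source fields point into raw INQUIRY data and are not NUL-terminated, and a %s precision caps how many bytes snprintf() may read while the destination always gets a terminator. A self-contained demo of the idiom, with a local copy of __stringify since that macro lives in the kernel headers:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define INQUIRY_VENDOR_LEN	8

int main(void)
{
	/* Raw INQUIRY bytes: 8 vendor characters, NOT NUL-terminated. */
	const char raw[] = { 'L', 'I', 'O', '-', 'O', 'R', 'G', ' ', 'X' };
	char vendor[INQUIRY_VENDOR_LEN + 1];

	/*
	 * String-literal concatenation builds "%.8s": the precision stops
	 * snprintf() after 8 source bytes (the stray 'X' is never read),
	 * and the destination is NUL-terminated regardless.
	 */
	snprintf(vendor, sizeof(vendor),
		 "%." __stringify(INQUIRY_VENDOR_LEN) "s", raw);
	printf("[%s]\n", vendor);	/* [LIO-ORG ] */
	return 0;
}
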
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index f459118bc11b..47094ae01c04 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -108,12 +108,19 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
108 108
109 buf[7] = 0x2; /* CmdQue=1 */ 109 buf[7] = 0x2; /* CmdQue=1 */
110 110
111 memcpy(&buf[8], "LIO-ORG ", 8); 111 /*
112 memset(&buf[16], 0x20, 16); 112 * ASCII data fields described as being left-aligned shall have any
113 * unused bytes at the end of the field (i.e., highest offset) and the
114 * unused bytes shall be filled with ASCII space characters (20h).
115 */
116 memset(&buf[8], 0x20,
117 INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
118 memcpy(&buf[8], dev->t10_wwn.vendor,
119 strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
113 memcpy(&buf[16], dev->t10_wwn.model, 120 memcpy(&buf[16], dev->t10_wwn.model,
114 min_t(size_t, strlen(dev->t10_wwn.model), 16)); 121 strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
115 memcpy(&buf[32], dev->t10_wwn.revision, 122 memcpy(&buf[32], dev->t10_wwn.revision,
116 min_t(size_t, strlen(dev->t10_wwn.revision), 4)); 123 strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));
117 buf[4] = 31; /* Set additional length to 31 */ 124 buf[4] = 31; /* Set additional length to 31 */
118 125
119 return 0; 126 return 0;
@@ -251,7 +258,10 @@ check_t10_vend_desc:
251 buf[off] = 0x2; /* ASCII */ 258 buf[off] = 0x2; /* ASCII */
252 buf[off+1] = 0x1; /* T10 Vendor ID */ 259 buf[off+1] = 0x1; /* T10 Vendor ID */
253 buf[off+2] = 0x0; 260 buf[off+2] = 0x0;
254 memcpy(&buf[off+4], "LIO-ORG", 8); 261 /* left align Vendor ID and pad with spaces */
262 memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN);
263 memcpy(&buf[off+4], dev->t10_wwn.vendor,
264 strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
255 /* Extra Byte for NULL Terminator */ 265 /* Extra Byte for NULL Terminator */
256 id_len++; 266 id_len++;
257 /* Identifier Length */ 267 /* Identifier Length */
@@ -1281,6 +1291,14 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1281 struct se_device *dev = cmd->se_dev; 1291 struct se_device *dev = cmd->se_dev;
1282 unsigned char *cdb = cmd->t_task_cdb; 1292 unsigned char *cdb = cmd->t_task_cdb;
1283 1293
1294 if (!dev->dev_attrib.emulate_pr &&
1295 ((cdb[0] == PERSISTENT_RESERVE_IN) ||
1296 (cdb[0] == PERSISTENT_RESERVE_OUT) ||
1297 (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
1298 (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
1299 return TCM_UNSUPPORTED_SCSI_OPCODE;
1300 }
1301
1284 switch (cdb[0]) { 1302 switch (cdb[0]) {
1285 case MODE_SELECT: 1303 case MODE_SELECT:
1286 *size = cdb[4]; 1304 *size = cdb[4];
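
spc_emulate_inquiry_std() now fills the whole 28-byte vendor/model/revision region with ASCII spaces first and then copies in at most the field-width bytes of each string — exactly the left-aligned, space-padded layout the quoted SPC wording requires — while strnlen() bounds each copy so an over-long configfs value cannot spill into the next field. A compact sketch of that fill-then-copy pattern, using a hypothetical cut-down t10_wwn:

#include <stdio.h>
#include <string.h>

#define INQUIRY_VENDOR_LEN	8
#define INQUIRY_MODEL_LEN	16
#define INQUIRY_REVISION_LEN	4

/* Hypothetical stand-in for the t10_wwn fields; +1 for the NUL. */
struct t10_wwn_lite {
	char vendor[INQUIRY_VENDOR_LEN + 1];
	char model[INQUIRY_MODEL_LEN + 1];
	char revision[INQUIRY_REVISION_LEN + 1];
};

int main(void)
{
	struct t10_wwn_lite wwn = { "LIO-ORG", "IBLOCK", "4.0" };
	unsigned char buf[64] = { 0 };

	/* SPC: left-aligned ASCII, unused trailing bytes = 0x20 (space). */
	memset(&buf[8], 0x20,
	       INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
	memcpy(&buf[8], wwn.vendor, strnlen(wwn.vendor, INQUIRY_VENDOR_LEN));
	memcpy(&buf[16], wwn.model, strnlen(wwn.model, INQUIRY_MODEL_LEN));
	memcpy(&buf[32], wwn.revision,
	       strnlen(wwn.revision, INQUIRY_REVISION_LEN));

	printf("[%.28s]\n", &buf[8]);	/* vendor+model+rev, space padded */
	return 0;
}
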
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index f0db91ebd735..8d9ceedfd455 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -246,43 +246,25 @@ static ssize_t target_stat_lu_lu_name_show(struct config_item *item, char *page)
246static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page) 246static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page)
247{ 247{
248 struct se_device *dev = to_stat_lu_dev(item); 248 struct se_device *dev = to_stat_lu_dev(item);
249 int i;
250 char str[sizeof(dev->t10_wwn.vendor)+1];
251 249
252 /* scsiLuVendorId */ 250 return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_VENDOR_LEN)
253 for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) 251 "s\n", dev->t10_wwn.vendor);
254 str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
255 dev->t10_wwn.vendor[i] : ' ';
256 str[i] = '\0';
257 return snprintf(page, PAGE_SIZE, "%s\n", str);
258} 252}
259 253
260static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page) 254static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
261{ 255{
262 struct se_device *dev = to_stat_lu_dev(item); 256 struct se_device *dev = to_stat_lu_dev(item);
263 int i;
264 char str[sizeof(dev->t10_wwn.model)+1];
265 257
266 /* scsiLuProductId */ 258 return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_MODEL_LEN)
267 for (i = 0; i < sizeof(dev->t10_wwn.model); i++) 259 "s\n", dev->t10_wwn.model);
268 str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
269 dev->t10_wwn.model[i] : ' ';
270 str[i] = '\0';
271 return snprintf(page, PAGE_SIZE, "%s\n", str);
272} 260}
273 261
274static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page) 262static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page)
275{ 263{
276 struct se_device *dev = to_stat_lu_dev(item); 264 struct se_device *dev = to_stat_lu_dev(item);
277 int i;
278 char str[sizeof(dev->t10_wwn.revision)+1];
279 265
280 /* scsiLuRevisionId */ 266 return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_REVISION_LEN)
281 for (i = 0; i < sizeof(dev->t10_wwn.revision); i++) 267 "s\n", dev->t10_wwn.revision);
282 str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
283 dev->t10_wwn.revision[i] : ' ';
284 str[i] = '\0';
285 return snprintf(page, PAGE_SIZE, "%s\n", str);
286} 268}
287 269
288static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page) 270static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page)
@@ -612,7 +594,7 @@ static ssize_t target_stat_tgt_port_name_show(struct config_item *item,
612 dev = rcu_dereference(lun->lun_se_dev); 594 dev = rcu_dereference(lun->lun_se_dev);
613 if (dev) 595 if (dev)
614 ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", 596 ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
615 tpg->se_tpg_tfo->get_fabric_name(), 597 tpg->se_tpg_tfo->fabric_name,
616 lun->lun_rtpi); 598 lun->lun_rtpi);
617 rcu_read_unlock(); 599 rcu_read_unlock();
618 return ret; 600 return ret;
@@ -767,7 +749,7 @@ static ssize_t target_stat_transport_device_show(struct config_item *item,
767 if (dev) { 749 if (dev) {
768 /* scsiTransportType */ 750 /* scsiTransportType */
769 ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", 751 ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
770 tpg->se_tpg_tfo->get_fabric_name()); 752 tpg->se_tpg_tfo->fabric_name);
771 } 753 }
772 rcu_read_unlock(); 754 rcu_read_unlock();
773 return ret; 755 return ret;
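
The statistics show functions shrink from hand-rolled ISPRINT()-and-pad loops to a single snprintf() with a "%-<N>s" conversion: a minimum field width plus the '-' flag left-justifies the value and space-pads it to the fixed column, the same output the loops produced by hand. This is safe only because the t10_wwn fields are now guaranteed NUL-terminated at set time. Width versus the precision form used in pscsi, in one small demo:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define INQUIRY_MODEL_LEN	16

int main(void)
{
	const char *model = "IBLOCK";	/* NUL-terminated by this series */

	/* "%-16s": pad short values with spaces out to the full column. */
	printf("[%-" __stringify(INQUIRY_MODEL_LEN) "s]\n", model);
	/* "%.16s": the complementary form, capping bytes read from long ones. */
	printf("[%." __stringify(INQUIRY_MODEL_LEN) "s]\n",
	       "A-MODEL-NAME-THAT-IS-LONG");
	return 0;
}
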
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 6d1179a7f043..ad0061e09d4c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -163,18 +163,23 @@ void core_tmr_abort_task(
163 continue; 163 continue;
164 164
165 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", 165 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
166 se_cmd->se_tfo->get_fabric_name(), ref_tag); 166 se_cmd->se_tfo->fabric_name, ref_tag);
167 167
168 if (!__target_check_io_state(se_cmd, se_sess, 0)) 168 if (!__target_check_io_state(se_cmd, se_sess,
169 dev->dev_attrib.emulate_tas))
169 continue; 170 continue;
170 171
171 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 172 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
172 173
173 cancel_work_sync(&se_cmd->work); 174 /*
174 transport_wait_for_tasks(se_cmd); 175 * Ensure that this ABORT request is visible to the LU RESET
176 * code.
177 */
178 if (!tmr->tmr_dev)
179 WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd,
180 se_cmd->orig_fe_lun) < 0);
175 181
176 if (!transport_cmd_finish_abort(se_cmd)) 182 target_put_cmd_and_wait(se_cmd);
177 target_put_sess_cmd(se_cmd);
178 183
179 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" 184 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
180 " ref_tag: %llu\n", ref_tag); 185 " ref_tag: %llu\n", ref_tag);
@@ -268,14 +273,28 @@ static void core_tmr_drain_tmr_list(
268 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 273 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
269 tmr_p->function, tmr_p->response, cmd->t_state); 274 tmr_p->function, tmr_p->response, cmd->t_state);
270 275
271 cancel_work_sync(&cmd->work); 276 target_put_cmd_and_wait(cmd);
272 transport_wait_for_tasks(cmd);
273
274 if (!transport_cmd_finish_abort(cmd))
275 target_put_sess_cmd(cmd);
276 } 277 }
277} 278}
278 279
280/**
281 * core_tmr_drain_state_list() - abort SCSI commands associated with a device
282 *
283 * @dev: Device for which to abort outstanding SCSI commands.
284 * @prout_cmd: Pointer to the SCSI PREEMPT AND ABORT if this function is called
285 * to realize the PREEMPT AND ABORT functionality.
286 * @tmr_sess: Session through which the LUN RESET has been received.
287 * @tas: Task Aborted Status (TAS) bit from the SCSI control mode page.
288 * A quote from SPC-4, paragraph "7.5.10 Control mode page":
289 * "A task aborted status (TAS) bit set to zero specifies that
290 * aborted commands shall be terminated by the device server
291 * without any response to the application client. A TAS bit set
292 * to one specifies that commands aborted by the actions of an I_T
293 * nexus other than the I_T nexus on which the command was
294 * received shall be completed with TASK ABORTED status."
295 * @preempt_and_abort_list: For the PREEMPT AND ABORT functionality, a list
296 * with registrations that will be preempted.
297 */
279static void core_tmr_drain_state_list( 298static void core_tmr_drain_state_list(
280 struct se_device *dev, 299 struct se_device *dev,
281 struct se_cmd *prout_cmd, 300 struct se_cmd *prout_cmd,
@@ -350,18 +369,7 @@ static void core_tmr_drain_state_list(
350 cmd->tag, (preempt_and_abort_list) ? "preempt" : "", 369 cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
351 cmd->pr_res_key); 370 cmd->pr_res_key);
352 371
353 /* 372 target_put_cmd_and_wait(cmd);
354 * If the command may be queued onto a workqueue cancel it now.
355 *
356 * This is equivalent to removal from the execute queue in the
357 * loop above, but we do it down here given that
358 * cancel_work_sync may block.
359 */
360 cancel_work_sync(&cmd->work);
361 transport_wait_for_tasks(cmd);
362
363 if (!transport_cmd_finish_abort(cmd))
364 target_put_sess_cmd(cmd);
365 } 373 }
366} 374}
367 375
@@ -398,7 +406,7 @@ int core_tmr_lun_reset(
398 if (tmr_nacl && tmr_tpg) { 406 if (tmr_nacl && tmr_tpg) {
399 pr_debug("LUN_RESET: TMR caller fabric: %s" 407 pr_debug("LUN_RESET: TMR caller fabric: %s"
400 " initiator port %s\n", 408 " initiator port %s\n",
401 tmr_tpg->se_tpg_tfo->get_fabric_name(), 409 tmr_tpg->se_tpg_tfo->fabric_name,
402 tmr_nacl->initiatorname); 410 tmr_nacl->initiatorname);
403 } 411 }
404 } 412 }
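
The ABORT_TASK and LUN RESET paths above replace the cancel_work_sync() + transport_wait_for_tasks() + transport_cmd_finish_abort() dance with a single target_put_cmd_and_wait(), and internal.h correspondingly drops the old helper prototypes. The underlying shape is a reference-counted object where the aborter drops its own reference and then blocks until the final put releases the command; in the kernel this pairs the command's refcount with a struct completion. A single-threaded userspace analogue of just that control flow — a spin on an atomic flag stands in for wait_for_completion():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd {
	atomic_int refs;
	atomic_bool released;
};

static void cmd_put(struct cmd *c)
{
	/* Final put signals release, like complete() on the kernel side. */
	if (atomic_fetch_sub(&c->refs, 1) == 1)
		atomic_store(&c->released, true);
}

static void put_cmd_and_wait(struct cmd *c)
{
	cmd_put(c);				/* drop the aborter's reference */
	while (!atomic_load(&c->released))	/* wait_for_completion() stand-in */
		;
}

int main(void)
{
	struct cmd c = { .refs = 2 };

	cmd_put(&c);		/* the owner finishes with the command first */
	put_cmd_and_wait(&c);	/* the aborter blocks until the final release */
	puts("command fully released");
	return 0;
}
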
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 02e8a5d86658..e2ace1059437 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -151,7 +151,7 @@ void core_tpg_add_node_to_devs(
151 151
152 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s" 152 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
153 " access for LUN in Demo Mode\n", 153 " access for LUN in Demo Mode\n",
154 tpg->se_tpg_tfo->get_fabric_name(), 154 tpg->se_tpg_tfo->fabric_name,
155 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 155 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
156 lun_access_ro ? "READ-ONLY" : "READ-WRITE"); 156 lun_access_ro ? "READ-ONLY" : "READ-WRITE");
157 157
@@ -176,7 +176,7 @@ target_set_nacl_queue_depth(struct se_portal_group *tpg,
176 176
177 if (!acl->queue_depth) { 177 if (!acl->queue_depth) {
178 pr_warn("Queue depth for %s Initiator Node: %s is 0," 178 pr_warn("Queue depth for %s Initiator Node: %s is 0,"
179 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), 179 "defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
180 acl->initiatorname); 180 acl->initiatorname);
181 acl->queue_depth = 1; 181 acl->queue_depth = 1;
182 } 182 }
@@ -227,11 +227,11 @@ static void target_add_node_acl(struct se_node_acl *acl)
227 227
228 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s" 228 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
229 " Initiator Node: %s\n", 229 " Initiator Node: %s\n",
230 tpg->se_tpg_tfo->get_fabric_name(), 230 tpg->se_tpg_tfo->fabric_name,
231 tpg->se_tpg_tfo->tpg_get_tag(tpg), 231 tpg->se_tpg_tfo->tpg_get_tag(tpg),
232 acl->dynamic_node_acl ? "DYNAMIC" : "", 232 acl->dynamic_node_acl ? "DYNAMIC" : "",
233 acl->queue_depth, 233 acl->queue_depth,
234 tpg->se_tpg_tfo->get_fabric_name(), 234 tpg->se_tpg_tfo->fabric_name,
235 acl->initiatorname); 235 acl->initiatorname);
236} 236}
237 237
@@ -313,7 +313,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
313 if (acl->dynamic_node_acl) { 313 if (acl->dynamic_node_acl) {
314 acl->dynamic_node_acl = 0; 314 acl->dynamic_node_acl = 0;
315 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 315 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
316 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 316 " for %s\n", tpg->se_tpg_tfo->fabric_name,
317 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 317 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
318 mutex_unlock(&tpg->acl_node_mutex); 318 mutex_unlock(&tpg->acl_node_mutex);
319 return acl; 319 return acl;
@@ -321,7 +321,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
321 321
322 pr_err("ACL entry for %s Initiator" 322 pr_err("ACL entry for %s Initiator"
323 " Node %s already exists for TPG %u, ignoring" 323 " Node %s already exists for TPG %u, ignoring"
324 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 324 " request.\n", tpg->se_tpg_tfo->fabric_name,
325 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 325 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
326 mutex_unlock(&tpg->acl_node_mutex); 326 mutex_unlock(&tpg->acl_node_mutex);
327 return ERR_PTR(-EEXIST); 327 return ERR_PTR(-EEXIST);
@@ -380,9 +380,9 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
380 core_free_device_list_for_node(acl, tpg); 380 core_free_device_list_for_node(acl, tpg);
381 381
382 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" 382 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
383 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 383 " Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
384 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 384 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
385 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); 385 tpg->se_tpg_tfo->fabric_name, acl->initiatorname);
386 386
387 kfree(acl); 387 kfree(acl);
388} 388}
@@ -418,7 +418,7 @@ int core_tpg_set_initiator_node_queue_depth(
418 418
419 pr_debug("Successfully changed queue depth to: %d for Initiator" 419 pr_debug("Successfully changed queue depth to: %d for Initiator"
420 " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth, 420 " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
421 acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 421 acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
422 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 422 tpg->se_tpg_tfo->tpg_get_tag(tpg));
423 423
424 return 0; 424 return 0;
@@ -512,7 +512,7 @@ int core_tpg_register(
512 spin_unlock_bh(&tpg_lock); 512 spin_unlock_bh(&tpg_lock);
513 513
514 pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, " 514 pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
515 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(), 515 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
516 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ? 516 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
517 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL, 517 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
518 se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 518 se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
@@ -528,7 +528,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
528 LIST_HEAD(node_list); 528 LIST_HEAD(node_list);
529 529
530 pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, " 530 pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
531 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(), 531 "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
532 tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL, 532 tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
533 se_tpg->proto_id, tfo->tpg_get_tag(se_tpg)); 533 se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
534 534
@@ -577,7 +577,6 @@ struct se_lun *core_tpg_alloc_lun(
577 } 577 }
578 lun->unpacked_lun = unpacked_lun; 578 lun->unpacked_lun = unpacked_lun;
579 atomic_set(&lun->lun_acl_count, 0); 579 atomic_set(&lun->lun_acl_count, 0);
580 init_completion(&lun->lun_ref_comp);
581 init_completion(&lun->lun_shutdown_comp); 580 init_completion(&lun->lun_shutdown_comp);
582 INIT_LIST_HEAD(&lun->lun_deve_list); 581 INIT_LIST_HEAD(&lun->lun_deve_list);
583 INIT_LIST_HEAD(&lun->lun_dev_link); 582 INIT_LIST_HEAD(&lun->lun_dev_link);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 2cfd61d62e97..ef9e75b359d4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -224,19 +224,28 @@ void transport_subsystem_check_init(void)
224 sub_api_initialized = 1; 224 sub_api_initialized = 1;
225} 225}
226 226
227static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
228{
229 struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
230
231 wake_up(&sess->cmd_list_wq);
232}
233
227/** 234/**
228 * transport_init_session - initialize a session object 235 * transport_init_session - initialize a session object
229 * @se_sess: Session object pointer. 236 * @se_sess: Session object pointer.
230 * 237 *
231 * The caller must have zero-initialized @se_sess before calling this function. 238 * The caller must have zero-initialized @se_sess before calling this function.
232 */ 239 */
233void transport_init_session(struct se_session *se_sess) 240int transport_init_session(struct se_session *se_sess)
234{ 241{
235 INIT_LIST_HEAD(&se_sess->sess_list); 242 INIT_LIST_HEAD(&se_sess->sess_list);
236 INIT_LIST_HEAD(&se_sess->sess_acl_list); 243 INIT_LIST_HEAD(&se_sess->sess_acl_list);
237 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 244 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
238 spin_lock_init(&se_sess->sess_cmd_lock); 245 spin_lock_init(&se_sess->sess_cmd_lock);
239 init_waitqueue_head(&se_sess->cmd_list_wq); 246 init_waitqueue_head(&se_sess->cmd_list_wq);
247 return percpu_ref_init(&se_sess->cmd_count,
248 target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
240} 249}
241EXPORT_SYMBOL(transport_init_session); 250EXPORT_SYMBOL(transport_init_session);
242 251
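
transport_init_session() now also sets up a per-session percpu_ref, which is why it can fail and returns int. The release callback runs once the count reaches zero after a kill and merely wakes the shutdown waiter. A condensed lifecycle sketch using the cmd_count and cmd_list_wq fields this patch introduces (demo_* names are illustrative):

    #include <linux/percpu-refcount.h>
    #include <linux/wait.h>

    /* Runs when the last reference is dropped after percpu_ref_kill(). */
    static void demo_release_cmd_refcnt(struct percpu_ref *ref)
    {
            struct se_session *sess = container_of(ref, typeof(*sess),
                                                   cmd_count);

            wake_up(&sess->cmd_list_wq);
    }

    static int demo_session_init(struct se_session *sess)
    {
            init_waitqueue_head(&sess->cmd_list_wq);
            /* Can fail under memory pressure, hence the int return. */
            return percpu_ref_init(&sess->cmd_count, demo_release_cmd_refcnt,
                                   0, GFP_KERNEL);
    }

    static void demo_session_shutdown(struct se_session *sess)
    {
            percpu_ref_kill(&sess->cmd_count);      /* no new references */
            wait_event(sess->cmd_list_wq,
                       percpu_ref_is_zero(&sess->cmd_count));
            percpu_ref_exit(&sess->cmd_count);
    }

Each command takes a reference in target_get_sess_cmd() and drops it in target_release_cmd_kref(), as the later hunks show.
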
@@ -247,6 +256,7 @@ EXPORT_SYMBOL(transport_init_session);
247struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) 256struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
248{ 257{
249 struct se_session *se_sess; 258 struct se_session *se_sess;
259 int ret;
250 260
251 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 261 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
252 if (!se_sess) { 262 if (!se_sess) {
@@ -254,7 +264,11 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
254 " se_sess_cache\n"); 264 " se_sess_cache\n");
255 return ERR_PTR(-ENOMEM); 265 return ERR_PTR(-ENOMEM);
256 } 266 }
257 transport_init_session(se_sess); 267 ret = transport_init_session(se_sess);
268 if (ret < 0) {
269 kmem_cache_free(se_sess_cache, se_sess);
270 return ERR_PTR(ret);
271 }
258 se_sess->sup_prot_ops = sup_prot_ops; 272 se_sess->sup_prot_ops = sup_prot_ops;
259 273
260 return se_sess; 274 return se_sess;
@@ -273,14 +287,11 @@ int transport_alloc_session_tags(struct se_session *se_sess,
273{ 287{
274 int rc; 288 int rc;
275 289
276 se_sess->sess_cmd_map = kcalloc(tag_size, tag_num, 290 se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
277 GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); 291 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
278 if (!se_sess->sess_cmd_map) { 292 if (!se_sess->sess_cmd_map) {
279 se_sess->sess_cmd_map = vzalloc(array_size(tag_size, tag_num)); 293 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
280 if (!se_sess->sess_cmd_map) { 294 return -ENOMEM;
281 pr_err("Unable to allocate se_sess->sess_cmd_map\n");
282 return -ENOMEM;
283 }
284 } 295 }
285 296
286 rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, 297 rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
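
The open-coded kcalloc()-then-vzalloc() fallback collapses into a single kvcalloc() call; kvcalloc() tries a kmalloc-based allocation first and falls back to vmalloc internally, which is also why the __GFP_NOWARN for the quiet first attempt can be dropped. A sketch of the equivalence (demo helpers, not kernel API):

    #include <linux/mm.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Old pattern: quiet contiguous attempt, manual vmalloc fallback. */
    static void *demo_alloc_old(size_t n, size_t size)
    {
            void *p = kcalloc(n, size,
                              GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);

            if (!p)
                    p = vzalloc(array_size(n, size));
            return p;
    }

    /* New pattern: kvcalloc() does the same two steps internally. */
    static void *demo_alloc_new(size_t n, size_t size)
    {
            return kvcalloc(n, size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
    }

Either result is released with kvfree(), which transport_free_session() already uses.
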
@@ -397,7 +408,7 @@ void __transport_register_session(
397 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 408 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
398 409
399 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 410 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
400 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 411 se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
401} 412}
402EXPORT_SYMBOL(__transport_register_session); 413EXPORT_SYMBOL(__transport_register_session);
403 414
@@ -581,6 +592,7 @@ void transport_free_session(struct se_session *se_sess)
581 sbitmap_queue_free(&se_sess->sess_tag_pool); 592 sbitmap_queue_free(&se_sess->sess_tag_pool);
582 kvfree(se_sess->sess_cmd_map); 593 kvfree(se_sess->sess_cmd_map);
583 } 594 }
595 percpu_ref_exit(&se_sess->cmd_count);
584 kmem_cache_free(se_sess_cache, se_sess); 596 kmem_cache_free(se_sess_cache, se_sess);
585} 597}
586EXPORT_SYMBOL(transport_free_session); 598EXPORT_SYMBOL(transport_free_session);
@@ -602,7 +614,7 @@ void transport_deregister_session(struct se_session *se_sess)
602 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 614 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
603 615
604 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 616 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
605 se_tpg->se_tpg_tfo->get_fabric_name()); 617 se_tpg->se_tpg_tfo->fabric_name);
606 /* 618 /*
607 * If last kref is dropping now for an explicit NodeACL, awake sleeping 619 * If last kref is dropping now for an explicit NodeACL, awake sleeping
608 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 620 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
@@ -695,32 +707,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
695 percpu_ref_put(&lun->lun_ref); 707 percpu_ref_put(&lun->lun_ref);
696} 708}
697 709
698int transport_cmd_finish_abort(struct se_cmd *cmd)
699{
700 bool send_tas = cmd->transport_state & CMD_T_TAS;
701 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
702 int ret = 0;
703
704 if (send_tas)
705 transport_send_task_abort(cmd);
706
707 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
708 transport_lun_remove_cmd(cmd);
709 /*
710 * Allow the fabric driver to unmap any resources before
711 * releasing the descriptor via TFO->release_cmd()
712 */
713 if (!send_tas)
714 cmd->se_tfo->aborted_task(cmd);
715
716 if (transport_cmd_check_stop_to_fabric(cmd))
717 return 1;
718 if (!send_tas && ack_kref)
719 ret = target_put_sess_cmd(cmd);
720
721 return ret;
722}
723
724static void target_complete_failure_work(struct work_struct *work) 710static void target_complete_failure_work(struct work_struct *work)
725{ 711{
726 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 712 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -770,12 +756,88 @@ void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
770} 756}
771EXPORT_SYMBOL(transport_copy_sense_to_cmd); 757EXPORT_SYMBOL(transport_copy_sense_to_cmd);
772 758
759static void target_handle_abort(struct se_cmd *cmd)
760{
761 bool tas = cmd->transport_state & CMD_T_TAS;
762 bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
763 int ret;
764
765 pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
766
767 if (tas) {
768 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
769 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
770 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
771 cmd->t_task_cdb[0], cmd->tag);
772 trace_target_cmd_complete(cmd);
773 ret = cmd->se_tfo->queue_status(cmd);
774 if (ret) {
775 transport_handle_queue_full(cmd, cmd->se_dev,
776 ret, false);
777 return;
778 }
779 } else {
780 cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
781 cmd->se_tfo->queue_tm_rsp(cmd);
782 }
783 } else {
784 /*
785 * Allow the fabric driver to unmap any resources before
786 * releasing the descriptor via TFO->release_cmd().
787 */
788 cmd->se_tfo->aborted_task(cmd);
789 if (ack_kref)
790 WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
791 /*
792 * To do: establish a unit attention condition on the I_T
793 * nexus associated with cmd. See also the paragraph "Aborting
794 * commands" in SAM.
795 */
796 }
797
798 WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
799
800 transport_lun_remove_cmd(cmd);
801
802 transport_cmd_check_stop_to_fabric(cmd);
803}
804
805static void target_abort_work(struct work_struct *work)
806{
807 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
808
809 target_handle_abort(cmd);
810}
811
812static bool target_cmd_interrupted(struct se_cmd *cmd)
813{
814 int post_ret;
815
816 if (cmd->transport_state & CMD_T_ABORTED) {
817 if (cmd->transport_complete_callback)
818 cmd->transport_complete_callback(cmd, false, &post_ret);
819 INIT_WORK(&cmd->work, target_abort_work);
820 queue_work(target_completion_wq, &cmd->work);
821 return true;
822 } else if (cmd->transport_state & CMD_T_STOP) {
823 if (cmd->transport_complete_callback)
824 cmd->transport_complete_callback(cmd, false, &post_ret);
825 complete_all(&cmd->t_transport_stop_comp);
826 return true;
827 }
828
829 return false;
830}
831
832/* May be called from interrupt context so must not sleep. */
773void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 833void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
774{ 834{
775 struct se_device *dev = cmd->se_dev;
776 int success; 835 int success;
777 unsigned long flags; 836 unsigned long flags;
778 837
838 if (target_cmd_interrupted(cmd))
839 return;
840
779 cmd->scsi_status = scsi_status; 841 cmd->scsi_status = scsi_status;
780 842
781 spin_lock_irqsave(&cmd->t_state_lock, flags); 843 spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -791,34 +853,12 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
791 break; 853 break;
792 } 854 }
793 855
794 /*
795 * Check for case where an explicit ABORT_TASK has been received
796 * and transport_wait_for_tasks() will be waiting for completion..
797 */
798 if (cmd->transport_state & CMD_T_ABORTED ||
799 cmd->transport_state & CMD_T_STOP) {
800 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
801 /*
802 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
803 * release se_device->caw_sem obtained by sbc_compare_and_write()
804 * since target_complete_ok_work() or target_complete_failure_work()
805 * won't be called to invoke the normal CAW completion callbacks.
806 */
807 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
808 up(&dev->caw_sem);
809 }
810 complete_all(&cmd->t_transport_stop_comp);
811 return;
812 } else if (!success) {
813 INIT_WORK(&cmd->work, target_complete_failure_work);
814 } else {
815 INIT_WORK(&cmd->work, target_complete_ok_work);
816 }
817
818 cmd->t_state = TRANSPORT_COMPLETE; 856 cmd->t_state = TRANSPORT_COMPLETE;
819 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 857 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
820 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 858 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
821 859
860 INIT_WORK(&cmd->work, success ? target_complete_ok_work :
861 target_complete_failure_work);
822 if (cmd->se_cmd_flags & SCF_USE_CPUID) 862 if (cmd->se_cmd_flags & SCF_USE_CPUID)
823 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 863 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
824 else 864 else
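
The abort and CMD_T_STOP special cases that used to live inside target_complete_cmd() (and, in a later hunk, target_execute_cmd()) are now funneled through target_cmd_interrupted(): aborted commands are punted to the completion workqueue to run target_handle_abort(), while stopped commands just signal t_transport_stop_comp. A reduced sketch of that dispatch; the transport_complete_callback() notification is omitted here for brevity:

    static bool demo_cmd_interrupted(struct se_cmd *cmd)
    {
            if (cmd->transport_state & CMD_T_ABORTED) {
                    /* Reusing cmd->work is safe: an interrupted command
                     * never reaches the normal completion path. */
                    INIT_WORK(&cmd->work, target_abort_work);
                    queue_work(target_completion_wq, &cmd->work);
                    return true;    /* abort path now owns the command */
            }
            if (cmd->transport_state & CMD_T_STOP) {
                    complete_all(&cmd->t_transport_stop_comp);
                    return true;    /* wakes transport_wait_for_tasks() */
            }
            return false;           /* continue normal processing */
    }
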
@@ -880,7 +920,7 @@ void target_qf_do_work(struct work_struct *work)
880 atomic_dec_mb(&dev->dev_qf_count); 920 atomic_dec_mb(&dev->dev_qf_count);
881 921
882 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 922 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
883 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 923 " context: %s\n", cmd->se_tfo->fabric_name, cmd,
884 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 924 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
885 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" 925 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
886 : "UNKNOWN"); 926 : "UNKNOWN");
@@ -1244,7 +1284,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1244 } else if (size != cmd->data_length) { 1284 } else if (size != cmd->data_length) {
1245 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1285 pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
1246 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1286 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
1247 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1287 " 0x%02x\n", cmd->se_tfo->fabric_name,
1248 cmd->data_length, size, cmd->t_task_cdb[0]); 1288 cmd->data_length, size, cmd->t_task_cdb[0]);
1249 1289
1250 if (cmd->data_direction == DMA_TO_DEVICE) { 1290 if (cmd->data_direction == DMA_TO_DEVICE) {
@@ -1316,7 +1356,8 @@ void transport_init_se_cmd(
1316 INIT_LIST_HEAD(&cmd->se_cmd_list); 1356 INIT_LIST_HEAD(&cmd->se_cmd_list);
1317 INIT_LIST_HEAD(&cmd->state_list); 1357 INIT_LIST_HEAD(&cmd->state_list);
1318 init_completion(&cmd->t_transport_stop_comp); 1358 init_completion(&cmd->t_transport_stop_comp);
1319 cmd->compl = NULL; 1359 cmd->free_compl = NULL;
1360 cmd->abrt_compl = NULL;
1320 spin_lock_init(&cmd->t_state_lock); 1361 spin_lock_init(&cmd->t_state_lock);
1321 INIT_WORK(&cmd->work, NULL); 1362 INIT_WORK(&cmd->work, NULL);
1322 kref_init(&cmd->cmd_kref); 1363 kref_init(&cmd->cmd_kref);
@@ -1396,7 +1437,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
1396 ret = dev->transport->parse_cdb(cmd); 1437 ret = dev->transport->parse_cdb(cmd);
1397 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1438 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1398 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1439 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1399 cmd->se_tfo->get_fabric_name(), 1440 cmd->se_tfo->fabric_name,
1400 cmd->se_sess->se_node_acl->initiatorname, 1441 cmd->se_sess->se_node_acl->initiatorname,
1401 cmd->t_task_cdb[0]); 1442 cmd->t_task_cdb[0]);
1402 if (ret) 1443 if (ret)
@@ -1792,8 +1833,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1792 if (cmd->transport_complete_callback) 1833 if (cmd->transport_complete_callback)
1793 cmd->transport_complete_callback(cmd, false, &post_ret); 1834 cmd->transport_complete_callback(cmd, false, &post_ret);
1794 1835
1795 if (transport_check_aborted_status(cmd, 1)) 1836 if (cmd->transport_state & CMD_T_ABORTED) {
1837 INIT_WORK(&cmd->work, target_abort_work);
1838 queue_work(target_completion_wq, &cmd->work);
1796 return; 1839 return;
1840 }
1797 1841
1798 switch (sense_reason) { 1842 switch (sense_reason) {
1799 case TCM_NON_EXISTENT_LUN: 1843 case TCM_NON_EXISTENT_LUN:
@@ -1999,8 +2043,6 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1999 return true; 2043 return true;
2000} 2044}
2001 2045
2002static int __transport_check_aborted_status(struct se_cmd *, int);
2003
2004void target_execute_cmd(struct se_cmd *cmd) 2046void target_execute_cmd(struct se_cmd *cmd)
2005{ 2047{
2006 /* 2048 /*
@@ -2009,20 +2051,10 @@ void target_execute_cmd(struct se_cmd *cmd)
2009 * 2051 *
2010 * If the received CDB has already been aborted stop processing it here. 2052 * If the received CDB has already been aborted stop processing it here.
2011 */ 2053 */
2012 spin_lock_irq(&cmd->t_state_lock); 2054 if (target_cmd_interrupted(cmd))
2013 if (__transport_check_aborted_status(cmd, 1)) {
2014 spin_unlock_irq(&cmd->t_state_lock);
2015 return;
2016 }
2017 if (cmd->transport_state & CMD_T_STOP) {
2018 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2019 __func__, __LINE__, cmd->tag);
2020
2021 spin_unlock_irq(&cmd->t_state_lock);
2022 complete_all(&cmd->t_transport_stop_comp);
2023 return; 2055 return;
2024 }
2025 2056
2057 spin_lock_irq(&cmd->t_state_lock);
2026 cmd->t_state = TRANSPORT_PROCESSING; 2058 cmd->t_state = TRANSPORT_PROCESSING;
2027 cmd->transport_state &= ~CMD_T_PRE_EXECUTE; 2059 cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
2028 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 2060 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
@@ -2571,7 +2603,8 @@ transport_generic_new_cmd(struct se_cmd *cmd)
2571 * Determine if frontend context caller is requesting the stopping of 2603 * Determine if frontend context caller is requesting the stopping of
2572 * this command for frontend exceptions. 2604 * this command for frontend exceptions.
2573 */ 2605 */
2574 if (cmd->transport_state & CMD_T_STOP) { 2606 if (cmd->transport_state & CMD_T_STOP &&
2607 !cmd->se_tfo->write_pending_must_be_called) {
2575 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2608 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2576 __func__, __LINE__, cmd->tag); 2609 __func__, __LINE__, cmd->tag);
2577 2610
@@ -2635,13 +2668,29 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2635} 2668}
2636 2669
2637/* 2670/*
2671 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
2672 * finished.
2673 */
2674void target_put_cmd_and_wait(struct se_cmd *cmd)
2675{
2676 DECLARE_COMPLETION_ONSTACK(compl);
2677
2678 WARN_ON_ONCE(cmd->abrt_compl);
2679 cmd->abrt_compl = &compl;
2680 target_put_sess_cmd(cmd);
2681 wait_for_completion(&compl);
2682}
2683
2684/*
2638 * This function is called by frontend drivers after processing of a command 2685 * This function is called by frontend drivers after processing of a command
2639 * has finished. 2686 * has finished.
2640 * 2687 *
2641 * The protocol for ensuring that either the regular flow or the TMF 2688 * The protocol for ensuring that either the regular frontend command
2642 * code drops one reference is as follows: 2689 * processing flow or target_handle_abort() code drops one reference is as
2690 * follows:
2643 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2691 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
2644 * the frontend driver to drop one reference, synchronously or asynchronously. 2692 * the frontend driver to call this function synchronously or asynchronously.
2693 * That will cause one reference to be dropped.
2645 * - During regular command processing the target core sets CMD_T_COMPLETE 2694 * - During regular command processing the target core sets CMD_T_COMPLETE
2646 * before invoking one of the .queue_*() functions. 2695 * before invoking one of the .queue_*() functions.
2647 * - The code that aborts commands skips commands and TMFs for which 2696 * - The code that aborts commands skips commands and TMFs for which
@@ -2653,7 +2702,7 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2653 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2702 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2654 * be called and will drop a reference. 2703 * be called and will drop a reference.
2655 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2704 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2656 * will be called. transport_cmd_finish_abort() will drop the final reference. 2705 * will be called. target_handle_abort() will drop the final reference.
2657 */ 2706 */
2658int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2707int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2659{ 2708{
@@ -2677,9 +2726,8 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2677 transport_lun_remove_cmd(cmd); 2726 transport_lun_remove_cmd(cmd);
2678 } 2727 }
2679 if (aborted) 2728 if (aborted)
2680 cmd->compl = &compl; 2729 cmd->free_compl = &compl;
2681 if (!aborted || tas) 2730 ret = target_put_sess_cmd(cmd);
2682 ret = target_put_sess_cmd(cmd);
2683 if (aborted) { 2731 if (aborted) {
2684 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2732 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2685 wait_for_completion(&compl); 2733 wait_for_completion(&compl);
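
Both waiters rely on the same handshake: park an on-stack completion in the command (free_compl here, abrt_compl in target_put_cmd_and_wait() above), drop the session reference, and sleep until target_release_cmd_kref() in the next hunk completes it. A minimal sketch of the pattern, assuming the release path signals whichever pointer is set:

    static void demo_put_and_wait(struct se_cmd *cmd)
    {
            DECLARE_COMPLETION_ONSTACK(compl);

            cmd->abrt_compl = &compl;       /* or cmd->free_compl */
            target_put_sess_cmd(cmd);       /* may or may not be the final put */
            wait_for_completion(&compl);    /* returns only after the command
                                             * has been fully released */
    }
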
@@ -2719,6 +2767,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2719 } 2767 }
2720 se_cmd->transport_state |= CMD_T_PRE_EXECUTE; 2768 se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
2721 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2769 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2770 percpu_ref_get(&se_sess->cmd_count);
2722out: 2771out:
2723 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2772 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2724 2773
@@ -2743,21 +2792,24 @@ static void target_release_cmd_kref(struct kref *kref)
2743{ 2792{
2744 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2793 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2745 struct se_session *se_sess = se_cmd->se_sess; 2794 struct se_session *se_sess = se_cmd->se_sess;
2746 struct completion *compl = se_cmd->compl; 2795 struct completion *free_compl = se_cmd->free_compl;
2796 struct completion *abrt_compl = se_cmd->abrt_compl;
2747 unsigned long flags; 2797 unsigned long flags;
2748 2798
2749 if (se_sess) { 2799 if (se_sess) {
2750 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2800 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2751 list_del_init(&se_cmd->se_cmd_list); 2801 list_del_init(&se_cmd->se_cmd_list);
2752 if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
2753 wake_up(&se_sess->cmd_list_wq);
2754 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2802 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2755 } 2803 }
2756 2804
2757 target_free_cmd_mem(se_cmd); 2805 target_free_cmd_mem(se_cmd);
2758 se_cmd->se_tfo->release_cmd(se_cmd); 2806 se_cmd->se_tfo->release_cmd(se_cmd);
2759 if (compl) 2807 if (free_compl)
2760 complete(compl); 2808 complete(free_compl);
2809 if (abrt_compl)
2810 complete(abrt_compl);
2811
2812 percpu_ref_put(&se_sess->cmd_count);
2761} 2813}
2762 2814
2763/** 2815/**
@@ -2886,6 +2938,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2886 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2938 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2887 se_sess->sess_tearing_down = 1; 2939 se_sess->sess_tearing_down = 1;
2888 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2940 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2941
2942 percpu_ref_kill(&se_sess->cmd_count);
2889} 2943}
2890EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2944EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2891 2945
@@ -2900,52 +2954,24 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
2900 2954
2901 WARN_ON_ONCE(!se_sess->sess_tearing_down); 2955 WARN_ON_ONCE(!se_sess->sess_tearing_down);
2902 2956
2903 spin_lock_irq(&se_sess->sess_cmd_lock);
2904 do { 2957 do {
2905 ret = wait_event_lock_irq_timeout( 2958 ret = wait_event_timeout(se_sess->cmd_list_wq,
2906 se_sess->cmd_list_wq, 2959 percpu_ref_is_zero(&se_sess->cmd_count),
2907 list_empty(&se_sess->sess_cmd_list), 2960 180 * HZ);
2908 se_sess->sess_cmd_lock, 180 * HZ);
2909 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) 2961 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
2910 target_show_cmd("session shutdown: still waiting for ", 2962 target_show_cmd("session shutdown: still waiting for ",
2911 cmd); 2963 cmd);
2912 } while (ret <= 0); 2964 } while (ret <= 0);
2913 spin_unlock_irq(&se_sess->sess_cmd_lock);
2914} 2965}
2915EXPORT_SYMBOL(target_wait_for_sess_cmds); 2966EXPORT_SYMBOL(target_wait_for_sess_cmds);
2916 2967
2917static void target_lun_confirm(struct percpu_ref *ref) 2968/*
2918{ 2969 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
2919 struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); 2970 * all references to the LUN have been released. Called during LUN shutdown.
2920 2971 */
2921 complete(&lun->lun_ref_comp);
2922}
2923
2924void transport_clear_lun_ref(struct se_lun *lun) 2972void transport_clear_lun_ref(struct se_lun *lun)
2925{ 2973{
2926 /* 2974 percpu_ref_kill(&lun->lun_ref);
2927 * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
2928 * the initial reference and schedule confirm kill to be
2929 * executed after one full RCU grace period has completed.
2930 */
2931 percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
2932 /*
2933 * The first completion waits for percpu_ref_switch_to_atomic_rcu()
2934 * to call target_lun_confirm after lun->lun_ref has been marked
2935 * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
2936 * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
2937 * fails for all new incoming I/O.
2938 */
2939 wait_for_completion(&lun->lun_ref_comp);
2940 /*
2941 * The second completion waits for percpu_ref_put_many() to
2942 * invoke ->release() after lun->lun_ref has switched to
2943 * atomic_t mode, and lun->lun_ref.count has reached zero.
2944 *
2945 * At this point all target-core lun->lun_ref references have
2946 * been dropped via transport_lun_remove_cmd(), and it's safe
2947 * to proceed with the remaining LUN shutdown.
2948 */
2949 wait_for_completion(&lun->lun_shutdown_comp); 2975 wait_for_completion(&lun->lun_shutdown_comp);
2950} 2976}
2951 2977
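
With lun_ref_comp removed, LUN shutdown needs only a single handshake: percpu_ref_kill() makes every new percpu_ref_tryget_live() lookup fail, and the ref's release callback, wired up where lun_ref is initialized elsewhere in target core, completes lun_shutdown_comp once the last I/O reference is put. A sketch under that assumption:

    /* Assumed release callback, installed at lun_ref init time. */
    static void demo_lun_ref_release(struct percpu_ref *ref)
    {
            struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

            complete(&lun->lun_shutdown_comp);
    }

    /* I/O submission only proceeds while the ref is still live. */
    static bool demo_lun_get(struct se_lun *lun)
    {
            return percpu_ref_tryget_live(&lun->lun_ref);
    }

    static void demo_clear_lun_ref(struct se_lun *lun)
    {
            percpu_ref_kill(&lun->lun_ref);  /* tryget_live() now fails */
            wait_for_completion(&lun->lun_shutdown_comp);
    }
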
@@ -3229,6 +3255,8 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
3229{ 3255{
3230 unsigned long flags; 3256 unsigned long flags;
3231 3257
3258 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3259
3232 spin_lock_irqsave(&cmd->t_state_lock, flags); 3260 spin_lock_irqsave(&cmd->t_state_lock, flags);
3233 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 3261 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3234 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3262 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -3245,114 +3273,15 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
3245} 3273}
3246EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3274EXPORT_SYMBOL(transport_send_check_condition_and_sense);
3247 3275
3248static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3249 __releases(&cmd->t_state_lock)
3250 __acquires(&cmd->t_state_lock)
3251{
3252 int ret;
3253
3254 assert_spin_locked(&cmd->t_state_lock);
3255 WARN_ON_ONCE(!irqs_disabled());
3256
3257 if (!(cmd->transport_state & CMD_T_ABORTED))
3258 return 0;
3259 /*
3260 * If cmd has been aborted but either no status is to be sent or it has
3261 * already been sent, just return
3262 */
3263 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
3264 if (send_status)
3265 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3266 return 1;
3267 }
3268
3269 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
3270 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
3271
3272 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
3273 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3274 trace_target_cmd_complete(cmd);
3275
3276 spin_unlock_irq(&cmd->t_state_lock);
3277 ret = cmd->se_tfo->queue_status(cmd);
3278 if (ret)
3279 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3280 spin_lock_irq(&cmd->t_state_lock);
3281
3282 return 1;
3283}
3284
3285int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
3286{
3287 int ret;
3288
3289 spin_lock_irq(&cmd->t_state_lock);
3290 ret = __transport_check_aborted_status(cmd, send_status);
3291 spin_unlock_irq(&cmd->t_state_lock);
3292
3293 return ret;
3294}
3295EXPORT_SYMBOL(transport_check_aborted_status);
3296
3297void transport_send_task_abort(struct se_cmd *cmd)
3298{
3299 unsigned long flags;
3300 int ret;
3301
3302 spin_lock_irqsave(&cmd->t_state_lock, flags);
3303 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
3304 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3305 return;
3306 }
3307 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3308
3309 /*
3310 * If there are still expected incoming fabric WRITEs, we wait
3311 * until they have completed before sending a TASK_ABORTED
3312 * response. This response with TASK_ABORTED status will be
3313 * queued back to fabric module by transport_check_aborted_status().
3314 */
3315 if (cmd->data_direction == DMA_TO_DEVICE) {
3316 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3317 spin_lock_irqsave(&cmd->t_state_lock, flags);
3318 if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
3319 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3320 goto send_abort;
3321 }
3322 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3323 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3324 return;
3325 }
3326 }
3327send_abort:
3328 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3329
3330 transport_lun_remove_cmd(cmd);
3331
3332 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3333 cmd->t_task_cdb[0], cmd->tag);
3334
3335 trace_target_cmd_complete(cmd);
3336 ret = cmd->se_tfo->queue_status(cmd);
3337 if (ret)
3338 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
3339}
3340
3341static void target_tmr_work(struct work_struct *work) 3276static void target_tmr_work(struct work_struct *work)
3342{ 3277{
3343 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3278 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3344 struct se_device *dev = cmd->se_dev; 3279 struct se_device *dev = cmd->se_dev;
3345 struct se_tmr_req *tmr = cmd->se_tmr_req; 3280 struct se_tmr_req *tmr = cmd->se_tmr_req;
3346 unsigned long flags;
3347 int ret; 3281 int ret;
3348 3282
3349 spin_lock_irqsave(&cmd->t_state_lock, flags); 3283 if (cmd->transport_state & CMD_T_ABORTED)
3350 if (cmd->transport_state & CMD_T_ABORTED) { 3284 goto aborted;
3351 tmr->response = TMR_FUNCTION_REJECTED;
3352 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3353 goto check_stop;
3354 }
3355 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3356 3285
3357 switch (tmr->function) { 3286 switch (tmr->function) {
3358 case TMR_ABORT_TASK: 3287 case TMR_ABORT_TASK:
@@ -3386,18 +3315,16 @@ static void target_tmr_work(struct work_struct *work)
3386 break; 3315 break;
3387 } 3316 }
3388 3317
3389 spin_lock_irqsave(&cmd->t_state_lock, flags); 3318 if (cmd->transport_state & CMD_T_ABORTED)
3390 if (cmd->transport_state & CMD_T_ABORTED) { 3319 goto aborted;
3391 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3392 goto check_stop;
3393 }
3394 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3395 3320
3396 cmd->se_tfo->queue_tm_rsp(cmd); 3321 cmd->se_tfo->queue_tm_rsp(cmd);
3397 3322
3398check_stop:
3399 transport_lun_remove_cmd(cmd);
3400 transport_cmd_check_stop_to_fabric(cmd); 3323 transport_cmd_check_stop_to_fabric(cmd);
3324 return;
3325
3326aborted:
3327 target_handle_abort(cmd);
3401} 3328}
3402 3329
3403int transport_generic_handle_tmr( 3330int transport_generic_handle_tmr(
@@ -3416,16 +3343,15 @@ int transport_generic_handle_tmr(
3416 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3343 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3417 3344
3418 if (aborted) { 3345 if (aborted) {
3419 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" 3346 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
3420 "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, 3347 cmd->se_tmr_req->function,
3421 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3348 cmd->se_tmr_req->ref_task_tag, cmd->tag);
3422 transport_lun_remove_cmd(cmd); 3349 target_handle_abort(cmd);
3423 transport_cmd_check_stop_to_fabric(cmd);
3424 return 0; 3350 return 0;
3425 } 3351 }
3426 3352
3427 INIT_WORK(&cmd->work, target_tmr_work); 3353 INIT_WORK(&cmd->work, target_tmr_work);
3428 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 3354 schedule_work(&cmd->work);
3429 return 0; 3355 return 0;
3430} 3356}
3431EXPORT_SYMBOL(transport_generic_handle_tmr); 3357EXPORT_SYMBOL(transport_generic_handle_tmr);
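
Two structural notes on this last transport hunk: the duplicated CMD_T_ABORTED checks collapse into a single target_handle_abort() exit, and TMR work no longer needs the per-device tmr_wq (its field is dropped from struct se_device in the target_core_base.h hunk further down), presumably because TMR handling needs neither strict ordering nor a dedicated rescuer. The dispatch change in isolation:

    #include <linux/workqueue.h>

    /* Old: queue onto the se_device's private workqueue. */
    static void demo_queue_tmr_old(struct se_cmd *cmd)
    {
            INIT_WORK(&cmd->work, target_tmr_work);
            queue_work(cmd->se_dev->tmr_wq, &cmd->work);
    }

    /* New: the shared system workqueue is enough. */
    static void demo_queue_tmr_new(struct se_cmd *cmd)
    {
            INIT_WORK(&cmd->work, target_tmr_work);
            schedule_work(&cmd->work);
    }
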
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index c8ac242ce888..ced1c10364eb 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -266,7 +266,7 @@ bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
266 pr_debug("[%s]: %s UNIT ATTENTION condition with" 266 pr_debug("[%s]: %s UNIT ATTENTION condition with"
267 " INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x" 267 " INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
268 " reported ASC: 0x%02x, ASCQ: 0x%02x\n", 268 " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
269 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 269 nacl->se_tpg->se_tpg_tfo->fabric_name,
270 (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : 270 (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
271 "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl, 271 "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
272 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); 272 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
@@ -327,7 +327,7 @@ int core_scsi3_ua_clear_for_request_sense(
327 327
328 pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" 328 pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
329 " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x," 329 " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
330 " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 330 " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->fabric_name,
331 cmd->orig_fe_lun, *asc, *ascq); 331 cmd->orig_fe_lun, *asc, *ascq);
332 332
333 return (head) ? -EPERM : 0; 333 return (head) ? -EPERM : 0;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9cd404acdb82..1e6d24943565 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -958,7 +958,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
958 * 0 success 958 * 0 success
959 * 1 internally queued to wait for ring memory to free. 959 * 1 internally queued to wait for ring memory to free.
960 */ 960 */
961static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) 961static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
962{ 962{
963 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 963 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
964 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 964 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
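
This tcmu change fixes swapped annotations rather than behavior: the documented return values (0 issued, 1 internally queued) are plain ints, while the SCSI failure reason travels through the out-parameter, so the types now match actual use. A hypothetical caller under the corrected signature, assuming a negative return reports the sense reason:

    static sense_reason_t demo_queue(struct tcmu_cmd *tcmu_cmd)
    {
            sense_reason_t scsi_err = TCM_NO_SENSE;
            int ret = queue_cmd_ring(tcmu_cmd, &scsi_err);

            if (ret < 0)            /* failure: report the sense reason */
                    return scsi_err;
            return TCM_NO_SENSE;    /* 0 = issued, 1 = queued internally */
    }
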
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 70adcfdca8d1..c2e1fc927fdf 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -399,11 +399,6 @@ struct se_portal_group xcopy_pt_tpg;
399static struct se_session xcopy_pt_sess; 399static struct se_session xcopy_pt_sess;
400static struct se_node_acl xcopy_pt_nacl; 400static struct se_node_acl xcopy_pt_nacl;
401 401
402static char *xcopy_pt_get_fabric_name(void)
403{
404 return "xcopy-pt";
405}
406
407static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) 402static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
408{ 403{
409 return 0; 404 return 0;
@@ -463,7 +458,7 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
463} 458}
464 459
465static const struct target_core_fabric_ops xcopy_pt_tfo = { 460static const struct target_core_fabric_ops xcopy_pt_tfo = {
466 .get_fabric_name = xcopy_pt_get_fabric_name, 461 .fabric_name = "xcopy-pt",
467 .get_cmd_state = xcopy_pt_get_cmd_state, 462 .get_cmd_state = xcopy_pt_get_cmd_state,
468 .release_cmd = xcopy_pt_release_cmd, 463 .release_cmd = xcopy_pt_release_cmd,
469 .check_stop_free = xcopy_pt_check_stop_free, 464 .check_stop_free = xcopy_pt_check_stop_free,
@@ -479,6 +474,8 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
479 474
480int target_xcopy_setup_pt(void) 475int target_xcopy_setup_pt(void)
481{ 476{
477 int ret;
478
482 xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0); 479 xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
483 if (!xcopy_wq) { 480 if (!xcopy_wq) {
484 pr_err("Unable to allocate xcopy_wq\n"); 481 pr_err("Unable to allocate xcopy_wq\n");
@@ -496,7 +493,9 @@ int target_xcopy_setup_pt(void)
496 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); 493 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
497 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); 494 INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
498 memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); 495 memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
499 transport_init_session(&xcopy_pt_sess); 496 ret = transport_init_session(&xcopy_pt_sess);
497 if (ret < 0)
498 return ret;
500 499
501 xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; 500 xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
502 xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; 501 xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index e55c4d537592..1ce49518d440 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -392,11 +392,6 @@ static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg)
392 return container_of(se_tpg, struct ft_tpg, se_tpg); 392 return container_of(se_tpg, struct ft_tpg, se_tpg);
393} 393}
394 394
395static char *ft_get_fabric_name(void)
396{
397 return "fc";
398}
399
400static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg) 395static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
401{ 396{
402 return ft_tpg(se_tpg)->lport_wwn->name; 397 return ft_tpg(se_tpg)->lport_wwn->name;
@@ -427,9 +422,8 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
427 422
428static const struct target_core_fabric_ops ft_fabric_ops = { 423static const struct target_core_fabric_ops ft_fabric_ops = {
429 .module = THIS_MODULE, 424 .module = THIS_MODULE,
430 .name = "fc", 425 .fabric_name = "fc",
431 .node_acl_size = sizeof(struct ft_node_acl), 426 .node_acl_size = sizeof(struct ft_node_acl),
432 .get_fabric_name = ft_get_fabric_name,
433 .tpg_get_wwn = ft_get_fabric_wwn, 427 .tpg_get_wwn = ft_get_fabric_wwn,
434 .tpg_get_tag = ft_get_tag, 428 .tpg_get_tag = ft_get_tag,
435 .tpg_check_demo_mode = ft_check_false, 429 .tpg_check_demo_mode = ft_check_false,
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 106988a6661a..34f5982cab78 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1256,11 +1256,6 @@ static int usbg_check_false(struct se_portal_group *se_tpg)
1256 return 0; 1256 return 0;
1257} 1257}
1258 1258
1259static char *usbg_get_fabric_name(void)
1260{
1261 return "usb_gadget";
1262}
1263
1264static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg) 1259static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
1265{ 1260{
1266 struct usbg_tpg *tpg = container_of(se_tpg, 1261 struct usbg_tpg *tpg = container_of(se_tpg,
@@ -1718,8 +1713,7 @@ static int usbg_check_stop_free(struct se_cmd *se_cmd)
1718 1713
1719static const struct target_core_fabric_ops usbg_ops = { 1714static const struct target_core_fabric_ops usbg_ops = {
1720 .module = THIS_MODULE, 1715 .module = THIS_MODULE,
1721 .name = "usb_gadget", 1716 .fabric_name = "usb_gadget",
1722 .get_fabric_name = usbg_get_fabric_name,
1723 .tpg_get_wwn = usbg_get_fabric_wwn, 1717 .tpg_get_wwn = usbg_get_fabric_wwn,
1724 .tpg_get_tag = usbg_get_tag, 1718 .tpg_get_tag = usbg_get_tag,
1725 .tpg_check_demo_mode = usbg_check_true, 1719 .tpg_check_demo_mode = usbg_check_true,
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 9f2f563c82ed..607be1f4fe27 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -632,7 +632,6 @@ static struct scsi_host_template mts_scsi_host_template = {
632 .sg_tablesize = SG_ALL, 632 .sg_tablesize = SG_ALL,
633 .can_queue = 1, 633 .can_queue = 1,
634 .this_id = -1, 634 .this_id = -1,
635 .use_clustering = 1,
636 .emulated = 1, 635 .emulated = 1,
637 .slave_alloc = mts_slave_alloc, 636 .slave_alloc = mts_slave_alloc,
638 .slave_configure = mts_slave_configure, 637 .slave_configure = mts_slave_configure,
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e227bb5b794f..fde2e71a6ade 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -639,13 +639,6 @@ static const struct scsi_host_template usb_stor_host_template = {
639 */ 639 */
640 .max_sectors = 240, 640 .max_sectors = 240,
641 641
642 /*
643 * merge commands... this seems to help performance, but
644 * periodically someone should test to see which setting is more
645 * optimal.
646 */
647 .use_clustering = 1,
648
649 /* emulated HBA */ 642 /* emulated HBA */
650 .emulated = 1, 643 .emulated = 1,
651 644
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 1f7b401c4d04..36742e8e7edc 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -879,6 +879,7 @@ static struct scsi_host_template uas_host_template = {
879 .this_id = -1, 879 .this_id = -1,
880 .sg_tablesize = SG_NONE, 880 .sg_tablesize = SG_NONE,
881 .skip_settle_delay = 1, 881 .skip_settle_delay = 1,
882 .dma_boundary = PAGE_SIZE - 1,
882}; 883};
883 884
884#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ 885#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 50dffe83714c..a08472ae5b1b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -285,11 +285,6 @@ static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
285 return 0; 285 return 0;
286} 286}
287 287
288static char *vhost_scsi_get_fabric_name(void)
289{
290 return "vhost";
291}
292
293static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg) 288static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
294{ 289{
295 struct vhost_scsi_tpg *tpg = container_of(se_tpg, 290 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
@@ -2289,8 +2284,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2289 2284
2290static const struct target_core_fabric_ops vhost_scsi_ops = { 2285static const struct target_core_fabric_ops vhost_scsi_ops = {
2291 .module = THIS_MODULE, 2286 .module = THIS_MODULE,
2292 .name = "vhost", 2287 .fabric_name = "vhost",
2293 .get_fabric_name = vhost_scsi_get_fabric_name,
2294 .tpg_get_wwn = vhost_scsi_get_fabric_wwn, 2288 .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
2295 .tpg_get_tag = vhost_scsi_get_tpgt, 2289 .tpg_get_tag = vhost_scsi_get_tpgt,
2296 .tpg_check_demo_mode = vhost_scsi_check_true, 2290 .tpg_check_demo_mode = vhost_scsi_check_true,
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 14a3d4cbc2a7..c9e23a126218 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1712,11 +1712,6 @@ static struct configfs_attribute *scsiback_wwn_attrs[] = {
1712 NULL, 1712 NULL,
1713}; 1713};
1714 1714
1715static char *scsiback_get_fabric_name(void)
1716{
1717 return "xen-pvscsi";
1718}
1719
1720static int scsiback_port_link(struct se_portal_group *se_tpg, 1715static int scsiback_port_link(struct se_portal_group *se_tpg,
1721 struct se_lun *lun) 1716 struct se_lun *lun)
1722{ 1717{
@@ -1810,8 +1805,7 @@ static int scsiback_check_false(struct se_portal_group *se_tpg)
1810 1805
1811static const struct target_core_fabric_ops scsiback_ops = { 1806static const struct target_core_fabric_ops scsiback_ops = {
1812 .module = THIS_MODULE, 1807 .module = THIS_MODULE,
1813 .name = "xen-pvscsi", 1808 .fabric_name = "xen-pvscsi",
1814 .get_fabric_name = scsiback_get_fabric_name,
1815 .tpg_get_wwn = scsiback_get_fabric_wwn, 1809 .tpg_get_wwn = scsiback_get_fabric_wwn,
1816 .tpg_get_tag = scsiback_get_tag, 1810 .tpg_get_tag = scsiback_get_tag,
1817 .tpg_check_demo_mode = scsiback_check_true, 1811 .tpg_check_demo_mode = scsiback_check_true,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 45552e6eae1e..338604dff7d0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -340,7 +340,6 @@ struct queue_limits {
340 340
341 unsigned char misaligned; 341 unsigned char misaligned;
342 unsigned char discard_misaligned; 342 unsigned char discard_misaligned;
343 unsigned char cluster;
344 unsigned char raid_partial_stripes_expensive; 343 unsigned char raid_partial_stripes_expensive;
345 enum blk_zoned_model zoned; 344 enum blk_zoned_model zoned;
346}; 345};
@@ -658,11 +657,6 @@ static inline bool queue_is_mq(struct request_queue *q)
658 return q->mq_ops; 657 return q->mq_ops;
659} 658}
660 659
661static inline unsigned int blk_queue_cluster(struct request_queue *q)
662{
663 return q->limits.cluster;
664}
665
666static inline enum blk_zoned_model 660static inline enum blk_zoned_model
667blk_queue_zoned_model(struct request_queue *q) 661blk_queue_zoned_model(struct request_queue *q)
668{ 662{
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 38c95d66ab12..68133842e6d7 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -135,7 +135,6 @@ enum {
135 135
136 ATA_SHT_EMULATED = 1, 136 ATA_SHT_EMULATED = 1,
137 ATA_SHT_THIS_ID = -1, 137 ATA_SHT_THIS_ID = -1,
138 ATA_SHT_USE_CLUSTERING = 1,
139 138
140 /* struct ata_taskfile flags */ 139 /* struct ata_taskfile flags */
141 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ 140 ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
@@ -1360,7 +1359,6 @@ extern struct device_attribute *ata_common_sdev_attrs[];
1360 .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ 1359 .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
1361 .this_id = ATA_SHT_THIS_ID, \ 1360 .this_id = ATA_SHT_THIS_ID, \
1362 .emulated = ATA_SHT_EMULATED, \ 1361 .emulated = ATA_SHT_EMULATED, \
1363 .use_clustering = ATA_SHT_USE_CLUSTERING, \
1364 .proc_name = drv_name, \ 1362 .proc_name = drv_name, \
1365 .slave_configure = ata_scsi_slave_config, \ 1363 .slave_configure = ata_scsi_slave_config, \
1366 .slave_destroy = ata_scsi_slave_destroy, \ 1364 .slave_destroy = ata_scsi_slave_destroy, \
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index aa760df8c6b3..6ca954e9f752 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -42,9 +42,6 @@ struct scsi_transport_template;
42#define MODE_INITIATOR 0x01 42#define MODE_INITIATOR 0x01
43#define MODE_TARGET 0x02 43#define MODE_TARGET 0x02
44 44
45#define DISABLE_CLUSTERING 0
46#define ENABLE_CLUSTERING 1
47
48struct scsi_host_template { 45struct scsi_host_template {
49 struct module *module; 46 struct module *module;
50 const char *name; 47 const char *name;
@@ -364,6 +361,11 @@ struct scsi_host_template {
364 unsigned int max_sectors; 361 unsigned int max_sectors;
365 362
366 /* 363 /*
364 * Maximum size in bytes of a single segment.
365 */
366 unsigned int max_segment_size;
367
368 /*
367 * DMA scatter gather segment boundary limit. A segment crossing this 369 * DMA scatter gather segment boundary limit. A segment crossing this
368 * boundary will be split in two. 370 * boundary will be split in two.
369 */ 371 */
@@ -413,16 +415,6 @@ struct scsi_host_template {
413 unsigned unchecked_isa_dma:1; 415 unsigned unchecked_isa_dma:1;
414 416
415 /* 417 /*
416 * True if this host adapter can make good use of clustering.
417 * I originally thought that if the tablesize was large that it
418 * was a waste of CPU cycles to prepare a cluster list, but
419 * it works out that the Buslogic is faster if you use a smaller
420 * number of segments (i.e. use clustering). I guess it is
421 * inefficient.
422 */
423 unsigned use_clustering:1;
424
425 /*
426 * True for emulated SCSI host adapters (e.g. ATAPI). 418 * True for emulated SCSI host adapters (e.g. ATAPI).
427 */ 419 */
428 unsigned emulated:1; 420 unsigned emulated:1;
@@ -596,6 +588,7 @@ struct Scsi_Host {
596 short unsigned int sg_tablesize; 588 short unsigned int sg_tablesize;
597 short unsigned int sg_prot_tablesize; 589 short unsigned int sg_prot_tablesize;
598 unsigned int max_sectors; 590 unsigned int max_sectors;
591 unsigned int max_segment_size;
599 unsigned long dma_boundary; 592 unsigned long dma_boundary;
600 /* 593 /*
601 * In scsi-mq mode, the number of hardware queues supported by the LLD. 594 * In scsi-mq mode, the number of hardware queues supported by the LLD.
@@ -613,7 +606,6 @@ struct Scsi_Host {
613 606
614 unsigned active_mode:2; 607 unsigned active_mode:2;
615 unsigned unchecked_isa_dma:1; 608 unsigned unchecked_isa_dma:1;
616 unsigned use_clustering:1;
617 609
618 /* 610 /*
619 * Host has requested that no further requests come through for the 611 * Host has requested that no further requests come through for the
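
On the host-template side the boolean use_clustering knob gives way to explicit limits: a driver that cannot handle multi-page segments now states the actual constraint through the new max_segment_size field or a dma_boundary (as the uas change above does), and the block layer merges segments whenever they fit. A hypothetical LLD under the new scheme:

    #include <scsi/scsi_host.h>

    /* Hypothetical host template: instead of .use_clustering = 0,
     * spell out the real per-segment constraint. */
    static struct scsi_host_template demo_sht = {
            .name             = "demo-hba",
            .this_id          = -1,
            .sg_tablesize     = 128,
            .max_sectors      = 240,
            .max_segment_size = PAGE_SIZE,     /* one page per segment */
            .dma_boundary     = PAGE_SIZE - 1, /* never cross a page */
    };
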
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index e3bdb0550a59..69b7b955902c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -46,6 +46,10 @@
46/* Used by transport_get_inquiry_vpd_device_ident() */ 46/* Used by transport_get_inquiry_vpd_device_ident() */
47#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254 47#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254
48 48
49#define INQUIRY_VENDOR_LEN 8
50#define INQUIRY_MODEL_LEN 16
51#define INQUIRY_REVISION_LEN 4
52
49/* Attempts before moving from SHORT to LONG */ 53/* Attempts before moving from SHORT to LONG */
50#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3 54#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
51#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */ 55#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
@@ -87,6 +91,8 @@
87#define DA_EMULATE_3PC 1 91#define DA_EMULATE_3PC 1
88/* No Emulation for PSCSI by default */ 92/* No Emulation for PSCSI by default */
89#define DA_EMULATE_ALUA 0 93#define DA_EMULATE_ALUA 0
94/* Emulate SCSI2 RESERVE/RELEASE and Persistent Reservations by default */
95#define DA_EMULATE_PR 1
90/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ 96/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
91#define DA_ENFORCE_PR_ISIDS 1 97#define DA_ENFORCE_PR_ISIDS 1
92/* Force SPC-3 PR Activate Persistence across Target Power Loss */ 98/* Force SPC-3 PR Activate Persistence across Target Power Loss */
@@ -134,7 +140,6 @@ enum se_cmd_flags_table {
134 SCF_SENT_CHECK_CONDITION = 0x00000800, 140 SCF_SENT_CHECK_CONDITION = 0x00000800,
135 SCF_OVERFLOW_BIT = 0x00001000, 141 SCF_OVERFLOW_BIT = 0x00001000,
136 SCF_UNDERFLOW_BIT = 0x00002000, 142 SCF_UNDERFLOW_BIT = 0x00002000,
137 SCF_SEND_DELAYED_TAS = 0x00004000,
138 SCF_ALUA_NON_OPTIMIZED = 0x00008000, 143 SCF_ALUA_NON_OPTIMIZED = 0x00008000,
139 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, 144 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
140 SCF_COMPARE_AND_WRITE = 0x00080000, 145 SCF_COMPARE_AND_WRITE = 0x00080000,
@@ -314,9 +319,13 @@ struct t10_vpd {
314}; 319};
315 320
316struct t10_wwn { 321struct t10_wwn {
317 char vendor[8]; 322 /*
318 char model[16]; 323 * SCSI left aligned strings may not be null terminated. +1 to ensure a
319 char revision[4]; 324 * null terminator is always present.
325 */
326 char vendor[INQUIRY_VENDOR_LEN + 1];
327 char model[INQUIRY_MODEL_LEN + 1];
328 char revision[INQUIRY_REVISION_LEN + 1];
320 char unit_serial[INQUIRY_VPD_SERIAL_LEN]; 329 char unit_serial[INQUIRY_VPD_SERIAL_LEN];
321 spinlock_t t10_vpd_lock; 330 spinlock_t t10_vpd_lock;
322 struct se_device *t10_dev; 331 struct se_device *t10_dev;
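
The extra byte is needed because SPC INQUIRY strings are left-aligned, space-padded, and carry no NUL terminator on the wire; copying INQUIRY_VENDOR_LEN bytes into an array of exactly that size would let "%s" formatting run past the buffer. A sketch of the copy this layout makes safe (demo helper, not kernel API):

    #include <linux/string.h>

    static void demo_store_vendor(struct t10_wwn *wwn, const u8 *inq_vendor)
    {
            /* The array holds INQUIRY_VENDOR_LEN + 1 bytes, so the
             * terminator fits even when all 8 payload bytes are used. */
            memcpy(wwn->vendor, inq_vendor, INQUIRY_VENDOR_LEN);
            wwn->vendor[INQUIRY_VENDOR_LEN] = '\0';
    }
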
@@ -474,7 +483,8 @@ struct se_cmd {
 	struct se_session	*se_sess;
 	struct se_tmr_req	*se_tmr_req;
 	struct list_head	se_cmd_list;
-	struct completion	*compl;
+	struct completion	*free_compl;
+	struct completion	*abrt_compl;
 	const struct target_core_fabric_ops *se_tfo;
 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
 	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
@@ -601,6 +611,7 @@ struct se_session {
 	struct se_node_acl	*se_node_acl;
 	struct se_portal_group	*se_tpg;
 	void			*fabric_sess_ptr;
+	struct percpu_ref	cmd_count;
 	struct list_head	sess_list;
 	struct list_head	sess_acl_list;
 	struct list_head	sess_cmd_list;
@@ -664,7 +675,7 @@ struct se_dev_attrib {
 	int		emulate_tpws;
 	int		emulate_caw;
 	int		emulate_3pc;
-	int		pi_prot_format;
+	int		emulate_pr;
 	enum target_prot_type pi_prot_type;
 	enum target_prot_type hw_pi_prot_type;
 	int		pi_prot_verify;
@@ -731,7 +742,6 @@ struct se_lun {
 	struct scsi_port_stats	lun_stats;
 	struct config_group	lun_group;
 	struct se_port_stat_grps port_stat_grps;
-	struct completion	lun_ref_comp;
 	struct completion	lun_shutdown_comp;
 	struct percpu_ref	lun_ref;
 	struct list_head	lun_dev_link;
@@ -794,7 +804,6 @@ struct se_device {
 	struct t10_pr_registration *dev_pr_res_holder;
 	struct list_head	dev_sep_list;
 	struct list_head	dev_tmr_list;
-	struct workqueue_struct *tmr_wq;
 	struct work_struct	qf_work_queue;
 	struct list_head	delayed_cmd_list;
 	struct list_head	state_list;
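
Two behavioural notes on the hunks above: the single se_cmd completion pointer is split so that an abort waiter (abrt_compl) and a normal transport_generic_free_cmd() waiter (free_compl) can block on the command independently, and the new emulate_pr attribute makes Persistent Reservation handling switchable per device (default on, per DA_EMULATE_PR). A hedged sketch of the kind of gate the attribute enables; the helper below is hypothetical, not taken from the series:

/*
 * Sketch (assumption): with emulate_pr, PERSISTENT RESERVE emulation
 * can be disabled per device, e.g. so a passthrough backend lets the
 * underlying device answer PR commands itself.
 */
static sense_reason_t pr_check_emulation(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (!dev->dev_attrib.emulate_pr)
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	return 0;
}
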
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index f4147b398431..ee5ddd81cd8d 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -8,7 +8,18 @@
 
 struct target_core_fabric_ops {
 	struct module *module;
-	const char *name;
+	/*
+	 * XXX: Special case for iscsi/iSCSI...
+	 * If non-null, fabric_alias is used for matching target/$fabric
+	 * ConfigFS paths. If null, fabric_name is used for this (see below).
+	 */
+	const char *fabric_alias;
+	/*
+	 * fabric_name is used for matching target/$fabric ConfigFS paths
+	 * without a fabric_alias (see above). It's also used for the ALUA state
+	 * path and is stored on disk with PR state.
+	 */
+	const char *fabric_name;
 	size_t node_acl_size;
 	/*
 	 * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
@@ -23,7 +34,6 @@ struct target_core_fabric_ops {
 	 * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
 	 */
 	u32 max_data_sg_nents;
-	char *(*get_fabric_name)(void);
 	char *(*tpg_get_wwn)(struct se_portal_group *);
 	u16 (*tpg_get_tag)(struct se_portal_group *);
 	u32 (*tpg_get_default_depth)(struct se_portal_group *);
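
The two hunks above convert the fabric name from a callback into static strings: a fabric driver now fills in fabric_name (and optionally fabric_alias) instead of implementing get_fabric_name(). A before/after sketch for a hypothetical "demo" fabric:

/* Before: every fabric supplied a trivial callback. */
static char *demo_get_fabric_name(void)
{
	return "demo";
}

/*
 * After (sketch, hypothetical fabric): the template points at a
 * constant string, which the core can use for ConfigFS matching, the
 * ALUA state path and on-disk PR state.
 */
static const struct target_core_fabric_ops demo_ops = {
	.module		= THIS_MODULE,
	.fabric_name	= "demo",
	/* ... remaining callbacks elided ... */
};
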
@@ -101,6 +111,13 @@ struct target_core_fabric_ops {
 	struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
 	struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
 	struct configfs_attribute **tfc_tpg_nacl_param_attrs;
+
+	/*
+	 * Set this member variable to true if the SCSI transport protocol
+	 * (e.g. iSCSI) requires that the Data-Out buffer is transferred in
+	 * its entirety before a command is aborted.
+	 */
+	bool write_pending_must_be_called;
 };
 
 int target_register_template(const struct target_core_fabric_ops *fo);
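
For the new flag above, an iSCSI-style fabric whose transport must receive the complete Data-Out buffer before a command may be aborted would opt in, presumably so the core still invokes ->write_pending() for commands that are about to be aborted. A sketch with a hypothetical fabric:

static const struct target_core_fabric_ops demo_iscsi_ops = {
	.fabric_name			= "demo-iscsi",
	/* transport needs full Data-Out before aborting (assumption) */
	.write_pending_must_be_called	= true,
	/* ... */
};
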
@@ -116,7 +133,7 @@ struct se_session *target_setup_session(struct se_portal_group *,
 				struct se_session *, void *));
 void target_remove_session(struct se_session *);
 
-void transport_init_session(struct se_session *);
+int transport_init_session(struct se_session *se_sess);
 struct se_session *transport_alloc_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
 		unsigned int);
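
transport_init_session() changing from void to int pairs with the new per-session cmd_count percpu_ref in target_core_base.h: most plausibly the initialisation of that percpu_ref can now fail, so callers must check the return value. A hedged caller-side sketch (the surrounding helper is hypothetical):

static struct se_session *demo_make_session(void)
{
	struct se_session *sess;
	int rc;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return ERR_PTR(-ENOMEM);

	/* May fail, e.g. with -ENOMEM from percpu_ref setup. */
	rc = transport_init_session(sess);
	if (rc < 0) {
		kfree(sess);
		return ERR_PTR(rc);
	}
	return sess;
}
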
@@ -149,12 +166,12 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
 int transport_handle_cdb_direct(struct se_cmd *);
 sense_reason_t transport_generic_new_cmd(struct se_cmd *);
 
+void target_put_cmd_and_wait(struct se_cmd *cmd);
 void target_execute_cmd(struct se_cmd *cmd);
 
 int transport_generic_free_cmd(struct se_cmd *, int);
 
 bool transport_wait_for_tasks(struct se_cmd *);
-int transport_check_aborted_status(struct se_cmd *, int);
 int transport_send_check_condition_and_sense(struct se_cmd *,
 		sense_reason_t, int);
 int target_get_sess_cmd(struct se_cmd *, bool);
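
The new target_put_cmd_and_wait() exports a common pattern for abort and shutdown paths: drop the caller's reference and block until the command has been fully released, which fits the free_compl/abrt_compl split in se_cmd above. A usage sketch, under that assumption:

/*
 * Sketch (assumption about the helper's semantics): drop our reference
 * and wait until the command is gone before completing the TMR.
 */
static void demo_abort_one_cmd(struct se_cmd *cmd)
{
	target_put_cmd_and_wait(cmd);
	/* cmd is released here; safe to report TMR completion. */
}
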
diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h
new file mode 100644
index 000000000000..87408faf6e4e
--- /dev/null
+++ b/include/trace/events/iscsi.h
@@ -0,0 +1,107 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iscsi
+
+#if !defined(_TRACE_ISCSI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ISCSI_H
+
+#include <linux/tracepoint.h>
+
+/* max debug message length */
+#define ISCSI_MSG_MAX	256
+
+/*
+ * Declare tracepoint helper function.
+ */
+void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *),
+		     struct device *dev, const char *fmt, ...);
+
+/*
+ * Declare event class for iscsi debug messages.
+ */
+DECLARE_EVENT_CLASS(iscsi_log_msg,
+
+	TP_PROTO(struct device *dev, struct va_format *vaf),
+
+	TP_ARGS(dev, vaf),
+
+	TP_STRUCT__entry(
+		__string(dname, dev_name(dev))
+		__dynamic_array(char, msg, ISCSI_MSG_MAX)
+	),
+
+	TP_fast_assign(
+		__assign_str(dname, dev_name(dev));
+		vsnprintf(__get_str(msg), ISCSI_MSG_MAX, vaf->fmt, *vaf->va);
+	),
+
+	TP_printk("%s: %s", __get_str(dname), __get_str(msg))
+);
+
+/*
+ * Define event to capture iscsi connection debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_conn,
+	TP_PROTO(struct device *dev, struct va_format *vaf),
+
+	TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi session debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_session,
+	TP_PROTO(struct device *dev, struct va_format *vaf),
+
+	TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi error handling debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_eh,
+	TP_PROTO(struct device *dev, struct va_format *vaf),
+
+	TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi tcp debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_tcp,
+	TP_PROTO(struct device *dev, struct va_format *vaf),
+
+	TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi sw tcp debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_sw_tcp,
+	TP_PROTO(struct device *dev, struct va_format *vaf),
+
+	TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi transport session debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_trans_session,
+	TP_PROTO(struct device *dev, struct va_format *vaf),
+
+	TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi transport connection debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_trans_conn,
+	TP_PROTO(struct device *dev, struct va_format *vaf),
+
+	TP_ARGS(dev, vaf)
+);
+
+#endif /* _TRACE_ISCSI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
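
The header above only declares the events; drivers emit them through the iscsi_dbg_trace() helper so a message is formatted once for both printk and tracing. A sketch of the driver-side macro this enables, modeled on the libiscsi pattern; the macro name, the iscsi_dbg_lib_conn toggle and the conn layout are assumptions here:

/*
 * Sketch: route a connection debug message to the kernel log when
 * library debugging is enabled, and always to the iscsi_dbg_conn
 * tracepoint (a no-op unless the event is enabled).
 */
#define DEMO_ISCSI_DBG_CONN(_conn, dbg_fmt, arg...)			\
do {									\
	if (iscsi_dbg_lib_conn)						\
		iscsi_conn_printk(KERN_INFO, _conn, "%s " dbg_fmt,	\
				  __func__, ##arg);			\
	iscsi_dbg_trace(trace_iscsi_dbg_conn,				\
			&(_conn)->cls_conn->dev,			\
			"%s " dbg_fmt, __func__, ##arg);		\
} while (0)

At runtime the events can then be switched on per class under /sys/kernel/debug/tracing/events/iscsi/ without enabling the printk path.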