author		Linus Torvalds <torvalds@linux-foundation.org>	2019-09-21 13:50:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-21 13:50:15 -0400
commit		10fd71780f7d155f4e35fecfad0ebd4a725a244b (patch)
tree		b88976120fd8f620669ed239842ea26ecc2c5e52
parent		3e414b5bd28f965fb39b9e9419d877df0cf3111a (diff)
parent		e74006edd0d42b45ff37ae4ae13c614cfa30056b (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: qla2xxx, ufs, smartpqi,
  lpfc, hisi_sas, qedf, mpt3sas; plus a whole load of minor updates.

  The only core change this time around is the addition of request
  batching for virtio. Since batching requires an additional flag to
  use, it should be invisible to the rest of the drivers"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (264 commits)
  scsi: hisi_sas: Fix the conflict between device gone and host reset
  scsi: hisi_sas: Add BIST support for phy loopback
  scsi: hisi_sas: Add hisi_sas_debugfs_alloc() to centralise allocation
  scsi: hisi_sas: Remove some unused function arguments
  scsi: hisi_sas: Remove redundant work declaration
  scsi: hisi_sas: Remove hisi_sas_hw.slot_complete
  scsi: hisi_sas: Assign NCQ tag for all NCQ commands
  scsi: hisi_sas: Update all the registers after suspend and resume
  scsi: hisi_sas: Retry 3 times TMF IO for SAS disks when init device
  scsi: hisi_sas: Remove sleep after issue phy reset if sas_smp_phy_control() fails
  scsi: hisi_sas: Directly return when running I_T_nexus reset if phy disabled
  scsi: hisi_sas: Use true/false as input parameter of sas_phy_reset()
  scsi: hisi_sas: add debugfs auto-trigger for internal abort time out
  scsi: virtio_scsi: unplug LUNs when events missed
  scsi: scsi_dh_rdac: zero cdb in send_mode_select()
  scsi: fcoe: fix null-ptr-deref Read in fc_release_transport
  scsi: ufs-hisi: use devm_platform_ioremap_resource() to simplify code
  scsi: ufshcd: use devm_platform_ioremap_resource() to simplify code
  scsi: hisi_sas: use devm_platform_ioremap_resource() to simplify code
  scsi: ufs: Use kmemdup in ufshcd_read_string_desc()
  ...
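For background on the one core change called out above: blk-mq already expresses request batching through the bd->last flag and the optional commit_rqs() callback, and the SCSI core change plumbs the same idea through to LLDs that opt in via a flag. A minimal blk-mq sketch of the convention follows; my_queue_rq, my_commit_rqs, post_to_ring and kick_hw are hypothetical names, not code from this merge.

#include <linux/blk-mq.h>

/* Sketch only: defer the hardware doorbell until the end of a batch. */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	post_to_ring(hctx->driver_data, bd->rq);  /* hypothetical helper */
	if (bd->last)                             /* last request of this batch */
		kick_hw(hctx->driver_data);       /* hypothetical doorbell */
	return BLK_STS_OK;
}

/* Called by blk-mq when a batch ended without a bd->last dispatch. */
static void my_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	kick_hw(hctx->driver_data);
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq   = my_queue_rq,
	.commit_rqs = my_commit_rqs,
};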
-rw-r--r--  Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845-db845c.dts | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845-mtp.dts | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 9
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 16
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 7
-rw-r--r--  drivers/scsi/csiostor/csio_wr.c | 8
-rw-r--r--  drivers/scsi/cxlflash/main.c | 12
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_init.c | 13
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c | 14
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 17
-rw-r--r--  drivers/scsi/fdomain.c | 6
-rw-r--r--  drivers/scsi/fdomain_isa.c | 5
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 4
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 14
-rw-r--r--  drivers/scsi/fnic/fnic_isr.c | 4
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 54
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 982
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 48
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 92
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 307
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 3
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 76
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 29
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 68
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 228
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 61
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 116
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 181
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 34
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 926
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 65
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 43
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 389
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 28
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 591
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 533
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 50
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 112
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 29
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2.h | 5
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 10
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_image.h | 39
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_pci.h | 13
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 13
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 175
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 30
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 178
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 196
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 13
-rw-r--r--  drivers/scsi/pmcraid.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf.h | 1
-rw-r--r--  drivers/scsi/qedf/qedf_debugfs.c | 16
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 38
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c | 33
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 67
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 178
-rw-r--r--  drivers/scsi/qedf/qedf_version.h | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 23
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 132
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_dsd.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 33
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 254
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 550
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 28
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 226
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 67
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 32
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 237
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 212
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 35
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 27
-rw-r--r--  drivers/scsi/qlogicpti.c | 10
-rw-r--r--  drivers/scsi/scsi_debugfs.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 52
-rw-r--r--  drivers/scsi/scsi_logging.c | 48
-rw-r--r--  drivers/scsi/sd.c | 1
-rw-r--r--  drivers/scsi/smartpqi/Kconfig | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 20
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 236
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 102
-rw-r--r--  drivers/scsi/sun3_scsi.c | 2
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_nvram.c | 2
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c | 40
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.c | 4
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 41
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 4
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 18
-rw-r--r--  drivers/scsi/ufs/ufs.h | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 4
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 281
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 57
-rw-r--r--  drivers/scsi/virtio_scsi.c | 88
-rw-r--r--  drivers/scsi/wd33c93.c | 1
-rw-r--r--  drivers/target/target_core_user.c | 20
-rw-r--r--  include/linux/nvme-fc-driver.h | 2
-rw-r--r--  include/scsi/scsi_cmnd.h | 1
-rw-r--r--  include/scsi/scsi_dbg.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 16
-rw-r--r--  include/uapi/scsi/scsi_bsg_fc.h | 54
-rw-r--r--  include/uapi/scsi/scsi_netlink.h | 20
-rw-r--r--  include/uapi/scsi/scsi_netlink_fc.h | 17
130 files changed, 5755 insertions, 3679 deletions
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index a74720486ee2..d78ef63935f9 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -54,6 +54,8 @@ Optional properties:
 	PHY reset from the UFS controller.
 - resets            : reset node register
 - reset-names       : describe reset node register, the "rst" corresponds to reset the whole UFS IP.
+- reset-gpios       : A phandle and gpio specifier denoting the GPIO connected
+                      to the RESET pin of the UFS memory device.
 
 Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index 71bd717a4251..f5a85caff1a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -497,6 +497,8 @@
 &ufs_mem_hc {
 	status = "okay";
 
+	reset-gpios = <&tlmm 150 GPIO_ACTIVE_LOW>;
+
 	vcc-supply = <&vreg_l20a_2p95>;
 	vcc-max-microamp = <800000>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index 2e78638eb73b..c57548b7b250 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -388,6 +388,8 @@
 &ufs_mem_hc {
 	status = "okay";
 
+	reset-gpios = <&tlmm 150 GPIO_ACTIVE_LOW>;
+
 	vcc-supply = <&vreg_l20a_2p95>;
 	vcc-max-microamp = <600000>;
 };
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 261d8e495fed..f5781e31f57c 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -565,8 +565,7 @@ static void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
 	if (asd_ha->hw_prof.scb_ext)
 		asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext);
 
-	if (asd_ha->hw_prof.ddb_bitmap)
-		kfree(asd_ha->hw_prof.ddb_bitmap);
+	kfree(asd_ha->hw_prof.ddb_bitmap);
 	asd_ha->hw_prof.ddb_bitmap = NULL;
 
 	for (i = 0; i < ASD_MAX_PHYS; i++) {
@@ -641,12 +640,10 @@ Err:
 
 static void asd_destroy_global_caches(void)
 {
-	if (asd_dma_token_cache)
-		kmem_cache_destroy(asd_dma_token_cache);
+	kmem_cache_destroy(asd_dma_token_cache);
 	asd_dma_token_cache = NULL;
 
-	if (asd_ascb_cache)
-		kmem_cache_destroy(asd_ascb_cache);
+	kmem_cache_destroy(asd_ascb_cache);
 	asd_ascb_cache = NULL;
 }
 
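Both aic94xx hunks rely on kfree() and kmem_cache_destroy() being defined as no-ops when passed NULL, so the guarding conditionals add nothing. The shape of the cleanup, as a before/after sketch:

	/* Before: the NULL check is redundant. */
	if (asd_dma_token_cache)
		kmem_cache_destroy(asd_dma_token_cache);

	/* After: safe even when the cache was never created. */
	kmem_cache_destroy(asd_dma_token_cache);
	asd_dma_token_cache = NULL;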
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index b2014cb96f58..22f06be2606f 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -536,7 +536,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
 			struct device *dev)
 {
 	struct bfad_im_port_pointer *im_portp;
-	int error = 1;
+	int error;
 
 	mutex_lock(&bfad_mutex);
 	error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9ff9429395eb..b4bfab5edf8f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -428,7 +428,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct fc_lport *lport;
 	struct bnx2fc_interface *interface;
 	struct fcoe_ctlr *ctlr;
-	struct fc_frame_header *fh;
 	struct fcoe_rcv_info *fr;
 	struct fcoe_percpu_s *bg;
 	struct sk_buff *tmp_skb;
@@ -463,7 +462,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 		goto err;
 
 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
-	fh = (struct fc_frame_header *) skb_transport_header(skb);
 
 	fr = fcoe_dev_from_skb(skb);
 	fr->fr_dev = lport;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 747f019fb393..f069e09beb10 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -633,7 +633,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 	u16 xid;
 	u32 frame_len, len;
 	struct bnx2fc_cmd *io_req = NULL;
-	struct fcoe_task_ctx_entry *task, *task_page;
 	struct bnx2fc_interface *interface = tgt->port->priv;
 	struct bnx2fc_hba *hba = interface->hba;
 	int task_idx, index;
@@ -711,9 +710,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 
 	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 	index = xid % BNX2FC_TASKS_PER_PAGE;
-	task_page = (struct fcoe_task_ctx_entry *)
-		     hba->task_ctx[task_idx];
-	task = &(task_page[index]);
 
 	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 	if (!io_req)
@@ -839,9 +835,6 @@ ret_err_rqe:
 
 		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 		index = xid % BNX2FC_TASKS_PER_PAGE;
-		task_page = (struct fcoe_task_ctx_entry *)
-			     interface->hba->task_ctx[task_idx];
-		task = &(task_page[index]);
 		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 		if (!io_req)
 			goto ret_warn_rqe;
@@ -1122,7 +1115,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
 					struct fcoe_kcqe *ofld_kcqe)
 {
 	struct bnx2fc_rport		*tgt;
-	struct fcoe_port		*port;
 	struct bnx2fc_interface		*interface;
 	u32 conn_id;
 	u32 context_id;
@@ -1136,7 +1128,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
 	}
 	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
 		ofld_kcqe->fcoe_conn_context_id);
-	port = tgt->port;
 	interface = tgt->port->priv;
 	if (hba != interface->hba) {
 		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
@@ -1463,10 +1454,7 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
 {
 	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
 	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
-	struct bnx2fc_interface *interface = tgt->port->priv;
 	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
-	struct fcoe_task_ctx_entry *orig_task;
-	struct fcoe_task_ctx_entry *task_page;
 	struct fcoe_ext_mul_sges_ctx *sgl;
 	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
 	u8 orig_task_type;
@@ -1528,10 +1516,6 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
 	orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
 	index = orig_xid % BNX2FC_TASKS_PER_PAGE;
 
-	task_page = (struct fcoe_task_ctx_entry *)
-		     interface->hba->task_ctx[orig_task_idx];
-	orig_task = &(task_page[index]);
-
 	/* Multiple SGEs were used for this IO */
 	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
 	sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 9e50e5b53763..da00ca5fa5dc 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -930,7 +930,6 @@ abts_err:
 int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
 				enum fc_rctl r_ctl)
 {
-	struct fc_lport *lport;
 	struct bnx2fc_rport *tgt = orig_io_req->tgt;
 	struct bnx2fc_interface *interface;
 	struct fcoe_port *port;
@@ -948,7 +947,6 @@ int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
 
 	port = orig_io_req->port;
 	interface = port->priv;
-	lport = port->lport;
 
 	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
 	if (!cb_arg) {
@@ -999,7 +997,6 @@ cleanup_err:
 
 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
 {
-	struct fc_lport *lport;
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct bnx2fc_interface *interface;
 	struct fcoe_port *port;
@@ -1015,7 +1012,6 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
 
 	port = io_req->port;
 	interface = port->priv;
-	lport = port->lport;
 
 	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
 	if (!cleanup_io_req) {
@@ -1927,8 +1923,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 	struct fcoe_fcp_rsp_payload *fcp_rsp;
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct scsi_cmnd *sc_cmd;
-	struct Scsi_Host *host;
-
 
 	/* scsi_cmd_cmpl is called with tgt lock held */
 
@@ -1957,7 +1951,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 	/* parse fcp_rsp and obtain sense data from RQ if available */
 	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
 
-	host = sc_cmd->device->host;
 	if (!sc_cmd->SCp.ptr) {
 		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
 		return;
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 03bd896cdbb9..0ca695110f54 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -1316,7 +1316,6 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
 	u32 fl_align = clsz < 32 ? 32 : clsz;
 	u32 pack_align;
 	u32 ingpad, ingpack;
-	int pcie_cap;
 
 	csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
 		      HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
@@ -1347,8 +1346,7 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
 	 * multiple of the Maximum Payload Size.
 	 */
 	pack_align = fl_align;
-	pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
-	if (pcie_cap) {
+	if (pci_is_pcie(hw->pdev)) {
 		u32 mps, mps_log;
 		u16 devctl;
 
@@ -1356,9 +1354,7 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
 		 * [bits 7:5] encodes sizes as powers of 2 starting at
 		 * 128 bytes.
 		 */
-		pci_read_config_word(hw->pdev,
-				     pcie_cap + PCI_EXP_DEVCTL,
-				     &devctl);
+		pcie_capability_read_word(hw->pdev, PCI_EXP_DEVCTL, &devctl);
 		mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
 		mps = 1 << mps_log;
 		if (mps > pack_align)
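This csio conversion (and the esas2r ones further down) swaps an open-coded capability walk for the PCIe capability accessors: pci_is_pcie() replaces probing with pci_find_capability(pdev, PCI_CAP_ID_EXP), and pcie_capability_read_word()/pcie_capability_write_word() compute the register offset internally. The general pattern, sketched for an arbitrary struct pci_dev *pdev:

	u16 devctl;

	if (pci_is_pcie(pdev)) {
		/* offset of the express capability is resolved internally */
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);
		devctl &= ~PCI_EXP_DEVCTL_READRQ;
		devctl |= PCI_EXP_DEVCTL_READRQ_512B;
		pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, devctl);
	}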
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b1f4724efde2..93ef97af22df 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -753,10 +753,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
 	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
 	if (index == PRIMARY_HWQ)
 		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
+		/* fall through */
 	case UNMAP_TWO:
 		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
+		/* fall through */
 	case UNMAP_ONE:
 		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
+		/* fall through */
 	case FREE_IRQ:
 		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
 		/* fall through */
@@ -973,14 +976,18 @@ static void cxlflash_remove(struct pci_dev *pdev)
 	switch (cfg->init_state) {
 	case INIT_STATE_CDEV:
 		cxlflash_release_chrdev(cfg);
+		/* fall through */
 	case INIT_STATE_SCSI:
 		cxlflash_term_local_luns(cfg);
 		scsi_remove_host(cfg->host);
+		/* fall through */
 	case INIT_STATE_AFU:
 		term_afu(cfg);
+		/* fall through */
 	case INIT_STATE_PCI:
 		cfg->ops->destroy_afu(cfg->afu_cookie);
 		pci_disable_device(pdev);
+		/* fall through */
 	case INIT_STATE_NONE:
 		free_mem(cfg);
 		scsi_host_put(cfg->host);
@@ -2353,11 +2360,11 @@ retry:
 			cxlflash_schedule_async_reset(cfg);
 			break;
 		}
-		/* fall through to retry */
+		/* fall through - to retry */
 	case -EAGAIN:
 		if (++nretry < 2)
 			goto retry;
-		/* fall through to exit */
+		/* fall through - to exit */
 	default:
 		break;
 	}
@@ -3017,6 +3024,7 @@ retry:
 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
 		if (cfg->state == STATE_NORMAL)
 			goto retry;
+		/* else, fall through */
 	default:
 		/* Ideally should not happen */
 		dev_err(dev, "%s: Device is not ready, state=%d\n",
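These cxlflash hunks add no logic; they annotate deliberate case fall-through so the cascading unwind switches survive -Wimplicit-fallthrough. At the time of this merge the /* fall through */ comment was the recognized marker (the fallthrough; pseudo-keyword macro arrived in a later cycle). The idiom, with hypothetical undo_*() helpers:

	switch (level) {
	case UNDO_THREE:
		undo_three();
		/* fall through */
	case UNDO_TWO:
		undo_two();
		/* fall through */
	case UNDO_ONE:
		undo_one();
		break;
	}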
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 65f1fe343c64..5efc959493ec 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work)
 	spin_unlock(&ctlr->ms_lock);
 
 retry:
+	memset(cdb, 0, sizeof(cdb));
+
 	data_size = rdac_failover_get(ctlr, &list, cdb);
 
 	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
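The scsi_dh_rdac change zeroes the CDB on every trip around the retry loop, so a retried MODE SELECT cannot inherit stale bytes from the previous attempt (rdac_failover_get() only writes the fields it needs). As a general pattern — build_cdb() and issue_cdb() are hypothetical stand-ins:

	unsigned char cdb[MAX_COMMAND_SIZE];
	int retries = 2;

retry:
	memset(cdb, 0, sizeof(cdb));	/* start each attempt from a clean slate */
	build_cdb(cdb);
	if (issue_cdb(cdb) != 0 && --retries > 0)
		goto retry;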
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 950cd92df2ff..eb7d139ffc00 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -762,14 +762,10 @@ u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
 
 static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
 {
-	int pcie_cap_reg;
-
-	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-	if (pcie_cap_reg) {
+	if (pci_is_pcie(a->pcid)) {
 		u16 devcontrol;
 
-		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
-				     &devcontrol);
+		pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol);
 
 		if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
 		     PCI_EXP_DEVCTL_READRQ_512B) {
@@ -778,9 +774,8 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
 
 			devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
 			devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
-			pci_write_config_word(a->pcid,
-					      pcie_cap_reg + PCI_EXP_DEVCTL,
-					      devcontrol);
+			pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL,
+						   devcontrol);
 		}
 	}
 }
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 3d130523c288..442c5e70a7b4 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -757,7 +757,6 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
 
 		struct atto_hba_get_adapter_info *gai =
 			&hi->data.get_adap_info;
-		int pcie_cap_reg;
 
 		if (hi->flags & HBAF_TUNNEL) {
 			hi->status = ATTO_STS_UNSUPPORTED;
@@ -784,17 +783,14 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
 		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
 		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);
 
-		pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-		if (pcie_cap_reg) {
+		if (pci_is_pcie(a->pcid)) {
 			u16 stat;
 			u32 caps;
 
-			pci_read_config_word(a->pcid,
-					     pcie_cap_reg + PCI_EXP_LNKSTA,
-					     &stat);
-			pci_read_config_dword(a->pcid,
-					      pcie_cap_reg + PCI_EXP_LNKCAP,
-					      &caps);
+			pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA,
+						  &stat);
+			pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
+						   &caps);
 
 			gai->pci.link_speed_curr =
 				(u8)(stat & PCI_EXP_LNKSTA_CLS);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 587d4bbb7d22..25dae9f0b205 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1250,15 +1250,21 @@ static int __init fcoe_if_init(void)
 	/* attach to scsi transport */
 	fcoe_nport_scsi_transport =
 		fc_attach_transport(&fcoe_nport_fc_functions);
+	if (!fcoe_nport_scsi_transport)
+		goto err;
+
 	fcoe_vport_scsi_transport =
 		fc_attach_transport(&fcoe_vport_fc_functions);
-
-	if (!fcoe_nport_scsi_transport) {
-		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
-		return -ENODEV;
-	}
+	if (!fcoe_vport_scsi_transport)
+		goto err_vport;
 
 	return 0;
+
+err_vport:
+	fc_release_transport(fcoe_nport_scsi_transport);
+err:
+	printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
+	return -ENODEV;
 }
 
 /**
@@ -1617,7 +1623,6 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
 	else
 		fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
 
-	fh = (struct fc_frame_header *) skb_transport_header(skb);
 	fh = fc_frame_header_get(fp);
 	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
 		return 0;
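The fcoe_if_init() rework is the standard kernel unwind idiom — one label per acquired resource, released in reverse order — and it appears to correspond to the "fcoe: fix null-ptr-deref Read in fc_release_transport" entry in the shortlog: previously the vport attach result was never checked, and the nport transport could be left dangling. Generic form, with hypothetical acquire_a/acquire_b/release_a helpers:

	a = acquire_a();
	if (!a)
		goto err;
	b = acquire_b();
	if (!b)
		goto err_b;
	return 0;

err_b:
	release_a(a);	/* undo only what succeeded */
err:
	return -ENODEV;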
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index b5e66971b6d9..772bdc93930a 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -166,7 +166,7 @@ static int fdomain_test_loopback(int base)
 
 static void fdomain_reset(int base)
 {
-	outb(1, base + REG_BCTL);
+	outb(BCTL_RST, base + REG_BCTL);
 	mdelay(20);
 	outb(0, base + REG_BCTL);
 	mdelay(1150);
@@ -306,7 +306,7 @@ static void fdomain_work(struct work_struct *work)
 	status = inb(fd->base + REG_BSTAT);
 
 	if (status & BSTAT_REQ) {
-		switch (status & 0x0e) {
+		switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) {
 		case BSTAT_CMD:	/* COMMAND OUT */
 			outb(cmd->cmnd[cmd->SCp.sent_command++],
 			     fd->base + REG_SCSI_DATA);
@@ -331,7 +331,7 @@ static void fdomain_work(struct work_struct *work)
 		case BSTAT_MSG | BSTAT_CMD:	/* MESSAGE OUT */
 			outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA);
 			break;
-		case BSTAT_MSG | BSTAT_IO | BSTAT_CMD:	/* MESSAGE IN */
+		case BSTAT_MSG | BSTAT_CMD | BSTAT_IO:	/* MESSAGE IN */
 			cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
 			if (!cmd->SCp.Message)
 				++done;
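The fdomain changes are behavior-neutral: outb(1, ...) becomes the named reset bit, the 0x0e magic mask becomes the OR of the three named phase bits, and one case label is reordered so its operand order matches the switch expression. Since the patch replaces 0x0e with the named OR in place, the two must be equal, which could even be asserted at build time anywhere in function scope:

	/* Equivalence implied by the substitution above. */
	BUILD_BUG_ON((BSTAT_MSG | BSTAT_CMD | BSTAT_IO) != 0x0e);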
diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c
index 28639adf8219..f2da4fa382e8 100644
--- a/drivers/scsi/fdomain_isa.c
+++ b/drivers/scsi/fdomain_isa.c
@@ -131,8 +131,7 @@ static int fdomain_isa_match(struct device *dev, unsigned int ndev)
 	if (!request_region(base, FDOMAIN_REGION_SIZE, "fdomain_isa"))
 		return 0;
 
-	irq = irqs[(inb(base + REG_CFG1) & 0x0e) >> 1];
-
+	irq = irqs[(inb(base + REG_CFG1) & CFG1_IRQ_MASK) >> 1];
 
 	if (sig)
 		this_id = sig->this_id;
@@ -164,7 +163,7 @@ static int fdomain_isa_param_match(struct device *dev, unsigned int ndev)
 	}
 
 	if (irq_ <= 0)
-		irq_ = irqs[(inb(io[ndev] + REG_CFG1) & 0x0e) >> 1];
+		irq_ = irqs[(inb(io[ndev] + REG_CFG1) & CFG1_IRQ_MASK) >> 1];
 
 	sh = fdomain_create(io[ndev], irq_, scsi_id[ndev], dev);
 	if (!sh) {
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 21991c99db7c..13f7d88d6e57 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -52,7 +52,6 @@ static struct fc_trace_flag_type *fc_trc_flag;
  */
 int fnic_debugfs_init(void)
 {
-	int rc = -1;
 	fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
 
 	fnic_stats_debugfs_root = debugfs_create_dir("statistics",
@@ -70,8 +69,7 @@ int fnic_debugfs_init(void)
 		fc_trc_flag->fc_clear = 4;
 	}
 
-	rc = 0;
-	return rc;
+	return 0;
 }
 
 /*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 911a5adc289c..673887e383cc 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -52,6 +52,7 @@ void fnic_handle_link(struct work_struct *work)
 	unsigned long flags;
 	int old_link_status;
 	u32 old_link_down_cnt;
+	u64 old_port_speed, new_port_speed;
 
 	spin_lock_irqsave(&fnic->fnic_lock, flags);
 
@@ -62,14 +63,19 @@ void fnic_handle_link(struct work_struct *work)
 
 	old_link_down_cnt = fnic->link_down_cnt;
 	old_link_status = fnic->link_status;
+	old_port_speed = atomic64_read(
+			&fnic->fnic_stats.misc_stats.current_port_speed);
+
 	fnic->link_status = vnic_dev_link_status(fnic->vdev);
 	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
 
+	new_port_speed = vnic_dev_port_speed(fnic->vdev);
 	atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
-			vnic_dev_port_speed(fnic->vdev));
-	shost_printk(KERN_INFO, fnic->lport->host, "Current vnic speed set to : %llu\n",
-			(u64)atomic64_read(
-			&fnic->fnic_stats.misc_stats.current_port_speed));
+			new_port_speed);
+	if (old_port_speed != new_port_speed)
+		shost_printk(KERN_INFO, fnic->lport->host,
+				"Current vnic speed set to : %llu\n",
+				new_port_speed);
 
 	switch (vnic_dev_port_speed(fnic->vdev)) {
 	case DCEM_PORTSPEED_10G:
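The fnic_fcs.c hunk captures the old speed before refreshing it and logs only on a change, so repeated link events stop emitting identical "Current vnic speed" lines. Reduced to its essentials (query_port_speed() is a hypothetical stand-in for the vnic accessor):

	u64 old_speed = atomic64_read(&stats->current_port_speed);
	u64 new_speed = query_port_speed();

	atomic64_set(&stats->current_port_speed, new_speed);
	if (old_speed != new_speed)
		pr_info("port speed now %llu\n", new_speed);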
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index da4602b63495..2fb2731f50fb 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -254,7 +254,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
 		int vecs = n + m + o + 1;
 
 		if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs,
-				PCI_IRQ_MSIX) < 0) {
+				PCI_IRQ_MSIX) == vecs) {
 			fnic->rq_count = n;
 			fnic->raw_wq_count = m;
 			fnic->wq_copy_count = o;
@@ -280,7 +280,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
 	    fnic->wq_copy_count >= 1 &&
 	    fnic->cq_count >= 3 &&
 	    fnic->intr_count >= 1 &&
-	    pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) < 0) {
+	    pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
 		fnic->rq_count = 1;
 		fnic->raw_wq_count = 1;
 		fnic->wq_copy_count = 1;
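The fnic_isr.c fix corrects an inverted success test: pci_alloc_irq_vectors() returns the number of vectors allocated on success or a negative errno on failure, so with min_vecs == max_vecs the success condition is exactly "== vecs"; the old "< 0" comparison took the failure path as success. Usage sketch:

	int nvec = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSIX);

	if (nvec < 0)
		return nvec;	/* allocation failed */
	/* here nvec == vecs, since min_vecs and max_vecs were equal */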
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 9621831e17ba..a0d01aea28f7 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -453,7 +453,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
 		  (u64)atomic64_read(&stats->misc_stats.frame_errors));
 
 	len += snprintf(debug->debug_buffer + len, buf_size - len,
-		  "Firmware reported port seed: %llu\n",
+		  "Firmware reported port speed: %llu\n",
 		  (u64)atomic64_read(
 				&stats->misc_stats.current_port_speed));
 
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 42a02cc47a60..720c4d6be939 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -31,7 +31,13 @@
 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
 #define HISI_SAS_RESET_BIT	0
 #define HISI_SAS_REJECT_CMD_BIT	1
-#define HISI_SAS_RESERVED_IPTT_CNT  96
+#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS)
+#define HISI_SAS_RESERVED_IPTT	96
+#define HISI_SAS_UNRESERVED_IPTT \
+	(HISI_SAS_MAX_COMMANDS - HISI_SAS_RESERVED_IPTT)
+
+#define HISI_SAS_IOST_ITCT_CACHE_NUM 64
+#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10
 
 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
 #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -128,7 +134,6 @@ struct hisi_sas_rst {
 
 #define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
 	DECLARE_COMPLETION_ONSTACK(c); \
-	DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
 	struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
 
 enum hisi_sas_bit_err_type {
@@ -249,6 +254,22 @@ struct hisi_sas_debugfs_reg {
 	};
 };
 
+struct hisi_sas_iost_itct_cache {
+	u32 data[HISI_SAS_IOST_ITCT_CACHE_DW_SZ];
+};
+
+enum hisi_sas_debugfs_reg_array_member {
+	DEBUGFS_GLOBAL = 0,
+	DEBUGFS_AXI,
+	DEBUGFS_RAS,
+	DEBUGFS_REGS_NUM
+};
+
+enum hisi_sas_debugfs_cache_type {
+	HISI_SAS_ITCT_CACHE,
+	HISI_SAS_IOST_CACHE,
+};
+
 struct hisi_sas_hw {
 	int (*hw_init)(struct hisi_hba *hisi_hba);
 	void (*setup_itct)(struct hisi_hba *hisi_hba,
@@ -257,7 +278,6 @@ struct hisi_sas_hw {
 			struct domain_device *device);
 	struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
 	void (*sl_notify_ssp)(struct hisi_hba *hisi_hba, int phy_no);
-	int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
 	void (*start_delivery)(struct hisi_sas_dq *dq);
 	void (*prep_ssp)(struct hisi_hba *hisi_hba,
 			struct hisi_sas_slot *slot);
@@ -268,8 +288,6 @@ struct hisi_sas_hw {
 	void (*prep_abort)(struct hisi_hba *hisi_hba,
 			  struct hisi_sas_slot *slot,
 			  int device_id, int abort_flag, int tag_to_abort);
-	int (*slot_complete)(struct hisi_hba *hisi_hba,
-			     struct hisi_sas_slot *slot);
 	void (*phys_init)(struct hisi_hba *hisi_hba);
 	void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -288,15 +306,18 @@ struct hisi_sas_hw {
 	u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
 	int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
 				u8 reg_index, u8 reg_count, u8 *write_data);
-	int (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
-					  int delay_ms, int timeout_ms);
+	void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
+					   int delay_ms, int timeout_ms);
 	void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
 	void (*snapshot_restore)(struct hisi_hba *hisi_hba);
-	int max_command_entries;
+	int (*set_bist)(struct hisi_hba *hisi_hba, bool enable);
+	void (*read_iost_itct_cache)(struct hisi_hba *hisi_hba,
+				     enum hisi_sas_debugfs_cache_type type,
+				     u32 *cache);
 	int complete_hdr_size;
 	struct scsi_host_template *sht;
 
-	const struct hisi_sas_debugfs_reg *debugfs_reg_global;
+	const struct hisi_sas_debugfs_reg *debugfs_reg_array[DEBUGFS_REGS_NUM];
 	const struct hisi_sas_debugfs_reg *debugfs_reg_port;
 };
 
@@ -371,16 +392,28 @@ struct hisi_hba {
 	int cq_nvecs;
 	unsigned int *reply_map;
 
+	/* bist */
+	enum sas_linkrate debugfs_bist_linkrate;
+	int debugfs_bist_code_mode;
+	int debugfs_bist_phy_no;
+	int debugfs_bist_mode;
+	u32 debugfs_bist_cnt;
+	int debugfs_bist_enable;
+
 	/* debugfs memories */
-	u32 *debugfs_global_reg;
+	/* Put Global AXI and RAS Register into register array */
+	u32 *debugfs_regs[DEBUGFS_REGS_NUM];
 	u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS];
 	void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES];
 	struct hisi_sas_cmd_hdr	*debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES];
 	struct hisi_sas_iost *debugfs_iost;
 	struct hisi_sas_itct *debugfs_itct;
+	u64 *debugfs_iost_cache;
+	u64 *debugfs_itct_cache;
 
 	struct dentry *debugfs_dir;
 	struct dentry *debugfs_dump_dentry;
+	struct dentry *debugfs_bist_dentry;
 	bool debugfs_snapshot;
 };
 
@@ -533,7 +566,6 @@ extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
 extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
 extern void hisi_sas_sata_done(struct sas_task *task,
 			       struct hisi_sas_slot *slot);
-extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag);
 extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba);
 extern int hisi_sas_probe(struct platform_device *pdev,
 			  const struct hisi_sas_hw *ops);
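The renamed macros make the tag-space split explicit: the hardware exposes HISI_SAS_MAX_COMMANDS IPTTs per host, the top HISI_SAS_RESERVED_IPTT of them are held back for internally generated commands (TMFs, aborts), and only HISI_SAS_UNRESERVED_IPTT tags are handed to the block layer. The hisi_sas_main.c hunks below use the same boundary to decide which allocator owns a tag; a hypothetical helper makes the rule explicit (the 4000/96 arithmetic assumes HISI_SAS_QUEUE_SLOTS is 4096, an illustrative value only):

	/* e.g. 4096 total IPTTs - 96 reserved = 4000 block-layer tags */
	static bool tag_is_reserved(int slot_idx)	/* hypothetical helper */
	{
		return slot_idx >= HISI_SAS_UNRESERVED_IPTT;
	}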
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index cb746cfc2fa8..d1513fdf1e00 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -118,21 +118,6 @@ void hisi_sas_sata_done(struct sas_task *task,
 }
 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
 
-int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
-{
-	struct ata_queued_cmd *qc = task->uldd_task;
-
-	if (qc) {
-		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
-		    qc->tf.command == ATA_CMD_FPDMA_READ) {
-			*tag = qc->tag;
-			return 1;
-		}
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
-
 /*
  * This function assumes linkrate mask fits in 8 bits, which it
  * does for all HW versions supported.
@@ -180,8 +165,8 @@ static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
 {
 	unsigned long flags;
 
-	if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
-	    hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
+	if (hisi_hba->hw->slot_index_alloc ||
+	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
 		spin_lock_irqsave(&hisi_hba->lock, flags);
 		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
 		spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -211,8 +196,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
 	if (index >= hisi_hba->slot_index_count) {
 		index = find_next_zero_bit(bitmap,
 				hisi_hba->slot_index_count,
-				hisi_hba->hw->max_command_entries -
-				HISI_SAS_RESERVED_IPTT_CNT);
+				HISI_SAS_UNRESERVED_IPTT);
 		if (index >= hisi_hba->slot_index_count) {
 			spin_unlock_irqrestore(&hisi_hba->lock, flags);
 			return -SAS_QUEUE_FULL;
@@ -301,7 +285,7 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
 
 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
 			       struct sas_task *task, int n_elem,
-			       int n_elem_req, int n_elem_resp)
+			       int n_elem_req)
 {
 	struct device *dev = hisi_hba->dev;
 
@@ -315,16 +299,13 @@ static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
 			if (n_elem_req)
 				dma_unmap_sg(dev, &task->smp_task.smp_req,
 					     1, DMA_TO_DEVICE);
-			if (n_elem_resp)
-				dma_unmap_sg(dev, &task->smp_task.smp_resp,
-					     1, DMA_FROM_DEVICE);
 		}
 	}
 }
 
 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
 			    struct sas_task *task, int *n_elem,
-			    int *n_elem_req, int *n_elem_resp)
+			    int *n_elem_req)
 {
 	struct device *dev = hisi_hba->dev;
 	int rc;
@@ -332,7 +313,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
 	if (sas_protocol_ata(task->task_proto)) {
 		*n_elem = task->num_scatter;
 	} else {
-		unsigned int req_len, resp_len;
+		unsigned int req_len;
 
 		if (task->num_scatter) {
 			*n_elem = dma_map_sg(dev, task->scatter,
@@ -353,17 +334,6 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
 				rc = -EINVAL;
 				goto err_out_dma_unmap;
 			}
-			*n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
-						  1, DMA_FROM_DEVICE);
-			if (!*n_elem_resp) {
-				rc = -ENOMEM;
-				goto err_out_dma_unmap;
-			}
-			resp_len = sg_dma_len(&task->smp_task.smp_resp);
-			if (resp_len & 0x3) {
-				rc = -EINVAL;
-				goto err_out_dma_unmap;
-			}
 		}
 	}
 
@@ -378,7 +348,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
 err_out_dma_unmap:
 	/* It would be better to call dma_unmap_sg() here, but it's messy */
 	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
-			   *n_elem_req, *n_elem_resp);
+			   *n_elem_req);
 prep_out:
 	return rc;
 }
@@ -450,7 +420,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
 	struct asd_sas_port *sas_port = device->port;
 	struct device *dev = hisi_hba->dev;
 	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
-	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0;
+	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
 	struct hisi_sas_dq *dq;
 	unsigned long flags;
 	int wr_q_index;
@@ -486,7 +456,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
 	}
 
 	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
-			      &n_elem_req, &n_elem_resp);
+			      &n_elem_req);
 	if (rc < 0)
 		goto prep_out;
 
@@ -520,13 +490,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
 	slot = &hisi_hba->slot_info[slot_idx];
 
 	spin_lock_irqsave(&dq->lock, flags);
-	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
-	if (wr_q_index < 0) {
-		spin_unlock_irqrestore(&dq->lock, flags);
-		rc = -EAGAIN;
-		goto err_out_tag;
-	}
-
+	wr_q_index = dq->wr_point;
+	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
 	list_add_tail(&slot->delivery, &dq->list);
 	spin_unlock_irqrestore(&dq->lock, flags);
 	spin_lock_irqsave(&sas_dev->lock, flags);
@@ -551,7 +516,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
 
 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
-	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
+	memset(hisi_sas_status_buf_addr_mem(slot), 0,
+	       sizeof(struct hisi_sas_err_record));
 
 	switch (task->task_proto) {
 	case SAS_PROTOCOL_SMP:
@@ -580,14 +546,12 @@ static int hisi_sas_task_prep(struct sas_task *task,
 
 	return 0;
 
-err_out_tag:
-	hisi_sas_slot_index_free(hisi_hba, slot_idx);
 err_out_dif_dma_unmap:
 	if (!sas_protocol_ata(task->task_proto))
 		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
 err_out_dma_unmap:
 	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
-			   n_elem_req, n_elem_resp);
+			   n_elem_req);
 prep_out:
 	dev_err(dev, "task prep: failed[%d]!\n", rc);
 	return rc;
@@ -719,13 +683,13 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
 	return sas_dev;
 }
 
-#define HISI_SAS_SRST_ATA_DISK_CNT 3
+#define HISI_SAS_DISK_RECOVER_CNT 3
 static int hisi_sas_init_device(struct domain_device *device)
 {
 	int rc = TMF_RESP_FUNC_COMPLETE;
 	struct scsi_lun lun;
 	struct hisi_sas_tmf_task tmf_task;
-	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
+	int retry = HISI_SAS_DISK_RECOVER_CNT;
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 	struct device *dev = hisi_hba->dev;
 	struct sas_phy *local_phy;
@@ -735,10 +699,14 @@ static int hisi_sas_init_device(struct domain_device *device)
 		int_to_scsilun(0, &lun);
 
 		tmf_task.tmf = TMF_CLEAR_TASK_SET;
-		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
-						  &tmf_task);
-		if (rc == TMF_RESP_FUNC_COMPLETE)
-			hisi_sas_release_task(hisi_hba, device);
+		while (retry-- > 0) {
+			rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
+							  &tmf_task);
+			if (rc == TMF_RESP_FUNC_COMPLETE) {
+				hisi_sas_release_task(hisi_hba, device);
+				break;
+			}
+		}
 		break;
 	case SAS_SATA_DEV:
 	case SAS_SATA_PM:
@@ -1081,21 +1049,22 @@ static void hisi_sas_dev_gone(struct domain_device *device)
 	dev_info(dev, "dev[%d:%x] is gone\n",
 		 sas_dev->device_id, sas_dev->dev_type);
 
+	down(&hisi_hba->sem);
 	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
 		hisi_sas_internal_task_abort(hisi_hba, device,
 					     HISI_SAS_INT_ABT_DEV, 0);
 
 		hisi_sas_dereg_device(hisi_hba, device);
 
-		down(&hisi_hba->sem);
 		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
-		up(&hisi_hba->sem);
 		device->lldd_dev = NULL;
 	}
 
 	if (hisi_hba->hw->free_device)
 		hisi_hba->hw->free_device(sas_dev);
 	sas_dev->dev_type = SAS_PHY_UNUSED;
+	sas_dev->sas_device = NULL;
+	up(&hisi_hba->sem);
 }
 
 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
@@ -1423,8 +1392,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
 	}
 }
 
-static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
-				     u32 state)
+static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
 {
 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
 	struct asd_sas_port *_sas_port = NULL;
@@ -1576,16 +1544,16 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
1576 msleep(1000); 1544 msleep(1000);
1577 hisi_sas_refresh_port_id(hisi_hba); 1545 hisi_sas_refresh_port_id(hisi_hba);
1578 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1546 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1579 up(&hisi_hba->sem);
1580 1547
1581 if (hisi_hba->reject_stp_links_msk) 1548 if (hisi_hba->reject_stp_links_msk)
1582 hisi_sas_terminate_stp_reject(hisi_hba); 1549 hisi_sas_terminate_stp_reject(hisi_hba);
1583 hisi_sas_reset_init_all_devices(hisi_hba); 1550 hisi_sas_reset_init_all_devices(hisi_hba);
1551 up(&hisi_hba->sem);
1584 scsi_unblock_requests(shost); 1552 scsi_unblock_requests(shost);
1585 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1553 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
1586 1554
1587 state = hisi_hba->hw->get_phys_state(hisi_hba); 1555 state = hisi_hba->hw->get_phys_state(hisi_hba);
1588 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state); 1556 hisi_sas_rescan_topology(hisi_hba, state);
1589} 1557}
1590EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1558EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
1591 1559
@@ -1770,24 +1738,34 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1770 struct hisi_sas_device *sas_dev = device->lldd_dev; 1738 struct hisi_sas_device *sas_dev = device->lldd_dev;
1771 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1739 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1772 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1740 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1773 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
1774 struct hisi_sas_phy *phy = container_of(sas_phy,
1775 struct hisi_sas_phy, sas_phy);
1776 DECLARE_COMPLETION_ONSTACK(phyreset); 1741 DECLARE_COMPLETION_ONSTACK(phyreset);
1777 int rc, reset_type; 1742 int rc, reset_type;
1778 1743
1744 if (!local_phy->enabled) {
1745 sas_put_local_phy(local_phy);
1746 return -ENODEV;
1747 }
1748
1779 if (scsi_is_sas_phy_local(local_phy)) { 1749 if (scsi_is_sas_phy_local(local_phy)) {
1750 struct asd_sas_phy *sas_phy =
1751 sas_ha->sas_phy[local_phy->number];
1752 struct hisi_sas_phy *phy =
1753 container_of(sas_phy, struct hisi_sas_phy, sas_phy);
1780 phy->in_reset = 1; 1754 phy->in_reset = 1;
1781 phy->reset_completion = &phyreset; 1755 phy->reset_completion = &phyreset;
1782 } 1756 }
1783 1757
1784 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1758 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
1785 !dev_is_sata(device)) ? 1 : 0; 1759 !dev_is_sata(device)) ? true : false;
1786 1760
1787 rc = sas_phy_reset(local_phy, reset_type); 1761 rc = sas_phy_reset(local_phy, reset_type);
1788 sas_put_local_phy(local_phy); 1762 sas_put_local_phy(local_phy);
1789 1763
1790 if (scsi_is_sas_phy_local(local_phy)) { 1764 if (scsi_is_sas_phy_local(local_phy)) {
1765 struct asd_sas_phy *sas_phy =
1766 sas_ha->sas_phy[local_phy->number];
1767 struct hisi_sas_phy *phy =
1768 container_of(sas_phy, struct hisi_sas_phy, sas_phy);
1791 int ret = wait_for_completion_timeout(&phyreset, 2 * HZ); 1769 int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
1792 unsigned long flags; 1770 unsigned long flags;
1793 1771
@@ -1802,9 +1780,10 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1802 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { 1780 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
1803 /* 1781 /*
1804 * If in init state, we rely on caller to wait for link to be 1782 * If in init state, we rely on caller to wait for link to be
1805 * ready; otherwise, delay. 1783 * ready; otherwise, delay unless the phy reset failed.
1806 */ 1784 */
1807 msleep(2000); 1785 if (!rc)
1786 msleep(2000);
1808 } 1787 }
1809 1788
1810 return rc; 1789 return rc;
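Two behavioural changes land in hisi_sas_debug_I_T_nexus_reset(): a disabled local phy now fails fast with -ENODEV instead of issuing a pointless reset, and the trailing two-second link settle delay is skipped when the phy reset itself failed. A simplified, self-contained sketch of that control flow (the helpers are stand-ins, not the driver's):

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins. */
static int phy_enabled = 1;
static int phy_reset(int hard) { (void)hard; return 0; /* 0 == success */ }

static int i_t_nexus_reset(int dev_init, int is_sata)
{
    int rc;

    if (!phy_enabled)
        return -ENODEV; /* new early exit for a disabled phy */

    /* Hard reset for SAS devices, or anything still initialising. */
    rc = phy_reset(dev_init || !is_sata);

    /* Wait for the link only when the reset itself succeeded. */
    if (rc == 0 && !dev_init)
        puts("sleep 2s for link to come up");

    return rc;
}

int main(void)
{
    printf("rc=%d\n", i_t_nexus_reset(0, 1));
    return 0;
}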
@@ -1845,21 +1824,21 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1845 struct device *dev = hisi_hba->dev; 1824 struct device *dev = hisi_hba->dev;
1846 int rc = TMF_RESP_FUNC_FAILED; 1825 int rc = TMF_RESP_FUNC_FAILED;
1847 1826
1827 /* Clear internal IO and then lu reset */
1828 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1829 HISI_SAS_INT_ABT_DEV, 0);
1830 if (rc < 0) {
1831 dev_err(dev, "lu_reset: internal abort failed\n");
1832 goto out;
1833 }
1834 hisi_sas_dereg_device(hisi_hba, device);
1835
1848 if (dev_is_sata(device)) { 1836 if (dev_is_sata(device)) {
1849 struct sas_phy *phy; 1837 struct sas_phy *phy;
1850 1838
1851 /* Clear internal IO and then hardreset */
1852 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1853 HISI_SAS_INT_ABT_DEV, 0);
1854 if (rc < 0) {
1855 dev_err(dev, "lu_reset: internal abort failed\n");
1856 goto out;
1857 }
1858 hisi_sas_dereg_device(hisi_hba, device);
1859
1860 phy = sas_get_local_phy(device); 1839 phy = sas_get_local_phy(device);
1861 1840
1862 rc = sas_phy_reset(phy, 1); 1841 rc = sas_phy_reset(phy, true);
1863 1842
1864 if (rc == 0) 1843 if (rc == 0)
1865 hisi_sas_release_task(hisi_hba, device); 1844 hisi_sas_release_task(hisi_hba, device);
@@ -1867,14 +1846,6 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1867 } else { 1846 } else {
1868 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; 1847 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
1869 1848
1870 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1871 HISI_SAS_INT_ABT_DEV, 0);
1872 if (rc < 0) {
1873 dev_err(dev, "lu_reset: internal abort failed\n");
1874 goto out;
1875 }
1876 hisi_sas_dereg_device(hisi_hba, device);
1877
1878 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1849 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1879 if (rc == TMF_RESP_FUNC_COMPLETE) 1850 if (rc == TMF_RESP_FUNC_COMPLETE)
1880 hisi_sas_release_task(hisi_hba, device); 1851 hisi_sas_release_task(hisi_hba, device);
@@ -1964,7 +1935,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1964 struct asd_sas_port *sas_port = device->port; 1935 struct asd_sas_port *sas_port = device->port;
1965 struct hisi_sas_cmd_hdr *cmd_hdr_base; 1936 struct hisi_sas_cmd_hdr *cmd_hdr_base;
1966 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 1937 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1967 unsigned long flags, flags_dq = 0; 1938 unsigned long flags;
1968 int wr_q_index; 1939 int wr_q_index;
1969 1940
1970 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 1941 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
@@ -1983,15 +1954,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1983 slot_idx = rc; 1954 slot_idx = rc;
1984 slot = &hisi_hba->slot_info[slot_idx]; 1955 slot = &hisi_hba->slot_info[slot_idx];
1985 1956
1986 spin_lock_irqsave(&dq->lock, flags_dq); 1957 spin_lock_irqsave(&dq->lock, flags);
1987 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); 1958 wr_q_index = dq->wr_point;
1988 if (wr_q_index < 0) { 1959 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
1989 spin_unlock_irqrestore(&dq->lock, flags_dq);
1990 rc = -EAGAIN;
1991 goto err_out_tag;
1992 }
1993 list_add_tail(&slot->delivery, &dq->list); 1960 list_add_tail(&slot->delivery, &dq->list);
1994 spin_unlock_irqrestore(&dq->lock, flags_dq); 1961 spin_unlock_irqrestore(&dq->lock, flags);
1995 spin_lock_irqsave(&sas_dev->lock, flags); 1962 spin_lock_irqsave(&sas_dev->lock, flags);
1996 list_add_tail(&slot->entry, &sas_dev->list); 1963 list_add_tail(&slot->entry, &sas_dev->list);
1997 spin_unlock_irqrestore(&sas_dev->lock, flags); 1964 spin_unlock_irqrestore(&sas_dev->lock, flags);
@@ -2012,7 +1979,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
2012 1979
2013 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 1980 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
2014 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 1981 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
2015 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 1982 memset(hisi_sas_status_buf_addr_mem(slot), 0,
1983 sizeof(struct hisi_sas_err_record));
2016 1984
2017 hisi_sas_task_prep_abort(hisi_hba, slot, device_id, 1985 hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
2018 abort_flag, task_tag); 1986 abort_flag, task_tag);
@@ -2028,8 +1996,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
2028 1996
2029 return 0; 1997 return 0;
2030 1998
2031err_out_tag:
2032 hisi_sas_slot_index_free(hisi_hba, slot_idx);
2033err_out: 1999err_out:
2034 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); 2000 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
2035 2001
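With the slot-index (IPTT) allocator now bounding the number of in-flight commands, the delivery queue can never be full when a command reaches this point, so the per-hardware get_free_slot() callback and its -EAGAIN path are dropped in favour of advancing dq->wr_point inline under dq->lock. A standalone model of the new slot claim (QUEUE_SLOTS mirrors HISI_SAS_QUEUE_SLOTS):

#include <pthread.h>
#include <stdio.h>

#define QUEUE_SLOTS 512 /* mirrors HISI_SAS_QUEUE_SLOTS */

struct dq {
    pthread_mutex_t lock;
    unsigned int wr_point;
};

/*
 * Claim the next delivery-queue slot. The caller already holds an
 * IPTT, which bounds the number of in-flight commands, so the ring
 * cannot be full here and no read-pointer check is needed.
 */
static unsigned int claim_slot(struct dq *dq)
{
    unsigned int w;

    pthread_mutex_lock(&dq->lock);
    w = dq->wr_point;
    dq->wr_point = (dq->wr_point + 1) % QUEUE_SLOTS;
    pthread_mutex_unlock(&dq->lock);
    return w;
}

int main(void)
{
    struct dq dq = { PTHREAD_MUTEX_INITIALIZER, 510 };
    int i;

    for (i = 0; i < 4; i++)
        printf("slot %u\n", claim_slot(&dq)); /* 510 511 0 1 */
    return 0;
}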
@@ -2089,6 +2055,9 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
2089 2055
2090 /* Internal abort timed out */ 2056 /* Internal abort timed out */
2091 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 2057 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
2058 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
2059 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
2060
2092 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 2061 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
2093 struct hisi_sas_slot *slot = task->lldd_task; 2062 struct hisi_sas_slot *slot = task->lldd_task;
2094 2063
@@ -2123,7 +2092,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
2123 } 2092 }
2124 2093
2125exit: 2094exit:
2126 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n", 2095 dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
2127 SAS_ADDR(device->sas_addr), task, 2096 SAS_ADDR(device->sas_addr), task,
2128 task->task_status.resp, /* 0 is complete, -1 is undelivered */ 2097 task->task_status.resp, /* 0 is complete, -1 is undelivered */
2129 task->task_status.stat); 2098 task->task_status.stat);
@@ -2291,7 +2260,7 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
2291 2260
2292void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2261void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
2293{ 2262{
2294 int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries; 2263 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
2295 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2264 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;
2296 2265
2297 for (i = 0; i < hisi_hba->queue_count; i++) { 2266 for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -2328,7 +2297,7 @@ EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
2328int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2297int hisi_sas_alloc(struct hisi_hba *hisi_hba)
2329{ 2298{
2330 struct device *dev = hisi_hba->dev; 2299 struct device *dev = hisi_hba->dev;
2331 int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries; 2300 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
2332 int max_command_entries_ru, sz_slot_buf_ru; 2301 int max_command_entries_ru, sz_slot_buf_ru;
2333 int blk_cnt, slots_per_blk; 2302 int blk_cnt, slots_per_blk;
2334 2303
@@ -2379,7 +2348,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
2379 2348
2380 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2349 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
2381 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2350 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
2382 GFP_KERNEL | __GFP_ZERO); 2351 GFP_KERNEL);
2383 if (!hisi_hba->itct) 2352 if (!hisi_hba->itct)
2384 goto err_out; 2353 goto err_out;
2385 2354
@@ -2396,7 +2365,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
2396 else 2365 else
2397 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2366 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
2398 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2367 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
2399 s = lcm(max_command_entries_ru, sz_slot_buf_ru); 2368 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
2400 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2369 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
2401 slots_per_blk = s / sz_slot_buf_ru; 2370 slots_per_blk = s / sz_slot_buf_ru;
2402 2371
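The sizing change clamps the DMA block size to at least PAGE_SIZE while keeping the lcm() property that each block holds a whole number of 64-byte-aligned slot buffers and that the blocks together cover every command entry exactly. A worked example with assumed sizes (the real values depend on the hardware generation and protection capabilities):

#include <stdio.h>

/* Assumed example inputs. */
#define MAX_CMDS    4096UL  /* stand-in for HISI_SAS_MAX_COMMANDS */
#define SLOT_BUF_SZ 4576UL  /* stand-in for the per-slot buffer size */
#define PAGE_SZ     4096UL

static unsigned long roundup64(unsigned long v)
{
    return (v + 63) & ~63UL;
}

static unsigned long gcd(unsigned long a, unsigned long b)
{
    while (b) {
        unsigned long t = a % b;
        a = b;
        b = t;
    }
    return a;
}

int main(void)
{
    unsigned long a = roundup64(MAX_CMDS);    /* entries, 64-aligned */
    unsigned long b = roundup64(SLOT_BUF_SZ); /* bytes/slot, aligned */
    unsigned long s = a / gcd(a, b) * b;      /* lcm(a, b) */

    if (s < PAGE_SZ)    /* the new max(..., PAGE_SIZE) clamp */
        s = PAGE_SZ;

    printf("block size  %lu\n", s);           /* 36864 */
    printf("block count %lu\n", (a * b) / s); /* 512   */
    printf("slots/block %lu\n", s / b);       /* 8     */
    return 0;
}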
@@ -2406,7 +2375,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
2406 void *buf; 2375 void *buf;
2407 2376
2408 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2377 buf = dmam_alloc_coherent(dev, s, &buf_dma,
2409 GFP_KERNEL | __GFP_ZERO); 2378 GFP_KERNEL);
2410 if (!buf) 2379 if (!buf)
2411 goto err_out; 2380 goto err_out;
2412 2381
@@ -2455,11 +2424,9 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
2455 GFP_KERNEL); 2424 GFP_KERNEL);
2456 if (!hisi_hba->sata_breakpoint) 2425 if (!hisi_hba->sata_breakpoint)
2457 goto err_out; 2426 goto err_out;
2458 hisi_sas_init_mem(hisi_hba);
2459 2427
2460 hisi_sas_slot_index_init(hisi_hba); 2428 hisi_sas_slot_index_init(hisi_hba);
2461 hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries - 2429 hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;
2462 HISI_SAS_RESERVED_IPTT_CNT;
2463 2430
2464 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2431 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
2465 if (!hisi_hba->wq) { 2432 if (!hisi_hba->wq) {
@@ -2610,8 +2577,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2610 goto err_out; 2577 goto err_out;
2611 } 2578 }
2612 2579
2613 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2580 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
2614 hisi_hba->regs = devm_ioremap_resource(dev, res);
2615 if (IS_ERR(hisi_hba->regs)) 2581 if (IS_ERR(hisi_hba->regs))
2616 goto err_out; 2582 goto err_out;
2617 2583
@@ -2672,13 +2638,11 @@ int hisi_sas_probe(struct platform_device *pdev,
2672 shost->max_channel = 1; 2638 shost->max_channel = 1;
2673 shost->max_cmd_len = 16; 2639 shost->max_cmd_len = 16;
2674 if (hisi_hba->hw->slot_index_alloc) { 2640 if (hisi_hba->hw->slot_index_alloc) {
2675 shost->can_queue = hisi_hba->hw->max_command_entries; 2641 shost->can_queue = HISI_SAS_MAX_COMMANDS;
2676 shost->cmd_per_lun = hisi_hba->hw->max_command_entries; 2642 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
2677 } else { 2643 } else {
2678 shost->can_queue = hisi_hba->hw->max_command_entries - 2644 shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
2679 HISI_SAS_RESERVED_IPTT_CNT; 2645 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
2680 shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
2681 HISI_SAS_RESERVED_IPTT_CNT;
2682 } 2646 }
2683 2647
2684 sha->sas_ha_name = DRV_NAME; 2648 sha->sas_ha_name = DRV_NAME;
@@ -2769,21 +2733,52 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
2769 2733
2770static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba) 2734static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
2771{ 2735{
2772 u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg; 2736 u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_GLOBAL];
2737 const struct hisi_sas_hw *hw = hisi_hba->hw;
2773 const struct hisi_sas_debugfs_reg *global = 2738 const struct hisi_sas_debugfs_reg *global =
2774 hisi_hba->hw->debugfs_reg_global; 2739 hw->debugfs_reg_array[DEBUGFS_GLOBAL];
2775 int i; 2740 int i;
2776 2741
2777 for (i = 0; i < global->count; i++, databuf++) 2742 for (i = 0; i < global->count; i++, databuf++)
2778 *databuf = global->read_global_reg(hisi_hba, 4 * i); 2743 *databuf = global->read_global_reg(hisi_hba, 4 * i);
2779} 2744}
2780 2745
2746static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
2747{
2748 u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_AXI];
2749 const struct hisi_sas_hw *hw = hisi_hba->hw;
2750 const struct hisi_sas_debugfs_reg *axi =
2751 hw->debugfs_reg_array[DEBUGFS_AXI];
2752 int i;
2753
2754 for (i = 0; i < axi->count; i++, databuf++)
2755 *databuf = axi->read_global_reg(hisi_hba,
2756 4 * i + axi->base_off);
2757}
2758
2759static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
2760{
2761 u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_RAS];
2762 const struct hisi_sas_hw *hw = hisi_hba->hw;
2763 const struct hisi_sas_debugfs_reg *ras =
2764 hw->debugfs_reg_array[DEBUGFS_RAS];
2765 int i;
2766
2767 for (i = 0; i < ras->count; i++, databuf++)
2768 *databuf = ras->read_global_reg(hisi_hba,
2769 4 * i + ras->base_off);
2770}
2771
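The two new snapshot helpers reuse the global register accessor but bias every read by the window's base_off, so a single read routine can dump the global, AXI and RAS register ranges alike. A standalone sketch of that windowed-dump idea against a fake register file:

#include <stdint.h>
#include <stdio.h>

/* Fake register file standing in for the controller's MMIO space. */
static uint32_t regs[64];

static uint32_t read_reg(unsigned int off)
{
    return regs[off / 4];
}

struct reg_window {
    unsigned int base_off; /* byte offset of the window */
    unsigned int count;    /* number of 32-bit registers */
};

/* Snapshot one window: count registers starting at base_off. */
static void snapshot(const struct reg_window *w, uint32_t *buf)
{
    unsigned int i;

    for (i = 0; i < w->count; i++)
        buf[i] = read_reg(w->base_off + 4 * i);
}

int main(void)
{
    struct reg_window axi = { .base_off = 0x80, .count = 4 };
    uint32_t buf[4];
    unsigned int i;

    for (i = 0; i < 64; i++)
        regs[i] = i;
    snapshot(&axi, buf);
    for (i = 0; i < axi.count; i++)
        printf("0x%02x: %u\n", axi.base_off + 4 * i, buf[i]);
    return 0;
}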
2781static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba) 2772static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
2782{ 2773{
2774 void *cachebuf = hisi_hba->debugfs_itct_cache;
2783 void *databuf = hisi_hba->debugfs_itct; 2775 void *databuf = hisi_hba->debugfs_itct;
2784 struct hisi_sas_itct *itct; 2776 struct hisi_sas_itct *itct;
2785 int i; 2777 int i;
2786 2778
2779 hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_ITCT_CACHE,
2780 cachebuf);
2781
2787 itct = hisi_hba->itct; 2782 itct = hisi_hba->itct;
2788 2783
2789 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { 2784 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
@@ -2794,11 +2789,15 @@ static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
2794 2789
2795static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba) 2790static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
2796{ 2791{
2797 int max_command_entries = hisi_hba->hw->max_command_entries; 2792 int max_command_entries = HISI_SAS_MAX_COMMANDS;
2793 void *cachebuf = hisi_hba->debugfs_iost_cache;
2798 void *databuf = hisi_hba->debugfs_iost; 2794 void *databuf = hisi_hba->debugfs_iost;
2799 struct hisi_sas_iost *iost; 2795 struct hisi_sas_iost *iost;
2800 int i; 2796 int i;
2801 2797
2798 hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_IOST_CACHE,
2799 cachebuf);
2800
2802 iost = hisi_hba->iost; 2801 iost = hisi_hba->iost;
2803 2802
2804 for (i = 0; i < max_command_entries; i++, iost++) { 2803 for (i = 0; i < max_command_entries; i++, iost++) {
@@ -2845,9 +2844,9 @@ static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
2845{ 2844{
2846 struct hisi_hba *hisi_hba = s->private; 2845 struct hisi_hba *hisi_hba = s->private;
2847 const struct hisi_sas_hw *hw = hisi_hba->hw; 2846 const struct hisi_sas_hw *hw = hisi_hba->hw;
2848 const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global; 2847 const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
2849 2848
2850 hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg, 2849 hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_GLOBAL],
2851 reg_global, s); 2850 reg_global, s);
2852 2851
2853 return 0; 2852 return 0;
@@ -2867,6 +2866,58 @@ static const struct file_operations hisi_sas_debugfs_global_fops = {
2867 .owner = THIS_MODULE, 2866 .owner = THIS_MODULE,
2868}; 2867};
2869 2868
2869static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
2870{
2871 struct hisi_hba *hisi_hba = s->private;
2872 const struct hisi_sas_hw *hw = hisi_hba->hw;
2873 const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
2874
2875 hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_AXI],
2876 reg_axi, s);
2877
2878 return 0;
2879}
2880
2881static int hisi_sas_debugfs_axi_open(struct inode *inode, struct file *filp)
2882{
2883 return single_open(filp, hisi_sas_debugfs_axi_show,
2884 inode->i_private);
2885}
2886
2887static const struct file_operations hisi_sas_debugfs_axi_fops = {
2888 .open = hisi_sas_debugfs_axi_open,
2889 .read = seq_read,
2890 .llseek = seq_lseek,
2891 .release = single_release,
2892 .owner = THIS_MODULE,
2893};
2894
2895static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
2896{
2897 struct hisi_hba *hisi_hba = s->private;
2898 const struct hisi_sas_hw *hw = hisi_hba->hw;
2899 const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
2900
2901 hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_RAS],
2902 reg_ras, s);
2903
2904 return 0;
2905}
2906
2907static int hisi_sas_debugfs_ras_open(struct inode *inode, struct file *filp)
2908{
2909 return single_open(filp, hisi_sas_debugfs_ras_show,
2910 inode->i_private);
2911}
2912
2913static const struct file_operations hisi_sas_debugfs_ras_fops = {
2914 .open = hisi_sas_debugfs_ras_open,
2915 .read = seq_read,
2916 .llseek = seq_lseek,
2917 .release = single_release,
2918 .owner = THIS_MODULE,
2919};
2920
2870static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p) 2921static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
2871{ 2922{
2872 struct hisi_sas_phy *phy = s->private; 2923 struct hisi_sas_phy *phy = s->private;
@@ -2893,8 +2944,8 @@ static const struct file_operations hisi_sas_debugfs_port_fops = {
2893 .owner = THIS_MODULE, 2944 .owner = THIS_MODULE,
2894}; 2945};
2895 2946
2896static int hisi_sas_show_row_64(struct seq_file *s, int index, 2947static void hisi_sas_show_row_64(struct seq_file *s, int index,
2897 int sz, __le64 *ptr) 2948 int sz, __le64 *ptr)
2898{ 2949{
2899 int i; 2950 int i;
2900 2951
@@ -2907,12 +2958,10 @@ static int hisi_sas_show_row_64(struct seq_file *s, int index,
2907 } 2958 }
2908 2959
2909 seq_puts(s, "\n"); 2960 seq_puts(s, "\n");
2910
2911 return 0;
2912} 2961}
2913 2962
2914static int hisi_sas_show_row_32(struct seq_file *s, int index, 2963static void hisi_sas_show_row_32(struct seq_file *s, int index,
2915 int sz, __le32 *ptr) 2964 int sz, __le32 *ptr)
2916{ 2965{
2917 int i; 2966 int i;
2918 2967
@@ -2924,11 +2973,9 @@ static int hisi_sas_show_row_32(struct seq_file *s, int index,
2924 seq_puts(s, "\n\t"); 2973 seq_puts(s, "\n\t");
2925 } 2974 }
2926 seq_puts(s, "\n"); 2975 seq_puts(s, "\n");
2927
2928 return 0;
2929} 2976}
2930 2977
2931static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr) 2978static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
2932{ 2979{
2933 struct hisi_sas_cq *cq = cq_ptr; 2980 struct hisi_sas_cq *cq = cq_ptr;
2934 struct hisi_hba *hisi_hba = cq->hisi_hba; 2981 struct hisi_hba *hisi_hba = cq->hisi_hba;
@@ -2936,20 +2983,18 @@ static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
2936 __le32 *complete_hdr = complete_queue + 2983 __le32 *complete_hdr = complete_queue +
2937 (hisi_hba->hw->complete_hdr_size * slot); 2984 (hisi_hba->hw->complete_hdr_size * slot);
2938 2985
2939 return hisi_sas_show_row_32(s, slot, 2986 hisi_sas_show_row_32(s, slot,
2940 hisi_hba->hw->complete_hdr_size, 2987 hisi_hba->hw->complete_hdr_size,
2941 complete_hdr); 2988 complete_hdr);
2942} 2989}
2943 2990
2944static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p) 2991static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
2945{ 2992{
2946 struct hisi_sas_cq *cq = s->private; 2993 struct hisi_sas_cq *cq = s->private;
2947 int slot, ret; 2994 int slot;
2948 2995
2949 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { 2996 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
2950 ret = hisi_sas_cq_show_slot(s, slot, cq); 2997 hisi_sas_cq_show_slot(s, slot, cq);
2951 if (ret)
2952 return ret;
2953 } 2998 }
2954 return 0; 2999 return 0;
2955} 3000}
@@ -2967,7 +3012,7 @@ static const struct file_operations hisi_sas_debugfs_cq_fops = {
2967 .owner = THIS_MODULE, 3012 .owner = THIS_MODULE,
2968}; 3013};
2969 3014
2970static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr) 3015static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
2971{ 3016{
2972 struct hisi_sas_dq *dq = dq_ptr; 3017 struct hisi_sas_dq *dq = dq_ptr;
2973 struct hisi_hba *hisi_hba = dq->hisi_hba; 3018 struct hisi_hba *hisi_hba = dq->hisi_hba;
@@ -2975,18 +3020,15 @@ static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
2975 __le32 *cmd_hdr = cmd_queue + 3020 __le32 *cmd_hdr = cmd_queue +
2976 sizeof(struct hisi_sas_cmd_hdr) * slot; 3021 sizeof(struct hisi_sas_cmd_hdr) * slot;
2977 3022
2978 return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), 3023 hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr);
2979 cmd_hdr);
2980} 3024}
2981 3025
2982static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p) 3026static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
2983{ 3027{
2984 int slot, ret; 3028 int slot;
2985 3029
2986 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { 3030 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
2987 ret = hisi_sas_dq_show_slot(s, slot, s->private); 3031 hisi_sas_dq_show_slot(s, slot, s->private);
2988 if (ret)
2989 return ret;
2990 } 3032 }
2991 return 0; 3033 return 0;
2992} 3034}
@@ -3008,14 +3050,12 @@ static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
3008{ 3050{
3009 struct hisi_hba *hisi_hba = s->private; 3051 struct hisi_hba *hisi_hba = s->private;
3010 struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost; 3052 struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
3011 int i, ret, max_command_entries = hisi_hba->hw->max_command_entries; 3053 int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
3012 __le64 *iost = &debugfs_iost->qw0;
3013 3054
3014 for (i = 0; i < max_command_entries; i++, debugfs_iost++) { 3055 for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
3015 ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), 3056 __le64 *iost = &debugfs_iost->qw0;
3016 iost); 3057
3017 if (ret) 3058 hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), iost);
3018 return ret;
3019 } 3059 }
3020 3060
3021 return 0; 3061 return 0;
@@ -3034,18 +3074,56 @@ static const struct file_operations hisi_sas_debugfs_iost_fops = {
3034 .owner = THIS_MODULE, 3074 .owner = THIS_MODULE,
3035}; 3075};
3036 3076
3077static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
3078{
3079 struct hisi_hba *hisi_hba = s->private;
3080 struct hisi_sas_iost_itct_cache *iost_cache =
3081 (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_iost_cache;
3082 u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
3083 int i, tab_idx;
3084 __le64 *iost;
3085
3086 for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
3087 /*
3088 * Data struct of IOST cache:
3089 * Data[1]: BIT0~15: Table index
3090 * Bit16: Valid mask
3091 * Data[2]~[9]: IOST table
3092 */
3093 tab_idx = (iost_cache->data[1] & 0xffff);
3094 iost = (__le64 *)iost_cache;
3095
3096 hisi_sas_show_row_64(s, tab_idx, cache_size, iost);
3097 }
3098
3099 return 0;
3100}
3101
3102static int hisi_sas_debugfs_iost_cache_open(struct inode *inode,
3103 struct file *filp)
3104{
3105 return single_open(filp, hisi_sas_debugfs_iost_cache_show,
3106 inode->i_private);
3107}
3108
3109static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
3110 .open = hisi_sas_debugfs_iost_cache_open,
3111 .read = seq_read,
3112 .llseek = seq_lseek,
3113 .release = single_release,
3114 .owner = THIS_MODULE,
3115};
3116
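Per the layout comment above, each cached IOST/ITCT entry carries its own table index in data[1] bits 15:0 and a valid flag in bit 16, which is why the dump keys rows by tab_idx rather than by loop position. A tiny self-contained decode (CACHE_DW_SZ is an assumed stand-in for HISI_SAS_IOST_ITCT_CACHE_DW_SZ):

#include <stdint.h>
#include <stdio.h>

#define CACHE_DW_SZ 10  /* assumed stand-in for the real DW count */

/*
 * Layout per the driver comment: data[1] bits 15:0 hold the table
 * index and bit 16 the valid flag; data[2..9] hold the table entry.
 */
struct cache_line {
    uint32_t data[CACHE_DW_SZ];
};

int main(void)
{
    struct cache_line c = { .data = { 0, 0x1002a } }; /* valid, idx 42 */
    unsigned int tab_idx = c.data[1] & 0xffff;
    unsigned int valid = (c.data[1] >> 16) & 0x1;

    printf("index %u valid %u\n", tab_idx, valid);
    return 0;
}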
3037static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p) 3117static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
3038{ 3118{
3039 int i, ret; 3119 int i;
3040 struct hisi_hba *hisi_hba = s->private; 3120 struct hisi_hba *hisi_hba = s->private;
3041 struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct; 3121 struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
3042 __le64 *itct = &debugfs_itct->qw0;
3043 3122
3044 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) { 3123 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
3045 ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), 3124 __le64 *itct = &debugfs_itct->qw0;
3046 itct); 3125
3047 if (ret) 3126 hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), itct);
3048 return ret;
3049 } 3127 }
3050 3128
3051 return 0; 3129 return 0;
@@ -3064,6 +3142,46 @@ static const struct file_operations hisi_sas_debugfs_itct_fops = {
3064 .owner = THIS_MODULE, 3142 .owner = THIS_MODULE,
3065}; 3143};
3066 3144
3145static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
3146{
3147 struct hisi_hba *hisi_hba = s->private;
3148 struct hisi_sas_iost_itct_cache *itct_cache =
3149 (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_itct_cache;
3150 u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
3151 int i, tab_idx;
3152 __le64 *itct;
3153
3154 for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
3155 /*
3156 * Data struct of ITCT cache:
3157 * Data[1]: BIT0~15: Table index
3158 * Bit16: Valid mask
3159 * Data[2]~[9]: ITCT table
3160 */
3161 tab_idx = itct_cache->data[1] & 0xffff;
3162 itct = (__le64 *)itct_cache;
3163
3164 hisi_sas_show_row_64(s, tab_idx, cache_size, itct);
3165 }
3166
3167 return 0;
3168}
3169
3170static int hisi_sas_debugfs_itct_cache_open(struct inode *inode,
3171 struct file *filp)
3172{
3173 return single_open(filp, hisi_sas_debugfs_itct_cache_show,
3174 inode->i_private);
3175}
3176
3177static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
3178 .open = hisi_sas_debugfs_itct_cache_open,
3179 .read = seq_read,
3180 .llseek = seq_lseek,
3181 .release = single_release,
3182 .owner = THIS_MODULE,
3183};
3184
3067static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba) 3185static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
3068{ 3186{
3069 struct dentry *dump_dentry; 3187 struct dentry *dump_dentry;
@@ -3110,9 +3228,21 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
3110 debugfs_create_file("iost", 0400, dump_dentry, hisi_hba, 3228 debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
3111 &hisi_sas_debugfs_iost_fops); 3229 &hisi_sas_debugfs_iost_fops);
3112 3230
3231 debugfs_create_file("iost_cache", 0400, dump_dentry, hisi_hba,
3232 &hisi_sas_debugfs_iost_cache_fops);
3233
3113 debugfs_create_file("itct", 0400, dump_dentry, hisi_hba, 3234 debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
3114 &hisi_sas_debugfs_itct_fops); 3235 &hisi_sas_debugfs_itct_fops);
3115 3236
3237 debugfs_create_file("itct_cache", 0400, dump_dentry, hisi_hba,
3238 &hisi_sas_debugfs_itct_cache_fops);
3239
3240 debugfs_create_file("axi", 0400, dump_dentry, hisi_hba,
3241 &hisi_sas_debugfs_axi_fops);
3242
3243 debugfs_create_file("ras", 0400, dump_dentry, hisi_hba,
3244 &hisi_sas_debugfs_ras_fops);
3245
3116 return; 3246 return;
3117} 3247}
3118 3248
@@ -3122,6 +3252,8 @@ static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
3122 3252
3123 hisi_sas_debugfs_snapshot_global_reg(hisi_hba); 3253 hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
3124 hisi_sas_debugfs_snapshot_port_reg(hisi_hba); 3254 hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
3255 hisi_sas_debugfs_snapshot_axi_reg(hisi_hba);
3256 hisi_sas_debugfs_snapshot_ras_reg(hisi_hba);
3125 hisi_sas_debugfs_snapshot_cq_reg(hisi_hba); 3257 hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
3126 hisi_sas_debugfs_snapshot_dq_reg(hisi_hba); 3258 hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
3127 hisi_sas_debugfs_snapshot_itct_reg(hisi_hba); 3259 hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
@@ -3162,6 +3294,382 @@ static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
3162 .owner = THIS_MODULE, 3294 .owner = THIS_MODULE,
3163}; 3295};
3164 3296
3297enum {
3298 HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
3299 HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
3300 HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
3301};
3302
3303enum {
3304 HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
3305 HISI_SAS_BIST_CODE_MODE_PRBS23,
3306 HISI_SAS_BIST_CODE_MODE_PRBS31,
3307 HISI_SAS_BIST_CODE_MODE_JTPAT,
3308 HISI_SAS_BIST_CODE_MODE_CJTPAT,
3309 HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
3310 HISI_SAS_BIST_CODE_MODE_TRAIN,
3311 HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
3312 HISI_SAS_BIST_CODE_MODE_HFTP,
3313 HISI_SAS_BIST_CODE_MODE_MFTP,
3314 HISI_SAS_BIST_CODE_MODE_LFTP,
3315 HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
3316};
3317
3318static const struct {
3319 int value;
3320 char *name;
3321} hisi_sas_debugfs_loop_linkrate[] = {
3322 { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
3323 { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
3324 { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
3325 { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
3326};
3327
3328static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p)
3329{
3330 struct hisi_hba *hisi_hba = s->private;
3331 int i;
3332
3333 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
3334 int match = (hisi_hba->debugfs_bist_linkrate ==
3335 hisi_sas_debugfs_loop_linkrate[i].value);
3336
3337 seq_printf(s, "%s%s%s ", match ? "[" : "",
3338 hisi_sas_debugfs_loop_linkrate[i].name,
3339 match ? "]" : "");
3340 }
3341 seq_puts(s, "\n");
3342
3343 return 0;
3344}
3345
3346static ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp,
3347 const char __user *buf,
3348 size_t count, loff_t *ppos)
3349{
3350 struct seq_file *m = filp->private_data;
3351 struct hisi_hba *hisi_hba = m->private;
3352 char kbuf[16] = {}, *pkbuf;
3353 bool found = false;
3354 int i;
3355
3356 if (hisi_hba->debugfs_bist_enable)
3357 return -EPERM;
3358
3359 if (count >= sizeof(kbuf))
3360 return -EOVERFLOW;
3361
3362 if (copy_from_user(kbuf, buf, count))
3363 return -EFAULT;
3364
3365 pkbuf = strstrip(kbuf);
3366
3367 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
3368 if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name,
3369 pkbuf, 16)) {
3370 hisi_hba->debugfs_bist_linkrate =
3371 hisi_sas_debugfs_loop_linkrate[i].value;
3372 found = true;
3373 break;
3374 }
3375 }
3376
3377 if (!found)
3378 return -EINVAL;
3379
3380 return count;
3381}
3382
3383static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode,
3384 struct file *filp)
3385{
3386 return single_open(filp, hisi_sas_debugfs_bist_linkrate_show,
3387 inode->i_private);
3388}
3389
3390static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = {
3391 .open = hisi_sas_debugfs_bist_linkrate_open,
3392 .read = seq_read,
3393 .write = hisi_sas_debugfs_bist_linkrate_write,
3394 .llseek = seq_lseek,
3395 .release = single_release,
3396 .owner = THIS_MODULE,
3397};
3398
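All three BIST attribute writers follow the same shape: refuse changes while BIST is enabled, do a bounded copy from userspace, strstrip() the input, then match it linearly against a name/value table. A standalone userspace sketch of the token-matching core (the enum values are illustrative):

#include <stdio.h>
#include <string.h>

struct opt {
    int value;
    const char *name;
};

/* Illustrative values; the driver uses the SAS_LINK_RATE_* enums. */
static const struct opt linkrates[] = {
    { 8,  "1.5 Gbit"  },
    { 9,  "3.0 Gbit"  },
    { 10, "6.0 Gbit"  },
    { 11, "12.0 Gbit" },
};

/* Match a stripped token against the table; -1 if unknown. */
static int parse_opt(const struct opt *t, size_t n, const char *tok)
{
    size_t i;

    for (i = 0; i < n; i++)
        if (!strncmp(t[i].name, tok, 16))
            return t[i].value;
    return -1;
}

int main(void)
{
    char buf[16] = "6.0 Gbit\n";

    buf[strcspn(buf, "\n")] = '\0'; /* crude strstrip() */
    printf("value %d\n",
           parse_opt(linkrates, sizeof(linkrates) / sizeof(*linkrates),
                     buf));
    return 0;
}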
3399static const struct {
3400 int value;
3401 char *name;
3402} hisi_sas_debugfs_loop_code_mode[] = {
3403 { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
3404 { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
3405 { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
3406 { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
3407 { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
3408 { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
3409 { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
3410 { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
3411 { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
3412 { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
3413 { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
3414 { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
3415};
3416
3417static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p)
3418{
3419 struct hisi_hba *hisi_hba = s->private;
3420 int i;
3421
3422 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
3423 int match = (hisi_hba->debugfs_bist_code_mode ==
3424 hisi_sas_debugfs_loop_code_mode[i].value);
3425
3426 seq_printf(s, "%s%s%s ", match ? "[" : "",
3427 hisi_sas_debugfs_loop_code_mode[i].name,
3428 match ? "]" : "");
3429 }
3430 seq_puts(s, "\n");
3431
3432 return 0;
3433}
3434
3435static ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp,
3436 const char __user *buf,
3437 size_t count,
3438 loff_t *ppos)
3439{
3440 struct seq_file *m = filp->private_data;
3441 struct hisi_hba *hisi_hba = m->private;
3442 char kbuf[16] = {}, *pkbuf;
3443 bool found = false;
3444 int i;
3445
3446 if (hisi_hba->debugfs_bist_enable)
3447 return -EPERM;
3448
3449 if (count >= sizeof(kbuf))
3450 return -EOVERFLOW;
3451
3452 if (copy_from_user(kbuf, buf, count))
3453 return -EFAULT;
3454
3455 pkbuf = strstrip(kbuf);
3456
3457 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
3458 if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name,
3459 pkbuf, 16)) {
3460 hisi_hba->debugfs_bist_code_mode =
3461 hisi_sas_debugfs_loop_code_mode[i].value;
3462 found = true;
3463 break;
3464 }
3465 }
3466
3467 if (!found)
3468 return -EINVAL;
3469
3470 return count;
3471}
3472
3473static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode,
3474 struct file *filp)
3475{
3476 return single_open(filp, hisi_sas_debugfs_bist_code_mode_show,
3477 inode->i_private);
3478}
3479
3480static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = {
3481 .open = hisi_sas_debugfs_bist_code_mode_open,
3482 .read = seq_read,
3483 .write = hisi_sas_debugfs_bist_code_mode_write,
3484 .llseek = seq_lseek,
3485 .release = single_release,
3486 .owner = THIS_MODULE,
3487};
3488
3489static ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp,
3490 const char __user *buf,
3491 size_t count, loff_t *ppos)
3492{
3493 struct seq_file *m = filp->private_data;
3494 struct hisi_hba *hisi_hba = m->private;
3495 unsigned int phy_no;
3496 int val;
3497
3498 if (hisi_hba->debugfs_bist_enable)
3499 return -EPERM;
3500
3501 val = kstrtouint_from_user(buf, count, 0, &phy_no);
3502 if (val)
3503 return val;
3504
3505 if (phy_no >= hisi_hba->n_phy)
3506 return -EINVAL;
3507
3508 hisi_hba->debugfs_bist_phy_no = phy_no;
3509
3510 return count;
3511}
3512
3513static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p)
3514{
3515 struct hisi_hba *hisi_hba = s->private;
3516
3517 seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);
3518
3519 return 0;
3520}
3521
3522static int hisi_sas_debugfs_bist_phy_open(struct inode *inode,
3523 struct file *filp)
3524{
3525 return single_open(filp, hisi_sas_debugfs_bist_phy_show,
3526 inode->i_private);
3527}
3528
3529static const struct file_operations hisi_sas_debugfs_bist_phy_ops = {
3530 .open = hisi_sas_debugfs_bist_phy_open,
3531 .read = seq_read,
3532 .write = hisi_sas_debugfs_bist_phy_write,
3533 .llseek = seq_lseek,
3534 .release = single_release,
3535 .owner = THIS_MODULE,
3536};
3537
3538static const struct {
3539 int value;
3540 char *name;
3541} hisi_sas_debugfs_loop_modes[] = {
3542 { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
3543 { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
3544 { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
3545};
3546
3547static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p)
3548{
3549 struct hisi_hba *hisi_hba = s->private;
3550 int i;
3551
3552 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
3553 int match = (hisi_hba->debugfs_bist_mode ==
3554 hisi_sas_debugfs_loop_modes[i].value);
3555
3556 seq_printf(s, "%s%s%s ", match ? "[" : "",
3557 hisi_sas_debugfs_loop_modes[i].name,
3558 match ? "]" : "");
3559 }
3560 seq_puts(s, "\n");
3561
3562 return 0;
3563}
3564
3565static ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp,
3566 const char __user *buf,
3567 size_t count, loff_t *ppos)
3568{
3569 struct seq_file *m = filp->private_data;
3570 struct hisi_hba *hisi_hba = m->private;
3571 char kbuf[16] = {}, *pkbuf;
3572 bool found = false;
3573 int i;
3574
3575 if (hisi_hba->debugfs_bist_enable)
3576 return -EPERM;
3577
3578 if (count >= sizeof(kbuf))
3579 return -EOVERFLOW;
3580
3581 if (copy_from_user(kbuf, buf, count))
3582 return -EFAULT;
3583
3584 pkbuf = strstrip(kbuf);
3585
3586 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
3587 if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) {
3588 hisi_hba->debugfs_bist_mode =
3589 hisi_sas_debugfs_loop_modes[i].value;
3590 found = true;
3591 break;
3592 }
3593 }
3594
3595 if (!found)
3596 return -EINVAL;
3597
3598 return count;
3599}
3600
3601static int hisi_sas_debugfs_bist_mode_open(struct inode *inode,
3602 struct file *filp)
3603{
3604 return single_open(filp, hisi_sas_debugfs_bist_mode_show,
3605 inode->i_private);
3606}
3607
3608static const struct file_operations hisi_sas_debugfs_bist_mode_ops = {
3609 .open = hisi_sas_debugfs_bist_mode_open,
3610 .read = seq_read,
3611 .write = hisi_sas_debugfs_bist_mode_write,
3612 .llseek = seq_lseek,
3613 .release = single_release,
3614 .owner = THIS_MODULE,
3615};
3616
3617static ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp,
3618 const char __user *buf,
3619 size_t count, loff_t *ppos)
3620{
3621 struct seq_file *m = filp->private_data;
3622 struct hisi_hba *hisi_hba = m->private;
3623 unsigned int enable;
3624 int val;
3625
3626 val = kstrtouint_from_user(buf, count, 0, &enable);
3627 if (val)
3628 return val;
3629
3630 if (enable > 1)
3631 return -EINVAL;
3632
3633 if (enable == hisi_hba->debugfs_bist_enable)
3634 return count;
3635
3636 if (!hisi_hba->hw->set_bist)
3637 return -EPERM;
3638
3639 val = hisi_hba->hw->set_bist(hisi_hba, enable);
3640 if (val < 0)
3641 return val;
3642
3643 hisi_hba->debugfs_bist_enable = enable;
3644
3645 return count;
3646}
3647
3648static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p)
3649{
3650 struct hisi_hba *hisi_hba = s->private;
3651
3652 seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);
3653
3654 return 0;
3655}
3656
3657static int hisi_sas_debugfs_bist_enable_open(struct inode *inode,
3658 struct file *filp)
3659{
3660 return single_open(filp, hisi_sas_debugfs_bist_enable_show,
3661 inode->i_private);
3662}
3663
3664static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
3665 .open = hisi_sas_debugfs_bist_enable_open,
3666 .read = seq_read,
3667 .write = hisi_sas_debugfs_bist_enable_write,
3668 .llseek = seq_lseek,
3669 .release = single_release,
3670 .owner = THIS_MODULE,
3671};
3672
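The enable writer is deliberately conservative: out-of-range values are rejected, writing the current value is a no-op, hardware lacking a set_bist() hook returns -EPERM, and the cached state only updates after the hardware hook succeeds. A self-contained sketch of that ordering (hw_set_bist() is a hypothetical stand-in):

#include <errno.h>
#include <stdio.h>

static int bist_enabled;

/* Hypothetical HW hook; returns 0 on success. */
static int hw_set_bist(unsigned int enable)
{
    printf("hw: bist=%u\n", enable);
    return 0;
}

static int bist_enable_store(unsigned int enable)
{
    int rc;

    if (enable > 1)
        return -EINVAL;
    if (enable == (unsigned int)bist_enabled)
        return 0;       /* idempotent: nothing to do */

    rc = hw_set_bist(enable);
    if (rc < 0)
        return rc;      /* state unchanged on HW failure */

    bist_enabled = enable;
    return 0;
}

int main(void)
{
    printf("rc=%d\n", bist_enable_store(1));
    printf("rc=%d\n", bist_enable_store(1)); /* no-op */
    return 0;
}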
3165void hisi_sas_debugfs_work_handler(struct work_struct *work) 3673void hisi_sas_debugfs_work_handler(struct work_struct *work)
3166{ 3674{
3167 struct hisi_hba *hisi_hba = 3675 struct hisi_hba *hisi_hba =
@@ -3175,89 +3683,165 @@ void hisi_sas_debugfs_work_handler(struct work_struct *work)
3175} 3683}
3176EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler); 3684EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
3177 3685
3178void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) 3686void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
3179{ 3687{
3180 int max_command_entries = hisi_hba->hw->max_command_entries;
3181 struct device *dev = hisi_hba->dev; 3688 struct device *dev = hisi_hba->dev;
3182 int p, i, c, d; 3689 int i;
3690
3691 devm_kfree(dev, hisi_hba->debugfs_iost_cache);
3692 devm_kfree(dev, hisi_hba->debugfs_itct_cache);
3693 devm_kfree(dev, hisi_hba->debugfs_iost);
3694
3695 for (i = 0; i < hisi_hba->queue_count; i++)
3696 devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
3697
3698 for (i = 0; i < hisi_hba->queue_count; i++)
3699 devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
3700
3701 for (i = 0; i < DEBUGFS_REGS_NUM; i++)
3702 devm_kfree(dev, hisi_hba->debugfs_regs[i]);
3703
3704 for (i = 0; i < hisi_hba->n_phy; i++)
3705 devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
3706}
3707
3708int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
3709{
3710 const struct hisi_sas_hw *hw = hisi_hba->hw;
3711 struct device *dev = hisi_hba->dev;
3712 int p, c, d;
3183 size_t sz; 3713 size_t sz;
3184 3714
3185 hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), 3715 hisi_hba->debugfs_dump_dentry =
3186 hisi_sas_debugfs_dir); 3716 debugfs_create_dir("dump", hisi_hba->debugfs_dir);
3187 debugfs_create_file("trigger_dump", 0600,
3188 hisi_hba->debugfs_dir,
3189 hisi_hba,
3190 &hisi_sas_debugfs_trigger_dump_fops);
3191 3717
3192 /* Alloc buffer for global */ 3718 sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4;
3193 sz = hisi_hba->hw->debugfs_reg_global->count * 4; 3719 hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] =
3194 hisi_hba->debugfs_global_reg = 3720 devm_kmalloc(dev, sz, GFP_KERNEL);
3195 devm_kmalloc(dev, sz, GFP_KERNEL);
3196 3721
3197 if (!hisi_hba->debugfs_global_reg) 3722 if (!hisi_hba->debugfs_regs[DEBUGFS_GLOBAL])
3198 goto fail_global; 3723 goto fail;
3199 3724
3200 /* Alloc buffer for port */ 3725 sz = hw->debugfs_reg_port->count * 4;
3201 sz = hisi_hba->hw->debugfs_reg_port->count * 4;
3202 for (p = 0; p < hisi_hba->n_phy; p++) { 3726 for (p = 0; p < hisi_hba->n_phy; p++) {
3203 hisi_hba->debugfs_port_reg[p] = 3727 hisi_hba->debugfs_port_reg[p] =
3204 devm_kmalloc(dev, sz, GFP_KERNEL); 3728 devm_kmalloc(dev, sz, GFP_KERNEL);
3205 3729
3206 if (!hisi_hba->debugfs_port_reg[p]) 3730 if (!hisi_hba->debugfs_port_reg[p])
3207 goto fail_port; 3731 goto fail;
3208 } 3732 }
3209 3733
3210 /* Alloc buffer for cq */ 3734 sz = hw->debugfs_reg_array[DEBUGFS_AXI]->count * 4;
3211 sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 3735 hisi_hba->debugfs_regs[DEBUGFS_AXI] =
3736 devm_kmalloc(dev, sz, GFP_KERNEL);
3737
3738 if (!hisi_hba->debugfs_regs[DEBUGFS_AXI])
3739 goto fail;
3740
3741 sz = hw->debugfs_reg_array[DEBUGFS_RAS]->count * 4;
3742 hisi_hba->debugfs_regs[DEBUGFS_RAS] =
3743 devm_kmalloc(dev, sz, GFP_KERNEL);
3744
3745 if (!hisi_hba->debugfs_regs[DEBUGFS_RAS])
3746 goto fail;
3747
3748 sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
3212 for (c = 0; c < hisi_hba->queue_count; c++) { 3749 for (c = 0; c < hisi_hba->queue_count; c++) {
3213 hisi_hba->debugfs_complete_hdr[c] = 3750 hisi_hba->debugfs_complete_hdr[c] =
3214 devm_kmalloc(dev, sz, GFP_KERNEL); 3751 devm_kmalloc(dev, sz, GFP_KERNEL);
3215 3752
3216 if (!hisi_hba->debugfs_complete_hdr[c]) 3753 if (!hisi_hba->debugfs_complete_hdr[c])
3217 goto fail_cq; 3754 goto fail;
3218 } 3755 }
3219 3756
3220 /* Alloc buffer for dq */
3221 sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 3757 sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
3222 for (d = 0; d < hisi_hba->queue_count; d++) { 3758 for (d = 0; d < hisi_hba->queue_count; d++) {
3223 hisi_hba->debugfs_cmd_hdr[d] = 3759 hisi_hba->debugfs_cmd_hdr[d] =
3224 devm_kmalloc(dev, sz, GFP_KERNEL); 3760 devm_kmalloc(dev, sz, GFP_KERNEL);
3225 3761
3226 if (!hisi_hba->debugfs_cmd_hdr[d]) 3762 if (!hisi_hba->debugfs_cmd_hdr[d])
3227 goto fail_iost_dq; 3763 goto fail;
3228 } 3764 }
3229 3765
3230 /* Alloc buffer for iost */ 3766 sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
3231 sz = max_command_entries * sizeof(struct hisi_sas_iost);
3232 3767
3233 hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL); 3768 hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
3234 if (!hisi_hba->debugfs_iost) 3769 if (!hisi_hba->debugfs_iost)
3235 goto fail_iost_dq; 3770 goto fail;
3771
3772 sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
3773 sizeof(struct hisi_sas_iost_itct_cache);
3774
3775 hisi_hba->debugfs_iost_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
3776 if (!hisi_hba->debugfs_iost_cache)
3777 goto fail;
3778
3779 sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
3780 sizeof(struct hisi_sas_iost_itct_cache);
3781
3782 hisi_hba->debugfs_itct_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
3783 if (!hisi_hba->debugfs_itct_cache)
3784 goto fail;
3236 3785
3237 /* Alloc buffer for itct */
3238 /* New memory allocations must be located before itct */ 3786 /* New memory allocations must be located before itct */
3239 sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 3787 sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
3240 3788
3241 hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL); 3789 hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
3242 if (!hisi_hba->debugfs_itct) 3790 if (!hisi_hba->debugfs_itct)
3243 goto fail_itct; 3791 goto fail;
3244 3792
3245 return; 3793 return 0;
3246fail_itct: 3794fail:
3247 devm_kfree(dev, hisi_hba->debugfs_iost); 3795 hisi_sas_debugfs_release(hisi_hba);
3248fail_iost_dq: 3796 return -ENOMEM;
3249 for (i = 0; i < d; i++) 3797}
3250 devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]); 3798
3251fail_cq: 3799void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
3252 for (i = 0; i < c; i++) 3800{
3253 devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]); 3801 hisi_hba->debugfs_bist_dentry =
3254fail_port: 3802 debugfs_create_dir("bist", hisi_hba->debugfs_dir);
3255 for (i = 0; i < p; i++) 3803 debugfs_create_file("link_rate", 0600,
3256 devm_kfree(dev, hisi_hba->debugfs_port_reg[i]); 3804 hisi_hba->debugfs_bist_dentry, hisi_hba,
3257 devm_kfree(dev, hisi_hba->debugfs_global_reg); 3805 &hisi_sas_debugfs_bist_linkrate_ops);
3258fail_global: 3806
3259 debugfs_remove_recursive(hisi_hba->debugfs_dir); 3807 debugfs_create_file("code_mode", 0600,
3260 dev_dbg(dev, "failed to init debugfs!\n"); 3808 hisi_hba->debugfs_bist_dentry, hisi_hba,
3809 &hisi_sas_debugfs_bist_code_mode_ops);
3810
3811 debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
3812 hisi_hba, &hisi_sas_debugfs_bist_phy_ops);
3813
3814 debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
3815 &hisi_hba->debugfs_bist_cnt);
3816
3817 debugfs_create_file("loopback_mode", 0600,
3818 hisi_hba->debugfs_bist_dentry,
3819 hisi_hba, &hisi_sas_debugfs_bist_mode_ops);
3820
3821 debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
3822 hisi_hba, &hisi_sas_debugfs_bist_enable_ops);
3823
3824 hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
3825}
3826
3827void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
3828{
3829 struct device *dev = hisi_hba->dev;
3830
3831 hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
3832 hisi_sas_debugfs_dir);
3833 debugfs_create_file("trigger_dump", 0600,
3834 hisi_hba->debugfs_dir,
3835 hisi_hba,
3836 &hisi_sas_debugfs_trigger_dump_fops);
3837
3838 /* create bist structures */
3839 hisi_sas_debugfs_bist_init(hisi_hba);
3840
3841 if (hisi_sas_debugfs_alloc(hisi_hba)) {
3842 debugfs_remove_recursive(hisi_hba->debugfs_dir);
3843 dev_dbg(dev, "failed to init debugfs!\n");
3844 }
3261} 3845}
3262EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init); 3846EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
3263 3847
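Putting the new BIST files together, a loopback test is configured attribute by attribute and then armed via enable; once running, the configuration writers return -EPERM until it is switched off again. A hedged userspace example, assuming a hypothetical host directory name under debugfs:

#include <stdio.h>

/* Hypothetical host directory; the real name comes from dev_name(). */
#define BIST_DIR "/sys/kernel/debug/hisi_sas/HISI0162:01/bist"

static int write_attr(const char *attr, const char *val)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path), "%s/%s", BIST_DIR, attr);
    f = fopen(path, "w");
    if (!f)
        return -1;
    fputs(val, f);
    return fclose(f);
}

int main(void)
{
    /* Configure first: these writes fail with EPERM once enabled. */
    write_attr("phy_id", "0");
    write_attr("loopback_mode", "serdes");
    write_attr("code_mode", "PRBS31");
    write_attr("link_rate", "12.0 Gbit");
    return write_attr("enable", "1");
}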
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 3912216e8a4f..b861a0f14c9d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -401,8 +401,6 @@ enum {
401 TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */ 401 TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */
402}; 402};
403 403
404#define HISI_SAS_COMMAND_ENTRIES_V1_HW 8192
405
406#define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS) 404#define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS)
407#define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES) 405#define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES)
408#define HISI_SAS_FATAL_INT_NR (2) 406#define HISI_SAS_FATAL_INT_NR (2)
@@ -418,13 +416,6 @@ static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
418 return readl(regs); 416 return readl(regs);
419} 417}
420 418
421static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
422{
423 void __iomem *regs = hisi_hba->regs + off;
424
425 return readl_relaxed(regs);
426}
427
428static void hisi_sas_write32(struct hisi_hba *hisi_hba, 419static void hisi_sas_write32(struct hisi_hba *hisi_hba,
429 u32 off, u32 val) 420 u32 off, u32 val)
430{ 421{
@@ -866,30 +857,6 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
866 return bitmap; 857 return bitmap;
867} 858}
868 859
869/*
870 * The call path to this function and up to writing the write
871 * queue pointer should be safe from interruption.
872 */
873static int
874get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
875{
876 struct device *dev = hisi_hba->dev;
877 int queue = dq->id;
878 u32 r, w;
879
880 w = dq->wr_point;
881 r = hisi_sas_read32_relaxed(hisi_hba,
882 DLVRY_Q_0_RD_PTR + (queue * 0x14));
883 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
884 dev_warn(dev, "could not find free slot\n");
885 return -EAGAIN;
886 }
887
888 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
889
890 return w;
891}
892
893/* DQ lock must be taken here */ 860/* DQ lock must be taken here */
894static void start_delivery_v1_hw(struct hisi_sas_dq *dq) 861static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
895{ 862{
@@ -1308,21 +1275,17 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
1308 } 1275 }
1309 case SAS_PROTOCOL_SMP: 1276 case SAS_PROTOCOL_SMP:
1310 { 1277 {
1311 void *to;
1312 struct scatterlist *sg_resp = &task->smp_task.smp_resp; 1278 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1279 void *to = page_address(sg_page(sg_resp));
1313 1280
1314 ts->stat = SAM_STAT_GOOD; 1281 ts->stat = SAM_STAT_GOOD;
1315 to = kmap_atomic(sg_page(sg_resp));
1316 1282
1317 dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
1318 DMA_FROM_DEVICE);
1319 dma_unmap_sg(dev, &task->smp_task.smp_req, 1, 1283 dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
1320 DMA_TO_DEVICE); 1284 DMA_TO_DEVICE);
1321 memcpy(to + sg_resp->offset, 1285 memcpy(to + sg_resp->offset,
1322 hisi_sas_status_buf_addr_mem(slot) + 1286 hisi_sas_status_buf_addr_mem(slot) +
1323 sizeof(struct hisi_sas_err_record), 1287 sizeof(struct hisi_sas_err_record),
1324 sg_dma_len(sg_resp)); 1288 sg_resp->length);
1325 kunmap_atomic(to);
1326 break; 1289 break;
1327 } 1290 }
1328 case SAS_PROTOCOL_SATA: 1291 case SAS_PROTOCOL_SATA:
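Since the SMP response buffer is no longer DMA-mapped by the time completion runs, the kmap_atomic()/kunmap_atomic() pair and the response-side dma_unmap_sg() go away, and the copy length comes from the scatterlist's own length rather than sg_dma_len(). A standalone model of the copy, which skips the leading error record in the slot's status buffer (the sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define ERR_RECORD_SZ 16 /* stand-in for sizeof(struct hisi_sas_err_record) */

int main(void)
{
    /* Slot status buffer: error record, then the SMP response frame. */
    unsigned char status_buf[ERR_RECORD_SZ + 8] = {
        [ERR_RECORD_SZ] = 0x41, 0x10, 0x00, 0x00, /* illustrative frame */
    };
    unsigned char resp[8];
    size_t resp_len = sizeof(resp); /* sg_resp->length in the driver */

    /* Copy the frame only, skipping the leading error record. */
    memcpy(resp, status_buf + ERR_RECORD_SZ, resp_len);
    printf("frame byte0 0x%02x byte1 0x%02x\n", resp[0], resp[1]);
    return 0;
}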
@@ -1534,11 +1497,9 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
1534 struct hisi_sas_complete_v1_hdr *complete_queue = 1497 struct hisi_sas_complete_v1_hdr *complete_queue =
1535 (struct hisi_sas_complete_v1_hdr *) 1498 (struct hisi_sas_complete_v1_hdr *)
1536 hisi_hba->complete_hdr[queue]; 1499 hisi_hba->complete_hdr[queue];
1537 u32 irq_value, rd_point = cq->rd_point, wr_point; 1500 u32 rd_point = cq->rd_point, wr_point;
1538 1501
1539 spin_lock(&hisi_hba->lock); 1502 spin_lock(&hisi_hba->lock);
1540 irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
1541
1542 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); 1503 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
1543 wr_point = hisi_sas_read32(hisi_hba, 1504 wr_point = hisi_sas_read32(hisi_hba,
1544 COMPL_Q_0_WR_PTR + (0x14 * queue)); 1505 COMPL_Q_0_WR_PTR + (0x14 * queue));
@@ -1820,9 +1781,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
1820 .clear_itct = clear_itct_v1_hw, 1781 .clear_itct = clear_itct_v1_hw,
1821 .prep_smp = prep_smp_v1_hw, 1782 .prep_smp = prep_smp_v1_hw,
1822 .prep_ssp = prep_ssp_v1_hw, 1783 .prep_ssp = prep_ssp_v1_hw,
1823 .get_free_slot = get_free_slot_v1_hw,
1824 .start_delivery = start_delivery_v1_hw, 1784 .start_delivery = start_delivery_v1_hw,
1825 .slot_complete = slot_complete_v1_hw,
1826 .phys_init = phys_init_v1_hw, 1785 .phys_init = phys_init_v1_hw,
1827 .phy_start = start_phy_v1_hw, 1786 .phy_start = start_phy_v1_hw,
1828 .phy_disable = disable_phy_v1_hw, 1787 .phy_disable = disable_phy_v1_hw,
@@ -1830,7 +1789,6 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
1830 .phy_set_linkrate = phy_set_linkrate_v1_hw, 1789 .phy_set_linkrate = phy_set_linkrate_v1_hw,
1831 .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw, 1790 .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw,
1832 .get_wideport_bitmap = get_wideport_bitmap_v1_hw, 1791 .get_wideport_bitmap = get_wideport_bitmap_v1_hw,
1833 .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
1834 .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr), 1792 .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
1835 .sht = &sht_v1_hw, 1793 .sht = &sht_v1_hw,
1836}; 1794};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e9b15d45f98f..8e96a257e439 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1637,31 +1637,6 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
1637 return bitmap; 1637 return bitmap;
1638} 1638}
1639 1639
1640/*
1641 * The call path to this function and up to writing the write
1642 * queue pointer should be safe from interruption.
1643 */
1644static int
1645get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
1646{
1647 struct device *dev = hisi_hba->dev;
1648 int queue = dq->id;
1649 u32 r, w;
1650
1651 w = dq->wr_point;
1652 r = hisi_sas_read32_relaxed(hisi_hba,
1653 DLVRY_Q_0_RD_PTR + (queue * 0x14));
1654 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
1655 dev_warn(dev, "full queue=%d r=%d w=%d\n",
1656 queue, r, w);
1657 return -EAGAIN;
1658 }
1659
1660 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
1661
1662 return w;
1663}
1664
1665/* DQ lock must be taken here */ 1640/* DQ lock must be taken here */
1666static void start_delivery_v2_hw(struct hisi_sas_dq *dq) 1641static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
1667{ 1642{
@@ -2418,7 +2393,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 			slot_err_v2_hw(hisi_hba, task, slot, 2);
 
 		if (ts->stat != SAS_DATA_UNDERRUN)
-			dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+			dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
 				 slot->idx, task, sas_dev->device_id,
 				 complete_hdr->dw0, complete_hdr->dw1,
 				 complete_hdr->act, complete_hdr->dw3,
@@ -2444,20 +2419,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	case SAS_PROTOCOL_SMP:
 	{
 		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
-		void *to;
+		void *to = page_address(sg_page(sg_resp));
 
 		ts->stat = SAM_STAT_GOOD;
-		to = kmap_atomic(sg_page(sg_resp));
 
-		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
-			     DMA_FROM_DEVICE);
 		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
 			     DMA_TO_DEVICE);
 		memcpy(to + sg_resp->offset,
 		       hisi_sas_status_buf_addr_mem(slot) +
 		       sizeof(struct hisi_sas_err_record),
-		       sg_dma_len(sg_resp));
-		kunmap_atomic(to);
+		       sg_resp->length);
 		break;
 	}
 	case SAS_PROTOCOL_SATA:
@@ -2484,7 +2455,7 @@ out:
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		dev_info(dev, "slot complete: task(%p) aborted\n", task);
+		dev_info(dev, "slot complete: task(%pK) aborted\n", task);
 		return SAS_ABORTED_TASK;
 	}
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
@@ -2495,7 +2466,7 @@ out:
 	spin_lock_irqsave(&device->done_lock, flags);
 	if (test_bit(SAS_HA_FROZEN, &ha->state)) {
 		spin_unlock_irqrestore(&device->done_lock, flags);
-		dev_info(dev, "slot complete: task(%p) ignored\n",
+		dev_info(dev, "slot complete: task(%pK) ignored\n",
 			 task);
 		return sts;
 	}
@@ -2563,7 +2534,10 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
 	hdr->dw1 = cpu_to_le32(dw1);
 
 	/* dw2 */
-	if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
+	if (task->ata_task.use_ncq) {
+		struct ata_queued_cmd *qc = task->uldd_task;
+
+		hdr_tag = qc->tag;
 		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
 		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
 	}
@@ -3333,8 +3307,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 {
 	struct platform_device *pdev = hisi_hba->platform_dev;
 	struct device *dev = &pdev->dev;
-	int irq, rc, irq_map[128];
-	int i, phy_no, fatal_no, queue_no, k;
+	int irq, rc = 0, irq_map[128];
+	int i, phy_no, fatal_no, queue_no;
 
 	for (i = 0; i < 128; i++)
 		irq_map[i] = platform_get_irq(pdev, i);
@@ -3347,7 +3321,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 			dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
 				irq, rc);
 			rc = -ENOENT;
-			goto free_phy_int_irqs;
+			goto err_out;
 		}
 	}
 
@@ -3361,7 +3335,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 			dev_err(dev, "irq init: could not request sata interrupt %d, rc=%d\n",
 				irq, rc);
 			rc = -ENOENT;
-			goto free_sata_int_irqs;
+			goto err_out;
 		}
 	}
 
@@ -3373,7 +3347,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 			dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
 				irq, rc);
 			rc = -ENOENT;
-			goto free_fatal_int_irqs;
+			goto err_out;
 		}
 	}
 
@@ -3388,34 +3362,14 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 			dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
 				irq, rc);
 			rc = -ENOENT;
-			goto free_cq_int_irqs;
+			goto err_out;
 		}
 		tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
 	}
 
 	hisi_hba->cq_nvecs = hisi_hba->queue_count;
 
-	return 0;
-
-free_cq_int_irqs:
-	for (k = 0; k < queue_no; k++) {
-		struct hisi_sas_cq *cq = &hisi_hba->cq[k];
-
-		free_irq(irq_map[k + 96], cq);
-		tasklet_kill(&cq->tasklet);
-	}
-free_fatal_int_irqs:
-	for (k = 0; k < fatal_no; k++)
-		free_irq(irq_map[k + 81], hisi_hba);
-free_sata_int_irqs:
-	for (k = 0; k < phy_no; k++) {
-		struct hisi_sas_phy *phy = &hisi_hba->phy[k];
-
-		free_irq(irq_map[k + 72], phy);
-	}
-free_phy_int_irqs:
-	for (k = 0; k < i; k++)
-		free_irq(irq_map[k + 1], hisi_hba);
+err_out:
 	return rc;
 }
 
@@ -3544,8 +3498,8 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
 	return 0;
 }
 
-static int wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
+static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
 					    int delay_ms, int timeout_ms)
 {
 	struct device *dev = hisi_hba->dev;
 	int entries, entries_old = 0, time;
@@ -3559,12 +3513,13 @@ static int wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
 		msleep(delay_ms);
 	}
 
-	if (time >= timeout_ms)
-		return -ETIMEDOUT;
+	if (time >= timeout_ms) {
+		dev_dbg(dev, "Wait commands complete timeout!\n");
+		return;
+	}
 
 	dev_dbg(dev, "wait commands complete %dms\n", time);
-
-	return 0;
 }
 
 static struct device_attribute *host_attrs_v2_hw[] = {
@@ -3606,9 +3561,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
 	.prep_ssp = prep_ssp_v2_hw,
 	.prep_stp = prep_ata_v2_hw,
 	.prep_abort = prep_abort_v2_hw,
-	.get_free_slot = get_free_slot_v2_hw,
 	.start_delivery = start_delivery_v2_hw,
-	.slot_complete = slot_complete_v2_hw,
 	.phys_init = phys_init_v2_hw,
 	.phy_start = start_phy_v2_hw,
 	.phy_disable = disable_phy_v2_hw,
@@ -3616,7 +3569,6 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
 	.get_events = phy_get_events_v2_hw,
 	.phy_set_linkrate = phy_set_linkrate_v2_hw,
 	.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
-	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
 	.soft_reset = soft_reset_v2_hw,
 	.get_phys_state = get_phys_state_v2_hw,
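The prep_ata hunk above (and the identical one in v3_hw below) drops the hisi_sas_get_ncq_tag() round trip and takes the tag straight from the libata queued command in task->uldd_task. In a SATA NCQ command FIS the tag travels in bits 7:3 of the sector count field, which is what the existing `hdr_tag << 3` encodes. A stand-alone illustration of that encoding (plain user-space C, not driver code; the tag value is a stand-in for qc->tag):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* NCQ FIS encoding: the command tag occupies sector_count bits 7:3. */
static uint8_t ncq_sector_count(uint8_t sector_count, unsigned int tag)
{
	return sector_count | (uint8_t)(tag << 3);
}

int main(void)
{
	unsigned int tag = 5;              /* would come from qc->tag */
	uint8_t fis_sec_cnt = ncq_sector_count(0, tag);

	assert((fis_sec_cnt >> 3) == tag); /* tag recoverable from the FIS */
	printf("tag %u -> sector_count 0x%02x\n", tag, fis_sec_cnt);
	return 0;
}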
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 5f0f6df11adf..cb8d087762db 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -71,6 +71,7 @@
 #define HGC_DQE_ECC_MB_ADDR_OFF	16
 #define HGC_DQE_ECC_MB_ADDR_MSK	(0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
 #define CHNL_INT_STATUS			0x148
+#define TAB_DFX				0x14c
 #define HGC_ITCT_ECC_ADDR		0x150
 #define HGC_ITCT_ECC_1B_ADDR_OFF	0
 #define HGC_ITCT_ECC_1B_ADDR_MSK	(0x3ff << \
@@ -83,6 +84,7 @@
 #define AXI_ERR_INFO_MSK	(0xff << AXI_ERR_INFO_OFF)
 #define FIFO_ERR_INFO_OFF	8
 #define FIFO_ERR_INFO_MSK	(0xff << FIFO_ERR_INFO_OFF)
+#define TAB_RD_TYPE		0x15c
 #define INT_COAL_EN		0x19c
 #define OQ_INT_COAL_TIME	0x1a0
 #define OQ_INT_COAL_CNT		0x1a4
@@ -189,12 +191,30 @@
 #define PHY_CFG_PHY_RST_OFF	3
 #define PHY_CFG_PHY_RST_MSK	(0x1 << PHY_CFG_PHY_RST_OFF)
 #define PROG_PHY_LINK_RATE	(PORT_BASE + 0x8)
+#define CFG_PROG_PHY_LINK_RATE_OFF	8
+#define CFG_PROG_PHY_LINK_RATE_MSK	(0xf << CFG_PROG_PHY_LINK_RATE_OFF)
 #define PHY_CTRL		(PORT_BASE + 0x14)
 #define PHY_CTRL_RESET_OFF	0
 #define PHY_CTRL_RESET_MSK	(0x1 << PHY_CTRL_RESET_OFF)
 #define CMD_HDR_PIR_OFF		8
 #define CMD_HDR_PIR_MSK		(0x1 << CMD_HDR_PIR_OFF)
 #define SERDES_CFG		(PORT_BASE + 0x1c)
+#define CFG_ALOS_CHK_DISABLE_OFF	9
+#define CFG_ALOS_CHK_DISABLE_MSK	(0x1 << CFG_ALOS_CHK_DISABLE_OFF)
+#define SAS_PHY_BIST_CTRL	(PORT_BASE + 0x2c)
+#define CFG_BIST_MODE_SEL_OFF	0
+#define CFG_BIST_MODE_SEL_MSK	(0xf << CFG_BIST_MODE_SEL_OFF)
+#define CFG_LOOP_TEST_MODE_OFF	14
+#define CFG_LOOP_TEST_MODE_MSK	(0x3 << CFG_LOOP_TEST_MODE_OFF)
+#define CFG_RX_BIST_EN_OFF	16
+#define CFG_RX_BIST_EN_MSK	(0x1 << CFG_RX_BIST_EN_OFF)
+#define CFG_TX_BIST_EN_OFF	17
+#define CFG_TX_BIST_EN_MSK	(0x1 << CFG_TX_BIST_EN_OFF)
+#define CFG_BIST_TEST_OFF	18
+#define CFG_BIST_TEST_MSK	(0x1 << CFG_BIST_TEST_OFF)
+#define SAS_PHY_BIST_CODE	(PORT_BASE + 0x30)
+#define SAS_PHY_BIST_CODE1	(PORT_BASE + 0x34)
+#define SAS_BIST_ERR_CNT	(PORT_BASE + 0x38)
 #define SL_CFG			(PORT_BASE + 0x84)
 #define AIP_LIMIT		(PORT_BASE + 0x90)
 #define SL_CONTROL		(PORT_BASE + 0x94)
@@ -499,13 +519,6 @@ static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
 	return readl(regs);
 }
 
-static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
-{
-	void __iomem *regs = hisi_hba->regs + off;
-
-	return readl_relaxed(regs);
-}
-
 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
 {
 	void __iomem *regs = hisi_hba->regs + off;
@@ -1006,31 +1019,6 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
 	return bitmap;
 }
 
-/**
- * The callpath to this function and upto writing the write
- * queue pointer should be safe from interruption.
- */
-static int
-get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
-{
-	struct device *dev = hisi_hba->dev;
-	int queue = dq->id;
-	u32 r, w;
-
-	w = dq->wr_point;
-	r = hisi_sas_read32_relaxed(hisi_hba,
-				DLVRY_Q_0_RD_PTR + (queue * 0x14));
-	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
-		dev_warn(dev, "full queue=%d r=%d w=%d\n",
-			 queue, r, w);
-		return -EAGAIN;
-	}
-
-	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
-
-	return w;
-}
-
 static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
 {
 	struct hisi_hba *hisi_hba = dq->hisi_hba;
@@ -1386,7 +1374,10 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
 	hdr->dw1 = cpu_to_le32(dw1);
 
 	/* dw2 */
-	if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
+	if (task->ata_task.use_ncq) {
+		struct ata_queued_cmd *qc = task->uldd_task;
+
+		hdr_tag = qc->tag;
 		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
 		dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
 	}
@@ -1944,7 +1935,7 @@ static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
 	u32 irq_value, irq_msk;
 
 	irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
-	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);
 
 	irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
 	if (irq_value)
@@ -2220,7 +2211,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 
 		slot_err_v3_hw(hisi_hba, task, slot);
 		if (ts->stat != SAS_DATA_UNDERRUN)
-			dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+			dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
 				 slot->idx, task, sas_dev->device_id,
 				 dw0, dw1, complete_hdr->act, dw3,
 				 error_info[0], error_info[1],
@@ -2241,20 +2232,16 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	}
 	case SAS_PROTOCOL_SMP: {
 		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
-		void *to;
+		void *to = page_address(sg_page(sg_resp));
 
 		ts->stat = SAM_STAT_GOOD;
-		to = kmap_atomic(sg_page(sg_resp));
 
-		dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
-			     DMA_FROM_DEVICE);
 		dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
 			     DMA_TO_DEVICE);
 		memcpy(to + sg_resp->offset,
 		       hisi_sas_status_buf_addr_mem(slot) +
 		       sizeof(struct hisi_sas_err_record),
-		       sg_dma_len(sg_resp));
-		kunmap_atomic(to);
+		       sg_resp->length);
 		break;
 	}
 	case SAS_PROTOCOL_SATA:
@@ -2279,7 +2266,7 @@ out:
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		dev_info(dev, "slot complete: task(%p) aborted\n", task);
+		dev_info(dev, "slot complete: task(%pK) aborted\n", task);
 		return SAS_ABORTED_TASK;
 	}
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
@@ -2290,7 +2277,7 @@ out:
 	spin_lock_irqsave(&device->done_lock, flags);
 	if (test_bit(SAS_HA_FROZEN, &ha->state)) {
 		spin_unlock_irqrestore(&device->done_lock, flags);
-		dev_info(dev, "slot complete: task(%p) ignored\n ",
+		dev_info(dev, "slot complete: task(%pK) ignored\n ",
 			 task);
 		return sts;
 	}
@@ -2385,8 +2372,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 {
 	struct device *dev = hisi_hba->dev;
 	struct pci_dev *pdev = hisi_hba->pci_dev;
-	int vectors, rc;
-	int i, k;
+	int vectors, rc, i;
 	int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
 
 	if (auto_affine_msi_experimental) {
@@ -2434,7 +2420,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 	if (rc) {
 		dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
 		rc = -ENOENT;
-		goto free_phy_irq;
+		goto free_irq_vectors;
 	}
 
 	rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
@@ -2443,7 +2429,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 	if (rc) {
 		dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
 		rc = -ENOENT;
-		goto free_chnl_interrupt;
+		goto free_irq_vectors;
 	}
 
 	/* Init tasklets for cq only */
@@ -2460,7 +2446,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 			dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
 				i, rc);
 			rc = -ENOENT;
-			goto free_cq_irqs;
+			goto free_irq_vectors;
 		}
 
 		tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
@@ -2468,18 +2454,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
 
 	return 0;
 
-free_cq_irqs:
-	for (k = 0; k < i; k++) {
-		struct hisi_sas_cq *cq = &hisi_hba->cq[k];
-		int nr = hisi_sas_intr_conv ? 16 : 16 + k;
-
-		free_irq(pci_irq_vector(pdev, nr), cq);
-	}
-	free_irq(pci_irq_vector(pdev, 11), hisi_hba);
-free_chnl_interrupt:
-	free_irq(pci_irq_vector(pdev, 2), hisi_hba);
-free_phy_irq:
-	free_irq(pci_irq_vector(pdev, 1), hisi_hba);
 free_irq_vectors:
 	pci_free_irq_vectors(pdev);
 	return rc;
@@ -2620,8 +2594,8 @@ static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
 	return 0;
 }
 
-static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
+static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
 					    int delay_ms, int timeout_ms)
 {
 	struct device *dev = hisi_hba->dev;
 	int entries, entries_old = 0, time;
@@ -2635,12 +2609,12 @@ static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
 		msleep(delay_ms);
 	}
 
-	if (time >= timeout_ms)
-		return -ETIMEDOUT;
+	if (time >= timeout_ms) {
+		dev_dbg(dev, "Wait commands complete timeout!\n");
+		return;
+	}
 
 	dev_dbg(dev, "wait commands complete %dms\n", time);
-
-	return 0;
 }
 
 static ssize_t intr_conv_v3_hw_show(struct device *dev,
@@ -2887,16 +2861,45 @@ static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
 	.read_global_reg = hisi_sas_read32,
 };
 
+static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
+	HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS),
+	HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS),
+	HISI_SAS_DEBUGFS_REG(AXI_CFG),
+	HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR),
+	{}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
+	.lu = debugfs_axi_reg_lu,
+	.count = 0x61,
+	.base_off = AXI_MASTER_CFG_BASE,
+	.read_global_reg = hisi_sas_read32,
+};
+
+static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
+	HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK),
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2),
+	HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK),
+	{}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
+	.lu = debugfs_ras_reg_lu,
+	.count = 0x10,
+	.base_off = RAS_BASE,
+	.read_global_reg = hisi_sas_read32,
+};
+
 static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
 {
-	struct device *dev = hisi_hba->dev;
-
 	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
 
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
 
-	if (wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000) == -ETIMEDOUT)
-		dev_dbg(dev, "Wait commands complete timeout!\n");
+	wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
 
 	hisi_sas_kill_tasklets(hisi_hba);
 }
@@ -2909,6 +2912,142 @@ static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
 }
 
+static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
+				       enum hisi_sas_debugfs_cache_type type,
+				       u32 *cache)
+{
+	u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ *
+			    HISI_SAS_IOST_ITCT_CACHE_NUM;
+	u32 *buf = cache;
+	u32 i, val;
+
+	hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type);
+
+	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) {
+		val = hisi_sas_read32(hisi_hba, TAB_DFX);
+		if (val == 0xffffffff)
+			break;
+	}
+
+	if (val != 0xffffffff) {
+		pr_err("Issue occur when reading IOST/ITCT cache!\n");
+		return;
+	}
+
+	memset(buf, 0, cache_dw_size * 4);
+	buf[0] = val;
+
+	for (i = 1; i < cache_dw_size; i++)
+		buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX);
+}
+
+static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
+{
+	u32 reg_val;
+	int phy_id = hisi_hba->debugfs_bist_phy_no;
+
+	/* disable PHY */
+	hisi_sas_phy_enable(hisi_hba, phy_id, 0);
+
+	/* disable ALOS */
+	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+	reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
+	hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+}
+
+static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
+{
+	u32 reg_val;
+	int phy_id = hisi_hba->debugfs_bist_phy_no;
+
+	/* disable loopback */
+	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL);
+	reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
+		     CFG_BIST_TEST_MSK);
+	hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val);
+
+	/* enable ALOS */
+	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+	reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
+	hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+
+	/* restore the linkrate */
+	reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE);
+	/* init OOB link rate as 1.5 Gbits */
+	reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
+	reg_val |= (0x8 << CFG_PROG_PHY_LINK_RATE_OFF);
+	hisi_sas_phy_write32(hisi_hba, phy_id, PROG_PHY_LINK_RATE, reg_val);
+
+	/* enable PHY */
+	hisi_sas_phy_enable(hisi_hba, phy_id, 1);
+}
+
+#define SAS_PHY_BIST_CODE_INIT	0x1
+#define SAS_PHY_BIST_CODE1_INIT	0X80
+static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
+{
+	u32 reg_val, mode_tmp;
+	u32 linkrate = hisi_hba->debugfs_bist_linkrate;
+	u32 phy_id = hisi_hba->debugfs_bist_phy_no;
+	u32 code_mode = hisi_hba->debugfs_bist_code_mode;
+	u32 path_mode = hisi_hba->debugfs_bist_mode;
+	struct device *dev = hisi_hba->dev;
+
+	dev_info(dev, "BIST info:linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n",
+		 linkrate, phy_id, code_mode, path_mode);
+	mode_tmp = path_mode ? 2 : 1;
+	if (enable) {
+		/* some preparations before bist test */
+		hisi_sas_bist_test_prep_v3_hw(hisi_hba);
+
+		/* set linkrate of bit test*/
+		reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+					      PROG_PHY_LINK_RATE);
+		reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
+		reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF);
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     PROG_PHY_LINK_RATE, reg_val);
+
+		/* set code mode of bit test */
+		reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+					      SAS_PHY_BIST_CTRL);
+		reg_val &= ~(CFG_BIST_MODE_SEL_MSK |
+			     CFG_LOOP_TEST_MODE_MSK |
+			     CFG_RX_BIST_EN_MSK |
+			     CFG_TX_BIST_EN_MSK |
+			     CFG_BIST_TEST_MSK);
+		reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
+			    (mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
+			    CFG_BIST_TEST_MSK);
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     SAS_PHY_BIST_CTRL, reg_val);
+
+		mdelay(100);
+		reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     SAS_PHY_BIST_CTRL, reg_val);
+
+		/* set the bist init value */
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     SAS_PHY_BIST_CODE,
+				     SAS_PHY_BIST_CODE_INIT);
+		hisi_sas_phy_write32(hisi_hba, phy_id,
+				     SAS_PHY_BIST_CODE1,
+				     SAS_PHY_BIST_CODE1_INIT);
+
+		/* clear error bit */
+		mdelay(100);
+		hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
+	} else {
+		/* disable bist test and recover it */
+		hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
+				phy_id, SAS_BIST_ERR_CNT);
+		hisi_sas_bist_test_restore_v3_hw(hisi_hba);
+	}
+
+	return 0;
+}
+
 static struct scsi_host_template sht_v3_hw = {
 	.name = DRV_NAME,
 	.module = THIS_MODULE,
@@ -2935,7 +3074,6 @@ static struct scsi_host_template sht_v3_hw = {
 static const struct hisi_sas_hw hisi_sas_v3_hw = {
 	.hw_init = hisi_sas_v3_init,
 	.setup_itct = setup_itct_v3_hw,
-	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
 	.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
 	.clear_itct = clear_itct_v3_hw,
@@ -2944,9 +3082,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
 	.prep_smp = prep_smp_v3_hw,
 	.prep_stp = prep_ata_v3_hw,
 	.prep_abort = prep_abort_v3_hw,
-	.get_free_slot = get_free_slot_v3_hw,
 	.start_delivery = start_delivery_v3_hw,
-	.slot_complete = slot_complete_v3_hw,
 	.phys_init = phys_init_v3_hw,
 	.phy_start = start_phy_v3_hw,
 	.phy_disable = disable_phy_v3_hw,
@@ -2959,10 +3095,14 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
 	.get_events = phy_get_events_v3_hw,
 	.write_gpio = write_gpio_v3_hw,
 	.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
-	.debugfs_reg_global = &debugfs_global_reg,
+	.debugfs_reg_array[DEBUGFS_GLOBAL] = &debugfs_global_reg,
+	.debugfs_reg_array[DEBUGFS_AXI] = &debugfs_axi_reg,
+	.debugfs_reg_array[DEBUGFS_RAS] = &debugfs_ras_reg,
 	.debugfs_reg_port = &debugfs_port_reg,
 	.snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
 	.snapshot_restore = debugfs_snapshot_restore_v3_hw,
+	.read_iost_itct_cache = read_iost_itct_cache_v3_hw,
+	.set_bist = debugfs_set_bist_v3_hw,
 };
 
 static struct Scsi_Host *
@@ -2993,8 +3133,6 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
 	else
 		hisi_hba->prot_mask = prot_mask;
 
-	timer_setup(&hisi_hba->timer, NULL, 0);
-
 	if (hisi_sas_get_fw_info(hisi_hba) < 0)
 		goto err_out;
 
@@ -3076,17 +3214,14 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	shost->max_lun = ~0;
 	shost->max_channel = 1;
 	shost->max_cmd_len = 16;
-	shost->can_queue = hisi_hba->hw->max_command_entries -
-		HISI_SAS_RESERVED_IPTT_CNT;
-	shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
-		HISI_SAS_RESERVED_IPTT_CNT;
+	shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
+	shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
 
 	sha->sas_ha_name = DRV_NAME;
 	sha->dev = dev;
 	sha->lldd_module = THIS_MODULE;
 	sha->sas_addr = &hisi_hba->sas_addr[0];
 	sha->num_phys = hisi_hba->n_phy;
-	sha->core.shost = hisi_hba->shost;
 
 	for (i = 0; i < hisi_hba->n_phy; i++) {
 		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
@@ -3273,15 +3408,21 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
 	pci_enable_wake(pdev, PCI_D0, 0);
 	pci_restore_state(pdev);
 	rc = pci_enable_device(pdev);
-	if (rc)
+	if (rc) {
 		dev_err(dev, "enable device failed during resume (%d)\n", rc);
+		return rc;
+	}
 
 	pci_set_master(pdev);
 	scsi_unblock_requests(shost);
 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
 
 	sas_prep_resume_ha(sha);
-	init_reg_v3_hw(hisi_hba);
+	rc = hw_init_v3_hw(hisi_hba);
+	if (rc) {
+		scsi_remove_host(shost);
+		pci_disable_device(pdev);
+	}
 	hisi_hba->hw->phys_init(hisi_hba);
 	sas_resume_ha(sha);
 	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
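The BIST support added above programs multi-bit register fields with the driver's usual read-modify-write pattern: clear the field through its mask, then OR in the new value at the field offset (see the CFG_PROG_PHY_LINK_RATE and SAS_PHY_BIST_CTRL updates). A stand-alone sketch of that pattern, reusing the new field macros; the register value here is fabricated for the example:

#include <assert.h>
#include <stdint.h>

#define CFG_PROG_PHY_LINK_RATE_OFF	8
#define CFG_PROG_PHY_LINK_RATE_MSK	(0xf << CFG_PROG_PHY_LINK_RATE_OFF)

/* Replace one register field, leaving all other bits untouched. */
static uint32_t set_field(uint32_t reg, uint32_t msk, unsigned int off,
			  uint32_t val)
{
	return (reg & ~msk) | ((val << off) & msk);
}

int main(void)
{
	uint32_t reg = 0xdead0a00;	/* fabricated register snapshot */

	reg = set_field(reg, CFG_PROG_PHY_LINK_RATE_MSK,
			CFG_PROG_PHY_LINK_RATE_OFF, 0x8); /* 1.5 Gbit OOB */
	assert((reg & CFG_PROG_PHY_LINK_RATE_MSK) ==
	       (0x8 << CFG_PROG_PHY_LINK_RATE_OFF));
	assert((reg & ~CFG_PROG_PHY_LINK_RATE_MSK) ==
	       (0xdead0a00 & ~CFG_PROG_PHY_LINK_RATE_MSK));
	return 0;
}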
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 8cdbac076a1b..df897df5cafe 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1830,6 +1830,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
 		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
 			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
 			bsg_request->rqst_data.h_els.port_id[2];
+		/* fall through */
 	case FC_BSG_RPT_ELS:
 		fc_flags = IBMVFC_FC_ELS;
 		break;
@@ -1838,6 +1839,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
 		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
 			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
 			bsg_request->rqst_data.h_ct.port_id[2];
+		/* fall through */
 	case FC_BSG_RPT_CT:
 		fc_flags = IBMVFC_FC_CT_IU;
 		break;
@@ -4020,6 +4022,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
 		return;
 	case IBMVFC_MAD_CRQ_ERROR:
 		ibmvfc_retry_host_init(vhost);
+		/* fall through */
 	case IBMVFC_MAD_DRIVER_FAILED:
 		ibmvfc_free_event(evt);
 		return;
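The comments added in this file and in ibmvscsi_tgt below are the annotation form GCC's -Wimplicit-fallthrough recognizes, marking each case label that intentionally continues into the next one. A minimal stand-alone example of the pattern (compile with gcc -Wimplicit-fallthrough to see the warning suppressed by the comment):

#include <stdio.h>

static int classify(int code)
{
	int flags = 0;

	switch (code) {
	case 1:
		flags |= 0x1;	/* extra work for code 1 ... */
		/* fall through */
	case 2:
		flags |= 0x2;	/* ... plus the common handling for 2 */
		break;
	default:
		break;
	}
	return flags;
}

int main(void)
{
	printf("code 1 -> flags 0x%x\n", classify(1));	/* prints 0x3 */
	printf("code 2 -> flags 0x%x\n", classify(2));	/* prints 0x2 */
	return 0;
}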
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 7f9535392a93..a929fe76102b 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1581,6 +1581,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
 	case H_PERMISSION:
 		if (connection_broken(vscsi))
 			flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
+		/* Fall through */
 	default:
 		dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
 			rc);
@@ -2492,8 +2493,10 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
 		break;
 	case H_CLOSED:
 		vscsi->flags |= CLIENT_FAILED;
+		/* Fall through */
 	case H_DROPPED:
 		vscsi->flags |= RESPONSE_Q_DOWN;
+		/* Fall through */
 	case H_REMOTE_PARM:
 		dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
 			rc);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index bade2e025ecf..691acbdcc46d 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -51,6 +51,8 @@ struct lpfc_sli2_slim;
 			cmnd for menlo needs nearly twice as for firmware
 			downloads using bsg */
 
+#define LPFC_DEFAULT_XPSGL_SIZE	256
+#define LPFC_MAX_SG_TABLESIZE	0xffff
 #define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
 #define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
 #define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd */
@@ -732,14 +734,13 @@ struct lpfc_hba {
 #define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
 #define HBA_DEVLOSS_TMO		0x2000 /* HBA in devloss timeout */
 #define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */
-#define HBA_FCP_IOQ_FLUSH	0x8000 /* FCP I/O queues being flushed */
+#define HBA_IOQ_FLUSH		0x8000 /* FCP/NVME I/O queues being flushed */
 #define HBA_FW_DUMP_OP		0x10000 /* Skips fn reset before FW dump */
 #define HBA_RECOVERABLE_UE	0x20000 /* Firmware supports recoverable UE */
 #define HBA_FORCED_LINK_SPEED	0x40000 /*
 					 * Firmware supports Forced Link Speed
 					 * capability
 					 */
-#define HBA_NVME_IOQ_FLUSH	0x80000 /* NVME IO queues flushed. */
 #define HBA_FLOGI_ISSUED	0x100000 /* FLOGI was issued */
 
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
@@ -795,10 +796,12 @@ struct lpfc_hba {
 	uint8_t  mds_diags_support;
 	uint8_t  bbcredit_support;
 	uint8_t  enab_exp_wqcq_pages;
+	u8	 nsler; /* Firmware supports FC-NVMe-2 SLER */
 
 	/* HBA Config Parameters */
 	uint32_t cfg_ack0;
 	uint32_t cfg_xri_rebalancing;
+	uint32_t cfg_xpsgl;
 	uint32_t cfg_enable_npiv;
 	uint32_t cfg_enable_rrq;
 	uint32_t cfg_topology;
@@ -905,6 +908,7 @@ struct lpfc_hba {
 	wait_queue_head_t work_waitq;
 	struct task_struct *worker_thread;
 	unsigned long data_flags;
+	uint32_t border_sge_num;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
 	uint32_t hbq_count;		/* Count of configured HBQs */
@@ -987,6 +991,7 @@ struct lpfc_hba {
 	struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
 	struct dma_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
 	struct dma_pool *txrdy_payload_pool;
+	struct dma_pool *lpfc_cmd_rsp_buf_pool;
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
@@ -1034,8 +1039,6 @@ struct lpfc_hba {
 	struct dentry *debug_hbqinfo;
 	struct dentry *debug_dumpHostSlim;
 	struct dentry *debug_dumpHBASlim;
-	struct dentry *debug_dumpData;   /* BlockGuard BPL */
-	struct dentry *debug_dumpDif;    /* BlockGuard BPL */
 	struct dentry *debug_InjErrLBA;  /* LBA to inject errors at */
 	struct dentry *debug_InjErrNPortID;  /* NPortID to inject errors at */
 	struct dentry *debug_InjErrWWPN;  /* WWPN to inject errors at */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d65558619ab0..25aa7a53d255 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -841,7 +841,8 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
 	lpfc_vpd_t *vp = &phba->vpd;
 
 	lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
-	return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
+	return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
+			 vp->rev.smRev, vp->rev.smFwRev);
 }
 
 /**
@@ -3682,8 +3683,8 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
 		if (rport)
 			remoteport = rport->remoteport;
 		spin_unlock(&vport->phba->hbalock);
-		if (remoteport)
-			nvme_fc_set_remoteport_devloss(rport->remoteport,
+		if (rport && remoteport)
+			nvme_fc_set_remoteport_devloss(remoteport,
 						       vport->cfg_devloss_tmo);
 #endif
 	}
@@ -5467,15 +5468,12 @@ LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
  * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
  * For the Initiator (I), enabling this parameter means that an NVMET
  * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
- * processed by the initiator for subsequent NVME FCP IO. For the target
- * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
- * driver parameter as the target function's first burst size returned to the
- * initiator in the target's NVME PRLI response. Parameter supported on physical
- * port only - no NPIV support.
+ * processed by the initiator for subsequent NVME FCP IO.
+ * Currently, this feature is not supported on the NVME target
  * Value range is [0,1]. Default value is 0 (disabled).
  */
 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
-	     "Enable First Burst feature on I and T functions.");
+	     "Enable First Burst feature for NVME Initiator.");
 
 /*
 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
@@ -5927,7 +5925,7 @@ lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
  * 1  = MDS Diagnostics enabled
  * Value range is [0,1]. Default value is 0.
  */
-LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
 
 /*
  * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
@@ -6859,10 +6857,31 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
 static void
 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
 {
+	struct lpfc_rport_data *rdata = rport->dd_data;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct lpfc_nvme_rport *nrport = NULL;
+#endif
+
 	if (timeout)
 		rport->dev_loss_tmo = timeout;
 	else
 		rport->dev_loss_tmo = 1;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		dev_info(&rport->dev, "Cannot find remote node to "
+			 "set rport dev loss tmo, port_id x%x\n",
+			 rport->port_id);
+		return;
+	}
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	nrport = lpfc_ndlp_get_nrport(ndlp);
+
+	if (nrport && nrport->remoteport)
+		nvme_fc_set_remoteport_devloss(nrport->remoteport,
+					       rport->dev_loss_tmo);
+#endif
 }
 
 /**
@@ -7059,6 +7078,21 @@ struct fc_function_template lpfc_vport_transport_functions = {
 };
 
 /**
+ * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
+ * Mode
+ * @phba: lpfc_hba pointer.
+ **/
+static void
+lpfc_get_hba_function_mode(struct lpfc_hba *phba)
+{
+	/* If it's a SkyHawk FCoE adapter */
+	if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+		phba->hba_flag |= HBA_FCOE_MODE;
+	else
+		phba->hba_flag &= ~HBA_FCOE_MODE;
+}
+
+/**
  * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
  * @phba: lpfc_hba pointer.
  **/
@@ -7114,8 +7148,18 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	else
 		phba->cfg_poll = lpfc_poll;
 
-	if (phba->cfg_enable_bg)
+	/* Get the function mode */
+	lpfc_get_hba_function_mode(phba);
+
+	/* BlockGuard allowed for FC only. */
+	if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0581 BlockGuard feature not supported\n");
+		/* If set, clear the BlockGuard support param */
+		phba->cfg_enable_bg = 0;
+	} else if (phba->cfg_enable_bg) {
 		phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+	}
 
 	lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
 
@@ -7175,16 +7219,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
 	lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
 
-
-	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
-	 * accommodate 512K and 1M IOs in a single nvme buf and supply
-	 * enough NVME LS iocb buffers for larger connectivity counts.
-	 */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
-		phba->cfg_iocb_cnt = 5;
-	}
-
 	return;
 }
 
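A side note on the lpfc_hdw_show() hunk earlier in this file: sysfs show functions use scnprintf() because, unlike snprintf(), it returns the number of characters actually stored (never more than size - 1), so the result is always safe to return as the show() byte count. A stand-alone comparison of the two return conventions, using a user-space stand-in for the kernel helper:

#include <stdarg.h>
#include <stdio.h>

/* User-space model of the kernel's scnprintf(): returns bytes stored. */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;
	return i;
}

int main(void)
{
	char buf[8];

	/* snprintf() reports the 12 characters it wanted to write... */
	printf("snprintf:  %d\n",
	       snprintf(buf, sizeof(buf), "hdw %08x", 0x1234));
	/* ...scnprintf() reports the 7 that actually fit in buf. */
	printf("scnprintf: %d\n",
	       scnprintf(buf, sizeof(buf), "hdw %08x", 0x1234));
	return 0;
}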
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b7216d694bff..39a736b887b1 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1040,7 +1040,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 				if (!dmabuf) {
 					lpfc_printf_log(phba, KERN_ERR,
 						LOG_LIBDFC, "2616 No dmabuf "
-						"found for iocbq 0x%p\n",
+						"found for iocbq x%px\n",
 						iocbq);
 					kfree(evt_dat->data);
 					kfree(evt_dat);
@@ -1276,9 +1276,7 @@ lpfc_bsg_hba_set_event(struct bsg_job *job)
 	return 0; /* call job done later */
 
 job_error:
-	if (dd_data != NULL)
-		kfree(dd_data);
-
+	kfree(dd_data);
 	job->dd_data = NULL;
 	return rc;
 }
@@ -1571,7 +1569,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
1571 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n", 1569 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1572 icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state); 1570 icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1573 1571
1574 ctiocb->iocb_cmpl = NULL;
1575 ctiocb->iocb_flag |= LPFC_IO_LIBDFC; 1572 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1576 ctiocb->vport = phba->pport; 1573 ctiocb->vport = phba->pport;
1577 ctiocb->context1 = dd_data; 1574 ctiocb->context1 = dd_data;
@@ -5451,7 +5448,9 @@ ras_job_error:
 	bsg_reply->result = rc;
 
 	/* complete the job back to userspace */
-	bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 	return rc;
 }
 
@@ -5530,8 +5529,9 @@ ras_job_error:
 	bsg_reply->result = rc;
 
 	/* complete the job back to userspace */
-	bsg_job_done(job, bsg_reply->result,
-		     bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
 	return rc;
 }
@@ -5591,7 +5591,9 @@ ras_job_error:
 	bsg_reply->result = rc;
 
 	/* complete the job back to userspace */
-	bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
 	return rc;
 }
@@ -5673,7 +5675,9 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
 
 ras_job_error:
 	bsg_reply->result = rc;
-	bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
 	return rc;
 }
@@ -5744,8 +5748,9 @@ lpfc_get_trunk_info(struct bsg_job *job)
 		phba->sli4_hba.link_state.logical_speed / 1000;
 job_error:
 	bsg_reply->result = rc;
-	bsg_job_done(job, bsg_reply->result,
-		     bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 	return rc;
 
 }
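All the lpfc_bsg.c hunks above apply the same guard: bsg_job_done() is called only when the handler returns success, presumably because a nonzero return already reports the failure to the bsg midlayer and completing the job on the error path as well risks operating on it twice. A stubbed user-space model of the resulting shape (all names here are stand-ins for illustration, not the lpfc code):

#include <stdio.h>

struct bsg_job { int result; };

static void bsg_job_done(struct bsg_job *job, int result, int rcv_len)
{
	job->result = result;
	printf("job completed, result=%d rcv_len=%d\n", result, rcv_len);
}

static int handler(struct bsg_job *job, int rc)
{
	/* ... request processing would have set rc ... */
	if (!rc)
		bsg_job_done(job, rc, 0);	/* success: complete here */
	return rc;				/* error: caller completes */
}

int main(void)
{
	struct bsg_job job = { 0 };

	handler(&job, 0);	/* completes the job */
	handler(&job, -5);	/* leaves completion to the caller */
	return 0;
}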
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 68e9f96242d3..b2ad8c750486 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -326,7 +326,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
 void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
 void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
-void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+void lpfc_sli_flush_io_rings(struct lpfc_hba *phba);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
 			     struct lpfc_dmabuf *);
 struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -433,16 +433,6 @@ int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
 int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
 					  uint16_t *, uint16_t *);
 
-/* externs BlockGuard */
-extern char *_dump_buf_data;
-extern unsigned long _dump_buf_data_order;
-extern char *_dump_buf_dif;
-extern unsigned long _dump_buf_dif_order;
-extern spinlock_t _dump_buf_lock;
-extern int _dump_buf_done;
-extern spinlock_t pgcnt_lock;
-extern unsigned int pgcnt;
-
 /* Interface exported by fabric iocb scheduler */
 void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
 void lpfc_fabric_abort_hba(struct lpfc_hba *);
@@ -595,6 +585,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
 			 struct lpfc_sli4_hdw_queue *qp);
 void lpfc_nvme_cmd_template(void);
 void lpfc_nvmet_cmd_template(void);
+void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
 extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
 extern int lpfc_no_hba_reset_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index ec72c39997d2..25e86706e207 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -462,6 +462,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
 	struct lpfc_nodelist *ndlp;
 
 	if ((vport->port_type != LPFC_NPIV_PORT) ||
+	    (fc4_type == FC_TYPE_FCP) ||
 	    !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
 
 		ndlp = lpfc_setup_disc_node(vport, Did);
@@ -480,10 +481,20 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
 
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 					 "0238 Process x%06x NameServer Rsp "
-					 "Data: x%x x%x x%x x%x\n", Did,
+					 "Data: x%x x%x x%x x%x x%x\n", Did,
 					 ndlp->nlp_flag, ndlp->nlp_fc4_type,
-					 vport->fc_flag,
+					 ndlp->nlp_state, vport->fc_flag,
 					 vport->fc_rscn_id_cnt);
+
+			/* if ndlp needs to be discovered and prior
+			 * state of ndlp hit devloss, change state to
+			 * allow rediscovery.
+			 */
+			if (ndlp->nlp_flag & NLP_NPR_2B_DISC &&
+			    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_NPR_NODE);
+			}
 		} else {
 			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
 					      "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
@@ -491,9 +502,9 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
 
 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 					 "0239 Skip x%06x NameServer Rsp "
-					 "Data: x%x x%x\n", Did,
-					 vport->fc_flag,
-					 vport->fc_rscn_id_cnt);
+					 "Data: x%x x%x %p\n",
+					 Did, vport->fc_flag,
+					 vport->fc_rscn_id_cnt, ndlp);
 		}
 	} else {
 		if (!(vport->fc_flag & FC_RSCN_MODE) ||
@@ -751,9 +762,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
751 if (CTrsp->CommandResponse.bits.CmdRsp == 762 if (CTrsp->CommandResponse.bits.CmdRsp ==
752 cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { 763 cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
753 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 764 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
754 "0208 NameServer Rsp Data: x%x x%x\n", 765 "0208 NameServer Rsp Data: x%x x%x "
766 "sz x%x\n",
755 vport->fc_flag, 767 vport->fc_flag,
756 CTreq->un.gid.Fc4Type); 768 CTreq->un.gid.Fc4Type,
769 irsp->un.genreq64.bdl.bdeSize);
757 770
758 lpfc_ns_rsp(vport, 771 lpfc_ns_rsp(vport,
759 outp, 772 outp,
@@ -814,6 +827,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
814 } 827 }
815 vport->gidft_inp--; 828 vport->gidft_inp--;
816 } 829 }
830
831 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
832 "4216 GID_FT cmpl inp %d disc %d\n",
833 vport->gidft_inp, vport->num_disc_nodes);
834
817 /* Link up / RSCN discovery */ 835 /* Link up / RSCN discovery */
818 if ((vport->num_disc_nodes == 0) && 836 if ((vport->num_disc_nodes == 0) &&
819 (vport->gidft_inp == 0)) { 837 (vport->gidft_inp == 0)) {
@@ -1209,14 +1227,34 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1209 if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) 1227 if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
1210 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 1228 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
1211 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1229 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1212 "3064 Setting ndlp %p, DID x%06x with " 1230 "3064 Setting ndlp x%px, DID x%06x "
1213 "FC4 x%08x, Data: x%08x x%08x\n", 1231 "with FC4 x%08x, Data: x%08x x%08x "
1232 "%d\n",
1214 ndlp, did, ndlp->nlp_fc4_type, 1233 ndlp, did, ndlp->nlp_fc4_type,
1215 FC_TYPE_FCP, FC_TYPE_NVME); 1234 FC_TYPE_FCP, FC_TYPE_NVME,
1216 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1235 ndlp->nlp_state);
1217 1236
1218 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 1237 if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
1219 lpfc_issue_els_prli(vport, ndlp, 0); 1238 ndlp->nlp_fc4_type) {
1239 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1240
1241 lpfc_nlp_set_state(vport, ndlp,
1242 NLP_STE_PRLI_ISSUE);
1243 lpfc_issue_els_prli(vport, ndlp, 0);
1244 } else if (!ndlp->nlp_fc4_type) {
1245 /* If fc4 type is still unknown, then LOGO */
1246 lpfc_printf_vlog(vport, KERN_INFO,
1247 LOG_DISCOVERY,
1248 "6443 Sending LOGO ndlp x%px,"
1249 "DID x%06x with fc4_type: "
1250 "x%08x, state: %d\n",
1251 ndlp, did, ndlp->nlp_fc4_type,
1252 ndlp->nlp_state);
1253 lpfc_issue_els_logo(vport, ndlp, 0);
1254 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1255 lpfc_nlp_set_state(vport, ndlp,
1256 NLP_STE_NPR_NODE);
1257 }
1220 } 1258 }
1221 } else 1259 } else
1222 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1260 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -2515,7 +2553,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
2515 ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; 2553 ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
2516 2554
2517 hsp = (struct serv_parm *)&vport->fc_sparam; 2555 hsp = (struct serv_parm *)&vport->fc_sparam;
2518 ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) | 2556 ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
2519 (uint32_t) hsp->cmn.bbRcvSizeLsb; 2557 (uint32_t) hsp->cmn.bbRcvSizeLsb;
2520 ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt); 2558 ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
2521 size = FOURBYTES + sizeof(uint32_t); 2559 size = FOURBYTES + sizeof(uint32_t);
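The 0x0F mask in the max-frame attribute fix above is worth spelling out: the FC common service parameters pack the buffer-to-buffer receive data field size into a 12-bit field, and the upper nibble of the MSB byte carries unrelated feature bits, so using all eight MSB bits inflated the reported frame size. A minimal standalone sketch of the corrected extraction (hypothetical helper, not part of lpfc):

#include <stdint.h>

/* Combine the two service-parameter bytes into the 12-bit BB receive
 * data field size; the upper nibble of the MSB holds unrelated flag
 * bits and must be masked off. */
static uint16_t bb_rcv_size(uint8_t msb, uint8_t lsb)
{
	return (uint16_t)(((uint16_t)(msb & 0x0F) << 8) | lsb);
}

/* e.g. msb = 0x88, lsb = 0x40: the masked result is 0x840 (2112 bytes),
 * where the unmasked computation would have reported 0x8840. */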
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 1ee857d9d165..8d34be60d379 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -361,7 +361,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
361 phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff); 361 phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
362 if (phys == le32_to_cpu(hbqe->bde.addrLow)) { 362 if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
363 len += scnprintf(buf+len, size-len, 363 len += scnprintf(buf+len, size-len,
364 "Buf%d: %p %06x\n", i, 364 "Buf%d: x%px %06x\n", i,
365 hbq_buf->dbuf.virt, hbq_buf->tag); 365 hbq_buf->dbuf.virt, hbq_buf->tag);
366 found = 1; 366 found = 1;
367 break; 367 break;
@@ -416,8 +416,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
416 qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool]; 416 qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
417 417
418 len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i); 418 len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
419 spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); 419 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
420 spin_lock(&qp->abts_nvme_buf_list_lock);
421 spin_lock(&qp->io_buf_list_get_lock); 420 spin_lock(&qp->io_buf_list_get_lock);
422 spin_lock(&qp->io_buf_list_put_lock); 421 spin_lock(&qp->io_buf_list_put_lock);
423 out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs + 422 out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
@@ -430,8 +429,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
430 qp->abts_nvme_io_bufs, out); 429 qp->abts_nvme_io_bufs, out);
431 spin_unlock(&qp->io_buf_list_put_lock); 430 spin_unlock(&qp->io_buf_list_put_lock);
432 spin_unlock(&qp->io_buf_list_get_lock); 431 spin_unlock(&qp->io_buf_list_get_lock);
433 spin_unlock(&qp->abts_nvme_buf_list_lock); 432 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
434 spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
435 433
436 lpfc_debugfs_last_xripool++; 434 lpfc_debugfs_last_xripool++;
437 if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue) 435 if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
@@ -533,9 +531,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
533 continue; 531 continue;
534 pbl_pool = &multixri_pool->pbl_pool; 532 pbl_pool = &multixri_pool->pbl_pool;
535 pvt_pool = &multixri_pool->pvt_pool; 533 pvt_pool = &multixri_pool->pvt_pool;
536 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 534 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
537 if (qp->nvme_wq)
538 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
539 535
540 scnprintf(tmp, sizeof(tmp), 536 scnprintf(tmp, sizeof(tmp),
541 "%03d: %4d %4d %4d %4d | %10d %10d ", 537 "%03d: %4d %4d %4d %4d | %10d %10d ",
@@ -2166,89 +2162,6 @@ out:
2166 return rc; 2162 return rc;
2167} 2163}
2168 2164
2169static int
2170lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
2171{
2172 struct lpfc_debug *debug;
2173 int rc = -ENOMEM;
2174
2175 if (!_dump_buf_data)
2176 return -EBUSY;
2177
2178 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
2179 if (!debug)
2180 goto out;
2181
2182 /* Round to page boundary */
2183 pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
2184 __func__, _dump_buf_data);
2185 debug->buffer = _dump_buf_data;
2186 if (!debug->buffer) {
2187 kfree(debug);
2188 goto out;
2189 }
2190
2191 debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
2192 file->private_data = debug;
2193
2194 rc = 0;
2195out:
2196 return rc;
2197}
2198
2199static int
2200lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
2201{
2202 struct lpfc_debug *debug;
2203 int rc = -ENOMEM;
2204
2205 if (!_dump_buf_dif)
2206 return -EBUSY;
2207
2208 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
2209 if (!debug)
2210 goto out;
2211
2212 /* Round to page boundary */
2213 pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
2214 __func__, _dump_buf_dif, file);
2215 debug->buffer = _dump_buf_dif;
2216 if (!debug->buffer) {
2217 kfree(debug);
2218 goto out;
2219 }
2220
2221 debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
2222 file->private_data = debug;
2223
2224 rc = 0;
2225out:
2226 return rc;
2227}
2228
2229static ssize_t
2230lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
2231 size_t nbytes, loff_t *ppos)
2232{
2233 /*
2234 * The Data/DIF buffers only save one failing IO
2235 * The write op is used as a reset mechanism after an IO has
2236 * already been saved, so the next one can be saved
2237 */
2238 spin_lock(&_dump_buf_lock);
2239
2240 memset((void *)_dump_buf_data, 0,
2241 ((1 << PAGE_SHIFT) << _dump_buf_data_order));
2242 memset((void *)_dump_buf_dif, 0,
2243 ((1 << PAGE_SHIFT) << _dump_buf_dif_order));
2244
2245 _dump_buf_done = 0;
2246
2247 spin_unlock(&_dump_buf_lock);
2248
2249 return nbytes;
2250}
2251
2252static ssize_t 2165static ssize_t
2253lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, 2166lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
2254 size_t nbytes, loff_t *ppos) 2167 size_t nbytes, loff_t *ppos)
@@ -2461,17 +2374,6 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
2461 return 0; 2374 return 0;
2462} 2375}
2463 2376
2464static int
2465lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
2466{
2467 struct lpfc_debug *debug = file->private_data;
2468
2469 debug->buffer = NULL;
2470 kfree(debug);
2471
2472 return 0;
2473}
2474
2475/** 2377/**
2476 * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics 2378 * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics
2477 * @file: The file pointer to read from. 2379 * @file: The file pointer to read from.
@@ -3786,23 +3688,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
3786 int qidx; 3688 int qidx;
3787 3689
3788 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 3690 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
3789 qp = phba->sli4_hba.hdwq[qidx].fcp_wq; 3691 qp = phba->sli4_hba.hdwq[qidx].io_wq;
3790 if (qp->assoc_qid != cq_id) 3692 if (qp->assoc_qid != cq_id)
3791 continue; 3693 continue;
3792 *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); 3694 *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
3793 if (*len >= max_cnt) 3695 if (*len >= max_cnt)
3794 return 1; 3696 return 1;
3795 } 3697 }
3796 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
3797 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
3798 qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
3799 if (qp->assoc_qid != cq_id)
3800 continue;
3801 *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
3802 if (*len >= max_cnt)
3803 return 1;
3804 }
3805 }
3806 return 0; 3698 return 0;
3807} 3699}
3808 3700
@@ -3868,9 +3760,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
3868 struct lpfc_queue *qp; 3760 struct lpfc_queue *qp;
3869 int rc; 3761 int rc;
3870 3762
3871 qp = phba->sli4_hba.hdwq[eqidx].fcp_cq; 3763 qp = phba->sli4_hba.hdwq[eqidx].io_cq;
3872 3764
3873 *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len); 3765 *len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);
3874 3766
3875 /* Reset max counter */ 3767 /* Reset max counter */
3876 qp->CQ_max_cqe = 0; 3768 qp->CQ_max_cqe = 0;
@@ -3878,28 +3770,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
3878 if (*len >= max_cnt) 3770 if (*len >= max_cnt)
3879 return 1; 3771 return 1;
3880 3772
3881 rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len, 3773 rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
3882 max_cnt, qp->queue_id); 3774 max_cnt, qp->queue_id);
3883 if (rc) 3775 if (rc)
3884 return 1; 3776 return 1;
3885 3777
3886 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
3887 qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
3888
3889 *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
3890
3891 /* Reset max counter */
3892 qp->CQ_max_cqe = 0;
3893
3894 if (*len >= max_cnt)
3895 return 1;
3896
3897 rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
3898 max_cnt, qp->queue_id);
3899 if (rc)
3900 return 1;
3901 }
3902
3903 if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) { 3778 if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
3904 /* NVMET CQset */ 3779 /* NVMET CQset */
3905 qp = phba->sli4_hba.nvmet_cqset[eqidx]; 3780 qp = phba->sli4_hba.nvmet_cqset[eqidx];
@@ -4348,7 +4223,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
4348 if (phba->sli4_hba.hdwq) { 4223 if (phba->sli4_hba.hdwq) {
4349 for (qidx = 0; qidx < phba->cfg_hdw_queue; 4224 for (qidx = 0; qidx < phba->cfg_hdw_queue;
4350 qidx++) { 4225 qidx++) {
4351 qp = phba->sli4_hba.hdwq[qidx].fcp_cq; 4226 qp = phba->sli4_hba.hdwq[qidx].io_cq;
4352 if (qp && qp->queue_id == queid) { 4227 if (qp && qp->queue_id == queid) {
4353 /* Sanity check */ 4228 /* Sanity check */
4354 rc = lpfc_idiag_que_param_check( 4229 rc = lpfc_idiag_que_param_check(
@@ -4360,22 +4235,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
4360 } 4235 }
4361 } 4236 }
4362 } 4237 }
4363 /* NVME complete queue */
4364 if (phba->sli4_hba.hdwq) {
4365 qidx = 0;
4366 do {
4367 qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
4368 if (qp && qp->queue_id == queid) {
4369 /* Sanity check */
4370 rc = lpfc_idiag_que_param_check(
4371 qp, index, count);
4372 if (rc)
4373 goto error_out;
4374 idiag.ptr_private = qp;
4375 goto pass_check;
4376 }
4377 } while (++qidx < phba->cfg_hdw_queue);
4378 }
4379 goto error_out; 4238 goto error_out;
4380 break; 4239 break;
4381 case LPFC_IDIAG_MQ: 4240 case LPFC_IDIAG_MQ:
@@ -4419,20 +4278,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
4419 if (phba->sli4_hba.hdwq) { 4278 if (phba->sli4_hba.hdwq) {
4420 /* FCP/SCSI work queue */ 4279 /* FCP/SCSI work queue */
4421 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 4280 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
4422 qp = phba->sli4_hba.hdwq[qidx].fcp_wq; 4281 qp = phba->sli4_hba.hdwq[qidx].io_wq;
4423 if (qp && qp->queue_id == queid) {
4424 /* Sanity check */
4425 rc = lpfc_idiag_que_param_check(
4426 qp, index, count);
4427 if (rc)
4428 goto error_out;
4429 idiag.ptr_private = qp;
4430 goto pass_check;
4431 }
4432 }
4433 /* NVME work queue */
4434 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
4435 qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
4436 if (qp && qp->queue_id == queid) { 4282 if (qp && qp->queue_id == queid) {
4437 /* Sanity check */ 4283 /* Sanity check */
4438 rc = lpfc_idiag_que_param_check( 4284 rc = lpfc_idiag_que_param_check(
@@ -5508,26 +5354,6 @@ static const struct file_operations lpfc_debugfs_op_cpucheck = {
5508 .release = lpfc_debugfs_release, 5354 .release = lpfc_debugfs_release,
5509}; 5355};
5510 5356
5511#undef lpfc_debugfs_op_dumpData
5512static const struct file_operations lpfc_debugfs_op_dumpData = {
5513 .owner = THIS_MODULE,
5514 .open = lpfc_debugfs_dumpData_open,
5515 .llseek = lpfc_debugfs_lseek,
5516 .read = lpfc_debugfs_read,
5517 .write = lpfc_debugfs_dumpDataDif_write,
5518 .release = lpfc_debugfs_dumpDataDif_release,
5519};
5520
5521#undef lpfc_debugfs_op_dumpDif
5522static const struct file_operations lpfc_debugfs_op_dumpDif = {
5523 .owner = THIS_MODULE,
5524 .open = lpfc_debugfs_dumpDif_open,
5525 .llseek = lpfc_debugfs_lseek,
5526 .read = lpfc_debugfs_read,
5527 .write = lpfc_debugfs_dumpDataDif_write,
5528 .release = lpfc_debugfs_dumpDataDif_release,
5529};
5530
5531#undef lpfc_debugfs_op_dif_err 5357#undef lpfc_debugfs_op_dif_err
5532static const struct file_operations lpfc_debugfs_op_dif_err = { 5358static const struct file_operations lpfc_debugfs_op_dif_err = {
5533 .owner = THIS_MODULE, 5359 .owner = THIS_MODULE,
@@ -5924,20 +5750,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
5924 } else 5750 } else
5925 phba->debug_dumpHostSlim = NULL; 5751 phba->debug_dumpHostSlim = NULL;
5926 5752
5927 /* Setup dumpData */
5928 snprintf(name, sizeof(name), "dumpData");
5929 phba->debug_dumpData =
5930 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
5931 phba->hba_debugfs_root,
5932 phba, &lpfc_debugfs_op_dumpData);
5933
5934 /* Setup dumpDif */
5935 snprintf(name, sizeof(name), "dumpDif");
5936 phba->debug_dumpDif =
5937 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
5938 phba->hba_debugfs_root,
5939 phba, &lpfc_debugfs_op_dumpDif);
5940
5941 /* Setup DIF Error Injections */ 5753 /* Setup DIF Error Injections */
5942 snprintf(name, sizeof(name), "InjErrLBA"); 5754 snprintf(name, sizeof(name), "InjErrLBA");
5943 phba->debug_InjErrLBA = 5755 phba->debug_InjErrLBA =
@@ -6315,12 +6127,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
6315 debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ 6127 debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
6316 phba->debug_dumpHostSlim = NULL; 6128 phba->debug_dumpHostSlim = NULL;
6317 6129
6318 debugfs_remove(phba->debug_dumpData); /* dumpData */
6319 phba->debug_dumpData = NULL;
6320
6321 debugfs_remove(phba->debug_dumpDif); /* dumpDif */
6322 phba->debug_dumpDif = NULL;
6323
6324 debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ 6130 debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
6325 phba->debug_InjErrLBA = NULL; 6131 phba->debug_InjErrLBA = NULL;
6326 6132
@@ -6442,12 +6248,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
6442 lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0); 6248 lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
6443 6249
6444 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) 6250 for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
6445 lpfc_debug_dump_wq(phba, DUMP_FCP, idx); 6251 lpfc_debug_dump_wq(phba, DUMP_IO, idx);
6446
6447 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6448 for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
6449 lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
6450 }
6451 6252
6452 lpfc_debug_dump_hdr_rq(phba); 6253 lpfc_debug_dump_hdr_rq(phba);
6453 lpfc_debug_dump_dat_rq(phba); 6254 lpfc_debug_dump_dat_rq(phba);
@@ -6459,12 +6260,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
6459 lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0); 6260 lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
6460 6261
6461 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) 6262 for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
6462 lpfc_debug_dump_cq(phba, DUMP_FCP, idx); 6263 lpfc_debug_dump_cq(phba, DUMP_IO, idx);
6463
6464 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6465 for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
6466 lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
6467 }
6468 6264
6469 /* 6265 /*
6470 * Dump Event Queues (EQs) 6266 * Dump Event Queues (EQs)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 34070874616d..20f2537af511 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -291,8 +291,7 @@ struct lpfc_idiag {
291#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192 291#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
292 292
293enum { 293enum {
294 DUMP_FCP, 294 DUMP_IO,
295 DUMP_NVME,
296 DUMP_MBX, 295 DUMP_MBX,
297 DUMP_ELS, 296 DUMP_ELS,
298 DUMP_NVMELS, 297 DUMP_NVMELS,
@@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
415 struct lpfc_queue *wq; 414 struct lpfc_queue *wq;
416 char *qtypestr; 415 char *qtypestr;
417 416
418 if (qtype == DUMP_FCP) { 417 if (qtype == DUMP_IO) {
419 wq = phba->sli4_hba.hdwq[wqidx].fcp_wq; 418 wq = phba->sli4_hba.hdwq[wqidx].io_wq;
420 qtypestr = "FCP"; 419 qtypestr = "IO";
421 } else if (qtype == DUMP_NVME) {
422 wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
423 qtypestr = "NVME";
424 } else if (qtype == DUMP_MBX) { 420 } else if (qtype == DUMP_MBX) {
425 wq = phba->sli4_hba.mbx_wq; 421 wq = phba->sli4_hba.mbx_wq;
426 qtypestr = "MBX"; 422 qtypestr = "MBX";
@@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
433 } else 429 } else
434 return; 430 return;
435 431
436 if (qtype == DUMP_FCP || qtype == DUMP_NVME) 432 if (qtype == DUMP_IO)
437 pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n", 433 pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
438 qtypestr, wqidx, wq->queue_id); 434 qtypestr, wqidx, wq->queue_id);
439 else 435 else
@@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
459 char *qtypestr; 455 char *qtypestr;
460 int eqidx; 456 int eqidx;
461 457
462 /* fcp/nvme wq and cq are 1:1, thus same indexes */ 458 /* io wq and cq are 1:1, thus same indexes */
463 eq = NULL; 459 eq = NULL;
464 460
465 if (qtype == DUMP_FCP) { 461 if (qtype == DUMP_IO) {
466 wq = phba->sli4_hba.hdwq[wqidx].fcp_wq; 462 wq = phba->sli4_hba.hdwq[wqidx].io_wq;
467 cq = phba->sli4_hba.hdwq[wqidx].fcp_cq; 463 cq = phba->sli4_hba.hdwq[wqidx].io_cq;
468 qtypestr = "FCP"; 464 qtypestr = "IO";
469 } else if (qtype == DUMP_NVME) {
470 wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
471 cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
472 qtypestr = "NVME";
473 } else if (qtype == DUMP_MBX) { 465 } else if (qtype == DUMP_MBX) {
474 wq = phba->sli4_hba.mbx_wq; 466 wq = phba->sli4_hba.mbx_wq;
475 cq = phba->sli4_hba.mbx_cq; 467 cq = phba->sli4_hba.mbx_cq;
@@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
496 eq = phba->sli4_hba.hdwq[0].hba_eq; 488 eq = phba->sli4_hba.hdwq[0].hba_eq;
497 } 489 }
498 490
499 if (qtype == DUMP_FCP || qtype == DUMP_NVME) 491 if (qtype == DUMP_IO)
500 pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" 492 pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
501 "->EQ[Idx:%d|Qid:%d]:\n", 493 "->EQ[Idx:%d|Qid:%d]:\n",
502 qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id, 494 qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
@@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
572 int wq_idx; 564 int wq_idx;
573 565
574 for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++) 566 for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
575 if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid) 567 if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
576 break; 568 break;
577 if (wq_idx < phba->cfg_hdw_queue) { 569 if (wq_idx < phba->cfg_hdw_queue) {
578 pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); 570 pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
579 lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq); 571 lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq);
580 return;
581 }
582
583 for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
584 if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
585 break;
586 if (wq_idx < phba->cfg_hdw_queue) {
587 pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
588 lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
589 return; 572 return;
590 } 573 }
591 574
@@ -654,22 +637,12 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
654 int cq_idx; 637 int cq_idx;
655 638
656 for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++) 639 for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
657 if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid) 640 if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
658 break;
659
660 if (cq_idx < phba->cfg_hdw_queue) {
661 pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
662 lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
663 return;
664 }
665
666 for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
667 if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
668 break; 641 break;
669 642
670 if (cq_idx < phba->cfg_hdw_queue) { 643 if (cq_idx < phba->cfg_hdw_queue) {
671 pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); 644 pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
672 lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq); 645 lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
673 return; 646 return;
674 } 647 }
675 648
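The lpfc_debugfs.h hunks above show the payoff of merging fcp_wq/fcp_cq and nvme_wq/nvme_cq into a single io_wq/io_cq per hardware queue: every dual lookup loop collapses to one pass. The resulting pattern, as an illustrative fragment using the field names from the diff (not a new lpfc API):

static struct lpfc_queue *find_io_wq_by_qid(struct lpfc_hba *phba, int qid)
{
	int idx;

	/* one unified WQ per hardware queue after the merge */
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
		if (phba->sli4_hba.hdwq[idx].io_wq->queue_id == qid)
			return phba->sli4_hba.hdwq[idx].io_wq;

	return NULL;	/* qid belongs to MBX/ELS/NVMELS or is unknown */
}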
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1c89c9f314fa..482e4a888dae 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -112,6 +112,8 @@ struct lpfc_nodelist {
112 uint8_t nlp_retry; /* used for ELS retries */ 112 uint8_t nlp_retry; /* used for ELS retries */
113 uint8_t nlp_fcp_info; /* class info, bits 0-3 */ 113 uint8_t nlp_fcp_info; /* class info, bits 0-3 */
114#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ 114#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
115 u8 nlp_nvme_info; /* NVME NSLER Support */
116#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */
115 117
116 uint16_t nlp_usg_map; /* ndlp management usage bitmap */ 118 uint16_t nlp_usg_map; /* ndlp management usage bitmap */
117#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */ 119#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
@@ -157,6 +159,7 @@ struct lpfc_node_rrq {
157/* Defines for nlp_flag (uint32) */ 159/* Defines for nlp_flag (uint32) */
158#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ 160#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
159#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ 161#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
162#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */
160#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ 163#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */
161#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ 164#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
162#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ 165#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f12780f4cfbb..d5303994bfd6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1052,17 +1052,18 @@ stop_rr_fcf_flogi:
1052 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 1052 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1053 goto out; 1053 goto out;
1054 1054
1055 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
1056 "0150 FLOGI failure Status:x%x/x%x "
1057 "xri x%x TMO:x%x\n",
1058 irsp->ulpStatus, irsp->un.ulpWord[4],
1059 cmdiocb->sli4_xritag, irsp->ulpTimeout);
1060
1055 /* If this is not a loop open failure, bail out */ 1061 /* If this is not a loop open failure, bail out */
1056 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 1062 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1057 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 1063 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1058 IOERR_LOOP_OPEN_FAILURE))) 1064 IOERR_LOOP_OPEN_FAILURE)))
1059 goto flogifail; 1065 goto flogifail;
1060 1066
1061 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
1062 "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
1063 irsp->ulpStatus, irsp->un.ulpWord[4],
1064 cmdiocb->sli4_xritag, irsp->ulpTimeout);
1065
1066 /* FLOGI failed, so there is no fabric */ 1067 /* FLOGI failed, so there is no fabric */
1067 spin_lock_irq(shost->host_lock); 1068 spin_lock_irq(shost->host_lock);
1068 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 1069 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1207,6 +1208,39 @@ out:
1207} 1208}
1208 1209
1209/** 1210/**
1211 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
1212 * aborted during a link down
1213 * @phba: pointer to lpfc hba data structure.
1214 * @cmdiocb: pointer to lpfc command iocb data structure.
1215 * @rspiocb: pointer to lpfc response iocb data structure.
1216 *
1217 */
1218static void
1219lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1220 struct lpfc_iocbq *rspiocb)
1221{
1222 IOCB_t *irsp;
1223 uint32_t *pcmd;
1224 uint32_t cmd;
1225
1226 pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
1227 cmd = *pcmd;
1228 irsp = &rspiocb->iocb;
1229
1230 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1231 "6445 ELS completes after LINK_DOWN: "
1232 " Status %x/%x cmd x%x flg x%x\n",
1233 irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
1234 cmdiocb->iocb_flag);
1235
1236 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
1237 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
1238 atomic_dec(&phba->fabric_iocb_count);
1239 }
1240 lpfc_els_free_iocb(phba, cmdiocb);
1241}
1242
1243/**
1210 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport 1244 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1211 * @vport: pointer to a host virtual N_Port data structure. 1245 * @vport: pointer to a host virtual N_Port data structure.
1212 * @ndlp: pointer to a node-list data structure. 1246 * @ndlp: pointer to a node-list data structure.
@@ -2107,7 +2141,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2107 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2141 !(vport->fc_flag & FC_OFFLINE_MODE)) {
2108 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2142 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2109 "4110 Issue PLOGI x%x deferred " 2143 "4110 Issue PLOGI x%x deferred "
2110 "on NPort x%x rpi x%x Data: %p\n", 2144 "on NPort x%x rpi x%x Data: x%px\n",
2111 ndlp->nlp_defer_did, ndlp->nlp_DID, 2145 ndlp->nlp_defer_did, ndlp->nlp_DID,
2112 ndlp->nlp_rpi, ndlp); 2146 ndlp->nlp_rpi, ndlp);
2113 2147
@@ -2401,6 +2435,10 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2401 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2435 npr_nvme = (struct lpfc_nvme_prli *)pcmd;
2402 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2436 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
2403 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2437 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
2438 if (phba->nsler) {
2439 bf_set(prli_nsler, npr_nvme, 1);
2440 bf_set(prli_conf, npr_nvme, 1);
2441 }
2404 2442
2405 /* Only initiators request first burst. */ 2443 /* Only initiators request first burst. */
2406 if ((phba->cfg_nvme_enable_fb) && 2444 if ((phba->cfg_nvme_enable_fb) &&
@@ -4203,7 +4241,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4203 mempool_free(pmb, phba->mbox_mem_pool); 4241 mempool_free(pmb, phba->mbox_mem_pool);
4204 if (ndlp) { 4242 if (ndlp) {
4205 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 4243 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4206 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n", 4244 "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
4207 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 4245 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4208 kref_read(&ndlp->kref), 4246 kref_read(&ndlp->kref),
4209 ndlp->nlp_usg_map, ndlp); 4247 ndlp->nlp_usg_map, ndlp);
@@ -5634,16 +5672,16 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
5634 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5672 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5635 if (vport->fc_flag & FC_FABRIC) { 5673 if (vport->fc_flag & FC_FABRIC) {
5636 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 5674 memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
5637 sizeof(desc->port_names.wwnn)); 5675 sizeof(desc->port_names.wwnn));
5638 5676
5639 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 5677 memcpy(desc->port_names.wwpn, &vport->fabric_portname,
5640 sizeof(desc->port_names.wwpn)); 5678 sizeof(desc->port_names.wwpn));
5641 } else { /* Point to Point */ 5679 } else { /* Point to Point */
5642 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 5680 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
5643 sizeof(desc->port_names.wwnn)); 5681 sizeof(desc->port_names.wwnn));
5644 5682
5645 memcpy(desc->port_names.wwnn, &ndlp->nlp_portname, 5683 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
5646 sizeof(desc->port_names.wwpn)); 5684 sizeof(desc->port_names.wwpn));
5647 } 5685 }
5648 5686
5649 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5687 desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -6327,7 +6365,11 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
6327 continue; 6365 continue;
6328 } 6366 }
6329 6367
6330 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) 6368 /* Check to see if we need to NVME rescan this target
6369 * remoteport.
6370 */
6371 if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
6372 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
6331 lpfc_nvme_rescan_port(vport, ndlp); 6373 lpfc_nvme_rescan_port(vport, ndlp);
6332 6374
6333 lpfc_disc_state_machine(vport, ndlp, NULL, 6375 lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -6441,7 +6483,11 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6441 *lp, vport->fc_flag, payload_len); 6483 *lp, vport->fc_flag, payload_len);
6442 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6484 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6443 6485
6444 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) 6486 /* Check to see if we need to NVME rescan this target
6487 * remoteport.
6488 */
6489 if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
6490 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
6445 lpfc_nvme_rescan_port(vport, ndlp); 6491 lpfc_nvme_rescan_port(vport, ndlp);
6446 return 0; 6492 return 0;
6447 } 6493 }
@@ -7960,18 +8006,40 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
7960 if (phba->sli_rev == LPFC_SLI_REV4) 8006 if (phba->sli_rev == LPFC_SLI_REV4)
7961 spin_lock(&pring->ring_lock); 8007 spin_lock(&pring->ring_lock);
7962 8008
8009 /* First we need to issue aborts to outstanding cmds on txcmpl */
7963 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8010 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
7964 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 8011 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
7965 continue; 8012 continue;
7966 8013
7967 if (piocb->vport != vport) 8014 if (piocb->vport != vport)
7968 continue; 8015 continue;
7969 list_add_tail(&piocb->dlist, &abort_list); 8016
8017 /* On the ELS ring we can have ELS_REQUESTs or
8018 * GEN_REQUESTs waiting for a response.
8019 */
8020 cmd = &piocb->iocb;
8021 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
8022 list_add_tail(&piocb->dlist, &abort_list);
8023
8024 /* If the link is down when flushing ELS commands
8025 * the firmware will not complete them till after
8026 * the link comes back up. This may confuse
8027 * discovery for the new link up, so we need to
8028 * change the compl routine to just clean up the iocb
8029 * and avoid any retry logic.
8030 */
8031 if (phba->link_state == LPFC_LINK_DOWN)
8032 piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
8033 }
8034 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
8035 list_add_tail(&piocb->dlist, &abort_list);
7970 } 8036 }
8037
7971 if (phba->sli_rev == LPFC_SLI_REV4) 8038 if (phba->sli_rev == LPFC_SLI_REV4)
7972 spin_unlock(&pring->ring_lock); 8039 spin_unlock(&pring->ring_lock);
7973 spin_unlock_irq(&phba->hbalock); 8040 spin_unlock_irq(&phba->hbalock);
7974 /* Abort each iocb on the aborted list and remove the dlist links. */ 8041
8042 /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
7975 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 8043 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
7976 spin_lock_irq(&phba->hbalock); 8044 spin_lock_irq(&phba->hbalock);
7977 list_del_init(&piocb->dlist); 8045 list_del_init(&piocb->dlist);
@@ -7987,6 +8055,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
7987 if (phba->sli_rev == LPFC_SLI_REV4) 8055 if (phba->sli_rev == LPFC_SLI_REV4)
7988 spin_lock(&pring->ring_lock); 8056 spin_lock(&pring->ring_lock);
7989 8057
8058 /* No need to abort the txq list,
8059 * just queue them up for lpfc_sli_cancel_iocbs
8060 */
7990 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 8061 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
7991 cmd = &piocb->iocb; 8062 cmd = &piocb->iocb;
7992 8063
@@ -8007,11 +8078,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
8007 list_del_init(&piocb->list); 8078 list_del_init(&piocb->list);
8008 list_add_tail(&piocb->list, &abort_list); 8079 list_add_tail(&piocb->list, &abort_list);
8009 } 8080 }
8081
8082 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
8083 if (vport == phba->pport) {
8084 list_for_each_entry_safe(piocb, tmp_iocb,
8085 &phba->fabric_iocb_list, list) {
8086 cmd = &piocb->iocb;
8087 list_del_init(&piocb->list);
8088 list_add_tail(&piocb->list, &abort_list);
8089 }
8090 }
8091
8010 if (phba->sli_rev == LPFC_SLI_REV4) 8092 if (phba->sli_rev == LPFC_SLI_REV4)
8011 spin_unlock(&pring->ring_lock); 8093 spin_unlock(&pring->ring_lock);
8012 spin_unlock_irq(&phba->hbalock); 8094 spin_unlock_irq(&phba->hbalock);
8013 8095
8014 /* Cancell all the IOCBs from the completions list */ 8096 /* Cancel all the IOCBs from the completions list */
8015 lpfc_sli_cancel_iocbs(phba, &abort_list, 8097 lpfc_sli_cancel_iocbs(phba, &abort_list,
8016 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 8098 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
8017 8099
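One detail in the lpfc_els_flush_cmd() change deserves emphasis: rather than letting aborted ELS requests complete through their normal handlers, which would run retry logic against a link that no longer exists, the flush retargets iocb_cmpl to lpfc_cmpl_els_link_down while the link is down. Reduced to a standalone sketch with hypothetical names, the pattern looks like this:

#include <stddef.h>

struct request {
	void (*complete)(struct request *req, int status);
};

/* Late completions become inert: clean up only, no retry. */
static void complete_discard(struct request *req, int status)
{
	(void)req;
	(void)status;
}

/* Retarget in-flight completions during teardown so responses that
 * arrive after the next link-up cannot re-enter the retry/discovery
 * state machine. */
static void flush_inflight(struct request **inflight, size_t n, int link_down)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (link_down)
			inflight[i]->complete = complete_discard;
}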
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 28ecaa7fc715..749286acdc17 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -118,6 +118,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
118 struct lpfc_work_evt *evtp; 118 struct lpfc_work_evt *evtp;
119 int put_node; 119 int put_node;
120 int put_rport; 120 int put_rport;
121 unsigned long iflags;
121 122
122 rdata = rport->dd_data; 123 rdata = rport->dd_data;
123 ndlp = rdata->pnode; 124 ndlp = rdata->pnode;
@@ -132,7 +133,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
132 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); 133 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
133 134
134 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 135 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
135 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n", 136 "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
136 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); 137 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
137 138
138 /* Don't defer this if we are in the process of deleting the vport 139 /* Don't defer this if we are in the process of deleting the vport
@@ -170,22 +171,22 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
170 } 171 }
171 172
172 shost = lpfc_shost_from_vport(vport); 173 shost = lpfc_shost_from_vport(vport);
173 spin_lock_irq(shost->host_lock); 174 spin_lock_irqsave(shost->host_lock, iflags);
174 ndlp->nlp_flag |= NLP_IN_DEV_LOSS; 175 ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
175 spin_unlock_irq(shost->host_lock); 176 spin_unlock_irqrestore(shost->host_lock, iflags);
176 177
177 /* We need to hold the node by incrementing the reference 178 /* We need to hold the node by incrementing the reference
178 * count until this queued work is done 179 * count until this queued work is done
179 */ 180 */
180 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 181 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
181 182
182 spin_lock_irq(&phba->hbalock); 183 spin_lock_irqsave(&phba->hbalock, iflags);
183 if (evtp->evt_arg1) { 184 if (evtp->evt_arg1) {
184 evtp->evt = LPFC_EVT_DEV_LOSS; 185 evtp->evt = LPFC_EVT_DEV_LOSS;
185 list_add_tail(&evtp->evt_listp, &phba->work_list); 186 list_add_tail(&evtp->evt_listp, &phba->work_list);
186 lpfc_worker_wake_up(phba); 187 lpfc_worker_wake_up(phba);
187 } 188 }
188 spin_unlock_irq(&phba->hbalock); 189 spin_unlock_irqrestore(&phba->hbalock, iflags);
189 190
190 return; 191 return;
191} 192}
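A conversion repeated throughout lpfc_hbadisc.c in this series: spin_lock_irq()/spin_unlock_irq() become spin_lock_irqsave()/spin_unlock_irqrestore(). The _irq variants unconditionally re-enable interrupts on unlock, which is only correct when the caller is guaranteed to run with interrupts enabled; the dev_loss and link-event paths can be entered with them already disabled. The safe form, as a kernel-style fragment (the surrounding function is elided):

unsigned long iflags;

spin_lock_irqsave(&phba->hbalock, iflags);	/* records the current IRQ state */
/* ... touch state shared with interrupt context ... */
spin_unlock_irqrestore(&phba->hbalock, iflags);	/* restores that state exactly */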
@@ -212,14 +213,15 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
212 int put_node; 213 int put_node;
213 int warn_on = 0; 214 int warn_on = 0;
214 int fcf_inuse = 0; 215 int fcf_inuse = 0;
216 unsigned long iflags;
215 217
216 rport = ndlp->rport; 218 rport = ndlp->rport;
217 vport = ndlp->vport; 219 vport = ndlp->vport;
218 shost = lpfc_shost_from_vport(vport); 220 shost = lpfc_shost_from_vport(vport);
219 221
220 spin_lock_irq(shost->host_lock); 222 spin_lock_irqsave(shost->host_lock, iflags);
221 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; 223 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
222 spin_unlock_irq(shost->host_lock); 224 spin_unlock_irqrestore(shost->host_lock, iflags);
223 225
224 if (!rport) 226 if (!rport)
225 return fcf_inuse; 227 return fcf_inuse;
@@ -235,7 +237,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
235 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); 237 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
236 238
237 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 239 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
238 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n", 240 "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
239 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); 241 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
240 242
241 /* 243 /*
@@ -903,6 +905,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
903 phba->trunk_link.link1.state = 0; 905 phba->trunk_link.link1.state = 0;
904 phba->trunk_link.link2.state = 0; 906 phba->trunk_link.link2.state = 0;
905 phba->trunk_link.link3.state = 0; 907 phba->trunk_link.link3.state = 0;
908 phba->sli4_hba.link_state.logical_speed =
909 LPFC_LINK_SPEED_UNKNOWN;
906 } 910 }
907 spin_lock_irq(shost->host_lock); 911 spin_lock_irq(shost->host_lock);
908 phba->pport->fc_flag &= ~FC_LBIT; 912 phba->pport->fc_flag &= ~FC_LBIT;
@@ -3115,8 +3119,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3115 int rc; 3119 int rc;
3116 struct fcf_record *fcf_record; 3120 struct fcf_record *fcf_record;
3117 uint32_t fc_flags = 0; 3121 uint32_t fc_flags = 0;
3122 unsigned long iflags;
3118 3123
3119 spin_lock_irq(&phba->hbalock); 3124 spin_lock_irqsave(&phba->hbalock, iflags);
3120 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); 3125 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3121 3126
3122 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3127 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -3213,12 +3218,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3213 vport->fc_myDID = phba->fc_pref_DID; 3218 vport->fc_myDID = phba->fc_pref_DID;
3214 fc_flags |= FC_LBIT; 3219 fc_flags |= FC_LBIT;
3215 } 3220 }
3216 spin_unlock_irq(&phba->hbalock); 3221 spin_unlock_irqrestore(&phba->hbalock, iflags);
3217 3222
3218 if (fc_flags) { 3223 if (fc_flags) {
3219 spin_lock_irq(shost->host_lock); 3224 spin_lock_irqsave(shost->host_lock, iflags);
3220 vport->fc_flag |= fc_flags; 3225 vport->fc_flag |= fc_flags;
3221 spin_unlock_irq(shost->host_lock); 3226 spin_unlock_irqrestore(shost->host_lock, iflags);
3222 } 3227 }
3223 3228
3224 lpfc_linkup(phba); 3229 lpfc_linkup(phba);
@@ -3292,22 +3297,22 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3292 * The driver is expected to do FIP/FCF. Call the port 3297 * The driver is expected to do FIP/FCF. Call the port
3293 * and get the FCF Table. 3298 * and get the FCF Table.
3294 */ 3299 */
3295 spin_lock_irq(&phba->hbalock); 3300 spin_lock_irqsave(&phba->hbalock, iflags);
3296 if (phba->hba_flag & FCF_TS_INPROG) { 3301 if (phba->hba_flag & FCF_TS_INPROG) {
3297 spin_unlock_irq(&phba->hbalock); 3302 spin_unlock_irqrestore(&phba->hbalock, iflags);
3298 return; 3303 return;
3299 } 3304 }
3300 /* This is the initial FCF discovery scan */ 3305 /* This is the initial FCF discovery scan */
3301 phba->fcf.fcf_flag |= FCF_INIT_DISC; 3306 phba->fcf.fcf_flag |= FCF_INIT_DISC;
3302 spin_unlock_irq(&phba->hbalock); 3307 spin_unlock_irqrestore(&phba->hbalock, iflags);
3303 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3308 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3304 "2778 Start FCF table scan at linkup\n"); 3309 "2778 Start FCF table scan at linkup\n");
3305 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3310 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3306 LPFC_FCOE_FCF_GET_FIRST); 3311 LPFC_FCOE_FCF_GET_FIRST);
3307 if (rc) { 3312 if (rc) {
3308 spin_lock_irq(&phba->hbalock); 3313 spin_lock_irqsave(&phba->hbalock, iflags);
3309 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 3314 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3310 spin_unlock_irq(&phba->hbalock); 3315 spin_unlock_irqrestore(&phba->hbalock, iflags);
3311 goto out; 3316 goto out;
3312 } 3317 }
3313 /* Reset FCF roundrobin bmask for new discovery */ 3318 /* Reset FCF roundrobin bmask for new discovery */
@@ -3318,7 +3323,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3318out: 3323out:
3319 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3324 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3320 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 3325 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3321 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n", 3326 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
3322 vport->port_state, sparam_mbox, cfglink_mbox); 3327 vport->port_state, sparam_mbox, cfglink_mbox);
3323 lpfc_issue_clear_la(phba, vport); 3328 lpfc_issue_clear_la(phba, vport);
3324 return; 3329 return;
@@ -3366,6 +3371,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3366 MAILBOX_t *mb = &pmb->u.mb; 3371 MAILBOX_t *mb = &pmb->u.mb;
3367 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 3372 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3368 uint8_t attn_type; 3373 uint8_t attn_type;
3374 unsigned long iflags;
3369 3375
3370 /* Unblock ELS traffic */ 3376 /* Unblock ELS traffic */
3371 pring = lpfc_phba_elsring(phba); 3377 pring = lpfc_phba_elsring(phba);
@@ -3387,12 +3393,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3387 3393
3388 memcpy(&phba->alpa_map[0], mp->virt, 128); 3394 memcpy(&phba->alpa_map[0], mp->virt, 128);
3389 3395
3390 spin_lock_irq(shost->host_lock); 3396 spin_lock_irqsave(shost->host_lock, iflags);
3391 if (bf_get(lpfc_mbx_read_top_pb, la)) 3397 if (bf_get(lpfc_mbx_read_top_pb, la))
3392 vport->fc_flag |= FC_BYPASSED_MODE; 3398 vport->fc_flag |= FC_BYPASSED_MODE;
3393 else 3399 else
3394 vport->fc_flag &= ~FC_BYPASSED_MODE; 3400 vport->fc_flag &= ~FC_BYPASSED_MODE;
3395 spin_unlock_irq(shost->host_lock); 3401 spin_unlock_irqrestore(shost->host_lock, iflags);
3396 3402
3397 if (phba->fc_eventTag <= la->eventTag) { 3403 if (phba->fc_eventTag <= la->eventTag) {
3398 phba->fc_stat.LinkMultiEvent++; 3404 phba->fc_stat.LinkMultiEvent++;
@@ -3403,12 +3409,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3403 3409
3404 phba->fc_eventTag = la->eventTag; 3410 phba->fc_eventTag = la->eventTag;
3405 if (phba->sli_rev < LPFC_SLI_REV4) { 3411 if (phba->sli_rev < LPFC_SLI_REV4) {
3406 spin_lock_irq(&phba->hbalock); 3412 spin_lock_irqsave(&phba->hbalock, iflags);
3407 if (bf_get(lpfc_mbx_read_top_mm, la)) 3413 if (bf_get(lpfc_mbx_read_top_mm, la))
3408 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 3414 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3409 else 3415 else
3410 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 3416 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3411 spin_unlock_irq(&phba->hbalock); 3417 spin_unlock_irqrestore(&phba->hbalock, iflags);
3412 } 3418 }
3413 3419
3414 phba->link_events++; 3420 phba->link_events++;
@@ -3529,7 +3535,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3529 pmb->ctx_ndlp = NULL; 3535 pmb->ctx_ndlp = NULL;
3530 3536
3531 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 3537 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3532 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n", 3538 "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
3533 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 3539 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3534 kref_read(&ndlp->kref), 3540 kref_read(&ndlp->kref),
3535 ndlp->nlp_usg_map, ndlp); 3541 ndlp->nlp_usg_map, ndlp);
@@ -4041,7 +4047,7 @@ out:
4041 ndlp->nlp_type |= NLP_FABRIC; 4047 ndlp->nlp_type |= NLP_FABRIC;
4042 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4048 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4043 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 4049 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
4044 "0003 rpi:%x DID:%x flg:%x %d map%x %p\n", 4050 "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
4045 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 4051 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4046 kref_read(&ndlp->kref), 4052 kref_read(&ndlp->kref),
4047 ndlp->nlp_usg_map, ndlp); 4053 ndlp->nlp_usg_map, ndlp);
@@ -4160,7 +4166,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4160 fc_remote_port_rolechg(rport, rport_ids.roles); 4166 fc_remote_port_rolechg(rport, rport_ids.roles);
4161 4167
4162 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 4168 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4163 "3183 rport register x%06x, rport %p role x%x\n", 4169 "3183 rport register x%06x, rport x%px role x%x\n",
4164 ndlp->nlp_DID, rport, rport_ids.roles); 4170 ndlp->nlp_DID, rport, rport_ids.roles);
4165 4171
4166 if ((rport->scsi_target_id != -1) && 4172 if ((rport->scsi_target_id != -1) &&
@@ -4184,7 +4190,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4184 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 4190 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4185 4191
4186 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4192 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4187 "3184 rport unregister x%06x, rport %p\n", 4193 "3184 rport unregister x%06x, rport x%px\n",
4188 ndlp->nlp_DID, rport); 4194 ndlp->nlp_DID, rport);
4189 4195
4190 fc_remote_port_delete(rport); 4196 fc_remote_port_delete(rport);
@@ -4196,8 +4202,9 @@ static void
4196lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) 4202lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
4197{ 4203{
4198 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4204 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4205 unsigned long iflags;
4199 4206
4200 spin_lock_irq(shost->host_lock); 4207 spin_lock_irqsave(shost->host_lock, iflags);
4201 switch (state) { 4208 switch (state) {
4202 case NLP_STE_UNUSED_NODE: 4209 case NLP_STE_UNUSED_NODE:
4203 vport->fc_unused_cnt += count; 4210 vport->fc_unused_cnt += count;
@@ -4227,7 +4234,7 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
4227 vport->fc_npr_cnt += count; 4234 vport->fc_npr_cnt += count;
4228 break; 4235 break;
4229 } 4236 }
4230 spin_unlock_irq(shost->host_lock); 4237 spin_unlock_irqrestore(shost->host_lock, iflags);
4231} 4238}
4232 4239
4233static void 4240static void
@@ -4480,9 +4487,21 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4480 return NULL; 4487 return NULL;
4481 4488
4482 if (phba->sli_rev == LPFC_SLI_REV4) { 4489 if (phba->sli_rev == LPFC_SLI_REV4) {
4483 rpi = lpfc_sli4_alloc_rpi(vport->phba); 4490 if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
4484 if (rpi == LPFC_RPI_ALLOC_ERROR) 4491 rpi = lpfc_sli4_alloc_rpi(vport->phba);
4492 else
4493 rpi = ndlp->nlp_rpi;
4494
4495 if (rpi == LPFC_RPI_ALLOC_ERROR) {
4496 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4497 "0359 %s: ndlp:x%px "
4498 "usgmap:x%x refcnt:%d FAILED RPI "
4499 " ALLOC\n",
4500 __func__,
4501 (void *)ndlp, ndlp->nlp_usg_map,
4502 kref_read(&ndlp->kref));
4485 return NULL; 4503 return NULL;
4504 }
4486 } 4505 }
4487 4506
4488 spin_lock_irqsave(&phba->ndlp_lock, flags); 4507 spin_lock_irqsave(&phba->ndlp_lock, flags);
@@ -4490,9 +4509,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4490 if (NLP_CHK_FREE_REQ(ndlp)) { 4509 if (NLP_CHK_FREE_REQ(ndlp)) {
4491 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4510 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4492 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 4511 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4493 "0277 lpfc_enable_node: ndlp:x%p " 4512 "0277 %s: ndlp:x%px "
4494 "usgmap:x%x refcnt:%d\n", 4513 "usgmap:x%x refcnt:%d\n",
4495 (void *)ndlp, ndlp->nlp_usg_map, 4514 __func__, (void *)ndlp, ndlp->nlp_usg_map,
4496 kref_read(&ndlp->kref)); 4515 kref_read(&ndlp->kref));
4497 goto free_rpi; 4516 goto free_rpi;
4498 } 4517 }
@@ -4500,9 +4519,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4500 if (NLP_CHK_NODE_ACT(ndlp)) { 4519 if (NLP_CHK_NODE_ACT(ndlp)) {
4501 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4520 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4502 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 4521 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4503 "0278 lpfc_enable_node: ndlp:x%p " 4522 "0278 %s: ndlp:x%px "
4504 "usgmap:x%x refcnt:%d\n", 4523 "usgmap:x%x refcnt:%d\n",
4505 (void *)ndlp, ndlp->nlp_usg_map, 4524 __func__, (void *)ndlp, ndlp->nlp_usg_map,
4506 kref_read(&ndlp->kref)); 4525 kref_read(&ndlp->kref));
4507 goto free_rpi; 4526 goto free_rpi;
4508 } 4527 }
@@ -4532,7 +4551,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4532 ndlp->nlp_rpi = rpi; 4551 ndlp->nlp_rpi = rpi;
4533 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4552 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4534 "0008 rpi:%x DID:%x flg:%x refcnt:%d " 4553 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
4535 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, 4554 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
4536 ndlp->nlp_flag, 4555 ndlp->nlp_flag,
4537 kref_read(&ndlp->kref), 4556 kref_read(&ndlp->kref),
4538 ndlp->nlp_usg_map, ndlp); 4557 ndlp->nlp_usg_map, ndlp);
@@ -4541,6 +4560,14 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4541 4560
4542 if (state != NLP_STE_UNUSED_NODE) 4561 if (state != NLP_STE_UNUSED_NODE)
4543 lpfc_nlp_set_state(vport, ndlp, state); 4562 lpfc_nlp_set_state(vport, ndlp, state);
4563 else
4564 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4565 "0013 rpi:%x DID:%x flg:%x refcnt:%d "
4566 "map:%x x%px STATE=UNUSED\n",
4567 ndlp->nlp_rpi, ndlp->nlp_DID,
4568 ndlp->nlp_flag,
4569 kref_read(&ndlp->kref),
4570 ndlp->nlp_usg_map, ndlp);
4544 4571
4545 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 4572 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4546 "node enable: did:x%x", 4573 "node enable: did:x%x",
@@ -4797,7 +4824,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4797 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 4824 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
4798 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4825 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4799 "1434 UNREG cmpl deferred logo x%x " 4826 "1434 UNREG cmpl deferred logo x%x "
4800 "on NPort x%x Data: x%x %p\n", 4827 "on NPort x%x Data: x%x x%px\n",
4801 ndlp->nlp_rpi, ndlp->nlp_DID, 4828 ndlp->nlp_rpi, ndlp->nlp_DID,
4802 ndlp->nlp_defer_did, ndlp); 4829 ndlp->nlp_defer_did, ndlp);
4803 4830
@@ -4805,6 +4832,10 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4805 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 4832 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4806 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 4833 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4807 } else { 4834 } else {
4835 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
4836 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
4837 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
4838 }
4808 ndlp->nlp_flag &= ~NLP_UNREG_INP; 4839 ndlp->nlp_flag &= ~NLP_UNREG_INP;
4809 } 4840 }
4810} 4841}
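The hunk above pairs with the lpfc_cleanup_node() change later in this file: the teardown path marks the node with NLP_RELEASE_RPI, and the UNREG_RPI mailbox completion then returns the RPI to the free pool exactly once. Condensed from the diff for readability (fragment only):

/* teardown path (lpfc_cleanup_node, SLI4 only) */
if (phba->sli_rev == LPFC_SLI_REV4)
	ndlp->nlp_flag |= NLP_RELEASE_RPI;

/* UNREG mailbox completion (lpfc_nlp_logo_unreg) */
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
	lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
	ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
}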
@@ -4843,7 +4874,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4843 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4874 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4844 "1436 unreg_rpi SKIP UNREG x%x on " 4875 "1436 unreg_rpi SKIP UNREG x%x on "
4845 "NPort x%x deferred x%x flg x%x " 4876 "NPort x%x deferred x%x flg x%x "
4846 "Data: %p\n", 4877 "Data: x%px\n",
4847 ndlp->nlp_rpi, ndlp->nlp_DID, 4878 ndlp->nlp_rpi, ndlp->nlp_DID,
4848 ndlp->nlp_defer_did, 4879 ndlp->nlp_defer_did,
4849 ndlp->nlp_flag, ndlp); 4880 ndlp->nlp_flag, ndlp);
@@ -4893,7 +4924,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4893 4924
4894 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4925 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4895 "1433 unreg_rpi UNREG x%x on " 4926 "1433 unreg_rpi UNREG x%x on "
4896 "NPort x%x deferred flg x%x Data:%p\n", 4927 "NPort x%x deferred flg x%x "
4928 "Data:x%px\n",
4897 ndlp->nlp_rpi, ndlp->nlp_DID, 4929 ndlp->nlp_rpi, ndlp->nlp_DID,
4898 ndlp->nlp_flag, ndlp); 4930 ndlp->nlp_flag, ndlp);
4899 4931
@@ -5034,16 +5066,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5034 ndlp->nlp_state, ndlp->nlp_rpi); 5066 ndlp->nlp_state, ndlp->nlp_rpi);
5035 if (NLP_CHK_FREE_REQ(ndlp)) { 5067 if (NLP_CHK_FREE_REQ(ndlp)) {
5036 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 5068 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5037 "0280 lpfc_cleanup_node: ndlp:x%p " 5069 "0280 %s: ndlp:x%px "
5038 "usgmap:x%x refcnt:%d\n", 5070 "usgmap:x%x refcnt:%d\n",
5039 (void *)ndlp, ndlp->nlp_usg_map, 5071 __func__, (void *)ndlp, ndlp->nlp_usg_map,
5040 kref_read(&ndlp->kref)); 5072 kref_read(&ndlp->kref));
5041 lpfc_dequeue_node(vport, ndlp); 5073 lpfc_dequeue_node(vport, ndlp);
5042 } else { 5074 } else {
5043 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 5075 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5044 "0281 lpfc_cleanup_node: ndlp:x%p " 5076 "0281 %s: ndlp:x%px "
5045 "usgmap:x%x refcnt:%d\n", 5077 "usgmap:x%x refcnt:%d\n",
5046 (void *)ndlp, ndlp->nlp_usg_map, 5078 __func__, (void *)ndlp, ndlp->nlp_usg_map,
5047 kref_read(&ndlp->kref)); 5079 kref_read(&ndlp->kref));
5048 lpfc_disable_node(vport, ndlp); 5080 lpfc_disable_node(vport, ndlp);
5049 } 5081 }
@@ -5104,6 +5136,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5104 list_del_init(&ndlp->els_retry_evt.evt_listp); 5136 list_del_init(&ndlp->els_retry_evt.evt_listp);
5105 list_del_init(&ndlp->dev_loss_evt.evt_listp); 5137 list_del_init(&ndlp->dev_loss_evt.evt_listp);
5106 lpfc_cleanup_vports_rrqs(vport, ndlp); 5138 lpfc_cleanup_vports_rrqs(vport, ndlp);
5139 if (phba->sli_rev == LPFC_SLI_REV4)
5140 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5107 lpfc_unreg_rpi(vport, ndlp); 5141 lpfc_unreg_rpi(vport, ndlp);
5108 5142
5109 return 0; 5143 return 0;
@@ -5132,7 +5166,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5132 * allocated by the firmware. 5166 * allocated by the firmware.
5133 */ 5167 */
5134 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5168 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5135 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n", 5169 "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
5136 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 5170 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5137 kref_read(&ndlp->kref), 5171 kref_read(&ndlp->kref),
5138 ndlp->nlp_usg_map, ndlp); 5172 ndlp->nlp_usg_map, ndlp);
@@ -5168,8 +5202,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5168 * for registered rport so need to cleanup rport 5202 * for registered rport so need to cleanup rport
5169 */ 5203 */
5170 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 5204 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5171 "0940 removed node x%p DID x%x " 5205 "0940 removed node x%px DID x%x "
5172 " rport not null %p\n", 5206 " rport not null x%px\n",
5173 ndlp, ndlp->nlp_DID, ndlp->rport); 5207 ndlp, ndlp->nlp_DID, ndlp->rport);
5174 rport = ndlp->rport; 5208 rport = ndlp->rport;
5175 rdata = rport->dd_data; 5209 rdata = rport->dd_data;
@@ -5243,15 +5277,15 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5243 5277
5244 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 5278 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5245 if (lpfc_matchdid(vport, ndlp, did)) { 5279 if (lpfc_matchdid(vport, ndlp, did)) {
5246 data1 = (((uint32_t) ndlp->nlp_state << 24) | 5280 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5247 ((uint32_t) ndlp->nlp_xri << 16) | 5281 ((uint32_t)ndlp->nlp_xri << 16) |
5248 ((uint32_t) ndlp->nlp_type << 8) | 5282 ((uint32_t)ndlp->nlp_type << 8) |
5249 ((uint32_t) ndlp->nlp_rpi & 0xff)); 5283 ((uint32_t)ndlp->nlp_usg_map & 0xff));
5250 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5284 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5251 "0929 FIND node DID " 5285 "0929 FIND node DID "
5252 "Data: x%p x%x x%x x%x %p\n", 5286 "Data: x%px x%x x%x x%x x%x x%px\n",
5253 ndlp, ndlp->nlp_DID, 5287 ndlp, ndlp->nlp_DID,
5254 ndlp->nlp_flag, data1, 5288 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
5255 ndlp->active_rrqs_xri_bitmap); 5289 ndlp->active_rrqs_xri_bitmap);
5256 return ndlp; 5290 return ndlp;
5257 } 5291 }
@@ -5296,7 +5330,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
5296 spin_unlock_irqrestore(shost->host_lock, iflags); 5330 spin_unlock_irqrestore(shost->host_lock, iflags);
5297 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5331 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5298 "2025 FIND node DID " 5332 "2025 FIND node DID "
5299 "Data: x%p x%x x%x x%x %p\n", 5333 "Data: x%px x%x x%x x%x x%px\n",
5300 ndlp, ndlp->nlp_DID, 5334 ndlp, ndlp->nlp_DID,
5301 ndlp->nlp_flag, data1, 5335 ndlp->nlp_flag, data1,
5302 ndlp->active_rrqs_xri_bitmap); 5336 ndlp->active_rrqs_xri_bitmap);
@@ -5336,8 +5370,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
5336 if (vport->phba->nvmet_support) 5370 if (vport->phba->nvmet_support)
5337 return NULL; 5371 return NULL;
5338 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); 5372 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
5339 if (!ndlp) 5373 if (!ndlp) {
5374 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
5375 "0014 Could not enable ndlp\n");
5340 return NULL; 5376 return NULL;
5377 }
5341 spin_lock_irq(shost->host_lock); 5378 spin_lock_irq(shost->host_lock);
5342 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 5379 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5343 spin_unlock_irq(shost->host_lock); 5380 spin_unlock_irq(shost->host_lock);
@@ -5960,7 +5997,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5960 ndlp->nlp_type |= NLP_FABRIC; 5997 ndlp->nlp_type |= NLP_FABRIC;
5961 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 5998 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
5962 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 5999 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5963 "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n", 6000 "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
5964 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 6001 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5965 kref_read(&ndlp->kref), 6002 kref_read(&ndlp->kref),
5966 ndlp->nlp_usg_map, ndlp); 6003 ndlp->nlp_usg_map, ndlp);
@@ -6014,8 +6051,8 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
6014 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6051 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6015 if (filter(ndlp, param)) { 6052 if (filter(ndlp, param)) {
6016 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 6053 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6017 "3185 FIND node filter %p DID " 6054 "3185 FIND node filter %ps DID "
6018 "ndlp %p did x%x flg x%x st x%x " 6055 "ndlp x%px did x%x flg x%x st x%x "
6019 "xri x%x type x%x rpi x%x\n", 6056 "xri x%x type x%x rpi x%x\n",
6020 filter, ndlp, ndlp->nlp_DID, 6057 filter, ndlp, ndlp->nlp_DID,
6021 ndlp->nlp_flag, ndlp->nlp_state, 6058 ndlp->nlp_flag, ndlp->nlp_state,
@@ -6025,7 +6062,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
6025 } 6062 }
6026 } 6063 }
6027 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 6064 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6028 "3186 FIND node filter %p NOT FOUND.\n", filter); 6065 "3186 FIND node filter %ps NOT FOUND.\n", filter);
6029 return NULL; 6066 return NULL;
6030} 6067}
6031 6068
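The format-string conversions in these hunks track the kernel's pointer-hashing rules: since v4.15 a plain %p prints a hashed token to avoid leaking kernel addresses, %px prints the raw address for logs where that is acceptable (hence the driver's "x%px" spelling), and %ps resolves a code pointer to its symbol name, which is far more useful for a node_filter callback than any address. A minimal sketch of the three specifiers; the callback name is illustrative:

#include <linux/printk.h>

static int my_filter(void *ndlp, void *param)	/* illustrative callback */
{
	return 0;
}

static void show_pointer_formats(void *data)
{
	/* %p: hashed token; %px: raw address; %ps: symbol name */
	pr_info("hashed:%p raw:x%px sym:%ps\n",
		data, data, (void *)my_filter);
}
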
@@ -6065,10 +6102,11 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6065{ 6102{
6066 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6103 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6067 struct lpfc_nodelist *ndlp; 6104 struct lpfc_nodelist *ndlp;
6105 unsigned long flags;
6068 6106
6069 spin_lock_irq(shost->host_lock); 6107 spin_lock_irqsave(shost->host_lock, flags);
6070 ndlp = __lpfc_findnode_rpi(vport, rpi); 6108 ndlp = __lpfc_findnode_rpi(vport, rpi);
6071 spin_unlock_irq(shost->host_lock); 6109 spin_unlock_irqrestore(shost->host_lock, flags);
6072 return ndlp; 6110 return ndlp;
6073} 6111}
6074 6112
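Switching lpfc_findnode_rpi() from spin_lock_irq() to spin_lock_irqsave() makes it callable from contexts where local interrupts may already be disabled: spin_unlock_irq() re-enables interrupts unconditionally, while the irqsave/irqrestore pair restores whatever state the caller had. A minimal sketch of the pattern, with illustrative lock and lookup names:

#include <linux/spinlock.h>

struct entry;
struct entry *table_find(int key);	/* illustrative helper */

static DEFINE_SPINLOCK(demo_lock);	/* illustrative lock */

static struct entry *lookup_any_context(int key)
{
	struct entry *e;
	unsigned long flags;

	/* Works whether or not the caller already disabled IRQs:
	 * the prior IRQ state is parked in and restored from 'flags'. */
	spin_lock_irqsave(&demo_lock, flags);
	e = table_find(key);
	spin_unlock_irqrestore(&demo_lock, flags);
	return e;
}
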
@@ -6149,7 +6187,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
6149 ndlp->nlp_rpi = rpi; 6187 ndlp->nlp_rpi = rpi;
6150 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 6188 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6151 "0007 rpi:%x DID:%x flg:%x refcnt:%d " 6189 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
6152 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, 6190 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
6153 ndlp->nlp_flag, 6191 ndlp->nlp_flag,
6154 kref_read(&ndlp->kref), 6192 kref_read(&ndlp->kref),
6155 ndlp->nlp_usg_map, ndlp); 6193 ndlp->nlp_usg_map, ndlp);
@@ -6187,8 +6225,9 @@ lpfc_nlp_release(struct kref *kref)
6187 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 6225 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6188 6226
6189 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 6227 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
6190 "0279 lpfc_nlp_release: ndlp:x%p did %x " 6228 "0279 %s: ndlp:x%px did %x "
6191 "usgmap:x%x refcnt:%d rpi:%x\n", 6229 "usgmap:x%x refcnt:%d rpi:%x\n",
6230 __func__,
6192 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map, 6231 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
6193 kref_read(&ndlp->kref), ndlp->nlp_rpi); 6232 kref_read(&ndlp->kref), ndlp->nlp_rpi);
6194 6233
@@ -6200,8 +6239,6 @@ lpfc_nlp_release(struct kref *kref)
6200 spin_lock_irqsave(&phba->ndlp_lock, flags); 6239 spin_lock_irqsave(&phba->ndlp_lock, flags);
6201 NLP_CLR_NODE_ACT(ndlp); 6240 NLP_CLR_NODE_ACT(ndlp);
6202 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 6241 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6203 if (phba->sli_rev == LPFC_SLI_REV4)
6204 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
6205 6242
6206 /* free ndlp memory for final ndlp release */ 6243 /* free ndlp memory for final ndlp release */
6207 if (NLP_CHK_FREE_REQ(ndlp)) { 6244 if (NLP_CHK_FREE_REQ(ndlp)) {
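Three hunks cooperate here: lpfc_cleanup_node() now tags SLI4 nodes with NLP_RELEASE_RPI before the unregistration is issued, the UNREG_LOGIN completion in lpfc_nlp_logo_unreg() frees the RPI only when that flag is set, and the unconditional lpfc_sli4_free_rpi() call is dropped from lpfc_nlp_release(). The result is a flag-guarded, release-exactly-once hand-off from teardown to completion context; a minimal sketch under assumed names:

#include <linux/types.h>

#define RES_RELEASE	0x1		/* stands in for NLP_RELEASE_RPI */

struct node {
	unsigned int flags;
	u16 rpi;
};

void issue_unreg_login(struct node *n);	/* illustrative async unregister */
void free_rpi(u16 rpi);			/* illustrative resource free */

static void node_teardown(struct node *n)
{
	n->flags |= RES_RELEASE;	/* completion path owns the free */
	issue_unreg_login(n);
}

static void unreg_login_done(struct node *n)	/* completion callback */
{
	if (n->flags & RES_RELEASE) {
		free_rpi(n->rpi);
		n->flags &= ~RES_RELEASE;	/* release exactly once */
	}
}
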
@@ -6237,9 +6274,9 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6237 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { 6274 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
6238 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 6275 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6239 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 6276 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6240 "0276 lpfc_nlp_get: ndlp:x%p " 6277 "0276 %s: ndlp:x%px "
6241 "usgmap:x%x refcnt:%d\n", 6278 "usgmap:x%x refcnt:%d\n",
6242 (void *)ndlp, ndlp->nlp_usg_map, 6279 __func__, (void *)ndlp, ndlp->nlp_usg_map,
6243 kref_read(&ndlp->kref)); 6280 kref_read(&ndlp->kref));
6244 return NULL; 6281 return NULL;
6245 } else 6282 } else
@@ -6265,9 +6302,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6265 return 1; 6302 return 1;
6266 6303
6267 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 6304 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6268 "node put: did:x%x flg:x%x refcnt:x%x", 6305 "node put: did:x%x flg:x%x refcnt:x%x",
6269 ndlp->nlp_DID, ndlp->nlp_flag, 6306 ndlp->nlp_DID, ndlp->nlp_flag,
6270 kref_read(&ndlp->kref)); 6307 kref_read(&ndlp->kref));
6271 phba = ndlp->phba; 6308 phba = ndlp->phba;
6272 spin_lock_irqsave(&phba->ndlp_lock, flags); 6309 spin_lock_irqsave(&phba->ndlp_lock, flags);
6273 /* Check the ndlp memory free acknowledge flag to avoid the 6310 /* Check the ndlp memory free acknowledge flag to avoid the
@@ -6277,9 +6314,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6277 if (NLP_CHK_FREE_ACK(ndlp)) { 6314 if (NLP_CHK_FREE_ACK(ndlp)) {
6278 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 6315 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6279 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 6316 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6280 "0274 lpfc_nlp_put: ndlp:x%p " 6317 "0274 %s: ndlp:x%px "
6281 "usgmap:x%x refcnt:%d\n", 6318 "usgmap:x%x refcnt:%d\n",
6282 (void *)ndlp, ndlp->nlp_usg_map, 6319 __func__, (void *)ndlp, ndlp->nlp_usg_map,
6283 kref_read(&ndlp->kref)); 6320 kref_read(&ndlp->kref));
6284 return 1; 6321 return 1;
6285 } 6322 }
@@ -6290,9 +6327,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6290 if (NLP_CHK_IACT_REQ(ndlp)) { 6327 if (NLP_CHK_IACT_REQ(ndlp)) {
6291 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 6328 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6292 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 6329 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6293 "0275 lpfc_nlp_put: ndlp:x%p " 6330 "0275 %s: ndlp:x%px "
6294 "usgmap:x%x refcnt:%d\n", 6331 "usgmap:x%x refcnt:%d\n",
6295 (void *)ndlp, ndlp->nlp_usg_map, 6332 __func__, (void *)ndlp, ndlp->nlp_usg_map,
6296 kref_read(&ndlp->kref)); 6333 kref_read(&ndlp->kref));
6297 return 1; 6334 return 1;
6298 } 6335 }
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5b439a6dcde1..436cdc8c5ef4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -843,7 +843,7 @@ typedef struct _ADISC { /* Structure is in Big Endian format */
843 struct lpfc_name portName; 843 struct lpfc_name portName;
844 struct lpfc_name nodeName; 844 struct lpfc_name nodeName;
845 uint32_t DID; 845 uint32_t DID;
846} ADISC; 846} __packed ADISC;
847 847
848typedef struct _FARP { /* Structure is in Big Endian format */ 848typedef struct _FARP { /* Structure is in Big Endian format */
849 uint32_t Mflags:8; 849 uint32_t Mflags:8;
@@ -873,7 +873,7 @@ typedef struct _FAN { /* Structure is in Big Endian format */
873 uint32_t Fdid; 873 uint32_t Fdid;
874 struct lpfc_name FportName; 874 struct lpfc_name FportName;
875 struct lpfc_name FnodeName; 875 struct lpfc_name FnodeName;
876} FAN; 876} __packed FAN;
877 877
878typedef struct _SCR { /* Structure is in Big Endian format */ 878typedef struct _SCR { /* Structure is in Big Endian format */
879 uint8_t resvd1; 879 uint8_t resvd1;
@@ -917,7 +917,7 @@ typedef struct _RNID { /* Structure is in Big Endian format */
917 union { 917 union {
918 RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */ 918 RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
919 } un; 919 } un;
920} RNID; 920} __packed RNID;
921 921
922typedef struct _RPS { /* Structure is in Big Endian format */ 922typedef struct _RPS { /* Structure is in Big Endian format */
923 union { 923 union {
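Adding __packed to these ELS payload typedefs makes the no-compiler-padding property explicit rather than ABI-dependent, so the in-memory layout is guaranteed to match the Big Endian wire format byte for byte. For members like these (u32s and 8-byte WWNs) most ABIs insert no padding anyway, but the attribute turns that assumption into a guarantee a build-time check can enforce. A simplified illustration, with types reduced to the essentials:

#include <linux/build_bug.h>
#include <linux/types.h>

struct wwn {
	u8 b[8];			/* stand-in for struct lpfc_name */
};

struct adisc_wire {			/* layout pinned to the frame */
	u32 hard_addr;
	struct wwn port_name;
	struct wwn node_name;
	u32 did;
} __packed;

/* With __packed this holds on every compiler/ABI: 4 + 8 + 8 + 4 bytes */
static_assert(sizeof(struct adisc_wire) == 24);
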
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 77f9a55a3f54..bd533475c86a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2050,6 +2050,23 @@ struct sli4_sge { /* SLI-4 */
2050 uint32_t sge_len; 2050 uint32_t sge_len;
2051}; 2051};
2052 2052
2053struct sli4_hybrid_sgl {
2054 struct list_head list_node;
2055 struct sli4_sge *dma_sgl;
2056 dma_addr_t dma_phys_sgl;
2057};
2058
2059struct fcp_cmd_rsp_buf {
2060 struct list_head list_node;
2061
2062 /* for storing cmd/rsp dma alloc'ed virt_addr */
2063 struct fcp_cmnd *fcp_cmnd;
2064 struct fcp_rsp *fcp_rsp;
2065
2066 /* for storing this cmd/rsp's dma mapped phys addr from per CPU pool */
2067 dma_addr_t fcp_cmd_rsp_dma_handle;
2068};
2069
2053struct sli4_sge_diseed { /* SLI-4 */ 2070struct sli4_sge_diseed { /* SLI-4 */
2054 uint32_t ref_tag; 2071 uint32_t ref_tag;
2055 uint32_t ref_tag_tran; 2072 uint32_t ref_tag_tran;
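The two new wrappers let extra SGL chunks and FCP cmd/rsp pairs sit on per-hardware-queue free lists: list_node threads the element onto the queue's pool, while the remaining members pair each virtual address with the DMA handle needed to hand it back to its dma_pool later. The get/put helpers introduced elsewhere in this series (lpfc_get_sgl_per_hdwq() and friends) follow the usual list-backed pool shape; a minimal sketch with assumed names, omitting the hdwq locking and the allocate-on-empty fallback:

#include <linux/list.h>
#include <linux/types.h>

struct pool_buf {
	struct list_head list_node;	/* links buffer into the free list */
	void *virt;			/* CPU address */
	dma_addr_t phys;		/* matching DMA handle */
};

static struct pool_buf *pool_get(struct list_head *free_list)
{
	struct pool_buf *buf;

	buf = list_first_entry_or_null(free_list, struct pool_buf,
				       list_node);
	if (buf)
		list_del_init(&buf->list_node);
	return buf;			/* NULL when the list is empty */
}

static void pool_put(struct list_head *free_list, struct pool_buf *buf)
{
	list_add_tail(&buf->list_node, free_list);
}
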
@@ -3449,6 +3466,9 @@ struct lpfc_sli4_parameters {
3449#define cfg_xib_SHIFT 4 3466#define cfg_xib_SHIFT 4
3450#define cfg_xib_MASK 0x00000001 3467#define cfg_xib_MASK 0x00000001
3451#define cfg_xib_WORD word19 3468#define cfg_xib_WORD word19
3469#define cfg_xpsgl_SHIFT 6
3470#define cfg_xpsgl_MASK 0x00000001
3471#define cfg_xpsgl_WORD word19
3452#define cfg_eqdr_SHIFT 8 3472#define cfg_eqdr_SHIFT 8
3453#define cfg_eqdr_MASK 0x00000001 3473#define cfg_eqdr_MASK 0x00000001
3454#define cfg_eqdr_WORD word19 3474#define cfg_eqdr_WORD word19
@@ -3460,6 +3480,10 @@ struct lpfc_sli4_parameters {
3460#define cfg_bv1s_MASK 0x00000001 3480#define cfg_bv1s_MASK 0x00000001
3461#define cfg_bv1s_WORD word19 3481#define cfg_bv1s_WORD word19
3462 3482
3483#define cfg_nsler_SHIFT 12
3484#define cfg_nsler_MASK 0x00000001
3485#define cfg_nsler_WORD word19
3486
3463 uint32_t word20; 3487 uint32_t word20;
3464#define cfg_max_tow_xri_SHIFT 0 3488#define cfg_max_tow_xri_SHIFT 0
3465#define cfg_max_tow_xri_MASK 0x0000ffff 3489#define cfg_max_tow_xri_MASK 0x0000ffff
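Each new capability bit (cfg_xpsgl, cfg_nsler) follows the driver's SHIFT/MASK/WORD naming triple, which the lpfc bf_set()/bf_get() macros stitch together with token pasting: the _WORD define names which u32 to touch, and the shift/mask carve the field out of it. A generic reimplementation of the idea (not the driver's exact macro text):

#include <linux/types.h>

#define BF_GET(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define BF_SET(name, ptr, val)					\
	((ptr)->name##_WORD =					\
		((ptr)->name##_WORD &				\
		 ~(name##_MASK << name##_SHIFT)) |		\
		(((val) & name##_MASK) << name##_SHIFT))

struct params { u32 word19; };

#define cfg_xpsgl_SHIFT	6
#define cfg_xpsgl_MASK	0x00000001
#define cfg_xpsgl_WORD	word19

/* BF_SET(cfg_xpsgl, &p, 1) sets bit 6 of p.word19;
 * BF_GET(cfg_xpsgl, &p) reads it back as 0 or 1. */
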
@@ -4314,6 +4338,12 @@ struct wqe_common {
4314#define wqe_rcvoxid_SHIFT 16 4338#define wqe_rcvoxid_SHIFT 16
4315#define wqe_rcvoxid_MASK 0x0000FFFF 4339#define wqe_rcvoxid_MASK 0x0000FFFF
4316#define wqe_rcvoxid_WORD word9 4340#define wqe_rcvoxid_WORD word9
4341#define wqe_sof_SHIFT 24
4342#define wqe_sof_MASK 0x000000FF
4343#define wqe_sof_WORD word9
4344#define wqe_eof_SHIFT 16
4345#define wqe_eof_MASK 0x000000FF
4346#define wqe_eof_WORD word9
4317 uint32_t word10; 4347 uint32_t word10;
4318#define wqe_ebde_cnt_SHIFT 0 4348#define wqe_ebde_cnt_SHIFT 0
4319#define wqe_ebde_cnt_MASK 0x0000000f 4349#define wqe_ebde_cnt_MASK 0x0000000f
@@ -4595,6 +4625,7 @@ struct lpfc_nvme_prli {
4595#define prli_type_code_WORD word1 4625#define prli_type_code_WORD word1
4596 uint32_t word_rsvd2; 4626 uint32_t word_rsvd2;
4597 uint32_t word_rsvd3; 4627 uint32_t word_rsvd3;
4628
4598 uint32_t word4; 4629 uint32_t word4;
4599#define prli_fba_SHIFT 0 4630#define prli_fba_SHIFT 0
4600#define prli_fba_MASK 0x00000001 4631#define prli_fba_MASK 0x00000001
@@ -4611,6 +4642,9 @@ struct lpfc_nvme_prli {
4611#define prli_conf_SHIFT 7 4642#define prli_conf_SHIFT 7
4612#define prli_conf_MASK 0x00000001 4643#define prli_conf_MASK 0x00000001
4613#define prli_conf_WORD word4 4644#define prli_conf_WORD word4
4645#define prli_nsler_SHIFT 8
4646#define prli_nsler_MASK 0x00000001
4647#define prli_nsler_WORD word4
4614 uint32_t word5; 4648 uint32_t word5;
4615#define prli_fb_sz_SHIFT 0 4649#define prli_fb_sz_SHIFT 0
4616#define prli_fb_sz_MASK 0x0000ffff 4650#define prli_fb_sz_MASK 0x0000ffff
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 1ac98becb5ba..e91377a4cafe 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -39,6 +39,7 @@
39#include <linux/msi.h> 39#include <linux/msi.h>
40#include <linux/irq.h> 40#include <linux/irq.h>
41#include <linux/bitops.h> 41#include <linux/bitops.h>
42#include <linux/crash_dump.h>
42 43
43#include <scsi/scsi.h> 44#include <scsi/scsi.h>
44#include <scsi/scsi_device.h> 45#include <scsi/scsi_device.h>
@@ -65,12 +66,6 @@
65#include "lpfc_version.h" 66#include "lpfc_version.h"
66#include "lpfc_ids.h" 67#include "lpfc_ids.h"
67 68
68char *_dump_buf_data;
69unsigned long _dump_buf_data_order;
70char *_dump_buf_dif;
71unsigned long _dump_buf_dif_order;
72spinlock_t _dump_buf_lock;
73
74/* Used when mapping IRQ vectors in a driver centric manner */ 69/* Used when mapping IRQ vectors in a driver centric manner */
75static uint32_t lpfc_present_cpu; 70static uint32_t lpfc_present_cpu;
76 71
@@ -1081,8 +1076,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1081 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 1076 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1082 qp = &phba->sli4_hba.hdwq[idx]; 1077 qp = &phba->sli4_hba.hdwq[idx];
1083 1078
1084 spin_lock(&qp->abts_scsi_buf_list_lock); 1079 spin_lock(&qp->abts_io_buf_list_lock);
1085 list_splice_init(&qp->lpfc_abts_scsi_buf_list, 1080 list_splice_init(&qp->lpfc_abts_io_buf_list,
1086 &aborts); 1081 &aborts);
1087 1082
1088 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1083 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
@@ -1093,29 +1088,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1093 spin_lock(&qp->io_buf_list_put_lock); 1088 spin_lock(&qp->io_buf_list_put_lock);
1094 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); 1089 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1095 qp->put_io_bufs += qp->abts_scsi_io_bufs; 1090 qp->put_io_bufs += qp->abts_scsi_io_bufs;
1091 qp->put_io_bufs += qp->abts_nvme_io_bufs;
1096 qp->abts_scsi_io_bufs = 0; 1092 qp->abts_scsi_io_bufs = 0;
1093 qp->abts_nvme_io_bufs = 0;
1097 spin_unlock(&qp->io_buf_list_put_lock); 1094 spin_unlock(&qp->io_buf_list_put_lock);
1098 spin_unlock(&qp->abts_scsi_buf_list_lock); 1095 spin_unlock(&qp->abts_io_buf_list_lock);
1099
1100 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1101 spin_lock(&qp->abts_nvme_buf_list_lock);
1102 list_splice_init(&qp->lpfc_abts_nvme_buf_list,
1103 &nvme_aborts);
1104 list_for_each_entry_safe(psb, psb_next, &nvme_aborts,
1105 list) {
1106 psb->pCmd = NULL;
1107 psb->status = IOSTAT_SUCCESS;
1108 cnt++;
1109 }
1110 spin_lock(&qp->io_buf_list_put_lock);
1111 qp->put_io_bufs += qp->abts_nvme_io_bufs;
1112 qp->abts_nvme_io_bufs = 0;
1113 list_splice_init(&nvme_aborts,
1114 &qp->lpfc_io_buf_list_put);
1115 spin_unlock(&qp->io_buf_list_put_lock);
1116 spin_unlock(&qp->abts_nvme_buf_list_lock);
1117
1118 }
1119 } 1096 }
1120 spin_unlock_irq(&phba->hbalock); 1097 spin_unlock_irq(&phba->hbalock);
1121 1098
@@ -1261,6 +1238,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
1261 unsigned char *eqcnt = NULL; 1238 unsigned char *eqcnt = NULL;
1262 uint32_t usdelay; 1239 uint32_t usdelay;
1263 int i; 1240 int i;
1241 bool update = false;
1264 1242
1265 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) 1243 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1266 return; 1244 return;
@@ -1274,20 +1252,29 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
1274 if (!eqcnt) 1252 if (!eqcnt)
1275 goto requeue; 1253 goto requeue;
1276 1254
1277 /* Loop thru all IRQ vectors */ 1255 if (phba->cfg_irq_chann > 1) {
1278 for (i = 0; i < phba->cfg_irq_chann; i++) { 1256 /* Loop thru all IRQ vectors */
1279 /* Get the EQ corresponding to the IRQ vector */ 1257 for (i = 0; i < phba->cfg_irq_chann; i++) {
1280 eq = phba->sli4_hba.hba_eq_hdl[i].eq; 1258 /* Get the EQ corresponding to the IRQ vector */
1281 if (eq && eqcnt[eq->last_cpu] < 2) 1259 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1282 eqcnt[eq->last_cpu]++; 1260 if (!eq)
1283 continue; 1261 continue;
1284 } 1262 if (eq->q_mode) {
1263 update = true;
1264 break;
1265 }
1266 if (eqcnt[eq->last_cpu] < 2)
1267 eqcnt[eq->last_cpu]++;
1268 }
1269 } else
1270 update = true;
1285 1271
1286 for_each_present_cpu(i) { 1272 for_each_present_cpu(i) {
1287 if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2)
1288 continue;
1289
1290 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); 1273 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1274 if (!update && eqcnt[i] < 2) {
1275 eqi->icnt = 0;
1276 continue;
1277 }
1291 1278
1292 usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) * 1279 usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
1293 LPFC_EQ_DELAY_STEP; 1280 LPFC_EQ_DELAY_STEP;
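The reworked worker only retunes CPUs that are actually shared, meaning they service two or more EQs, unless some EQ is already running coalesced (q_mode set), in which case every CPU is updated; CPUs that are skipped now get their interrupt count zeroed so a stale icnt cannot produce a delay spike later. The delay itself remains a staircase of the per-CPU interrupt count; a sketch of the computation, with constant values illustrative (the real ones live in lpfc.h):

/* Every IMAX_THRESHOLD interrupts seen in the sampling window adds
 * one EQ_DELAY_STEP of coalescing delay; values illustrative. */
enum { IMAX_THRESHOLD = 20000, EQ_DELAY_STEP = 8 };

static unsigned int eq_delay_us(unsigned int icnt)
{
	return (icnt / IMAX_THRESHOLD) * EQ_DELAY_STEP;
}

/* e.g. icnt = 45000 -> two full thresholds -> 16 us of delay */
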
@@ -1535,6 +1522,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1535 spin_unlock_irq(&phba->hbalock); 1522 spin_unlock_irq(&phba->hbalock);
1536 1523
1537 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 1524 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1525 lpfc_sli_flush_io_rings(phba);
1538 lpfc_offline(phba); 1526 lpfc_offline(phba);
1539 lpfc_hba_down_post(phba); 1527 lpfc_hba_down_post(phba);
1540 lpfc_unblock_mgmt_io(phba); 1528 lpfc_unblock_mgmt_io(phba);
@@ -1796,6 +1784,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1796 "2887 Reset Needed: Attempting Port " 1784 "2887 Reset Needed: Attempting Port "
1797 "Recovery...\n"); 1785 "Recovery...\n");
1798 lpfc_offline_prep(phba, mbx_action); 1786 lpfc_offline_prep(phba, mbx_action);
1787 lpfc_sli_flush_io_rings(phba);
1799 lpfc_offline(phba); 1788 lpfc_offline(phba);
1800 /* release interrupt for possible resource change */ 1789 /* release interrupt for possible resource change */
1801 lpfc_sli4_disable_intr(phba); 1790 lpfc_sli4_disable_intr(phba);
@@ -1915,7 +1904,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1915 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1916 "7624 Firmware not ready: Failing UE recovery," 1905 "7624 Firmware not ready: Failing UE recovery,"
1917 " waited %dSec", i); 1906 " waited %dSec", i);
1918 lpfc_sli4_offline_eratt(phba); 1907 phba->link_state = LPFC_HBA_ERROR;
1919 break; 1908 break;
1920 1909
1921 case LPFC_SLI_INTF_IF_TYPE_2: 1910 case LPFC_SLI_INTF_IF_TYPE_2:
@@ -1989,9 +1978,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1989 } 1978 }
1990 /* fall through for not able to recover */ 1979 /* fall through for not able to recover */
1991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1992 "3152 Unrecoverable error, bring the port " 1981 "3152 Unrecoverable error\n");
1993 "offline\n"); 1982 phba->link_state = LPFC_HBA_ERROR;
1994 lpfc_sli4_offline_eratt(phba);
1995 break; 1983 break;
1996 case LPFC_SLI_INTF_IF_TYPE_1: 1984 case LPFC_SLI_INTF_IF_TYPE_1:
1997 default: 1985 default:
@@ -2863,7 +2851,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
2863 &vport->fc_nodes, nlp_listp) { 2851 &vport->fc_nodes, nlp_listp) {
2864 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2852 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2865 LOG_NODE, 2853 LOG_NODE,
2866 "0282 did:x%x ndlp:x%p " 2854 "0282 did:x%x ndlp:x%px "
2867 "usgmap:x%x refcnt:%d\n", 2855 "usgmap:x%x refcnt:%d\n",
2868 ndlp->nlp_DID, (void *)ndlp, 2856 ndlp->nlp_DID, (void *)ndlp,
2869 ndlp->nlp_usg_map, 2857 ndlp->nlp_usg_map,
@@ -3067,7 +3055,7 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
3067 ndlp->nlp_rpi = rpi; 3055 ndlp->nlp_rpi = rpi;
3068 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3056 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3069 "0009 rpi:%x DID:%x " 3057 "0009 rpi:%x DID:%x "
3070 "flg:%x map:%x %p\n", ndlp->nlp_rpi, 3058 "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
3071 ndlp->nlp_DID, ndlp->nlp_flag, 3059 ndlp->nlp_DID, ndlp->nlp_flag,
3072 ndlp->nlp_usg_map, ndlp); 3060 ndlp->nlp_usg_map, ndlp);
3073 } 3061 }
@@ -3252,12 +3240,8 @@ static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3252 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) 3240 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3253 lpfc_destroy_expedite_pool(phba); 3241 lpfc_destroy_expedite_pool(phba);
3254 3242
3255 if (!(phba->pport->load_flag & FC_UNLOADING)) { 3243 if (!(phba->pport->load_flag & FC_UNLOADING))
3256 lpfc_sli_flush_fcp_rings(phba); 3244 lpfc_sli_flush_io_rings(phba);
3257
3258 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3259 lpfc_sli_flush_nvme_rings(phba);
3260 }
3261 3245
3262 hwq_count = phba->cfg_hdw_queue; 3246 hwq_count = phba->cfg_hdw_queue;
3263 3247
@@ -3491,7 +3475,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3491 lpfc_printf_vlog(ndlp->vport, 3475 lpfc_printf_vlog(ndlp->vport,
3492 KERN_INFO, LOG_NODE, 3476 KERN_INFO, LOG_NODE,
3493 "0011 lpfc_offline: " 3477 "0011 lpfc_offline: "
3494 "ndlp:x%p did %x " 3478 "ndlp:x%px did %x "
3495 "usgmap:x%x rpi:%x\n", 3479 "usgmap:x%x rpi:%x\n",
3496 ndlp, ndlp->nlp_DID, 3480 ndlp, ndlp->nlp_DID,
3497 ndlp->nlp_usg_map, 3481 ndlp->nlp_usg_map,
@@ -3636,6 +3620,9 @@ lpfc_io_free(struct lpfc_hba *phba)
3636 qp->put_io_bufs--; 3620 qp->put_io_bufs--;
3637 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3621 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3638 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3622 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3623 if (phba->cfg_xpsgl && !phba->nvmet_support)
3624 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3625 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3639 kfree(lpfc_ncmd); 3626 kfree(lpfc_ncmd);
3640 qp->total_io_bufs--; 3627 qp->total_io_bufs--;
3641 } 3628 }
@@ -3649,6 +3636,9 @@ lpfc_io_free(struct lpfc_hba *phba)
3649 qp->get_io_bufs--; 3636 qp->get_io_bufs--;
3650 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 3637 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3651 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 3638 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3639 if (phba->cfg_xpsgl && !phba->nvmet_support)
3640 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3641 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3652 kfree(lpfc_ncmd); 3642 kfree(lpfc_ncmd);
3653 qp->total_io_bufs--; 3643 qp->total_io_bufs--;
3654 } 3644 }
@@ -4097,18 +4087,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4097 LIST_HEAD(post_nblist); 4087 LIST_HEAD(post_nblist);
4098 LIST_HEAD(nvme_nblist); 4088 LIST_HEAD(nvme_nblist);
4099 4089
4100 /* Sanity check to ensure our sizing is right for both SCSI and NVME */
4101 if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
4102 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4103 "6426 Common buffer size %zd exceeds %d\n",
4104 sizeof(struct lpfc_io_buf),
4105 LPFC_COMMON_IO_BUF_SZ);
4106 return 0;
4107 }
4108
4109 phba->sli4_hba.io_xri_cnt = 0; 4090 phba->sli4_hba.io_xri_cnt = 0;
4110 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 4091 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4111 lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL); 4092 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4112 if (!lpfc_ncmd) 4093 if (!lpfc_ncmd)
4113 break; 4094 break;
4114 /* 4095 /*
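Allocating sizeof(*lpfc_ncmd) instead of the fixed LPFC_COMMON_IO_BUF_SZ ties the allocation size to the type itself, which is why the "common buffer size" sanity check above could be deleted outright: the compiler now guarantees the buffer fits the struct, with nothing to keep in sync by hand. This is the kernel's preferred kzalloc idiom; a minimal sketch with an illustrative type:

#include <linux/slab.h>

struct io_buf {
	int xri;			/* illustrative fields */
	void *data;
};

static struct io_buf *alloc_io_buf(void)
{
	/* sizeof(*buf) tracks the pointee type automatically, even if
	 * struct io_buf grows later. */
	struct io_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	return buf;			/* NULL on allocation failure */
}
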
@@ -4124,22 +4105,30 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4124 break; 4105 break;
4125 } 4106 }
4126 4107
4127 /* 4108 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4128 * 4K Page alignment is CRITICAL to BlockGuard, double check 4109 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4129 * to be sure. 4110 } else {
4130 */ 4111 /*
4131 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 4112 * 4K Page alignment is CRITICAL to BlockGuard, double
4132 (((unsigned long)(lpfc_ncmd->data) & 4113 * check to be sure.
4133 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 4114 */
4134 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4115 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4135 "3369 Memory alignment err: addr=%lx\n", 4116 (((unsigned long)(lpfc_ncmd->data) &
4136 (unsigned long)lpfc_ncmd->data); 4117 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4137 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4118 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4138 lpfc_ncmd->data, lpfc_ncmd->dma_handle); 4119 "3369 Memory alignment err: "
4139 kfree(lpfc_ncmd); 4120 "addr=%lx\n",
4140 break; 4121 (unsigned long)lpfc_ncmd->data);
4122 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4123 lpfc_ncmd->data,
4124 lpfc_ncmd->dma_handle);
4125 kfree(lpfc_ncmd);
4126 break;
4127 }
4141 } 4128 }
4142 4129
4130 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4131
4143 lxri = lpfc_sli4_next_xritag(phba); 4132 lxri = lpfc_sli4_next_xritag(phba);
4144 if (lxri == NO_XRI) { 4133 if (lxri == NO_XRI) {
4145 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 4134 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
@@ -4318,7 +4307,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4318 4307
4319 shost->dma_boundary = 4308 shost->dma_boundary =
4320 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4309 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4321 shost->sg_tablesize = phba->cfg_scsi_seg_cnt; 4310
4311 if (phba->cfg_xpsgl && !phba->nvmet_support)
4312 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4313 else
4314 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4322 } else 4315 } else
4323 /* SLI-3 has a limited number of hardware queues (3), 4316 /* SLI-3 has a limited number of hardware queues (3),
4324 * thus there is only one for FCP processing. 4317 * thus there is only one for FCP processing.
@@ -6336,6 +6329,24 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
6336 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 6329 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
6337 return -ENOMEM; 6330 return -ENOMEM;
6338 6331
6332 phba->lpfc_sg_dma_buf_pool =
6333 dma_pool_create("lpfc_sg_dma_buf_pool",
6334 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
6335 BPL_ALIGN_SZ, 0);
6336
6337 if (!phba->lpfc_sg_dma_buf_pool)
6338 goto fail_free_mem;
6339
6340 phba->lpfc_cmd_rsp_buf_pool =
6341 dma_pool_create("lpfc_cmd_rsp_buf_pool",
6342 &phba->pcidev->dev,
6343 sizeof(struct fcp_cmnd) +
6344 sizeof(struct fcp_rsp),
6345 BPL_ALIGN_SZ, 0);
6346
6347 if (!phba->lpfc_cmd_rsp_buf_pool)
6348 goto fail_free_dma_buf_pool;
6349
6339 /* 6350 /*
6340 * Enable sr-iov virtual functions if supported and configured 6351 * Enable sr-iov virtual functions if supported and configured
6341 * through the module parameter. 6352 * through the module parameter.
@@ -6354,6 +6365,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
6354 } 6365 }
6355 6366
6356 return 0; 6367 return 0;
6368
6369fail_free_dma_buf_pool:
6370 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6371 phba->lpfc_sg_dma_buf_pool = NULL;
6372fail_free_mem:
6373 lpfc_mem_free(phba);
6374 return -ENOMEM;
6357} 6375}
6358 6376
6359/** 6377/**
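The SLI-3 resource setup now creates both DMA pools up front and unwinds them through cascading labels in reverse creation order, the standard kernel error-path shape; each label undoes exactly what succeeded before the failing step. A minimal sketch with illustrative pool names and sizes:

#include <linux/dmapool.h>
#include <linux/errno.h>

static int setup_pools(struct device *dev,
		       struct dma_pool **sg, struct dma_pool **cmd)
{
	*sg = dma_pool_create("sg_pool", dev, 4096, 4096, 0);
	if (!*sg)
		return -ENOMEM;

	*cmd = dma_pool_create("cmd_pool", dev, 96, 64, 0);
	if (!*cmd)
		goto fail_free_sg;

	return 0;

fail_free_sg:
	dma_pool_destroy(*sg);		/* unwind in reverse creation order */
	*sg = NULL;
	return -ENOMEM;
}
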
@@ -6414,6 +6432,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6414 if (rc) 6432 if (rc)
6415 return -ENODEV; 6433 return -ENODEV;
6416 6434
6435 /* Allocate all driver workqueues here */
6436
6437 /* The lpfc_wq workqueue for deferred irq use */
6438 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6439
6417 /* 6440 /*
6418 * Initialize timers used by driver 6441 * Initialize timers used by driver
6419 */ 6442 */
@@ -6448,102 +6471,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6448 * The WQ create will allocate the ring. 6471 * The WQ create will allocate the ring.
6449 */ 6472 */
6450 6473
6451 /*
6452 * 1 for cmd, 1 for rsp, NVME adds an extra one
6453 * for boundary conditions in its max_sgl_segment template.
6454 */
6455 extra = 2;
6456 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6457 extra++;
6458
6459 /*
6460 * It doesn't matter what family our adapter is in, we are
6461 * limited to 2 Pages, 512 SGEs, for our SGL.
6462 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6463 */
6464 max_buf_size = (2 * SLI4_PAGE_SIZE);
6465
6466 /*
6467 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
6468 * used to create the sg_dma_buf_pool must be calculated.
6469 */
6470 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6471 /*
6472 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
6473 * the FCP rsp, and a SGE. Since we have no control
6474 * over how many protection segments the SCSI Layer
6475 * will hand us (ie: there could be one for every block
6476 * in the IO), just allocate enough SGEs to accommodate
6477 * our max amount and we need to limit lpfc_sg_seg_cnt
6478 * to minimize the risk of running out.
6479 */
6480 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6481 sizeof(struct fcp_rsp) + max_buf_size;
6482
6483 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6484 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6485
6486 /*
6487 * If supporting DIF, reduce the seg count for scsi to
6488 * allow room for the DIF sges.
6489 */
6490 if (phba->cfg_enable_bg &&
6491 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6492 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6493 else
6494 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6495
6496 } else {
6497 /*
6498 * The scsi_buf for a regular I/O holds the FCP cmnd,
6499 * the FCP rsp, a SGE for each, and a SGE for up to
6500 * cfg_sg_seg_cnt data segments.
6501 */
6502 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6503 sizeof(struct fcp_rsp) +
6504 ((phba->cfg_sg_seg_cnt + extra) *
6505 sizeof(struct sli4_sge));
6506
6507 /* Total SGEs for scsi_sg_list */
6508 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6509 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6510
6511 /*
6512 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
6513 * need to post 1 page for the SGL.
6514 */
6515 }
6516
6517 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
6518 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6519 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6520 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6521 "6300 Reducing NVME sg segment "
6522 "cnt to %d\n",
6523 LPFC_MAX_NVME_SEG_CNT);
6524 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6525 } else
6526 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6527 }
6528
6529 /* Initialize the host templates with the updated values. */
6530 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6531 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6532 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
6533
6534 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
6535 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6536 else
6537 phba->cfg_sg_dma_buf_size =
6538 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6539
6540 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6541 "9087 sg_seg_cnt:%d dmabuf_size:%d "
6542 "total:%d scsi:%d nvme:%d\n",
6543 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6544 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6545 phba->cfg_nvme_seg_cnt);
6546
6547 /* Initialize buffer queue management fields */ 6474 /* Initialize buffer queue management fields */
6548 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); 6475 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
6549 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 6476 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
@@ -6552,11 +6479,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6552 /* 6479 /*
6553 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 6480 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
6554 */ 6481 */
6555 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 6482 /* Initialize the Abort buffer list used by driver */
6556 /* Initialize the Abort scsi buffer list used by driver */ 6483 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6557 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 6484 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
6558 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
6559 }
6560 6485
6561 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 6486 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6562 /* Initialize the Abort nvme buffer list used by driver */ 6487 /* Initialize the Abort nvme buffer list used by driver */
@@ -6764,6 +6689,131 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6764 } 6689 }
6765 } 6690 }
6766 6691
6692 /*
6693 * 1 for cmd, 1 for rsp, NVME adds an extra one
6694 * for boundary conditions in its max_sgl_segment template.
6695 */
6696 extra = 2;
6697 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6698 extra++;
6699
6700 /*
6701 * It doesn't matter what family our adapter is in, we are
6702 * limited to 2 Pages, 512 SGEs, for our SGL.
6703 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6704 */
6705 max_buf_size = (2 * SLI4_PAGE_SIZE);
6706
6707 /*
6708 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
6709 * used to create the sg_dma_buf_pool must be calculated.
6710 */
6711 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6712 /* Both cfg_enable_bg and cfg_external_dif code paths */
6713
6714 /*
6715 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
6716 * the FCP rsp, and a SGE. Since we have no control
6717 * over how many protection segments the SCSI Layer
6718 * will hand us (ie: there could be one for every block
6719 * in the IO), just allocate enough SGEs to accommodate
6720 * our max amount and we need to limit lpfc_sg_seg_cnt
6721 * to minimize the risk of running out.
6722 */
6723 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6724 sizeof(struct fcp_rsp) + max_buf_size;
6725
6726 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6727 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6728
6729 /*
6730 * If supporting DIF, reduce the seg count for scsi to
6731 * allow room for the DIF sges.
6732 */
6733 if (phba->cfg_enable_bg &&
6734 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6735 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6736 else
6737 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6738
6739 } else {
6740 /*
6741 * The scsi_buf for a regular I/O holds the FCP cmnd,
6742 * the FCP rsp, a SGE for each, and a SGE for up to
6743 * cfg_sg_seg_cnt data segments.
6744 */
6745 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6746 sizeof(struct fcp_rsp) +
6747 ((phba->cfg_sg_seg_cnt + extra) *
6748 sizeof(struct sli4_sge));
6749
6750 /* Total SGEs for scsi_sg_list */
6751 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6752 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6753
6754 /*
6755 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
6756 * need to post 1 page for the SGL.
6757 */
6758 }
6759
6760 if (phba->cfg_xpsgl && !phba->nvmet_support)
6761 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
6762 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
6763 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6764 else
6765 phba->cfg_sg_dma_buf_size =
6766 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6767
6768 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
6769 sizeof(struct sli4_sge);
6770
6771 /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
6772 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6773 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6774 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6775 "6300 Reducing NVME sg segment "
6776 "cnt to %d\n",
6777 LPFC_MAX_NVME_SEG_CNT);
6778 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6779 } else
6780 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6781 }
6782
6783 /* Initialize the host templates with the updated values. */
6784 lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6785 lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
6786 lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
6787
6788 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6789 "9087 sg_seg_cnt:%d dmabuf_size:%d "
6790 "total:%d scsi:%d nvme:%d\n",
6791 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6792 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6793 phba->cfg_nvme_seg_cnt);
6794
6795 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
6796 i = phba->cfg_sg_dma_buf_size;
6797 else
6798 i = SLI4_PAGE_SIZE;
6799
6800 phba->lpfc_sg_dma_buf_pool =
6801 dma_pool_create("lpfc_sg_dma_buf_pool",
6802 &phba->pcidev->dev,
6803 phba->cfg_sg_dma_buf_size,
6804 i, 0);
6805 if (!phba->lpfc_sg_dma_buf_pool)
6806 goto out_free_bsmbx;
6807
6808 phba->lpfc_cmd_rsp_buf_pool =
6809 dma_pool_create("lpfc_cmd_rsp_buf_pool",
6810 &phba->pcidev->dev,
6811 sizeof(struct fcp_cmnd) +
6812 sizeof(struct fcp_rsp),
6813 i, 0);
6814 if (!phba->lpfc_cmd_rsp_buf_pool)
6815 goto out_free_sg_dma_buf;
6816
6767 mempool_free(mboxq, phba->mbox_mem_pool); 6817 mempool_free(mboxq, phba->mbox_mem_pool);
6768 6818
6769 /* Verify OAS is supported */ 6819 /* Verify OAS is supported */
@@ -6775,12 +6825,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6775 /* Verify all the SLI4 queues */ 6825 /* Verify all the SLI4 queues */
6776 rc = lpfc_sli4_queue_verify(phba); 6826 rc = lpfc_sli4_queue_verify(phba);
6777 if (rc) 6827 if (rc)
6778 goto out_free_bsmbx; 6828 goto out_free_cmd_rsp_buf;
6779 6829
6780 /* Create driver internal CQE event pool */ 6830 /* Create driver internal CQE event pool */
6781 rc = lpfc_sli4_cq_event_pool_create(phba); 6831 rc = lpfc_sli4_cq_event_pool_create(phba);
6782 if (rc) 6832 if (rc)
6783 goto out_free_bsmbx; 6833 goto out_free_cmd_rsp_buf;
6784 6834
6785 /* Initialize sgl lists per host */ 6835 /* Initialize sgl lists per host */
6786 lpfc_init_sgl_list(phba); 6836 lpfc_init_sgl_list(phba);
@@ -6871,6 +6921,12 @@ out_free_active_sgl:
6871 lpfc_free_active_sgl(phba); 6921 lpfc_free_active_sgl(phba);
6872out_destroy_cq_event_pool: 6922out_destroy_cq_event_pool:
6873 lpfc_sli4_cq_event_pool_destroy(phba); 6923 lpfc_sli4_cq_event_pool_destroy(phba);
6924out_free_cmd_rsp_buf:
6925 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
6926 phba->lpfc_cmd_rsp_buf_pool = NULL;
6927out_free_sg_dma_buf:
6928 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6929 phba->lpfc_sg_dma_buf_pool = NULL;
6874out_free_bsmbx: 6930out_free_bsmbx:
6875 lpfc_destroy_bootstrap_mbox(phba); 6931 lpfc_destroy_bootstrap_mbox(phba);
6876out_free_mem: 6932out_free_mem:
@@ -6997,12 +7053,6 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
6997 return error; 7053 return error;
6998 } 7054 }
6999 7055
7000 /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */
7001 if (phba->sli_rev == LPFC_SLI_REV4)
7002 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7003 else
7004 phba->wq = NULL;
7005
7006 return 0; 7056 return 0;
7007} 7057}
7008 7058
@@ -7563,7 +7613,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7563 uint32_t old_mask; 7613 uint32_t old_mask;
7564 uint32_t old_guard; 7614 uint32_t old_guard;
7565 7615
7566 int pagecnt = 10;
7567 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 7616 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
7568 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7617 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7569 "1478 Registering BlockGuard with the " 7618 "1478 Registering BlockGuard with the "
@@ -7600,56 +7649,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
7600 "layer, Bad protection parameters: %d %d\n", 7649 "layer, Bad protection parameters: %d %d\n",
7601 old_mask, old_guard); 7650 old_mask, old_guard);
7602 } 7651 }
7603
7604 if (!_dump_buf_data) {
7605 while (pagecnt) {
7606 spin_lock_init(&_dump_buf_lock);
7607 _dump_buf_data =
7608 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7609 if (_dump_buf_data) {
7610 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7611 "9043 BLKGRD: allocated %d pages for "
7612 "_dump_buf_data at 0x%p\n",
7613 (1 << pagecnt), _dump_buf_data);
7614 _dump_buf_data_order = pagecnt;
7615 memset(_dump_buf_data, 0,
7616 ((1 << PAGE_SHIFT) << pagecnt));
7617 break;
7618 } else
7619 --pagecnt;
7620 }
7621 if (!_dump_buf_data_order)
7622 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7623 "9044 BLKGRD: ERROR unable to allocate "
7624 "memory for hexdump\n");
7625 } else
7626 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7627 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
7628 "\n", _dump_buf_data);
7629 if (!_dump_buf_dif) {
7630 while (pagecnt) {
7631 _dump_buf_dif =
7632 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7633 if (_dump_buf_dif) {
7634 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7635 "9046 BLKGRD: allocated %d pages for "
7636 "_dump_buf_dif at 0x%p\n",
7637 (1 << pagecnt), _dump_buf_dif);
7638 _dump_buf_dif_order = pagecnt;
7639 memset(_dump_buf_dif, 0,
7640 ((1 << PAGE_SHIFT) << pagecnt));
7641 break;
7642 } else
7643 --pagecnt;
7644 }
7645 if (!_dump_buf_dif_order)
7646 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7647 "9047 BLKGRD: ERROR unable to allocate "
7648 "memory for hexdump\n");
7649 } else
7650 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7651 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
7652 _dump_buf_dif);
7653} 7652}
7654 7653
7655/** 7654/**
@@ -8309,6 +8308,10 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
8309 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 8308 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
8310 phba->sli4_hba.max_cfg_param.max_xri = 8309 phba->sli4_hba.max_cfg_param.max_xri =
8311 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 8310 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8311 /* Reduce resource usage in kdump environment */
8312 if (is_kdump_kernel() &&
8313 phba->sli4_hba.max_cfg_param.max_xri > 512)
8314 phba->sli4_hba.max_cfg_param.max_xri = 512;
8312 phba->sli4_hba.max_cfg_param.xri_base = 8315 phba->sli4_hba.max_cfg_param.xri_base =
8313 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 8316 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
8314 phba->sli4_hba.max_cfg_param.max_vpi = 8317 phba->sli4_hba.max_cfg_param.max_vpi =
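The is_kdump_kernel() clamp caps the exchange count the firmware advertises, and with it every pool sized from max_xri, when the driver is probing inside a crash-capture kernel: memory there is deliberately tiny and the only I/O is a single dump stream. The same shape fits any firmware-reported resource; a sketch:

#include <linux/crash_dump.h>
#include <linux/types.h>

#define KDUMP_MAX_XRI	512	/* cap used in the hunk above */

static u32 clamp_for_kdump(u32 fw_max_xri)
{
	/* In a kdump kernel, trade throughput for footprint. */
	if (is_kdump_kernel() && fw_max_xri > KDUMP_MAX_XRI)
		return KDUMP_MAX_XRI;
	return fw_max_xri;
}
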
@@ -8382,11 +8385,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
8382 */ 8385 */
8383 qmin -= 4; 8386 qmin -= 4;
8384 8387
8385 /* If NVME is configured, double the number of CQ/WQs needed */
8386 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
8387 !phba->nvmet_support)
8388 qmin /= 2;
8389
8390 /* Check to see if there is enough for NVME */ 8388 /* Check to see if there is enough for NVME */
8391 if ((phba->cfg_irq_chann > qmin) || 8389 if ((phba->cfg_irq_chann > qmin) ||
8392 (phba->cfg_hdw_queue > qmin)) { 8390 (phba->cfg_hdw_queue > qmin)) {
@@ -8643,51 +8641,14 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
8643} 8641}
8644 8642
8645static int 8643static int
8646lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) 8644lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
8647{
8648 struct lpfc_queue *qdesc;
8649 int cpu;
8650
8651 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
8652 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8653 phba->sli4_hba.cq_esize,
8654 LPFC_CQE_EXP_COUNT, cpu);
8655 if (!qdesc) {
8656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8657 "0508 Failed allocate fast-path NVME CQ (%d)\n",
8658 wqidx);
8659 return 1;
8660 }
8661 qdesc->qe_valid = 1;
8662 qdesc->hdwq = wqidx;
8663 qdesc->chann = cpu;
8664 phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
8665
8666 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8667 LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT,
8668 cpu);
8669 if (!qdesc) {
8670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8671 "0509 Failed allocate fast-path NVME WQ (%d)\n",
8672 wqidx);
8673 return 1;
8674 }
8675 qdesc->hdwq = wqidx;
8676 qdesc->chann = wqidx;
8677 phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
8678 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8679 return 0;
8680}
8681
8682static int
8683lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8684{ 8645{
8685 struct lpfc_queue *qdesc; 8646 struct lpfc_queue *qdesc;
8686 uint32_t wqesize; 8647 u32 wqesize;
8687 int cpu; 8648 int cpu;
8688 8649
8689 cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); 8650 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8690 /* Create Fast Path FCP CQs */ 8651 /* Create Fast Path IO CQs */
8691 if (phba->enab_exp_wqcq_pages) 8652 if (phba->enab_exp_wqcq_pages)
8692 /* Increase the CQ size when WQEs contain an embedded cdb */ 8653 /* Increase the CQ size when WQEs contain an embedded cdb */
8693 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, 8654 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
@@ -8700,15 +8661,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8700 phba->sli4_hba.cq_ecount, cpu); 8661 phba->sli4_hba.cq_ecount, cpu);
8701 if (!qdesc) { 8662 if (!qdesc) {
8702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8663 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8703 "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); 8664 "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
8704 return 1; 8665 return 1;
8705 } 8666 }
8706 qdesc->qe_valid = 1; 8667 qdesc->qe_valid = 1;
8707 qdesc->hdwq = wqidx; 8668 qdesc->hdwq = idx;
8708 qdesc->chann = cpu; 8669 qdesc->chann = cpu;
8709 phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc; 8670 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
8710 8671
8711 /* Create Fast Path FCP WQs */ 8672 /* Create Fast Path IO WQs */
8712 if (phba->enab_exp_wqcq_pages) { 8673 if (phba->enab_exp_wqcq_pages) {
8713 /* Increase the WQ size when WQEs contain an embedded cdb */ 8674 /* Increase the WQ size when WQEs contain an embedded cdb */
8714 wqesize = (phba->fcp_embed_io) ? 8675 wqesize = (phba->fcp_embed_io) ?
@@ -8723,13 +8684,13 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8723 8684
8724 if (!qdesc) { 8685 if (!qdesc) {
8725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8686 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8726 "0503 Failed allocate fast-path FCP WQ (%d)\n", 8687 "0503 Failed allocate fast-path IO WQ (%d)\n",
8727 wqidx); 8688 idx);
8728 return 1; 8689 return 1;
8729 } 8690 }
8730 qdesc->hdwq = wqidx; 8691 qdesc->hdwq = idx;
8731 qdesc->chann = wqidx; 8692 qdesc->chann = cpu;
8732 phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc; 8693 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
8733 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8694 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8734 return 0; 8695 return 0;
8735} 8696}
@@ -8793,12 +8754,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8793 qp->get_io_bufs = 0; 8754 qp->get_io_bufs = 0;
8794 qp->put_io_bufs = 0; 8755 qp->put_io_bufs = 0;
8795 qp->total_io_bufs = 0; 8756 qp->total_io_bufs = 0;
8796 spin_lock_init(&qp->abts_scsi_buf_list_lock); 8757 spin_lock_init(&qp->abts_io_buf_list_lock);
8797 INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list); 8758 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
8798 qp->abts_scsi_io_bufs = 0; 8759 qp->abts_scsi_io_bufs = 0;
8799 spin_lock_init(&qp->abts_nvme_buf_list_lock);
8800 INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list);
8801 qp->abts_nvme_io_bufs = 0; 8760 qp->abts_nvme_io_bufs = 0;
8761 INIT_LIST_HEAD(&qp->sgl_list);
8762 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
8763 spin_lock_init(&qp->hdwq_lock);
8802 } 8764 }
8803 } 8765 }
8804 8766
@@ -8864,7 +8826,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8864 } 8826 }
8865 qdesc->qe_valid = 1; 8827 qdesc->qe_valid = 1;
8866 qdesc->hdwq = cpup->hdwq; 8828 qdesc->hdwq = cpup->hdwq;
8867 qdesc->chann = cpu; /* First CPU this EQ is affinitised to */ 8829 qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
8868 qdesc->last_cpu = qdesc->chann; 8830 qdesc->last_cpu = qdesc->chann;
8869 8831
8870 /* Save the allocated EQ in the Hardware Queue */ 8832 /* Save the allocated EQ in the Hardware Queue */
@@ -8895,41 +8857,31 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8895 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; 8857 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
8896 } 8858 }
8897 8859
8898 /* Allocate SCSI SLI4 CQ/WQs */ 8860 /* Allocate IO Path SLI4 CQ/WQs */
8899 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8861 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8900 if (lpfc_alloc_fcp_wq_cq(phba, idx)) 8862 if (lpfc_alloc_io_wq_cq(phba, idx))
8901 goto out_error; 8863 goto out_error;
8902 } 8864 }
8903 8865
8904 /* Allocate NVME SLI4 CQ/WQs */ 8866 if (phba->nvmet_support) {
8905 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 8867 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8906 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 8868 cpu = lpfc_find_cpu_handle(phba, idx,
8907 if (lpfc_alloc_nvme_wq_cq(phba, idx)) 8869 LPFC_FIND_BY_HDWQ);
8908 goto out_error; 8870 qdesc = lpfc_sli4_queue_alloc(phba,
8909 }
8910
8911 if (phba->nvmet_support) {
8912 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
8913 cpu = lpfc_find_cpu_handle(phba, idx,
8914 LPFC_FIND_BY_HDWQ);
8915 qdesc = lpfc_sli4_queue_alloc(
8916 phba,
8917 LPFC_DEFAULT_PAGE_SIZE, 8871 LPFC_DEFAULT_PAGE_SIZE,
8918 phba->sli4_hba.cq_esize, 8872 phba->sli4_hba.cq_esize,
8919 phba->sli4_hba.cq_ecount, 8873 phba->sli4_hba.cq_ecount,
8920 cpu); 8874 cpu);
8921 if (!qdesc) { 8875 if (!qdesc) {
8922 lpfc_printf_log( 8876 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8923 phba, KERN_ERR, LOG_INIT,
8924 "3142 Failed allocate NVME " 8877 "3142 Failed allocate NVME "
8925 "CQ Set (%d)\n", idx); 8878 "CQ Set (%d)\n", idx);
8926 goto out_error; 8879 goto out_error;
8927 }
8928 qdesc->qe_valid = 1;
8929 qdesc->hdwq = idx;
8930 qdesc->chann = cpu;
8931 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
8932 } 8880 }
8881 qdesc->qe_valid = 1;
8882 qdesc->hdwq = idx;
8883 qdesc->chann = cpu;
8884 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
8933 } 8885 }
8934 } 8886 }
8935 8887
@@ -8960,7 +8912,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8960 goto out_error; 8912 goto out_error;
8961 } 8913 }
8962 qdesc->qe_valid = 1; 8914 qdesc->qe_valid = 1;
8963 qdesc->chann = 0; 8915 qdesc->chann = cpu;
8964 phba->sli4_hba.els_cq = qdesc; 8916 phba->sli4_hba.els_cq = qdesc;
8965 8917
8966 8918
@@ -8978,7 +8930,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8978 "0505 Failed allocate slow-path MQ\n"); 8930 "0505 Failed allocate slow-path MQ\n");
8979 goto out_error; 8931 goto out_error;
8980 } 8932 }
8981 qdesc->chann = 0; 8933 qdesc->chann = cpu;
8982 phba->sli4_hba.mbx_wq = qdesc; 8934 phba->sli4_hba.mbx_wq = qdesc;
8983 8935
8984 /* 8936 /*
@@ -8994,7 +8946,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
8994 "0504 Failed allocate slow-path ELS WQ\n"); 8946 "0504 Failed allocate slow-path ELS WQ\n");
8995 goto out_error; 8947 goto out_error;
8996 } 8948 }
8997 qdesc->chann = 0; 8949 qdesc->chann = cpu;
8998 phba->sli4_hba.els_wq = qdesc; 8950 phba->sli4_hba.els_wq = qdesc;
8999 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8951 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9000 8952
@@ -9008,7 +8960,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
9008 "6079 Failed allocate NVME LS CQ\n"); 8960 "6079 Failed allocate NVME LS CQ\n");
9009 goto out_error; 8961 goto out_error;
9010 } 8962 }
9011 qdesc->chann = 0; 8963 qdesc->chann = cpu;
9012 qdesc->qe_valid = 1; 8964 qdesc->qe_valid = 1;
9013 phba->sli4_hba.nvmels_cq = qdesc; 8965 phba->sli4_hba.nvmels_cq = qdesc;
9014 8966
@@ -9021,7 +8973,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
9021 "6080 Failed allocate NVME LS WQ\n"); 8973 "6080 Failed allocate NVME LS WQ\n");
9022 goto out_error; 8974 goto out_error;
9023 } 8975 }
9024 qdesc->chann = 0; 8976 qdesc->chann = cpu;
9025 phba->sli4_hba.nvmels_wq = qdesc; 8977 phba->sli4_hba.nvmels_wq = qdesc;
9026 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); 8978 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
9027 } 8979 }
@@ -9164,15 +9116,13 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
 	/* Loop thru all Hardware Queues */
 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
 		/* Free the CQ/WQ corresponding to the Hardware Queue */
-		lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
-		lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
-		lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
-		lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
-		hdwq[idx].hba_eq = NULL;
-		hdwq[idx].fcp_cq = NULL;
-		hdwq[idx].nvme_cq = NULL;
-		hdwq[idx].fcp_wq = NULL;
-		hdwq[idx].nvme_wq = NULL;
+		lpfc_sli4_queue_free(hdwq[idx].io_cq);
+		lpfc_sli4_queue_free(hdwq[idx].io_wq);
+		hdwq[idx].io_cq = NULL;
+		hdwq[idx].io_wq = NULL;
+		if (phba->cfg_xpsgl && !phba->nvmet_support)
+			lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
+		lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
 	}
 	/* Loop thru all IRQ vectors */
 	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
@@ -9372,8 +9322,7 @@ lpfc_setup_cq_lookup(struct lpfc_hba *phba)
 		list_for_each_entry(childq, &eq->child_list, list) {
 			if (childq->queue_id > phba->sli4_hba.cq_max)
 				continue;
-			if ((childq->subtype == LPFC_FCP) ||
-			    (childq->subtype == LPFC_NVME))
+			if (childq->subtype == LPFC_IO)
 				phba->sli4_hba.cq_lookup[childq->queue_id] =
 					childq;
 		}
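The cq_lookup change above folds the two per-protocol subtypes into the single LPFC_IO subtype. For readers unfamiliar with the pattern, a direct-index table keyed by hardware queue id gives the interrupt path an O(1) id-to-object translation instead of walking each EQ's child list. A minimal sketch of the idea — illustrative only, with hypothetical names, not the driver's actual structures:

#include <stdlib.h>

struct cq {
    unsigned int queue_id;      /* id reported in each completion entry */
    /* ... per-queue state ... */
};

struct cq **build_cq_lookup(struct cq **cqs, int ncq, unsigned int max_id)
{
    /* One slot per possible queue id; unused slots stay NULL. */
    struct cq **lookup = calloc(max_id + 1, sizeof(*lookup));
    int i;

    if (!lookup)
        return NULL;
    for (i = 0; i < ncq; i++)
        if (cqs[i]->queue_id <= max_id)
            lookup[cqs[i]->queue_id] = cqs[i];
    return lookup;              /* ISR side: cq = lookup[id]; no list walk */
}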
@@ -9499,31 +9448,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	}
 
 	/* Loop thru all Hardware Queues */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-			cpu = lpfc_find_cpu_handle(phba, qidx,
-						   LPFC_FIND_BY_HDWQ);
-			cpup = &phba->sli4_hba.cpu_map[cpu];
-
-			/* Create the CQ/WQ corresponding to the
-			 * Hardware Queue
-			 */
-			rc = lpfc_create_wq_cq(phba,
-				phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
-				qp[qidx].nvme_cq,
-				qp[qidx].nvme_wq,
-				&phba->sli4_hba.hdwq[qidx].nvme_cq_map,
-				qidx, LPFC_NVME);
-			if (rc) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"6123 Failed to setup fastpath "
-					"NVME WQ/CQ (%d), rc = 0x%x\n",
-					qidx, (uint32_t)rc);
-				goto out_destroy;
-			}
-		}
-	}
-
 	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
 		cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
 		cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -9531,14 +9455,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		/* Create the CQ/WQ corresponding to the Hardware Queue */
 		rc = lpfc_create_wq_cq(phba,
 				       phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
-				       qp[qidx].fcp_cq,
-				       qp[qidx].fcp_wq,
-				       &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
-				       qidx, LPFC_FCP);
+				       qp[qidx].io_cq,
+				       qp[qidx].io_wq,
+				       &phba->sli4_hba.hdwq[qidx].io_cq_map,
+				       qidx,
+				       LPFC_IO);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0535 Failed to setup fastpath "
-					"FCP WQ/CQ (%d), rc = 0x%x\n",
+					"IO WQ/CQ (%d), rc = 0x%x\n",
 					qidx, (uint32_t)rc);
 			goto out_destroy;
 		}
@@ -9838,10 +9763,8 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
 		/* Destroy the CQ/WQ corresponding to Hardware Queue */
 		qp = &phba->sli4_hba.hdwq[qidx];
-		lpfc_wq_destroy(phba, qp->fcp_wq);
-		lpfc_wq_destroy(phba, qp->nvme_wq);
-		lpfc_cq_destroy(phba, qp->fcp_cq);
-		lpfc_cq_destroy(phba, qp->nvme_cq);
+		lpfc_wq_destroy(phba, qp->io_wq);
+		lpfc_cq_destroy(phba, qp->io_cq);
 	}
 	/* Loop thru all IRQ vectors */
 	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
@@ -10711,7 +10634,7 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
 static void
 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 {
-	int i, cpu, idx, new_cpu, start_cpu, first_cpu;
+	int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
 	int max_phys_id, min_phys_id;
 	int max_core_id, min_core_id;
 	struct lpfc_vector_map_info *cpup;
@@ -10753,8 +10676,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 #endif
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"3328 CPU physid %d coreid %d\n",
-				cpup->phys_id, cpup->core_id);
+				"3328 CPU %d physid %d coreid %d flag x%x\n",
+				cpu, cpup->phys_id, cpup->core_id, cpup->flag);
 
 		if (cpup->phys_id > max_phys_id)
 			max_phys_id = cpup->phys_id;
@@ -10812,17 +10735,17 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 			cpup->eq = idx;
 			cpup->irq = pci_irq_vector(phba->pcidev, idx);
 
-			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-					"3336 Set Affinity: CPU %d "
-					"irq %d eq %d\n",
-					cpu, cpup->irq, cpup->eq);
-
 			/* If this is the first CPU thats assigned to this
 			 * vector, set LPFC_CPU_FIRST_IRQ.
 			 */
 			if (!i)
 				cpup->flag |= LPFC_CPU_FIRST_IRQ;
 			i++;
+
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3336 Set Affinity: CPU %d "
+					"irq %d eq %d flag x%x\n",
+					cpu, cpup->irq, cpup->eq, cpup->flag);
 		}
 	}
 
@@ -10936,69 +10859,103 @@ found_any:
 		}
 	}
 
+	/* Assign hdwq indices that are unique across all cpus in the map
+	 * that are also FIRST_CPUs.
+	 */
+	idx = 0;
+	for_each_present_cpu(cpu) {
+		cpup = &phba->sli4_hba.cpu_map[cpu];
+
+		/* Only FIRST IRQs get a hdwq index assignment. */
+		if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+			continue;
+
+		/* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
+		cpup->hdwq = idx;
+		idx++;
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3333 Set Affinity: CPU %d (phys %d core %d): "
+				"hdwq %d eq %d irq %d flg x%x\n",
+				cpu, cpup->phys_id, cpup->core_id,
+				cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+	}
 	/* Finally we need to associate a hdwq with each cpu_map entry
 	 * This will be 1 to 1 - hdwq to cpu, unless there are less
 	 * hardware queues then CPUs. For that case we will just round-robin
 	 * the available hardware queues as they get assigned to CPUs.
+	 * The next_idx is the idx from the FIRST_CPU loop above to account
+	 * for irq_chann < hdwq. The idx is used for round-robin assignments
+	 * and needs to start at 0.
 	 */
-	idx = 0;
+	next_idx = idx;
 	start_cpu = 0;
+	idx = 0;
 	for_each_present_cpu(cpu) {
 		cpup = &phba->sli4_hba.cpu_map[cpu];
-		if (idx >= phba->cfg_hdw_queue) {
-			/* We need to reuse a Hardware Queue for another CPU,
-			 * so be smart about it and pick one that has its
-			 * IRQ/EQ mapped to the same phys_id (CPU package).
-			 * and core_id.
-			 */
-			new_cpu = start_cpu;
-			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
-				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
-				if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
-				    (new_cpup->phys_id == cpup->phys_id) &&
-				    (new_cpup->core_id == cpup->core_id))
-					goto found_hdwq;
-				new_cpu = cpumask_next(
-					new_cpu, cpu_present_mask);
-				if (new_cpu == nr_cpumask_bits)
-					new_cpu = first_cpu;
-			}
 
-			/* If we can't match both phys_id and core_id,
-			 * settle for just a phys_id match.
-			 */
-			new_cpu = start_cpu;
-			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
-				new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
-				if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
-				    (new_cpup->phys_id == cpup->phys_id))
-					goto found_hdwq;
-				new_cpu = cpumask_next(
-					new_cpu, cpu_present_mask);
-				if (new_cpu == nr_cpumask_bits)
-					new_cpu = first_cpu;
+		/* FIRST cpus are already mapped. */
+		if (cpup->flag & LPFC_CPU_FIRST_IRQ)
+			continue;
+
+		/* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
+		 * of the unassigned cpus to the next idx so that all
+		 * hdw queues are fully utilized.
+		 */
+		if (next_idx < phba->cfg_hdw_queue) {
+			cpup->hdwq = next_idx;
+			next_idx++;
+			continue;
+		}
+
+		/* Not a First CPU and all hdw_queues are used.  Reuse a
+		 * Hardware Queue for another CPU, so be smart about it
+		 * and pick one that has its IRQ/EQ mapped to the same phys_id
+		 * (CPU package) and core_id.
+		 */
+		new_cpu = start_cpu;
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+			    new_cpup->phys_id == cpup->phys_id &&
+			    new_cpup->core_id == cpup->core_id) {
+				goto found_hdwq;
 			}
+			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+			if (new_cpu == nr_cpumask_bits)
+				new_cpu = first_cpu;
+		}
 
-			/* Otherwise just round robin on cfg_hdw_queue */
-			cpup->hdwq = idx % phba->cfg_hdw_queue;
-			goto logit;
-found_hdwq:
-			/* We found an available entry, copy the IRQ info */
-			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-			if (start_cpu == nr_cpumask_bits)
-				start_cpu = first_cpu;
-			cpup->hdwq = new_cpup->hdwq;
-		} else {
-			/* 1 to 1, CPU to hdwq */
-			cpup->hdwq = idx;
+		/* If we can't match both phys_id and core_id,
+		 * settle for just a phys_id match.
+		 */
+		new_cpu = start_cpu;
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+			if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+			    new_cpup->phys_id == cpup->phys_id)
+				goto found_hdwq;
+
+			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+			if (new_cpu == nr_cpumask_bits)
+				new_cpu = first_cpu;
 		}
-logit:
+
+		/* Otherwise just round robin on cfg_hdw_queue */
+		cpup->hdwq = idx % phba->cfg_hdw_queue;
+		idx++;
+		goto logit;
+ found_hdwq:
+		/* We found an available entry, copy the IRQ info */
+		start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+		if (start_cpu == nr_cpumask_bits)
+			start_cpu = first_cpu;
+		cpup->hdwq = new_cpup->hdwq;
+ logit:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3335 Set Affinity: CPU %d (phys %d core %d): "
 				"hdwq %d eq %d irq %d flg x%x\n",
 				cpu, cpup->phys_id, cpup->core_id,
 				cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
-		idx++;
 	}
 
 	/* The cpu_map array will be used later during initialization
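The reworked mapping above is easier to follow outside the driver: a first pass gives every CPU flagged LPFC_CPU_FIRST_IRQ its own hdwq index; a second pass hands remaining CPUs any hdwq indices still unused (next_idx), then tries to share the hdwq of an already-assigned CPU on the same package/core, and finally falls back to round-robin. A standalone sketch under those assumptions (simplified fields, no kernel dependencies; the driver's extra phys_id-only fallback is omitted for brevity, and pass 1 assumes at most nhdwq IRQ-owning CPUs):

#include <stdbool.h>

#define MAP_EMPTY 0xffff    /* hypothetical "not assigned yet" marker */

struct cpu_info {
    unsigned short hdwq;    /* caller pre-fills with MAP_EMPTY */
    bool first_irq;         /* this CPU owns an EQ/IRQ vector */
    int phys_id, core_id;
};

static void assign_hdwq(struct cpu_info *map, int ncpu, int nhdwq)
{
    int idx = 0, next_idx, rr = 0, cpu, probe;

    /* Pass 1: each IRQ-owning CPU gets a unique hdwq index. */
    for (cpu = 0; cpu < ncpu; cpu++)
        if (map[cpu].first_irq)
            map[cpu].hdwq = idx++;
    next_idx = idx;

    /* Pass 2: everyone else. */
    for (cpu = 0; cpu < ncpu; cpu++) {
        if (map[cpu].first_irq)
            continue;
        if (next_idx < nhdwq) {             /* drain unused hdwqs first */
            map[cpu].hdwq = next_idx++;
            continue;
        }
        for (probe = 0; probe < ncpu; probe++)  /* share a core sibling's */
            if (map[probe].hdwq != MAP_EMPTY &&
                map[probe].phys_id == map[cpu].phys_id &&
                map[probe].core_id == map[cpu].core_id) {
                map[cpu].hdwq = map[probe].hdwq;
                break;
            }
        if (probe == ncpu)                  /* last resort: round-robin */
            map[cpu].hdwq = rr++ % nhdwq;
    }
}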
@@ -11089,10 +11046,10 @@ vec_fail_out:
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to enable the MSI interrupt mode to device with
- * SLI-4 interface spec. The kernel function pci_enable_msi() is called
- * to enable the MSI vector. The device driver is responsible for calling
- * the request_irq() to register MSI vector with a interrupt the handler,
- * which is done in this function.
+ * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
+ * called to enable the MSI vector. The device driver is responsible for
+ * calling the request_irq() to register MSI vector with a interrupt the
+ * handler, which is done in this function.
  *
  * Return codes
  * 	0 - successful
@@ -11103,20 +11060,21 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
 {
 	int rc, index;
 
-	rc = pci_enable_msi(phba->pcidev);
-	if (!rc)
+	rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
+				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
+	if (rc > 0)
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0487 PCI enable MSI mode success.\n");
 	else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0488 PCI enable MSI mode failed (%d)\n", rc);
-		return rc;
+		return rc ? rc : -1;
 	}
 
 	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 			 0, LPFC_DRIVER_NAME, phba);
 	if (rc) {
-		pci_disable_msi(phba->pcidev);
+		pci_free_irq_vectors(phba->pcidev);
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0490 MSI request_irq failed (%d)\n", rc);
 		return rc;
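The conversion above follows the generic pattern for modern PCI IRQ setup: pci_alloc_irq_vectors() returns the number of vectors allocated (so success is rc > 0, not rc == 0), and the matching teardown is pci_free_irq_vectors() rather than pci_disable_msi(). A minimal sketch of the idiom for a single MSI vector — hypothetical helper and handler names, not lpfc code:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *data)
{
    return IRQ_HANDLED;
}

static int demo_setup_msi(struct pci_dev *pdev, void *drvdata)
{
    int rc;

    /* min == max == 1: exactly one MSI vector, or a negative errno. */
    rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
    if (rc < 0)
        return rc;

    /* pci_irq_vector() maps vector index 0 to its Linux IRQ number. */
    rc = request_irq(pci_irq_vector(pdev, 0), demo_isr, 0,
                     "demo_msi", drvdata);
    if (rc)
        pci_free_irq_vectors(pdev);
    return rc;
}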
@@ -11282,11 +11240,10 @@ static void
 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 {
 	struct lpfc_sli4_hdw_queue *qp;
-	int idx, ccnt, fcnt;
+	int idx, ccnt;
 	int wait_time = 0;
 	int io_xri_cmpl = 1;
 	int nvmet_xri_cmpl = 1;
-	int fcp_xri_cmpl = 1;
 	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
 
 	/* Driver just aborted IOs during the hba_unset process.  Pause
@@ -11300,32 +11257,21 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 	lpfc_nvme_wait_for_io_drain(phba);
 
 	ccnt = 0;
-	fcnt = 0;
 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
 		qp = &phba->sli4_hba.hdwq[idx];
-		fcp_xri_cmpl = list_empty(
-			&qp->lpfc_abts_scsi_buf_list);
-		if (!fcp_xri_cmpl) /* if list is NOT empty */
-			fcnt++;
-		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-			io_xri_cmpl = list_empty(
-				&qp->lpfc_abts_nvme_buf_list);
-			if (!io_xri_cmpl) /* if list is NOT empty */
-				ccnt++;
-		}
+		io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
+		if (!io_xri_cmpl) /* if list is NOT empty */
+			ccnt++;
 	}
 	if (ccnt)
 		io_xri_cmpl = 0;
-	if (fcnt)
-		fcp_xri_cmpl = 0;
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 		nvmet_xri_cmpl =
 			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
 	}
 
-	while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl ||
-	       !nvmet_xri_cmpl) {
+	while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
 		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
 			if (!nvmet_xri_cmpl)
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11334,12 +11280,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 						wait_time/1000);
 			if (!io_xri_cmpl)
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-						"6100 NVME XRI exchange busy "
-						"wait time: %d seconds.\n",
-						wait_time/1000);
-			if (!fcp_xri_cmpl)
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-						"2877 FCP XRI exchange busy "
+						"6100 IO XRI exchange busy "
 						"wait time: %d seconds.\n",
 						wait_time/1000);
 			if (!els_xri_cmpl)
@@ -11355,24 +11296,15 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 		}
 
 		ccnt = 0;
-		fcnt = 0;
 		for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
 			qp = &phba->sli4_hba.hdwq[idx];
-			fcp_xri_cmpl = list_empty(
-				&qp->lpfc_abts_scsi_buf_list);
-			if (!fcp_xri_cmpl) /* if list is NOT empty */
-				fcnt++;
-			if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-				io_xri_cmpl = list_empty(
-					&qp->lpfc_abts_nvme_buf_list);
-				if (!io_xri_cmpl) /* if list is NOT empty */
-					ccnt++;
-			}
+			io_xri_cmpl = list_empty(
+				&qp->lpfc_abts_io_buf_list);
+			if (!io_xri_cmpl) /* if list is NOT empty */
+				ccnt++;
 		}
 		if (ccnt)
 			io_xri_cmpl = 0;
-		if (fcnt)
-			fcp_xri_cmpl = 0;
 
 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 			nvmet_xri_cmpl = list_empty(
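With the per-protocol abort lists merged, the drain loop above reduces to one poll of lpfc_abts_io_buf_list per hardware queue. The overall shape is a bounded poll: recheck the lists, sleep, and start logging once the wait passes a threshold. A condensed sketch of that pattern — a hypothetical helper, not lpfc code; interval and timeout values are illustrative:

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/printk.h>

static void demo_wait_xri_drain(struct list_head *abts, int nq,
                                unsigned int tmo_ms)
{
    unsigned int waited = 0;
    int idx, busy;

    for (;;) {
        busy = 0;
        for (idx = 0; idx < nq; idx++)
            if (!list_empty(&abts[idx]))    /* unlocked peek, as above */
                busy++;
        if (!busy)
            return;
        if (waited > tmo_ms)                /* escalate after the threshold */
            pr_err("IO XRI exchange busy wait time: %u seconds\n",
                   waited / 1000);
        msleep(100);
        waited += 100;
    }
}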
@@ -11616,6 +11548,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
 	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
 
+	/* Check for Extended Pre-Registered SGL support */
+	phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
+
 	/* Check for firmware nvme support */
 	rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
 	      bf_get(cfg_xib, mbx_sli4_parameters));
@@ -11646,6 +11581,7 @@ fcponly:
 			phba->nvme_support = 0;
 			phba->nvmet_support = 0;
 			phba->cfg_nvmet_mrq = 0;
+			phba->cfg_nvme_seg_cnt = 0;
 
 			/* If no FC4 type support, move to just SCSI support */
 			if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
@@ -11654,6 +11590,15 @@ fcponly:
 		}
 	}
 
+	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+	 * accommodate 512K and 1M IOs in a single nvme buf and supply
+	 * enough NVME LS iocb buffers for larger connectivity counts.
+	 */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+		phba->cfg_iocb_cnt = 5;
+	}
+
 	/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
 	    LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
@@ -11718,6 +11663,14 @@ fcponly:
 	else
 		phba->mds_diags_support = 0;
 
+	/*
+	 * Check if the SLI port supports NSLER
+	 */
+	if (bf_get(cfg_nsler, mbx_sli4_parameters))
+		phba->nsler = 1;
+	else
+		phba->nsler = 0;
+
 	return 0;
 }
 
@@ -12146,7 +12099,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
 	lpfc_scsi_dev_block(phba);
 
 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
-	lpfc_sli_flush_fcp_rings(phba);
+	lpfc_sli_flush_io_rings(phba);
 
 	/* stop all timers */
 	lpfc_stop_hba_timers(phba);
@@ -12176,7 +12129,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
 	lpfc_stop_hba_timers(phba);
 
 	/* Clean up all driver's outstanding SCSI I/Os */
-	lpfc_sli_flush_fcp_rings(phba);
+	lpfc_sli_flush_io_rings(phba);
 }
 
 /**
@@ -12948,12 +12901,8 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
 	/* Block all SCSI devices' I/Os on the host */
 	lpfc_scsi_dev_block(phba);
 
-	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
-	lpfc_sli_flush_fcp_rings(phba);
-
-	/* Flush the outstanding NVME IOs if fc4 type enabled. */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
-		lpfc_sli_flush_nvme_rings(phba);
+	/* Flush all driver's outstanding I/Os as we are to reset */
+	lpfc_sli_flush_io_rings(phba);
 
 	/* stop all timers */
 	lpfc_stop_hba_timers(phba);
@@ -12984,12 +12933,8 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
 	/* stop all timers */
 	lpfc_stop_hba_timers(phba);
 
-	/* Clean up all driver's outstanding SCSI I/Os */
-	lpfc_sli_flush_fcp_rings(phba);
-
-	/* Flush the outstanding NVME IOs if fc4 type enabled. */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
-		lpfc_sli_flush_nvme_rings(phba);
+	/* Clean up all driver's outstanding I/Os */
+	lpfc_sli_flush_io_rings(phba);
 }
 
 /**
@@ -13530,19 +13475,6 @@ lpfc_exit(void)
 	pci_unregister_driver(&lpfc_driver);
 	fc_release_transport(lpfc_transport_template);
 	fc_release_transport(lpfc_vport_transport_template);
-	if (_dump_buf_data) {
-		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
-		       "_dump_buf_data at 0x%p\n",
-		       (1L << _dump_buf_data_order), _dump_buf_data);
-		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
-	}
-
-	if (_dump_buf_dif) {
-		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
-		       "_dump_buf_dif at 0x%p\n",
-		       (1L << _dump_buf_dif_order), _dump_buf_dif);
-		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
-	}
 	idr_destroy(&lpfc_hba_index);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 66191fa35f63..ae09bb863497 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -72,8 +72,8 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
  * lpfc_mem_alloc - create and allocate all PCI and memory pools
  * @phba: HBA to allocate pools for
  *
- * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
- * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
+ * Description: Creates and allocates PCI pools lpfc_mbuf_pool,
+ * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
  * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
  *
  * Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -89,36 +89,12 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
 	int i;
 
-	if (phba->sli_rev == LPFC_SLI_REV4) {
-		/* Calculate alignment */
-		if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
-			i = phba->cfg_sg_dma_buf_size;
-		else
-			i = SLI4_PAGE_SIZE;
-
-		phba->lpfc_sg_dma_buf_pool =
-			dma_pool_create("lpfc_sg_dma_buf_pool",
-					&phba->pcidev->dev,
-					phba->cfg_sg_dma_buf_size,
-					i, 0);
-		if (!phba->lpfc_sg_dma_buf_pool)
-			goto fail;
-
-	} else {
-		phba->lpfc_sg_dma_buf_pool =
-			dma_pool_create("lpfc_sg_dma_buf_pool",
-					&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
-					align, 0);
-
-		if (!phba->lpfc_sg_dma_buf_pool)
-			goto fail;
-	}
 
 	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
 						LPFC_BPL_SIZE,
 						align, 0);
 	if (!phba->lpfc_mbuf_pool)
-		goto fail_free_dma_buf_pool;
+		goto fail;
 
 	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
 				       sizeof(struct lpfc_dmabuf),
@@ -208,9 +184,6 @@ fail_free_drb_pool:
 fail_free_lpfc_mbuf_pool:
 	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
-fail_free_dma_buf_pool:
-	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
-	phba->lpfc_sg_dma_buf_pool = NULL;
 fail:
 	return -ENOMEM;
 }
@@ -248,25 +221,22 @@ lpfc_mem_free(struct lpfc_hba *phba)
 
 	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
-	if (phba->lpfc_nvmet_drb_pool)
-		dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
+	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
 	phba->lpfc_nvmet_drb_pool = NULL;
-	if (phba->lpfc_drb_pool)
-		dma_pool_destroy(phba->lpfc_drb_pool);
+
+	dma_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
-	if (phba->lpfc_hrb_pool)
-		dma_pool_destroy(phba->lpfc_hrb_pool);
+
+	dma_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
-	if (phba->txrdy_payload_pool)
-		dma_pool_destroy(phba->txrdy_payload_pool);
+
+	dma_pool_destroy(phba->txrdy_payload_pool);
 	phba->txrdy_payload_pool = NULL;
 
-	if (phba->lpfc_hbq_pool)
-		dma_pool_destroy(phba->lpfc_hbq_pool);
+	dma_pool_destroy(phba->lpfc_hbq_pool);
 	phba->lpfc_hbq_pool = NULL;
 
-	if (phba->rrq_pool)
-		mempool_destroy(phba->rrq_pool);
+	mempool_destroy(phba->rrq_pool);
 	phba->rrq_pool = NULL;
 
 	/* Free NLP memory pool */
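The guards dropped above are safe to remove because dma_pool_destroy() and mempool_destroy(), like kfree(), are defined as no-ops on a NULL pointer. That allows a uniform destroy-then-NULL teardown, for example:

dma_pool_destroy(phba->lpfc_hrb_pool);  /* no-op when the pool is NULL */
phba->lpfc_hrb_pool = NULL;             /* makes a repeated call harmless */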
@@ -290,10 +260,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
 	dma_pool_destroy(phba->lpfc_mbuf_pool);
 	phba->lpfc_mbuf_pool = NULL;
 
-	/* Free DMA buffer memory pool */
-	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
-	phba->lpfc_sg_dma_buf_pool = NULL;
-
 	/* Free Device Data memory pool */
 	if (phba->device_data_mem_pool) {
 		/* Ensure all objects have been returned to the pool */
@@ -366,6 +332,13 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
 	/* Free and destroy all the allocated memory pools */
 	lpfc_mem_free(phba);
 
+	/* Free DMA buffer memory pool */
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	phba->lpfc_sg_dma_buf_pool = NULL;
+
+	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
+	phba->lpfc_cmd_rsp_buf_pool = NULL;
+
 	/* Free the iocb lookup array */
 	kfree(psli->iocbq_lookup);
 	psli->iocbq_lookup = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 59252bfca14e..f4b879d25fe9 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -614,7 +614,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	}
 out:
 	/* If we are authenticated, move to the proper state */
-	if (ndlp->nlp_type & NLP_FCP_TARGET)
+	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
 	else
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -799,9 +799,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		if (npr->writeXferRdyDis)
 			ndlp->nlp_flag |= NLP_FIRSTBURST;
 	}
-	if (npr->Retry)
+	if (npr->Retry && ndlp->nlp_type &
+				(NLP_FCP_INITIATOR | NLP_FCP_TARGET))
 		ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
 
+	if (npr->Retry && phba->nsler &&
+	    ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
+		ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+
+
 	/* If this driver is in nvme target mode, set the ndlp's fc4
 	 * type to NVME provided the PRLI response claims NVME FC4
 	 * type. Target mode does not issue gft_id so doesn't get
@@ -885,7 +891,7 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 				 "1435 release_rpi SKIP UNREG x%x on "
 				 "NPort x%x deferred x%x flg x%x "
-				 "Data: %p\n",
+				 "Data: x%px\n",
 				 ndlp->nlp_rpi, ndlp->nlp_DID,
 				 ndlp->nlp_defer_did,
 				 ndlp->nlp_flag, ndlp);
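This hunk, and the many similar conversions in lpfc_nvme.c below, swap %p for x%px. Since kernel v4.15, plain %p prints a hashed value so logs do not leak kernel addresses; %px opts back in to the raw address for debug output that genuinely needs it, and the leading "x" is just the driver's existing hex-prefix convention (as in x%x). Side by side:

pr_info("Data: %p\n", ndlp);    /* hashed: stable per boot, not the address */
pr_info("Data: x%px\n", ndlp);  /* raw kernel pointer -- debug logs only */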
@@ -1661,6 +1667,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 	LPFC_MBOXQ_t *mb;
 	LPFC_MBOXQ_t *nextmb;
 	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ns_ndlp;
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
@@ -1693,6 +1700,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 	}
 	spin_unlock_irq(&phba->hbalock);
 
+	/* software abort if any GID_FT is outstanding */
+	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
+		ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
+		if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
+			lpfc_els_abort(phba, ns_ndlp);
+	}
+
 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
 	return ndlp->nlp_state;
 }
@@ -1814,7 +1828,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 
 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
-		lpfc_issue_els_prli(vport, ndlp, 0);
+		if (lpfc_issue_els_prli(vport, ndlp, 0)) {
+			lpfc_issue_els_logo(vport, ndlp, 0);
+			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		}
 	} else {
 		if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
 			phba->targetport->port_id = vport->fc_myDID;
@@ -2012,6 +2030,11 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		if (bf_get_be32(prli_init, nvpr))
 			ndlp->nlp_type |= NLP_NVME_INITIATOR;
 
+		if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
+			ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+		else
+			ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
+
 		/* Target driver cannot solicit NVME FB. */
 		if (bf_get_be32(prli_tgt, nvpr)) {
 			/* Complete the nvme target roles.  The transport
@@ -2891,18 +2914,21 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
 			 uint32_t);
 	uint32_t got_ndlp = 0;
+	uint32_t data1;
 
 	if (lpfc_nlp_get(ndlp))
 		got_ndlp = 1;
 
 	cur_state = ndlp->nlp_state;
 
+	data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+		((uint32_t)ndlp->nlp_type));
 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 			 "0211 DSM in event x%x on NPort x%x in "
 			 "state %d rpi x%x Data: x%x x%x\n",
 			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
-			 ndlp->nlp_flag, ndlp->nlp_fc4_type);
+			 ndlp->nlp_flag, data1);
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
 		 "DSM in: evt:%d ste:%d did:x%x",
@@ -2913,10 +2939,13 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
 	/* DSM out state <rc> on NPort <nlp_DID> */
 	if (got_ndlp) {
+		data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+			((uint32_t)ndlp->nlp_type));
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 			"0212 DSM out state %d on NPort x%x "
-			"rpi x%x Data: x%x\n",
-			rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag);
+			"rpi x%x Data: x%x x%x\n",
+			rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
+			data1);
 
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
 			"DSM out: ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 946642cee3df..a227e36cbdc2 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -247,7 +247,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 			"6073 Binding %s HdwQueue %d (cpu %d) to "
-			"hdw_queue %d qhandle %p\n", str,
+			"hdw_queue %d qhandle x%px\n", str,
 			qidx, qhandle->cpu_id, qhandle->index, qhandle);
 	*handle = (void *)qhandle;
 	return 0;
@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
 	vport = lport->vport;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
-			"6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
+			"6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
 			lport, qidx, handle);
 	kfree(handle);
 }
@@ -293,7 +293,7 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
 	struct lpfc_nvme_lport *lport = localport->private;
 
 	lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
-			 "6173 localport %p delete complete\n",
+			 "6173 localport x%px delete complete\n",
 			 lport);
 
 	/* release any threads waiting for the unreg to complete */
@@ -332,7 +332,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 	 * calling state machine to remove the node.
 	 */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			"6146 remoteport delete of remoteport %p\n",
+			"6146 remoteport delete of remoteport x%px\n",
 			remoteport);
 	spin_lock_irq(&vport->phba->hbalock);
 
@@ -383,8 +383,8 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 			 "6047 nvme cmpl Enter "
-			 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
-			 "lsreg:%p bmp:%p ndlp:%p\n",
+			 "Data %px DID %x Xri: %x status %x reason x%x "
+			 "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
 			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
 			 cmdwqe->sli4_xritag, status,
 			 (wcqe->parameter & 0xffff),
@@ -404,7 +404,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	else
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
 				 "6046 nvme cmpl without done call back? "
-				 "Data %p DID %x Xri: %x status %x\n",
+				 "Data %px DID %x Xri: %x status %x\n",
 				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
 				 cmdwqe->sli4_xritag, status);
 	if (ndlp) {
@@ -436,6 +436,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 		return 1;
 
 	wqe = &genwqe->wqe;
+	/* Initialize only 64 bytes */
 	memset(wqe, 0, sizeof(union lpfc_wqe));
 
 	genwqe->context3 = (uint8_t *)bmp;
@@ -516,7 +517,8 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	/* Issue GEN REQ WQE for NPORT <did> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			 "6050 Issue GEN REQ WQE to NPORT x%x "
-			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
+			 "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
+			 "xmit:%d 1st:%d\n",
 			 ndlp->nlp_DID, genwqe->iotag,
 			 vport->port_state,
 			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
@@ -594,7 +596,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 	ndlp = rport->ndlp;
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6051 Remoteport %p, rport has invalid ndlp. "
+				 "6051 Remoteport x%px, rport has invalid ndlp. "
 				 "Failing LS Req\n", pnvme_rport);
 		return -ENODEV;
 	}
@@ -646,10 +648,10 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 
 	/* Expand print to include key fields. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
-			 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
-			 ndlp->nlp_DID,
-			 pnvme_lport, pnvme_rport,
+			 "6149 Issue LS Req to DID 0x%06x lport x%px, "
+			 "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
+			 "%pad %pad\n",
+			 ndlp->nlp_DID, pnvme_lport, pnvme_rport,
 			 pnvme_lsreq, pnvme_lsreq->rqstlen,
 			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
 			 &pnvme_lsreq->rspdma);
@@ -665,8 +667,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 	if (ret != WQE_SUCCESS) {
 		atomic_inc(&lport->xmt_ls_err);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6052 EXIT. issue ls wqe failed lport %p, "
-				 "rport %p lsreq%p Status %x DID %x\n",
+				 "6052 EXIT. issue ls wqe failed lport x%px, "
+				 "rport x%px lsreq x%px Status %x DID %x\n",
 				 pnvme_lport, pnvme_rport, pnvme_lsreq,
 				 ret, ndlp->nlp_DID);
 		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
@@ -723,7 +725,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
 
 	/* Expand print to include key fields. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
-			 "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
+			 "6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d "
 			 "rsplen:%d %pad %pad\n",
 			 pnvme_lport, pnvme_rport,
 			 pnvme_lsreq, pnvme_lsreq->rqstlen,
@@ -984,8 +986,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	if (!lpfc_ncmd->nvmeCmd) {
 		spin_unlock(&lpfc_ncmd->buf_lock);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
-				 "nvmeCmd %p\n",
+				 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
+				 "nvmeCmd x%px\n",
 				 lpfc_ncmd, lpfc_ncmd->nvmeCmd);
 
 		/* Release the lpfc_ncmd regardless of the missing elements. */
@@ -998,9 +1000,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
 	phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
 
-	if (vport->localport) {
+	if (unlikely(status && vport->localport)) {
 		lport = (struct lpfc_nvme_lport *)vport->localport->private;
-		if (lport && status) {
+		if (lport) {
 			if (bf_get(lpfc_wcqe_c_xb, wcqe))
 				atomic_inc(&lport->cmpl_fcp_xb);
 			atomic_inc(&lport->cmpl_fcp_err);
@@ -1100,8 +1102,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 		if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
 			lpfc_printf_vlog(vport, KERN_INFO,
 					 LOG_NVME_IOERR,
-					 "6032 Delay Aborted cmd %p "
-					 "nvme cmd %p, xri x%x, "
+					 "6032 Delay Aborted cmd x%px "
+					 "nvme cmd x%px, xri x%x, "
 					 "xb %d\n",
 					 lpfc_ncmd, nCmd,
 					 lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -1140,7 +1142,7 @@ out_err:
 		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
 		lpfc_nvme_ktime(phba, lpfc_ncmd);
 	}
-	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+	if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
 		uint32_t cpu;
 		idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
 		cpu = raw_smp_processor_id();
@@ -1253,6 +1255,9 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 			       sizeof(uint32_t) * 8);
 		cstat->control_requests++;
 	}
+
+	if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
 	/*
 	 * Finish initializing those WQE fields that are independent
 	 * of the nvme_cmnd request_buffer
@@ -1304,14 +1309,16 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
 	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
 	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
 	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
+	struct sli4_hybrid_sgl *sgl_xtra = NULL;
 	struct scatterlist *data_sg;
 	struct sli4_sge *first_data_sgl;
 	struct ulp_bde64 *bde;
-	dma_addr_t physaddr;
+	dma_addr_t physaddr = 0;
 	uint32_t num_bde = 0;
-	uint32_t dma_len;
+	uint32_t dma_len = 0;
 	uint32_t dma_offset = 0;
-	int nseg, i;
+	int nseg, i, j;
+	bool lsp_just_set = false;
 
 	/* Fix up the command and response DMA stuff. */
 	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
@@ -1348,6 +1355,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
 		 */
 		nseg = nCmd->sg_cnt;
 		data_sg = nCmd->first_sgl;
+
+		/* for tracking the segment boundaries */
+		j = 2;
 		for (i = 0; i < nseg; i++) {
 			if (data_sg == NULL) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1356,23 +1366,76 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
 				lpfc_ncmd->seg_cnt = 0;
 				return 1;
 			}
-			physaddr = data_sg->dma_address;
-			dma_len = data_sg->length;
-			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
-			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
-			sgl->word2 = le32_to_cpu(sgl->word2);
-			if ((num_bde + 1) == nseg)
+
+			sgl->word2 = 0;
+			if ((num_bde + 1) == nseg) {
 				bf_set(lpfc_sli4_sge_last, sgl, 1);
-			else
+				bf_set(lpfc_sli4_sge_type, sgl,
+				       LPFC_SGE_TYPE_DATA);
+			} else {
 				bf_set(lpfc_sli4_sge_last, sgl, 0);
-			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
-			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
-			sgl->word2 = cpu_to_le32(sgl->word2);
-			sgl->sge_len = cpu_to_le32(dma_len);
-
-			dma_offset += dma_len;
-			data_sg = sg_next(data_sg);
-			sgl++;
+
+				/* expand the segment */
+				if (!lsp_just_set &&
+				    !((j + 1) % phba->border_sge_num) &&
+				    ((nseg - 1) != i)) {
+					/* set LSP type */
+					bf_set(lpfc_sli4_sge_type, sgl,
+					       LPFC_SGE_TYPE_LSP);
+
+					sgl_xtra = lpfc_get_sgl_per_hdwq(
+							phba, lpfc_ncmd);
+
+					if (unlikely(!sgl_xtra)) {
+						lpfc_ncmd->seg_cnt = 0;
+						return 1;
+					}
+					sgl->addr_lo = cpu_to_le32(putPaddrLow(
+						       sgl_xtra->dma_phys_sgl));
+					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+						       sgl_xtra->dma_phys_sgl));
+
+				} else {
+					bf_set(lpfc_sli4_sge_type, sgl,
+					       LPFC_SGE_TYPE_DATA);
+				}
+			}
+
+			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+			      LPFC_SGE_TYPE_LSP)) {
+				if ((nseg - 1) == i)
+					bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+				physaddr = data_sg->dma_address;
+				dma_len = data_sg->length;
+				sgl->addr_lo = cpu_to_le32(
+						 putPaddrLow(physaddr));
+				sgl->addr_hi = cpu_to_le32(
+						putPaddrHigh(physaddr));
+
+				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+				sgl->word2 = cpu_to_le32(sgl->word2);
+				sgl->sge_len = cpu_to_le32(dma_len);
+
+				dma_offset += dma_len;
+				data_sg = sg_next(data_sg);
+
+				sgl++;
+
+				lsp_just_set = false;
+			} else {
+				sgl->word2 = cpu_to_le32(sgl->word2);
+
+				sgl->sge_len = cpu_to_le32(
+						 phba->cfg_sg_dma_buf_size);
+
+				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+				i = i - 1;
+
+				lsp_just_set = true;
+			}
+
+			j++;
 		}
 		if (phba->cfg_enable_pbde) {
 			/* Use PBDE support for first SGL only, offset == 0 */
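The rewritten loop above implements SGE list chaining: when an I/O needs more SGEs than fit in the pre-registered buffer, every border_sge_num-th slot is converted into an LSP (link) entry whose address points at an extra per-hdwq SGL page, and filling continues there. The i = i - 1 step replays the current scatterlist segment because the LSP slot consumed a hardware SGE without describing any data. A schematic of the same walk with hypothetical simplified types; unlike the driver, this sketch checks for the page border before writing, which avoids the replay:

#include <stdbool.h>
#include <stdint.h>

struct seg { uint64_t dma; uint32_t len; };
struct sge { uint64_t addr; uint32_t len; bool lsp; };

static int fill_chained_sgl(struct sge *sgl, int slots_per_page,
                            const struct seg *segs, int nseg,
                            struct sge *(*get_extra_page)(void))
{
    int slot = 0, i;

    for (i = 0; i < nseg; i++) {
        /* Last slot of a page links onward unless we can finish here. */
        if (slot == slots_per_page - 1 && i != nseg - 1) {
            struct sge *next = get_extra_page();

            if (!next)
                return -1;
            sgl[slot].lsp = true;               /* link entry, no data */
            sgl[slot].addr = (uintptr_t)next;   /* a DMA address in hardware */
            sgl = next;                         /* continue in the new page */
            slot = 0;
        }
        sgl[slot].lsp = false;
        sgl[slot].addr = segs[i].dma;
        sgl[slot].len = segs[i].len;
        slot++;
    }
    return 0;
}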
@@ -1474,7 +1537,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1474 goto out_fail; 1537 goto out_fail;
1475 } 1538 }
1476 1539
1477 if (vport->load_flag & FC_UNLOADING) { 1540 if (unlikely(vport->load_flag & FC_UNLOADING)) {
1478 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1541 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1479 "6124 Fail IO, Driver unload\n"); 1542 "6124 Fail IO, Driver unload\n");
1480 atomic_inc(&lport->xmt_fcp_err); 1543 atomic_inc(&lport->xmt_fcp_err);
@@ -1505,8 +1568,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1505 ndlp = rport->ndlp; 1568 ndlp = rport->ndlp;
1506 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1569 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1507 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, 1570 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1508 "6053 Fail IO, ndlp not ready: rport %p " 1571 "6053 Busy IO, ndlp not ready: rport x%px "
1509 "ndlp %p, DID x%06x\n", 1572 "ndlp x%px, DID x%06x\n",
1510 rport, ndlp, pnvme_rport->port_id); 1573 rport, ndlp, pnvme_rport->port_id);
1511 atomic_inc(&lport->xmt_fcp_err); 1574 atomic_inc(&lport->xmt_fcp_err);
1512 ret = -EBUSY; 1575 ret = -EBUSY;
@@ -1758,7 +1821,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1758 /* Announce entry to new IO submit field. */ 1821 /* Announce entry to new IO submit field. */
1759 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1822 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1760 "6002 Abort Request to rport DID x%06x " 1823 "6002 Abort Request to rport DID x%06x "
1761 "for nvme_fc_req %p\n", 1824 "for nvme_fc_req x%px\n",
1762 pnvme_rport->port_id, 1825 pnvme_rport->port_id,
1763 pnvme_fcreq); 1826 pnvme_fcreq);
1764 1827
@@ -1767,7 +1830,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1767 */ 1830 */
1768 spin_lock_irqsave(&phba->hbalock, flags); 1831 spin_lock_irqsave(&phba->hbalock, flags);
1769 /* driver queued commands are in process of being flushed */ 1832 /* driver queued commands are in process of being flushed */
1770 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { 1833 if (phba->hba_flag & HBA_IOQ_FLUSH) {
1771 spin_unlock_irqrestore(&phba->hbalock, flags); 1834 spin_unlock_irqrestore(&phba->hbalock, flags);
1772 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1835 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1773 "6139 Driver in reset cleanup - flushing " 1836 "6139 Driver in reset cleanup - flushing "
@@ -1805,8 +1868,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1805 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { 1868 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1806 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1869 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1807 "6143 NVME req mismatch: " 1870 "6143 NVME req mismatch: "
1808 "lpfc_nbuf %p nvmeCmd %p, " 1871 "lpfc_nbuf x%px nvmeCmd x%px, "
1809 "pnvme_fcreq %p. Skipping Abort xri x%x\n", 1872 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
1810 lpfc_nbuf, lpfc_nbuf->nvmeCmd, 1873 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1811 pnvme_fcreq, nvmereq_wqe->sli4_xritag); 1874 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1812 goto out_unlock; 1875 goto out_unlock;
@@ -1815,7 +1878,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1815 /* Don't abort IOs no longer on the pending queue. */ 1878 /* Don't abort IOs no longer on the pending queue. */
1816 if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 1879 if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1817 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1880 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1818 "6142 NVME IO req %p not queued - skipping " 1881 "6142 NVME IO req x%px not queued - skipping "
1819 "abort req xri x%x\n", 1882 "abort req xri x%x\n",
1820 pnvme_fcreq, nvmereq_wqe->sli4_xritag); 1883 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1821 goto out_unlock; 1884 goto out_unlock;
@@ -1830,8 +1893,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1830 if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { 1893 if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1831 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1894 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1832 "6144 Outstanding NVME I/O Abort Request " 1895 "6144 Outstanding NVME I/O Abort Request "
1833 "still pending on nvme_fcreq %p, " 1896 "still pending on nvme_fcreq x%px, "
1834 "lpfc_ncmd %p xri x%x\n", 1897 "lpfc_ncmd %px xri x%x\n",
1835 pnvme_fcreq, lpfc_nbuf, 1898 pnvme_fcreq, lpfc_nbuf,
1836 nvmereq_wqe->sli4_xritag); 1899 nvmereq_wqe->sli4_xritag);
1837 goto out_unlock; 1900 goto out_unlock;
@@ -1841,7 +1904,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1841 if (!abts_buf) { 1904 if (!abts_buf) {
1842 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1905 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1843 "6136 No available abort wqes. Skipping " 1906 "6136 No available abort wqes. Skipping "
1844 "Abts req for nvme_fcreq %p xri x%x\n", 1907 "Abts req for nvme_fcreq x%px xri x%x\n",
1845 pnvme_fcreq, nvmereq_wqe->sli4_xritag); 1908 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1846 goto out_unlock; 1909 goto out_unlock;
1847 } 1910 }
@@ -1855,7 +1918,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1855 /* WQEs are reused. Clear stale data and set key fields to 1918 /* WQEs are reused. Clear stale data and set key fields to
1856 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. 1919 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
1857 */ 1920 */
1858 memset(abts_wqe, 0, sizeof(union lpfc_wqe)); 1921 memset(abts_wqe, 0, sizeof(*abts_wqe));
1859 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 1922 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
1860 1923
1861 /* word 7 */ 1924 /* word 7 */
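The memset change above sizes the clear off the object (sizeof(*abts_wqe)) rather than a spelled-out type; behaviorally identical here, but it stays correct if the pointee type ever changes (for instance union lpfc_wqe versus the larger 128-byte WQE layout). A minimal illustration with a stand-in type:

#include <string.h>
#include <stdio.h>

union wqe_model {               /* stand-in for union lpfc_wqe */
        unsigned int words[16];
        unsigned char bytes[64];
};

int main(void)
{
        union wqe_model wqe;
        union wqe_model *abts_wqe = &wqe;

        memset(abts_wqe, 0, sizeof(*abts_wqe)); /* tracks the pointee */
        printf("cleared %zu bytes\n", sizeof(*abts_wqe));
        return 0;
}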
@@ -1892,7 +1955,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1892 if (ret_val) { 1955 if (ret_val) {
1893 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, 1956 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1894 "6137 Failed abts issue_wqe with status x%x " 1957 "6137 Failed abts issue_wqe with status x%x "
1895 "for nvme_fcreq %p.\n", 1958 "for nvme_fcreq x%px.\n",
1896 ret_val, pnvme_fcreq); 1959 ret_val, pnvme_fcreq);
1897 lpfc_sli_release_iocbq(phba, abts_buf); 1960 lpfc_sli_release_iocbq(phba, abts_buf);
1898 return; 1961 return;
@@ -1982,7 +2045,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1982 sgl->word2 = cpu_to_le32(sgl->word2); 2045 sgl->word2 = cpu_to_le32(sgl->word2);
1983 /* Fill in word 3 / sgl_len during cmd submission */ 2046 /* Fill in word 3 / sgl_len during cmd submission */
1984 2047
1985 /* Initialize WQE */ 2048 /* Initialize 64 bytes only */
1986 memset(wqe, 0, sizeof(union lpfc_wqe)); 2049 memset(wqe, 0, sizeof(union lpfc_wqe));
1987 2050
1988 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 2051 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
@@ -2028,11 +2091,11 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
2028 lpfc_ncmd->cur_iocbq.sli4_xritag, 2091 lpfc_ncmd->cur_iocbq.sli4_xritag,
2029 lpfc_ncmd->cur_iocbq.iotag); 2092 lpfc_ncmd->cur_iocbq.iotag);
2030 2093
2031 spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag); 2094 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
2032 list_add_tail(&lpfc_ncmd->list, 2095 list_add_tail(&lpfc_ncmd->list,
2033 &qp->lpfc_abts_nvme_buf_list); 2096 &qp->lpfc_abts_io_buf_list);
2034 qp->abts_nvme_io_bufs++; 2097 qp->abts_nvme_io_bufs++;
2035 spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag); 2098 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
2036 } else 2099 } else
2037 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); 2100 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2038} 2101}
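Here the per-protocol abts_nvme_buf_list and its lock give way to the shared abts_io_buf_list. The pattern, parking an exchange-busy buffer on a locked abort list until the XRI abort completes, sketched in userspace with a simple singly linked list rather than the kernel's list_head:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

struct io_buf {
        struct io_buf *next;
        bool exch_busy;         /* XB still set on this exchange */
};

struct hdwq_model {
        pthread_mutex_t abts_io_buf_list_lock;
        struct io_buf *abts_io_buf_list;
        int abts_nvme_io_bufs;
};

static void release_buf(struct hdwq_model *qp, struct io_buf *buf)
{
        if (buf->exch_busy) {
                pthread_mutex_lock(&qp->abts_io_buf_list_lock);
                buf->next = qp->abts_io_buf_list;
                qp->abts_io_buf_list = buf;
                qp->abts_nvme_io_bufs++;
                pthread_mutex_unlock(&qp->abts_io_buf_list_lock);
        } else {
                /* ... hand back to the free pool here ... */
        }
}

int main(void)
{
        struct hdwq_model qp = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
        struct io_buf buf = { NULL, true };

        release_buf(&qp, &buf);
        printf("parked bufs: %d\n", qp.abts_nvme_io_bufs);
        return 0;
}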
@@ -2095,8 +2158,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2095 if (!ret) { 2158 if (!ret) {
2096 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, 2159 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2097 "6005 Successfully registered local " 2160 "6005 Successfully registered local "
2098 "NVME port num %d, localP %p, private %p, " 2161 "NVME port num %d, localP x%px, private "
2099 "sg_seg %d\n", 2162 "x%px, sg_seg %d\n",
2100 localport->port_num, localport, 2163 localport->port_num, localport,
2101 localport->private, 2164 localport->private,
2102 lpfc_nvme_template.max_sgl_segments); 2165 lpfc_nvme_template.max_sgl_segments);
@@ -2157,14 +2220,14 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2157 if (unlikely(!ret)) { 2220 if (unlikely(!ret)) {
2158 pending = 0; 2221 pending = 0;
2159 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2222 for (i = 0; i < phba->cfg_hdw_queue; i++) {
2160 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; 2223 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2161 if (!pring) 2224 if (!pring)
2162 continue; 2225 continue;
2163 if (pring->txcmplq_cnt) 2226 if (pring->txcmplq_cnt)
2164 pending += pring->txcmplq_cnt; 2227 pending += pring->txcmplq_cnt;
2165 } 2228 }
2166 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 2229 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
2167 "6176 Lport %p Localport %p wait " 2230 "6176 Lport x%px Localport x%px wait "
2168 "timed out. Pending %d. Renewing.\n", 2231 "timed out. Pending %d. Renewing.\n",
2169 lport, vport->localport, pending); 2232 lport, vport->localport, pending);
2170 continue; 2233 continue;
@@ -2172,7 +2235,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2172 break; 2235 break;
2173 } 2236 }
2174 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 2237 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2175 "6177 Lport %p Localport %p Complete Success\n", 2238 "6177 Lport x%px Localport x%px Complete Success\n",
2176 lport, vport->localport); 2239 lport, vport->localport);
2177} 2240}
2178#endif 2241#endif
@@ -2203,7 +2266,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2203 lport = (struct lpfc_nvme_lport *)localport->private; 2266 lport = (struct lpfc_nvme_lport *)localport->private;
2204 2267
2205 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2268 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2206 "6011 Destroying NVME localport %p\n", 2269 "6011 Destroying NVME localport x%px\n",
2207 localport); 2270 localport);
2208 2271
2209 /* lport's rport list is clear. Unregister 2272 /* lport's rport list is clear. Unregister
@@ -2253,12 +2316,12 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
2253 lport = (struct lpfc_nvme_lport *)localport->private; 2316 lport = (struct lpfc_nvme_lport *)localport->private;
2254 if (!lport) { 2317 if (!lport) {
2255 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, 2318 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2256 "6171 Update NVME fail. localP %p, No lport\n", 2319 "6171 Update NVME fail. localP x%px, No lport\n",
2257 localport); 2320 localport);
2258 return; 2321 return;
2259 } 2322 }
2260 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2323 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2261 "6012 Update NVME lport %p did x%x\n", 2324 "6012 Update NVME lport x%px did x%x\n",
2262 localport, vport->fc_myDID); 2325 localport, vport->fc_myDID);
2263 2326
2264 localport->port_id = vport->fc_myDID; 2327 localport->port_id = vport->fc_myDID;
@@ -2268,7 +2331,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
2268 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; 2331 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2269 2332
2270 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2333 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2271 "6030 bound lport %p to DID x%06x\n", 2334 "6030 bound lport x%px to DID x%06x\n",
2272 lport, localport->port_id); 2335 lport, localport->port_id);
2273#endif 2336#endif
2274} 2337}
@@ -2317,9 +2380,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2317 2380
2318 spin_lock_irq(&vport->phba->hbalock); 2381 spin_lock_irq(&vport->phba->hbalock);
2319 oldrport = lpfc_ndlp_get_nrport(ndlp); 2382 oldrport = lpfc_ndlp_get_nrport(ndlp);
2320 spin_unlock_irq(&vport->phba->hbalock); 2383 if (oldrport) {
2321 if (!oldrport) 2384 prev_ndlp = oldrport->ndlp;
2385 spin_unlock_irq(&vport->phba->hbalock);
2386 } else {
2387 spin_unlock_irq(&vport->phba->hbalock);
2322 lpfc_nlp_get(ndlp); 2388 lpfc_nlp_get(ndlp);
2389 }
2323 2390
2324 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); 2391 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2325 if (!ret) { 2392 if (!ret) {
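The reworked lock scope above captures oldrport->ndlp while phba->hbalock is still held; the earlier code dropped the lock first and dereferenced the field later, leaving a window in which the rport could be torn down. A compact model of the capture-under-lock pattern; all names are stand-ins:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct ndlp_model { int did; };
struct rport_model { struct ndlp_model *ndlp; };

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

static struct ndlp_model *snapshot_prev_ndlp(struct rport_model *oldrport)
{
        struct ndlp_model *prev_ndlp = NULL;

        pthread_mutex_lock(&hbalock);
        if (oldrport)
                prev_ndlp = oldrport->ndlp;     /* sampled under the lock */
        pthread_mutex_unlock(&hbalock);
        return prev_ndlp;
}

int main(void)
{
        struct ndlp_model n = { 0x123456 };
        struct rport_model r = { &n };

        printf("prev_ndlp DID x%06x\n", snapshot_prev_ndlp(&r)->did);
        return 0;
}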
@@ -2338,25 +2405,34 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2338 /* New remoteport record does not guarantee valid 2405 /* New remoteport record does not guarantee valid
2339 * host private memory area. 2406 * host private memory area.
2340 */ 2407 */
2341 prev_ndlp = oldrport->ndlp;
2342 if (oldrport == remote_port->private) { 2408 if (oldrport == remote_port->private) {
2343 /* Same remoteport - ndlp should match. 2409 /* Same remoteport - ndlp should match.
2344 * Just reuse. 2410 * Just reuse.
2345 */ 2411 */
2346 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 2412 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2347 LOG_NVME_DISC, 2413 LOG_NVME_DISC,
2348 "6014 Rebinding lport to " 2414 "6014 Rebind lport to current "
2349 "remoteport %p wwpn 0x%llx, " 2415 "remoteport x%px wwpn 0x%llx, "
2350 "Data: x%x x%x %p %p x%x x%06x\n", 2416 "Data: x%x x%x x%px x%px x%x "
2417 " x%06x\n",
2351 remote_port, 2418 remote_port,
2352 remote_port->port_name, 2419 remote_port->port_name,
2353 remote_port->port_id, 2420 remote_port->port_id,
2354 remote_port->port_role, 2421 remote_port->port_role,
2355 prev_ndlp, 2422 oldrport->ndlp,
2356 ndlp, 2423 ndlp,
2357 ndlp->nlp_type, 2424 ndlp->nlp_type,
2358 ndlp->nlp_DID); 2425 ndlp->nlp_DID);
2359 return 0; 2426
2427 /* It's a complete rebind only if the driver
2428 * is registering with the same ndlp. Otherwise
2429 * the driver likely executed a node swap
2430 * prior to this registration and the ndlp to
2431 * remoteport binding needs to be redone.
2432 */
2433 if (prev_ndlp == ndlp)
2434 return 0;
2435
2360 } 2436 }
2361 2437
2362 /* Sever the ndlp<->rport association 2438 /* Sever the ndlp<->rport association
@@ -2390,10 +2466,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2390 spin_unlock_irq(&vport->phba->hbalock); 2466 spin_unlock_irq(&vport->phba->hbalock);
2391 lpfc_printf_vlog(vport, KERN_INFO, 2467 lpfc_printf_vlog(vport, KERN_INFO,
2392 LOG_NVME_DISC | LOG_NODE, 2468 LOG_NVME_DISC | LOG_NODE,
2393 "6022 Binding new rport to " 2469 "6022 Bind lport x%px to remoteport x%px "
2394 "lport %p Remoteport %p rport %p WWNN 0x%llx, " 2470 "rport x%px WWNN 0x%llx, "
2395 "Rport WWPN 0x%llx DID " 2471 "Rport WWPN 0x%llx DID "
2396 "x%06x Role x%x, ndlp %p prev_ndlp %p\n", 2472 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
2397 lport, remote_port, rport, 2473 lport, remote_port, rport,
2398 rpinfo.node_name, rpinfo.port_name, 2474 rpinfo.node_name, rpinfo.port_name,
2399 rpinfo.port_id, rpinfo.port_role, 2475 rpinfo.port_id, rpinfo.port_role,
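The added prev_ndlp == ndlp test in the hunk above narrows what counts as a pure rebind: reusing the registration is only safe when the remoteport's host-private area and the ndlp both match; otherwise a node swap happened and the binding must be redone. That decision, sketched in isolation:

#include <stdbool.h>
#include <stdio.h>

struct ndlp_model { int did; };

static bool is_complete_rebind(const void *oldrport,
                               const void *remote_private,
                               const struct ndlp_model *prev_ndlp,
                               const struct ndlp_model *ndlp)
{
        /* both conditions must hold, mirroring the two checks above */
        return oldrport == remote_private && prev_ndlp == ndlp;
}

int main(void)
{
        struct ndlp_model a = { 1 }, b = { 2 };
        int rport_priv;

        printf("same ndlp:    %d\n",
               is_complete_rebind(&rport_priv, &rport_priv, &a, &a));
        printf("swapped ndlp: %d\n",
               is_complete_rebind(&rport_priv, &rport_priv, &a, &b));
        return 0;
}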
@@ -2423,20 +2499,23 @@ void
2423lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2499lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2424{ 2500{
2425#if (IS_ENABLED(CONFIG_NVME_FC)) 2501#if (IS_ENABLED(CONFIG_NVME_FC))
2426 struct lpfc_nvme_rport *rport; 2502 struct lpfc_nvme_rport *nrport;
2427 struct nvme_fc_remote_port *remoteport; 2503 struct nvme_fc_remote_port *remoteport = NULL;
2428 2504
2429 rport = ndlp->nrport; 2505 spin_lock_irq(&vport->phba->hbalock);
2506 nrport = lpfc_ndlp_get_nrport(ndlp);
2507 if (nrport)
2508 remoteport = nrport->remoteport;
2509 spin_unlock_irq(&vport->phba->hbalock);
2430 2510
2431 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2511 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2432 "6170 Rescan NPort DID x%06x type x%x " 2512 "6170 Rescan NPort DID x%06x type x%x "
2433 "state x%x rport %p\n", 2513 "state x%x nrport x%px remoteport x%px\n",
2434 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, rport); 2514 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
2435 if (!rport) 2515 nrport, remoteport);
2436 goto input_err; 2516
2437 remoteport = rport->remoteport; 2517 if (!nrport || !remoteport)
2438 if (!remoteport) 2518 goto rescan_exit;
2439 goto input_err;
2440 2519
2441 /* Only rescan if we are an NVME target in the MAPPED state */ 2520 /* Only rescan if we are an NVME target in the MAPPED state */
2442 if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && 2521 if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
@@ -2449,10 +2528,10 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2449 ndlp->nlp_DID, remoteport->port_state); 2528 ndlp->nlp_DID, remoteport->port_state);
2450 } 2529 }
2451 return; 2530 return;
2452input_err: 2531 rescan_exit:
2453 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2532 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2454 "6169 State error: lport %p, rport%p FCID x%06x\n", 2533 "6169 Skip NVME Rport Rescan, NVME remoteport "
2455 vport->localport, ndlp->rport, ndlp->nlp_DID); 2534 "unregistered\n");
2456#endif 2535#endif
2457} 2536}
2458 2537
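lpfc_nvme_rescan_port now resolves nrport and remoteport under the host lock and downgrades the missing-remoteport case from a state error to an informational skip. Modeled in userspace with stand-in types:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct remoteport_model { int port_state; };
struct nrport_model { struct remoteport_model *remoteport; };
struct ndlp_model { struct nrport_model *nrport; };

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

static void rescan_port(struct ndlp_model *ndlp)
{
        struct nrport_model *nrport;
        struct remoteport_model *remoteport = NULL;

        pthread_mutex_lock(&hbalock);
        nrport = ndlp->nrport;
        if (nrport)
                remoteport = nrport->remoteport;
        pthread_mutex_unlock(&hbalock);

        if (!nrport || !remoteport) {
                fprintf(stderr, "skip rescan, remoteport unregistered\n");
                return;
        }
        /* ... only rescan MAPPED NVME targets here ... */
}

int main(void)
{
        struct ndlp_model ndlp = { NULL };

        rescan_port(&ndlp);     /* exercises the graceful-skip branch */
        return 0;
}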
@@ -2499,7 +2578,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2499 goto input_err; 2578 goto input_err;
2500 2579
2501 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2580 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2502 "6033 Unreg nvme remoteport %p, portname x%llx, " 2581 "6033 Unreg nvme remoteport x%px, portname x%llx, "
2503 "port_id x%06x, portstate x%x port type x%x\n", 2582 "port_id x%06x, portstate x%x port type x%x\n",
2504 remoteport, remoteport->port_name, 2583 remoteport, remoteport->port_name,
2505 remoteport->port_id, remoteport->port_state, 2584 remoteport->port_id, remoteport->port_state,
@@ -2537,7 +2616,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2537 input_err: 2616 input_err:
2538#endif 2617#endif
2539 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, 2618 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2540 "6168 State error: lport %p, rport%p FCID x%06x\n", 2619 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
2541 vport->localport, ndlp->rport, ndlp->nlp_DID); 2620 vport->localport, ndlp->rport, ndlp->nlp_DID);
2542} 2621}
2543 2622
@@ -2545,6 +2624,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2545 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort 2624 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2546 * @phba: pointer to lpfc hba data structure. 2625 * @phba: pointer to lpfc hba data structure.
2547 * @axri: pointer to the fcp xri abort wcqe structure. 2626 * @axri: pointer to the fcp xri abort wcqe structure.
2627 * @lpfc_ncmd: The nvme job structure for the request being aborted.
2548 * 2628 *
2549 * This routine is invoked by the worker thread to process a SLI4 fast-path 2629 * This routine is invoked by the worker thread to process a SLI4 fast-path
2550 * NVME aborted xri. Aborted NVME IO commands are completed to the transport 2630 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
@@ -2552,59 +2632,33 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2552 **/ 2632 **/
2553void 2633void
2554lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, 2634lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2555 struct sli4_wcqe_xri_aborted *axri, int idx) 2635 struct sli4_wcqe_xri_aborted *axri,
2636 struct lpfc_io_buf *lpfc_ncmd)
2556{ 2637{
2557 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 2638 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2558 struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
2559 struct nvmefc_fcp_req *nvme_cmd = NULL; 2639 struct nvmefc_fcp_req *nvme_cmd = NULL;
2560 struct lpfc_nodelist *ndlp; 2640 struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
2561 struct lpfc_sli4_hdw_queue *qp;
2562 unsigned long iflag = 0;
2563 2641
2564 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
2565 return;
2566 qp = &phba->sli4_hba.hdwq[idx];
2567 spin_lock_irqsave(&phba->hbalock, iflag);
2568 spin_lock(&qp->abts_nvme_buf_list_lock);
2569 list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
2570 &qp->lpfc_abts_nvme_buf_list, list) {
2571 if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
2572 list_del_init(&lpfc_ncmd->list);
2573 qp->abts_nvme_io_bufs--;
2574 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2575 lpfc_ncmd->status = IOSTAT_SUCCESS;
2576 spin_unlock(&qp->abts_nvme_buf_list_lock);
2577
2578 spin_unlock_irqrestore(&phba->hbalock, iflag);
2579 ndlp = lpfc_ncmd->ndlp;
2580 if (ndlp)
2581 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2582
2583 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2584 "6311 nvme_cmd %p xri x%x tag x%x "
2585 "abort complete and xri released\n",
2586 lpfc_ncmd->nvmeCmd, xri,
2587 lpfc_ncmd->cur_iocbq.iotag);
2588
2589 /* Aborted NVME commands are required to not complete
2590 * before the abort exchange command fully completes.
2591 * Once completed, it is available via the put list.
2592 */
2593 if (lpfc_ncmd->nvmeCmd) {
2594 nvme_cmd = lpfc_ncmd->nvmeCmd;
2595 nvme_cmd->done(nvme_cmd);
2596 lpfc_ncmd->nvmeCmd = NULL;
2597 }
2598 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2599 return;
2600 }
2601 }
2602 spin_unlock(&qp->abts_nvme_buf_list_lock);
2603 spin_unlock_irqrestore(&phba->hbalock, iflag);
2604 2642
2605 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2643 if (ndlp)
2606 "6312 XRI Aborted xri x%x not found\n", xri); 2644 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2607 2645
2646 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2647 "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
2648 "xri released\n",
2649 lpfc_ncmd->nvmeCmd, xri,
2650 lpfc_ncmd->cur_iocbq.iotag);
2651
2652 /* Aborted NVME commands are required to not complete
2653 * before the abort exchange command fully completes.
2654 * Once completed, it is available via the put list.
2655 */
2656 if (lpfc_ncmd->nvmeCmd) {
2657 nvme_cmd = lpfc_ncmd->nvmeCmd;
2658 nvme_cmd->done(nvme_cmd);
2659 lpfc_ncmd->nvmeCmd = NULL;
2660 }
2661 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2608} 2662}
2609 2663
2610/** 2664/**
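The rework above drops the two-lock list walk: the caller (the unified lpfc_sli4_io_xri_aborted in lpfc_scsi.c, later in this patch) has already matched the aborted XRI to its lpfc_io_buf, so this routine only completes the transport request once and releases the buffer. A sketch of that direct completion; the types and done() hook stand in for the nvmefc_fcp_req machinery:

#include <stdio.h>
#include <stddef.h>

struct nvme_cmd_model {
        void (*done)(struct nvme_cmd_model *);
};

struct io_buf_model {
        struct nvme_cmd_model *nvmeCmd;
        unsigned short xri;
};

static void nvme_done(struct nvme_cmd_model *cmd)
{
        (void)cmd;
        printf("upper layer completion ran\n");
}

static void nvme_xri_aborted(struct io_buf_model *buf)
{
        printf("xri x%x abort complete\n", buf->xri);
        if (buf->nvmeCmd) {
                struct nvme_cmd_model *cmd = buf->nvmeCmd;

                buf->nvmeCmd = NULL;    /* complete exactly once */
                cmd->done(cmd);
        }
        /* ... release the buffer back to its pool here ... */
}

int main(void)
{
        struct nvme_cmd_model cmd = { nvme_done };
        struct io_buf_model buf = { &cmd, 0x1a2 };

        nvme_xri_aborted(&buf);
        return 0;
}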
@@ -2626,13 +2680,13 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2626 if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) 2680 if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
2627 return; 2681 return;
2628 2682
2629 /* Cycle through all NVME rings and make sure all outstanding 2683 /* Cycle through all IO rings and make sure all outstanding
2630 * WQEs have been removed from the txcmplqs. 2684 * WQEs have been removed from the txcmplqs.
2631 */ 2685 */
2632 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2686 for (i = 0; i < phba->cfg_hdw_queue; i++) {
2633 if (!phba->sli4_hba.hdwq[i].nvme_wq) 2687 if (!phba->sli4_hba.hdwq[i].io_wq)
2634 continue; 2688 continue;
2635 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; 2689 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2636 2690
2637 if (!pring) 2691 if (!pring)
2638 continue; 2692 continue;
@@ -2653,3 +2707,50 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2653 } 2707 }
2654 } 2708 }
2655} 2709}
2710
2711void
2712lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
2713{
2714#if (IS_ENABLED(CONFIG_NVME_FC))
2715 struct lpfc_io_buf *lpfc_ncmd;
2716 struct nvmefc_fcp_req *nCmd;
2717 struct lpfc_nvme_fcpreq_priv *freqpriv;
2718
2719 if (!pwqeIn->context1) {
2720 lpfc_sli_release_iocbq(phba, pwqeIn);
2721 return;
2722 }
2723 /* For abort iocb just return, IO iocb will do a done call */
2724 if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
2725 CMD_ABORT_XRI_CX) {
2726 lpfc_sli_release_iocbq(phba, pwqeIn);
2727 return;
2728 }
2729 lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
2730
2731 spin_lock(&lpfc_ncmd->buf_lock);
2732 if (!lpfc_ncmd->nvmeCmd) {
2733 spin_unlock(&lpfc_ncmd->buf_lock);
2734 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2735 return;
2736 }
2737
2738 nCmd = lpfc_ncmd->nvmeCmd;
2739 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2740 "6194 NVME Cancel xri %x\n",
2741 lpfc_ncmd->cur_iocbq.sli4_xritag);
2742
2743 nCmd->transferred_length = 0;
2744 nCmd->rcv_rsplen = 0;
2745 nCmd->status = NVME_SC_INTERNAL;
2746 freqpriv = nCmd->private;
2747 freqpriv->nvme_buf = NULL;
2748 lpfc_ncmd->nvmeCmd = NULL;
2749
2750 spin_unlock(&lpfc_ncmd->buf_lock);
2751 nCmd->done(nCmd);
2752
2753 /* Call release with XB=1 to queue the IO into the abort list. */
2754 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2755#endif
2756}
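The new lpfc_nvme_cancel_iocb fails a queued request back to the transport with an internal error and zeroed transfer lengths, detaching it under the buffer lock and calling done() only after the lock drops. A userspace model; NVME_SC_INTERNAL's value mirrors include/linux/nvme.h but is illustrative here:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

#define NVME_SC_INTERNAL 0x6

struct req_model {
        unsigned int transferred_length;
        unsigned int rcv_rsplen;
        unsigned int status;
        void (*done)(struct req_model *);
};

struct buf_model {
        pthread_mutex_t buf_lock;
        struct req_model *nvmeCmd;
};

static void cancel_io(struct buf_model *buf)
{
        struct req_model *cmd;

        pthread_mutex_lock(&buf->buf_lock);
        cmd = buf->nvmeCmd;
        if (!cmd) {
                pthread_mutex_unlock(&buf->buf_lock);
                return;         /* already completed elsewhere */
        }
        cmd->transferred_length = 0;
        cmd->rcv_rsplen = 0;
        cmd->status = NVME_SC_INTERNAL;
        buf->nvmeCmd = NULL;
        pthread_mutex_unlock(&buf->buf_lock);

        cmd->done(cmd);         /* never call done() under the lock */
}

static void done_fn(struct req_model *r)
{
        printf("cancelled, status x%x\n", r->status);
}

int main(void)
{
        struct req_model req = { 1, 1, 0, done_fn };
        struct buf_model buf = { PTHREAD_MUTEX_INITIALIZER, &req };

        cancel_io(&buf);
        return 0;
}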
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index faa596f9e861..9884228800a5 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1026,7 +1026,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
1026 * WQE release CQE 1026 * WQE release CQE
1027 */ 1027 */
1028 ctxp->flag |= LPFC_NVMET_DEFER_WQFULL; 1028 ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
1029 wq = ctxp->hdwq->nvme_wq; 1029 wq = ctxp->hdwq->io_wq;
1030 pring = wq->pring; 1030 pring = wq->pring;
1031 spin_lock_irqsave(&pring->ring_lock, iflags); 1031 spin_lock_irqsave(&pring->ring_lock, iflags);
1032 list_add_tail(&nvmewqeq->list, &wq->wqfull_list); 1032 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1104,7 +1104,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1104 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 1104 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1105 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, 1105 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1106 ctxp->oxid); 1106 ctxp->oxid);
1107 wq = ctxp->hdwq->nvme_wq; 1107 wq = ctxp->hdwq->io_wq;
1108 lpfc_nvmet_wqfull_flush(phba, wq, ctxp); 1108 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1109 return; 1109 return;
1110 } 1110 }
@@ -1437,7 +1437,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1437 infop = lpfc_get_ctx_list(phba, i, j); 1437 infop = lpfc_get_ctx_list(phba, i, j);
1438 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 1438 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1439 "6408 TOTAL NVMET ctx for CPU %d " 1439 "6408 TOTAL NVMET ctx for CPU %d "
1440 "MRQ %d: cnt %d nextcpu %p\n", 1440 "MRQ %d: cnt %d nextcpu x%px\n",
1441 i, j, infop->nvmet_ctx_list_cnt, 1441 i, j, infop->nvmet_ctx_list_cnt,
1442 infop->nvmet_ctx_next_cpu); 1442 infop->nvmet_ctx_next_cpu);
1443 } 1443 }
@@ -1500,7 +1500,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1500 1500
1501 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1501 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1502 "6026 Registered NVME " 1502 "6026 Registered NVME "
1503 "targetport: %p, private %p " 1503 "targetport: x%px, private x%px "
1504 "portnm %llx nodenm %llx segs %d qs %d\n", 1504 "portnm %llx nodenm %llx segs %d qs %d\n",
1505 phba->targetport, tgtp, 1505 phba->targetport, tgtp,
1506 pinfo.port_name, pinfo.node_name, 1506 pinfo.port_name, pinfo.node_name,
@@ -1555,7 +1555,7 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1555 return 0; 1555 return 0;
1556 1556
1557 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 1557 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1558 "6007 Update NVMET port %p did x%x\n", 1558 "6007 Update NVMET port x%px did x%x\n",
1559 phba->targetport, vport->fc_myDID); 1559 phba->targetport, vport->fc_myDID);
1560 1560
1561 phba->targetport->port_id = vport->fc_myDID; 1561 phba->targetport->port_id = vport->fc_myDID;
@@ -1790,12 +1790,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1790 lpfc_nvmet_defer_release(phba, ctxp); 1790 lpfc_nvmet_defer_release(phba, ctxp);
1791 spin_unlock_irqrestore(&ctxp->ctxlock, iflag); 1791 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1792 } 1792 }
1793 if (ctxp->state == LPFC_NVMET_STE_RCV) 1793 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1794 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, 1794 ctxp->oxid);
1795 ctxp->oxid);
1796 else
1797 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1798 ctxp->oxid);
1799 1795
1800 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); 1796 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1801 return 0; 1797 return 0;
@@ -1922,7 +1918,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1922 if (phba->targetport) { 1918 if (phba->targetport) {
1923 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1919 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1924 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 1920 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1925 wq = phba->sli4_hba.hdwq[qidx].nvme_wq; 1921 wq = phba->sli4_hba.hdwq[qidx].io_wq;
1926 lpfc_nvmet_wqfull_flush(phba, wq, NULL); 1922 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1927 } 1923 }
1928 tgtp->tport_unreg_cmp = &tport_unreg_cmp; 1924 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
@@ -1930,7 +1926,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1930 if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, 1926 if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
1931 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) 1927 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
1932 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1928 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1933 "6179 Unreg targetport %p timeout " 1929 "6179 Unreg targetport x%px timeout "
1934 "reached.\n", phba->targetport); 1930 "reached.\n", phba->targetport);
1935 lpfc_nvmet_cleanup_io_context(phba); 1931 lpfc_nvmet_cleanup_io_context(phba);
1936 } 1932 }
@@ -3113,7 +3109,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3113 atomic_inc(&tgtp->xmt_ls_abort_cmpl); 3109 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3114 3110
3115 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 3111 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3116 "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n", 3112 "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3117 ctxp, wcqe->word0, wcqe->total_data_placed, 3113 ctxp, wcqe->word0, wcqe->total_data_placed,
3118 result, wcqe->word3); 3114 result, wcqe->word3);
3119 3115
@@ -3299,7 +3295,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3299 */ 3295 */
3300 spin_lock_irqsave(&phba->hbalock, flags); 3296 spin_lock_irqsave(&phba->hbalock, flags);
3301 /* driver queued commands are in process of being flushed */ 3297 /* driver queued commands are in process of being flushed */
3302 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { 3298 if (phba->hba_flag & HBA_IOQ_FLUSH) {
3303 spin_unlock_irqrestore(&phba->hbalock, flags); 3299 spin_unlock_irqrestore(&phba->hbalock, flags);
3304 atomic_inc(&tgtp->xmt_abort_rsp_error); 3300 atomic_inc(&tgtp->xmt_abort_rsp_error);
3305 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 3301 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
@@ -3334,7 +3330,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3334 /* WQEs are reused. Clear stale data and set key fields to 3330 /* WQEs are reused. Clear stale data and set key fields to
3335 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. 3331 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3336 */ 3332 */
3337 memset(abts_wqe, 0, sizeof(union lpfc_wqe)); 3333 memset(abts_wqe, 0, sizeof(*abts_wqe));
3338 3334
3339 /* word 3 */ 3335 /* word 3 */
3340 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 3336 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f9df800e7067..fe1097666de4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -53,8 +53,6 @@
53#define LPFC_RESET_WAIT 2 53#define LPFC_RESET_WAIT 2
54#define LPFC_ABORT_WAIT 2 54#define LPFC_ABORT_WAIT 2
55 55
56int _dump_buf_done = 1;
57
58static char *dif_op_str[] = { 56static char *dif_op_str[] = {
59 "PROT_NORMAL", 57 "PROT_NORMAL",
60 "PROT_READ_INSERT", 58 "PROT_READ_INSERT",
@@ -89,63 +87,6 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
89static int 87static int
90lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); 88lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
91 89
92static void
93lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
94{
95 void *src, *dst;
96 struct scatterlist *sgde = scsi_sglist(cmnd);
97
98 if (!_dump_buf_data) {
99 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
100 "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
101 __func__);
102 return;
103 }
104
105
106 if (!sgde) {
107 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
108 "9051 BLKGRD: ERROR: data scatterlist is null\n");
109 return;
110 }
111
112 dst = (void *) _dump_buf_data;
113 while (sgde) {
114 src = sg_virt(sgde);
115 memcpy(dst, src, sgde->length);
116 dst += sgde->length;
117 sgde = sg_next(sgde);
118 }
119}
120
121static void
122lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
123{
124 void *src, *dst;
125 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
126
127 if (!_dump_buf_dif) {
128 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
129 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
130 __func__);
131 return;
132 }
133
134 if (!sgde) {
135 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
136 "9053 BLKGRD: ERROR: prot scatterlist is null\n");
137 return;
138 }
139
140 dst = _dump_buf_dif;
141 while (sgde) {
142 src = sg_virt(sgde);
143 memcpy(dst, src, sgde->length);
144 dst += sgde->length;
145 sgde = sg_next(sgde);
146 }
147}
148
149static inline unsigned 90static inline unsigned
150lpfc_cmd_blksize(struct scsi_cmnd *sc) 91lpfc_cmd_blksize(struct scsi_cmnd *sc)
151{ 92{
@@ -537,29 +478,32 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
537 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 478 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
538 qp = &phba->sli4_hba.hdwq[idx]; 479 qp = &phba->sli4_hba.hdwq[idx];
539 480
540 spin_lock(&qp->abts_scsi_buf_list_lock); 481 spin_lock(&qp->abts_io_buf_list_lock);
541 list_for_each_entry_safe(psb, next_psb, 482 list_for_each_entry_safe(psb, next_psb,
542 &qp->lpfc_abts_scsi_buf_list, list) { 483 &qp->lpfc_abts_io_buf_list, list) {
484 if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
485 continue;
486
543 if (psb->rdata && psb->rdata->pnode && 487 if (psb->rdata && psb->rdata->pnode &&
544 psb->rdata->pnode->vport == vport) 488 psb->rdata->pnode->vport == vport)
545 psb->rdata = NULL; 489 psb->rdata = NULL;
546 } 490 }
547 spin_unlock(&qp->abts_scsi_buf_list_lock); 491 spin_unlock(&qp->abts_io_buf_list_lock);
548 } 492 }
549 spin_unlock_irqrestore(&phba->hbalock, iflag); 493 spin_unlock_irqrestore(&phba->hbalock, iflag);
550} 494}
551 495
552/** 496/**
553 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort 497 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
554 * @phba: pointer to lpfc hba data structure. 498 * @phba: pointer to lpfc hba data structure.
555 * @axri: pointer to the fcp xri abort wcqe structure. 499 * @axri: pointer to the fcp xri abort wcqe structure.
556 * 500 *
557 * This routine is invoked by the worker thread to process a SLI4 fast-path 501 * This routine is invoked by the worker thread to process a SLI4 fast-path
558 * FCP aborted xri. 502 * FCP or NVME aborted xri.
559 **/ 503 **/
560void 504void
561lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, 505lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
562 struct sli4_wcqe_xri_aborted *axri, int idx) 506 struct sli4_wcqe_xri_aborted *axri, int idx)
563{ 507{
564 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 508 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
565 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 509 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
@@ -577,16 +521,25 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
577 521
578 qp = &phba->sli4_hba.hdwq[idx]; 522 qp = &phba->sli4_hba.hdwq[idx];
579 spin_lock_irqsave(&phba->hbalock, iflag); 523 spin_lock_irqsave(&phba->hbalock, iflag);
580 spin_lock(&qp->abts_scsi_buf_list_lock); 524 spin_lock(&qp->abts_io_buf_list_lock);
581 list_for_each_entry_safe(psb, next_psb, 525 list_for_each_entry_safe(psb, next_psb,
582 &qp->lpfc_abts_scsi_buf_list, list) { 526 &qp->lpfc_abts_io_buf_list, list) {
583 if (psb->cur_iocbq.sli4_xritag == xri) { 527 if (psb->cur_iocbq.sli4_xritag == xri) {
584 list_del(&psb->list); 528 list_del_init(&psb->list);
585 qp->abts_scsi_io_bufs--;
586 psb->exch_busy = 0; 529 psb->exch_busy = 0;
587 psb->status = IOSTAT_SUCCESS; 530 psb->status = IOSTAT_SUCCESS;
588 spin_unlock( 531#ifdef BUILD_NVME
589 &qp->abts_scsi_buf_list_lock); 532 if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
533 qp->abts_nvme_io_bufs--;
534 spin_unlock(&qp->abts_io_buf_list_lock);
535 spin_unlock_irqrestore(&phba->hbalock, iflag);
536 lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
537 return;
538 }
539#endif
540 qp->abts_scsi_io_bufs--;
541 spin_unlock(&qp->abts_io_buf_list_lock);
542
590 if (psb->rdata && psb->rdata->pnode) 543 if (psb->rdata && psb->rdata->pnode)
591 ndlp = psb->rdata->pnode; 544 ndlp = psb->rdata->pnode;
592 else 545 else
@@ -605,12 +558,12 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
605 return; 558 return;
606 } 559 }
607 } 560 }
608 spin_unlock(&qp->abts_scsi_buf_list_lock); 561 spin_unlock(&qp->abts_io_buf_list_lock);
609 for (i = 1; i <= phba->sli.last_iotag; i++) { 562 for (i = 1; i <= phba->sli.last_iotag; i++) {
610 iocbq = phba->sli.iocbq_lookup[i]; 563 iocbq = phba->sli.iocbq_lookup[i];
611 564
612 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 565 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
613 (iocbq->iocb_flag & LPFC_IO_LIBDFC)) 566 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
614 continue; 567 continue;
615 if (iocbq->sli4_xritag != xri) 568 if (iocbq->sli4_xritag != xri)
616 continue; 569 continue;
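With the per-protocol abort lists merged, the XRI lookup above walks one abts_io_buf_list and dispatches on the buffer's I/O flag, handing NVME buffers to lpfc_sli4_nvme_xri_aborted and finishing SCSI buffers inline. The dispatch shape, modeled with placeholder flag values:

#include <stdio.h>

#define LPFC_IO_FCP  0x1        /* placeholder flag values */
#define LPFC_IO_NVME 0x2

struct io_buf_model {
        unsigned short xri;
        unsigned int iocb_flag;
};

static void handle_xri_aborted(struct io_buf_model *bufs, int n,
                               unsigned short xri)
{
        for (int i = 0; i < n; i++) {
                if (bufs[i].xri != xri)
                        continue;
                if (bufs[i].iocb_flag == LPFC_IO_NVME)
                        printf("xri x%x: NVME completion path\n", xri);
                else
                        printf("xri x%x: SCSI completion path\n", xri);
                return;
        }
        printf("xri x%x not found\n", xri);
}

int main(void)
{
        struct io_buf_model bufs[] = {
                { 0x10, LPFC_IO_FCP },
                { 0x11, LPFC_IO_NVME },
        };

        handle_xri_aborted(bufs, 2, 0x11);
        return 0;
}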
@@ -685,8 +638,9 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
685 IOCB_t *iocb; 638 IOCB_t *iocb;
686 dma_addr_t pdma_phys_fcp_rsp; 639 dma_addr_t pdma_phys_fcp_rsp;
687 dma_addr_t pdma_phys_fcp_cmd; 640 dma_addr_t pdma_phys_fcp_cmd;
688 uint32_t sgl_size, cpu, idx; 641 uint32_t cpu, idx;
689 int tag; 642 int tag;
643 struct fcp_cmd_rsp_buf *tmp = NULL;
690 644
691 cpu = raw_smp_processor_id(); 645 cpu = raw_smp_processor_id();
692 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { 646 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
@@ -704,9 +658,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
704 return NULL; 658 return NULL;
705 } 659 }
706 660
707 sgl_size = phba->cfg_sg_dma_buf_size -
708 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
709
710 /* Setup key fields in buffer that may have been changed 661 /* Setup key fields in buffer that may have been changed
711 * if other protocols used this buffer. 662 * if other protocols used this buffer.
712 */ 663 */
@@ -721,9 +672,12 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
721#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 672#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
722 lpfc_cmd->prot_data_type = 0; 673 lpfc_cmd->prot_data_type = 0;
723#endif 674#endif
724 lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size); 675 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
725 lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd + 676 if (!tmp)
726 sizeof(struct fcp_cmnd)); 677 return NULL;
678
679 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
680 lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
727 681
728 /* 682 /*
729 * The first two SGEs are the FCP_CMD and FCP_RSP. 683 * The first two SGEs are the FCP_CMD and FCP_RSP.
@@ -731,7 +685,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
731 * first two and leave the rest for queuecommand. 685 * first two and leave the rest for queuecommand.
732 */ 686 */
733 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 687 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
734 pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size); 688 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
735 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 689 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
736 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 690 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
737 sgl->word2 = le32_to_cpu(sgl->word2); 691 sgl->word2 = le32_to_cpu(sgl->word2);
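The lpfc_get_cmd_rsp_buf_per_hdwq change above stops carving FCP_CMD/FCP_RSP out of the scatter-gather DMA buffer; each command instead gets a dedicated cmd/rsp pair, presumably cached per hardware queue given the helper's name. A rough model; all names and sizes below are stand-ins for the lpfc structures:

#include <stdio.h>
#include <stdlib.h>

struct fcp_cmd_rsp_buf_model {
        char fcp_cmnd[32];      /* placeholder sizes */
        char fcp_rsp[48];
};

static struct fcp_cmd_rsp_buf_model *get_cmd_rsp_buf(void)
{
        /* the driver would pop from a per-hdwq list under its lock;
         * a bare allocation stands in for that here */
        return calloc(1, sizeof(struct fcp_cmd_rsp_buf_model));
}

int main(void)
{
        struct fcp_cmd_rsp_buf_model *tmp = get_cmd_rsp_buf();

        if (!tmp)
                return 1;       /* caller fails the setup, as in the patch */
        printf("cmd at %p, rsp at %p\n",
               (void *)tmp->fcp_cmnd, (void *)tmp->fcp_rsp);
        free(tmp);
        return 0;
}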
@@ -835,11 +789,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
835 789
836 qp = psb->hdwq; 790 qp = psb->hdwq;
837 if (psb->exch_busy) { 791 if (psb->exch_busy) {
838 spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); 792 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
839 psb->pCmd = NULL; 793 psb->pCmd = NULL;
840 list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list); 794 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
841 qp->abts_scsi_io_bufs++; 795 qp->abts_scsi_io_bufs++;
842 spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag); 796 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
843 } else { 797 } else {
844 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp); 798 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
845 } 799 }
@@ -918,9 +872,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
918 "dma_map_sg. Config %d, seg_cnt %d\n", 872 "dma_map_sg. Config %d, seg_cnt %d\n",
919 __func__, phba->cfg_sg_seg_cnt, 873 __func__, phba->cfg_sg_seg_cnt,
920 lpfc_cmd->seg_cnt); 874 lpfc_cmd->seg_cnt);
875 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
921 lpfc_cmd->seg_cnt = 0; 876 lpfc_cmd->seg_cnt = 0;
922 scsi_dma_unmap(scsi_cmnd); 877 scsi_dma_unmap(scsi_cmnd);
923 return 1; 878 return 2;
924 } 879 }
925 880
926 /* 881 /*
@@ -1774,7 +1729,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1774 1729
1775 if (!sgpe || !sgde) { 1730 if (!sgpe || !sgde) {
1776 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1731 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1777 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n", 1732 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1778 sgpe, sgde); 1733 sgpe, sgde);
1779 return 0; 1734 return 0;
1780 } 1735 }
@@ -1989,7 +1944,8 @@ out:
1989 **/ 1944 **/
1990static int 1945static int
1991lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1946lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1992 struct sli4_sge *sgl, int datasegcnt) 1947 struct sli4_sge *sgl, int datasegcnt,
1948 struct lpfc_io_buf *lpfc_cmd)
1993{ 1949{
1994 struct scatterlist *sgde = NULL; /* s/g data entry */ 1950 struct scatterlist *sgde = NULL; /* s/g data entry */
1995 struct sli4_sge_diseed *diseed = NULL; 1951 struct sli4_sge_diseed *diseed = NULL;
@@ -2003,6 +1959,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2003 uint32_t checking = 1; 1959 uint32_t checking = 1;
2004 uint32_t dma_len; 1960 uint32_t dma_len;
2005 uint32_t dma_offset = 0; 1961 uint32_t dma_offset = 0;
1962 struct sli4_hybrid_sgl *sgl_xtra = NULL;
1963 int j;
1964 bool lsp_just_set = false;
2006 1965
2007 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1966 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2008 if (status) 1967 if (status)
@@ -2062,23 +2021,64 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2062 sgl++; 2021 sgl++;
2063 2022
2064 /* assumption: caller has already run dma_map_sg on command data */ 2023 /* assumption: caller has already run dma_map_sg on command data */
2065 scsi_for_each_sg(sc, sgde, datasegcnt, i) { 2024 sgde = scsi_sglist(sc);
2066 physaddr = sg_dma_address(sgde); 2025 j = 3;
2067 dma_len = sg_dma_len(sgde); 2026 for (i = 0; i < datasegcnt; i++) {
2068 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2027 /* clear it */
2069 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2028 sgl->word2 = 0;
2070 if ((i + 1) == datasegcnt)
2071 bf_set(lpfc_sli4_sge_last, sgl, 1);
2072 else
2073 bf_set(lpfc_sli4_sge_last, sgl, 0);
2074 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2075 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2076 2029
2077 sgl->sge_len = cpu_to_le32(dma_len); 2030 /* do we need to expand the segment */
2078 dma_offset += dma_len; 2031 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2032 ((datasegcnt - 1) != i)) {
2033 /* set LSP type */
2034 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2035
2036 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2037
2038 if (unlikely(!sgl_xtra)) {
2039 lpfc_cmd->seg_cnt = 0;
2040 return 0;
2041 }
2042 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2043 sgl_xtra->dma_phys_sgl));
2044 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2045 sgl_xtra->dma_phys_sgl));
2046
2047 } else {
2048 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2049 }
2050
2051 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2052 if ((datasegcnt - 1) == i)
2053 bf_set(lpfc_sli4_sge_last, sgl, 1);
2054 physaddr = sg_dma_address(sgde);
2055 dma_len = sg_dma_len(sgde);
2056 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2057 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2058
2059 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2060 sgl->word2 = cpu_to_le32(sgl->word2);
2061 sgl->sge_len = cpu_to_le32(dma_len);
2062
2063 dma_offset += dma_len;
2064 sgde = sg_next(sgde);
2065
2066 sgl++;
2067 num_sge++;
2068 lsp_just_set = false;
2069
2070 } else {
2071 sgl->word2 = cpu_to_le32(sgl->word2);
2072 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2073
2074 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2075 i = i - 1;
2076
2077 lsp_just_set = true;
2078 }
2079
2080 j++;
2079 2081
2080 sgl++;
2081 num_sge++;
2082 } 2082 }
2083 2083
2084out: 2084out:
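The rewritten loop above is the heart of the extended-SGL (xpsgl) support: whenever the next entry would cross border_sge_num and data segments remain, the driver plants a Link Segment (LSP) entry pointing at an extra SGL page from lpfc_get_sgl_per_hdwq() and keeps filling there, re-running the same segment (the i = i - 1 step) on the new page. A compact model of that chaining; sizes are illustrative, not the lpfc values:

#include <stdio.h>

enum sge_type { SGE_DATA, SGE_LSP };

struct sge_model {
        enum sge_type type;
        void *addr;             /* data buffer, or next SGL page for LSP */
        unsigned int len;
};

#define SGES_PER_PAGE 4
#define NSEG 6

int main(void)
{
        struct sge_model page1[SGES_PER_PAGE], page2[SGES_PER_PAGE];
        struct sge_model *sgl = page1;
        int slot = 0;
        char data[NSEG][16];    /* six segments, one page holds four */

        for (int i = 0; i < NSEG; i++) {
                /* expand unless this is the final data segment */
                if (slot == SGES_PER_PAGE - 1 && i != NSEG - 1) {
                        sgl[slot] = (struct sge_model){ SGE_LSP, page2, 0 };
                        sgl = page2;
                        slot = 0;
                }
                sgl[slot++] = (struct sge_model){ SGE_DATA, data[i], 16 };
        }
        printf("page1[%d] is %s\n", SGES_PER_PAGE - 1,
               page1[SGES_PER_PAGE - 1].type == SGE_LSP ? "LSP" : "DATA");
        return 0;
}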
@@ -2124,7 +2124,8 @@ out:
2124 **/ 2124 **/
2125static int 2125static int
2126lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2126lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2127 struct sli4_sge *sgl, int datacnt, int protcnt) 2127 struct sli4_sge *sgl, int datacnt, int protcnt,
2128 struct lpfc_io_buf *lpfc_cmd)
2128{ 2129{
2129 struct scatterlist *sgde = NULL; /* s/g data entry */ 2130 struct scatterlist *sgde = NULL; /* s/g data entry */
2130 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2131 struct scatterlist *sgpe = NULL; /* s/g prot entry */
@@ -2146,14 +2147,15 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2146#endif 2147#endif
2147 uint32_t checking = 1; 2148 uint32_t checking = 1;
2148 uint32_t dma_offset = 0; 2149 uint32_t dma_offset = 0;
2149 int num_sge = 0; 2150 int num_sge = 0, j = 2;
2151 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2150 2152
2151 sgpe = scsi_prot_sglist(sc); 2153 sgpe = scsi_prot_sglist(sc);
2152 sgde = scsi_sglist(sc); 2154 sgde = scsi_sglist(sc);
2153 2155
2154 if (!sgpe || !sgde) { 2156 if (!sgpe || !sgde) {
2155 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2157 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2156 "9082 Invalid s/g entry: data=0x%p prot=0x%p\n", 2158 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2157 sgpe, sgde); 2159 sgpe, sgde);
2158 return 0; 2160 return 0;
2159 } 2161 }
@@ -2179,9 +2181,37 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2179 split_offset = 0; 2181 split_offset = 0;
2180 do { 2182 do {
2181 /* Check to see if we ran out of space */ 2183 /* Check to see if we ran out of space */
2182 if (num_sge >= (phba->cfg_total_seg_cnt - 2)) 2184 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2185 !(phba->cfg_xpsgl))
2183 return num_sge + 3; 2186 return num_sge + 3;
2184 2187
2188 /* DISEED and DIF have to be together */
2189 if (!((j + 1) % phba->border_sge_num) ||
2190 !((j + 2) % phba->border_sge_num) ||
2191 !((j + 3) % phba->border_sge_num)) {
2192 sgl->word2 = 0;
2193
2194 /* set LSP type */
2195 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2196
2197 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2198
2199 if (unlikely(!sgl_xtra)) {
2200 goto out;
2201 } else {
2202 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2203 sgl_xtra->dma_phys_sgl));
2204 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2205 sgl_xtra->dma_phys_sgl));
2206 }
2207
2208 sgl->word2 = cpu_to_le32(sgl->word2);
2209 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2210
2211 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2212 j = 0;
2213 }
2214
2185 /* setup DISEED with what we have */ 2215 /* setup DISEED with what we have */
2186 diseed = (struct sli4_sge_diseed *) sgl; 2216 diseed = (struct sli4_sge_diseed *) sgl;
2187 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2217 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2228,7 +2258,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2228 2258
2229 /* advance sgl and increment bde count */ 2259 /* advance sgl and increment bde count */
2230 num_sge++; 2260 num_sge++;
2261
2231 sgl++; 2262 sgl++;
2263 j++;
2232 2264
2233 /* setup the first BDE that points to protection buffer */ 2265 /* setup the first BDE that points to protection buffer */
2234 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 2266 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
@@ -2243,6 +2275,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2243 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); 2275 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2244 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); 2276 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2245 sgl->word2 = cpu_to_le32(sgl->word2); 2277 sgl->word2 = cpu_to_le32(sgl->word2);
2278 sgl->sge_len = 0;
2246 2279
2247 protgrp_blks = protgroup_len / 8; 2280 protgrp_blks = protgroup_len / 8;
2248 protgrp_bytes = protgrp_blks * blksize; 2281 protgrp_bytes = protgrp_blks * blksize;
@@ -2263,9 +2296,14 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2263 /* setup SGE's for data blocks associated with DIF data */ 2296 /* setup SGE's for data blocks associated with DIF data */
2264 pgdone = 0; 2297 pgdone = 0;
2265 subtotal = 0; /* total bytes processed for current prot grp */ 2298 subtotal = 0; /* total bytes processed for current prot grp */
2299
2300 sgl++;
2301 j++;
2302
2266 while (!pgdone) { 2303 while (!pgdone) {
2267 /* Check to see if we ran out of space */ 2304 /* Check to see if we ran out of space */
2268 if (num_sge >= phba->cfg_total_seg_cnt) 2305 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2306 !phba->cfg_xpsgl)
2269 return num_sge + 1; 2307 return num_sge + 1;
2270 2308
2271 if (!sgde) { 2309 if (!sgde) {
@@ -2274,60 +2312,101 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2274 __func__); 2312 __func__);
2275 return 0; 2313 return 0;
2276 } 2314 }
2277 sgl++;
2278 dataphysaddr = sg_dma_address(sgde) + split_offset;
2279 2315
2280 remainder = sg_dma_len(sgde) - split_offset; 2316 if (!((j + 1) % phba->border_sge_num)) {
2317 sgl->word2 = 0;
2281 2318
2282 if ((subtotal + remainder) <= protgrp_bytes) { 2319 /* set LSP type */
2283 /* we can use this whole buffer */ 2320 bf_set(lpfc_sli4_sge_type, sgl,
2284 dma_len = remainder; 2321 LPFC_SGE_TYPE_LSP);
2285 split_offset = 0;
2286 2322
2287 if ((subtotal + remainder) == protgrp_bytes) 2323 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2288 pgdone = 1; 2324 lpfc_cmd);
2325
2326 if (unlikely(!sgl_xtra)) {
2327 goto out;
2328 } else {
2329 sgl->addr_lo = cpu_to_le32(
2330 putPaddrLow(sgl_xtra->dma_phys_sgl));
2331 sgl->addr_hi = cpu_to_le32(
2332 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2333 }
2334
2335 sgl->word2 = cpu_to_le32(sgl->word2);
2336 sgl->sge_len = cpu_to_le32(
2337 phba->cfg_sg_dma_buf_size);
2338
2339 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2289 } else { 2340 } else {
2290 /* must split this buffer with next prot grp */ 2341 dataphysaddr = sg_dma_address(sgde) +
2291 dma_len = protgrp_bytes - subtotal; 2342 split_offset;
2292 split_offset += dma_len;
2293 }
2294 2343
2295 subtotal += dma_len; 2344 remainder = sg_dma_len(sgde) - split_offset;
2296 2345
2297 sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr)); 2346 if ((subtotal + remainder) <= protgrp_bytes) {
2298 sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr)); 2347 /* we can use this whole buffer */
2299 bf_set(lpfc_sli4_sge_last, sgl, 0); 2348 dma_len = remainder;
2300 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2349 split_offset = 0;
2301 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2302 2350
2303 sgl->sge_len = cpu_to_le32(dma_len); 2351 if ((subtotal + remainder) ==
2304 dma_offset += dma_len; 2352 protgrp_bytes)
2353 pgdone = 1;
2354 } else {
2355 /* must split this buffer with next
2356 * prot grp
2357 */
2358 dma_len = protgrp_bytes - subtotal;
2359 split_offset += dma_len;
2360 }
2305 2361
2306 num_sge++; 2362 subtotal += dma_len;
2307 curr_data++;
2308 2363
2309 if (split_offset) 2364 sgl->word2 = 0;
2310 break; 2365 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2366 dataphysaddr));
2367 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2368 dataphysaddr));
2369 bf_set(lpfc_sli4_sge_last, sgl, 0);
2370 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2371 bf_set(lpfc_sli4_sge_type, sgl,
2372 LPFC_SGE_TYPE_DATA);
2311 2373
2312 /* Move to the next s/g segment if possible */ 2374 sgl->sge_len = cpu_to_le32(dma_len);
2313 sgde = sg_next(sgde); 2375 dma_offset += dma_len;
2376
2377 num_sge++;
2378 curr_data++;
2379
2380 if (split_offset) {
2381 sgl++;
2382 j++;
2383 break;
2384 }
2385
2386 /* Move to the next s/g segment if possible */
2387 sgde = sg_next(sgde);
2388
2389 sgl++;
2390 }
2391
2392 j++;
2314 } 2393 }
2315 2394
2316 if (protgroup_offset) { 2395 if (protgroup_offset) {
2317 /* update the reference tag */ 2396 /* update the reference tag */
2318 reftag += protgrp_blks; 2397 reftag += protgrp_blks;
2319 sgl++;
2320 continue; 2398 continue;
2321 } 2399 }
2322 2400
2323 /* are we done ? */ 2401 /* are we done ? */
2324 if (curr_prot == protcnt) { 2402 if (curr_prot == protcnt) {
2403 /* mark the last SGL */
2404 sgl--;
2325 bf_set(lpfc_sli4_sge_last, sgl, 1); 2405 bf_set(lpfc_sli4_sge_last, sgl, 1);
2326 alldone = 1; 2406 alldone = 1;
2327 } else if (curr_prot < protcnt) { 2407 } else if (curr_prot < protcnt) {
2328 /* advance to next prot buffer */ 2408 /* advance to next prot buffer */
2329 sgpe = sg_next(sgpe); 2409 sgpe = sg_next(sgpe);
2330 sgl++;
2331 2410
2332 /* update the reference tag */ 2411 /* update the reference tag */
2333 reftag += protgrp_blks; 2412 reftag += protgrp_blks;
@@ -2430,7 +2509,10 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2430 * 2509 *
2431 * This is the protection/DIF aware version of 2510 * This is the protection/DIF aware version of
2432 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 2511 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2433 * two functions eventually, but for now, it's here 2512 * two functions eventually, but for now, it's here.
2513 * RETURNS 0 - SUCCESS,
2514 * 1 - Failed DMA map, retry.
 2515 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2434 **/ 2516 **/
2435static int 2517static int
2436lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, 2518lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
@@ -2444,6 +2526,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2444 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 2526 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2445 int prot_group_type = 0; 2527 int prot_group_type = 0;
2446 int fcpdl; 2528 int fcpdl;
2529 int ret = 1;
2447 struct lpfc_vport *vport = phba->pport; 2530 struct lpfc_vport *vport = phba->pport;
2448 2531
2449 /* 2532 /*
@@ -2467,8 +2550,11 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2467 lpfc_cmd->seg_cnt = datasegcnt; 2550 lpfc_cmd->seg_cnt = datasegcnt;
2468 2551
2469 /* First check if data segment count from SCSI Layer is good */ 2552 /* First check if data segment count from SCSI Layer is good */
2470 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) 2553 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2554 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2555 ret = 2;
2471 goto err; 2556 goto err;
2557 }
2472 2558
2473 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2559 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2474 2560
@@ -2476,14 +2562,18 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2476 case LPFC_PG_TYPE_NO_DIF: 2562 case LPFC_PG_TYPE_NO_DIF:
2477 2563
2478 /* Here we need to add a PDE5 and PDE6 to the count */ 2564 /* Here we need to add a PDE5 and PDE6 to the count */
2479 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) 2565 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2566 ret = 2;
2480 goto err; 2567 goto err;
2568 }
2481 2569
2482 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2570 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2483 datasegcnt); 2571 datasegcnt);
2484 /* we should have 2 or more entries in buffer list */ 2572 /* we should have 2 or more entries in buffer list */
2485 if (num_bde < 2) 2573 if (num_bde < 2) {
2574 ret = 2;
2486 goto err; 2575 goto err;
2576 }
2487 break; 2577 break;
2488 2578
2489 case LPFC_PG_TYPE_DIF_BUF: 2579 case LPFC_PG_TYPE_DIF_BUF:
@@ -2507,15 +2597,19 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2507 * protection data segment. 2597 * protection data segment.
2508 */ 2598 */
2509 if ((lpfc_cmd->prot_seg_cnt * 4) > 2599 if ((lpfc_cmd->prot_seg_cnt * 4) >
2510 (phba->cfg_total_seg_cnt - 2)) 2600 (phba->cfg_total_seg_cnt - 2)) {
2601 ret = 2;
2511 goto err; 2602 goto err;
2603 }
2512 2604
2513 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2605 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2514 datasegcnt, protsegcnt); 2606 datasegcnt, protsegcnt);
2515 /* we should have 3 or more entries in buffer list */ 2607 /* we should have 3 or more entries in buffer list */
2516 if ((num_bde < 3) || 2608 if ((num_bde < 3) ||
2517 (num_bde > phba->cfg_total_seg_cnt)) 2609 (num_bde > phba->cfg_total_seg_cnt)) {
2610 ret = 2;
2518 goto err; 2611 goto err;
2612 }
2519 break; 2613 break;
2520 2614
2521 case LPFC_PG_TYPE_INVALID: 2615 case LPFC_PG_TYPE_INVALID:
@@ -2526,7 +2620,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2526 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 2620 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2527 "9022 Unexpected protection group %i\n", 2621 "9022 Unexpected protection group %i\n",
2528 prot_group_type); 2622 prot_group_type);
2529 return 1; 2623 return 2;
2530 } 2624 }
2531 } 2625 }
2532 2626
@@ -2576,7 +2670,7 @@ err:
2576 2670
2577 lpfc_cmd->seg_cnt = 0; 2671 lpfc_cmd->seg_cnt = 0;
2578 lpfc_cmd->prot_seg_cnt = 0; 2672 lpfc_cmd->prot_seg_cnt = 0;
2579 return 1; 2673 return ret;
2580} 2674}
2581 2675
2582/* 2676/*
@@ -2809,26 +2903,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2809 uint32_t bgstat = bgf->bgstat; 2903 uint32_t bgstat = bgf->bgstat;
2810 uint64_t failing_sector = 0; 2904 uint64_t failing_sector = 0;
2811 2905
2812 spin_lock(&_dump_buf_lock);
2813 if (!_dump_buf_done) {
2814 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
2815 " Data for %u blocks to debugfs\n",
2816 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2817 lpfc_debug_save_data(phba, cmd);
2818
2819 /* If we have a prot sgl, save the DIF buffer */
2820 if (lpfc_prot_group_type(phba, cmd) ==
2821 LPFC_PG_TYPE_DIF_BUF) {
2822 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
2823 "Saving DIF for %u blocks to debugfs\n",
2824 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2825 lpfc_debug_save_dif(phba, cmd);
2826 }
2827
2828 _dump_buf_done = 1;
2829 }
2830 spin_unlock(&_dump_buf_lock);
2831
2832 if (lpfc_bgs_get_invalid_prof(bgstat)) { 2906 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2833 cmd->result = DID_ERROR << 16; 2907 cmd->result = DID_ERROR << 16;
2834 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2908 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2962,7 +3036,8 @@ out:
2962 * field of @lpfc_cmd for device with SLI-4 interface spec. 3036 * field of @lpfc_cmd for device with SLI-4 interface spec.
2963 * 3037 *
2964 * Return codes: 3038 * Return codes:
2965 * 1 - Error 3039 * 2 - Error - Do not retry
3040 * 1 - Error - Retry
2966 * 0 - Success 3041 * 0 - Success
2967 **/ 3042 **/
2968static int 3043static int
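A sketch of how a caller can act on the tri-state return codes documented above: 1 is a transient DMA-map failure worth requeueing, 2 means the command itself is invalid and must fail hard. The mapping to host-busy versus hard-error below is an assumption about the queuecommand path, not quoted from the patch:

#include <stdio.h>

enum prep_ret { PREP_OK = 0, PREP_RETRY = 1, PREP_NO_RETRY = 2 };

static const char *dispatch(enum prep_ret ret)
{
        switch (ret) {
        case PREP_OK:       return "issue to firmware";
        case PREP_RETRY:    return "requeue (host busy)";
        case PREP_NO_RETRY: return "complete with hard error";
        }
        return "?";
}

int main(void)
{
        for (int r = PREP_OK; r <= PREP_NO_RETRY; r++)
                printf("ret %d -> %s\n", r, dispatch(r));
        return 0;
}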
@@ -2978,8 +3053,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2978 uint32_t num_bde = 0; 3053 uint32_t num_bde = 0;
2979 uint32_t dma_len; 3054 uint32_t dma_len;
2980 uint32_t dma_offset = 0; 3055 uint32_t dma_offset = 0;
2981 int nseg; 3056 int nseg, i, j;
2982 struct ulp_bde64 *bde; 3057 struct ulp_bde64 *bde;
3058 bool lsp_just_set = false;
3059 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2983 3060
2984 /* 3061 /*
2985 * There are three possibilities here - use scatter-gather segment, use 3062 * There are three possibilities here - use scatter-gather segment, use
@@ -3006,15 +3083,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3006 sgl += 1; 3083 sgl += 1;
3007 first_data_sgl = sgl; 3084 first_data_sgl = sgl;
3008 lpfc_cmd->seg_cnt = nseg; 3085 lpfc_cmd->seg_cnt = nseg;
3009 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3086 if (!phba->cfg_xpsgl &&
3087 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3010 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" 3088 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3011 " %s: Too many sg segments from " 3089 " %s: Too many sg segments from "
3012 "dma_map_sg. Config %d, seg_cnt %d\n", 3090 "dma_map_sg. Config %d, seg_cnt %d\n",
3013 __func__, phba->cfg_sg_seg_cnt, 3091 __func__, phba->cfg_sg_seg_cnt,
3014 lpfc_cmd->seg_cnt); 3092 lpfc_cmd->seg_cnt);
3093 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3015 lpfc_cmd->seg_cnt = 0; 3094 lpfc_cmd->seg_cnt = 0;
3016 scsi_dma_unmap(scsi_cmnd); 3095 scsi_dma_unmap(scsi_cmnd);
3017 return 1; 3096 return 2;
3018 } 3097 }
3019 3098
3020 /* 3099 /*
@@ -3026,22 +3105,80 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3026 * the IOCB. If it can't then the BDEs get added to a BPL as it 3105 * the IOCB. If it can't then the BDEs get added to a BPL as it
3027 * does for SLI-2 mode. 3106 * does for SLI-2 mode.
3028 */ 3107 */
3029 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 3108
3030 physaddr = sg_dma_address(sgel); 3109 /* for tracking segment boundaries */
3031 dma_len = sg_dma_len(sgel); 3110 sgel = scsi_sglist(scsi_cmnd);
3032 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 3111 j = 2;
3033 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 3112 for (i = 0; i < nseg; i++) {
3034 sgl->word2 = le32_to_cpu(sgl->word2); 3113 sgl->word2 = 0;
3035 if ((num_bde + 1) == nseg) 3114 if ((num_bde + 1) == nseg) {
3036 bf_set(lpfc_sli4_sge_last, sgl, 1); 3115 bf_set(lpfc_sli4_sge_last, sgl, 1);
3037 else 3116 bf_set(lpfc_sli4_sge_type, sgl,
3117 LPFC_SGE_TYPE_DATA);
3118 } else {
3038 bf_set(lpfc_sli4_sge_last, sgl, 0); 3119 bf_set(lpfc_sli4_sge_last, sgl, 0);
3039 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 3120
3040 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 3121 /* do we need to expand the segment */
3041 sgl->word2 = cpu_to_le32(sgl->word2); 3122 if (!lsp_just_set &&
3042 sgl->sge_len = cpu_to_le32(dma_len); 3123 !((j + 1) % phba->border_sge_num) &&
3043 dma_offset += dma_len; 3124 ((nseg - 1) != i)) {
3044 sgl++; 3125 /* set LSP type */
3126 bf_set(lpfc_sli4_sge_type, sgl,
3127 LPFC_SGE_TYPE_LSP);
3128
3129 sgl_xtra = lpfc_get_sgl_per_hdwq(
3130 phba, lpfc_cmd);
3131
3132 if (unlikely(!sgl_xtra)) {
3133 lpfc_cmd->seg_cnt = 0;
3134 scsi_dma_unmap(scsi_cmnd);
3135 return 1;
3136 }
3137 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3138 sgl_xtra->dma_phys_sgl));
3139 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3140 sgl_xtra->dma_phys_sgl));
3141
3142 } else {
3143 bf_set(lpfc_sli4_sge_type, sgl,
3144 LPFC_SGE_TYPE_DATA);
3145 }
3146 }
3147
3148 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3149 LPFC_SGE_TYPE_LSP)) {
3150 if ((nseg - 1) == i)
3151 bf_set(lpfc_sli4_sge_last, sgl, 1);
3152
3153 physaddr = sg_dma_address(sgel);
3154 dma_len = sg_dma_len(sgel);
3155 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3156 physaddr));
3157 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3158 physaddr));
3159
3160 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3161 sgl->word2 = cpu_to_le32(sgl->word2);
3162 sgl->sge_len = cpu_to_le32(dma_len);
3163
3164 dma_offset += dma_len;
3165 sgel = sg_next(sgel);
3166
3167 sgl++;
3168 lsp_just_set = false;
3169
3170 } else {
3171 sgl->word2 = cpu_to_le32(sgl->word2);
3172 sgl->sge_len = cpu_to_le32(
3173 phba->cfg_sg_dma_buf_size);
3174
3175 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3176 i = i - 1;
3177
3178 lsp_just_set = true;
3179 }
3180
3181 j++;
3045 } 3182 }
3046 /* 3183 /*
3047 * Setup the first Payload BDE. For FCoE we just key off 3184 * Setup the first Payload BDE. For FCoE we just key off
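
The rewritten loop above interleaves two SGE kinds: ordinary DATA entries, and an LSP (link) entry emitted whenever the current SGL segment is about to overflow, pointing at an extra chunk pulled from the hdwq pool via lpfc_get_sgl_per_hdwq(). Below is a minimal userspace sketch of the same chaining idea; the types and names (sge, SGE_DATA, SGE_LINK, alloc_chunk, CHUNK_SGES) are hypothetical stand-ins, not the driver's structures.

#include <stdlib.h>
#include <stdint.h>

/* Chained scatter-gather sketch: when the current chunk has only one
 * slot left and data remains, that slot becomes a LINK entry pointing
 * at a new chunk. Hypothetical types, not the driver's sli4_sge. */
enum sge_type { SGE_DATA, SGE_LINK };

struct sge {
	uint64_t addr;
	uint32_t len;
	enum sge_type type;
	int last;
};

#define CHUNK_SGES 4   /* assumed per-chunk capacity */

static struct sge *alloc_chunk(void)
{
	return calloc(CHUNK_SGES, sizeof(struct sge));
}

/* Build a chained SGL for nseg segments; returns the head chunk or
 * NULL on allocation failure (cleanup of partial chains omitted). */
static struct sge *build_sgl(const uint64_t *addrs, const uint32_t *lens,
			     int nseg)
{
	struct sge *head = alloc_chunk(), *sgl = head;
	int slot = 0;

	if (!head)
		return NULL;
	for (int i = 0; i < nseg; i++) {
		if (slot == CHUNK_SGES - 1 && i != nseg - 1) {
			struct sge *next = alloc_chunk();

			if (!next)
				return NULL;
			sgl[slot].type = SGE_LINK;   /* LSP analogue */
			sgl[slot].addr = (uintptr_t)next;
			sgl = next;
			slot = 0;
		}
		sgl[slot].type = SGE_DATA;
		sgl[slot].addr = addrs[i];
		sgl[slot].len = lens[i];
		sgl[slot].last = (i == nseg - 1);
		slot++;
	}
	return head;
}

This sketch emits the link before placing the data segment; the driver achieves the same effect with the i = i - 1 rewind above, which replays the segment that triggered the expansion into the fresh chunk.
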
@@ -3110,6 +3247,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3110 * This is the protection/DIF aware version of 3247 * This is the protection/DIF aware version of
3111 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 3248 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3112 * two functions eventually, but for now, it's here 3249 * two functions eventually, but for now, it's here
3250 * Return codes:
3251 * 2 - Error - Do not retry
3252 * 1 - Error - Retry
3253 * 0 - Success
3113 **/ 3254 **/
3114static int 3255static int
3115lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, 3256lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
@@ -3123,6 +3264,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3123 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3264 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3124 int prot_group_type = 0; 3265 int prot_group_type = 0;
3125 int fcpdl; 3266 int fcpdl;
3267 int ret = 1;
3126 struct lpfc_vport *vport = phba->pport; 3268 struct lpfc_vport *vport = phba->pport;
3127 3269
3128 /* 3270 /*
@@ -3152,23 +3294,33 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3152 lpfc_cmd->seg_cnt = datasegcnt; 3294 lpfc_cmd->seg_cnt = datasegcnt;
3153 3295
3154 /* First check if data segment count from SCSI Layer is good */ 3296 /* First check if data segment count from SCSI Layer is good */
3155 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) 3297 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3298 !phba->cfg_xpsgl) {
3299 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3300 ret = 2;
3156 goto err; 3301 goto err;
3302 }
3157 3303
3158 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3304 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3159 3305
3160 switch (prot_group_type) { 3306 switch (prot_group_type) {
3161 case LPFC_PG_TYPE_NO_DIF: 3307 case LPFC_PG_TYPE_NO_DIF:
3162 /* Here we need to add a DISEED to the count */ 3308 /* Here we need to add a DISEED to the count */
3163 if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) 3309 if (((lpfc_cmd->seg_cnt + 1) >
3310 phba->cfg_total_seg_cnt) &&
3311 !phba->cfg_xpsgl) {
3312 ret = 2;
3164 goto err; 3313 goto err;
3314 }
3165 3315
3166 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3316 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3167 datasegcnt); 3317 datasegcnt, lpfc_cmd);
3168 3318
3169 /* we should have 2 or more entries in buffer list */ 3319 /* we should have 2 or more entries in buffer list */
3170 if (num_sge < 2) 3320 if (num_sge < 2) {
3321 ret = 2;
3171 goto err; 3322 goto err;
3323 }
3172 break; 3324 break;
3173 3325
3174 case LPFC_PG_TYPE_DIF_BUF: 3326 case LPFC_PG_TYPE_DIF_BUF:
@@ -3190,17 +3342,23 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 3190 * There is a minimum of 3 SGEs used for every 3342
3191 * protection data segment. 3343 * protection data segment.
3192 */ 3344 */
3193 if ((lpfc_cmd->prot_seg_cnt * 3) > 3345 if (((lpfc_cmd->prot_seg_cnt * 3) >
3194 (phba->cfg_total_seg_cnt - 2)) 3346 (phba->cfg_total_seg_cnt - 2)) &&
3347 !phba->cfg_xpsgl) {
3348 ret = 2;
3195 goto err; 3349 goto err;
3350 }
3196 3351
3197 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3352 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3198 datasegcnt, protsegcnt); 3353 datasegcnt, protsegcnt, lpfc_cmd);
3199 3354
3200 /* we should have 3 or more entries in buffer list */ 3355 /* we should have 3 or more entries in buffer list */
3201 if ((num_sge < 3) || 3356 if (num_sge < 3 ||
3202 (num_sge > phba->cfg_total_seg_cnt)) 3357 (num_sge > phba->cfg_total_seg_cnt &&
3358 !phba->cfg_xpsgl)) {
3359 ret = 2;
3203 goto err; 3360 goto err;
3361 }
3204 break; 3362 break;
3205 3363
3206 case LPFC_PG_TYPE_INVALID: 3364 case LPFC_PG_TYPE_INVALID:
@@ -3211,7 +3369,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3211 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 3369 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3212 "9083 Unexpected protection group %i\n", 3370 "9083 Unexpected protection group %i\n",
3213 prot_group_type); 3371 prot_group_type);
3214 return 1; 3372 return 2;
3215 } 3373 }
3216 } 3374 }
3217 3375
@@ -3273,7 +3431,7 @@ err:
3273 3431
3274 lpfc_cmd->seg_cnt = 0; 3432 lpfc_cmd->seg_cnt = 0;
3275 lpfc_cmd->prot_seg_cnt = 0; 3433 lpfc_cmd->prot_seg_cnt = 0;
3276 return 1; 3434 return ret;
3277} 3435}
3278 3436
3279/** 3437/**
@@ -3839,7 +3997,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3839 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 3997 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3840 3998
3841 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3999 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3842 "0710 Iodone <%d/%llu> cmd %p, error " 4000 "0710 Iodone <%d/%llu> cmd x%px, error "
3843 "x%x SNS x%x x%x Data: x%x x%x\n", 4001 "x%x SNS x%x x%x Data: x%x x%x\n",
3844 cmd->device->id, cmd->device->lun, cmd, 4002 cmd->device->id, cmd->device->lun, cmd,
3845 cmd->result, *lp, *(lp + 3), cmd->retries, 4003 cmd->result, *lp, *(lp + 3), cmd->retries,
@@ -4454,8 +4612,12 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4454 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 4612 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4455 } 4613 }
4456 4614
4457 if (err) 4615 if (err == 2) {
4616 cmnd->result = DID_ERROR << 16;
4617 goto out_fail_command_release_buf;
4618 } else if (err) {
4458 goto out_host_busy_free_buf; 4619 goto out_host_busy_free_buf;
4620 }
4459 4621
4460 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 4622 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4461 4623
@@ -4526,6 +4688,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4526 out_tgt_busy: 4688 out_tgt_busy:
4527 return SCSI_MLQUEUE_TARGET_BUSY; 4689 return SCSI_MLQUEUE_TARGET_BUSY;
4528 4690
4691 out_fail_command_release_buf:
4692 lpfc_release_scsi_buf(phba, lpfc_cmd);
4693
4529 out_fail_command: 4694 out_fail_command:
4530 cmnd->scsi_done(cmnd); 4695 cmnd->scsi_done(cmnd);
4531 return 0; 4696 return 0;
@@ -4568,7 +4733,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4568 4733
4569 spin_lock_irqsave(&phba->hbalock, flags); 4734 spin_lock_irqsave(&phba->hbalock, flags);
4570 /* driver queued commands are in process of being flushed */ 4735 /* driver queued commands are in process of being flushed */
4571 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 4736 if (phba->hba_flag & HBA_IOQ_FLUSH) {
4572 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4737 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4573 "3168 SCSI Layer abort requested I/O has been " 4738 "3168 SCSI Layer abort requested I/O has been "
4574 "flushed by LLD.\n"); 4739 "flushed by LLD.\n");
@@ -4589,7 +4754,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4589 4754
4590 iocb = &lpfc_cmd->cur_iocbq; 4755 iocb = &lpfc_cmd->cur_iocbq;
4591 if (phba->sli_rev == LPFC_SLI_REV4) { 4756 if (phba->sli_rev == LPFC_SLI_REV4) {
4592 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring; 4757 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
4593 if (!pring_s4) { 4758 if (!pring_s4) {
4594 ret = FAILED; 4759 ret = FAILED;
4595 goto out_unlock_buf; 4760 goto out_unlock_buf;
@@ -4956,7 +5121,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
4956 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5121 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4957 if (!rdata) { 5122 if (!rdata) {
4958 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5123 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4959 "0797 Tgt Map rport failure: rdata x%p\n", rdata); 5124 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
4960 return FAILED; 5125 return FAILED;
4961 } 5126 }
4962 pnode = rdata->pnode; 5127 pnode = rdata->pnode;
@@ -5054,7 +5219,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5054 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5219 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5055 if (!rdata || !rdata->pnode) { 5220 if (!rdata || !rdata->pnode) {
5056 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5221 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5057 "0798 Device Reset rdata failure: rdata x%p\n", 5222 "0798 Device Reset rdata failure: rdata x%px\n",
5058 rdata); 5223 rdata);
5059 return FAILED; 5224 return FAILED;
5060 } 5225 }
@@ -5066,7 +5231,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5066 status = lpfc_chk_tgt_mapped(vport, cmnd); 5231 status = lpfc_chk_tgt_mapped(vport, cmnd);
5067 if (status == FAILED) { 5232 if (status == FAILED) {
5068 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5233 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5069 "0721 Device Reset rport failure: rdata x%p\n", rdata); 5234 "0721 Device Reset rport failure: rdata x%px\n", rdata);
5070 return FAILED; 5235 return FAILED;
5071 } 5236 }
5072 5237
@@ -5125,7 +5290,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5125 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5290 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5126 if (!rdata || !rdata->pnode) { 5291 if (!rdata || !rdata->pnode) {
5127 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5292 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5128 "0799 Target Reset rdata failure: rdata x%p\n", 5293 "0799 Target Reset rdata failure: rdata x%px\n",
5129 rdata); 5294 rdata);
5130 return FAILED; 5295 return FAILED;
5131 } 5296 }
@@ -5137,7 +5302,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5137 status = lpfc_chk_tgt_mapped(vport, cmnd); 5302 status = lpfc_chk_tgt_mapped(vport, cmnd);
5138 if (status == FAILED) { 5303 if (status == FAILED) {
5139 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 5304 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5140 "0722 Target Reset rport failure: rdata x%p\n", rdata); 5305 "0722 Target Reset rport failure: rdata x%px\n", rdata);
5141 if (pnode) { 5306 if (pnode) {
5142 spin_lock_irq(shost->host_lock); 5307 spin_lock_irq(shost->host_lock);
5143 pnode->nlp_flag &= ~NLP_NPR_ADISC; 5308 pnode->nlp_flag &= ~NLP_NPR_ADISC;
@@ -5295,18 +5460,20 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5295 lpfc_offline(phba); 5460 lpfc_offline(phba);
5296 rc = lpfc_sli_brdrestart(phba); 5461 rc = lpfc_sli_brdrestart(phba);
5297 if (rc) 5462 if (rc)
5298 ret = FAILED; 5463 goto error;
5464
5299 rc = lpfc_online(phba); 5465 rc = lpfc_online(phba);
5300 if (rc) 5466 if (rc)
5301 ret = FAILED; 5467 goto error;
5468
5302 lpfc_unblock_mgmt_io(phba); 5469 lpfc_unblock_mgmt_io(phba);
5303 5470
5304 if (ret == FAILED) {
5305 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5306 "3323 Failed host reset, bring it offline\n");
5307 lpfc_sli4_offline_eratt(phba);
5308 }
5309 return ret; 5471 return ret;
5472error:
5473 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5474 "3323 Failed host reset\n");
5475 lpfc_unblock_mgmt_io(phba);
5476 return FAILED;
5310} 5477}
5311 5478
5312/** 5479/**
@@ -5870,7 +6037,7 @@ struct scsi_host_template lpfc_template_no_hr = {
5870 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6037 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
5871 .cmd_per_lun = LPFC_CMD_PER_LUN, 6038 .cmd_per_lun = LPFC_CMD_PER_LUN,
5872 .shost_attrs = lpfc_hba_attrs, 6039 .shost_attrs = lpfc_hba_attrs,
5873 .max_sectors = 0xFFFF, 6040 .max_sectors = 0xFFFFFFFF,
5874 .vendor_id = LPFC_NL_VENDOR_ID, 6041 .vendor_id = LPFC_NL_VENDOR_ID,
5875 .change_queue_depth = scsi_change_queue_depth, 6042 .change_queue_depth = scsi_change_queue_depth,
5876 .track_queue_depth = 1, 6043 .track_queue_depth = 1,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f9e6a135d656..a0c6945b8139 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1391,9 +1391,12 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1391 1391
1392 while (!list_empty(iocblist)) { 1392 while (!list_empty(iocblist)) {
1393 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1393 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1394 if (!piocb->iocb_cmpl) 1394 if (!piocb->iocb_cmpl) {
1395 lpfc_sli_release_iocbq(phba, piocb); 1395 if (piocb->iocb_flag & LPFC_IO_NVME)
1396 else { 1396 lpfc_nvme_cancel_iocb(phba, piocb);
1397 else
1398 lpfc_sli_release_iocbq(phba, piocb);
1399 } else {
1397 piocb->iocb.ulpStatus = ulpstatus; 1400 piocb->iocb.ulpStatus = ulpstatus;
1398 piocb->iocb.un.ulpWord[4] = ulpWord4; 1401 piocb->iocb.un.ulpWord[4] = ulpWord4;
1399 (piocb->iocb_cmpl) (phba, piocb, piocb); 1402 (piocb->iocb_cmpl) (phba, piocb, piocb);
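
lpfc_sli_cancel_iocbs() now routes handler-less NVME requests through lpfc_nvme_cancel_iocb() instead of silently releasing them. A small generic sketch of that cancellation routing; the req type and helpers are hypothetical, not the driver's API.

#include <stdlib.h>

/* Requests with a completion handler are completed with the error
 * status; handler-less ones go to a protocol-specific cancel hook or
 * straight back to the pool. */
struct req {
	struct req *next;
	int is_nvme;
	void (*done)(struct req *, int status);
};

static void nvme_cancel(struct req *r) { free(r); /* protocol cleanup */ }
static void release_req(struct req *r) { free(r); /* back to the pool */ }

static void cancel_all(struct req *head, int status)
{
	while (head) {
		struct req *r = head;

		head = head->next;
		if (!r->done) {
			if (r->is_nvme)
				nvme_cancel(r);
			else
				release_req(r);
		} else {
			r->done(r, status);   /* complete with error */
		}
	}
}
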
@@ -2426,6 +2429,20 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2426 return; 2429 return;
2427} 2430}
2428 2431
2432static void
2433__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2434{
2435 unsigned long iflags;
2436
2437 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2438 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2439 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2440 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2441 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2442 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2443 }
2444 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2445}
2429 2446
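
__lpfc_sli_rpi_release() factors out the deferred-RPI cleanup that both mailbox completion paths below now share: free the RPI only when NLP_RELEASE_RPI is set, clear the bookkeeping under the node lock, then drop NLP_UNREG_INP. A generic sketch of that flag-guarded release idiom, with hypothetical names mirroring the ndlp flags:

#include <pthread.h>

#define F_RELEASE 0x1u   /* resource must be freed on completion */
#define F_UNREG   0x2u   /* unregistration in progress */

struct node {
	pthread_mutex_t lock;
	unsigned int flags;
	int resource_id;
};

static void free_resource(int id) { (void)id; }   /* free_rpi stand-in */

static void node_release(struct node *n)
{
	if (n->flags & F_RELEASE) {
		free_resource(n->resource_id);
		pthread_mutex_lock(&n->lock);
		n->flags &= ~F_RELEASE;
		n->resource_id = -1;   /* mark unallocated */
		pthread_mutex_unlock(&n->lock);
	}
	n->flags &= ~F_UNREG;          /* no longer pending */
}
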
2430/** 2447/**
2431 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2448 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
@@ -2497,7 +2514,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2497 vport, 2514 vport,
2498 KERN_INFO, LOG_MBOX | LOG_DISCOVERY, 2515 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2499 "1438 UNREG cmpl deferred mbox x%x " 2516 "1438 UNREG cmpl deferred mbox x%x "
2500 "on NPort x%x Data: x%x x%x %p\n", 2517 "on NPort x%x Data: x%x x%x %px\n",
2501 ndlp->nlp_rpi, ndlp->nlp_DID, 2518 ndlp->nlp_rpi, ndlp->nlp_DID,
2502 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp); 2519 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2503 2520
@@ -2507,7 +2524,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2507 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 2524 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2508 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2525 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2509 } else { 2526 } else {
2510 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2527 __lpfc_sli_rpi_release(vport, ndlp);
2511 } 2528 }
2512 pmb->ctx_ndlp = NULL; 2529 pmb->ctx_ndlp = NULL;
2513 } 2530 }
@@ -2555,7 +2572,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2555 vport, KERN_INFO, LOG_MBOX | LOG_SLI, 2572 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2556 "0010 UNREG_LOGIN vpi:%x " 2573 "0010 UNREG_LOGIN vpi:%x "
2557 "rpi:%x DID:%x defer x%x flg x%x " 2574 "rpi:%x DID:%x defer x%x flg x%x "
2558 "map:%x %p\n", 2575 "map:%x %px\n",
2559 vport->vpi, ndlp->nlp_rpi, 2576 vport->vpi, ndlp->nlp_rpi,
2560 ndlp->nlp_DID, ndlp->nlp_defer_did, 2577 ndlp->nlp_DID, ndlp->nlp_defer_did,
2561 ndlp->nlp_flag, 2578 ndlp->nlp_flag,
@@ -2573,7 +2590,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2573 vport, KERN_INFO, LOG_DISCOVERY, 2590 vport, KERN_INFO, LOG_DISCOVERY,
2574 "4111 UNREG cmpl deferred " 2591 "4111 UNREG cmpl deferred "
2575 "clr x%x on " 2592 "clr x%x on "
2576 "NPort x%x Data: x%x %p\n", 2593 "NPort x%x Data: x%x x%px\n",
2577 ndlp->nlp_rpi, ndlp->nlp_DID, 2594 ndlp->nlp_rpi, ndlp->nlp_DID,
2578 ndlp->nlp_defer_did, ndlp); 2595 ndlp->nlp_defer_did, ndlp);
2579 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2596 ndlp->nlp_flag &= ~NLP_UNREG_INP;
@@ -2582,7 +2599,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2582 lpfc_issue_els_plogi( 2599 lpfc_issue_els_plogi(
2583 vport, ndlp->nlp_DID, 0); 2600 vport, ndlp->nlp_DID, 0);
2584 } else { 2601 } else {
2585 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2602 __lpfc_sli_rpi_release(vport, ndlp);
2586 } 2603 }
2587 } 2604 }
2588 } 2605 }
@@ -2695,7 +2712,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2695 2712
2696 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2713 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2697 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2714 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2698 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2715 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2699 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2716 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2700 "x%x x%x x%x\n", 2717 "x%x x%x x%x\n",
2701 pmb->vport ? pmb->vport->vpi : 0, 2718 pmb->vport ? pmb->vport->vpi : 0,
@@ -3961,7 +3978,7 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3961 /* Look on all the FCP Rings for the iotag */ 3978 /* Look on all the FCP Rings for the iotag */
3962 if (phba->sli_rev >= LPFC_SLI_REV4) { 3979 if (phba->sli_rev >= LPFC_SLI_REV4) {
3963 for (i = 0; i < phba->cfg_hdw_queue; i++) { 3980 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3964 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 3981 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
3965 lpfc_sli_abort_iocb_ring(phba, pring); 3982 lpfc_sli_abort_iocb_ring(phba, pring);
3966 } 3983 }
3967 } else { 3984 } else {
@@ -3971,17 +3988,17 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3971} 3988}
3972 3989
3973/** 3990/**
3974 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3991 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
3975 * @phba: Pointer to HBA context object. 3992 * @phba: Pointer to HBA context object.
3976 * 3993 *
3977 * This function flushes all iocbs in the fcp ring and frees all the iocb 3994 * This function flushes all iocbs in the IO ring and frees all the iocb
3978 * objects in txq and txcmplq. This function will not issue abort iocbs 3995 * objects in txq and txcmplq. This function will not issue abort iocbs
3979 * for all the iocb commands in txcmplq, they will just be returned with 3996 * for all the iocb commands in txcmplq, they will just be returned with
3980 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3997 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
3981 * slot has been permanently disabled. 3998 * slot has been permanently disabled.
3982 **/ 3999 **/
3983void 4000void
3984lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 4001lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
3985{ 4002{
3986 LIST_HEAD(txq); 4003 LIST_HEAD(txq);
3987 LIST_HEAD(txcmplq); 4004 LIST_HEAD(txcmplq);
@@ -3992,13 +4009,13 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3992 4009
3993 spin_lock_irq(&phba->hbalock); 4010 spin_lock_irq(&phba->hbalock);
3994 /* Indicate the I/O queues are flushed */ 4011 /* Indicate the I/O queues are flushed */
3995 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 4012 phba->hba_flag |= HBA_IOQ_FLUSH;
3996 spin_unlock_irq(&phba->hbalock); 4013 spin_unlock_irq(&phba->hbalock);
3997 4014
3998 /* Look on all the FCP Rings for the iotag */ 4015 /* Look on all the FCP Rings for the iotag */
3999 if (phba->sli_rev >= LPFC_SLI_REV4) { 4016 if (phba->sli_rev >= LPFC_SLI_REV4) {
4000 for (i = 0; i < phba->cfg_hdw_queue; i++) { 4017 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4001 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 4018 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4002 4019
4003 spin_lock_irq(&pring->ring_lock); 4020 spin_lock_irq(&pring->ring_lock);
4004 /* Retrieve everything on txq */ 4021 /* Retrieve everything on txq */
@@ -4046,56 +4063,6 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4046} 4063}
4047 4064
4048/** 4065/**
4049 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4050 * @phba: Pointer to HBA context object.
4051 *
4052 * This function flushes all wqes in the nvme rings and frees all resources
4053 * in the txcmplq. This function does not issue abort wqes for the IO
4054 * commands in txcmplq, they will just be returned with
4055 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4056 * slot has been permanently disabled.
4057 **/
4058void
4059lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4060{
4061 LIST_HEAD(txcmplq);
4062 struct lpfc_sli_ring *pring;
4063 uint32_t i;
4064 struct lpfc_iocbq *piocb, *next_iocb;
4065
4066 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4067 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
4068 return;
4069
4070 /* Hint to other driver operations that a flush is in progress. */
4071 spin_lock_irq(&phba->hbalock);
4072 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4073 spin_unlock_irq(&phba->hbalock);
4074
4075 /* Cycle through all NVME rings and complete each IO with
4076 * a local driver reason code. This is a flush so no
4077 * abort exchange to FW.
4078 */
4079 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4080 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4081
4082 spin_lock_irq(&pring->ring_lock);
4083 list_for_each_entry_safe(piocb, next_iocb,
4084 &pring->txcmplq, list)
4085 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4086 /* Retrieve everything on the txcmplq */
4087 list_splice_init(&pring->txcmplq, &txcmplq);
4088 pring->txcmplq_cnt = 0;
4089 spin_unlock_irq(&pring->ring_lock);
4090
4091 /* Flush the txcmpq &&&PAE */
4092 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4093 IOSTAT_LOCAL_REJECT,
4094 IOERR_SLI_DOWN);
4095 }
4096}
4097
4098/**
4099 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 4066 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4100 * @phba: Pointer to HBA context object. 4067 * @phba: Pointer to HBA context object.
4101 * @mask: Bit mask to be checked. 4068 * @mask: Bit mask to be checked.
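
With one io_wq now carrying both FCP and NVME traffic, the NVME-only flush above disappears and the HBA_FCP_IOQ_FLUSH / HBA_NVME_IOQ_FLUSH pair collapses into a single HBA_IOQ_FLUSH tested by the issue and abort paths. A compact userspace sketch of the flag-guarded flush pattern, with hypothetical structures:

#include <pthread.h>

/* Mark the flush under the lock so issue/abort paths can bail out,
 * then fail back queued requests. The driver additionally walks each
 * per-hdwq ring here. */
#define IOQ_FLUSH 0x1u

struct hba {
	pthread_mutex_t lock;
	unsigned int flags;
};

static void flush_io_rings(struct hba *h)
{
	pthread_mutex_lock(&h->lock);
	h->flags |= IOQ_FLUSH;            /* advertise the flush */
	pthread_mutex_unlock(&h->lock);
	/* ...splice each ring's txq/txcmplq and cancel it (omitted)... */
}

static int issue_io(struct hba *h)
{
	int flushing;

	pthread_mutex_lock(&h->lock);
	flushing = h->flags & IOQ_FLUSH;
	pthread_mutex_unlock(&h->lock);
	return flushing ? -1 : 0;         /* reject new I/O mid-flush */
}
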
@@ -4495,7 +4462,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
4495 * checking during resets the device. The caller is not required to hold 4462 * checking during resets the device. The caller is not required to hold
4496 * any locks. 4463 * any locks.
4497 * 4464 *
4498 * This function returns 0 always. 4465 * This function returns 0 on success else returns negative error code.
4499 **/ 4466 **/
4500int 4467int
4501lpfc_sli4_brdreset(struct lpfc_hba *phba) 4468lpfc_sli4_brdreset(struct lpfc_hba *phba)
@@ -4652,8 +4619,10 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4652 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4619 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4653 4620
4654 rc = lpfc_sli4_brdreset(phba); 4621 rc = lpfc_sli4_brdreset(phba);
4655 if (rc) 4622 if (rc) {
4656 return rc; 4623 phba->link_state = LPFC_HBA_ERROR;
4624 goto hba_down_queue;
4625 }
4657 4626
4658 spin_lock_irq(&phba->hbalock); 4627 spin_lock_irq(&phba->hbalock);
4659 phba->pport->stopped = 0; 4628 phba->pport->stopped = 0;
@@ -4668,6 +4637,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4668 if (hba_aer_enabled) 4637 if (hba_aer_enabled)
4669 pci_disable_pcie_error_reporting(phba->pcidev); 4638 pci_disable_pcie_error_reporting(phba->pcidev);
4670 4639
4640hba_down_queue:
4671 lpfc_hba_down_post(phba); 4641 lpfc_hba_down_post(phba);
4672 lpfc_sli4_queue_destroy(phba); 4642 lpfc_sli4_queue_destroy(phba);
4673 4643
@@ -5584,10 +5554,8 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5584 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 5554 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5585 qp = &sli4_hba->hdwq[qidx]; 5555 qp = &sli4_hba->hdwq[qidx];
5586 /* ARM the corresponding CQ */ 5556 /* ARM the corresponding CQ */
5587 sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0, 5557 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5588 LPFC_QUEUE_REARM); 5558 LPFC_QUEUE_REARM);
5589 sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
5590 LPFC_QUEUE_REARM);
5591 } 5559 }
5592 5560
5593 /* Loop thru all IRQ vectors */ 5561 /* Loop thru all IRQ vectors */
@@ -7243,7 +7211,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7243 else 7211 else
7244 phba->hba_flag &= ~HBA_FIP_SUPPORT; 7212 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7245 7213
7246 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 7214 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7247 7215
7248 if (phba->sli_rev != LPFC_SLI_REV4) { 7216 if (phba->sli_rev != LPFC_SLI_REV4) {
7249 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7217 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7972,7 +7940,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7972 7940
7973 /* Mbox cmd <mbxCommand> timeout */ 7941 /* Mbox cmd <mbxCommand> timeout */
7974 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7942 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7975 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 7943 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
7976 mb->mbxCommand, 7944 mb->mbxCommand,
7977 phba->pport->port_state, 7945 phba->pport->port_state,
7978 phba->sli.sli_flag, 7946 phba->sli.sli_flag,
@@ -9333,11 +9301,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9333 memset(wqe, 0, sizeof(union lpfc_wqe128)); 9301 memset(wqe, 0, sizeof(union lpfc_wqe128));
9334 /* Some of the fields are in the right position already */ 9302 /* Some of the fields are in the right position already */
9335 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 9303 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9336 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { 9304 /* The ct field has moved so reset */
9337 /* The ct field has moved so reset */ 9305 wqe->generic.wqe_com.word7 = 0;
9338 wqe->generic.wqe_com.word7 = 0; 9306 wqe->generic.wqe_com.word10 = 0;
9339 wqe->generic.wqe_com.word10 = 0;
9340 }
9341 9307
9342 abort_tag = (uint32_t) iocbq->iotag; 9308 abort_tag = (uint32_t) iocbq->iotag;
9343 xritag = iocbq->sli4_xritag; 9309 xritag = iocbq->sli4_xritag;
@@ -9796,7 +9762,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9796 * we re-construct this WQE here based on information in 9762 * we re-construct this WQE here based on information in
9797 * iocbq from scratch. 9763 * iocbq from scratch.
9798 */ 9764 */
9799 memset(wqe, 0, sizeof(union lpfc_wqe)); 9765 memset(wqe, 0, sizeof(*wqe));
9800 /* OX_ID is invariable to who sent ABTS to CT exchange */ 9766 /* OX_ID is invariable to who sent ABTS to CT exchange */
9801 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9767 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9802 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9768 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
@@ -9843,6 +9809,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9843 9809
9844 break; 9810 break;
9845 case CMD_SEND_FRAME: 9811 case CMD_SEND_FRAME:
9812 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
9813 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
9814 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
9815 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
9816 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
9817 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
9818 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
9819 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
9820 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9846 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9821 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9847 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9822 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9848 return 0; 9823 return 0;
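
The SEND_FRAME case now programs the WQE explicitly (SOF/EOF delimiter bytes, length location, XBL/DBDE/XC bits, command type and CQ id) rather than relying on word7/word10 surviving the earlier memcpy. The bf_set() calls are mask-and-shift bitfield accessors; below is a simplified sketch of that macro style operating on a bare 32-bit word, with hypothetical field shifts and masks (the real definitions live in the lpfc headers and work on struct members).

#include <stdint.h>
#include <stdio.h>

#define wqe_sof_shift 24
#define wqe_sof_mask  0xffu
#define wqe_eof_shift 16
#define wqe_eof_mask  0xffu

#define bf_set(name, word, val) \
	((word) = ((word) & ~(name##_mask << name##_shift)) | \
		  (((val) & name##_mask) << name##_shift))
#define bf_get(name, word) (((word) >> name##_shift) & name##_mask)

int main(void)
{
	uint32_t word7 = 0;

	bf_set(wqe_sof, word7, 0x2E);   /* start-of-frame delimiter byte */
	bf_set(wqe_eof, word7, 0x41);   /* end-of-frame delimiter byte */
	printf("word7=0x%08x sof=0x%02x eof=0x%02x\n",
	       (unsigned int)word7,
	       (unsigned int)bf_get(wqe_sof, word7),
	       (unsigned int)bf_get(wqe_eof, word7));
	return 0;
}
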
@@ -9904,7 +9879,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9904 /* Get the WQ */ 9879 /* Get the WQ */
9905 if ((piocb->iocb_flag & LPFC_IO_FCP) || 9880 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9906 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 9881 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9907 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq; 9882 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
9908 } else { 9883 } else {
9909 wq = phba->sli4_hba.els_wq; 9884 wq = phba->sli4_hba.els_wq;
9910 } 9885 }
@@ -10051,7 +10026,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10051 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1; 10026 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10052 piocb->hba_wqidx = lpfc_cmd->hdwq_no; 10027 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10053 } 10028 }
10054 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring; 10029 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10055 } else { 10030 } else {
10056 if (unlikely(!phba->sli4_hba.els_wq)) 10031 if (unlikely(!phba->sli4_hba.els_wq))
10057 return NULL; 10032 return NULL;
@@ -10504,7 +10479,7 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
10504 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10479 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10505 /* Initialize list headers for txq and txcmplq as double linked lists */ 10480 /* Initialize list headers for txq and txcmplq as double linked lists */
10506 for (i = 0; i < phba->cfg_hdw_queue; i++) { 10481 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10507 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 10482 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10508 pring->flag = 0; 10483 pring->flag = 0;
10509 pring->ringno = LPFC_FCP_RING; 10484 pring->ringno = LPFC_FCP_RING;
10510 pring->txcmplq_cnt = 0; 10485 pring->txcmplq_cnt = 0;
@@ -10523,16 +10498,6 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
10523 spin_lock_init(&pring->ring_lock); 10498 spin_lock_init(&pring->ring_lock);
10524 10499
10525 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10500 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10526 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10527 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
10528 pring->flag = 0;
10529 pring->ringno = LPFC_FCP_RING;
10530 pring->txcmplq_cnt = 0;
10531 INIT_LIST_HEAD(&pring->txq);
10532 INIT_LIST_HEAD(&pring->txcmplq);
10533 INIT_LIST_HEAD(&pring->iocb_continueq);
10534 spin_lock_init(&pring->ring_lock);
10535 }
10536 pring = phba->sli4_hba.nvmels_wq->pring; 10501 pring = phba->sli4_hba.nvmels_wq->pring;
10537 pring->flag = 0; 10502 pring->flag = 0;
10538 pring->ringno = LPFC_ELS_RING; 10503 pring->ringno = LPFC_ELS_RING;
@@ -10796,9 +10761,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
10796 pring = qp->pring; 10761 pring = qp->pring;
10797 if (!pring) 10762 if (!pring)
10798 continue; 10763 continue;
10799 spin_lock_irq(&pring->ring_lock); 10764 spin_lock(&pring->ring_lock);
10800 list_splice_init(&pring->txq, &completions); 10765 list_splice_init(&pring->txq, &completions);
10801 spin_unlock_irq(&pring->ring_lock); 10766 spin_unlock(&pring->ring_lock);
10802 if (pring == phba->sli4_hba.els_wq->pring) { 10767 if (pring == phba->sli4_hba.els_wq->pring) {
10803 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10768 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10804 /* Set the lpfc data pending flag */ 10769 /* Set the lpfc data pending flag */
@@ -10979,7 +10944,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10979 spin_unlock_irq(&phba->hbalock); 10944 spin_unlock_irq(&phba->hbalock);
10980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10981 "0402 Cannot find virtual addr for buffer tag on " 10946 "0402 Cannot find virtual addr for buffer tag on "
10982 "ring %d Data x%lx x%p x%p x%x\n", 10947 "ring %d Data x%lx x%px x%px x%x\n",
10983 pring->ringno, (unsigned long) tag, 10948 pring->ringno, (unsigned long) tag,
10984 slp->next, slp->prev, pring->postbufq_cnt); 10949 slp->next, slp->prev, pring->postbufq_cnt);
10985 10950
@@ -11023,7 +10988,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11023 spin_unlock_irq(&phba->hbalock); 10988 spin_unlock_irq(&phba->hbalock);
11024 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11025 "0410 Cannot find virtual addr for mapped buf on " 10990 "0410 Cannot find virtual addr for mapped buf on "
11026 "ring %d Data x%llx x%p x%p x%x\n", 10991 "ring %d Data x%llx x%px x%px x%x\n",
11027 pring->ringno, (unsigned long long)phys, 10992 pring->ringno, (unsigned long long)phys,
11028 slp->next, slp->prev, pring->postbufq_cnt); 10993 slp->next, slp->prev, pring->postbufq_cnt);
11029 return NULL; 10994 return NULL;
@@ -11078,13 +11043,16 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11078 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 11043 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11079 11044
11080 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 11045 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11081 "0327 Cannot abort els iocb %p " 11046 "0327 Cannot abort els iocb x%px "
11082 "with tag %x context %x, abort status %x, " 11047 "with tag %x context %x, abort status %x, "
11083 "abort code %x\n", 11048 "abort code %x\n",
11084 abort_iocb, abort_iotag, abort_context, 11049 abort_iocb, abort_iotag, abort_context,
11085 irsp->ulpStatus, irsp->un.ulpWord[4]); 11050 irsp->ulpStatus, irsp->un.ulpWord[4]);
11086 11051
11087 spin_unlock_irq(&phba->hbalock); 11052 spin_unlock_irq(&phba->hbalock);
11053 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11054 irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
11055 lpfc_sli_release_iocbq(phba, abort_iocb);
11088 } 11056 }
11089release_iocb: 11057release_iocb:
11090 lpfc_sli_release_iocbq(phba, cmdiocb); 11058 lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -11493,7 +11461,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11493 int i; 11461 int i;
11494 11462
11495 /* all I/Os are in process of being flushed */ 11463 /* all I/Os are in process of being flushed */
11496 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) 11464 if (phba->hba_flag & HBA_IOQ_FLUSH)
11497 return errcnt; 11465 return errcnt;
11498 11466
11499 for (i = 1; i <= phba->sli.last_iotag; i++) { 11467 for (i = 1; i <= phba->sli.last_iotag; i++) {
@@ -11603,7 +11571,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11603 spin_lock_irqsave(&phba->hbalock, iflags); 11571 spin_lock_irqsave(&phba->hbalock, iflags);
11604 11572
11605 /* all I/Os are in process of being flushed */ 11573 /* all I/Os are in process of being flushed */
11606 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11574 if (phba->hba_flag & HBA_IOQ_FLUSH) {
11607 spin_unlock_irqrestore(&phba->hbalock, iflags); 11575 spin_unlock_irqrestore(&phba->hbalock, iflags);
11608 return 0; 11576 return 0;
11609 } 11577 }
@@ -11627,7 +11595,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11627 11595
11628 if (phba->sli_rev == LPFC_SLI_REV4) { 11596 if (phba->sli_rev == LPFC_SLI_REV4) {
11629 pring_s4 = 11597 pring_s4 =
11630 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring; 11598 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11631 if (!pring_s4) { 11599 if (!pring_s4) {
11632 spin_unlock(&lpfc_cmd->buf_lock); 11600 spin_unlock(&lpfc_cmd->buf_lock);
11633 continue; 11601 continue;
@@ -13336,8 +13304,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13336 unsigned long iflags; 13304 unsigned long iflags;
13337 13305
13338 switch (cq->subtype) { 13306 switch (cq->subtype) {
13339 case LPFC_FCP: 13307 case LPFC_IO:
13340 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq); 13308 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13309 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13310 /* Notify aborted XRI for NVME work queue */
13311 if (phba->nvmet_support)
13312 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13313 }
13341 workposted = false; 13314 workposted = false;
13342 break; 13315 break;
13343 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 13316 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
@@ -13355,15 +13328,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13355 spin_unlock_irqrestore(&phba->hbalock, iflags); 13328 spin_unlock_irqrestore(&phba->hbalock, iflags);
13356 workposted = true; 13329 workposted = true;
13357 break; 13330 break;
13358 case LPFC_NVME:
13359 /* Notify aborted XRI for NVME work queue */
13360 if (phba->nvmet_support)
13361 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13362 else
13363 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
13364
13365 workposted = false;
13366 break;
13367 default: 13331 default:
13368 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13332 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13369 "0603 Invalid CQ subtype %d: " 13333 "0603 Invalid CQ subtype %d: "
@@ -13691,7 +13655,7 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13691 &delay); 13655 &delay);
13692 break; 13656 break;
13693 case LPFC_WCQ: 13657 case LPFC_WCQ:
13694 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME) 13658 if (cq->subtype == LPFC_IO)
13695 workposted |= __lpfc_sli4_process_cq(phba, cq, 13659 workposted |= __lpfc_sli4_process_cq(phba, cq,
13696 lpfc_sli4_fp_handle_cqe, 13660 lpfc_sli4_fp_handle_cqe,
13697 &delay); 13661 &delay);
@@ -14008,10 +13972,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14008 cq->CQ_wq++; 13972 cq->CQ_wq++;
14009 /* Process the WQ complete event */ 13973 /* Process the WQ complete event */
14010 phba->last_completion_time = jiffies; 13974 phba->last_completion_time = jiffies;
14011 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) 13975 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14012 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14013 (struct lpfc_wcqe_complete *)&wcqe);
14014 if (cq->subtype == LPFC_NVME_LS)
14015 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13976 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14016 (struct lpfc_wcqe_complete *)&wcqe); 13977 (struct lpfc_wcqe_complete *)&wcqe);
14017 break; 13978 break;
@@ -16918,6 +16879,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16918 struct fc_vft_header *fc_vft_hdr; 16879 struct fc_vft_header *fc_vft_hdr;
16919 uint32_t *header = (uint32_t *) fc_hdr; 16880 uint32_t *header = (uint32_t *) fc_hdr;
16920 16881
16882#define FC_RCTL_MDS_DIAGS 0xF4
16883
16921 switch (fc_hdr->fh_r_ctl) { 16884 switch (fc_hdr->fh_r_ctl) {
16922 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16885 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16923 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16886 case FC_RCTL_DD_SOL_DATA: /* solicited data */
@@ -17445,7 +17408,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17445 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 17408 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17446 ctiocb->context1 = lpfc_nlp_get(ndlp); 17409 ctiocb->context1 = lpfc_nlp_get(ndlp);
17447 17410
17448 ctiocb->iocb_cmpl = NULL;
17449 ctiocb->vport = phba->pport; 17411 ctiocb->vport = phba->pport;
17450 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 17412 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17451 ctiocb->sli4_lxritag = NO_XRI; 17413 ctiocb->sli4_lxritag = NO_XRI;
@@ -17928,6 +17890,17 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17928 fcfi = bf_get(lpfc_rcqe_fcf_id, 17890 fcfi = bf_get(lpfc_rcqe_fcf_id,
17929 &dmabuf->cq_event.cqe.rcqe_cmpl); 17891 &dmabuf->cq_event.cqe.rcqe_cmpl);
17930 17892
17893 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17894 vport = phba->pport;
17895 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17896 "2023 MDS Loopback %d bytes\n",
17897 bf_get(lpfc_rcqe_length,
17898 &dmabuf->cq_event.cqe.rcqe_cmpl));
17899 /* Handle MDS Loopback frames */
17900 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17901 return;
17902 }
17903
17931 /* d_id this frame is directed to */ 17904 /* d_id this frame is directed to */
17932 did = sli4_did_from_fc_hdr(fc_hdr); 17905 did = sli4_did_from_fc_hdr(fc_hdr);
17933 17906
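
The new early check above recognizes MDS diagnostic loopback frames purely from the FC header, R_CTL 0xF4 with type 0xFF, and short-circuits them to lpfc_sli4_handle_mds_loopback() before any D_ID routing. A minimal sketch of that header-based dispatch; the abbreviated fc_frame_header layout and the FC_TYPE_MDS_LB name are illustrative assumptions.

#include <stdint.h>
#include <stdbool.h>

#define FC_RCTL_MDS_DIAGS 0xF4   /* vendor-unique diagnostic R_CTL */
#define FC_TYPE_MDS_LB    0xFF   /* assumed name for the 0xFF type */

/* Abbreviated for illustration; the real header carries the full
 * 24-byte FC frame header layout. */
struct fc_frame_header {
	uint8_t fh_r_ctl;
	uint8_t fh_type;
};

static bool is_mds_loopback(const struct fc_frame_header *fh)
{
	return fh->fh_r_ctl == FC_RCTL_MDS_DIAGS &&
	       fh->fh_type == FC_TYPE_MDS_LB;
}

static void handle_rx(const struct fc_frame_header *fh)
{
	if (is_mds_loopback(fh)) {
		/* loop the frame straight back out the port (omitted) */
		return;
	}
	/* normal path: route on the destination ID (omitted) */
}
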
@@ -18211,6 +18184,10 @@ __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18211 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 18184 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18212 phba->sli4_hba.rpi_count--; 18185 phba->sli4_hba.rpi_count--;
18213 phba->sli4_hba.max_cfg_param.rpi_used--; 18186 phba->sli4_hba.max_cfg_param.rpi_used--;
18187 } else {
18188 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18189 "2016 rpi %x not inuse\n",
18190 rpi);
18214 } 18191 }
18215} 18192}
18216 18193
@@ -19461,7 +19438,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
19461 19438
19462 if (phba->link_flag & LS_MDS_LOOPBACK) { 19439 if (phba->link_flag & LS_MDS_LOOPBACK) {
19463 /* MDS WQE are posted only to first WQ*/ 19440 /* MDS WQE are posted only to first WQ*/
19464 wq = phba->sli4_hba.hdwq[0].fcp_wq; 19441 wq = phba->sli4_hba.hdwq[0].io_wq;
19465 if (unlikely(!wq)) 19442 if (unlikely(!wq))
19466 return 0; 19443 return 0;
19467 pring = wq->pring; 19444 pring = wq->pring;
@@ -19712,10 +19689,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19712 /* NVME_FCREQ and NVME_ABTS requests */ 19689 /* NVME_FCREQ and NVME_ABTS requests */
19713 if (pwqe->iocb_flag & LPFC_IO_NVME) { 19690 if (pwqe->iocb_flag & LPFC_IO_NVME) {
19714 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 19691 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19715 wq = qp->nvme_wq; 19692 wq = qp->io_wq;
19716 pring = wq->pring; 19693 pring = wq->pring;
19717 19694
19718 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19695 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19719 19696
19720 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19697 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19721 qp, wq_access); 19698 qp, wq_access);
@@ -19732,7 +19709,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19732 /* NVMET requests */ 19709 /* NVMET requests */
19733 if (pwqe->iocb_flag & LPFC_IO_NVMET) { 19710 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
19734 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 19711 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19735 wq = qp->nvme_wq; 19712 wq = qp->io_wq;
19736 pring = wq->pring; 19713 pring = wq->pring;
19737 19714
19738 ctxp = pwqe->context2; 19715 ctxp = pwqe->context2;
@@ -19743,7 +19720,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
19743 } 19720 }
19744 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19721 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
19745 pwqe->sli4_xritag); 19722 pwqe->sli4_xritag);
19746 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19723 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
19747 19724
19748 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19725 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
19749 qp, wq_access); 19726 qp, wq_access);
@@ -19790,9 +19767,7 @@ void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19790 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { 19767 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
19791 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19768 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19792 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19769 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19793 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 19770 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
19794 if (qp->nvme_wq)
19795 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19796 19771
19797 multixri_pool->stat_pbl_count = pbl_pool->count; 19772 multixri_pool->stat_pbl_count = pbl_pool->count;
19798 multixri_pool->stat_pvt_count = pvt_pool->count; 19773 multixri_pool->stat_pvt_count = pvt_pool->count;
@@ -19862,12 +19837,9 @@ void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19862 watermark_max = xri_limit; 19837 watermark_max = xri_limit;
19863 watermark_min = xri_limit / 2; 19838 watermark_min = xri_limit / 2;
19864 19839
19865 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 19840 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
19866 abts_io_bufs = qp->abts_scsi_io_bufs; 19841 abts_io_bufs = qp->abts_scsi_io_bufs;
19867 if (qp->nvme_wq) { 19842 abts_io_bufs += qp->abts_nvme_io_bufs;
19868 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19869 abts_io_bufs += qp->abts_nvme_io_bufs;
19870 }
19871 19843
19872 new_watermark = txcmplq_cnt + abts_io_bufs; 19844 new_watermark = txcmplq_cnt + abts_io_bufs;
19873 new_watermark = min(watermark_max, new_watermark); 19845 new_watermark = min(watermark_max, new_watermark);
@@ -20142,12 +20114,9 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20142 pbl_pool = &qp->p_multixri_pool->pbl_pool; 20114 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20143 pvt_pool = &qp->p_multixri_pool->pvt_pool; 20115 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20144 20116
20145 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 20117 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20146 abts_io_bufs = qp->abts_scsi_io_bufs; 20118 abts_io_bufs = qp->abts_scsi_io_bufs;
20147 if (qp->nvme_wq) { 20119 abts_io_bufs += qp->abts_nvme_io_bufs;
20148 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
20149 abts_io_bufs += qp->abts_nvme_io_bufs;
20150 }
20151 20120
20152 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; 20121 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20153 xri_limit = qp->p_multixri_pool->xri_limit; 20122 xri_limit = qp->p_multixri_pool->xri_limit;
@@ -20188,6 +20157,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20188 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, 20157 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20189 iflag); 20158 iflag);
20190 } 20159 }
20160
20161 if (phba->cfg_xpsgl && !phba->nvmet_support &&
20162 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20163 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20164
20165 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20166 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20191} 20167}
20192 20168
20193/** 20169/**
@@ -20402,3 +20378,288 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20402 20378
20403 return lpfc_cmd; 20379 return lpfc_cmd;
20404} 20380}
20381
20382/**
20383 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
20384 * @phba: The HBA for which this call is being executed.
20385 * @lpfc_buf: IO buf structure to append the SGL chunk
20386 *
20387 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
20388 * and will allocate an SGL chunk if the pool is empty.
20389 *
20390 * Return codes:
20391 * NULL - Error
20392 * Pointer to sli4_hybrid_sgl - Success
20393 **/
20394struct sli4_hybrid_sgl *
20395lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20396{
20397 struct sli4_hybrid_sgl *list_entry = NULL;
20398 struct sli4_hybrid_sgl *tmp = NULL;
20399 struct sli4_hybrid_sgl *allocated_sgl = NULL;
20400 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20401 struct list_head *buf_list = &hdwq->sgl_list;
20402
20403 spin_lock_irq(&hdwq->hdwq_lock);
20404
20405 if (likely(!list_empty(buf_list))) {
20406 /* break off 1 chunk from the sgl_list */
20407 list_for_each_entry_safe(list_entry, tmp,
20408 buf_list, list_node) {
20409 list_move_tail(&list_entry->list_node,
20410 &lpfc_buf->dma_sgl_xtra_list);
20411 break;
20412 }
20413 } else {
20414 /* allocate more */
20415 spin_unlock_irq(&hdwq->hdwq_lock);
20416 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20417 cpu_to_node(smp_processor_id()));
20418 if (!tmp) {
20419 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20420 "8353 error kmalloc memory for HDWQ "
20421 "%d %s\n",
20422 lpfc_buf->hdwq_no, __func__);
20423 return NULL;
20424 }
20425
20426 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
20427 GFP_ATOMIC, &tmp->dma_phys_sgl);
20428 if (!tmp->dma_sgl) {
20429 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20430 "8354 error pool_alloc memory for HDWQ "
20431 "%d %s\n",
20432 lpfc_buf->hdwq_no, __func__);
20433 kfree(tmp);
20434 return NULL;
20435 }
20436
20437 spin_lock_irq(&hdwq->hdwq_lock);
20438 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
20439 }
20440
20441 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
20442 struct sli4_hybrid_sgl,
20443 list_node);
20444
20445 spin_unlock_irq(&hdwq->hdwq_lock);
20446
20447 return allocated_sgl;
20448}
20449
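
lpfc_get_sgl_per_hdwq() above is a get-or-allocate pool: pop a recycled chunk from the hdwq free list under the lock, or drop the lock and fall back to a GFP_ATOMIC allocation, then thread the chunk onto the I/O buffer's dma_sgl_xtra_list so teardown can find it. A compact userspace sketch of the core pattern, with a mutex and a singly linked free list standing in for the kernel list API and spinlock:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical chunk type; the driver uses list_head chains. */
struct chunk { struct chunk *next; };

struct pool {
	pthread_mutex_t lock;
	struct chunk *free_list;   /* recycled chunks */
};

static struct chunk *pool_get(struct pool *p)
{
	struct chunk *c;

	pthread_mutex_lock(&p->lock);
	c = p->free_list;
	if (c) {                          /* fast path: reuse */
		p->free_list = c->next;
		pthread_mutex_unlock(&p->lock);
		return c;
	}
	pthread_mutex_unlock(&p->lock);   /* slow path: allocate fresh */
	return malloc(sizeof(*c));        /* GFP_ATOMIC analogue */
}

static void pool_put(struct pool *p, struct chunk *c)
{
	pthread_mutex_lock(&p->lock);
	c->next = p->free_list;           /* push back for reuse */
	p->free_list = c;
	pthread_mutex_unlock(&p->lock);
}
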
20450/**
20451 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
20452 * @phba: The HBA for which this call is being executed.
20453 * @lpfc_buf: IO buf structure with the SGL chunk
20454 *
20455 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
20456 *
20457 * Return codes:
20458 * 0 - Success
20459 * -EINVAL - Error
20460 **/
20461int
20462lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20463{
20464 int rc = 0;
20465 struct sli4_hybrid_sgl *list_entry = NULL;
20466 struct sli4_hybrid_sgl *tmp = NULL;
20467 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20468 struct list_head *buf_list = &hdwq->sgl_list;
20469
20470 spin_lock_irq(&hdwq->hdwq_lock);
20471
20472 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
20473 list_for_each_entry_safe(list_entry, tmp,
20474 &lpfc_buf->dma_sgl_xtra_list,
20475 list_node) {
20476 list_move_tail(&list_entry->list_node,
20477 buf_list);
20478 }
20479 } else {
20480 rc = -EINVAL;
20481 }
20482
20483 spin_unlock_irq(&hdwq->hdwq_lock);
20484 return rc;
20485}
20486
20487/**
20488 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
20489 * @phba: phba object
20490 * @hdwq: hdwq to cleanup sgl buff resources on
20491 *
20492 * This routine frees all SGL chunks of hdwq SGL chunk pool.
20493 *
20494 * Return codes:
20495 * None
20496 **/
20497void
20498lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
20499 struct lpfc_sli4_hdw_queue *hdwq)
20500{
20501 struct list_head *buf_list = &hdwq->sgl_list;
20502 struct sli4_hybrid_sgl *list_entry = NULL;
20503 struct sli4_hybrid_sgl *tmp = NULL;
20504
20505 spin_lock_irq(&hdwq->hdwq_lock);
20506
20507 /* Free sgl pool */
20508 list_for_each_entry_safe(list_entry, tmp,
20509 buf_list, list_node) {
20510 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
20511 list_entry->dma_sgl,
20512 list_entry->dma_phys_sgl);
20513 list_del(&list_entry->list_node);
20514 kfree(list_entry);
20515 }
20516
20517 spin_unlock_irq(&hdwq->hdwq_lock);
20518}
20519
20520/**
20521 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
20522 * @phba: The HBA for which this call is being executed.
20523 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
20524 *
20525 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
20526 * and will allocate an CMD/RSP buffer if the pool is empty.
20527 *
20528 * Return codes:
20529 * NULL - Error
20530 * Pointer to fcp_cmd_rsp_buf - Success
20531 **/
20532struct fcp_cmd_rsp_buf *
20533lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20534 struct lpfc_io_buf *lpfc_buf)
20535{
20536 struct fcp_cmd_rsp_buf *list_entry = NULL;
20537 struct fcp_cmd_rsp_buf *tmp = NULL;
20538 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
20539 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20540 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20541
20542 spin_lock_irq(&hdwq->hdwq_lock);
20543
20544 if (likely(!list_empty(buf_list))) {
20545 /* break off 1 chunk from the list */
20546 list_for_each_entry_safe(list_entry, tmp,
20547 buf_list,
20548 list_node) {
20549 list_move_tail(&list_entry->list_node,
20550 &lpfc_buf->dma_cmd_rsp_list);
20551 break;
20552 }
20553 } else {
20554 /* allocate more */
20555 spin_unlock_irq(&hdwq->hdwq_lock);
20556 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20557 cpu_to_node(smp_processor_id()));
20558 if (!tmp) {
20559 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20560 "8355 error kmalloc memory for HDWQ "
20561 "%d %s\n",
20562 lpfc_buf->hdwq_no, __func__);
20563 return NULL;
20564 }
20565
20566 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
20567 GFP_ATOMIC,
20568 &tmp->fcp_cmd_rsp_dma_handle);
20569
20570 if (!tmp->fcp_cmnd) {
20571 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20572 "8356 error pool_alloc memory for HDWQ "
20573 "%d %s\n",
20574 lpfc_buf->hdwq_no, __func__);
20575 kfree(tmp);
20576 return NULL;
20577 }
20578
20579 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
20580 sizeof(struct fcp_cmnd));
20581
20582 spin_lock_irq(&hdwq->hdwq_lock);
20583 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
20584 }
20585
20586 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
20587 struct fcp_cmd_rsp_buf,
20588 list_node);
20589
20590 spin_unlock_irq(&hdwq->hdwq_lock);
20591
20592 return allocated_buf;
20593}
20594
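
Worth noting in the allocation path above: a single DMA-pool allocation holds the FCP command immediately followed by the response, so fcp_rsp is computed by pointer arithmetic instead of a second allocation and a second DMA handle. A tiny sketch of that co-allocation; the structure sizes here are assumed.

#include <stdlib.h>
#include <stdint.h>

/* Command and response share one buffer (and, in the driver, one
 * DMA mapping). */
struct fcp_cmnd { uint8_t bytes[32]; };
struct fcp_rsp  { uint8_t bytes[96]; };

struct cmd_rsp {
	struct fcp_cmnd *cmnd;
	struct fcp_rsp *rsp;
};

static int cmd_rsp_alloc(struct cmd_rsp *b)
{
	void *mem = malloc(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	if (!mem)
		return -1;
	b->cmnd = mem;
	/* the response sits right behind the command */
	b->rsp = (struct fcp_rsp *)((uint8_t *)mem + sizeof(struct fcp_cmnd));
	return 0;
}
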
20595/**
20596 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
20597 * @phba: The HBA for which this call is being executed.
20598 * @lpfc_buf: IO buf structure with the CMD/RSP buf
20599 *
20600 * This routine puts one CMD/RSP buffer into the executing CPU's CMD/RSP pool.
20601 *
20602 * Return codes:
20603 * 0 - Success
20604 * -EINVAL - Error
20605 **/
20606int
20607lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20608 struct lpfc_io_buf *lpfc_buf)
20609{
20610 int rc = 0;
20611 struct fcp_cmd_rsp_buf *list_entry = NULL;
20612 struct fcp_cmd_rsp_buf *tmp = NULL;
20613 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20614 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20615
20616 spin_lock_irq(&hdwq->hdwq_lock);
20617
20618 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
20619 list_for_each_entry_safe(list_entry, tmp,
20620 &lpfc_buf->dma_cmd_rsp_list,
20621 list_node) {
20622 list_move_tail(&list_entry->list_node,
20623 buf_list);
20624 }
20625 } else {
20626 rc = -EINVAL;
20627 }
20628
20629 spin_unlock_irq(&hdwq->hdwq_lock);
20630 return rc;
20631}
20632
20633/**
20634 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
20635 * @phba: phba object
20636 * @hdwq: hdwq to cleanup cmd rsp buff resources on
20637 *
20638 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
20639 *
20640 * Return codes:
20641 * None
20642 **/
20643void
20644lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20645 struct lpfc_sli4_hdw_queue *hdwq)
20646{
20647 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20648 struct fcp_cmd_rsp_buf *list_entry = NULL;
20649 struct fcp_cmd_rsp_buf *tmp = NULL;
20650
20651 spin_lock_irq(&hdwq->hdwq_lock);
20652
20653 /* Free cmd_rsp buf pool */
20654 list_for_each_entry_safe(list_entry, tmp,
20655 buf_list,
20656 list_node) {
20657 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
20658 list_entry->fcp_cmnd,
20659 list_entry->fcp_cmd_rsp_dma_handle);
20660 list_del(&list_entry->list_node);
20661 kfree(list_entry);
20662 }
20663
20664 spin_unlock_irq(&hdwq->hdwq_lock);
20665}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 467b8270f7fd..37fbcb46387e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -365,9 +365,18 @@ struct lpfc_io_buf {
365 /* Common fields */ 365 /* Common fields */
366 struct list_head list; 366 struct list_head list;
367 void *data; 367 void *data;
368
368 dma_addr_t dma_handle; 369 dma_addr_t dma_handle;
369 dma_addr_t dma_phys_sgl; 370 dma_addr_t dma_phys_sgl;
370 struct sli4_sge *dma_sgl; 371
372 struct sli4_sge *dma_sgl; /* initial segment chunk */
373
374 /* linked list of extra sli4_hybrid_sge */
375 struct list_head dma_sgl_xtra_list;
376
377 /* list head for fcp_cmd_rsp buf */
378 struct list_head dma_cmd_rsp_list;
379
371 struct lpfc_iocbq cur_iocbq; 380 struct lpfc_iocbq cur_iocbq;
372 struct lpfc_sli4_hdw_queue *hdwq; 381 struct lpfc_sli4_hdw_queue *hdwq;
373 uint16_t hdwq_no; 382 uint16_t hdwq_no;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a81ef0293696..0d4882a9e634 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -49,9 +49,6 @@
49#define LPFC_FCP_MQ_THRESHOLD_MAX 256 49#define LPFC_FCP_MQ_THRESHOLD_MAX 256
50#define LPFC_FCP_MQ_THRESHOLD_DEF 8 50#define LPFC_FCP_MQ_THRESHOLD_DEF 8
51 51
52/* Common buffer size to accommodate SCSI and NVME IO buffers */
53#define LPFC_COMMON_IO_BUF_SZ 768
54
55/* 52/*
56 * Provide the default FCF Record attributes used by the driver 53 * Provide the default FCF Record attributes used by the driver
57 * when nonFIP mode is configured and there is no other default 54 * when nonFIP mode is configured and there is no other default
@@ -114,9 +111,8 @@ enum lpfc_sli4_queue_type {
114enum lpfc_sli4_queue_subtype { 111enum lpfc_sli4_queue_subtype {
115 LPFC_NONE, 112 LPFC_NONE,
116 LPFC_MBOX, 113 LPFC_MBOX,
117 LPFC_FCP, 114 LPFC_IO,
118 LPFC_ELS, 115 LPFC_ELS,
119 LPFC_NVME,
120 LPFC_NVMET, 116 LPFC_NVMET,
121 LPFC_NVME_LS, 117 LPFC_NVME_LS,
122 LPFC_USOL 118 LPFC_USOL
@@ -646,22 +642,17 @@ struct lpfc_eq_intr_info {
646struct lpfc_sli4_hdw_queue { 642struct lpfc_sli4_hdw_queue {
647 /* Pointers to the constructed SLI4 queues */ 643 /* Pointers to the constructed SLI4 queues */
648 struct lpfc_queue *hba_eq; /* Event queues for HBA */ 644 struct lpfc_queue *hba_eq; /* Event queues for HBA */
649 struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */ 645 struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */
650 struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */ 646 struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */
651 struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */ 647 uint16_t io_cq_map;
652 struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
653 uint16_t fcp_cq_map;
654 uint16_t nvme_cq_map;
655 648
656 /* Keep track of IO buffers for this hardware queue */ 649 /* Keep track of IO buffers for this hardware queue */
657 spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */ 650 spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */
658 struct list_head lpfc_io_buf_list_get; 651 struct list_head lpfc_io_buf_list_get;
659 spinlock_t io_buf_list_put_lock; /* Common buf free list lock */ 652 spinlock_t io_buf_list_put_lock; /* Common buf free list lock */
660 struct list_head lpfc_io_buf_list_put; 653 struct list_head lpfc_io_buf_list_put;
661 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 654 spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
662 struct list_head lpfc_abts_scsi_buf_list; 655 struct list_head lpfc_abts_io_buf_list;
663 spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
664 struct list_head lpfc_abts_nvme_buf_list;
665 uint32_t total_io_bufs; 656 uint32_t total_io_bufs;
666 uint32_t get_io_bufs; 657 uint32_t get_io_bufs;
667 uint32_t put_io_bufs; 658 uint32_t put_io_bufs;
@@ -685,6 +676,13 @@ struct lpfc_sli4_hdw_queue {
685 uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; 676 uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
686 uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; 677 uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
687#endif 678#endif
679
680 /* Per HDWQ pool resources */
681 struct list_head sgl_list;
682 struct list_head cmd_rsp_buf_list;
683
684 /* Lock for syncing Per HDWQ pool resources */
685 spinlock_t hdwq_lock;
688}; 686};
689 687
690#ifdef LPFC_HDWQ_LOCK_STAT 688#ifdef LPFC_HDWQ_LOCK_STAT
@@ -850,8 +848,8 @@ struct lpfc_sli4_hba {
850 struct lpfc_queue **cq_lookup; 848 struct lpfc_queue **cq_lookup;
851 struct list_head lpfc_els_sgl_list; 849 struct list_head lpfc_els_sgl_list;
852 struct list_head lpfc_abts_els_sgl_list; 850 struct list_head lpfc_abts_els_sgl_list;
853 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 851 spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */
854 struct list_head lpfc_abts_scsi_buf_list; 852 struct list_head lpfc_abts_io_buf_list;
855 struct list_head lpfc_nvmet_sgl_list; 853 struct list_head lpfc_nvmet_sgl_list;
856 spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */ 854 spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
857 struct list_head lpfc_abts_nvmet_ctx_list; 855 struct list_head lpfc_abts_nvmet_ctx_list;
@@ -1056,10 +1054,11 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
1056 void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); 1054 void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
1057void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); 1055void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
1058void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); 1056void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
1059void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
1060 struct sli4_wcqe_xri_aborted *, int);
1061void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, 1057void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
1062 struct sli4_wcqe_xri_aborted *axri, int idx); 1058 struct sli4_wcqe_xri_aborted *axri,
1059 struct lpfc_io_buf *lpfc_ncmd);
1060void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
1061 struct sli4_wcqe_xri_aborted *axri, int idx);
1063void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, 1062void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1064 struct sli4_wcqe_xri_aborted *axri); 1063 struct sli4_wcqe_xri_aborted *axri);
1065void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, 1064void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
@@ -1094,6 +1093,17 @@ int lpfc_sli4_post_status_check(struct lpfc_hba *);
1094uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *); 1093uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
1095uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *); 1094uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
1096void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba); 1095void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
1096struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba,
1097 struct lpfc_io_buf *buf);
1098struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
1099 struct lpfc_io_buf *buf);
1100int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf);
1101int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
1102 struct lpfc_io_buf *buf);
1103void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
1104 struct lpfc_sli4_hdw_queue *hdwq);
1105void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
1106 struct lpfc_sli4_hdw_queue *hdwq);
1097static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx) 1107static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
1098{ 1108{
1099 return q->q_pgs[idx / q->entry_cnt_per_pg] + 1109 return q->q_pgs[idx / q->entry_cnt_per_pg] +
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f7e93aaf1e00..b8aae31ffda3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
20 * included with this package. * 20 * included with this package. *
21 *******************************************************************/ 21 *******************************************************************/
22 22
23#define LPFC_DRIVER_VERSION "12.2.0.3" 23#define LPFC_DRIVER_VERSION "12.4.0.0"
24#define LPFC_DRIVER_NAME "lpfc" 24#define LPFC_DRIVER_NAME "lpfc"
25 25
26/* Used for SLI 2/3 */ 26/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 343bc71d4615..b76646357980 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -527,9 +527,11 @@ disable_vport(struct fc_vport *fc_vport)
527 * scsi_host_put() to release the vport. 527 * scsi_host_put() to release the vport.
528 */ 528 */
529 lpfc_mbx_unreg_vpi(vport); 529 lpfc_mbx_unreg_vpi(vport);
530 spin_lock_irq(shost->host_lock); 530 if (phba->sli_rev == LPFC_SLI_REV4) {
531 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 531 spin_lock_irq(shost->host_lock);
532 spin_unlock_irq(shost->host_lock); 532 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
533 spin_unlock_irq(shost->host_lock);
534 }
533 535
534 lpfc_vport_set_state(vport, FC_VPORT_DISABLED); 536 lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
535 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, 537 lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a14e8344822b..a6e788c02ff4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2429,6 +2429,7 @@ struct megasas_instance {
2429 u8 adapter_type; 2429 u8 adapter_type;
2430 bool consistent_mask_64bit; 2430 bool consistent_mask_64bit;
2431 bool support_nvme_passthru; 2431 bool support_nvme_passthru;
2432 bool enable_sdev_max_qd;
2432 u8 task_abort_tmo; 2433 u8 task_abort_tmo;
2433 u8 max_reset_tmo; 2434 u8 max_reset_tmo;
2434 u8 snapdump_wait_time; 2435 u8 snapdump_wait_time;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f9f07935556e..42cf38c1ea99 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -109,6 +109,10 @@ int event_log_level = MFI_EVT_CLASS_CRITICAL;
109module_param(event_log_level, int, 0644); 109module_param(event_log_level, int, 0644);
110MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)"); 110MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
111 111
112unsigned int enable_sdev_max_qd;
113module_param(enable_sdev_max_qd, int, 0444);
114MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
115
112MODULE_LICENSE("GPL"); 116MODULE_LICENSE("GPL");
113MODULE_VERSION(MEGASAS_VERSION); 117MODULE_VERSION(MEGASAS_VERSION);
114MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com"); 118MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -1941,25 +1945,19 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1941 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); 1945 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1942} 1946}
1943 1947
1944
1945/* 1948/*
1946 * megasas_set_static_target_properties - 1949 * megasas_set_fw_assisted_qd -
1947 * Device property set by driver are static and it is not required to be 1950 * set device queue depth to can_queue
1948 * updated after OCR. 1951 * set device queue depth to fw assisted qd
1949 *
1950 * set io timeout
1951 * set device queue depth
1952 * set nvme device properties. see - megasas_set_nvme_device_properties
1953 * 1952 *
1954 * @sdev: scsi device 1953 * @sdev: scsi device
1955 * @is_target_prop true, if fw provided target properties. 1954 * @is_target_prop true, if fw provided target properties.
1956 */ 1955 */
1957static void megasas_set_static_target_properties(struct scsi_device *sdev, 1956static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1958 bool is_target_prop) 1957 bool is_target_prop)
1959{ 1958{
1960 u8 interface_type; 1959 u8 interface_type;
1961 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; 1960 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1962 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1963 u32 tgt_device_qd; 1961 u32 tgt_device_qd;
1964 struct megasas_instance *instance; 1962 struct megasas_instance *instance;
1965 struct MR_PRIV_DEVICE *mr_device_priv_data; 1963 struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -1968,11 +1966,6 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
1968 mr_device_priv_data = sdev->hostdata; 1966 mr_device_priv_data = sdev->hostdata;
1969 interface_type = mr_device_priv_data->interface_type; 1967 interface_type = mr_device_priv_data->interface_type;
1970 1968
1971 /*
1972 * The RAID firmware may require extended timeouts.
1973 */
1974 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1975
1976 switch (interface_type) { 1969 switch (interface_type) {
1977 case SAS_PD: 1970 case SAS_PD:
1978 device_qd = MEGASAS_SAS_QD; 1971 device_qd = MEGASAS_SAS_QD;
@@ -1990,18 +1983,49 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
1990 if (tgt_device_qd && 1983 if (tgt_device_qd &&
1991 (tgt_device_qd <= instance->host->can_queue)) 1984 (tgt_device_qd <= instance->host->can_queue))
1992 device_qd = tgt_device_qd; 1985 device_qd = tgt_device_qd;
1986 }
1993 1987
1994 /* max_io_size_kb will be set to non zero for 1988 if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
1995 * nvme based vd and syspd. 1989 device_qd = instance->host->can_queue;
1996 */ 1990
1991 scsi_change_queue_depth(sdev, device_qd);
1992}
1993
1994/*
1995 * megasas_set_static_target_properties -
1996 * Device property set by driver are static and it is not required to be
1997 * updated after OCR.
1998 *
1999 * set io timeout
2000 * set device queue depth
2001 * set nvme device properties. see - megasas_set_nvme_device_properties
2002 *
2003 * @sdev: scsi device
2004 * @is_target_prop true, if fw provided target properties.
2005 */
2006static void megasas_set_static_target_properties(struct scsi_device *sdev,
2007 bool is_target_prop)
2008{
2009 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2010 struct megasas_instance *instance;
2011
2012 instance = megasas_lookup_instance(sdev->host->host_no);
2013
2014 /*
2015 * The RAID firmware may require extended timeouts.
2016 */
2017 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2018
2019 /* max_io_size_kb will be set to non zero for
2020 * nvme based vd and syspd.
2021 */
2022 if (is_target_prop)
1997 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); 2023 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1998 }
1999 2024
2000 if (instance->nvme_page_size && max_io_size_kb) 2025 if (instance->nvme_page_size && max_io_size_kb)
2001 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); 2026 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2002 2027
2003 scsi_change_queue_depth(sdev, device_qd); 2028 megasas_set_fw_assisted_qd(sdev, is_target_prop);
2004
2005} 2029}
2006 2030
2007 2031
@@ -3285,6 +3309,48 @@ fw_cmds_outstanding_show(struct device *cdev,
3285} 3309}
3286 3310
3287static ssize_t 3311static ssize_t
3312enable_sdev_max_qd_show(struct device *cdev,
3313 struct device_attribute *attr, char *buf)
3314{
3315 struct Scsi_Host *shost = class_to_shost(cdev);
3316 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3317
3318 return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3319}
3320
3321static ssize_t
3322enable_sdev_max_qd_store(struct device *cdev,
3323 struct device_attribute *attr, const char *buf, size_t count)
3324{
3325 struct Scsi_Host *shost = class_to_shost(cdev);
3326 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3327 u32 val = 0;
3328 bool is_target_prop;
3329 int ret_target_prop = DCMD_FAILED;
3330 struct scsi_device *sdev;
3331
3332 if (kstrtou32(buf, 0, &val) != 0) {
3333 pr_err("megasas: could not set enable_sdev_max_qd\n");
3334 return -EINVAL;
3335 }
3336
3337 mutex_lock(&instance->reset_mutex);
3338 if (val)
3339 instance->enable_sdev_max_qd = true;
3340 else
3341 instance->enable_sdev_max_qd = false;
3342
3343 shost_for_each_device(sdev, shost) {
3344 ret_target_prop = megasas_get_target_prop(instance, sdev);
3345 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3346 megasas_set_fw_assisted_qd(sdev, is_target_prop);
3347 }
3348 mutex_unlock(&instance->reset_mutex);
3349
3350 return strlen(buf);
3351}
3352
3353static ssize_t
3288dump_system_regs_show(struct device *cdev, 3354dump_system_regs_show(struct device *cdev,
3289 struct device_attribute *attr, char *buf) 3355 struct device_attribute *attr, char *buf)
3290{ 3356{
@@ -3313,6 +3379,7 @@ static DEVICE_ATTR_RW(fw_crash_state);
3313static DEVICE_ATTR_RO(page_size); 3379static DEVICE_ATTR_RO(page_size);
3314static DEVICE_ATTR_RO(ldio_outstanding); 3380static DEVICE_ATTR_RO(ldio_outstanding);
3315static DEVICE_ATTR_RO(fw_cmds_outstanding); 3381static DEVICE_ATTR_RO(fw_cmds_outstanding);
3382static DEVICE_ATTR_RW(enable_sdev_max_qd);
3316static DEVICE_ATTR_RO(dump_system_regs); 3383static DEVICE_ATTR_RO(dump_system_regs);
3317static DEVICE_ATTR_RO(raid_map_id); 3384static DEVICE_ATTR_RO(raid_map_id);
3318 3385
@@ -3323,6 +3390,7 @@ static struct device_attribute *megaraid_host_attrs[] = {
3323 &dev_attr_page_size, 3390 &dev_attr_page_size,
3324 &dev_attr_ldio_outstanding, 3391 &dev_attr_ldio_outstanding,
3325 &dev_attr_fw_cmds_outstanding, 3392 &dev_attr_fw_cmds_outstanding,
3393 &dev_attr_enable_sdev_max_qd,
3326 &dev_attr_dump_system_regs, 3394 &dev_attr_dump_system_regs,
3327 &dev_attr_raid_map_id, 3395 &dev_attr_raid_map_id,
3328 NULL, 3396 NULL,
@@ -5894,6 +5962,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
5894 MR_MAX_RAID_MAP_SIZE_MASK); 5962 MR_MAX_RAID_MAP_SIZE_MASK);
5895 } 5963 }
5896 5964
5965 instance->enable_sdev_max_qd = enable_sdev_max_qd;
5966
5897 switch (instance->adapter_type) { 5967 switch (instance->adapter_type) {
5898 case VENTURA_SERIES: 5968 case VENTURA_SERIES:
5899 fusion->pcie_bw_limitation = true; 5969 fusion->pcie_bw_limitation = true;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 120e3c4de8c2..e301458bcbae 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -323,9 +323,6 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
323{ 323{
324 u16 cur_max_fw_cmds = 0; 324 u16 cur_max_fw_cmds = 0;
325 u16 ldio_threshold = 0; 325 u16 ldio_threshold = 0;
326 struct megasas_register_set __iomem *reg_set;
327
328 reg_set = instance->reg_set;
329 326
330 /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */ 327 /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
331 if (instance->adapter_type < VENTURA_SERIES) 328 if (instance->adapter_type < VENTURA_SERIES)
@@ -3511,7 +3508,7 @@ megasas_complete_r1_command(struct megasas_instance *instance,
3511 * @instance: Adapter soft state 3508 * @instance: Adapter soft state
3512 * Completes all commands that is in reply descriptor queue 3509 * Completes all commands that is in reply descriptor queue
3513 */ 3510 */
3514int 3511static int
3515complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, 3512complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
3516 struct megasas_irq_context *irq_context) 3513 struct megasas_irq_context *irq_context)
3517{ 3514{
@@ -3702,7 +3699,7 @@ static void megasas_enable_irq_poll(struct megasas_instance *instance)
3702 * megasas_sync_irqs - Synchronizes all IRQs owned by adapter 3699 * megasas_sync_irqs - Synchronizes all IRQs owned by adapter
3703 * @instance: Adapter soft state 3700 * @instance: Adapter soft state
3704 */ 3701 */
3705void megasas_sync_irqs(unsigned long instance_addr) 3702static void megasas_sync_irqs(unsigned long instance_addr)
3706{ 3703{
3707 u32 count, i; 3704 u32 count, i;
3708 struct megasas_instance *instance = 3705 struct megasas_instance *instance =
@@ -3760,7 +3757,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
3760 * 3757 *
3761 * Tasklet to complete cmds 3758 * Tasklet to complete cmds
3762 */ 3759 */
3763void 3760static void
3764megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) 3761megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
3765{ 3762{
3766 struct megasas_instance *instance = 3763 struct megasas_instance *instance =
@@ -3780,7 +3777,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
3780/** 3777/**
3781 * megasas_isr_fusion - isr entry point 3778 * megasas_isr_fusion - isr entry point
3782 */ 3779 */
3783irqreturn_t megasas_isr_fusion(int irq, void *devp) 3780static irqreturn_t megasas_isr_fusion(int irq, void *devp)
3784{ 3781{
3785 struct megasas_irq_context *irq_context = devp; 3782 struct megasas_irq_context *irq_context = devp;
3786 struct megasas_instance *instance = irq_context->instance; 3783 struct megasas_instance *instance = irq_context->instance;
@@ -3816,7 +3813,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
3816 * mfi_cmd: megasas_cmd pointer 3813 * mfi_cmd: megasas_cmd pointer
3817 * 3814 *
3818 */ 3815 */
3819void 3816static void
3820build_mpt_mfi_pass_thru(struct megasas_instance *instance, 3817build_mpt_mfi_pass_thru(struct megasas_instance *instance,
3821 struct megasas_cmd *mfi_cmd) 3818 struct megasas_cmd *mfi_cmd)
3822{ 3819{
@@ -3874,7 +3871,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
3874 * @cmd: mfi cmd to build 3871 * @cmd: mfi cmd to build
3875 * 3872 *
3876 */ 3873 */
3877union MEGASAS_REQUEST_DESCRIPTOR_UNION * 3874static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
3878build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 3875build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
3879{ 3876{
3880 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL; 3877 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
@@ -3900,7 +3897,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
3900 * @cmd: mfi cmd pointer 3897 * @cmd: mfi cmd pointer
3901 * 3898 *
3902 */ 3899 */
3903void 3900static void
3904megasas_issue_dcmd_fusion(struct megasas_instance *instance, 3901megasas_issue_dcmd_fusion(struct megasas_instance *instance,
3905 struct megasas_cmd *cmd) 3902 struct megasas_cmd *cmd)
3906{ 3903{
@@ -4096,8 +4093,9 @@ static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
4096} 4093}
4097 4094
4098/* This function waits for outstanding commands on fusion to complete */ 4095/* This function waits for outstanding commands on fusion to complete */
4099int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, 4096static int
4100 int reason, int *convert) 4097megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
4098 int reason, int *convert)
4101{ 4099{
4102 int i, outstanding, retval = 0, hb_seconds_missed = 0; 4100 int i, outstanding, retval = 0, hb_seconds_missed = 0;
4103 u32 fw_state, abs_state; 4101 u32 fw_state, abs_state;
@@ -4221,7 +4219,7 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
4221 * megasas_refire_mgmt_cmd : Re-fire management commands 4219 * megasas_refire_mgmt_cmd : Re-fire management commands
4222 * @instance: Controller's soft instance 4220 * @instance: Controller's soft instance
4223*/ 4221*/
4224void megasas_refire_mgmt_cmd(struct megasas_instance *instance) 4222static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
4225{ 4223{
4226 int j; 4224 int j;
4227 struct megasas_cmd_fusion *cmd_fusion; 4225 struct megasas_cmd_fusion *cmd_fusion;
@@ -4747,7 +4745,8 @@ out:
4747} 4745}
4748 4746
4749/*SRIOV get other instance in cluster if any*/ 4747/*SRIOV get other instance in cluster if any*/
4750struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) 4748static struct
4749megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
4751{ 4750{
4752 int i; 4751 int i;
4753 4752
@@ -5053,7 +5052,7 @@ out:
5053} 5052}
5054 5053
5055/* Fusion Crash dump collection */ 5054/* Fusion Crash dump collection */
5056void megasas_fusion_crash_dump(struct megasas_instance *instance) 5055static void megasas_fusion_crash_dump(struct megasas_instance *instance)
5057{ 5056{
5058 u32 status_reg; 5057 u32 status_reg;
5059 u8 partial_copy = 0; 5058 u8 partial_copy = 0;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 7efd17a3c25b..18b1e31b5eb8 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -9,7 +9,7 @@
9 * scatter/gather formats. 9 * scatter/gather formats.
10 * Creation Date: June 21, 2006 10 * Creation Date: June 21, 2006
11 * 11 *
12 * mpi2.h Version: 02.00.53 12 * mpi2.h Version: 02.00.54
13 * 13 *
14 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 14 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
15 * prefix are for use only on MPI v2.5 products, and must not be used 15 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -121,6 +121,7 @@
121 * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT. 121 * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT.
122 * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT. 122 * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT.
123 * Added MPI2_IOCSTATUS_FAILURE 123 * Added MPI2_IOCSTATUS_FAILURE
124 * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT
124 * -------------------------------------------------------------------------- 125 * --------------------------------------------------------------------------
125 */ 126 */
126 127
@@ -161,7 +162,7 @@
161 162
162 163
163/* Unit and Dev versioning for this MPI header set */ 164/* Unit and Dev versioning for this MPI header set */
164#define MPI2_HEADER_VERSION_UNIT (0x35) 165#define MPI2_HEADER_VERSION_UNIT (0x36)
165#define MPI2_HEADER_VERSION_DEV (0x00) 166#define MPI2_HEADER_VERSION_DEV (0x00)
166#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 167#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
167#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 168#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 167d79d145ca..3a6871aecada 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -7,7 +7,7 @@
7 * Title: MPI Configuration messages and pages 7 * Title: MPI Configuration messages and pages
8 * Creation Date: November 10, 2006 8 * Creation Date: November 10, 2006
9 * 9 *
10 * mpi2_cnfg.h Version: 02.00.46 10 * mpi2_cnfg.h Version: 02.00.47
11 * 11 *
12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
13 * prefix are for use only on MPI v2.5 products, and must not be used 13 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -244,6 +244,11 @@
244 * Added DMDReport Delay Time defines to 244 * Added DMDReport Delay Time defines to
245 * PCIeIOUnitPage1 245 * PCIeIOUnitPage1
246 * -------------------------------------------------------------------------- 246 * --------------------------------------------------------------------------
247 * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7.
248 * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1
249 * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1
250 * Added DMDReport Delay Time defines to PCIeIOUnitPage1
251 * 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7.
247 */ 252 */
248 253
249#ifndef MPI2_CNFG_H 254#ifndef MPI2_CNFG_H
@@ -810,7 +815,8 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
810 U8 Location; /*0x14 */ 815 U8 Location; /*0x14 */
811 U8 ReceptacleID; /*0x15 */ 816 U8 ReceptacleID; /*0x15 */
812 U16 Slot; /*0x16 */ 817 U16 Slot; /*0x16 */
813 U32 Reserved2; /*0x18 */ 818 U16 Slotx2; /*0x18 */
819 U16 Slotx4; /*0x1A */
814} MPI2_MANPAGE7_CONNECTOR_INFO, 820} MPI2_MANPAGE7_CONNECTOR_INFO,
815 *PTR_MPI2_MANPAGE7_CONNECTOR_INFO, 821 *PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
816 Mpi2ManPage7ConnectorInfo_t, 822 Mpi2ManPage7ConnectorInfo_t,
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
index 4959585f029d..a3f677853098 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -5,7 +5,7 @@
5 * Name: mpi2_image.h 5 * Name: mpi2_image.h
6 * Description: Contains definitions for firmware and other component images 6 * Description: Contains definitions for firmware and other component images
7 * Creation Date: 04/02/2018 7 * Creation Date: 04/02/2018
8 * Version: 02.06.03 8 * Version: 02.06.04
9 * 9 *
10 * 10 *
11 * Version History 11 * Version History
@@ -17,6 +17,8 @@
17 * 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 17 * 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26
18 * 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE 18 * 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE
19 * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES 19 * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
 20 * 12-17-18 02.06.04 Added MPI2_EXT_IMAGE_TYPE_PBLP
21 * Shorten some defines to be compatible with DOS
20 */ 22 */
21#ifndef MPI2_IMAGE_H 23#ifndef MPI2_IMAGE_H
22#define MPI2_IMAGE_H 24#define MPI2_IMAGE_H
@@ -200,17 +202,17 @@ typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
200#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042) 202#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042)
201 203
202/**** Definitions for Signature1 field ****/ 204/**** Definitions for Signature1 field ****/
203#define MPI26_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041) 205#define MPI26_IMAGE_HEADER_SIG1_APPLICATION (0x20505041)
204#define MPI26_IMAGE_HEADER_SIGNATURE1_CBB (0x20424243) 206#define MPI26_IMAGE_HEADER_SIG1_CBB (0x20424243)
205#define MPI26_IMAGE_HEADER_SIGNATURE1_MFG (0x2047464D) 207#define MPI26_IMAGE_HEADER_SIG1_MFG (0x2047464D)
206#define MPI26_IMAGE_HEADER_SIGNATURE1_BIOS (0x534F4942) 208#define MPI26_IMAGE_HEADER_SIG1_BIOS (0x534F4942)
207#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIM (0x4D494948) 209#define MPI26_IMAGE_HEADER_SIG1_HIIM (0x4D494948)
208#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIA (0x41494948) 210#define MPI26_IMAGE_HEADER_SIG1_HIIA (0x41494948)
209#define MPI26_IMAGE_HEADER_SIGNATURE1_CPLD (0x444C5043) 211#define MPI26_IMAGE_HEADER_SIG1_CPLD (0x444C5043)
210#define MPI26_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053) 212#define MPI26_IMAGE_HEADER_SIG1_SPD (0x20445053)
211#define MPI26_IMAGE_HEADER_SIGNATURE1_NVDATA (0x5444564E) 213#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E)
212#define MPI26_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147) 214#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147)
213#define MPI26_IMAGE_HEADER_SIGNATURE1_PBLP (0x50424C50) 215#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250)
214 216
215/**** Definitions for Signature2 field ****/ 217/**** Definitions for Signature2 field ****/
216#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) 218#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
@@ -278,6 +280,7 @@ typedef struct _MPI2_EXT_IMAGE_HEADER {
278#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) 280#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
279#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) 281#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
280#define MPI2_EXT_IMAGE_TYPE_RDE (0x0A) 282#define MPI2_EXT_IMAGE_TYPE_RDE (0x0A)
283#define MPI2_EXT_IMAGE_TYPE_PBLP (0x0B)
281#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) 284#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
282#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) 285#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
283 286
@@ -472,12 +475,12 @@ Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
472#define MPI25_HASH_ALGORITHM_UNUSED (0x00) 475#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
473#define MPI25_HASH_ALGORITHM_SHA256 (0x01) 476#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
474 477
475#define MPI26_HASH_ALGORITHM_VERSION_MASK (0xE0) 478#define MPI26_HASH_ALGORITHM_VER_MASK (0xE0)
476#define MPI26_HASH_ALGORITHM_VERSION_NONE (0x00) 479#define MPI26_HASH_ALGORITHM_VER_NONE (0x00)
477#define MPI26_HASH_ALGORITHM_VERSION_SHA1 (0x20) 480#define MPI26_HASH_ALGORITHM_VER_SHA1 (0x20)
478#define MPI26_HASH_ALGORITHM_VERSION_SHA2 (0x40) 481#define MPI26_HASH_ALGORITHM_VER_SHA2 (0x40)
479#define MPI26_HASH_ALGORITHM_VERSION_SHA3 (0x60) 482#define MPI26_HASH_ALGORITHM_VER_SHA3 (0x60)
480#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F) 483#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F)
481#define MPI26_HASH_ALGORITHM_SIZE_256 (0x01) 484#define MPI26_HASH_ALGORITHM_SIZE_256 (0x01)
482#define MPI26_HASH_ALGORITHM_SIZE_512 (0x02) 485#define MPI26_HASH_ALGORITHM_SIZE_512 (0x02)
483 486
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
index 63a09509d7d1..bb7b79cfa558 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
@@ -6,7 +6,7 @@
6 * Title: MPI PCIe Attached Devices structures and definitions. 6 * Title: MPI PCIe Attached Devices structures and definitions.
7 * Creation Date: October 9, 2012 7 * Creation Date: October 9, 2012
8 * 8 *
9 * mpi2_pci.h Version: 02.00.03 9 * mpi2_pci.h Version: 02.00.04
10 * 10 *
11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
12 * prefix are for use only on MPI v2.5 products, and must not be used 12 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -24,6 +24,8 @@
24 * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to 24 * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
25 * NVME Encapsulated Request. 25 * NVME Encapsulated Request.
 26 * 07-22-18 02.00.03 Updated flags field for NVME Encapsulated req 26 * 07-22-18 02.00.03 Updated flags field for NVME Encapsulated req
27 * 12-17-18 02.00.04 Added MPI26_PCIE_DEVINFO_SCSI
 28 * Shorten some defines to be compatible with DOS
27 * -------------------------------------------------------------------------- 29 * --------------------------------------------------------------------------
28 */ 30 */
29 31
@@ -41,7 +43,7 @@
41#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000) 43#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000)
42#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001) 44#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001)
43#define MPI26_PCIE_DEVINFO_NVME (0x00000003) 45#define MPI26_PCIE_DEVINFO_NVME (0x00000003)
44 46#define MPI26_PCIE_DEVINFO_SCSI (0x00000004)
45 47
46/**************************************************************************** 48/****************************************************************************
47* NVMe Encapsulated message 49* NVMe Encapsulated message
@@ -75,10 +77,9 @@ typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST {
75#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000) 77#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
76#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010) 78#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010)
77/*Error Response Address Space */ 79/*Error Response Address Space */
78#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR (0x000C) 80#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_MASK (0x000C)
79#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR_MASK (0x000C) 81#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM (0x0000)
80#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR (0x0000) 82#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_IOCTL (0x0008)
81#define MPI26_NVME_FLAGS_IOCCTL_RSP_ADDR (0x0008)
82/* Data Direction*/ 83/* Data Direction*/
83#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003) 84#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003)
84#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000) 85#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 3f966b6796b3..17ef7f63b938 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -7,7 +7,7 @@
7 * Title: MPI diagnostic tool structures and definitions 7 * Title: MPI diagnostic tool structures and definitions
8 * Creation Date: March 26, 2007 8 * Creation Date: March 26, 2007
9 * 9 *
10 * mpi2_tool.h Version: 02.00.15 10 * mpi2_tool.h Version: 02.00.16
11 * 11 *
12 * Version History 12 * Version History
13 * --------------- 13 * ---------------
@@ -40,6 +40,7 @@
40 * Tool Request Message. 40 * Tool Request Message.
41 * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool. 41 * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool.
42 * Added option for DeviceInfo field in ISTWI tool. 42 * Added option for DeviceInfo field in ISTWI tool.
43 * 12-17-18 02.00.16 Shorten some defines to be compatible with DOS.
43 * -------------------------------------------------------------------------- 44 * --------------------------------------------------------------------------
44 */ 45 */
45 46
@@ -230,11 +231,11 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
230#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) 231#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
231 232
232/*MPI26 TOOLBOX Request MsgFlags defines */ 233/*MPI26 TOOLBOX Request MsgFlags defines */
233#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_MASK (0x01) 234#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_MASK (0x01)
234/*Request uses Man Page 43 device index addressing */ 235/*Request uses Man Page 43 device index addressing */
235#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINDEX (0x00) 236#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX (0x00)
236/*Request uses Man Page 43 device info struct addressing */ 237/*Request uses Man Page 43 device info struct addressing */
237#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINFO (0x01) 238#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO (0x01)
238 239
239/*Toolbox ISTWI Read Write Tool reply message */ 240/*Toolbox ISTWI Read Write Tool reply message */
240typedef struct _MPI2_TOOLBOX_ISTWI_REPLY { 241typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
@@ -403,7 +404,7 @@ Mpi2ToolboxTextDisplayRequest_t,
403 */ 404 */
404 405
405/*Toolbox Backend Lane Margining Tool request message */ 406/*Toolbox Backend Lane Margining Tool request message */
406typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REQUEST { 407typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST {
407 U8 Tool; /*0x00 */ 408 U8 Tool; /*0x00 */
408 U8 Reserved1; /*0x01 */ 409 U8 Reserved1; /*0x01 */
409 U8 ChainOffset; /*0x02 */ 410 U8 ChainOffset; /*0x02 */
@@ -434,7 +435,7 @@ typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REQUEST {
434 435
435 436
436/*Toolbox Backend Lane Margining Tool reply message */ 437/*Toolbox Backend Lane Margining Tool reply message */
437typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REPLY { 438typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY {
438 U8 Tool; /*0x00 */ 439 U8 Tool; /*0x00 */
439 U8 Reserved1; /*0x01 */ 440 U8 Reserved1; /*0x01 */
440 U8 MsgLength; /*0x02 */ 441 U8 MsgLength; /*0x02 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 050c0f029ef9..fea3cb6a090b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2260,6 +2260,11 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2260 bool build_prp = true; 2260 bool build_prp = true;
2261 2261
2262 data_length = scsi_bufflen(scmd); 2262 data_length = scsi_bufflen(scmd);
2263 if (pcie_device &&
2264 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2265 build_prp = false;
2266 return build_prp;
2267 }
2263 2268
2264 /* If Datalength is <= 16K and number of SGE’s entries are <= 2 2269 * we built IEEE SGL
2265 * we built IEEE SGL 2270 * we built IEEE SGL
@@ -3178,6 +3183,37 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3178 } 3183 }
3179} 3184}
3180 3185
3186static int
3187_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3188
3189/**
3190 * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
3191 * and, if so, issue a diag reset.
3192 * @ioc: per adapter object
3193 *
3194 * Returns: 0 for success, non-zero for failure.
3195 */
3196static int
3197_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3198{
3199 u32 ioc_state;
3200 int rc = -EFAULT;
3201
3202 dinitprintk(ioc, pr_info("%s\n", __func__));
3203 if (ioc->pci_error_recovery)
3204 return 0;
3205 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3206 dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3207
3208 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3209 mpt3sas_base_fault_info(ioc, ioc_state &
3210 MPI2_DOORBELL_DATA_MASK);
3211 rc = _base_diag_reset(ioc);
3212 }
3213
3214 return rc;
3215}
3216
3181/** 3217/**
3182 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) 3218 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3183 * @ioc: per adapter object 3219 * @ioc: per adapter object
@@ -3190,7 +3226,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3190 struct pci_dev *pdev = ioc->pdev; 3226 struct pci_dev *pdev = ioc->pdev;
3191 u32 memap_sz; 3227 u32 memap_sz;
3192 u32 pio_sz; 3228 u32 pio_sz;
3193 int i, r = 0; 3229 int i, r = 0, rc;
3194 u64 pio_chip = 0; 3230 u64 pio_chip = 0;
3195 phys_addr_t chip_phys = 0; 3231 phys_addr_t chip_phys = 0;
3196 struct adapter_reply_queue *reply_q; 3232 struct adapter_reply_queue *reply_q;
@@ -3251,8 +3287,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3251 _base_mask_interrupts(ioc); 3287 _base_mask_interrupts(ioc);
3252 3288
3253 r = _base_get_ioc_facts(ioc); 3289 r = _base_get_ioc_facts(ioc);
3254 if (r) 3290 if (r) {
3255 goto out_fail; 3291 rc = _base_check_for_fault_and_issue_reset(ioc);
3292 if (rc || (_base_get_ioc_facts(ioc)))
3293 goto out_fail;
3294 }
3256 3295
3257 if (!ioc->rdpq_array_enable_assigned) { 3296 if (!ioc->rdpq_array_enable_assigned) {
3258 ioc->rdpq_array_enable = ioc->rdpq_array_capable; 3297 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
@@ -5037,6 +5076,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5037 _base_release_memory_pools(ioc); 5076 _base_release_memory_pools(ioc);
5038 goto retry_allocation; 5077 goto retry_allocation;
5039 } 5078 }
5079 memset(ioc->request, 0, sz);
5040 5080
5041 if (retry_sz) 5081 if (retry_sz)
5042 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n", 5082 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
@@ -5410,8 +5450,6 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5410 * 5450 *
5411 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. 5451 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5412 */ 5452 */
5413static int
5414_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
5415 5453
5416static int 5454static int
5417_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) 5455_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -5868,6 +5906,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
5868 ioc->base_cmds.status = MPT3_CMD_PENDING; 5906 ioc->base_cmds.status = MPT3_CMD_PENDING;
5869 request = mpt3sas_base_get_msg_frame(ioc, smid); 5907 request = mpt3sas_base_get_msg_frame(ioc, smid);
5870 ioc->base_cmds.smid = smid; 5908 ioc->base_cmds.smid = smid;
5909 memset(request, 0, ioc->request_sz);
5871 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); 5910 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
5872 init_completion(&ioc->base_cmds.done); 5911 init_completion(&ioc->base_cmds.done);
5873 ioc->put_smid_default(ioc, smid); 5912 ioc->put_smid_default(ioc, smid);
@@ -6686,7 +6725,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
6686static int 6725static int
6687_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) 6726_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6688{ 6727{
6689 int r, i, index; 6728 int r, i, index, rc;
6690 unsigned long flags; 6729 unsigned long flags;
6691 u32 reply_address; 6730 u32 reply_address;
6692 u16 smid; 6731 u16 smid;
@@ -6789,8 +6828,19 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6789 skip_init_reply_post_free_queue: 6828 skip_init_reply_post_free_queue:
6790 6829
6791 r = _base_send_ioc_init(ioc); 6830 r = _base_send_ioc_init(ioc);
6792 if (r) 6831 if (r) {
6793 return r; 6832 /*
6833 * No need to check IOC state for fault state & issue
6834 * diag reset during host reset. This check is needed
6835 * only during driver load time.
6836 */
6837 if (!ioc->is_driver_loading)
6838 return r;
6839
6840 rc = _base_check_for_fault_and_issue_reset(ioc);
6841 if (rc || (_base_send_ioc_init(ioc)))
6842 return r;
6843 }
6794 6844
6795 /* initialize reply free host index */ 6845 /* initialize reply free host index */
6796 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; 6846 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
@@ -6882,7 +6932,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
6882int 6932int
6883mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) 6933mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6884{ 6934{
6885 int r, i; 6935 int r, i, rc;
6886 int cpu_id, last_cpu_id = 0; 6936 int cpu_id, last_cpu_id = 0;
6887 6937
6888 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); 6938 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -6926,8 +6976,11 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6926 6976
6927 pci_set_drvdata(ioc->pdev, ioc->shost); 6977 pci_set_drvdata(ioc->pdev, ioc->shost);
6928 r = _base_get_ioc_facts(ioc); 6978 r = _base_get_ioc_facts(ioc);
6929 if (r) 6979 if (r) {
6930 goto out_free_resources; 6980 rc = _base_check_for_fault_and_issue_reset(ioc);
6981 if (rc || (_base_get_ioc_facts(ioc)))
6982 goto out_free_resources;
6983 }
6931 6984
6932 switch (ioc->hba_mpi_version_belonged) { 6985 switch (ioc->hba_mpi_version_belonged) {
6933 case MPI2_VERSION: 6986 case MPI2_VERSION:
@@ -6995,8 +7048,11 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6995 7048
6996 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { 7049 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
6997 r = _base_get_port_facts(ioc, i); 7050 r = _base_get_port_facts(ioc, i);
6998 if (r) 7051 if (r) {
6999 goto out_free_resources; 7052 rc = _base_check_for_fault_and_issue_reset(ioc);
7053 if (rc || (_base_get_port_facts(ioc, i)))
7054 goto out_free_resources;
7055 }
7000 } 7056 }
7001 7057
7002 r = _base_allocate_memory_pools(ioc); 7058 r = _base_allocate_memory_pools(ioc);
@@ -7118,6 +7174,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
7118 if (r) 7174 if (r)
7119 goto out_free_resources; 7175 goto out_free_resources;
7120 7176
7177 /*
7178 * Copy current copy of IOCFacts in prev_fw_facts
7179 * and it will be used during online firmware upgrade.
7180 */
7181 memcpy(&ioc->prev_fw_facts, &ioc->facts,
7182 sizeof(struct mpt3sas_facts));
7183
7121 ioc->non_operational_loop = 0; 7184 ioc->non_operational_loop = 0;
7122 ioc->got_task_abort_from_ioctl = 0; 7185 ioc->got_task_abort_from_ioctl = 0;
7123 return 0; 7186 return 0;
@@ -7280,6 +7343,85 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
7280} 7343}
7281 7344
7282/** 7345/**
7346 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
7347 * attributes during online firmware upgrade and update the corresponding
7348 * IOC variables accordingly.
7349 *
7350 * @ioc: Pointer to MPT_ADAPTER structure
7351 */
7352static int
7353_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
7354{
7355 u16 pd_handles_sz;
7356 void *pd_handles = NULL, *blocking_handles = NULL;
7357 void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
7358 struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
7359
7360 if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
7361 pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
7362 if (ioc->facts.MaxDevHandle % 8)
7363 pd_handles_sz++;
7364
7365 pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
7366 GFP_KERNEL);
7367 if (!pd_handles) {
7368 ioc_info(ioc,
7369 "Unable to allocate the memory for pd_handles of sz: %d\n",
7370 pd_handles_sz);
7371 return -ENOMEM;
7372 }
7373 memset(pd_handles + ioc->pd_handles_sz, 0,
7374 (pd_handles_sz - ioc->pd_handles_sz));
7375 ioc->pd_handles = pd_handles;
7376
7377 blocking_handles = krealloc(ioc->blocking_handles,
7378 pd_handles_sz, GFP_KERNEL);
7379 if (!blocking_handles) {
7380 ioc_info(ioc,
7381 "Unable to allocate the memory for "
7382 "blocking_handles of sz: %d\n",
7383 pd_handles_sz);
7384 return -ENOMEM;
7385 }
7386 memset(blocking_handles + ioc->pd_handles_sz, 0,
7387 (pd_handles_sz - ioc->pd_handles_sz));
7388 ioc->blocking_handles = blocking_handles;
7389 ioc->pd_handles_sz = pd_handles_sz;
7390
7391 pend_os_device_add = krealloc(ioc->pend_os_device_add,
7392 pd_handles_sz, GFP_KERNEL);
7393 if (!pend_os_device_add) {
7394 ioc_info(ioc,
7395 "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
7396 pd_handles_sz);
7397 return -ENOMEM;
7398 }
7399 memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
7400 (pd_handles_sz - ioc->pend_os_device_add_sz));
7401 ioc->pend_os_device_add = pend_os_device_add;
7402 ioc->pend_os_device_add_sz = pd_handles_sz;
7403
7404 device_remove_in_progress = krealloc(
7405 ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
7406 if (!device_remove_in_progress) {
7407 ioc_info(ioc,
7408 "Unable to allocate the memory for "
7409 "device_remove_in_progress of sz: %d\n "
7410 , pd_handles_sz);
7411 return -ENOMEM;
7412 }
7413 memset(device_remove_in_progress +
7414 ioc->device_remove_in_progress_sz, 0,
7415 (pd_handles_sz - ioc->device_remove_in_progress_sz));
7416 ioc->device_remove_in_progress = device_remove_in_progress;
7417 ioc->device_remove_in_progress_sz = pd_handles_sz;
7418 }
7419
7420 memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
7421 return 0;
7422}
7423
7424/**
7283 * mpt3sas_base_hard_reset_handler - reset controller 7425 * mpt3sas_base_hard_reset_handler - reset controller
7284 * @ioc: Pointer to MPT_ADAPTER structure 7426 * @ioc: Pointer to MPT_ADAPTER structure
7285 * @type: FORCE_BIG_HAMMER or SOFT_RESET 7427 * @type: FORCE_BIG_HAMMER or SOFT_RESET
@@ -7342,6 +7484,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
7342 if (r) 7484 if (r)
7343 goto out; 7485 goto out;
7344 7486
7487 r = _base_check_ioc_facts_changes(ioc);
7488 if (r) {
7489 ioc_info(ioc,
7490 "Some of the parameters got changed in this new firmware"
7491 " image and it requires system reboot\n");
7492 goto out;
7493 }
7345 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) 7494 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
7346 panic("%s: Issue occurred with flashing controller firmware." 7495 panic("%s: Issue occurred with flashing controller firmware."
7347 "Please reboot the system and ensure that the correct" 7496 "Please reboot the system and ensure that the correct"
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 6afbdb044310..faca0a5e71f8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
76#define MPT3SAS_DRIVER_NAME "mpt3sas" 76#define MPT3SAS_DRIVER_NAME "mpt3sas"
77#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 77#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
78#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 78#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
79#define MPT3SAS_DRIVER_VERSION "29.100.00.00" 79#define MPT3SAS_DRIVER_VERSION "31.100.00.00"
80#define MPT3SAS_MAJOR_VERSION 29 80#define MPT3SAS_MAJOR_VERSION 31
81#define MPT3SAS_MINOR_VERSION 100 81#define MPT3SAS_MINOR_VERSION 100
82#define MPT3SAS_BUILD_VERSION 0 82#define MPT3SAS_BUILD_VERSION 0
83#define MPT3SAS_RELEASE_VERSION 00 83#define MPT3SAS_RELEASE_VERSION 00
@@ -583,6 +583,7 @@ static inline void sas_device_put(struct _sas_device *s)
583 * @enclosure_level: The level of device's enclosure from the controller 583 * @enclosure_level: The level of device's enclosure from the controller
584 * @connector_name: ASCII value of the Connector's name 584 * @connector_name: ASCII value of the Connector's name
585 * @serial_number: pointer of serial number string allocated runtime 585 * @serial_number: pointer of serial number string allocated runtime
586 * @access_status: Device's Access Status
586 * @refcount: reference count for deletion 587 * @refcount: reference count for deletion
587 */ 588 */
588struct _pcie_device { 589struct _pcie_device {
@@ -604,6 +605,7 @@ struct _pcie_device {
604 u8 connector_name[4]; 605 u8 connector_name[4];
605 u8 *serial_number; 606 u8 *serial_number;
606 u8 reset_timeout; 607 u8 reset_timeout;
608 u8 access_status;
607 struct kref refcount; 609 struct kref refcount;
608}; 610};
609/** 611/**
@@ -1045,6 +1047,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
1045 * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands 1047 * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
1046 * @thresh_hold: Max number of reply descriptors processed 1048 * @thresh_hold: Max number of reply descriptors processed
1047 * before updating Host Index 1049 * before updating Host Index
1050 * @drv_support_bitmap: driver's supported feature bit map
1048 * @scsi_io_cb_idx: shost generated commands 1051 * @scsi_io_cb_idx: shost generated commands
1049 * @tm_cb_idx: task management commands 1052 * @tm_cb_idx: task management commands
1050 * @scsih_cb_idx: scsih internal commands 1053 * @scsih_cb_idx: scsih internal commands
@@ -1066,6 +1069,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
1066 * @event_log: event log pointer 1069 * @event_log: event log pointer
1067 * @event_masks: events that are masked 1070 * @event_masks: events that are masked
1068 * @facts: static facts data 1071 * @facts: static facts data
1072 * @prev_fw_facts: previous fw facts data
1069 * @pfacts: static port facts data 1073 * @pfacts: static port facts data
1070 * @manu_pg0: static manufacturing page 0 1074 * @manu_pg0: static manufacturing page 0
1071 * @manu_pg10: static manufacturing page 10 1075 * @manu_pg10: static manufacturing page 10
@@ -1227,6 +1231,8 @@ struct MPT3SAS_ADAPTER {
1227 bool msix_load_balance; 1231 bool msix_load_balance;
1228 u16 thresh_hold; 1232 u16 thresh_hold;
1229 u8 high_iops_queues; 1233 u8 high_iops_queues;
1234 u32 drv_support_bitmap;
1235 bool enable_sdev_max_qd;
1230 1236
1231 /* internal commands, callback index */ 1237 /* internal commands, callback index */
1232 u8 scsi_io_cb_idx; 1238 u8 scsi_io_cb_idx;
@@ -1276,6 +1282,7 @@ struct MPT3SAS_ADAPTER {
1276 1282
1277 /* static config pages */ 1283 /* static config pages */
1278 struct mpt3sas_facts facts; 1284 struct mpt3sas_facts facts;
1285 struct mpt3sas_facts prev_fw_facts;
1279 struct mpt3sas_port_facts *pfacts; 1286 struct mpt3sas_port_facts *pfacts;
1280 Mpi2ManufacturingPage0_t manu_pg0; 1287 Mpi2ManufacturingPage0_t manu_pg0;
1281 struct Mpi2ManufacturingPage10_t manu_pg10; 1288 struct Mpi2ManufacturingPage10_t manu_pg10;
@@ -1450,6 +1457,8 @@ struct MPT3SAS_ADAPTER {
1450 GET_MSIX_INDEX get_msix_index_for_smlio; 1457 GET_MSIX_INDEX get_msix_index_for_smlio;
1451}; 1458};
1452 1459
1460#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
1461
1453typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 1462typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1454 u32 reply); 1463 u32 reply);
1455 1464
@@ -1579,6 +1588,7 @@ struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
1579void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc); 1588void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
1580struct _raid_device * 1589struct _raid_device *
1581mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle); 1590mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle);
1591void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
1582 1592
1583/* config shared API */ 1593/* config shared API */
1584u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 1594u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1733,4 +1743,20 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
1733/* NCQ Prio Handling Check */ 1743/* NCQ Prio Handling Check */
1734bool scsih_ncq_prio_supp(struct scsi_device *sdev); 1744bool scsih_ncq_prio_supp(struct scsi_device *sdev);
1735 1745
1746/**
1747 * mpt3sas_scsih_is_pcie_scsi_device - determines if device is a PCIe SCSI device
1748 * @device_info: bitfield providing information about the device.
1749 * Context: none
1750 *
1751 * Returns 1 if the device is a PCIe SCSI device, otherwise 0.
1752 */
1753static inline int
1754mpt3sas_scsih_is_pcie_scsi_device(u32 device_info)
1755{
1756 if ((device_info &
1757 MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) == MPI26_PCIE_DEVINFO_SCSI)
1758 return 1;
1759 else
1760 return 0;
1761}
1736#endif /* MPT3SAS_BASE_H_INCLUDED */ 1762#endif /* MPT3SAS_BASE_H_INCLUDED */
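
The new inline helper boils down to a mask-and-compare on the DeviceInfo bitfield. A standalone sketch of the same test, with placeholder values standing in for the real MPI26_PCIE_DEVINFO_* constants from the MPI headers:

    #include <stdio.h>

    /* Placeholder values; the real MPI26_PCIE_DEVINFO_* constants are
     * defined in the MPI 2.6 headers and may differ. */
    #define DEVINFO_MASK_DEVICE_TYPE 0x0000000Fu
    #define DEVINFO_NVME             0x00000003u
    #define DEVINFO_SCSI             0x00000004u

    static int is_pcie_scsi_device(unsigned int device_info)
    {
        /* Only the device-type field matters; other bits are ignored. */
        return (device_info & DEVINFO_MASK_DEVICE_TYPE) == DEVINFO_SCSI;
    }

    int main(void)
    {
        unsigned int info = DEVINFO_SCSI | 0x100u; /* unrelated bits set */

        printf("%d\n", is_pcie_scsi_device(info));         /* 1 */
        printf("%d\n", is_pcie_scsi_device(DEVINFO_NVME)); /* 0 */
        return 0;
    }
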
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d4ecfbbe738c..7d696952b376 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -596,8 +596,16 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
596 if (priv_data->sas_target->handle != handle) 596 if (priv_data->sas_target->handle != handle)
597 continue; 597 continue;
598 st = scsi_cmd_priv(scmd); 598 st = scsi_cmd_priv(scmd);
599 tm_request->TaskMID = cpu_to_le16(st->smid); 599
600 found = 1; 600 /*
601 * If the given TaskMID from user space is zero, then the
602 * first outstanding smid will be picked up. Otherwise, only the
603 * matching (targeted) smid is used.
604 */
605 if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
606 tm_request->TaskMID = cpu_to_le16(st->smid);
607 found = 1;
608 }
601 } 609 }
602 610
603 if (!found) { 611 if (!found) {
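
The TaskMID change makes zero act as a wildcard: a zero TaskMID from user space selects the first outstanding command for the target, while a non-zero value must match an outstanding smid exactly. The selection rule, sketched over a plain array (names hypothetical):

    #include <stdio.h>

    /* Returns the selected smid, or 0 if nothing matches. A requested
     * task_mid of 0 means "first outstanding command wins". */
    static unsigned short pick_smid(unsigned short task_mid,
                                    const unsigned short *outstanding, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (!task_mid || task_mid == outstanding[i])
                return outstanding[i];
        }
        return 0;
    }

    int main(void)
    {
        unsigned short smids[] = { 7, 12, 31 };

        printf("%u\n", pick_smid(0, smids, 3));  /* 7: first outstanding */
        printf("%u\n", pick_smid(12, smids, 3)); /* 12: exact match */
        printf("%u\n", pick_smid(99, smids, 3)); /* 0: not found */
        return 0;
    }
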
@@ -654,7 +662,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
654 size_t data_in_sz = 0; 662 size_t data_in_sz = 0;
655 long ret; 663 long ret;
656 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; 664 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
657 u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
658 665
659 issue_reset = 0; 666 issue_reset = 0;
660 667
@@ -707,6 +714,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
707 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 714 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
708 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 715 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
709 request = mpt3sas_base_get_msg_frame(ioc, smid); 716 request = mpt3sas_base_get_msg_frame(ioc, smid);
717 memset(request, 0, ioc->request_sz);
710 memcpy(request, mpi_request, karg.data_sge_offset*4); 718 memcpy(request, mpi_request, karg.data_sge_offset*4);
711 ioc->ctl_cmds.smid = smid; 719 ioc->ctl_cmds.smid = smid;
712 data_out_sz = karg.data_out_size; 720 data_out_sz = karg.data_out_size;
@@ -921,13 +929,37 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
921 Mpi2ToolboxCleanRequest_t *toolbox_request = 929 Mpi2ToolboxCleanRequest_t *toolbox_request =
922 (Mpi2ToolboxCleanRequest_t *)mpi_request; 930 (Mpi2ToolboxCleanRequest_t *)mpi_request;
923 931
924 if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) { 932 if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
933 || (toolbox_request->Tool ==
934 MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN))
925 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 935 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
926 data_in_dma, data_in_sz); 936 data_in_dma, data_in_sz);
927 } else { 937 else if (toolbox_request->Tool ==
938 MPI2_TOOLBOX_MEMORY_MOVE_TOOL) {
939 Mpi2ToolboxMemMoveRequest_t *mem_move_request =
940 (Mpi2ToolboxMemMoveRequest_t *)request;
941 Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL;
942
943 ioc->build_sg_mpi(ioc, psge, data_out_dma,
944 data_out_sz, data_in_dma, data_in_sz);
945 if (data_out_sz && !data_in_sz) {
946 dst =
947 (Mpi2SGESimple64_t *)&mem_move_request->SGL;
948 src = (void *)dst + ioc->sge_size;
949
950 memcpy(&tmp, src, ioc->sge_size);
951 memcpy(src, dst, ioc->sge_size);
952 memcpy(dst, &tmp, ioc->sge_size);
953 }
954 if (ioc->logging_level & MPT_DEBUG_TM) {
955 ioc_info(ioc,
956 "Mpi2ToolboxMemMoveRequest_t request msg\n");
957 _debug_dump_mf(mem_move_request,
958 ioc->request_sz/4);
959 }
960 } else
928 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 961 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
929 data_in_dma, data_in_sz); 962 data_in_dma, data_in_sz);
930 }
931 ioc->put_smid_default(ioc, smid); 963 ioc->put_smid_default(ioc, smid);
932 break; 964 break;
933 } 965 }
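
For the MEMORY MOVE tool, build_sg_mpi() lays out two simple 64-bit SGEs back to back; when the ioctl carries only an outbound buffer, the hunk above swaps the pair through a stack temporary, presumably so source and destination land in the order the firmware expects. The three-memcpy swap of two fixed-size records, in isolation:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for Mpi2SGESimple64_t; only its size matters here. */
    struct sge64 {
        unsigned int flags_length;
        unsigned long long address;
    };

    /* Swap two adjacent SGEs through a temporary, as the driver does
     * with memcpy() and ioc->sge_size. */
    static void swap_sges(struct sge64 *dst, struct sge64 *src)
    {
        struct sge64 tmp;

        memcpy(&tmp, src, sizeof(tmp));
        memcpy(src, dst, sizeof(*src));
        memcpy(dst, &tmp, sizeof(*dst));
    }

    int main(void)
    {
        struct sge64 sgl[2] = { { 1, 0x1000 }, { 2, 0x2000 } };

        swap_sges(&sgl[0], &sgl[1]);
        printf("%u@%llx %u@%llx\n",
               sgl[0].flags_length, sgl[0].address,
               sgl[1].flags_length, sgl[1].address); /* 2@2000 1@1000 */
        return 0;
    }
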
@@ -1047,12 +1079,14 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
1047 mpt3sas_halt_firmware(ioc); 1079 mpt3sas_halt_firmware(ioc);
1048 pcie_device = mpt3sas_get_pdev_by_handle(ioc, 1080 pcie_device = mpt3sas_get_pdev_by_handle(ioc,
1049 le16_to_cpu(mpi_request->FunctionDependent1)); 1081 le16_to_cpu(mpi_request->FunctionDependent1));
1050 if (pcie_device && (!ioc->tm_custom_handling)) 1082 if (pcie_device && (!ioc->tm_custom_handling) &&
1083 (!(mpt3sas_scsih_is_pcie_scsi_device(
1084 pcie_device->device_info))))
1051 mpt3sas_scsih_issue_locked_tm(ioc, 1085 mpt3sas_scsih_issue_locked_tm(ioc,
1052 le16_to_cpu(mpi_request->FunctionDependent1), 1086 le16_to_cpu(mpi_request->FunctionDependent1),
1053 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 1087 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1054 0, pcie_device->reset_timeout, 1088 0, pcie_device->reset_timeout,
1055 tr_method); 1089 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
1056 else 1090 else
1057 mpt3sas_scsih_issue_locked_tm(ioc, 1091 mpt3sas_scsih_issue_locked_tm(ioc,
1058 le16_to_cpu(mpi_request->FunctionDependent1), 1092 le16_to_cpu(mpi_request->FunctionDependent1),
@@ -3278,9 +3312,8 @@ diag_trigger_scsi_store(struct device *cdev,
3278 ssize_t sz; 3312 ssize_t sz;
3279 3313
3280 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3314 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3281 sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count); 3315 sz = min(sizeof(ioc->diag_trigger_scsi), count);
3282 memset(&ioc->diag_trigger_scsi, 0, 3316 memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
3283 sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3284 memcpy(&ioc->diag_trigger_scsi, buf, sz); 3317 memcpy(&ioc->diag_trigger_scsi, buf, sz);
3285 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES) 3318 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
3286 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES; 3319 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
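
The diag_trigger_scsi fix is worth noting: the old code bounded the copy with sizeof(struct SL_WH_SCSI_TRIGGERS_T) but cleared the field with sizeof(struct SL_WH_EVENT_TRIGGERS_T), a mismatched type. Sizing both off the object itself keeps them in sync by construction; a minimal sketch of the idiom:

    #include <string.h>

    struct scsi_triggers { unsigned int valid_entries; unsigned int data[20]; };

    struct adapter {
        struct scsi_triggers diag_trigger_scsi;
    };

    /* sizeof(field) rather than sizeof(struct type): if the field's type
     * ever changes, the memset and the copy bound follow automatically. */
    static void store_triggers(struct adapter *ioc, const void *buf, size_t count)
    {
        size_t sz = count < sizeof(ioc->diag_trigger_scsi) ?
                count : sizeof(ioc->diag_trigger_scsi);

        memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
        memcpy(&ioc->diag_trigger_scsi, buf, sz);
    }

    int main(void)
    {
        struct adapter ioc;
        unsigned int user_buf[4] = { 2, 10, 20, 30 };

        store_triggers(&ioc, user_buf, sizeof(user_buf));
        return ioc.diag_trigger_scsi.valid_entries == 2 ? 0 : 1;
    }
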
@@ -3349,6 +3382,125 @@ static DEVICE_ATTR_RW(diag_trigger_mpi);
3349 3382
3350/*****************************************/ 3383/*****************************************/
3351 3384
3385/**
3386 * drv_support_bitmap_show - driver supported feature bitmap
3387 * @cdev - pointer to embedded class device
3388 * @buf - the buffer returned
3389 *
3390 * A sysfs 'read-only' shost attribute.
3391 */
3392static ssize_t
3393drv_support_bitmap_show(struct device *cdev,
3394 struct device_attribute *attr, char *buf)
3395{
3396 struct Scsi_Host *shost = class_to_shost(cdev);
3397 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3398
3399 return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
3400}
3401static DEVICE_ATTR_RO(drv_support_bitmap);
3402
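
User space can poll this attribute to discover driver capabilities before issuing the corresponding ioctls. A minimal reader, assuming host0 (substitute the actual SCSI host number):

    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors MPT_DRV_SUPPORT_BITMAP_MEMMOVE from mpt3sas_base.h. */
    #define DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001u

    int main(void)
    {
        const char *path = "/sys/class/scsi_host/host0/drv_support_bitmap";
        char buf[32];
        unsigned long bitmap;
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (!fgets(buf, sizeof(buf), f)) {
            fclose(f);
            return 1;
        }
        fclose(f);

        bitmap = strtoul(buf, NULL, 0); /* the attribute prints "0x%08x" */
        printf("memory-move tool supported: %s\n",
               (bitmap & DRV_SUPPORT_BITMAP_MEMMOVE) ? "yes" : "no");
        return 0;
    }
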
3403/**
3404 * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
3405 * @cdev - pointer to embedded class device
3406 * @buf - the buffer returned
3407 *
3408 * A sysfs read/write shost attribute. This attribute is used to set the
3409 * target's queue depth to the HBA IO queue depth if this attribute is enabled.
3410 */
3411static ssize_t
3412enable_sdev_max_qd_show(struct device *cdev,
3413 struct device_attribute *attr, char *buf)
3414{
3415 struct Scsi_Host *shost = class_to_shost(cdev);
3416 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3417
3418 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
3419}
3420
3421/**
3422 * enable_sdev_max_qd_store - Enable/disable sdev max qd
3423 * @cdev - pointer to embedded class device
3424 * @buf - the buffer containing the value written
3425 *
3426 * A sysfs read/write shost attribute. This attribute is used to set the
3427 * target's queue depth to the HBA IO queue depth if this attribute is enabled.
3428 * If this attribute is disabled, then targets fall back to their
3429 * corresponding default queue depth.
3430 */
3431static ssize_t
3432enable_sdev_max_qd_store(struct device *cdev,
3433 struct device_attribute *attr, const char *buf, size_t count)
3434{
3435 struct Scsi_Host *shost = class_to_shost(cdev);
3436 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3437 struct MPT3SAS_DEVICE *sas_device_priv_data;
3438 struct MPT3SAS_TARGET *sas_target_priv_data;
3439 int val = 0;
3440 struct scsi_device *sdev;
3441 struct _raid_device *raid_device;
3442 int qdepth;
3443
3444 if (kstrtoint(buf, 0, &val) != 0)
3445 return -EINVAL;
3446
3447 switch (val) {
3448 case 0:
3449 ioc->enable_sdev_max_qd = 0;
3450 shost_for_each_device(sdev, ioc->shost) {
3451 sas_device_priv_data = sdev->hostdata;
3452 if (!sas_device_priv_data)
3453 continue;
3454 sas_target_priv_data = sas_device_priv_data->sas_target;
3455 if (!sas_target_priv_data)
3456 continue;
3457
3458 if (sas_target_priv_data->flags &
3459 MPT_TARGET_FLAGS_VOLUME) {
3460 raid_device =
3461 mpt3sas_raid_device_find_by_handle(ioc,
3462 sas_target_priv_data->handle);
3463
3464 switch (raid_device->volume_type) {
3465 case MPI2_RAID_VOL_TYPE_RAID0:
3466 if (raid_device->device_info &
3467 MPI2_SAS_DEVICE_INFO_SSP_TARGET)
3468 qdepth =
3469 MPT3SAS_SAS_QUEUE_DEPTH;
3470 else
3471 qdepth =
3472 MPT3SAS_SATA_QUEUE_DEPTH;
3473 break;
3474 case MPI2_RAID_VOL_TYPE_RAID1E:
3475 case MPI2_RAID_VOL_TYPE_RAID1:
3476 case MPI2_RAID_VOL_TYPE_RAID10:
3477 case MPI2_RAID_VOL_TYPE_UNKNOWN:
3478 default:
3479 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
3480 }
3481 } else if (sas_target_priv_data->flags &
3482 MPT_TARGET_FLAGS_PCIE_DEVICE)
3483 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
3484 else
3485 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
3486
3487 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
3488 }
3489 break;
3490 case 1:
3491 ioc->enable_sdev_max_qd = 1;
3492 shost_for_each_device(sdev, ioc->shost)
3493 mpt3sas_scsih_change_queue_depth(sdev,
3494 shost->can_queue);
3495 break;
3496 default:
3497 return -EINVAL;
3498 }
3499
3500 return strlen(buf);
3501}
3502static DEVICE_ATTR_RW(enable_sdev_max_qd);
3503
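
The store handler accepts exactly 0 or 1 and rejects anything else with -EINVAL; in the kernel that is kstrtoint() plus the switch above. The same strict-parse shape in portable C, with strtol standing in for kstrtoint:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Only "0" or "1" (with an optional trailing newline) pass. */
    static int parse_enable(const char *buf, int *enabled)
    {
        char *end;
        long val = strtol(buf, &end, 0);

        if (end == buf || (*end && *end != '\n'))
            return -EINVAL;
        switch (val) {
        case 0:
        case 1:
            *enabled = (int)val;
            return 0;
        default:
            return -EINVAL;
        }
    }

    int main(void)
    {
        int on = -1;

        printf("%d %d\n", parse_enable("1\n", &on), on); /* 0 1 */
        printf("%d\n", parse_enable("2\n", &on));        /* -EINVAL */
        return 0;
    }

Flipping the mode at runtime is then just a write to the attribute, e.g. echo 1 > /sys/class/scsi_host/hostN/enable_sdev_max_qd.
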
3352struct device_attribute *mpt3sas_host_attrs[] = { 3504struct device_attribute *mpt3sas_host_attrs[] = {
3353 &dev_attr_version_fw, 3505 &dev_attr_version_fw,
3354 &dev_attr_version_bios, 3506 &dev_attr_version_bios,
@@ -3374,7 +3526,9 @@ struct device_attribute *mpt3sas_host_attrs[] = {
3374 &dev_attr_diag_trigger_event, 3526 &dev_attr_diag_trigger_event,
3375 &dev_attr_diag_trigger_scsi, 3527 &dev_attr_diag_trigger_scsi,
3376 &dev_attr_diag_trigger_mpi, 3528 &dev_attr_diag_trigger_mpi,
3529 &dev_attr_drv_support_bitmap,
3377 &dev_attr_BRM_status, 3530 &dev_attr_BRM_status,
3531 &dev_attr_enable_sdev_max_qd,
3378 NULL, 3532 NULL,
3379}; 3533};
3380 3534
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 717ba0845a2a..d0c2f8d6f2a2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -155,6 +155,10 @@ static int prot_mask = -1;
155module_param(prot_mask, int, 0444); 155module_param(prot_mask, int, 0444);
156MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 "); 156MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157 157
158static bool enable_sdev_max_qd;
159module_param(enable_sdev_max_qd, bool, 0444);
160MODULE_PARM_DESC(enable_sdev_max_qd,
161 "Enable sdev max qd as can_queue, def=disabled(0)");
158 162
159/* raid transport support */ 163/* raid transport support */
160static struct raid_template *mpt3sas_raid_template; 164static struct raid_template *mpt3sas_raid_template;
@@ -1152,6 +1156,11 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1152 list_add_tail(&pcie_device->list, &ioc->pcie_device_list); 1156 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1153 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1157 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1154 1158
1159 if (pcie_device->access_status ==
1160 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1161 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1162 return;
1163 }
1155 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) { 1164 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1156 _scsih_pcie_device_remove(ioc, pcie_device); 1165 _scsih_pcie_device_remove(ioc, pcie_device);
1157 } else if (!pcie_device->starget) { 1166 } else if (!pcie_device->starget) {
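
Devices reporting DEVICE_BLOCKED stay on the driver's internal PCIe device list but are never handed to scsi_add_device(), so the midlayer never sees a target it cannot talk to. The gate, reduced to its essence (the constant is a placeholder, not the real MPI value):

    #include <stdbool.h>
    #include <stdio.h>

    #define ASTATUS_DEVICE_BLOCKED 0x02 /* placeholder value */

    /* Track every discovered device internally, but only expose devices
     * whose access status permits IO to the SCSI midlayer. */
    static bool should_expose(unsigned char access_status)
    {
        return access_status != ASTATUS_DEVICE_BLOCKED;
    }

    int main(void)
    {
        printf("%d\n", should_expose(0));                      /* 1 */
        printf("%d\n", should_expose(ASTATUS_DEVICE_BLOCKED)); /* 0 */
        return 0;
    }
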
@@ -1196,7 +1205,9 @@ _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1196 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1205 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1197 pcie_device_get(pcie_device); 1206 pcie_device_get(pcie_device);
1198 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list); 1207 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1199 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL); 1208 if (pcie_device->access_status !=
1209 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1210 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1200 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1211 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1201} 1212}
1202/** 1213/**
@@ -1433,17 +1444,20 @@ _scsih_is_end_device(u32 device_info)
1433} 1444}
1434 1445
1435/** 1446/**
1436 * _scsih_is_nvme_device - determines if device is an nvme device 1447 * _scsih_is_nvme_pciescsi_device - determines if
1448 * device is a PCIe NVMe/SCSI device
1437 * @device_info: bitfield providing information about the device. 1449 * @device_info: bitfield providing information about the device.
1438 * Context: none 1450 * Context: none
1439 * 1451 *
1440 * Return: 1 if nvme device. 1452 * Returns 1 if the device is a PCIe NVMe or SCSI device.
1441 */ 1453 */
1442static int 1454static int
1443_scsih_is_nvme_device(u32 device_info) 1455_scsih_is_nvme_pciescsi_device(u32 device_info)
1444{ 1456{
1445 if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) 1457 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1446 == MPI26_PCIE_DEVINFO_NVME) 1458 == MPI26_PCIE_DEVINFO_NVME) ||
1459 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1460 == MPI26_PCIE_DEVINFO_SCSI))
1447 return 1; 1461 return 1;
1448 else 1462 else
1449 return 0; 1463 return 0;
@@ -1509,7 +1523,13 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1509 1523
1510 max_depth = shost->can_queue; 1524 max_depth = shost->can_queue;
1511 1525
1512 /* limit max device queue for SATA to 32 */ 1526 /*
1527 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1528 * is disabled.
1529 */
1530 if (ioc->enable_sdev_max_qd)
1531 goto not_sata;
1532
1513 sas_device_priv_data = sdev->hostdata; 1533 sas_device_priv_data = sdev->hostdata;
1514 if (!sas_device_priv_data) 1534 if (!sas_device_priv_data)
1515 goto not_sata; 1535 goto not_sata;
@@ -1539,6 +1559,25 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1539} 1559}
1540 1560
1541/** 1561/**
1562 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1563 * @sdev: scsi device struct
1564 * @qdepth: requested queue depth
1565 *
1566 * Returns nothing.
1567 */
1568void
1569mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1570{
1571 struct Scsi_Host *shost = sdev->host;
1572 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1573
1574 if (ioc->enable_sdev_max_qd)
1575 qdepth = shost->can_queue;
1576
1577 scsih_change_queue_depth(sdev, qdepth);
1578}
1579
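
The wrapper centralizes the policy: with enable_sdev_max_qd set, every device is driven at the full HBA depth; otherwise the requested depth goes through the normal path, which clamps SATA devices to 32. A sketch of the decision (depths are the driver's defaults; the clamp logic is simplified):

    #include <stdio.h>

    #define SATA_QUEUE_DEPTH 32
    #define CAN_QUEUE        1024 /* example HBA IO queue depth */

    static int effective_qdepth(int requested, int is_sata, int max_qd_enabled)
    {
        if (max_qd_enabled)
            return CAN_QUEUE; /* override with the HBA-wide depth */
        if (is_sata && requested > SATA_QUEUE_DEPTH)
            return SATA_QUEUE_DEPTH; /* SATA clamp when disabled */
        return requested;
    }

    int main(void)
    {
        printf("%d\n", effective_qdepth(254, 1, 0)); /* 32 */
        printf("%d\n", effective_qdepth(254, 1, 1)); /* 1024 */
        return 0;
    }
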
1580/**
1542 * scsih_target_alloc - target add routine 1581 * scsih_target_alloc - target add routine
1543 * @starget: scsi target struct 1582 * @starget: scsi target struct
1544 * 1583 *
@@ -2296,7 +2335,7 @@ scsih_slave_configure(struct scsi_device *sdev)
2296 MPT3SAS_RAID_MAX_SECTORS); 2335 MPT3SAS_RAID_MAX_SECTORS);
2297 } 2336 }
2298 2337
2299 scsih_change_queue_depth(sdev, qdepth); 2338 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2300 2339
2301 /* raid transport support */ 2340 /* raid transport support */
2302 if (!ioc->is_warpdrive) 2341 if (!ioc->is_warpdrive)
@@ -2360,7 +2399,7 @@ scsih_slave_configure(struct scsi_device *sdev)
2360 2399
2361 pcie_device_put(pcie_device); 2400 pcie_device_put(pcie_device);
2362 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2401 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2363 scsih_change_queue_depth(sdev, qdepth); 2402 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2364 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be 2403 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2365 ** merged and can eliminate holes created during merging 2404 ** merged and can eliminate holes created during merging
2366 ** operation. 2405 ** operation.
@@ -2420,7 +2459,7 @@ scsih_slave_configure(struct scsi_device *sdev)
2420 _scsih_display_sata_capabilities(ioc, handle, sdev); 2459 _scsih_display_sata_capabilities(ioc, handle, sdev);
2421 2460
2422 2461
2423 scsih_change_queue_depth(sdev, qdepth); 2462 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2424 2463
2425 if (ssp_target) { 2464 if (ssp_target) {
2426 sas_read_port_mode_page(sdev); 2465 sas_read_port_mode_page(sdev);
@@ -2872,7 +2911,8 @@ scsih_abort(struct scsi_cmnd *scmd)
2872 2911
2873 handle = sas_device_priv_data->sas_target->handle; 2912 handle = sas_device_priv_data->sas_target->handle;
2874 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); 2913 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2875 if (pcie_device && (!ioc->tm_custom_handling)) 2914 if (pcie_device && (!ioc->tm_custom_handling) &&
2915 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
2876 timeout = ioc->nvme_abort_timeout; 2916 timeout = ioc->nvme_abort_timeout;
2877 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun, 2917 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2878 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 2918 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
@@ -2943,11 +2983,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
2943 2983
2944 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); 2984 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2945 2985
2946 if (pcie_device && (!ioc->tm_custom_handling)) { 2986 if (pcie_device && (!ioc->tm_custom_handling) &&
2987 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
2947 tr_timeout = pcie_device->reset_timeout; 2988 tr_timeout = pcie_device->reset_timeout;
2948 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 2989 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
2949 } else 2990 } else
2950 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 2991 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2992
2951 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun, 2993 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2952 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 2994 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
2953 tr_timeout, tr_method); 2995 tr_timeout, tr_method);
@@ -3020,7 +3062,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
3020 3062
3021 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); 3063 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3022 3064
3023 if (pcie_device && (!ioc->tm_custom_handling)) { 3065 if (pcie_device && (!ioc->tm_custom_handling) &&
3066 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3024 tr_timeout = pcie_device->reset_timeout; 3067 tr_timeout = pcie_device->reset_timeout;
3025 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 3068 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3026 } else 3069 } else
@@ -3598,7 +3641,9 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3598 sas_address = pcie_device->wwid; 3641 sas_address = pcie_device->wwid;
3599 } 3642 }
3600 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 3643 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3601 if (pcie_device && (!ioc->tm_custom_handling)) 3644 if (pcie_device && (!ioc->tm_custom_handling) &&
3645 (!(mpt3sas_scsih_is_pcie_scsi_device(
3646 pcie_device->device_info))))
3602 tr_method = 3647 tr_method =
3603 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 3648 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3604 else 3649 else
@@ -4654,11 +4699,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4654 * since we're lockless at this point 4699 * since we're lockless at this point
4655 */ 4700 */
4656 do { 4701 do {
4657 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { 4702 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
4658 scmd->result = SAM_STAT_BUSY; 4703 return SCSI_MLQUEUE_DEVICE_BUSY;
4659 scmd->scsi_done(scmd);
4660 return 0;
4661 }
4662 } while (_scsih_set_satl_pending(scmd, true)); 4704 } while (_scsih_set_satl_pending(scmd, true));
4663 4705
4664 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4706 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
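
Returning SCSI_MLQUEUE_DEVICE_BUSY lets the midlayer requeue and retry the command itself, instead of the old behaviour of completing it with SAM_STAT_BUSY. The lockless gate around ATA passthrough — a racy test_bit() pre-check followed by the authoritative test_and_set_bit() — can be modelled with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint ata_command_pending;

    /* Returns true if we won the right to issue the passthrough command;
     * false means the caller should requeue (SCSI_MLQUEUE_DEVICE_BUSY). */
    static bool satl_try_acquire(void)
    {
        if (atomic_load(&ata_command_pending) & 1u)
            return false;                /* cheap pre-check */
        return (atomic_fetch_or(&ata_command_pending, 1u) & 1u) == 0;
    }

    static void satl_release(void)
    {
        atomic_fetch_and(&ata_command_pending, ~1u);
    }

    int main(void)
    {
        printf("%d\n", satl_try_acquire()); /* 1: acquired */
        printf("%d\n", satl_try_acquire()); /* 0: busy */
        satl_release();
        printf("%d\n", satl_try_acquire()); /* 1: acquired again */
        return 0;
    }
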
@@ -6456,24 +6498,17 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6456/** 6498/**
6457 * _scsih_sas_device_status_change_event - handle device status change 6499 * _scsih_sas_device_status_change_event - handle device status change
6458 * @ioc: per adapter object 6500 * @ioc: per adapter object
6459 * @fw_event: The fw_event_work object 6501 * @event_data: The fw event data
6460 * Context: user. 6502 * Context: user.
6461 */ 6503 */
6462static void 6504static void
6463_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, 6505_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6464 struct fw_event_work *fw_event) 6506 Mpi2EventDataSasDeviceStatusChange_t *event_data)
6465{ 6507{
6466 struct MPT3SAS_TARGET *target_priv_data; 6508 struct MPT3SAS_TARGET *target_priv_data;
6467 struct _sas_device *sas_device; 6509 struct _sas_device *sas_device;
6468 u64 sas_address; 6510 u64 sas_address;
6469 unsigned long flags; 6511 unsigned long flags;
6470 Mpi2EventDataSasDeviceStatusChange_t *event_data =
6471 (Mpi2EventDataSasDeviceStatusChange_t *)
6472 fw_event->event_data;
6473
6474 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6475 _scsih_sas_device_status_change_event_debug(ioc,
6476 event_data);
6477 6512
6478 /* In MPI Revision K (0xC), the internal device reset complete was 6513 /* In MPI Revision K (0xC), the internal device reset complete was
6479 * implemented, so avoid setting tm_busy flag for older firmware. 6514 * implemented, so avoid setting tm_busy flag for older firmware.
@@ -6505,6 +6540,12 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6505 else 6540 else
6506 target_priv_data->tm_busy = 0; 6541 target_priv_data->tm_busy = 0;
6507 6542
6543 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6544 ioc_info(ioc,
6545 "%s tm_busy flag for handle(0x%04x)\n",
6546 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
6547 target_priv_data->handle);
6548
6508out: 6549out:
6509 if (sas_device) 6550 if (sas_device)
6510 sas_device_put(sas_device); 6551 sas_device_put(sas_device);
@@ -6539,6 +6580,11 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6539 break; 6580 break;
6540 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED: 6581 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6541 desc = "PCIe device blocked"; 6582 desc = "PCIe device blocked";
6583 ioc_info(ioc,
6584 "Device with Access Status (%s): wwid(0x%016llx), "
6585 "handle(0x%04x)\n ll only be added to the internal list",
6586 desc, (u64)wwid, handle);
6587 rc = 0;
6542 break; 6588 break;
6543 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED: 6589 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6544 desc = "PCIe device mem space access failed"; 6590 desc = "PCIe device mem space access failed";
@@ -6643,7 +6689,8 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6643 pcie_device->enclosure_level, 6689 pcie_device->enclosure_level,
6644 pcie_device->connector_name); 6690 pcie_device->connector_name);
6645 6691
6646 if (pcie_device->starget) 6692 if (pcie_device->starget && (pcie_device->access_status !=
6693 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
6647 scsi_remove_target(&pcie_device->starget->dev); 6694 scsi_remove_target(&pcie_device->starget->dev);
6648 dewtprintk(ioc, 6695 dewtprintk(ioc,
6649 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", 6696 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
@@ -6694,7 +6741,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6694 6741
6695 /* check if this is end device */ 6742 /* check if this is end device */
6696 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 6743 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6697 if (!(_scsih_is_nvme_device(device_info))) 6744 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
6698 return; 6745 return;
6699 6746
6700 wwid = le64_to_cpu(pcie_device_pg0.WWID); 6747 wwid = le64_to_cpu(pcie_device_pg0.WWID);
@@ -6709,6 +6756,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6709 if (unlikely(pcie_device->handle != handle)) { 6756 if (unlikely(pcie_device->handle != handle)) {
6710 starget = pcie_device->starget; 6757 starget = pcie_device->starget;
6711 sas_target_priv_data = starget->hostdata; 6758 sas_target_priv_data = starget->hostdata;
6759 pcie_device->access_status = pcie_device_pg0.AccessStatus;
6712 starget_printk(KERN_INFO, starget, 6760 starget_printk(KERN_INFO, starget,
6713 "handle changed from(0x%04x) to (0x%04x)!!!\n", 6761 "handle changed from(0x%04x) to (0x%04x)!!!\n",
6714 pcie_device->handle, handle); 6762 pcie_device->handle, handle);
@@ -6803,7 +6851,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6803 pcie_device_pg0.AccessStatus)) 6851 pcie_device_pg0.AccessStatus))
6804 return 0; 6852 return 0;
6805 6853
6806 if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo)))) 6854 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
6855 (pcie_device_pg0.DeviceInfo))))
6807 return 0; 6856 return 0;
6808 6857
6809 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid); 6858 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
@@ -6813,6 +6862,31 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6813 return 0; 6862 return 0;
6814 } 6863 }
6815 6864
6865 /* PCIe Device Page 2 contains read-only information about a
6866 * specific NVMe device; therefore, this page is only
6867 * valid for NVMe devices, so skip it for PCIe devices of type SCSI.
6868 */
6869 if (!(mpt3sas_scsih_is_pcie_scsi_device(
6870 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
6871 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
6872 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
6873 handle)) {
6874 ioc_err(ioc,
6875 "failure at %s:%d/%s()!\n", __FILE__,
6876 __LINE__, __func__);
6877 return 0;
6878 }
6879
6880 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6881 MPI2_IOCSTATUS_MASK;
6882 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6883 ioc_err(ioc,
6884 "failure at %s:%d/%s()!\n", __FILE__,
6885 __LINE__, __func__);
6886 return 0;
6887 }
6888 }
6889
6816 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); 6890 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
6817 if (!pcie_device) { 6891 if (!pcie_device) {
6818 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6892 ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -6824,6 +6898,7 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6824 pcie_device->id = ioc->pcie_target_id++; 6898 pcie_device->id = ioc->pcie_target_id++;
6825 pcie_device->channel = PCIE_CHANNEL; 6899 pcie_device->channel = PCIE_CHANNEL;
6826 pcie_device->handle = handle; 6900 pcie_device->handle = handle;
6901 pcie_device->access_status = pcie_device_pg0.AccessStatus;
6827 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 6902 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6828 pcie_device->wwid = wwid; 6903 pcie_device->wwid = wwid;
6829 pcie_device->port_num = pcie_device_pg0.PortNum; 6904 pcie_device->port_num = pcie_device_pg0.PortNum;
@@ -6855,27 +6930,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6855 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 6930 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6856 } 6931 }
6857 /* TODO -- Add device name once FW supports it */ 6932 /* TODO -- Add device name once FW supports it */
6858 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, 6933 if (!(mpt3sas_scsih_is_pcie_scsi_device(
6859 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) { 6934 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
6860 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6935 pcie_device->nvme_mdts =
6861 __FILE__, __LINE__, __func__); 6936 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6862 kfree(pcie_device); 6937 if (pcie_device_pg2.ControllerResetTO)
6863 return 0; 6938 pcie_device->reset_timeout =
6864 } 6939 pcie_device_pg2.ControllerResetTO;
6865 6940 else
6866 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 6941 pcie_device->reset_timeout = 30;
6867 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6942 } else
6868 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6869 __FILE__, __LINE__, __func__);
6870 kfree(pcie_device);
6871 return 0;
6872 }
6873 pcie_device->nvme_mdts =
6874 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6875 if (pcie_device_pg2.ControllerResetTO)
6876 pcie_device->reset_timeout =
6877 pcie_device_pg2.ControllerResetTO;
6878 else
6879 pcie_device->reset_timeout = 30; 6943 pcie_device->reset_timeout = 30;
6880 6944
6881 if (ioc->wait_for_discovery_to_complete) 6945 if (ioc->wait_for_discovery_to_complete)
@@ -8507,6 +8571,8 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8507 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) 8571 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8508 && (pcie_device->slot == le16_to_cpu( 8572 && (pcie_device->slot == le16_to_cpu(
8509 pcie_device_pg0->Slot))) { 8573 pcie_device_pg0->Slot))) {
8574 pcie_device->access_status =
8575 pcie_device_pg0->AccessStatus;
8510 pcie_device->responding = 1; 8576 pcie_device->responding = 1;
8511 starget = pcie_device->starget; 8577 starget = pcie_device->starget;
8512 if (starget && starget->hostdata) { 8578 if (starget && starget->hostdata) {
@@ -8594,7 +8660,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8594 } 8660 }
8595 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 8661 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8596 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 8662 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8597 if (!(_scsih_is_nvme_device(device_info))) 8663 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8598 continue; 8664 continue;
8599 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); 8665 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8600 } 8666 }
@@ -9175,7 +9241,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9175 break; 9241 break;
9176 } 9242 }
9177 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 9243 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9178 if (!(_scsih_is_nvme_device( 9244 if (!(_scsih_is_nvme_pciescsi_device(
9179 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) 9245 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9180 continue; 9246 continue;
9181 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, 9247 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
@@ -9308,7 +9374,10 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9308 _scsih_sas_topology_change_event(ioc, fw_event); 9374 _scsih_sas_topology_change_event(ioc, fw_event);
9309 break; 9375 break;
9310 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 9376 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9311 _scsih_sas_device_status_change_event(ioc, fw_event); 9377 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9378 _scsih_sas_device_status_change_event_debug(ioc,
9379 (Mpi2EventDataSasDeviceStatusChange_t *)
9380 fw_event->event_data);
9312 break; 9381 break;
9313 case MPI2_EVENT_SAS_DISCOVERY: 9382 case MPI2_EVENT_SAS_DISCOVERY:
9314 _scsih_sas_discovery_event(ioc, fw_event); 9383 _scsih_sas_discovery_event(ioc, fw_event);
@@ -9481,6 +9550,10 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9481 break; 9550 break;
9482 } 9551 }
9483 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 9552 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9553 _scsih_sas_device_status_change_event(ioc,
9554 (Mpi2EventDataSasDeviceStatusChange_t *)
9555 mpi_reply->EventData);
9556 break;
9484 case MPI2_EVENT_IR_OPERATION_STATUS: 9557 case MPI2_EVENT_IR_OPERATION_STATUS:
9485 case MPI2_EVENT_SAS_DISCOVERY: 9558 case MPI2_EVENT_SAS_DISCOVERY:
9486 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 9559 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
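
Taken together, these two hunks move the SAS device status-change handling from the deferred fw-work path into the interrupt-time event callback, leaving only the debug print in the worker, so the tm_busy flag is flipped as soon as the event arrives. The general split — cheap state updates inline, anything heavyweight deferred — in sketch form (a sketch of the pattern, not the driver's exact constraints):

    #include <stdio.h>

    enum event { EV_STATUS_CHANGE, EV_TOPOLOGY_CHANGE };

    static void update_tm_busy(void) { puts("inline: tm_busy updated"); }
    static void queue_fw_work(const char *what)
    {
        printf("deferred to worker: %s\n", what);
    }

    /* Called from interrupt context: only touch state that is safe to
     * update here; everything else goes to the work queue. */
    static void event_callback(enum event ev)
    {
        switch (ev) {
        case EV_STATUS_CHANGE:
            update_tm_busy();               /* cheap flag flip, do now */
            break;
        case EV_TOPOLOGY_CHANGE:
            queue_fw_work("topology scan"); /* may sleep, defer */
            break;
        }
    }

    int main(void)
    {
        event_callback(EV_STATUS_CHANGE);
        event_callback(EV_TOPOLOGY_CHANGE);
        return 0;
    }
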
@@ -10039,6 +10112,12 @@ _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10039 pcie_device_put(pcie_device); 10112 pcie_device_put(pcie_device);
10040 continue; 10113 continue;
10041 } 10114 }
10115 if (pcie_device->access_status ==
10116 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
10117 pcie_device_make_active(ioc, pcie_device);
10118 pcie_device_put(pcie_device);
10119 continue;
10120 }
10042 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, 10121 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10043 pcie_device->id, 0); 10122 pcie_device->id, 0);
10044 if (rc) { 10123 if (rc) {
@@ -10453,6 +10532,13 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10453 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; 10532 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10454 ioc->logging_level = logging_level; 10533 ioc->logging_level = logging_level;
10455 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; 10534 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10535 /*
10536 * Enable MEMORY MOVE support flag.
10537 */
10538 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
10539
10540 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
10541
10456 /* misc semaphores and spin locks */ 10542 /* misc semaphores and spin locks */
10457 mutex_init(&ioc->reset_in_progress_mutex); 10543 mutex_init(&ioc->reset_in_progress_mutex);
10458 /* initializing pci_access_mutex lock */ 10544 /* initializing pci_access_mutex lock */
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index e6a95498ac0d..e0b427fdf818 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -3910,11 +3910,14 @@ static void __init ncr_prepare_setting(struct ncb *np)
3910 np->scsi_mode = SMODE_HVD; 3910 np->scsi_mode = SMODE_HVD;
3911 break; 3911 break;
3912 } 3912 }
3913 /* fall through */
3913 case 3: /* SYMBIOS controllers report HVD through GPIO3 */ 3914 case 3: /* SYMBIOS controllers report HVD through GPIO3 */
3914 if (INB(nc_gpreg) & 0x08) 3915 if (INB(nc_gpreg) & 0x08)
3915 break; 3916 break;
3917 /* fall through */
3916 case 2: /* Set HVD unconditionally */ 3918 case 2: /* Set HVD unconditionally */
3917 np->scsi_mode = SMODE_HVD; 3919 np->scsi_mode = SMODE_HVD;
3920 /* fall through */
3918 case 1: /* Trust previous settings for HVD */ 3921 case 1: /* Trust previous settings for HVD */
3919 if (np->sv_stest2 & 0x20) 3922 if (np->sv_stest2 & 0x20)
3920 np->scsi_mode = SMODE_HVD; 3923 np->scsi_mode = SMODE_HVD;
@@ -6714,6 +6717,7 @@ void ncr_int_sir (struct ncb *np)
6714 OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0])); 6717 OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0]));
6715 return; 6718 return;
6716 } 6719 }
6720 /* fall through */
6717 case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */ 6721 case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */
6718 case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */ 6722 case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */
6719 case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */ 6723 case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */
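
The ncr53c8xx hunks add no logic; they only annotate deliberate case fall-through so the switches compile cleanly under -Wimplicit-fallthrough. The comment form used here predates the fallthrough pseudo-keyword adopted by later kernels. In miniature:

    #include <stdio.h>

    /* Build with -Wimplicit-fallthrough: the annotated fall-through is
     * accepted, an unannotated one would warn. */
    static int scsi_mode(int setting, int gpio3_set)
    {
        int hvd = 0;

        switch (setting) {
        case 3: /* HVD reported through GPIO3 */
            if (gpio3_set)
                break;
            /* fall through */
        case 2: /* set HVD unconditionally */
            hvd = 1;
            break;
        default:
            break;
        }
        return hvd;
    }

    int main(void)
    {
        printf("%d %d\n", scsi_mode(3, 1), scsi_mode(3, 0)); /* 0 1 */
        return 0;
    }
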
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 9453705f643a..7e48154e11c3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1308,28 +1308,22 @@ out:
1308 1308
1309int pm8001_abort_task_set(struct domain_device *dev, u8 *lun) 1309int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
1310{ 1310{
1311 int rc = TMF_RESP_FUNC_FAILED;
1312 struct pm8001_tmf_task tmf_task; 1311 struct pm8001_tmf_task tmf_task;
1313 1312
1314 tmf_task.tmf = TMF_ABORT_TASK_SET; 1313 tmf_task.tmf = TMF_ABORT_TASK_SET;
1315 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); 1314 return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1316 return rc;
1317} 1315}
1318 1316
1319int pm8001_clear_aca(struct domain_device *dev, u8 *lun) 1317int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
1320{ 1318{
1321 int rc = TMF_RESP_FUNC_FAILED;
1322 struct pm8001_tmf_task tmf_task; 1319 struct pm8001_tmf_task tmf_task;
1323 1320
1324 tmf_task.tmf = TMF_CLEAR_ACA; 1321 tmf_task.tmf = TMF_CLEAR_ACA;
1325 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); 1322 return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1326
1327 return rc;
1328} 1323}
1329 1324
1330int pm8001_clear_task_set(struct domain_device *dev, u8 *lun) 1325int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
1331{ 1326{
1332 int rc = TMF_RESP_FUNC_FAILED;
1333 struct pm8001_tmf_task tmf_task; 1327 struct pm8001_tmf_task tmf_task;
1334 struct pm8001_device *pm8001_dev = dev->lldd_dev; 1328 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1335 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); 1329 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
@@ -1338,7 +1332,6 @@ int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
1338 pm8001_printk("I_T_L_Q clear task set[%x]\n", 1332 pm8001_printk("I_T_L_Q clear task set[%x]\n",
1339 pm8001_dev->device_id)); 1333 pm8001_dev->device_id));
1340 tmf_task.tmf = TMF_CLEAR_TASK_SET; 1334 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1341 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); 1335 return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1342 return rc;
1343} 1336}
1344 1337
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 71ff3936da4f..398d2af60832 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5841,7 +5841,7 @@ out_disable_device:
5841} 5841}
5842 5842
5843/* 5843/*
5844 * PCI driver structure of pcmraid driver 5844 * PCI driver structure of pmcraid driver
5845 */ 5845 */
5846static struct pci_driver pmcraid_driver = { 5846static struct pci_driver pmcraid_driver = {
5847 .name = PMCRAID_DRIVER_NAME, 5847 .name = PMCRAID_DRIVER_NAME,
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 5a021217bfc9..f3f399fe10c8 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -49,6 +49,7 @@
49#define QEDF_ABORT_TIMEOUT (10 * 1000) 49#define QEDF_ABORT_TIMEOUT (10 * 1000)
50#define QEDF_CLEANUP_TIMEOUT 1 50#define QEDF_CLEANUP_TIMEOUT 1
51#define QEDF_MAX_CDB_LEN 16 51#define QEDF_MAX_CDB_LEN 16
52#define QEDF_LL2_BUF_SIZE 2500 /* Buffer size required for LL2 Rx */
52 53
53#define UPSTREAM_REMOVE 1 54#define UPSTREAM_REMOVE 1
54#define UPSTREAM_KEEP 1 55#define UPSTREAM_KEEP 1
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index d905a307302d..b88bed9bb133 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -47,13 +47,13 @@ qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
47 * @pf: the pf that is stopping 47 * @pf: the pf that is stopping
48 **/ 48 **/
49void 49void
50qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf) 50qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf_dbg)
51{ 51{
52 QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host " 52 QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Destroying debugfs host "
53 "entry\n"); 53 "entry\n");
54 /* remove debugfs entries of this PF */ 54 /* remove debugfs entries of this PF */
55 debugfs_remove_recursive(qedf->bdf_dentry); 55 debugfs_remove_recursive(qedf_dbg->bdf_dentry);
56 qedf->bdf_dentry = NULL; 56 qedf_dbg->bdf_dentry = NULL;
57} 57}
58 58
59/** 59/**
@@ -140,10 +140,10 @@ qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
140 loff_t *ppos) 140 loff_t *ppos)
141{ 141{
142 int cnt; 142 int cnt;
143 struct qedf_dbg_ctx *qedf = 143 struct qedf_dbg_ctx *qedf_dbg =
144 (struct qedf_dbg_ctx *)filp->private_data; 144 (struct qedf_dbg_ctx *)filp->private_data;
145 145
146 QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n"); 146 QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
147 cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug); 147 cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
148 148
149 cnt = min_t(int, count, cnt - *ppos); 149 cnt = min_t(int, count, cnt - *ppos);
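
The read handler formats into a caller buffer and then bounds the copy by both count and the read offset, so repeated reads walk the message without overrunning it — the pattern the kernel wraps up as simple_read_from_buffer(). A self-contained version of the same arithmetic:

    #include <stdio.h>
    #include <string.h>

    /* Copy from src at *ppos, clamped to count and to the data actually
     * available; advance *ppos so successive reads continue. */
    static size_t read_from_buffer(char *dst, size_t count, long *ppos,
                                   const char *src, size_t srclen)
    {
        size_t avail;

        if (*ppos < 0 || (size_t)*ppos >= srclen)
            return 0; /* EOF */
        avail = srclen - (size_t)*ppos;
        if (count > avail)
            count = avail;
        memcpy(dst, src + *ppos, count);
        *ppos += (long)count;
        return count;
    }

    int main(void)
    {
        const char msg[] = "debug mask = 0x3f\n";
        char out[8];
        long pos = 0;
        size_t n;

        while ((n = read_from_buffer(out, sizeof(out) - 1, &pos,
                                     msg, strlen(msg))) > 0) {
            out[n] = '\0';
            fputs(out, stdout); /* reassembles the whole line */
        }
        return 0;
    }
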
@@ -158,7 +158,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
158 uint32_t val; 158 uint32_t val;
159 void *kern_buf; 159 void *kern_buf;
160 int rval; 160 int rval;
161 struct qedf_dbg_ctx *qedf = 161 struct qedf_dbg_ctx *qedf_dbg =
162 (struct qedf_dbg_ctx *)filp->private_data; 162 (struct qedf_dbg_ctx *)filp->private_data;
163 163
164 if (!count || *ppos) 164 if (!count || *ppos)
@@ -178,7 +178,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
178 else 178 else
179 qedf_debug = val; 179 qedf_debug = val;
180 180
181 QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val); 181 QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
182 return count; 182 return count;
183} 183}
184 184
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 5996f68fbf2b..87e169dcebdb 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -179,8 +179,11 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
179 179
180 orig_io_req = cb_arg->aborted_io_req; 180 orig_io_req = cb_arg->aborted_io_req;
181 181
182 if (!orig_io_req) 182 if (!orig_io_req) {
183 QEDF_ERR(&qedf->dbg_ctx,
184 "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
183 goto out_free; 185 goto out_free;
186 }
184 187
185 if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO && 188 if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
186 rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) 189 rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
@@ -350,8 +353,10 @@ void qedf_restart_rport(struct qedf_rport *fcport)
350 u32 port_id; 353 u32 port_id;
351 unsigned long flags; 354 unsigned long flags;
352 355
353 if (!fcport) 356 if (!fcport) {
357 QEDF_ERR(NULL, "fcport is NULL.\n");
354 return; 358 return;
359 }
355 360
356 spin_lock_irqsave(&fcport->rport_lock, flags); 361 spin_lock_irqsave(&fcport->rport_lock, flags);
357 if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) || 362 if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
@@ -418,8 +423,11 @@ static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
418 * If we are flushing the command just free the cb_arg as none of the 423 * If we are flushing the command just free the cb_arg as none of the
419 * response data will be valid. 424 * response data will be valid.
420 */ 425 */
421 if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) 426 if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
427 QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
428 els_req->xid);
422 goto free_arg; 429 goto free_arg;
430 }
423 431
424 fcport = els_req->fcport; 432 fcport = els_req->fcport;
425 mp_req = &(els_req->mp_req); 433 mp_req = &(els_req->mp_req);
@@ -532,8 +540,10 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
532 540
533 orig_io_req = cb_arg->aborted_io_req; 541 orig_io_req = cb_arg->aborted_io_req;
534 542
535 if (!orig_io_req) 543 if (!orig_io_req) {
544 QEDF_ERR(NULL, "orig_io_req is NULL.\n");
536 goto out_free; 545 goto out_free;
546 }
537 547
538 clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); 548 clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
539 549
@@ -547,8 +557,11 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
547 orig_io_req, orig_io_req->xid, srr_req->xid, refcount); 557 orig_io_req, orig_io_req->xid, srr_req->xid, refcount);
548 558
549 /* If a SRR times out, simply free resources */ 559 /* If a SRR times out, simply free resources */
550 if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) 560 if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
561 QEDF_ERR(&qedf->dbg_ctx,
562 "ELS timeout rec_xid=0x%x.\n", srr_req->xid);
551 goto out_put; 563 goto out_put;
564 }
552 565
553 /* Normalize response data into struct fc_frame */ 566 /* Normalize response data into struct fc_frame */
554 mp_req = &(srr_req->mp_req); 567 mp_req = &(srr_req->mp_req);
@@ -721,8 +734,11 @@ void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
721 cb_arg = io_req->cb_arg; 734 cb_arg = io_req->cb_arg;
722 735
723 /* If we timed out just free resources */ 736 /* If we timed out just free resources */
724 if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) 737 if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
738 QEDF_ERR(&qedf->dbg_ctx,
739 "cqe is NULL or timeout event (0x%x)", io_req->event);
725 goto free; 740 goto free;
741 }
726 742
727 /* Kill the timer we put on the request */ 743 /* Kill the timer we put on the request */
728 cancel_delayed_work_sync(&io_req->timeout_work); 744 cancel_delayed_work_sync(&io_req->timeout_work);
@@ -825,8 +841,10 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
825 841
826 orig_io_req = cb_arg->aborted_io_req; 842 orig_io_req = cb_arg->aborted_io_req;
827 843
828 if (!orig_io_req) 844 if (!orig_io_req) {
845 QEDF_ERR(NULL, "orig_io_req is NULL.\n");
829 goto out_free; 846 goto out_free;
847 }
830 848
831 if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO && 849 if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
832 rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) 850 rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
@@ -838,8 +856,12 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
838 orig_io_req, orig_io_req->xid, rec_req->xid, refcount); 856 orig_io_req, orig_io_req->xid, rec_req->xid, refcount);
839 857
840 /* If a REC times out, free resources */ 858 /* If a REC times out, free resources */
841 if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) 859 if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
860 QEDF_ERR(&qedf->dbg_ctx,
861 "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
862 orig_io_req, orig_io_req->xid);
842 goto out_put; 863 goto out_put;
864 }
843 865
844 /* Normalize response data into struct fc_frame */ 866 /* Normalize response data into struct fc_frame */
845 mp_req = &(rec_req->mp_req); 867 mp_req = &(rec_req->mp_req);
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 362d2bed72fb..bb82f0875eca 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -23,8 +23,11 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
23 int rc = -1; 23 int rc = -1;
24 24
25 skb = dev_alloc_skb(sizeof(struct fip_vlan)); 25 skb = dev_alloc_skb(sizeof(struct fip_vlan));
26 if (!skb) 26 if (!skb) {
27 QEDF_ERR(&qedf->dbg_ctx,
28 "Failed to allocate skb.\n");
27 return; 29 return;
30 }
28 31
29 eth_fr = (char *)skb->data; 32 eth_fr = (char *)skb->data;
30 vlan = (struct fip_vlan *)eth_fr; 33 vlan = (struct fip_vlan *)eth_fr;
@@ -250,18 +253,24 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
250 fc_wwpn_valid = true; 253 fc_wwpn_valid = true;
251 break; 254 break;
252 case FIP_DT_VN_ID: 255 case FIP_DT_VN_ID:
256 fabric_id_valid = false;
253 vp = (struct fip_vn_desc *)desc; 257 vp = (struct fip_vn_desc *)desc;
254 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 258
255 "vx_port fd_fc_id=%x fd_mac=%pM.\n", 259 QEDF_ERR(&qedf->dbg_ctx,
256 ntoh24(vp->fd_fc_id), vp->fd_mac); 260 "CVL vx_port fd_fc_id=0x%x fd_mac=%pM fd_wwpn=%016llx.\n",
257 /* Check vx_port fabric ID */ 261 ntoh24(vp->fd_fc_id), vp->fd_mac,
258 if (ntoh24(vp->fd_fc_id) != 262 get_unaligned_be64(&vp->fd_wwpn));
259 qedf->lport->port_id) 263 /* Check for vx_port wwpn OR Check vx_port
260 fabric_id_valid = false; 264 * fabric ID OR Check vx_port MAC
261 /* Check vx_port MAC */ 265 */
262 if (!ether_addr_equal(vp->fd_mac, 266 if ((get_unaligned_be64(&vp->fd_wwpn) ==
263 qedf->data_src_addr)) 267 qedf->wwpn) ||
264 fabric_id_valid = false; 268 (ntoh24(vp->fd_fc_id) ==
269 qedf->lport->port_id) ||
270 (ether_addr_equal(vp->fd_mac,
271 qedf->data_src_addr))) {
272 fabric_id_valid = true;
273 }
265 break; 274 break;
266 default: 275 default:
267 /* Ignore anything else */ 276 /* Ignore anything else */
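
The FIP change widens the clear-virtual-link (CVL) check: the VN_Port descriptor is now treated as addressing this port if any one of WWPN, fabric ID, or MAC matches, where the old code required both fabric ID and MAC to match. The any-of match, with an illustrative layout in place of the real struct fip_vn_desc:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct vn_desc {            /* illustrative layout only */
        uint64_t wwpn;
        uint32_t fc_id;         /* 24-bit fabric-assigned address */
        uint8_t  mac[6];
    };

    struct local_port {
        uint64_t wwpn;
        uint32_t port_id;
        uint8_t  data_src_addr[6];
    };

    /* Any single matching identifier is enough to accept the CVL. */
    static bool cvl_targets_us(const struct vn_desc *vp,
                               const struct local_port *lp)
    {
        return vp->wwpn == lp->wwpn ||
               vp->fc_id == lp->port_id ||
               !memcmp(vp->mac, lp->data_src_addr, 6);
    }

    int main(void)
    {
        struct local_port lp = { 0x5000111122223333ULL, 0x010203,
                                 { 0, 1, 2, 3, 4, 5 } };
        struct vn_desc vp = { 0, 0x010203, { 9, 9, 9, 9, 9, 9 } };

        printf("%d\n", cvl_targets_us(&vp, &lp)); /* 1: fabric ID match */
        return 0;
    }
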
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index d881e822f92c..e749a2dcaad7 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -104,6 +104,8 @@ static void qedf_cmd_timeout(struct work_struct *work)
104 qedf_process_seq_cleanup_compl(qedf, NULL, io_req); 104 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
105 break; 105 break;
106 default: 106 default:
107 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
108 "Hit default case, xid=0x%x.\n", io_req->xid);
107 break; 109 break;
108 } 110 }
109} 111}
@@ -122,8 +124,10 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
122 num_ios = max_xid - min_xid + 1; 124 num_ios = max_xid - min_xid + 1;
123 125
124 /* Free fcoe_bdt_ctx structures */ 126 /* Free fcoe_bdt_ctx structures */
125 if (!cmgr->io_bdt_pool) 127 if (!cmgr->io_bdt_pool) {
128 QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
126 goto free_cmd_pool; 129 goto free_cmd_pool;
130 }
127 131
128 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge); 132 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
129 for (i = 0; i < num_ios; i++) { 133 for (i = 0; i < num_ios; i++) {
@@ -226,8 +230,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
226 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev, 230 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
227 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma, 231 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
228 GFP_KERNEL); 232 GFP_KERNEL);
229 if (!io_req->sense_buffer) 233 if (!io_req->sense_buffer) {
234 QEDF_ERR(&qedf->dbg_ctx,
235 "Failed to alloc sense buffer.\n");
230 goto mem_err; 236 goto mem_err;
237 }
231 238
232 /* Allocate task parameters to pass to f/w init funcions */ 239 /* Allocate task parameters to pass to f/w init funcions */
233 io_req->task_params = kzalloc(sizeof(*io_req->task_params), 240 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
@@ -437,8 +444,12 @@ void qedf_release_cmd(struct kref *ref)
437 struct qedf_rport *fcport = io_req->fcport; 444 struct qedf_rport *fcport = io_req->fcport;
438 unsigned long flags; 445 unsigned long flags;
439 446
440 if (io_req->cmd_type == QEDF_SCSI_CMD) 447 if (io_req->cmd_type == QEDF_SCSI_CMD) {
448 QEDF_WARN(&fcport->qedf->dbg_ctx,
449 "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
450 io_req, io_req->xid);
441 WARN_ON(io_req->sc_cmd); 451 WARN_ON(io_req->sc_cmd);
452 }
442 453
443 if (io_req->cmd_type == QEDF_ELS || 454 if (io_req->cmd_type == QEDF_ELS ||
444 io_req->cmd_type == QEDF_TASK_MGMT_CMD) 455 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
@@ -447,8 +458,10 @@ void qedf_release_cmd(struct kref *ref)
447 atomic_inc(&cmd_mgr->free_list_cnt); 458 atomic_inc(&cmd_mgr->free_list_cnt);
448 atomic_dec(&fcport->num_active_ios); 459 atomic_dec(&fcport->num_active_ios);
449 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE); 460 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
450 if (atomic_read(&fcport->num_active_ios) < 0) 461 if (atomic_read(&fcport->num_active_ios) < 0) {
451 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); 462 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
463 WARN_ON(1);
464 }
452 465
453 /* Increment task retry identifier now that the request is released */ 466 /* Increment task retry identifier now that the request is released */
454 io_req->task_retry_identifier++; 467 io_req->task_retry_identifier++;
@@ -951,6 +964,9 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
 
 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "Returning DNC as unloading or stop io, flags 0x%lx.\n",
+			  qedf->flags);
 		sc_cmd->result = DID_NO_CONNECT << 16;
 		sc_cmd->scsi_done(sc_cmd);
 		return 0;
@@ -967,6 +983,9 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
 
 	rval = fc_remote_port_chkready(rport);
 	if (rval) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
+			  rval, rport->port_id);
 		sc_cmd->result = rval;
 		sc_cmd->scsi_done(sc_cmd);
 		return 0;
@@ -974,12 +993,14 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
 
 	/* Retry command if we are doing a qed drain operation */
 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto exit_qcmd;
 	}
 
 	if (lport->state != LPORT_ST_READY ||
 	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto exit_qcmd;
 	}
@@ -1297,8 +1318,10 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
 	struct scsi_cmnd *sc_cmd;
 	int refcount;
 
-	if (!io_req)
+	if (!io_req) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
 		return;
+	}
 
 	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
@@ -1414,8 +1437,12 @@ void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	u64 err_warn_bit_map;
 	u8 err_warn = 0xff;
 
-	if (!cqe)
+	if (!cqe) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "cqe is NULL for io_req %p xid=0x%x\n",
+			  io_req, io_req->xid);
 		return;
+	}
 
 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
 		  "xid=0x%x\n", io_req->xid);
@@ -1477,8 +1504,11 @@ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 {
 	int rval;
 
-	if (!cqe)
+	if (!cqe) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			  "cqe is NULL for io_req %p\n", io_req);
 		return;
+	}
 
 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
 		  "xid=0x%x\n", io_req->xid);
@@ -1543,8 +1573,10 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
 	int wait_cnt = 100;
 	int refcount = 0;
 
-	if (!fcport)
+	if (!fcport) {
+		QEDF_ERR(NULL, "fcport is NULL\n");
 		return;
+	}
 
 	/* Check that fcport is still offloaded */
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
@@ -1976,6 +2008,10 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
 
 	if (io_req->sc_cmd) {
+		if (!io_req->return_scsi_cmd_on_abts)
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+				  "Not call scsi_done for xid=0x%x.\n",
+				  io_req->xid);
 		if (io_req->return_scsi_cmd_on_abts)
 			qedf_scsi_done(qedf, io_req, DID_ERROR);
 	}
@@ -2201,6 +2237,10 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 	}
 
 	if (io_req->sc_cmd) {
+		if (!io_req->return_scsi_cmd_on_abts)
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+				  "Not call scsi_done for xid=0x%x.\n",
+				  io_req->xid);
 		if (io_req->return_scsi_cmd_on_abts)
 			qedf_scsi_done(qedf, io_req, DID_ERROR);
 	}
@@ -2241,7 +2281,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 	u16 sqe_idx;
 
 	if (!sc_cmd) {
-		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
+		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
 		return FAILED;
 	}
 
@@ -2363,8 +2403,8 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
 
 	QEDF_ERR(NULL,
 		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
-		 tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id,
-		 (int)sc_cmd->device->lun);
+		 tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
+		 rport->scsi_target_id, (int)sc_cmd->device->lun);
 
 	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
 		QEDF_ERR(NULL, "stale rport\n");
@@ -2515,6 +2555,11 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
 	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
 	memcpy(fh, (void *)bdq_addr, pktlen);
 
+	QEDF_WARN(&qedf->dbg_ctx,
+		  "Processing Unsolicated frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
+		  ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
+		  fh->fh_type, fc_frame_payload_op(fp));
+
 	/* Initialize the frame so libfc sees it as a valid frame */
 	crc = fcoe_fc_crc(fp);
 	fc_frame_init(fp);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 42542720962f..1659d35cd37b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -27,6 +27,7 @@ const struct qed_fcoe_ops *qed_ops;
 
 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
 static void qedf_remove(struct pci_dev *pdev);
+static void qedf_shutdown(struct pci_dev *pdev);
 
 /*
  * Driver module parameters.
@@ -110,16 +111,18 @@ static struct kmem_cache *qedf_io_work_cache;
 
 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
 {
-	qedf->vlan_id = vlan_id;
-	qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT;
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
-		  "prio=%d.\n", vlan_id, qedf->prio);
+	int vlan_id_tmp = 0;
+
+	vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
+	qedf->vlan_id = vlan_id_tmp;
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Setting vlan_id=0x%04x prio=%d.\n",
+		  vlan_id_tmp, qedf->prio);
 }
 
 /* Returns true if we have a valid vlan, false otherwise */
 static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
 {
-	int rc;
 
 	while (qedf->fipvlan_retries--) {
 		/* This is to catch if link goes down during fipvlan retries */
@@ -128,20 +131,25 @@ static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
 			return false;
 		}
 
-		if (qedf->vlan_id > 0)
+		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
+			return false;
+		}
+
+		if (qedf->vlan_id > 0) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "vlan = 0x%x already set, calling ctlr_link_up.\n",
+				  qedf->vlan_id);
+			if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+				fcoe_ctlr_link_up(&qedf->ctlr);
 			return true;
+		}
 
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 			  "Retry %d.\n", qedf->fipvlan_retries);
 		init_completion(&qedf->fipvlan_compl);
 		qedf_fcoe_send_vlan_req(qedf);
-		rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
-		    1 * HZ);
-		if (rc > 0 &&
-		    (atomic_read(&qedf->link_state) == QEDF_LINK_UP)) {
-			fcoe_ctlr_link_up(&qedf->ctlr);
-			return true;
-		}
+		wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
 	}
 
 	return false;
@@ -162,6 +170,8 @@ static void qedf_handle_link_update(struct work_struct *work)
 		return;
 
 	if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+			  "Link is down, resetting vlan_id.\n");
 		qedf->vlan_id = 0;
 		return;
 	}
@@ -311,8 +321,10 @@ int qedf_send_flogi(struct qedf_ctx *qedf)
 
 	lport = qedf->lport;
 
-	if (!lport->tt.elsct_send)
+	if (!lport->tt.elsct_send) {
+		QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
 		return -EINVAL;
+	}
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 	if (!fp) {
@@ -330,11 +342,6 @@ int qedf_send_flogi(struct qedf_ctx *qedf)
 	return 0;
 }
 
-struct qedf_tmp_rdata_item {
-	struct fc_rport_priv *rdata;
-	struct list_head list;
-};
-
 /*
  * This function is called if link_down_tmo is in use. If we get a link up and
  * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
@@ -344,9 +351,8 @@ static void qedf_link_recovery(struct work_struct *work)
 {
 	struct qedf_ctx *qedf =
 	    container_of(work, struct qedf_ctx, link_recovery.work);
-	struct qedf_rport *fcport;
+	struct fc_lport *lport = qedf->lport;
 	struct fc_rport_priv *rdata;
-	struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
 	bool rc;
 	int retries = 30;
 	int rval, i;
@@ -413,33 +419,14 @@ static void qedf_link_recovery(struct work_struct *work)
 	 * Call lport->tt.rport_login which will cause libfc to send an
 	 * ADISC since the rport is in state ready.
 	 */
-	rcu_read_lock();
-	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
-		rdata = fcport->rdata;
-		if (rdata == NULL)
-			continue;
-		rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
-		    GFP_ATOMIC);
-		if (!rdata_item)
-			continue;
+	mutex_lock(&lport->disc.disc_mutex);
+	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
 		if (kref_get_unless_zero(&rdata->kref)) {
-			rdata_item->rdata = rdata;
-			list_add(&rdata_item->list, &rdata_login_list);
-		} else
-			kfree(rdata_item);
-	}
-	rcu_read_unlock();
-	/*
-	 * Do the fc_rport_login outside of the rcu lock so we don't take a
-	 * mutex in an atomic context.
-	 */
-	list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
-	    list) {
-		list_del(&rdata_item->list);
-		fc_rport_login(rdata_item->rdata);
-		kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
-		kfree(rdata_item);
+			fc_rport_login(rdata);
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
 	}
+	mutex_unlock(&lport->disc.disc_mutex);
 }
 
 static void qedf_update_link_speed(struct qedf_ctx *qedf,
@@ -467,6 +454,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
 	case 100000:
 		lport->link_speed = FC_PORTSPEED_100GBIT;
 		break;
+	case 20000:
+		lport->link_speed = FC_PORTSPEED_20GBIT;
+		break;
 	default:
 		lport->link_speed = FC_PORTSPEED_UNKNOWN;
 		break;
@@ -476,16 +466,40 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
 	 * Set supported link speed by querying the supported
 	 * capabilities of the link.
 	 */
-	if (link->supported_caps & SUPPORTED_10000baseKR_Full)
+	if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseKX4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseR_FEC_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseCR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseSR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseLR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseLRM_Full_BIT) ||
+	    (link->supported_caps & QED_LM_10000baseKR_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-	if (link->supported_caps & SUPPORTED_25000baseKR_Full)
+	}
+	if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_25000baseCR_Full_BIT) ||
+	    (link->supported_caps & QED_LM_25000baseSR_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
-	if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
+	}
+	if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_40000baseKR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_40000baseCR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
-	if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
+	}
+	if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) ||
+	    (link->supported_caps & QED_LM_50000baseCR2_Full_BIT) ||
+	    (link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
-	if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
+	}
+	if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_100000baseSR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_100000baseCR4_Full_BIT) ||
+	    (link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) {
 		lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
+	}
+	if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT)
+		lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
 	fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
 }
 
@@ -493,6 +507,16 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
 {
 	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
 
+	/*
+	 * Prevent race where we're removing the module and we get link update
+	 * for qed.
+	 */
+	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Ignore link update, driver getting unload.\n");
+		return;
+	}
+
 	if (link->link_up) {
 		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
 			QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
@@ -2340,12 +2364,14 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
 	fr_dev(fp) = lport;
 	fr_sof(fp) = hp->fcoe_sof;
 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+		QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
 		kfree_skb(skb);
 		return;
 	}
 	fr_eof(fp) = crc_eof.fcoe_eof;
 	fr_crc(fp) = crc_eof.fcoe_crc32;
 	if (pskb_trim(skb, fr_len)) {
+		QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -2406,9 +2432,9 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
 	 * empty then this is not addressed to our port so simply drop it.
 	 */
 	if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
-		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-		    "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+		    "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
 		    lport->port_id, ntoh24(fh->fh_d_id));
 		kfree_skb(skb);
 		return;
 	}
@@ -2417,6 +2443,8 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
 	if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
 	    (f_ctl & FC_FC_EX_CTX)) {
 		/* Drop incoming ABTS response that has both SEQ/EX CTX set */
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping ABTS response as both SEQ/EX CTX set.\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -2560,8 +2588,9 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
 	    sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
 
 	if (!sb_virt) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
-			  "for id = %d.\n", sb_id);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Status block allocation failed for id = %d.\n",
+			 sb_id);
 		return -ENOMEM;
 	}
 
@@ -2569,8 +2598,9 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
 	    sb_id, QED_SB_TYPE_STORAGE);
 
 	if (ret) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
-			  "failed for id = %d.\n", sb_id);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Status block initialization failed (0x%x) for id = %d.\n",
+			 ret, sb_id);
 		return ret;
 	}
 
@@ -2654,13 +2684,18 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
 	io_req = &qedf->cmd_mgr->cmds[xid];
 
 	/* Completion not for a valid I/O anymore so just return */
-	if (!io_req)
+	if (!io_req) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "io_req is NULL for xid=0x%x.\n", xid);
 		return;
+	}
 
 	fcport = io_req->fcport;
 
 	if (fcport == NULL) {
-		QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "fcport is NULL for xid=0x%x io_req=%p.\n",
+			 xid, io_req);
 		return;
 	}
 
@@ -2669,7 +2704,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
 	 * isn't valid and shouldn't be taken. We should just return.
 	 */
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Session not offloaded yet, fcport = %p.\n", fcport);
 		return;
 	}
 
@@ -2881,6 +2917,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
 	 */
 	if (!qedf->p_cpuq) {
 		status = 1;
+		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
 		goto mem_alloc_failure;
 	}
 
@@ -2896,8 +2933,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
 
 	/* Allocate DMA coherent buffers for BDQ */
 	rc = qedf_alloc_bdq(qedf);
-	if (rc)
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
 		goto mem_alloc_failure;
+	}
 
 	/* Allocate a CQ and an associated PBL for each MSI-X vector */
 	for (i = 0; i < qedf->num_queues; i++) {
@@ -3107,6 +3146,7 @@ static struct pci_driver qedf_pci_driver = {
 	.id_table = qedf_pci_tbl,
 	.probe = qedf_probe,
 	.remove = qedf_remove,
+	.shutdown = qedf_shutdown,
 };
 
 static int __qedf_probe(struct pci_dev *pdev, int mode)
@@ -3209,6 +3249,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	qed_params.is_vf = is_vf;
 	qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
 	if (!qedf->cdev) {
+		QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
 		rc = -ENODEV;
 		goto err1;
 	}
@@ -3277,8 +3318,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
 	/* Setup interrupts */
 	rc = qedf_setup_int(qedf);
-	if (rc)
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
 		goto err3;
+	}
 
 	rc = qed_ops->start(qedf->cdev, &qedf->tasks);
 	if (rc) {
@@ -3360,7 +3403,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	}
 
 	memset(&params, 0, sizeof(params));
-	params.mtu = 9000;
+	params.mtu = QEDF_LL2_BUF_SIZE;
 	ether_addr_copy(params.ll2_mac_address, qedf->mac);
 
 	/* Start LL2 processing thread */
@@ -3719,6 +3762,11 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
 	fcoe->scsi_tsk_full = qedf->task_set_fulls;
 }
 
+static void qedf_shutdown(struct pci_dev *pdev)
+{
+	__qedf_remove(pdev, QEDF_MODE_NORMAL);
+}
+
 /* Generic TLV data callback */
 void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
 {
@@ -3845,7 +3893,7 @@ static void __exit qedf_cleanup(void)
 }
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
 MODULE_AUTHOR("QLogic Corporation");
 MODULE_VERSION(QEDF_VERSION);
 module_init(qedf_init);
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index e57533de7e96..b0e37afe5bbb 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -4,9 +4,9 @@
  * Copyright (c) 2016-2018 Cavium Inc.
  */
 
-#define QEDF_VERSION		"8.37.25.20"
+#define QEDF_VERSION		"8.42.3.0"
 #define QEDF_DRIVER_MAJOR_VER		8
-#define QEDF_DRIVER_MINOR_VER		37
-#define QEDF_DRIVER_REV_VER		25
-#define QEDF_DRIVER_ENG_VER		20
+#define QEDF_DRIVER_MINOR_VER		42
+#define QEDF_DRIVER_REV_VER		3
+#define QEDF_DRIVER_ENG_VER		0
 
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6b7b390b2e52..8190c2a27584 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -382,7 +382,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		ha->optrom_region_size = size;
 
 		ha->optrom_state = QLA_SREADING;
-		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
 		if (ha->optrom_buffer == NULL) {
 			ql_log(ql_log_warn, vha, 0x7062,
 			    "Unable to allocate memory for optrom retrieval "
@@ -404,7 +404,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		    "Reading flash region -- 0x%x/0x%x.\n",
 		    ha->optrom_region_start, ha->optrom_region_size);
 
-		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
 		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
 		    ha->optrom_region_start, ha->optrom_region_size);
 		break;
@@ -457,7 +456,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		ha->optrom_region_size = size;
 
 		ha->optrom_state = QLA_SWRITING;
-		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+		ha->optrom_buffer = vzalloc(ha->optrom_region_size);
 		if (ha->optrom_buffer == NULL) {
 			ql_log(ql_log_warn, vha, 0x7066,
 			    "Unable to allocate memory for optrom update "
@@ -472,7 +471,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
 		    "Staging flash region write -- 0x%x/0x%x.\n",
 		    ha->optrom_region_start, ha->optrom_region_size);
 
-		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
 		break;
 	case 3:
 		if (ha->optrom_state != QLA_SWRITING) {
@@ -726,7 +724,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
 			break;
 		} else {
 			/* Make sure FC side is not in reset */
-			qla2x00_wait_for_hba_online(vha);
+			WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
+				     QLA_SUCCESS);
 
 			/* Issue MPI reset */
 			scsi_block_requests(vha->host);
@@ -1126,7 +1125,8 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
 	char pci_info[30];
 
 	return scnprintf(buf, PAGE_SIZE, "%s\n",
-	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
+			 vha->hw->isp_ops->pci_info_str(vha, pci_info,
+							sizeof(pci_info)));
 }
 
 static ssize_t
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 5441557b424b..28d587a89ba6 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -12,10 +12,8 @@
 #include <linux/bsg-lib.h>
 
 /* BSG support for ELS/CT pass through */
-void
-qla2x00_bsg_job_done(void *ptr, int res)
+void qla2x00_bsg_job_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct bsg_job *bsg_job = sp->u.bsg_job;
 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 
@@ -25,10 +23,8 @@ qla2x00_bsg_job_done(void *ptr, int res)
 	sp->free(sp);
 }
 
-void
-qla2x00_bsg_sp_free(void *ptr)
+void qla2x00_bsg_sp_free(srb_t *sp)
 {
-	srb_t *sp = ptr;
 	struct qla_hw_data *ha = sp->vha->hw;
 	struct bsg_job *bsg_job = sp->u.bsg_job;
 	struct fc_bsg_request *bsg_request = bsg_job->request;
@@ -341,6 +337,8 @@ qla2x00_process_els(struct bsg_job *bsg_job)
 	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 	if (!req_sg_cnt) {
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 		rval = -ENOMEM;
 		goto done_free_fcport;
 	}
@@ -348,6 +346,8 @@ qla2x00_process_els(struct bsg_job *bsg_job)
 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
 	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 	if (!rsp_sg_cnt) {
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 		rval = -ENOMEM;
 		goto done_free_fcport;
 	}
@@ -1778,8 +1778,8 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
 	uint16_t nextlid = 0;
 	uint32_t tot_dsds;
 	srb_t *sp = NULL;
-	uint32_t req_data_len = 0;
-	uint32_t rsp_data_len = 0;
+	uint32_t req_data_len;
+	uint32_t rsp_data_len;
 
 	/* Check the type of the adapter */
 	if (!IS_BIDI_CAPABLE(ha)) {
@@ -1884,6 +1884,9 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
 		goto done_unmap_sg;
 	}
 
+	req_data_len = bsg_job->request_payload.payload_len;
+	rsp_data_len = bsg_job->reply_payload.payload_len;
+
 	if (req_data_len != rsp_data_len) {
 		rval = EXT_STATUS_BUSY;
 		ql_log(ql_log_warn, vha, 0x70aa,
@@ -1891,10 +1894,6 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
 		goto done_unmap_sg;
 	}
 
-	req_data_len = bsg_job->request_payload.payload_len;
-	rsp_data_len = bsg_job->reply_payload.payload_len;
-
-
 	/* Alloc SRB structure */
 	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
 	if (!sp) {
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9e80646722e2..30afc59c1870 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2743,7 +2743,8 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
 
 
 void
-ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, void *buf, uint size)
+ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
+	       uint size)
 {
 	uint cnt;
 
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index bad2b12604f1..873a6aef1c5c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -34,6 +34,20 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_bsg_fc.h>
 
+/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
+typedef struct {
+	uint8_t domain;
+	uint8_t area;
+	uint8_t al_pa;
+} be_id_t;
+
+/* Little endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
+typedef struct {
+	uint8_t al_pa;
+	uint8_t area;
+	uint8_t domain;
+} le_id_t;
+
 #include "qla_bsg.h"
 #include "qla_dsd.h"
 #include "qla_nx.h"
@@ -117,9 +131,9 @@
 #define RD_REG_BYTE_RELAXED(addr)	readb_relaxed(addr)
 #define RD_REG_WORD_RELAXED(addr)	readw_relaxed(addr)
 #define RD_REG_DWORD_RELAXED(addr)	readl_relaxed(addr)
-#define WRT_REG_BYTE(addr, data)	writeb(data,addr)
-#define WRT_REG_WORD(addr, data)	writew(data,addr)
-#define WRT_REG_DWORD(addr, data)	writel(data,addr)
+#define WRT_REG_BYTE(addr, data)	writeb(data, addr)
+#define WRT_REG_WORD(addr, data)	writew(data, addr)
+#define WRT_REG_DWORD(addr, data)	writel(data, addr)
 
 /*
  * ISP83XX specific remote register addresses
@@ -207,7 +221,7 @@
  * 133Mhz slot.
  */
 #define RD_REG_WORD_PIO(addr)		(inw((unsigned long)addr))
-#define WRT_REG_WORD_PIO(addr, data)	(outw(data,(unsigned long)addr))
+#define WRT_REG_WORD_PIO(addr, data)	(outw(data, (unsigned long)addr))
 
 /*
  * Fibre Channel device definitions.
@@ -303,7 +317,8 @@ struct srb_cmd {
 	uint32_t request_sense_length;
 	uint32_t fw_sense_length;
 	uint8_t *request_sense_ptr;
-	void *ctx;
+	struct ct6_dsd *ct6_ctx;
+	struct crc_context *crc_ctx;
 };
 
 /*
@@ -343,6 +358,51 @@ typedef union {
 } port_id_t;
 #define INVALID_PORT_ID	0xFFFFFF
 
+static inline le_id_t be_id_to_le(be_id_t id)
+{
+	le_id_t res;
+
+	res.domain = id.domain;
+	res.area = id.area;
+	res.al_pa = id.al_pa;
+
+	return res;
+}
+
+static inline be_id_t le_id_to_be(le_id_t id)
+{
+	be_id_t res;
+
+	res.domain = id.domain;
+	res.area = id.area;
+	res.al_pa = id.al_pa;
+
+	return res;
+}
+
+static inline port_id_t be_to_port_id(be_id_t id)
+{
+	port_id_t res;
+
+	res.b.domain = id.domain;
+	res.b.area = id.area;
+	res.b.al_pa = id.al_pa;
+	res.b.rsvd_1 = 0;
+
+	return res;
+}
+
+static inline be_id_t port_id_to_be_id(port_id_t port_id)
+{
+	be_id_t res;
+
+	res.domain = port_id.b.domain;
+	res.area = port_id.b.area;
+	res.al_pa = port_id.b.al_pa;
+
+	return res;
+}
+
 struct els_logo_payload {
 	uint8_t opcode;
 	uint8_t rsvd[3];
@@ -395,7 +455,7 @@ struct srb_iocb {
 		struct els_logo_payload *els_logo_pyld;
 		dma_addr_t els_logo_pyld_dma;
 	} els_logo;
-	struct {
+	struct els_plogi {
 #define ELS_DCMD_PLOGI 0x3
 		uint32_t flags;
 		uint32_t els_cmd;
@@ -537,6 +597,7 @@ typedef struct srb {
 	wait_queue_head_t nvme_ls_waitq;
 	struct fc_port *fcport;
 	struct scsi_qla_host *vha;
+	unsigned int start_timer:1;
 	uint32_t handle;
 	uint16_t flags;
 	uint16_t type;
@@ -554,14 +615,22 @@ typedef struct srb {
 		struct bsg_job *bsg_job;
 		struct srb_cmd scmd;
 	} u;
-	void (*done)(void *, int);
-	void (*free)(void *);
+	/*
+	 * Report completion status @res and call sp_put(@sp). @res is
+	 * an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a
+	 * QLA_* status value.
+	 */
+	void (*done)(struct srb *sp, int res);
+	/* Stop the timer and free @sp. Only used by the FCP code. */
+	void (*free)(struct srb *sp);
+	/*
+	 * Call nvme_private->fd->done() and free @sp. Only used by the NVMe
+	 * code.
+	 */
 	void (*put_fn)(struct kref *kref);
 } srb_t;
 
 #define GET_CMD_SP(sp) (sp->u.scmd.cmd)
-#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
-#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
 
 #define GET_CMD_SENSE_LEN(sp) \
 	(sp->u.scmd.request_sense_length)
@@ -921,6 +990,11 @@ struct mbx_cmd_32 {
 #define MBS_LINK_DOWN_ERROR		0x400B
 #define MBS_DIAG_ECHO_TEST_ERROR	0x400C
 
+static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
+{
+	return MBS_COMMAND_COMPLETE <= mbs && mbs <= MBS_DIAG_ECHO_TEST_ERROR;
+}
+
 /*
  * ISP mailbox asynchronous event status codes
  */
@@ -1851,7 +1925,7 @@ struct crc_context {
 			uint16_t	reserved_2;
 			uint16_t	reserved_3;
 			uint32_t	reserved_4;
-			struct dsd64	data_dsd;
+			struct dsd64	data_dsd[1];
 			uint32_t	reserved_5[2];
 			uint32_t	reserved_6;
 		} nobundling;
@@ -1861,7 +1935,7 @@ struct crc_context {
 			uint16_t	reserved_1;
 			__le16	dseg_count;	/* Data segment count */
 			uint32_t	reserved_2;
-			struct dsd64	data_dsd;
+			struct dsd64	data_dsd[1];
 			struct dsd64	dif_dsd;
 		} bundling;
 	} u;
@@ -2289,22 +2363,6 @@ enum login_state { /* FW control Target side */
 	DSC_LS_LOGO_PEND,
 };
 
-enum fcport_mgt_event {
-	FCME_RELOGIN = 1,
-	FCME_RSCN,
-	FCME_PLOGI_DONE,	/* Initiator side sent LLIOCB */
-	FCME_PRLI_DONE,
-	FCME_GNL_DONE,
-	FCME_GPSC_DONE,
-	FCME_GPDB_DONE,
-	FCME_GPNID_DONE,
-	FCME_GFFID_DONE,
-	FCME_ADISC_DONE,
-	FCME_GNNID_DONE,
-	FCME_GFPNID_DONE,
-	FCME_ELS_PLOGI_DONE,
-};
-
 enum rscn_addr_format {
 	RSCN_PORT_ADDR,
 	RSCN_AREA_ADDR,
@@ -2422,7 +2480,6 @@ typedef struct fc_port {
 #define QLA_FCPORT_FOUND	2
 
 struct event_arg {
-	enum fcport_mgt_event	event;
 	fc_port_t		*fcport;
 	srb_t			*sp;
 	port_id_t		id;
@@ -2745,7 +2802,7 @@ struct ct_sns_req {
 		/* GA_NXT, GPN_ID, GNN_ID, GFT_ID, GFPN_ID */
 		struct {
 			uint8_t reserved;
-			uint8_t port_id[3];
+			be_id_t port_id;
 		} port_id;
 
 		struct {
@@ -2764,13 +2821,13 @@ struct ct_sns_req {
 
 		struct {
 			uint8_t reserved;
-			uint8_t port_id[3];
+			be_id_t port_id;
 			uint8_t fc4_types[32];
 		} rft_id;
 
 		struct {
 			uint8_t reserved;
-			uint8_t port_id[3];
+			be_id_t port_id;
 			uint16_t reserved2;
 			uint8_t fc4_feature;
 			uint8_t fc4_type;
@@ -2778,7 +2835,7 @@ struct ct_sns_req {
 
 		struct {
 			uint8_t reserved;
-			uint8_t port_id[3];
+			be_id_t port_id;
 			uint8_t node_name[8];
 		} rnn_id;
 
@@ -2865,7 +2922,7 @@ struct ct_rsp_hdr {
 
 struct ct_sns_gid_pt_data {
 	uint8_t control_byte;
-	uint8_t port_id[3];
+	be_id_t port_id;
 };
 
 /* It's the same for both GPN_FT and GNN_FT */
@@ -2895,7 +2952,7 @@ struct ct_sns_rsp {
 	union {
 		struct {
 			uint8_t port_type;
-			uint8_t port_id[3];
+			be_id_t port_id;
 			uint8_t port_name[8];
 			uint8_t sym_port_name_len;
 			uint8_t sym_port_name[255];
@@ -3111,7 +3168,7 @@ struct isp_operations {
 	void (*update_fw_options) (struct scsi_qla_host *);
 	int (*load_risc) (struct scsi_qla_host *, uint32_t *);
 
-	char * (*pci_info_str) (struct scsi_qla_host *, char *);
+	char * (*pci_info_str)(struct scsi_qla_host *, char *, size_t);
 	char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t);
 
 	irq_handler_t intr_handler;
@@ -3850,7 +3907,7 @@ struct qla_hw_data {
 
 	/* NVRAM configuration data */
 #define MAX_NVRAM_SIZE	4096
-#define VPD_OFFSET	MAX_NVRAM_SIZE / 2
+#define VPD_OFFSET	(MAX_NVRAM_SIZE / 2)
 	uint16_t	nvram_size;
 	uint16_t	nvram_base;
 	void		*nvram;
@@ -4628,6 +4685,7 @@ struct secure_flash_update_block_pk {
 #define QLA_SUSPENDED			0x106
 #define QLA_BUSY			0x107
 #define QLA_ALREADY_REGISTERED		0x109
+#define QLA_OS_TIMER_EXPIRED		0x10a
 
 #define NVRAM_DELAY()		udelay(10)
 
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a432caebefec..0a6fb359f4d5 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -57,10 +57,9 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
 {
 	scsi_qla_host_t *vha = s->private;
 	struct qla_hw_data *ha = vha->hw;
-	struct gid_list_info *gid_list;
+	struct gid_list_info *gid_list, *gid;
 	dma_addr_t gid_list_dma;
 	fc_port_t fc_port;
-	char *id_iter;
 	int rc, i;
 	uint16_t entries, loop_id;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
@@ -82,13 +81,11 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
 	if (rc != QLA_SUCCESS)
 		goto out_free_id_list;
 
-	id_iter = (char *)gid_list;
+	gid = gid_list;
 
 	seq_puts(s, "Port Name Port ID Loop ID\n");
 
 	for (i = 0; i < entries; i++) {
-		struct gid_list_info *gid =
-			(struct gid_list_info *)id_iter;
 		loop_id = le16_to_cpu(gid->loop_id);
 		memset(&fc_port, 0, sizeof(fc_port_t));
 
@@ -99,7 +96,7 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
 		    fc_port.port_name, fc_port.d_id.b.domain,
 		    fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
 		    fc_port.loop_id);
-		id_iter += ha->gid_list_info_size;
+		gid = (void *)gid + ha->gid_list_info_size;
 	}
 out_free_id_list:
 	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h
index 7479924ba422..20788054b91b 100644
--- a/drivers/scsi/qla2xxx/qla_dsd.h
+++ b/drivers/scsi/qla2xxx/qla_dsd.h
@@ -1,6 +1,8 @@
 #ifndef _QLA_DSD_H_
 #define _QLA_DSD_H_
 
+#include <asm/unaligned.h>
+
 /* 32-bit data segment descriptor (8 bytes) */
 struct dsd32 {
 	__le32 address;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index df079a8c2b33..732bb871c433 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -761,13 +761,13 @@ struct els_entry_24xx {
 #define ECF_CLR_PASSTHRU_PEND	BIT_12
 #define ECF_INCL_FRAME_HDR	BIT_11
 
-	uint32_t rx_byte_count;
-	uint32_t tx_byte_count;
+	__le32	 rx_byte_count;
+	__le32	 tx_byte_count;
 
 	__le64	 tx_address __packed;	/* Data segment 0 address. */
-	uint32_t tx_len;		/* Data segment 0 length. */
+	__le32	 tx_len;		/* Data segment 0 length. */
 	__le64	 rx_address __packed;	/* Data segment 1 address. */
-	uint32_t rx_len;		/* Data segment 1 length. */
+	__le32	 rx_len;		/* Data segment 1 length. */
 };
 
 struct els_sts_entry_24xx {
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f9669fdf7798..d11416dcee4e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -45,6 +45,8 @@ extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
 
 extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
 extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
+extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
+				   struct els_plogi *els_plogi);
 
 extern void qla2x00_update_fcports(scsi_qla_host_t *);
 
@@ -96,7 +98,11 @@ extern int qla2x00_init_rings(scsi_qla_host_t *);
 extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
 	int, int, bool);
 extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
-void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *);
+void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea);
+void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha,
+				     struct event_arg *ea);
+void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
+				  struct event_arg *ea);
 int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
 int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
@@ -213,9 +219,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
-extern void qla2x00_sp_compl(void *, int);
-extern void qla2xxx_qpair_sp_free_dma(void *);
-extern void qla2xxx_qpair_sp_compl(void *, int);
+extern void qla2x00_sp_compl(srb_t *sp, int);
+extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
+extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
 extern void qla24xx_sched_upd_fcport(fc_port_t *);
 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 	uint16_t *);
@@ -244,7 +250,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
 extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
 extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
 
-extern void qla2x00_sp_free_dma(void *);
+extern void qla2x00_sp_free_dma(srb_t *sp);
 extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
 
 extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -272,6 +278,7 @@ extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
 extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
 extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
 	uint16_t, struct req_que *);
+extern uint32_t qla2xxx_get_next_handle(struct req_que *req);
 extern int qla2x00_start_scsi(srb_t *sp);
 extern int qla24xx_start_scsi(srb_t *sp);
 int qla2x00_marker(struct scsi_qla_host *, struct qla_qpair *,
@@ -554,7 +561,7 @@ fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
  * Global Function Prototypes in qla_sup.c source file.
  */
 extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
-extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
+extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
     uint32_t, uint32_t);
 extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
     uint32_t);
@@ -630,7 +637,7 @@ extern ulong qla27xx_fwdt_template_size(void *);
 
 extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
 extern void ql_dump_regs(uint, scsi_qla_host_t *, uint);
-extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, void *, uint);
+extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, const void *, uint);
 /*
  * Global Function Prototypes in qla_gs.c source file.
  */
@@ -732,7 +739,7 @@ extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
 extern int qlafx00_soft_reset(scsi_qla_host_t *);
 extern int qlafx00_chip_diag(scsi_qla_host_t *);
 extern void qlafx00_config_rings(struct scsi_qla_host *);
-extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *, size_t);
 extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t);
 extern irqreturn_t qlafx00_intr_handler(int, void *);
 extern void qlafx00_enable_intrs(struct qla_hw_data *);
@@ -790,10 +797,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
 
 /* IOCB related functions */
 extern int qla82xx_start_scsi(srb_t *);
-extern void qla2x00_sp_free(void *);
+extern void qla2x00_sp_free(srb_t *sp);
 extern void qla2x00_sp_timeout(struct timer_list *);
-extern void qla2x00_bsg_job_done(void *, int);
-extern void qla2x00_bsg_sp_free(void *);
+extern void qla2x00_bsg_job_done(srb_t *sp, int);
+extern void qla2x00_bsg_sp_free(srb_t *sp);
 extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
 
 /* Interrupt related */
@@ -822,8 +829,8 @@ extern int qla82xx_device_state_handler(scsi_qla_host_t *);
 extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
 extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
 
-extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
-    size_t, char *);
+extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, size_t,
+    const char *);
 extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
 extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
 extern void qla82xx_start_iocbs(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 9f58e591666d..dc0e36676313 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -226,9 +226,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
226 ct_rsp = &ha->ct_sns->p.rsp; 226 ct_rsp = &ha->ct_sns->p.rsp;
227 227
228 /* Prepare CT arguments -- port_id */ 228 /* Prepare CT arguments -- port_id */
229 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; 229 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
230 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
231 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
232 230
233 /* Execute MS IOCB */ 231 /* Execute MS IOCB */
234 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 232 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -242,9 +240,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
242 rval = QLA_FUNCTION_FAILED; 240 rval = QLA_FUNCTION_FAILED;
243 } else { 241 } else {
244 /* Populate fc_port_t entry. */ 242 /* Populate fc_port_t entry. */
245 fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0]; 243 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
246 fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
247 fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
248 244
249 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name, 245 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
250 WWN_SIZE); 246 WWN_SIZE);
@@ -337,9 +333,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
337 /* Set port IDs in switch info list. */ 333 /* Set port IDs in switch info list. */
338 for (i = 0; i < ha->max_fibre_devices; i++) { 334 for (i = 0; i < ha->max_fibre_devices; i++) {
339 gid_data = &ct_rsp->rsp.gid_pt.entries[i]; 335 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
340 list[i].d_id.b.domain = gid_data->port_id[0]; 336 list[i].d_id = be_to_port_id(gid_data->port_id);
341 list[i].d_id.b.area = gid_data->port_id[1];
342 list[i].d_id.b.al_pa = gid_data->port_id[2];
343 memset(list[i].fabric_port_name, 0, WWN_SIZE); 337 memset(list[i].fabric_port_name, 0, WWN_SIZE);
344 list[i].fp_speed = PORT_SPEED_UNKNOWN; 338 list[i].fp_speed = PORT_SPEED_UNKNOWN;
345 339
@@ -403,9 +397,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
403 ct_rsp = &ha->ct_sns->p.rsp; 397 ct_rsp = &ha->ct_sns->p.rsp;
404 398
405 /* Prepare CT arguments -- port_id */ 399 /* Prepare CT arguments -- port_id */
406 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; 400 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
407 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
408 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
409 401
410 /* Execute MS IOCB */ 402 /* Execute MS IOCB */
411 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 403 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -472,9 +464,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
472 ct_rsp = &ha->ct_sns->p.rsp; 464 ct_rsp = &ha->ct_sns->p.rsp;
473 465
474 /* Prepare CT arguments -- port_id */ 466 /* Prepare CT arguments -- port_id */
475 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; 467 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
476 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
477 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
478 468
479 /* Execute MS IOCB */ 469 /* Execute MS IOCB */
480 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 470 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -509,9 +499,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
509 return (rval); 499 return (rval);
510} 500}
511 501
512static void qla2x00_async_sns_sp_done(void *s, int rc) 502static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
513{ 503{
514 struct srb *sp = s;
515 struct scsi_qla_host *vha = sp->vha; 504 struct scsi_qla_host *vha = sp->vha;
516 struct ct_sns_pkt *ct_sns; 505 struct ct_sns_pkt *ct_sns;
517 struct qla_work_evt *e; 506 struct qla_work_evt *e;
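
[Annotation] This hunk is the first of many in the series with an identical pattern: the SRB completion callbacks change from void (*)(void *, int) to void (*)(srb_t *, int), so the "struct srb *sp = s;" cast at the top of every done routine disappears and the compiler type-checks the argument. Sketched, with member names assumed from the srb_t uses visible in this diff:

    typedef struct srb srb_t;

    struct srb {
        /* before: void (*done)(void *sp, int res); every callee cast */
        void (*done)(srb_t *sp, int res);   /* after: typed, no cast */
        void (*free)(srb_t *sp);
    };
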
@@ -639,9 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
639 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE); 628 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
640 629
641 /* Prepare CT arguments -- port_id, FC-4 types */ 630 /* Prepare CT arguments -- port_id, FC-4 types */
642 ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain; 631 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
643 ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
644 ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
645 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ 632 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
646 633
647 if (vha->flags.nvme_enabled) 634 if (vha->flags.nvme_enabled)
@@ -737,9 +724,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
737 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE); 724 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
738 725
739 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ 726 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
740 ct_req->req.rff_id.port_id[0] = d_id->b.domain; 727 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
741 ct_req->req.rff_id.port_id[1] = d_id->b.area;
742 ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
743 ct_req->req.rff_id.fc4_feature = fc4feature; 728 ct_req->req.rff_id.fc4_feature = fc4feature;
744 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */ 729 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
745 730
@@ -830,9 +815,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
830 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); 815 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
831 816
832 /* Prepare CT arguments -- port_id, node_name */ 817 /* Prepare CT arguments -- port_id, node_name */
833 ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain; 818 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
834 ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
835 ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
836 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE); 819 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
837 820
838 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE; 821 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
@@ -1479,7 +1462,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1479 return ct_pkt; 1462 return ct_pkt;
1480} 1463}
1481 1464
1482static inline ms_iocb_entry_t * 1465static void
1483qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size) 1466qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1484{ 1467{
1485 struct qla_hw_data *ha = vha->hw; 1468 struct qla_hw_data *ha = vha->hw;
@@ -1493,8 +1476,6 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1493 ms_pkt->req_bytecount = cpu_to_le32(req_size); 1476 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1494 ms_pkt->req_dsd.length = ms_pkt->req_bytecount; 1477 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1495 } 1478 }
1496
1497 return ms_pkt;
1498} 1479}
1499 1480
1500/** 1481/**
@@ -1557,7 +1538,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1557 /* Attributes */ 1538 /* Attributes */
1558 ct_req->req.rhba.attrs.count = 1539 ct_req->req.rhba.attrs.count =
1559 cpu_to_be32(FDMI_HBA_ATTR_COUNT); 1540 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1560 entries = ct_req->req.rhba.hba_identifier; 1541 entries = &ct_req->req;
1561 1542
1562 /* Nodename. */ 1543 /* Nodename. */
1563 eiter = entries + size; 1544 eiter = entries + size;
@@ -1766,7 +1747,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1766 1747
1767 /* Attributes */ 1748 /* Attributes */
1768 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT); 1749 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1769 entries = ct_req->req.rpa.port_name; 1750 entries = &ct_req->req;
1770 1751
1771 /* FC4 types. */ 1752 /* FC4 types. */
1772 eiter = entries + size; 1753 eiter = entries + size;
@@ -1979,7 +1960,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1979 1960
1980 /* Attributes */ 1961 /* Attributes */
1981 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT); 1962 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1982 entries = ct_req->req.rhba2.hba_identifier; 1963 entries = &ct_req->req;
1983 1964
1984 /* Nodename. */ 1965 /* Nodename. */
1985 eiter = entries + size; 1966 eiter = entries + size;
@@ -2338,7 +2319,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2338 2319
2339 /* Attributes */ 2320 /* Attributes */
2340 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT); 2321 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2341 entries = ct_req->req.rpa2.port_name; 2322 entries = &ct_req->req;
2342 2323
2343 /* FC4 types. */ 2324 /* FC4 types. */
2344 eiter = entries + size; 2325 eiter = entries + size;
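
[Annotation] All four FDMI builders (RHBA/RPA and their v2 variants) walk the attribute list as "eiter = entries + size", where size is a running byte count from the start of the CT request. Seeding entries from a field inside the request (hba_identifier, port_name) therefore skewed the cursor; rebasing it to &ct_req->req makes the offset arithmetic consistent. A toy model of the append pattern, not the driver's types:

    #include <stdint.h>
    #include <string.h>

    /* Attributes land at base + size; size counts from the start of
     * the request. vlen <= sizeof(eiter->value) is assumed here. */
    struct attr { uint16_t type, len; uint8_t value[8]; };

    static size_t append_attr(void *base, size_t size, uint16_t type,
                              const void *val, uint16_t vlen)
    {
        struct attr *eiter = (struct attr *)((char *)base + size);

        eiter->type = type;
        eiter->len = 4 + vlen;
        memcpy(eiter->value, val, vlen);
        return size + 4 + vlen;    /* cursor for the next attribute */
    }
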
@@ -2730,9 +2711,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2730 ct_rsp = &ha->ct_sns->p.rsp; 2711 ct_rsp = &ha->ct_sns->p.rsp;
2731 2712
2732 /* Prepare CT arguments -- port_id */ 2713 /* Prepare CT arguments -- port_id */
2733 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; 2714 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2734 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2735 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2736 2715
2737 /* Execute MS IOCB */ 2716 /* Execute MS IOCB */
2738 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2717 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -2936,9 +2915,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2936 ct_rsp = &ha->ct_sns->p.rsp; 2915 ct_rsp = &ha->ct_sns->p.rsp;
2937 2916
2938 /* Prepare CT arguments -- port_id */ 2917 /* Prepare CT arguments -- port_id */
2939 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; 2918 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2940 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2941 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2942 2919
2943 /* Execute MS IOCB */ 2920 /* Execute MS IOCB */
2944 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, 2921 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -3011,9 +2988,8 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
3011 qla_post_iidma_work(vha, fcport); 2988 qla_post_iidma_work(vha, fcport);
3012} 2989}
3013 2990
3014static void qla24xx_async_gpsc_sp_done(void *s, int res) 2991static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
3015{ 2992{
3016 struct srb *sp = s;
3017 struct scsi_qla_host *vha = sp->vha; 2993 struct scsi_qla_host *vha = sp->vha;
3018 struct qla_hw_data *ha = vha->hw; 2994 struct qla_hw_data *ha = vha->hw;
3019 fc_port_t *fcport = sp->fcport; 2995 fc_port_t *fcport = sp->fcport;
@@ -3055,11 +3031,10 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
3055 be16_to_cpu(ct_rsp->rsp.gpsc.speed)); 3031 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3056 } 3032 }
3057 memset(&ea, 0, sizeof(ea)); 3033 memset(&ea, 0, sizeof(ea));
3058 ea.event = FCME_GPSC_DONE;
3059 ea.rc = res; 3034 ea.rc = res;
3060 ea.fcport = fcport; 3035 ea.fcport = fcport;
3061 ea.sp = sp; 3036 ea.sp = sp;
3062 qla2x00_fcport_event_handler(vha, &ea); 3037 qla24xx_handle_gpsc_event(vha, &ea);
3063 3038
3064done: 3039done:
3065 sp->free(sp); 3040 sp->free(sp);
@@ -3144,17 +3119,7 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3144 3119
3145 switch (sp->type) { 3120 switch (sp->type) {
3146 case SRB_ELS_DCMD: 3121 case SRB_ELS_DCMD:
3147 if (c->u.els_plogi.els_plogi_pyld) 3122 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
3148 dma_free_coherent(&vha->hw->pdev->dev,
3149 c->u.els_plogi.tx_size,
3150 c->u.els_plogi.els_plogi_pyld,
3151 c->u.els_plogi.els_plogi_pyld_dma);
3152
3153 if (c->u.els_plogi.els_resp_pyld)
3154 dma_free_coherent(&vha->hw->pdev->dev,
3155 c->u.els_plogi.rx_size,
3156 c->u.els_plogi.els_resp_pyld,
3157 c->u.els_plogi.els_resp_pyld_dma);
3158 break; 3123 break;
3159 case SRB_CT_PTHRU_CMD: 3124 case SRB_CT_PTHRU_CMD:
3160 default: 3125 default:
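
[Annotation] The two dma_free_coherent() pairs for the ELS PLOGI payloads are folded into qla2x00_els_dcmd2_free(), which this series introduces so the unmap path and the other PLOGI teardown sites share one implementation. Its likely shape, inferred from the removed lines (a sketch, field names taken from the code it replaces):

    /* Sketch only: free the tx and rx payload buffers if present. */
    void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
                                struct els_plogi *els_plogi)
    {
        if (els_plogi->els_plogi_pyld)
            dma_free_coherent(&vha->hw->pdev->dev, els_plogi->tx_size,
                els_plogi->els_plogi_pyld, els_plogi->els_plogi_pyld_dma);

        if (els_plogi->els_resp_pyld)
            dma_free_coherent(&vha->hw->pdev->dev, els_plogi->rx_size,
                els_plogi->els_resp_pyld, els_plogi->els_resp_pyld_dma);
    }
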
@@ -3280,9 +3245,8 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3280 } 3245 }
3281} 3246}
3282 3247
3283static void qla2x00_async_gpnid_sp_done(void *s, int res) 3248static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
3284{ 3249{
3285 struct srb *sp = s;
3286 struct scsi_qla_host *vha = sp->vha; 3250 struct scsi_qla_host *vha = sp->vha;
3287 struct ct_sns_req *ct_req = 3251 struct ct_sns_req *ct_req =
3288 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3252 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
@@ -3295,22 +3259,19 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
3295 if (res) 3259 if (res)
3296 ql_dbg(ql_dbg_disc, vha, 0x2066, 3260 ql_dbg(ql_dbg_disc, vha, 0x2066,
3297 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n", 3261 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3298 sp->name, res, sp->gen1, ct_req->req.port_id.port_id, 3262 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
3299 ct_rsp->rsp.gpn_id.port_name); 3263 ct_rsp->rsp.gpn_id.port_name);
3300 else 3264 else
3301 ql_dbg(ql_dbg_disc, vha, 0x2066, 3265 ql_dbg(ql_dbg_disc, vha, 0x2066,
3302 "Async done-%s good rscn gen %d ID %3phC. %8phC\n", 3266 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3303 sp->name, sp->gen1, ct_req->req.port_id.port_id, 3267 sp->name, sp->gen1, &ct_req->req.port_id.port_id,
3304 ct_rsp->rsp.gpn_id.port_name); 3268 ct_rsp->rsp.gpn_id.port_name);
3305 3269
3306 memset(&ea, 0, sizeof(ea)); 3270 memset(&ea, 0, sizeof(ea));
3307 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); 3271 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3308 ea.sp = sp; 3272 ea.sp = sp;
3309 ea.id.b.domain = ct_req->req.port_id.port_id[0]; 3273 ea.id = be_to_port_id(ct_req->req.port_id.port_id);
3310 ea.id.b.area = ct_req->req.port_id.port_id[1];
3311 ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
3312 ea.rc = res; 3274 ea.rc = res;
3313 ea.event = FCME_GPNID_DONE;
3314 3275
3315 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3276 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3316 list_del(&sp->elem); 3277 list_del(&sp->elem);
@@ -3329,25 +3290,22 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
3329 return; 3290 return;
3330 } 3291 }
3331 3292
3332 qla2x00_fcport_event_handler(vha, &ea); 3293 qla24xx_handle_gpnid_event(vha, &ea);
3333 3294
3334 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 3295 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3335 if (!e) { 3296 if (!e) {
3336 /* please ignore kernel warning. otherwise, we have mem leak. */ 3297 /* please ignore kernel warning. otherwise, we have mem leak. */
3337 if (sp->u.iocb_cmd.u.ctarg.req) { 3298 dma_free_coherent(&vha->hw->pdev->dev,
3338 dma_free_coherent(&vha->hw->pdev->dev, 3299 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3339 sp->u.iocb_cmd.u.ctarg.req_allocated_size, 3300 sp->u.iocb_cmd.u.ctarg.req,
3340 sp->u.iocb_cmd.u.ctarg.req, 3301 sp->u.iocb_cmd.u.ctarg.req_dma);
3341 sp->u.iocb_cmd.u.ctarg.req_dma); 3302 sp->u.iocb_cmd.u.ctarg.req = NULL;
3342 sp->u.iocb_cmd.u.ctarg.req = NULL; 3303
3343 } 3304 dma_free_coherent(&vha->hw->pdev->dev,
3344 if (sp->u.iocb_cmd.u.ctarg.rsp) { 3305 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3345 dma_free_coherent(&vha->hw->pdev->dev, 3306 sp->u.iocb_cmd.u.ctarg.rsp,
3346 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, 3307 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3347 sp->u.iocb_cmd.u.ctarg.rsp, 3308 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3348 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3349 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3350 }
3351 3309
3352 sp->free(sp); 3310 sp->free(sp);
3353 return; 3311 return;
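
[Annotation] The NULL guards around these dma_free_coherent() calls look redundant on this path: qla24xx_async_gpnid() bails out before start_sp() if either ctarg buffer failed to allocate, so by the time the done callback runs both pointers are set. The pointers are still cleared after the free because sp->free(sp) runs later and must not release the same buffers again. The idiom in miniature (userspace stand-in):

    #include <stdlib.h>

    /* Free-and-poison: later teardown sees NULL and skips the buffer. */
    static void free_and_clear(void **p)
    {
        free(*p);      /* stands in for dma_free_coherent() */
        *p = NULL;
    }
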
@@ -3419,9 +3377,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3419 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); 3377 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3420 3378
3421 /* GPN_ID req */ 3379 /* GPN_ID req */
3422 ct_req->req.port_id.port_id[0] = id->b.domain; 3380 ct_req->req.port_id.port_id = port_id_to_be_id(*id);
3423 ct_req->req.port_id.port_id[1] = id->b.area;
3424 ct_req->req.port_id.port_id[2] = id->b.al_pa;
3425 3381
3426 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE; 3382 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3427 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; 3383 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
@@ -3432,7 +3388,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3432 3388
3433 ql_dbg(ql_dbg_disc, vha, 0x2067, 3389 ql_dbg(ql_dbg_disc, vha, 0x2067,
3434 "Async-%s hdl=%x ID %3phC.\n", sp->name, 3390 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3435 sp->handle, ct_req->req.port_id.port_id); 3391 sp->handle, &ct_req->req.port_id.port_id);
3436 3392
3437 rval = qla2x00_start_sp(sp); 3393 rval = qla2x00_start_sp(sp);
3438 if (rval != QLA_SUCCESS) 3394 if (rval != QLA_SUCCESS)
@@ -3467,54 +3423,52 @@ done:
3467 3423
3468void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea) 3424void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3469{ 3425{
3470 fc_port_t *fcport = ea->fcport; 3426 fc_port_t *fcport = ea->fcport;
3471 3427
3472 qla24xx_post_gnl_work(vha, fcport); 3428 qla24xx_post_gnl_work(vha, fcport);
3473} 3429}
3474 3430
3475void qla24xx_async_gffid_sp_done(void *s, int res) 3431void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
3476{ 3432{
3477 struct srb *sp = s; 3433 struct scsi_qla_host *vha = sp->vha;
3478 struct scsi_qla_host *vha = sp->vha; 3434 fc_port_t *fcport = sp->fcport;
3479 fc_port_t *fcport = sp->fcport; 3435 struct ct_sns_rsp *ct_rsp;
3480 struct ct_sns_rsp *ct_rsp; 3436 struct event_arg ea;
3481 struct event_arg ea;
3482
3483 ql_dbg(ql_dbg_disc, vha, 0x2133,
3484 "Async done-%s res %x ID %x. %8phC\n",
3485 sp->name, res, fcport->d_id.b24, fcport->port_name);
3486
3487 fcport->flags &= ~FCF_ASYNC_SENT;
3488 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3489 /*
3490 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3491 * The format of the FC-4 Features object, as defined by the FC-4,
3492 * Shall be an array of 4-bit values, one for each type code value
3493 */
3494 if (!res) {
3495 if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
3496 /* w1 b00:03 */
3497 fcport->fc4_type =
3498 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3499 fcport->fc4_type &= 0xf;
3500 }
3501 3437
3502 if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) { 3438 ql_dbg(ql_dbg_disc, vha, 0x2133,
3503 /* w5 [00:03]/28h */ 3439 "Async done-%s res %x ID %x. %8phC\n",
3504 fcport->fc4f_nvme = 3440 sp->name, res, fcport->d_id.b24, fcport->port_name);
3505 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; 3441
3506 fcport->fc4f_nvme &= 0xf; 3442 fcport->flags &= ~FCF_ASYNC_SENT;
3443 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3444 /*
3445 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3446 * The format of the FC-4 Features object, as defined by the FC-4,
3447 * Shall be an array of 4-bit values, one for each type code value
3448 */
3449 if (!res) {
3450 if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
3451 /* w1 b00:03 */
3452 fcport->fc4_type =
3453 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3454 fcport->fc4_type &= 0xf;
3507 } 3455 }
3508 }
3509 3456
3510 memset(&ea, 0, sizeof(ea)); 3457 if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
3511 ea.sp = sp; 3458 /* w5 [00:03]/28h */
3512 ea.fcport = sp->fcport; 3459 fcport->fc4f_nvme =
3513 ea.rc = res; 3460 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3514 ea.event = FCME_GFFID_DONE; 3461 fcport->fc4f_nvme &= 0xf;
3462 }
3463 }
3464
3465 memset(&ea, 0, sizeof(ea));
3466 ea.sp = sp;
3467 ea.fcport = sp->fcport;
3468 ea.rc = res;
3515 3469
3516 qla2x00_fcport_event_handler(vha, &ea); 3470 qla24xx_handle_gffid_event(vha, &ea);
3517 sp->free(sp); 3471 sp->free(sp);
3518} 3472}
3519 3473
3520/* Get FC4 Feature with Nport ID. */ 3474/* Get FC4 Feature with Nport ID. */
@@ -3674,7 +3628,6 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3674 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3628 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3675 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) 3629 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3676 continue; 3630 continue;
3677 fcport->scan_needed = 0;
3678 fcport->scan_state = QLA_FCPORT_FOUND; 3631 fcport->scan_state = QLA_FCPORT_FOUND;
3679 found = true; 3632 found = true;
3680 /* 3633 /*
@@ -3683,10 +3636,12 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3683 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3636 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3684 qla2x00_clear_loop_id(fcport); 3637 qla2x00_clear_loop_id(fcport);
3685 fcport->flags |= FCF_FABRIC_DEVICE; 3638 fcport->flags |= FCF_FABRIC_DEVICE;
3686 } else if (fcport->d_id.b24 != rp->id.b24) { 3639 } else if (fcport->d_id.b24 != rp->id.b24 ||
3640 fcport->scan_needed) {
3687 qlt_schedule_sess_for_deletion(fcport); 3641 qlt_schedule_sess_for_deletion(fcport);
3688 } 3642 }
3689 fcport->d_id.b24 = rp->id.b24; 3643 fcport->d_id.b24 = rp->id.b24;
3644 fcport->scan_needed = 0;
3690 break; 3645 break;
3691 } 3646 }
3692 3647
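
[Annotation] Clearing scan_needed only after the comparison, and folding it into the deletion test, closes a window: a port flagged by an RSCN while the fabric scan was in flight previously had its flag wiped even though its session was left untouched. A minimal model of the reordering:

    #include <stdbool.h>

    /* The flag must be consumed by the decision before it is cleared. */
    struct port { unsigned id; bool scan_needed; };

    static bool needs_teardown(struct port *p, unsigned fabric_id)
    {
        bool stale = (p->id != fabric_id) || p->scan_needed;

        p->id = fabric_id;
        p->scan_needed = false;    /* cleared only after the test */
        return stale;
    }
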
@@ -3898,9 +3853,8 @@ static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3898 } 3853 }
3899} 3854}
3900 3855
3901static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) 3856static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
3902{ 3857{
3903 struct srb *sp = s;
3904 struct scsi_qla_host *vha = sp->vha; 3858 struct scsi_qla_host *vha = sp->vha;
3905 struct ct_sns_req *ct_req = 3859 struct ct_sns_req *ct_req =
3906 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; 3860 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
@@ -4053,9 +4007,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
4053 4007
4054 rval = qla2x00_start_sp(sp); 4008 rval = qla2x00_start_sp(sp);
4055 if (rval != QLA_SUCCESS) { 4009 if (rval != QLA_SUCCESS) {
4056 spin_lock_irqsave(&vha->work_lock, flags);
4057 vha->scan.scan_flags &= ~SF_SCANNING;
4058 spin_unlock_irqrestore(&vha->work_lock, flags);
4059 goto done_free_sp; 4010 goto done_free_sp;
4060 } 4011 }
4061 4012
@@ -4079,6 +4030,17 @@ done_free_sp:
4079 4030
4080 sp->free(sp); 4031 sp->free(sp);
4081 4032
4033 spin_lock_irqsave(&vha->work_lock, flags);
4034 vha->scan.scan_flags &= ~SF_SCANNING;
4035 if (vha->scan.scan_flags == 0) {
4036 ql_dbg(ql_dbg_disc, vha, 0xffff,
4037 "%s: schedule\n", __func__);
4038 vha->scan.scan_flags |= SF_QUEUED;
4039 schedule_delayed_work(&vha->scan.scan_work, 5);
4040 }
4041 spin_unlock_irqrestore(&vha->work_lock, flags);
4042
4043
4082 return rval; 4044 return rval;
4083} /* GNNFT */ 4045} /* GNNFT */
4084 4046
@@ -4152,7 +4114,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4152 rspsz, 4114 rspsz,
4153 &sp->u.iocb_cmd.u.ctarg.rsp_dma, 4115 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4154 GFP_KERNEL); 4116 GFP_KERNEL);
4155 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 4117 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
4156 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 4118 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4157 ql_log(ql_log_warn, vha, 0xffff, 4119 ql_log(ql_log_warn, vha, 0xffff,
4158 "Failed to allocate ct_sns request.\n"); 4120 "Failed to allocate ct_sns request.\n");
@@ -4208,9 +4170,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4208 4170
4209 rval = qla2x00_start_sp(sp); 4171 rval = qla2x00_start_sp(sp);
4210 if (rval != QLA_SUCCESS) { 4172 if (rval != QLA_SUCCESS) {
4211 spin_lock_irqsave(&vha->work_lock, flags);
4212 vha->scan.scan_flags &= ~SF_SCANNING;
4213 spin_unlock_irqrestore(&vha->work_lock, flags);
4214 goto done_free_sp; 4173 goto done_free_sp;
4215 } 4174 }
4216 4175
@@ -4234,6 +4193,17 @@ done_free_sp:
4234 4193
4235 sp->free(sp); 4194 sp->free(sp);
4236 4195
4196 spin_lock_irqsave(&vha->work_lock, flags);
4197 vha->scan.scan_flags &= ~SF_SCANNING;
4198 if (vha->scan.scan_flags == 0) {
4199 ql_dbg(ql_dbg_disc, vha, 0xffff,
4200 "%s: schedule\n", __func__);
4201 vha->scan.scan_flags |= SF_QUEUED;
4202 schedule_delayed_work(&vha->scan.scan_work, 5);
4203 }
4204 spin_unlock_irqrestore(&vha->work_lock, flags);
4205
4206
4237 return rval; 4207 return rval;
4238} 4208}
4239 4209
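
[Annotation] Both the GNN_FT and GPN_FT issue paths move the SF_SCANNING clearing out of the qla2x00_start_sp() error branch and into the common done_free_sp tail, and additionally requeue the delayed scan work when nothing else is pending (the delay argument is in jiffies), so one failed fabric scan no longer wedges discovery. The shared tail, as added in both functions:

    spin_lock_irqsave(&vha->work_lock, flags);
    vha->scan.scan_flags &= ~SF_SCANNING;
    if (vha->scan.scan_flags == 0) {
        /* nothing queued: schedule a retry instead of giving up */
        vha->scan.scan_flags |= SF_QUEUED;
        schedule_delayed_work(&vha->scan.scan_work, 5);
    }
    spin_unlock_irqrestore(&vha->work_lock, flags);
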
@@ -4261,9 +4231,8 @@ void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4261 qla24xx_post_gnl_work(vha, ea->fcport); 4231 qla24xx_post_gnl_work(vha, ea->fcport);
4262} 4232}
4263 4233
4264static void qla2x00_async_gnnid_sp_done(void *s, int res) 4234static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
4265{ 4235{
4266 struct srb *sp = s;
4267 struct scsi_qla_host *vha = sp->vha; 4236 struct scsi_qla_host *vha = sp->vha;
4268 fc_port_t *fcport = sp->fcport; 4237 fc_port_t *fcport = sp->fcport;
4269 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name; 4238 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
@@ -4279,13 +4248,12 @@ static void qla2x00_async_gnnid_sp_done(void *s, int res)
4279 ea.fcport = fcport; 4248 ea.fcport = fcport;
4280 ea.sp = sp; 4249 ea.sp = sp;
4281 ea.rc = res; 4250 ea.rc = res;
4282 ea.event = FCME_GNNID_DONE;
4283 4251
4284 ql_dbg(ql_dbg_disc, vha, 0x204f, 4252 ql_dbg(ql_dbg_disc, vha, 0x204f,
4285 "Async done-%s res %x, WWPN %8phC %8phC\n", 4253 "Async done-%s res %x, WWPN %8phC %8phC\n",
4286 sp->name, res, fcport->port_name, fcport->node_name); 4254 sp->name, res, fcport->port_name, fcport->node_name);
4287 4255
4288 qla2x00_fcport_event_handler(vha, &ea); 4256 qla24xx_handle_gnnid_event(vha, &ea);
4289 4257
4290 sp->free(sp); 4258 sp->free(sp);
4291} 4259}
@@ -4318,9 +4286,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4318 GNN_ID_RSP_SIZE); 4286 GNN_ID_RSP_SIZE);
4319 4287
4320 /* GNN_ID req */ 4288 /* GNN_ID req */
4321 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; 4289 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4322 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4323 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4324 4290
4325 4291
4326 /* req & rsp use the same buffer */ 4292 /* req & rsp use the same buffer */
@@ -4396,9 +4362,8 @@ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4396 qla24xx_post_gpsc_work(vha, fcport); 4362 qla24xx_post_gpsc_work(vha, fcport);
4397} 4363}
4398 4364
4399static void qla2x00_async_gfpnid_sp_done(void *s, int res) 4365static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
4400{ 4366{
4401 struct srb *sp = s;
4402 struct scsi_qla_host *vha = sp->vha; 4367 struct scsi_qla_host *vha = sp->vha;
4403 fc_port_t *fcport = sp->fcport; 4368 fc_port_t *fcport = sp->fcport;
4404 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name; 4369 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
@@ -4413,13 +4378,12 @@ static void qla2x00_async_gfpnid_sp_done(void *s, int res)
4413 ea.fcport = fcport; 4378 ea.fcport = fcport;
4414 ea.sp = sp; 4379 ea.sp = sp;
4415 ea.rc = res; 4380 ea.rc = res;
4416 ea.event = FCME_GFPNID_DONE;
4417 4381
4418 ql_dbg(ql_dbg_disc, vha, 0x204f, 4382 ql_dbg(ql_dbg_disc, vha, 0x204f,
4419 "Async done-%s res %x, WWPN %8phC %8phC\n", 4383 "Async done-%s res %x, WWPN %8phC %8phC\n",
4420 sp->name, res, fcport->port_name, fcport->fabric_port_name); 4384 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4421 4385
4422 qla2x00_fcport_event_handler(vha, &ea); 4386 qla24xx_handle_gfpnid_event(vha, &ea);
4423 4387
4424 sp->free(sp); 4388 sp->free(sp);
4425} 4389}
@@ -4450,9 +4414,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4450 GFPN_ID_RSP_SIZE); 4414 GFPN_ID_RSP_SIZE);
4451 4415
4452 /* GFPN_ID req */ 4416 /* GFPN_ID req */
4453 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; 4417 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4454 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4455 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4456 4418
4457 4419
4458 /* req & rsp use the same buffer */ 4420 /* req & rsp use the same buffer */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d4c3baec9172..643d2324082e 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -37,8 +37,8 @@ static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37static int qla84xx_init_chip(scsi_qla_host_t *); 37static int qla84xx_init_chip(scsi_qla_host_t *);
38static int qla25xx_init_queues(struct qla_hw_data *); 38static int qla25xx_init_queues(struct qla_hw_data *);
39static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *); 39static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
40static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *, 40static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
41 struct event_arg *); 41 struct event_arg *ea);
42static void qla24xx_handle_prli_done_event(struct scsi_qla_host *, 42static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
43 struct event_arg *); 43 struct event_arg *);
44static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *); 44static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
@@ -63,10 +63,8 @@ qla2x00_sp_timeout(struct timer_list *t)
63 iocb->timeout(sp); 63 iocb->timeout(sp);
64} 64}
65 65
66void 66void qla2x00_sp_free(srb_t *sp)
67qla2x00_sp_free(void *ptr)
68{ 67{
69 srb_t *sp = ptr;
70 struct srb_iocb *iocb = &sp->u.iocb_cmd; 68 struct srb_iocb *iocb = &sp->u.iocb_cmd;
71 69
72 del_timer(&iocb->timer); 70 del_timer(&iocb->timer);
@@ -99,22 +97,33 @@ static void qla24xx_abort_iocb_timeout(void *data)
99{ 97{
100 srb_t *sp = data; 98 srb_t *sp = data;
101 struct srb_iocb *abt = &sp->u.iocb_cmd; 99 struct srb_iocb *abt = &sp->u.iocb_cmd;
100 struct qla_qpair *qpair = sp->qpair;
101 u32 handle;
102 unsigned long flags;
103
104 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
105 for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
106 /* removing the abort */
107 if (qpair->req->outstanding_cmds[handle] == sp) {
108 qpair->req->outstanding_cmds[handle] = NULL;
109 break;
110 }
111 }
112 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
102 113
103 abt->u.abt.comp_status = CS_TIMEOUT; 114 abt->u.abt.comp_status = CS_TIMEOUT;
104 sp->done(sp, QLA_FUNCTION_TIMEOUT); 115 sp->done(sp, QLA_OS_TIMER_EXPIRED);
105} 116}
106 117
107static void qla24xx_abort_sp_done(void *ptr, int res) 118static void qla24xx_abort_sp_done(srb_t *sp, int res)
108{ 119{
109 srb_t *sp = ptr;
110 struct srb_iocb *abt = &sp->u.iocb_cmd; 120 struct srb_iocb *abt = &sp->u.iocb_cmd;
111 121
112 if (del_timer(&sp->u.iocb_cmd.timer)) { 122 del_timer(&sp->u.iocb_cmd.timer);
113 if (sp->flags & SRB_WAKEUP_ON_COMP) 123 if (sp->flags & SRB_WAKEUP_ON_COMP)
114 complete(&abt->u.abt.comp); 124 complete(&abt->u.abt.comp);
115 else 125 else
116 sp->free(sp); 126 sp->free(sp);
117 }
118} 127}
119 128
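
[Annotation] qla24xx_abort_iocb_timeout() now detaches the abort SRB from the qpair's outstanding_cmds table under the queue lock before completing it with QLA_OS_TIMER_EXPIRED, so a late firmware response cannot find the handle and complete the same SRB a second time; the matching done routine drops its del_timer() conditional for the same reason. The claim-before-complete pattern, sketched out of context (kernel-flavored, types as used in this diff):

    /* Only the path that clears the handle slot may complete the SRB. */
    static bool claim_handle(struct req_que *req, srb_t *sp,
                             spinlock_t *lock)
    {
        unsigned long flags;
        bool claimed = false;
        u32 h;

        spin_lock_irqsave(lock, flags);
        for (h = 1; h < req->num_outstanding_cmds; h++) {
            if (req->outstanding_cmds[h] == sp) {
                req->outstanding_cmds[h] = NULL;
                claimed = true;
                break;
            }
        }
        spin_unlock_irqrestore(lock, flags);
        return claimed;
    }
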
120static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) 129static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
@@ -127,7 +136,7 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
127 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, 136 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
128 GFP_ATOMIC); 137 GFP_ATOMIC);
129 if (!sp) 138 if (!sp)
130 goto done; 139 return rval;
131 140
132 abt_iocb = &sp->u.iocb_cmd; 141 abt_iocb = &sp->u.iocb_cmd;
133 sp->type = SRB_ABT_CMD; 142 sp->type = SRB_ABT_CMD;
@@ -151,20 +160,18 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
151 cmd_sp->type); 160 cmd_sp->type);
152 161
153 rval = qla2x00_start_sp(sp); 162 rval = qla2x00_start_sp(sp);
154 if (rval != QLA_SUCCESS) 163 if (rval != QLA_SUCCESS) {
155 goto done_free_sp; 164 sp->free(sp);
165 return rval;
166 }
156 167
157 if (wait) { 168 if (wait) {
158 wait_for_completion(&abt_iocb->u.abt.comp); 169 wait_for_completion(&abt_iocb->u.abt.comp);
159 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? 170 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
160 QLA_SUCCESS : QLA_FUNCTION_FAILED; 171 QLA_SUCCESS : QLA_FUNCTION_FAILED;
161 } else { 172 sp->free(sp);
162 goto done;
163 } 173 }
164 174
165done_free_sp:
166 sp->free(sp);
167done:
168 return rval; 175 return rval;
169} 176}
170 177
@@ -234,13 +241,15 @@ qla2x00_async_iocb_timeout(void *data)
234 sp->done(sp, QLA_FUNCTION_TIMEOUT); 241 sp->done(sp, QLA_FUNCTION_TIMEOUT);
235 } 242 }
236 break; 243 break;
244 default:
245 WARN_ON_ONCE(true);
246 sp->done(sp, QLA_FUNCTION_TIMEOUT);
247 break;
237 } 248 }
238} 249}
239 250
240static void 251static void qla2x00_async_login_sp_done(srb_t *sp, int res)
241qla2x00_async_login_sp_done(void *ptr, int res)
242{ 252{
243 srb_t *sp = ptr;
244 struct scsi_qla_host *vha = sp->vha; 253 struct scsi_qla_host *vha = sp->vha;
245 struct srb_iocb *lio = &sp->u.iocb_cmd; 254 struct srb_iocb *lio = &sp->u.iocb_cmd;
246 struct event_arg ea; 255 struct event_arg ea;
@@ -252,14 +261,13 @@ qla2x00_async_login_sp_done(void *ptr, int res)
252 261
253 if (!test_bit(UNLOADING, &vha->dpc_flags)) { 262 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
254 memset(&ea, 0, sizeof(ea)); 263 memset(&ea, 0, sizeof(ea));
255 ea.event = FCME_PLOGI_DONE;
256 ea.fcport = sp->fcport; 264 ea.fcport = sp->fcport;
257 ea.data[0] = lio->u.logio.data[0]; 265 ea.data[0] = lio->u.logio.data[0];
258 ea.data[1] = lio->u.logio.data[1]; 266 ea.data[1] = lio->u.logio.data[1];
259 ea.iop[0] = lio->u.logio.iop[0]; 267 ea.iop[0] = lio->u.logio.iop[0];
260 ea.iop[1] = lio->u.logio.iop[1]; 268 ea.iop[1] = lio->u.logio.iop[1];
261 ea.sp = sp; 269 ea.sp = sp;
262 qla2x00_fcport_event_handler(vha, &ea); 270 qla24xx_handle_plogi_done_event(vha, &ea);
263 } 271 }
264 272
265 sp->free(sp); 273 sp->free(sp);
@@ -289,8 +297,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
289 struct srb_iocb *lio; 297 struct srb_iocb *lio;
290 int rval = QLA_FUNCTION_FAILED; 298 int rval = QLA_FUNCTION_FAILED;
291 299
292 if (!vha->flags.online) 300 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
293 goto done; 301 fcport->loop_id == FC_NO_LOOP_ID) {
302 ql_log(ql_log_warn, vha, 0xffff,
303 "%s: %8phC - not sending command.\n",
304 __func__, fcport->port_name);
305 return rval;
306 }
294 307
295 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 308 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
296 if (!sp) 309 if (!sp)
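
[Annotation] qla2x00_async_login() grows the same early guard that qla24xx_async_gpdb() gets later in this file: refuse (with a warning) when the adapter is offline, an async operation is already in flight, or the port has no loop ID, instead of allocating an SRB that could never complete sensibly. The guard shape:

    if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
        fcport->loop_id == FC_NO_LOOP_ID) {
        ql_log(ql_log_warn, vha, 0xffff,
            "%s: %8phC - not sending command.\n",
            __func__, fcport->port_name);
        return rval;    /* rval is still QLA_FUNCTION_FAILED here */
    }
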
@@ -341,11 +354,8 @@ done:
341 return rval; 354 return rval;
342} 355}
343 356
344static void 357static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
345qla2x00_async_logout_sp_done(void *ptr, int res)
346{ 358{
347 srb_t *sp = ptr;
348
349 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 359 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
350 sp->fcport->login_gen++; 360 sp->fcport->login_gen++;
351 qlt_logo_completion_handler(sp->fcport, res); 361 qlt_logo_completion_handler(sp->fcport, res);
@@ -359,9 +369,6 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
359 struct srb_iocb *lio; 369 struct srb_iocb *lio;
360 int rval = QLA_FUNCTION_FAILED; 370 int rval = QLA_FUNCTION_FAILED;
361 371
362 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
363 return rval;
364
365 fcport->flags |= FCF_ASYNC_SENT; 372 fcport->flags |= FCF_ASYNC_SENT;
366 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 373 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
367 if (!sp) 374 if (!sp)
@@ -405,10 +412,8 @@ qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
405 qlt_logo_completion_handler(fcport, data[0]); 412 qlt_logo_completion_handler(fcport, data[0]);
406} 413}
407 414
408static void 415static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
409qla2x00_async_prlo_sp_done(void *s, int res)
410{ 416{
411 srb_t *sp = (srb_t *)s;
412 struct srb_iocb *lio = &sp->u.iocb_cmd; 417 struct srb_iocb *lio = &sp->u.iocb_cmd;
413 struct scsi_qla_host *vha = sp->vha; 418 struct scsi_qla_host *vha = sp->vha;
414 419
@@ -469,6 +474,9 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
469 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, 474 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
470 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id); 475 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
471 476
477 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
478 ea->data[0]);
479
472 if (ea->data[0] != MBS_COMMAND_COMPLETE) { 480 if (ea->data[0] != MBS_COMMAND_COMPLETE) {
473 ql_dbg(ql_dbg_disc, vha, 0x2066, 481 ql_dbg(ql_dbg_disc, vha, 0x2066,
474 "%s %8phC: adisc fail: post delete\n", 482 "%s %8phC: adisc fail: post delete\n",
@@ -511,10 +519,8 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
511 return qla2x00_post_work(vha, e); 519 return qla2x00_post_work(vha, e);
512} 520}
513 521
514static void 522static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
515qla2x00_async_adisc_sp_done(void *ptr, int res)
516{ 523{
517 srb_t *sp = ptr;
518 struct scsi_qla_host *vha = sp->vha; 524 struct scsi_qla_host *vha = sp->vha;
519 struct event_arg ea; 525 struct event_arg ea;
520 struct srb_iocb *lio = &sp->u.iocb_cmd; 526 struct srb_iocb *lio = &sp->u.iocb_cmd;
@@ -526,7 +532,6 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
526 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 532 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
527 533
528 memset(&ea, 0, sizeof(ea)); 534 memset(&ea, 0, sizeof(ea));
529 ea.event = FCME_ADISC_DONE;
530 ea.rc = res; 535 ea.rc = res;
531 ea.data[0] = lio->u.logio.data[0]; 536 ea.data[0] = lio->u.logio.data[0];
532 ea.data[1] = lio->u.logio.data[1]; 537 ea.data[1] = lio->u.logio.data[1];
@@ -535,7 +540,7 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
535 ea.fcport = sp->fcport; 540 ea.fcport = sp->fcport;
536 ea.sp = sp; 541 ea.sp = sp;
537 542
538 qla2x00_fcport_event_handler(vha, &ea); 543 qla24xx_handle_adisc_event(vha, &ea);
539 544
540 sp->free(sp); 545 sp->free(sp);
541} 546}
@@ -803,6 +808,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
803 fcport->fw_login_state = current_login_state; 808 fcport->fw_login_state = current_login_state;
804 fcport->d_id = id; 809 fcport->d_id = id;
805 switch (current_login_state) { 810 switch (current_login_state) {
811 case DSC_LS_PRLI_PEND:
812 /*
813 * In the middle of PRLI. Let it finish.
814 * Allow relogin code to recheck state again
815 * with GNL. Push disc_state back to DELETED
816 * so GNL can go out again
817 */
818 fcport->disc_state = DSC_DELETED;
819 break;
806 case DSC_LS_PRLI_COMP: 820 case DSC_LS_PRLI_COMP:
807 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) 821 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
808 fcport->port_type = FCT_INITIATOR; 822 fcport->port_type = FCT_INITIATOR;
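
[Annotation] The new DSC_LS_PRLI_PEND case deliberately leaves the firmware's login alone: a PRLI in flight is allowed to finish, and pushing disc_state back to DSC_DELETED makes the relogin machinery issue another GNL later, by which time the firmware reports a settled state. In outline:

    switch (current_login_state) {
    case DSC_LS_PRLI_PEND:
        /* PRLI in flight: touch nothing now, re-query with GNL later */
        fcport->disc_state = DSC_DELETED;
        break;
        /* ... remaining states unchanged ... */
    }
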
@@ -917,10 +931,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
917 } 931 }
918} /* gnl_event */ 932} /* gnl_event */
919 933
920static void 934static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
921qla24xx_async_gnl_sp_done(void *s, int res)
922{ 935{
923 struct srb *sp = s;
924 struct scsi_qla_host *vha = sp->vha; 936 struct scsi_qla_host *vha = sp->vha;
925 unsigned long flags; 937 unsigned long flags;
926 struct fc_port *fcport = NULL, *tf; 938 struct fc_port *fcport = NULL, *tf;
@@ -943,7 +955,6 @@ qla24xx_async_gnl_sp_done(void *s, int res)
943 memset(&ea, 0, sizeof(ea)); 955 memset(&ea, 0, sizeof(ea));
944 ea.sp = sp; 956 ea.sp = sp;
945 ea.rc = res; 957 ea.rc = res;
946 ea.event = FCME_GNL_DONE;
947 958
948 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= 959 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
949 sizeof(struct get_name_list_extended)) { 960 sizeof(struct get_name_list_extended)) {
@@ -982,7 +993,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
982 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 993 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
983 ea.fcport = fcport; 994 ea.fcport = fcport;
984 995
985 qla2x00_fcport_event_handler(vha, &ea); 996 qla24xx_handle_gnl_done_event(vha, &ea);
986 } 997 }
987 998
988 /* create new fcport if fw has knowledge of new sessions */ 999 /* create new fcport if fw has knowledge of new sessions */
@@ -1107,10 +1118,8 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1107 return qla2x00_post_work(vha, e); 1118 return qla2x00_post_work(vha, e);
1108} 1119}
1109 1120
1110static 1121static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
1111void qla24xx_async_gpdb_sp_done(void *s, int res)
1112{ 1122{
1113 struct srb *sp = s;
1114 struct scsi_qla_host *vha = sp->vha; 1123 struct scsi_qla_host *vha = sp->vha;
1115 struct qla_hw_data *ha = vha->hw; 1124 struct qla_hw_data *ha = vha->hw;
1116 fc_port_t *fcport = sp->fcport; 1125 fc_port_t *fcport = sp->fcport;
@@ -1129,11 +1138,10 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
1129 1138
1130 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 1139 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1131 memset(&ea, 0, sizeof(ea)); 1140 memset(&ea, 0, sizeof(ea));
1132 ea.event = FCME_GPDB_DONE;
1133 ea.fcport = fcport; 1141 ea.fcport = fcport;
1134 ea.sp = sp; 1142 ea.sp = sp;
1135 1143
1136 qla2x00_fcport_event_handler(vha, &ea); 1144 qla24xx_handle_gpdb_event(vha, &ea);
1137 1145
1138 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, 1146 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1139 sp->u.iocb_cmd.u.mbx.in_dma); 1147 sp->u.iocb_cmd.u.mbx.in_dma);
@@ -1154,10 +1162,8 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1154 return qla2x00_post_work(vha, e); 1162 return qla2x00_post_work(vha, e);
1155} 1163}
1156 1164
1157static void 1165static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
1158qla2x00_async_prli_sp_done(void *ptr, int res)
1159{ 1166{
1160 srb_t *sp = ptr;
1161 struct scsi_qla_host *vha = sp->vha; 1167 struct scsi_qla_host *vha = sp->vha;
1162 struct srb_iocb *lio = &sp->u.iocb_cmd; 1168 struct srb_iocb *lio = &sp->u.iocb_cmd;
1163 struct event_arg ea; 1169 struct event_arg ea;
@@ -1170,7 +1176,6 @@ qla2x00_async_prli_sp_done(void *ptr, int res)
1170 1176
1171 if (!test_bit(UNLOADING, &vha->dpc_flags)) { 1177 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
1172 memset(&ea, 0, sizeof(ea)); 1178 memset(&ea, 0, sizeof(ea));
1173 ea.event = FCME_PRLI_DONE;
1174 ea.fcport = sp->fcport; 1179 ea.fcport = sp->fcport;
1175 ea.data[0] = lio->u.logio.data[0]; 1180 ea.data[0] = lio->u.logio.data[0];
1176 ea.data[1] = lio->u.logio.data[1]; 1181 ea.data[1] = lio->u.logio.data[1];
@@ -1178,7 +1183,7 @@ qla2x00_async_prli_sp_done(void *ptr, int res)
1178 ea.iop[1] = lio->u.logio.iop[1]; 1183 ea.iop[1] = lio->u.logio.iop[1];
1179 ea.sp = sp; 1184 ea.sp = sp;
1180 1185
1181 qla2x00_fcport_event_handler(vha, &ea); 1186 qla24xx_handle_prli_done_event(vha, &ea);
1182 } 1187 }
1183 1188
1184 sp->free(sp); 1189 sp->free(sp);
@@ -1262,8 +1267,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1262 struct port_database_24xx *pd; 1267 struct port_database_24xx *pd;
1263 struct qla_hw_data *ha = vha->hw; 1268 struct qla_hw_data *ha = vha->hw;
1264 1269
1265 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) 1270 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
1271 fcport->loop_id == FC_NO_LOOP_ID) {
1272 ql_log(ql_log_warn, vha, 0xffff,
1273 "%s: %8phC - not sending command.\n",
1274 __func__, fcport->port_name);
1266 return rval; 1275 return rval;
1276 }
1267 1277
1268 fcport->disc_state = DSC_GPDB; 1278 fcport->disc_state = DSC_GPDB;
1269 1279
@@ -1473,7 +1483,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1473 u64 wwn; 1483 u64 wwn;
1474 u16 sec; 1484 u16 sec;
1475 1485
1476 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8, 1486 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1477 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n", 1487 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
1478 __func__, fcport->port_name, fcport->disc_state, 1488 __func__, fcport->port_name, fcport->disc_state,
1479 fcport->fw_login_state, fcport->login_pause, fcport->flags, 1489 fcport->fw_login_state, fcport->login_pause, fcport->flags,
@@ -1484,6 +1494,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1484 return 0; 1494 return 0;
1485 1495
1486 if ((fcport->loop_id != FC_NO_LOOP_ID) && 1496 if ((fcport->loop_id != FC_NO_LOOP_ID) &&
1497 qla_dual_mode_enabled(vha) &&
1487 ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || 1498 ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1488 (fcport->fw_login_state == DSC_LS_PRLI_PEND))) 1499 (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
1489 return 0; 1500 return 0;
@@ -1636,12 +1647,34 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1636 return qla2x00_post_work(vha, e); 1647 return qla2x00_post_work(vha, e);
1637} 1648}
1638 1649
1639static 1650void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
1651{
1652 fc_port_t *fcport;
1653 unsigned long flags;
1654
1655 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1656 if (fcport) {
1657 fcport->scan_needed = 1;
1658 fcport->rscn_gen++;
1659 }
1660
1661 spin_lock_irqsave(&vha->work_lock, flags);
1662 if (vha->scan.scan_flags == 0) {
1663 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
1664 vha->scan.scan_flags |= SF_QUEUED;
1665 schedule_delayed_work(&vha->scan.scan_work, 5);
1666 }
1667 spin_unlock_irqrestore(&vha->work_lock, flags);
1668}
1669
1640void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, 1670void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1641 struct event_arg *ea) 1671 struct event_arg *ea)
1642{ 1672{
1643 fc_port_t *fcport = ea->fcport; 1673 fc_port_t *fcport = ea->fcport;
1644 1674
1675 if (test_bit(UNLOADING, &vha->dpc_flags))
1676 return;
1677
1645 ql_dbg(ql_dbg_disc, vha, 0x2102, 1678 ql_dbg(ql_dbg_disc, vha, 0x2102,
1646 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", 1679 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1647 __func__, fcport->port_name, fcport->disc_state, 1680 __func__, fcport->port_name, fcport->disc_state,
@@ -1651,110 +1684,16 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1651 fcport->last_login_gen, fcport->login_gen, 1684 fcport->last_login_gen, fcport->login_gen,
1652 fcport->flags); 1685 fcport->flags);
1653 1686
1654 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1655 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
1656 return;
1657
1658 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
1659 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1660 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1661 return;
1662 }
1663 }
1664
1665 if (fcport->last_rscn_gen != fcport->rscn_gen) { 1687 if (fcport->last_rscn_gen != fcport->rscn_gen) {
1666 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n", 1688 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
1667 __func__, __LINE__, fcport->port_name); 1689 __func__, __LINE__, fcport->port_name);
1668 1690 qla24xx_post_gnl_work(vha, fcport);
1669 return; 1691 return;
1670 } 1692 }
1671 1693
1672 qla24xx_fcport_handle_login(vha, fcport); 1694 qla24xx_fcport_handle_login(vha, fcport);
1673} 1695}
1674 1696
1675
1676static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1677 struct event_arg *ea)
1678{
1679 ql_dbg(ql_dbg_disc, vha, 0x2118,
1680 "%s %d %8phC post PRLI\n",
1681 __func__, __LINE__, ea->fcport->port_name);
1682 qla24xx_post_prli_work(vha, ea->fcport);
1683}
1684
1685void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
1686{
1687 fc_port_t *fcport;
1688
1689 switch (ea->event) {
1690 case FCME_RELOGIN:
1691 if (test_bit(UNLOADING, &vha->dpc_flags))
1692 return;
1693
1694 qla24xx_handle_relogin_event(vha, ea);
1695 break;
1696 case FCME_RSCN:
1697 if (test_bit(UNLOADING, &vha->dpc_flags))
1698 return;
1699 {
1700 unsigned long flags;
1701
1702 fcport = qla2x00_find_fcport_by_nportid
1703 (vha, &ea->id, 1);
1704 if (fcport) {
1705 fcport->scan_needed = 1;
1706 fcport->rscn_gen++;
1707 }
1708
1709 spin_lock_irqsave(&vha->work_lock, flags);
1710 if (vha->scan.scan_flags == 0) {
1711 ql_dbg(ql_dbg_disc, vha, 0xffff,
1712 "%s: schedule\n", __func__);
1713 vha->scan.scan_flags |= SF_QUEUED;
1714 schedule_delayed_work(&vha->scan.scan_work, 5);
1715 }
1716 spin_unlock_irqrestore(&vha->work_lock, flags);
1717 }
1718 break;
1719 case FCME_GNL_DONE:
1720 qla24xx_handle_gnl_done_event(vha, ea);
1721 break;
1722 case FCME_GPSC_DONE:
1723 qla24xx_handle_gpsc_event(vha, ea);
1724 break;
1725 case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
1726 qla24xx_handle_plogi_done_event(vha, ea);
1727 break;
1728 case FCME_PRLI_DONE:
1729 qla24xx_handle_prli_done_event(vha, ea);
1730 break;
1731 case FCME_GPDB_DONE:
1732 qla24xx_handle_gpdb_event(vha, ea);
1733 break;
1734 case FCME_GPNID_DONE:
1735 qla24xx_handle_gpnid_event(vha, ea);
1736 break;
1737 case FCME_GFFID_DONE:
1738 qla24xx_handle_gffid_event(vha, ea);
1739 break;
1740 case FCME_ADISC_DONE:
1741 qla24xx_handle_adisc_event(vha, ea);
1742 break;
1743 case FCME_GNNID_DONE:
1744 qla24xx_handle_gnnid_event(vha, ea);
1745 break;
1746 case FCME_GFPNID_DONE:
1747 qla24xx_handle_gfpnid_event(vha, ea);
1748 break;
1749 case FCME_ELS_PLOGI_DONE:
1750 qla_handle_els_plogi_done(vha, ea);
1751 break;
1752 default:
1753 BUG_ON(1);
1754 break;
1755 }
1756}
1757
1758/* 1697/*
1759 * RSCN(s) came in for this fcport, but the RSCN(s) was not able 1698 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
1760 * to be consumed by the fcport 1699 * to be consumed by the fcport
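
[Annotation] This is the centerpiece that the earlier qla_gs.c hunks prepared for: the FCME_* event codes and the qla2x00_fcport_event_handler() multiplexer are deleted, every completion callback calls its handler directly, and only the RSCN leg survives as the new qla2x00_handle_rscn() above (the UNLOADING check from the relogin leg moves into qla24xx_handle_relogin_event(), and qla_rscn_replay() below calls qla2x00_handle_rscn() directly). The dispatch change in two lines:

    /* before: indirect, every caller sets an event code */
    ea.event = FCME_GPSC_DONE;
    qla2x00_fcport_event_handler(vha, &ea);   /* big switch (ea->event) */

    /* after: direct call; ea.event and the FCME_* codes are gone */
    qla24xx_handle_gpsc_event(vha, &ea);
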
@@ -1772,10 +1711,9 @@ void qla_rscn_replay(fc_port_t *fcport)
1772 1711
1773 if (fcport->scan_needed) { 1712 if (fcport->scan_needed) {
1774 memset(&ea, 0, sizeof(ea)); 1713 memset(&ea, 0, sizeof(ea));
1775 ea.event = FCME_RSCN;
1776 ea.id = fcport->d_id; 1714 ea.id = fcport->d_id;
1777 ea.id.b.rsvd_1 = RSCN_PORT_ADDR; 1715 ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1778 qla2x00_fcport_event_handler(fcport->vha, &ea); 1716 qla2x00_handle_rscn(fcport->vha, &ea);
1779 } 1717 }
1780} 1718}
1781 1719
@@ -1789,10 +1727,8 @@ qla2x00_tmf_iocb_timeout(void *data)
1789 complete(&tmf->u.tmf.comp); 1727 complete(&tmf->u.tmf.comp);
1790} 1728}
1791 1729
1792static void 1730static void qla2x00_tmf_sp_done(srb_t *sp, int res)
1793qla2x00_tmf_sp_done(void *ptr, int res)
1794{ 1731{
1795 srb_t *sp = ptr;
1796 struct srb_iocb *tmf = &sp->u.iocb_cmd; 1732 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1797 1733
1798 complete(&tmf->u.tmf.comp); 1734 complete(&tmf->u.tmf.comp);
@@ -1890,6 +1826,9 @@ qla24xx_async_abort_command(srb_t *sp)
1890static void 1826static void
1891qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) 1827qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1892{ 1828{
1829 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
1830 ea->data[0]);
1831
1893 switch (ea->data[0]) { 1832 switch (ea->data[0]) {
1894 case MBS_COMMAND_COMPLETE: 1833 case MBS_COMMAND_COMPLETE:
1895 ql_dbg(ql_dbg_disc, vha, 0x2118, 1834 ql_dbg(ql_dbg_disc, vha, 0x2118,
@@ -1929,7 +1868,7 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1929 } 1868 }
1930} 1869}
1931 1870
1932static void 1871void
1933qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) 1872qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1934{ 1873{
1935 port_id_t cid; /* conflict Nport id */ 1874 port_id_t cid; /* conflict Nport id */
@@ -1953,8 +1892,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1953 return; 1892 return;
1954 } 1893 }
1955 1894
1956 if (fcport->disc_state == DSC_DELETE_PEND) 1895 if ((fcport->disc_state == DSC_DELETE_PEND) ||
1896 (fcport->disc_state == DSC_DELETED)) {
1897 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1957 return; 1898 return;
1899 }
1958 1900
1959 if (ea->sp->gen2 != fcport->login_gen) { 1901 if (ea->sp->gen2 != fcport->login_gen) {
1960 /* target side must have changed it. */ 1902 /* target side must have changed it. */
@@ -1972,6 +1914,9 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1972 return; 1914 return;
1973 } 1915 }
1974 1916
1917 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
1918 ea->data[0]);
1919
1975 switch (ea->data[0]) { 1920 switch (ea->data[0]) {
1976 case MBS_COMMAND_COMPLETE: 1921 case MBS_COMMAND_COMPLETE:
1977 /* 1922 /*
@@ -2266,6 +2211,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
2266 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 2211 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2267 rval = qla2x00_init_rings(vha); 2212 rval = qla2x00_init_rings(vha);
2268 2213
2214 /* No point in continuing if firmware initialization failed. */
2215 if (rval != QLA_SUCCESS)
2216 return rval;
2217
2269 ha->flags.chip_reset_done = 1; 2218 ha->flags.chip_reset_done = 1;
2270 2219
2271 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { 2220 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -3082,103 +3031,113 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
3082} 3031}
3083 3032
3084static void 3033static void
3085qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) 3034qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3086{ 3035{
3087 int rval; 3036 int rval;
3088 dma_addr_t tc_dma; 3037 dma_addr_t tc_dma;
3089 void *tc; 3038 void *tc;
3090 struct qla_hw_data *ha = vha->hw; 3039 struct qla_hw_data *ha = vha->hw;
3091 3040
3092 if (ha->eft) { 3041 if (!IS_FWI2_CAPABLE(ha))
3042 return;
3043
3044 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3045 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3046 return;
3047
3048 if (ha->fce) {
3093 ql_dbg(ql_dbg_init, vha, 0x00bd, 3049 ql_dbg(ql_dbg_init, vha, 0x00bd,
3094 "%s: Offload Mem is already allocated.\n", 3050 "%s: FCE Mem is already allocated.\n",
3095 __func__); 3051 __func__);
3096 return; 3052 return;
3097 } 3053 }
3098 3054
3099 if (IS_FWI2_CAPABLE(ha)) { 3055 /* Allocate memory for Fibre Channel Event Buffer. */
3100 /* Allocate memory for Fibre Channel Event Buffer. */ 3056 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3101 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 3057 GFP_KERNEL);
3102 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3058 if (!tc) {
3103 goto try_eft; 3059 ql_log(ql_log_warn, vha, 0x00be,
3060 "Unable to allocate (%d KB) for FCE.\n",
3061 FCE_SIZE / 1024);
3062 return;
3063 }
3104 3064
3105 if (ha->fce) 3065 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3106 dma_free_coherent(&ha->pdev->dev, 3066 ha->fce_mb, &ha->fce_bufs);
3107 FCE_SIZE, ha->fce, ha->fce_dma); 3067 if (rval) {
3068 ql_log(ql_log_warn, vha, 0x00bf,
3069 "Unable to initialize FCE (%d).\n", rval);
3070 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
3071 return;
3072 }
3108 3073
3109 /* Allocate memory for Fibre Channel Event Buffer. */ 3074 ql_dbg(ql_dbg_init, vha, 0x00c0,
3110 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3075 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
3111 GFP_KERNEL);
3112 if (!tc) {
3113 ql_log(ql_log_warn, vha, 0x00be,
3114 "Unable to allocate (%d KB) for FCE.\n",
3115 FCE_SIZE / 1024);
3116 goto try_eft;
3117 }
3118
3119 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3120 ha->fce_mb, &ha->fce_bufs);
3121 if (rval) {
3122 ql_log(ql_log_warn, vha, 0x00bf,
3123 "Unable to initialize FCE (%d).\n", rval);
3124 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
3125 tc_dma);
3126 ha->flags.fce_enabled = 0;
3127 goto try_eft;
3128 }
3129 ql_dbg(ql_dbg_init, vha, 0x00c0,
3130 "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
3131
3132 ha->flags.fce_enabled = 1;
3133 ha->fce_dma = tc_dma;
3134 ha->fce = tc;
3135
3136try_eft:
3137 if (ha->eft)
3138 dma_free_coherent(&ha->pdev->dev,
3139 EFT_SIZE, ha->eft, ha->eft_dma);
3140 3076
3141 /* Allocate memory for Extended Trace Buffer. */ 3077 ha->flags.fce_enabled = 1;
3142 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 3078 ha->fce_dma = tc_dma;
3143 GFP_KERNEL); 3079 ha->fce = tc;
3144 if (!tc) { 3080}
3145 ql_log(ql_log_warn, vha, 0x00c1,
3146 "Unable to allocate (%d KB) for EFT.\n",
3147 EFT_SIZE / 1024);
3148 goto eft_err;
3149 }
3150 3081
3151 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); 3082static void
3152 if (rval) { 3083qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3153 ql_log(ql_log_warn, vha, 0x00c2, 3084{
3154 "Unable to initialize EFT (%d).\n", rval); 3085 int rval;
3155 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, 3086 dma_addr_t tc_dma;
3156 tc_dma); 3087 void *tc;
3157 goto eft_err; 3088 struct qla_hw_data *ha = vha->hw;
3158 } 3089
3159 ql_dbg(ql_dbg_init, vha, 0x00c3, 3090 if (!IS_FWI2_CAPABLE(ha))
3160 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); 3091 return;
3161 3092
3162 ha->eft_dma = tc_dma; 3093 if (ha->eft) {
3163 ha->eft = tc; 3094 ql_dbg(ql_dbg_init, vha, 0x00bd,
3095 "%s: EFT Mem is already allocated.\n",
3096 __func__);
3097 return;
3164 } 3098 }
3165 3099
3166eft_err: 3100 /* Allocate memory for Extended Trace Buffer. */
3167 return; 3101 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3102 GFP_KERNEL);
3103 if (!tc) {
3104 ql_log(ql_log_warn, vha, 0x00c1,
3105 "Unable to allocate (%d KB) for EFT.\n",
3106 EFT_SIZE / 1024);
3107 return;
3108 }
3109
3110 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3111 if (rval) {
3112 ql_log(ql_log_warn, vha, 0x00c2,
3113 "Unable to initialize EFT (%d).\n", rval);
3114 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
3115 return;
3116 }
3117
3118 ql_dbg(ql_dbg_init, vha, 0x00c3,
3119 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3120
3121 ha->eft_dma = tc_dma;
3122 ha->eft = tc;
3123}
3124
3125static void
3126qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
3127{
3128 qla2x00_init_fce_trace(vha);
3129 qla2x00_init_eft_trace(vha);
3168} 3130}
3169 3131
3170void 3132void
3171qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) 3133qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3172{ 3134{
3173 int rval;
3174 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, 3135 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3175 eft_size, fce_size, mq_size; 3136 eft_size, fce_size, mq_size;
3176 struct qla_hw_data *ha = vha->hw; 3137 struct qla_hw_data *ha = vha->hw;
3177 struct req_que *req = ha->req_q_map[0]; 3138 struct req_que *req = ha->req_q_map[0];
3178 struct rsp_que *rsp = ha->rsp_q_map[0]; 3139 struct rsp_que *rsp = ha->rsp_q_map[0];
3179 struct qla2xxx_fw_dump *fw_dump; 3140 struct qla2xxx_fw_dump *fw_dump;
3180 dma_addr_t tc_dma;
3181 void *tc;
3182 3141
3183 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; 3142 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3184 req_q_size = rsp_q_size = 0; 3143 req_q_size = rsp_q_size = 0;
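
[Annotation] qla2x00_alloc_offload_mem() is unpicked into qla2x00_init_fce_trace() and qla2x00_init_eft_trace(): each helper checks chip capability, returns early if its buffer already exists, and owns its own allocate/enable/rollback sequence. The early return makes the helpers idempotent, which is exactly what lets qla2x00_alloc_fw_dump() call them again below. The idempotence in miniature (userspace stand-in):

    #include <stdlib.h>

    /* Calling twice allocates once; stands in for dma_alloc_coherent(). */
    static void init_trace(void **buf, size_t size)
    {
        if (*buf)
            return;              /* already set up: nothing to do */
        *buf = calloc(1, size);
    }
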
@@ -3216,37 +3175,13 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 	}
 	if (ha->tgt.atio_ring)
 		mq_size += ha->tgt.atio_q_length * sizeof(request_t);
-	/* Allocate memory for Fibre Channel Event Buffer. */
-	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
-		goto try_eft;
 
-	fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
-try_eft:
+	qla2x00_init_fce_trace(vha);
+	if (ha->fce)
+		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+	qla2x00_init_eft_trace(vha);
 	if (ha->eft)
-		dma_free_coherent(&ha->pdev->dev,
-		    EFT_SIZE, ha->eft, ha->eft_dma);
-
-	/* Allocate memory for Extended Trace Buffer. */
-	tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
-	    GFP_KERNEL);
-	if (!tc) {
-		ql_log(ql_log_warn, vha, 0x00c1,
-		    "Unable to allocate (%d KB) for EFT.\n",
-		    EFT_SIZE / 1024);
-		goto allocate;
-	}
-
-	rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
-	if (rval) {
-		ql_log(ql_log_warn, vha, 0x00c2,
-		    "Unable to initialize EFT (%d).\n", rval);
-		dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
-		    tc_dma);
-	}
-	ql_dbg(ql_dbg_init, vha, 0x00c3,
-	    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
-	eft_size = EFT_SIZE;
+		eft_size = EFT_SIZE;
 	}
 
 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -3268,24 +3203,22 @@ try_eft:
 			    j, fwdt->dump_size);
 			dump_size += fwdt->dump_size;
 		}
-		goto allocate;
+	} else {
+		req_q_size = req->length * sizeof(request_t);
+		rsp_q_size = rsp->length * sizeof(response_t);
+		dump_size = offsetof(struct qla2xxx_fw_dump, isp);
+		dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
+		    + eft_size;
+		ha->chain_offset = dump_size;
+		dump_size += mq_size + fce_size;
+		if (ha->exchoffld_buf)
+			dump_size += sizeof(struct qla2xxx_offld_chain) +
+			    ha->exchoffld_size;
+		if (ha->exlogin_buf)
+			dump_size += sizeof(struct qla2xxx_offld_chain) +
+			    ha->exlogin_size;
 	}
 
-	req_q_size = req->length * sizeof(request_t);
-	rsp_q_size = rsp->length * sizeof(response_t);
-	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
-	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
-	ha->chain_offset = dump_size;
-	dump_size += mq_size + fce_size;
-
-	if (ha->exchoffld_buf)
-		dump_size += sizeof(struct qla2xxx_offld_chain) +
-		    ha->exchoffld_size;
-	if (ha->exlogin_buf)
-		dump_size += sizeof(struct qla2xxx_offld_chain) +
-		    ha->exlogin_size;
-
-allocate:
 	if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
 
 		ql_dbg(ql_dbg_init, vha, 0x00c5,
@@ -4400,7 +4333,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 
 inline void
 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
-    char *def)
+    const char *def)
 {
 	char *st, *en;
 	uint16_t index;
@@ -4412,7 +4345,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
 	if (len > sizeof(zero))
 		len = sizeof(zero);
 	if (memcmp(model, &zero, len) != 0) {
-		strncpy(ha->model_number, model, len);
+		memcpy(ha->model_number, model, len);
 		st = en = ha->model_number;
 		en += len - 1;
 		while (en > st) {
@@ -4425,21 +4358,23 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
 		if (use_tbl &&
 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
 		    index < QLA_MODEL_NAMES)
-			strncpy(ha->model_desc,
+			strlcpy(ha->model_desc,
 			    qla2x00_model_name[index * 2 + 1],
-			    sizeof(ha->model_desc) - 1);
+			    sizeof(ha->model_desc));
 	} else {
 		index = (ha->pdev->subsystem_device & 0xff);
 		if (use_tbl &&
 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
 		    index < QLA_MODEL_NAMES) {
-			strcpy(ha->model_number,
-			    qla2x00_model_name[index * 2]);
-			strncpy(ha->model_desc,
+			strlcpy(ha->model_number,
+			    qla2x00_model_name[index * 2],
+			    sizeof(ha->model_number));
+			strlcpy(ha->model_desc,
 			    qla2x00_model_name[index * 2 + 1],
-			    sizeof(ha->model_desc) - 1);
+			    sizeof(ha->model_desc));
 		} else {
-			strcpy(ha->model_number, def);
+			strlcpy(ha->model_number, def,
+			    sizeof(ha->model_number));
 		}
 	}
 	if (IS_FWI2_CAPABLE(ha))
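The string-handling changes in this hunk are not cosmetic. strncpy() leaves the destination unterminated whenever the source is at least as long as the limit, which is why the old code carried the error-prone "sizeof(...) - 1"; strlcpy() always NUL-terminates and takes the full buffer size, and memcpy() is the honest choice where the copy is length-bounded binary data rather than a C string. A hedged reminder of the difference (the buffer and source name are invented):

#include <linux/string.h>	/* kernel strlcpy(); sketch, not driver code */

char desc[16];

/* strncpy: with a source of 16 or more chars, desc ends up with no
 * terminating NUL and a later "%s" format walks off the array. */
strncpy(desc, some_long_model_name, sizeof(desc));

/* strlcpy: truncates if needed but always terminates, and wants the
 * full sizeof(dst) -- no "- 1" bookkeeping. */
strlcpy(desc, some_long_model_name, sizeof(desc));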
@@ -5044,7 +4979,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 
 	uint16_t index;
 	uint16_t entries;
-	char *id_iter;
+	struct gid_list_info *gid;
 	uint16_t loop_id;
 	uint8_t domain, area, al_pa;
 	struct qla_hw_data *ha = vha->hw;
@@ -5119,18 +5054,16 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 	new_fcport->flags &= ~FCF_FABRIC_DEVICE;
 
 	/* Add devices to port list. */
-	id_iter = (char *)ha->gid_list;
+	gid = ha->gid_list;
 	for (index = 0; index < entries; index++) {
-		domain = ((struct gid_list_info *)id_iter)->domain;
-		area = ((struct gid_list_info *)id_iter)->area;
-		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
+		domain = gid->domain;
+		area = gid->area;
+		al_pa = gid->al_pa;
 		if (IS_QLA2100(ha) || IS_QLA2200(ha))
-			loop_id = (uint16_t)
-			    ((struct gid_list_info *)id_iter)->loop_id_2100;
+			loop_id = gid->loop_id_2100;
 		else
-			loop_id = le16_to_cpu(
-			    ((struct gid_list_info *)id_iter)->loop_id);
-		id_iter += ha->gid_list_info_size;
+			loop_id = le16_to_cpu(gid->loop_id);
+		gid = (void *)gid + ha->gid_list_info_size;
 
 		/* Bypass reserved domain fields. */
 		if ((domain & 0xf0) == 0xf0)
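Replacing the char * iterator and its repeated casts with a typed struct gid_list_info * keeps the field accesses readable while the stride still comes from ha->gid_list_info_size, because older ISPs use a shorter per-entry layout than the declared struct. The shape of such a variable-stride walk, sketched with invented names:

/* Sketch only: iterate fixed leading fields over records whose on-wire
 * stride is decided at run time ('rec' and 'consume' are invented). */
struct rec {
	unsigned char domain, area, al_pa;
};

void consume(unsigned char d, unsigned char a, unsigned char p);

static void walk_records(struct rec *r, int entries, size_t stride)
{
	int i;

	for (i = 0; i < entries; i++) {
		consume(r->domain, r->area, r->al_pa);
		/* GNU C void-pointer arithmetic, as in the hunk above */
		r = (void *)r + stride;
	}
}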
@@ -5355,7 +5288,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 	    "%s %8phN. rport %p is %s mode\n",
 	    __func__, fcport->port_name, rport,
 	    (fcport->port_type == FCT_TARGET) ? "tgt" :
-	    ((fcport->port_type & FCT_NVME) ? "nvme" :"ini"));
+	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
 
 	fc_remote_port_rolechg(rport, rport_ids.roles);
 }
@@ -6596,7 +6529,8 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha)
 		    LOOP_DOWN_TIME);
 	}
 	/* Wait for pending cmds to complete */
-	qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
+	WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
+	    != QLA_SUCCESS);
 }
 
 void
@@ -6684,8 +6618,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	}
 
 	/* Clear all async request states across all VPs. */
-	list_for_each_entry(fcport, &vha->vp_fcports, list)
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+		fcport->scan_state = 0;
+	}
 	spin_lock_irqsave(&ha->vport_slock, flags);
 	list_for_each_entry(vp, &ha->vp_list, list) {
 		atomic_inc(&vp->vref_count);
@@ -7519,8 +7455,12 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
 		goto check_sec_image;
 	}
 
-	qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
-	    ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2);
+	if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
+	    ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
+	    QLA_SUCCESS) {
+		WARN_ON_ONCE(true);
+		goto check_sec_image;
+	}
 	qla27xx_print_image(vha, "Primary image", &pri_image_status);
 
 	if (qla27xx_check_image_status_signature(&pri_image_status)) {
@@ -8274,7 +8214,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
 		    "primary" : "secondary");
 	}
-	qla24xx_read_flash_data(vha, ha->vpd, faddr, ha->vpd_size >> 2);
+	ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
 
 	/* Get NVRAM data into cache and calculate checksum. */
 	faddr = ha->flt_region_nvram;
@@ -8286,7 +8226,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 		    "Loading %s nvram image.\n",
 		    active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
 		    "primary" : "secondary");
-	qla24xx_read_flash_data(vha, ha->nvram, faddr, ha->nvram_size >> 2);
+	ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
 
 	dptr = (uint32_t *)nv;
 	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
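Both replacements in this function change the unit of the arguments, not just the callee: qla24xx_read_flash_data() took a 32-bit-word offset and a word count, while read_optrom() takes a byte offset and a byte length. That is why the stored word offset faddr gains a << 2 and the >> 2 on the size disappears. With invented numbers:

/* faddr = 0x2000 (word offset), ha->nvram_size = 0x200 bytes */
qla24xx_read_flash_data(vha, ha->nvram, 0x2000, 0x200 >> 2);   /* 128 words */
ha->isp_ops->read_optrom(vha, ha->nvram, 0x2000 << 2, 0x200);  /* 512 bytes */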
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index bf063c664352..0c3d907af769 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -152,6 +152,18 @@ qla2x00_chip_is_down(scsi_qla_host_t *vha)
 	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
 }
 
+static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
+    struct qla_qpair *qpair, fc_port_t *fcport)
+{
+	memset(sp, 0, sizeof(*sp));
+	sp->fcport = fcport;
+	sp->iocbs = 1;
+	sp->vha = vha;
+	sp->qpair = qpair;
+	sp->cmd_type = TYPE_SRB;
+	INIT_LIST_HEAD(&sp->elem);
+}
+
 static inline srb_t *
 qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
     fc_port_t *fcport, gfp_t flag)
@@ -164,19 +176,9 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
 		return NULL;
 
 	sp = mempool_alloc(qpair->srb_mempool, flag);
-	if (!sp)
-		goto done;
-
-	memset(sp, 0, sizeof(*sp));
-	sp->fcport = fcport;
-	sp->iocbs = 1;
-	sp->vha = vha;
-	sp->qpair = qpair;
-	sp->cmd_type = TYPE_SRB;
-	INIT_LIST_HEAD(&sp->elem);
-
-done:
-	if (!sp)
+	if (sp)
+		qla2xxx_init_sp(sp, vha, qpair, fcport);
+	else
 		QLA_QPAIR_MARK_NOT_BUSY(qpair);
 	return sp;
 }
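Pulling the field setup into qla2xxx_init_sp() gives every SRB allocation path one place that zeroes the structure and seeds the same fields, and qla2xxx_get_qpair_sp() collapses to alloc-init-or-unwind with no goto. The same idiom in miniature (all my_* names invented):

#include <linux/mempool.h>

/* Sketch of the alloc-then-init split; not driver API. */
struct my_obj;
void my_init_obj(struct my_obj *obj);
void my_mark_not_busy(void);

static struct my_obj *my_get_obj(mempool_t *pool, gfp_t flag)
{
	struct my_obj *obj = mempool_alloc(pool, flag);

	if (obj)
		my_init_obj(obj);	/* one memset + setup for all callers */
	else
		my_mark_not_busy();	/* roll back the busy accounting */

	return obj;
}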
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 9312b19ed708..e92e52aa6e9b 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -292,6 +292,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	}
 }
 
+/*
+ * Find the first handle that is not in use, starting from
+ * req->current_outstanding_cmd + 1. The caller must hold the lock that is
+ * associated with @req.
+ */
+uint32_t qla2xxx_get_next_handle(struct req_que *req)
+{
+	uint32_t index, handle = req->current_outstanding_cmd;
+
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			return handle;
+	}
+
+	return 0;
+}
+
 /**
  * qla2x00_start_scsi() - Send a SCSI command to the ISP
  * @sp: command to send to the ISP
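Every transmit path below used to open-code this ring search; they now share the helper and treat a return of 0 as "no free handle". Slot 0 is never handed out (the search wraps back to 1), which is what lets 0 double as the failure value. The calling convention, as the converted sites in this file use it, with the caller holding the queue's lock:

/* Pattern used by the converted callers below. */
spin_lock_irqsave(&ha->hardware_lock, flags);

handle = qla2xxx_get_next_handle(req);
if (handle == 0)
	goto queuing_error;		/* ring full: unlock and fail the I/O */

req->current_outstanding_cmd = handle;	/* next search starts after this */
req->outstanding_cmds[handle] = sp;	/* claim the slot while still locked */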
@@ -306,7 +326,6 @@ qla2x00_start_scsi(srb_t *sp)
 	scsi_qla_host_t *vha;
 	struct scsi_cmnd *cmd;
 	uint32_t *clr_ptr;
-	uint32_t index;
 	uint32_t handle;
 	cmd_entry_t *cmd_pkt;
 	uint16_t cnt;
@@ -339,16 +358,8 @@ qla2x00_start_scsi(srb_t *sp)
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -610,7 +621,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 	}
 
 	cur_seg = scsi_sglist(cmd);
-	ctx = GET_CMD_CTX_SP(sp);
+	ctx = sp->u.scmd.ct6_ctx;
 
 	while (tot_dsds) {
 		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -943,8 +954,7 @@ alloc_and_fill:
 
 	if (sp) {
 		list_add_tail(&dsd_ptr->list,
-		    &((struct crc_context *)
-		    sp->u.scmd.ctx)->dsd_list);
+		    &sp->u.scmd.crc_ctx->dsd_list);
 
 		sp->flags |= SRB_CRC_CTX_DSD_VALID;
 	} else {
@@ -1041,8 +1051,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
 
 	if (sp) {
 		list_add_tail(&dsd_ptr->list,
-		    &((struct crc_context *)
-		    sp->u.scmd.ctx)->dsd_list);
+		    &sp->u.scmd.crc_ctx->dsd_list);
 
 		sp->flags |= SRB_CRC_CTX_DSD_VALID;
 	} else {
@@ -1088,7 +1097,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 
 	sgl = scsi_prot_sglist(cmd);
 	vha = sp->vha;
-	difctx = sp->u.scmd.ctx;
+	difctx = sp->u.scmd.crc_ctx;
 	direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
 	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
 	    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
@@ -1364,6 +1373,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 	cur_dsd++;
 	return 0;
 }
+
 /**
  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
  * Type 6 IOCB types.
@@ -1427,7 +1437,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 		bundling = 0;
 
 	/* Allocate CRC context from global pool */
-	crc_ctx_pkt = sp->u.scmd.ctx =
+	crc_ctx_pkt = sp->u.scmd.crc_ctx =
 	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
 
 	if (!crc_ctx_pkt)
@@ -1515,7 +1525,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 	}
 
 	if (!bundling) {
-		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
+		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
 	} else {
 		/*
 		 * Configure Bundling if we need to fetch interlaving
@@ -1525,7 +1535,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
 		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
 		    tot_prot_dsds);
-		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
+		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
 	}
 
 	/* Finish the common fields of CRC pkt */
@@ -1583,7 +1593,6 @@ qla24xx_start_scsi(srb_t *sp)
 	int nseg;
 	unsigned long flags;
 	uint32_t *clr_ptr;
-	uint32_t index;
 	uint32_t handle;
 	struct cmd_type_7 *cmd_pkt;
 	uint16_t cnt;
@@ -1611,16 +1620,8 @@ qla24xx_start_scsi(srb_t *sp)
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -1723,7 +1724,6 @@ qla24xx_dif_start_scsi(srb_t *sp)
 	int nseg;
 	unsigned long flags;
 	uint32_t *clr_ptr;
-	uint32_t index;
 	uint32_t handle;
 	uint16_t cnt;
 	uint16_t req_cnt = 0;
@@ -1764,17 +1764,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Compute number of required data segments */
@@ -1919,7 +1910,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
 	int nseg;
 	unsigned long flags;
 	uint32_t *clr_ptr;
-	uint32_t index;
 	uint32_t handle;
 	struct cmd_type_7 *cmd_pkt;
 	uint16_t cnt;
@@ -1950,16 +1940,8 @@ qla2xxx_start_scsi_mq(srb_t *sp)
 		vha->marker_needed = 0;
 	}
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -2063,7 +2045,6 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
 	int nseg;
 	unsigned long flags;
 	uint32_t *clr_ptr;
-	uint32_t index;
 	uint32_t handle;
 	uint16_t cnt;
 	uint16_t req_cnt = 0;
@@ -2118,17 +2099,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
 		vha->marker_needed = 0;
 	}
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Compute number of required data segments */
@@ -2275,7 +2247,7 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = qpair->req;
 	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
-	uint32_t index, handle;
+	uint32_t handle;
 	request_t *pkt;
 	uint16_t cnt, req_cnt;
 
@@ -2315,16 +2287,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
 		goto queuing_error;
 
 	if (sp) {
-		/* Check for room in outstanding command list. */
-		handle = req->current_outstanding_cmd;
-		for (index = 1; index < req->num_outstanding_cmds; index++) {
-			handle++;
-			if (handle == req->num_outstanding_cmds)
-				handle = 1;
-			if (!req->outstanding_cmds[handle])
-				break;
-		}
-		if (index == req->num_outstanding_cmds) {
+		handle = qla2xxx_get_next_handle(req);
+		if (handle == 0) {
 			ql_log(ql_log_warn, vha, 0x700b,
 			    "No room on outstanding cmd array.\n");
 			goto queuing_error;
@@ -2540,13 +2504,11 @@ void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
 	sp->free = qla2x00_sp_free;
 	if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
 		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
-	add_timer(&sp->u.iocb_cmd.timer);
+	sp->start_timer = 1;
 }
 
-static void
-qla2x00_els_dcmd_sp_free(void *data)
+static void qla2x00_els_dcmd_sp_free(srb_t *sp)
 {
-	srb_t *sp = data;
 	struct srb_iocb *elsio = &sp->u.iocb_cmd;
 
 	kfree(sp->fcport);
@@ -2576,10 +2538,8 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
 	complete(&lio->u.els_logo.comp);
 }
 
-static void
-qla2x00_els_dcmd_sp_done(void *ptr, int res)
+static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	fc_port_t *fcport = sp->fcport;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct scsi_qla_host *vha = sp->vha;
@@ -2699,16 +2659,16 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 	els_iocb->s_id[0] = vha->d_id.b.al_pa;
 	els_iocb->s_id[1] = vha->d_id.b.area;
 	els_iocb->s_id[2] = vha->d_id.b.domain;
-	els_iocb->control_flags = 0;
 
 	if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
+		els_iocb->control_flags = 0;
 		els_iocb->tx_byte_count = els_iocb->tx_len =
-		    sizeof(struct els_plogi_payload);
+		    cpu_to_le32(sizeof(struct els_plogi_payload));
 		put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
 		    &els_iocb->tx_address);
 		els_iocb->rx_dsd_count = 1;
 		els_iocb->rx_byte_count = els_iocb->rx_len =
-		    sizeof(struct els_plogi_payload);
+		    cpu_to_le32(sizeof(struct els_plogi_payload));
 		put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
 		    &els_iocb->rx_address);
 
@@ -2717,7 +2677,9 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 		ql_dump_buffer(ql_log_info, vha, 0x0109,
 		    (uint8_t *)els_iocb, 0x70);
 	} else {
-		els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
+		els_iocb->control_flags = 1 << 13;
+		els_iocb->tx_byte_count =
+		    cpu_to_le32(sizeof(struct els_logo_payload));
 		put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
 		    &els_iocb->tx_address);
 		els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
@@ -2755,10 +2717,23 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
 	sp->done(sp, QLA_FUNCTION_TIMEOUT);
 }
 
-static void
-qla2x00_els_dcmd2_sp_done(void *ptr, int res)
+void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
+{
+	if (els_plogi->els_plogi_pyld)
+		dma_free_coherent(&vha->hw->pdev->dev,
+		    els_plogi->tx_size,
+		    els_plogi->els_plogi_pyld,
+		    els_plogi->els_plogi_pyld_dma);
+
+	if (els_plogi->els_resp_pyld)
+		dma_free_coherent(&vha->hw->pdev->dev,
+		    els_plogi->rx_size,
+		    els_plogi->els_resp_pyld,
+		    els_plogi->els_resp_pyld_dma);
+}
+
+static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	fc_port_t *fcport = sp->fcport;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct scsi_qla_host *vha = sp->vha;
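With the two dma_free_coherent() pairs folded into qla2x00_els_dcmd2_free(), both the done-callback below and the IOCB setup error path release the PLOGI payload buffers through one helper; because it checks each pointer before freeing, a partially built els_plogi can be handed to it as-is. Sketch of an unwind site (the 'failed' condition is invented):

/* Sketch: one-call unwind of a half-initialized ELS PLOGI context. */
if (failed) {
	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
	sp->free(sp);
	return QLA_FUNCTION_FAILED;
}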
@@ -2780,26 +2755,16 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
 	} else {
 		memset(&ea, 0, sizeof(ea));
 		ea.fcport = fcport;
-		ea.rc = res;
-		ea.event = FCME_ELS_PLOGI_DONE;
-		qla2x00_fcport_event_handler(vha, &ea);
+		ea.data[0] = MBS_COMMAND_COMPLETE;
+		ea.sp = sp;
+		qla24xx_handle_plogi_done_event(vha, &ea);
 	}
 
 	e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
 	if (!e) {
 		struct srb_iocb *elsio = &sp->u.iocb_cmd;
 
-		if (elsio->u.els_plogi.els_plogi_pyld)
-			dma_free_coherent(&sp->vha->hw->pdev->dev,
-			    elsio->u.els_plogi.tx_size,
-			    elsio->u.els_plogi.els_plogi_pyld,
-			    elsio->u.els_plogi.els_plogi_pyld_dma);
-
-		if (elsio->u.els_plogi.els_resp_pyld)
-			dma_free_coherent(&sp->vha->hw->pdev->dev,
-			    elsio->u.els_plogi.rx_size,
-			    elsio->u.els_plogi.els_resp_pyld,
-			    elsio->u.els_plogi.els_resp_pyld_dma);
+		qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
 		sp->free(sp);
 		return;
 	}
@@ -2899,18 +2864,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
 
 out:
 	fcport->flags &= ~(FCF_ASYNC_SENT);
-	if (elsio->u.els_plogi.els_plogi_pyld)
-		dma_free_coherent(&sp->vha->hw->pdev->dev,
-		    elsio->u.els_plogi.tx_size,
-		    elsio->u.els_plogi.els_plogi_pyld,
-		    elsio->u.els_plogi.els_plogi_pyld_dma);
-
-	if (elsio->u.els_plogi.els_resp_pyld)
-		dma_free_coherent(&sp->vha->hw->pdev->dev,
-		    elsio->u.els_plogi.rx_size,
-		    elsio->u.els_plogi.els_resp_pyld,
-		    elsio->u.els_plogi.els_resp_pyld_dma);
-
+	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
 	sp->free(sp);
 done:
 	return rval;
@@ -3115,7 +3069,6 @@ qla82xx_start_scsi(srb_t *sp)
 	unsigned long flags;
 	struct scsi_cmnd *cmd;
 	uint32_t *clr_ptr;
-	uint32_t index;
 	uint32_t handle;
 	uint16_t cnt;
 	uint16_t req_cnt;
@@ -3155,16 +3108,8 @@ qla82xx_start_scsi(srb_t *sp)
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -3235,7 +3180,7 @@ sufficient_dsds:
 		goto queuing_error;
 	}
 
-	ctx = sp->u.scmd.ctx =
+	ctx = sp->u.scmd.ct6_ctx =
 	    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
 	if (!ctx) {
 		ql_log(ql_log_fatal, vha, 0x3010,
@@ -3431,9 +3376,9 @@ queuing_error:
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);
 
-	if (sp->u.scmd.ctx) {
-		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
-		sp->u.scmd.ctx = NULL;
+	if (sp->u.scmd.crc_ctx) {
+		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+		sp->u.scmd.crc_ctx = NULL;
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -3668,6 +3613,9 @@ qla2x00_start_sp(srb_t *sp)
 		break;
 	}
 
+	if (sp->start_timer)
+		add_timer(&sp->u.iocb_cmd.timer);
+
 	wmb();
 	qla2x00_start_iocbs(vha, qp->req);
 done:
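This is the consumer of the sp->start_timer flag set in qla2x00_init_timer(): instead of arming the command timer at init time, the timer is armed only here, once the IOCB has been built and is about to be posted, so an sp that fails earlier in qla2x00_start_sp() is never freed with a live timer pending. The intended ordering, compressed into a sketch (build_iocb() is an invented stand-in for the switch above):

/* Ordering sketch: prepare early, arm late. */
qla2x00_init_timer(sp, timeout);	/* records start_timer = 1, no add_timer */
rval = build_iocb(sp);			/* invented stand-in for the IOCB switch */
if (rval == QLA_SUCCESS && sp->start_timer)
	add_timer(&sp->u.iocb_cmd.timer);	/* armed only for issued commands */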
@@ -3769,7 +3717,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags;
 	uint32_t handle;
-	uint32_t index;
 	uint16_t req_cnt;
 	uint16_t cnt;
 	uint32_t *clr_ptr;
@@ -3794,17 +3741,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds) {
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0) {
 		rval = EXT_STATUS_BUSY;
 		goto queuing_error;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 78aec50abe0f..4c26630c1c3e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -776,7 +776,6 @@ skip_rio:
 	case MBA_LOOP_INIT_ERR:
 		ql_log(ql_log_warn, vha, 0x5090,
 		    "LOOP INIT ERROR (%x).\n", mb[1]);
-		ha->isp_ops->fw_dump(vha, 1);
 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
 		break;
 
@@ -1119,10 +1118,9 @@ global_port_update:
 			struct event_arg ea;
 
 			memset(&ea, 0, sizeof(ea));
-			ea.event = FCME_RSCN;
 			ea.id.b24 = rscn_entry;
 			ea.id.b.rsvd_1 = rscn_entry >> 24;
-			qla2x00_fcport_event_handler(vha, &ea);
+			qla2x00_handle_rscn(vha, &ea);
 			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
 		}
 		break;
@@ -1514,7 +1512,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	if (comp_status == CS_DATA_UNDERRUN) {
 		res = DID_OK << 16;
 		bsg_reply->reply_payload_rcv_len =
-		    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
+		    le16_to_cpu(pkt->rsp_info_len);
 
 		ql_log(ql_log_warn, vha, 0x5048,
 		    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
@@ -2257,11 +2255,8 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
 	struct bsg_job *bsg_job = NULL;
 	struct fc_bsg_request *bsg_request;
 	struct fc_bsg_reply *bsg_reply;
-	sts_entry_t *sts;
-	struct sts_entry_24xx *sts24;
-
-	sts = (sts_entry_t *) pkt;
-	sts24 = (struct sts_entry_24xx *) pkt;
+	sts_entry_t *sts = pkt;
+	struct sts_entry_24xx *sts24 = pkt;
 
 	/* Validate handle. */
 	if (index >= req->num_outstanding_cmds) {
@@ -2407,8 +2402,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	srb_t *sp;
 	fc_port_t *fcport;
 	struct scsi_cmnd *cp;
-	sts_entry_t *sts;
-	struct sts_entry_24xx *sts24;
+	sts_entry_t *sts = pkt;
+	struct sts_entry_24xx *sts24 = pkt;
 	uint16_t comp_status;
 	uint16_t scsi_status;
 	uint16_t ox_id;
@@ -2426,8 +2421,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	uint16_t state_flags = 0;
 	uint16_t retry_delay = 0;
 
-	sts = (sts_entry_t *) pkt;
-	sts24 = (struct sts_entry_24xx *) pkt;
 	if (IS_FWI2_CAPABLE(ha)) {
 		comp_status = le16_to_cpu(sts24->comp_status);
 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
@@ -2727,7 +2720,7 @@ check_scsi_status:
 			    "Port to be marked lost on fcport=%02x%02x%02x, current "
 			    "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
 			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
-			    port_state_str[atomic_read(&fcport->state)],
+			    port_state_str[FCS_ONLINE],
 			    comp_status);
 
 			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -2844,6 +2837,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
 	if (sense_len == 0) {
 		rsp->status_srb = NULL;
 		sp->done(sp, cp->result);
+	} else {
+		WARN_ON_ONCE(true);
 	}
 }
 
@@ -3471,10 +3466,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 		    ha->msix_count, ret);
 		goto msix_out;
 	} else if (ret < ha->msix_count) {
-		ql_log(ql_log_warn, vha, 0x00c6,
-		    "MSI-X: Failed to enable support "
-		    "with %d vectors, using %d vectors.\n",
-		    ha->msix_count, ret);
+		ql_log(ql_log_info, vha, 0x00c6,
+		    "MSI-X: Using %d vectors\n", ret);
 		ha->msix_count = ret;
 		/* Recalculate queue values */
 		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
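Receiving fewer MSI-X vectors than requested is an expected negotiation outcome rather than an error, hence the demotion from ql_log_warn to ql_log_info; the driver keeps ret as the new vector budget and resizes its queues. The usual shape of that negotiation, sketched with the generic PCI helper (the exact allocation call this driver uses may differ):

/* Sketch: ask for [min_vecs, want_vecs] vectors, size queues to the grant. */
ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, want_vecs, PCI_IRQ_MSIX);
if (ret < 0)
	return ret;		/* not even min_vecs available */
if (ret < want_vecs)
	ha->msix_count = ret;	/* fewer granted: recalculate queue count */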
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 133f5f6270ff..4c858e2d0ea8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -253,21 +253,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
 
-		if (IS_P3P_TYPE(ha)) {
-			if (RD_REG_DWORD(&reg->isp82.hint) &
-			    HINT_MBX_INT_PENDING) {
-				ha->flags.mbox_busy = 0;
-				spin_unlock_irqrestore(&ha->hardware_lock,
-				    flags);
-
-				atomic_dec(&ha->num_pend_mbx_stage2);
-				ql_dbg(ql_dbg_mbx, vha, 0x1010,
-				    "Pending mailbox timeout, exiting.\n");
-				rval = QLA_FUNCTION_TIMEOUT;
-				goto premature_exit;
-			}
+		if (IS_P3P_TYPE(ha))
 			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
-		} else if (IS_FWI2_CAPABLE(ha))
+		else if (IS_FWI2_CAPABLE(ha))
 			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
 		else
 			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -394,8 +382,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		goto premature_exit;
 	}
 
-	if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
+	if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
+		ql_dbg(ql_dbg_mbx, vha, 0x11ff,
+		    "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
+		    MBS_COMMAND_COMPLETE);
 		rval = QLA_FUNCTION_FAILED;
+	}
 
 	/* Load return mailbox registers. */
 	iptr2 = mcp->mb;
@@ -6213,10 +6205,8 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
 	return rval;
 }
 
-static void qla2x00_async_mb_sp_done(void *s, int res)
+static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
-
 	sp->u.iocb_cmd.u.mbx.rc = res;
 
 	complete(&sp->u.iocb_cmd.u.mbx.comp);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index b2977e49356b..1a9a11ae7285 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -901,10 +901,8 @@ failed:
 	return 0;
 }
 
-static void qla_ctrlvp_sp_done(void *s, int res)
+static void qla_ctrlvp_sp_done(srb_t *sp, int res)
 {
-	struct srb *sp = s;
-
 	if (sp->comp)
 		complete(sp->comp);
 	/* don't free sp here. Let the caller do the free */
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 942ee13b96a4..605b59c76c90 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -10,7 +10,6 @@
 #include <linux/pci.h>
 #include <linux/ratelimit.h>
 #include <linux/vmalloc.h>
-#include <linux/bsg-lib.h>
 #include <scsi/scsi_tcq.h>
 #include <linux/utsname.h>
 
@@ -149,7 +148,8 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
 		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+		WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp,
+		    mcp->tov * HZ) != 0);
 	} else {
 		ql_dbg(ql_dbg_mbx, vha, 0x112c,
 		    "Cmd=%x Polling Mode.\n", command);
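wait_for_completion_timeout() returns 0 when the wait timed out and the number of jiffies remaining otherwise, so its result should not be silently dropped; the WARN_ON_ONCE() wrapper above makes the outcome visible the first time it deviates from what the code expects. The semantics in two lines (the completion object and timeout are invented):

unsigned long left = wait_for_completion_timeout(&done, tov * HZ);
if (left == 0)
	pr_warn("wait timed out\n");	/* else: finished with 'left' jiffies spare */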
@@ -688,14 +688,12 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
 }
 
 char *
-qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	if (pci_is_pcie(ha->pdev)) {
-		strcpy(str, "PCIe iSA");
-		return str;
-	}
+	if (pci_is_pcie(ha->pdev))
+		strlcpy(str, "PCIe iSA", str_len);
 	return str;
 }
 
@@ -1799,10 +1797,8 @@ qla2x00_fxdisc_iocb_timeout(void *data)
 	complete(&lio->u.fxiocb.fxiocb_comp);
 }
 
-static void
-qla2x00_fxdisc_sp_done(void *ptr, int res)
+static void qla2x00_fxdisc_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 
 	complete(&lio->u.fxiocb.fxiocb_comp);
@@ -1881,22 +1877,22 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 		phost_info = &preg_hsi->hsi;
 		memset(preg_hsi, 0, sizeof(struct register_host_info));
 		phost_info->os_type = OS_TYPE_LINUX;
-		strncpy(phost_info->sysname,
-		    p_sysid->sysname, SYSNAME_LENGTH);
-		strncpy(phost_info->nodename,
-		    p_sysid->nodename, NODENAME_LENGTH);
+		strlcpy(phost_info->sysname, p_sysid->sysname,
+		    sizeof(phost_info->sysname));
+		strlcpy(phost_info->nodename, p_sysid->nodename,
+		    sizeof(phost_info->nodename));
 		if (!strcmp(phost_info->nodename, "(none)"))
 			ha->mr.host_info_resend = true;
-		strncpy(phost_info->release,
-		    p_sysid->release, RELEASE_LENGTH);
-		strncpy(phost_info->version,
-		    p_sysid->version, VERSION_LENGTH);
-		strncpy(phost_info->machine,
-		    p_sysid->machine, MACHINE_LENGTH);
-		strncpy(phost_info->domainname,
-		    p_sysid->domainname, DOMNAME_LENGTH);
-		strncpy(phost_info->hostdriver,
-		    QLA2XXX_VERSION, VERSION_LENGTH);
+		strlcpy(phost_info->release, p_sysid->release,
+		    sizeof(phost_info->release));
+		strlcpy(phost_info->version, p_sysid->version,
+		    sizeof(phost_info->version));
+		strlcpy(phost_info->machine, p_sysid->machine,
+		    sizeof(phost_info->machine));
+		strlcpy(phost_info->domainname, p_sysid->domainname,
+		    sizeof(phost_info->domainname));
+		strlcpy(phost_info->hostdriver, QLA2XXX_VERSION,
+		    sizeof(phost_info->hostdriver));
 		preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
 		ql_dbg(ql_dbg_init, vha, 0x0149,
 		    "ISP%04X: Host registration with firmware\n",
@@ -1941,8 +1937,10 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 	if (fx_type == FXDISC_GET_CONFIG_INFO) {
 		struct config_info_data *pinfo =
 		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
-		strcpy(vha->hw->model_number, pinfo->model_num);
-		strcpy(vha->hw->model_desc, pinfo->model_description);
+		strlcpy(vha->hw->model_number, pinfo->model_num,
+		    ARRAY_SIZE(vha->hw->model_number));
+		strlcpy(vha->hw->model_desc, pinfo->model_description,
+		    ARRAY_SIZE(vha->hw->model_desc));
 		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
 		    sizeof(vha->hw->mr.symbolic_name));
 		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
@@ -2541,6 +2539,8 @@ check_scsi_status:
 
 	if (rsp->status_srb == NULL)
 		sp->done(sp, res);
+	else
+		WARN_ON_ONCE(true);
 }
 
 /**
@@ -2618,6 +2618,8 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
 	if (sense_len == 0) {
 		rsp->status_srb = NULL;
 		sp->done(sp, cp->result);
+	} else {
+		WARN_ON_ONCE(true);
 	}
 }
 
@@ -3073,7 +3075,6 @@ qlafx00_start_scsi(srb_t *sp)
 {
 	int nseg;
 	unsigned long flags;
-	uint32_t index;
 	uint32_t handle;
 	uint16_t cnt;
 	uint16_t req_cnt;
@@ -3097,16 +3098,8 @@ qlafx00_start_scsi(srb_t *sp)
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 963094b3c300..6cc19e060afc 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -180,10 +180,9 @@ static void qla_nvme_ls_complete(struct work_struct *work)
 	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
 }
 
-static void qla_nvme_sp_ls_done(void *ptr, int res)
+static void qla_nvme_sp_ls_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
-	struct nvme_private *priv;
+	struct nvme_private *priv = sp->priv;
 
 	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
 		return;
@@ -191,17 +190,15 @@ static void qla_nvme_sp_ls_done(void *ptr, int res)
 	if (res)
 		res = -EINVAL;
 
-	priv = (struct nvme_private *)sp->priv;
 	priv->comp_status = res;
 	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
 	schedule_work(&priv->ls_work);
 }
 
 /* it assumed that QPair lock is held. */
-static void qla_nvme_sp_done(void *ptr, int res)
+static void qla_nvme_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
-	struct nvme_private *priv = (struct nvme_private *)sp->priv;
+	struct nvme_private *priv = sp->priv;
 
 	priv->comp_status = res;
 	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
@@ -222,7 +219,7 @@ static void qla_nvme_abort_work(struct work_struct *work)
 	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
 	    __func__, sp, sp->handle, fcport, fcport->deleted);
 
-	if (!ha->flags.fw_started && (fcport && fcport->deleted))
+	if (!ha->flags.fw_started && fcport->deleted)
 		goto out;
 
 	if (ha->flags.host_shutting_down) {
@@ -267,7 +264,6 @@ static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
 	schedule_work(&priv->abort_work);
 }
 
-
 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
 {
@@ -357,7 +353,6 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
 {
 	unsigned long flags;
 	uint32_t *clr_ptr;
-	uint32_t index;
 	uint32_t handle;
 	struct cmd_nvme *cmd_pkt;
 	uint16_t cnt, i;
@@ -381,17 +376,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
 	/* Acquire qpair specific lock */
 	spin_lock_irqsave(&qpair->qp_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds) {
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0) {
 		rval = -EBUSY;
 		goto queuing_error;
 	}
@@ -653,7 +639,9 @@ void qla_nvme_unregister_remote_port(struct fc_port *fcport)
 	    "%s: unregister remoteport on %p %8phN\n",
 	    __func__, fcport, fcport->port_name);
 
-	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
+		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+
 	init_completion(&fcport->nvme_del_done);
 	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
 	if (ret)
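nvme_fc_set_remoteport_devloss(rport, 0) tells the FC-NVMe core to treat the remote port as immediately lost instead of honoring the dev-loss grace period. Gating it on PFLG_DRIVER_REMOVING preserves the normal grace period for ordinary unregistrations and short-circuits it only when the whole driver is going away, where waiting would just stall teardown. In outline (the condition name is invented):

/* Sketch of the two teardown flavors. */
if (driver_is_being_removed)				/* invented condition */
	nvme_fc_set_remoteport_devloss(rport, 0);	/* give up immediately */
ret = nvme_fc_unregister_remoteport(rport);		/* otherwise honors devloss */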
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index 67bb4a2a3742..ef912902d4e5 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -7,7 +7,6 @@
 #ifndef __QLA_NVME_H
 #define __QLA_NVME_H
 
-#include <linux/blk-mq.h>
 #include <uapi/scsi/fc/fc_fs.h>
 #include <uapi/scsi/fc/fc_els.h>
 #include <linux/nvme-fc-driver.h>
@@ -119,7 +118,7 @@ struct pt_ls4_rx_unsol {
 	uint32_t exchange_address;
 	uint8_t d_id[3];
 	uint8_t r_ctl;
-	uint8_t s_id[3];
+	be_id_t s_id;
 	uint8_t cs_ctl;
 	uint8_t f_ctl[3];
 	uint8_t type;
@@ -144,5 +143,5 @@ int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
 void qla_nvme_delete(struct scsi_qla_host *);
 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *,
     struct req_que *);
-void qla24xx_async_gffid_sp_done(void *, int);
+void qla24xx_async_gffid_sp_done(struct srb *sp, int);
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index c760ae354174..2b2028f2383e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1977,7 +1977,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
 	} while (--retries);
 
 	ql_log(ql_log_fatal, vha, 0x00ac,
-	    "Rcv Peg initializatin failed: 0x%x.\n", val);
+	    "Rcv Peg initialization failed: 0x%x.\n", val);
 	read_lock(&ha->hw_lock);
 	qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
 	read_unlock(&ha->hw_lock);
@@ -1985,7 +1985,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
 }
 
 /* ISR related functions */
-static struct qla82xx_legacy_intr_set legacy_intr[] = \
+static struct qla82xx_legacy_intr_set legacy_intr[] =
 	QLA82XX_LEGACY_INTR_CONFIG;
 
 /*
@@ -2287,7 +2287,9 @@ qla82xx_disable_intrs(struct qla_hw_data *ha)
2287{ 2287{
2288 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2288 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2289 2289
2290 qla82xx_mbx_intr_disable(vha); 2290 if (ha->interrupts_on)
2291 qla82xx_mbx_intr_disable(vha);
2292
2291 spin_lock_irq(&ha->hardware_lock); 2293 spin_lock_irq(&ha->hardware_lock);
2292 if (IS_QLA8044(ha)) 2294 if (IS_QLA8044(ha))
2293 qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1); 2295 qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
@@ -3286,7 +3288,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3286 case QLA8XXX_DEV_NEED_QUIESCENT: 3288 case QLA8XXX_DEV_NEED_QUIESCENT:
3287 qla82xx_need_qsnt_handler(vha); 3289 qla82xx_need_qsnt_handler(vha);
3288 /* Reset timeout value after quiescence handler */ 3290 /* Reset timeout value after quiescence handler */
3289 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ 3291 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
3290 * HZ); 3292 * HZ);
3291 break; 3293 break;
3292 case QLA8XXX_DEV_QUIESCENT: 3294 case QLA8XXX_DEV_QUIESCENT:
@@ -3301,7 +3303,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3301 qla82xx_idc_lock(ha); 3303 qla82xx_idc_lock(ha);
3302 3304
3303 /* Reset timeout value after quiescence handler */ 3305 /* Reset timeout value after quiescence handler */
3304 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ 3306 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
3305 * HZ); 3307 * HZ);
3306 break; 3308 break;
3307 case QLA8XXX_DEV_FAILED: 3309 case QLA8XXX_DEV_FAILED:
@@ -3686,7 +3688,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3686 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 3688 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
3687 sp = req->outstanding_cmds[cnt]; 3689 sp = req->outstanding_cmds[cnt];
3688 if (sp) { 3690 if (sp) {
3689 if ((!sp->u.scmd.ctx || 3691 if ((!sp->u.scmd.crc_ctx ||
3690 (sp->flags & 3692 (sp->flags &
3691 SRB_FCP_CMND_DMA_VALID)) && 3693 SRB_FCP_CMND_DMA_VALID)) &&
3692 !ha->flags.isp82xx_fw_hung) { 3694 !ha->flags.isp82xx_fw_hung) {
@@ -3710,10 +3712,12 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3710 3712
3711 /* Wait for pending cmds (physical and virtual) to complete */ 3713 /* Wait for pending cmds (physical and virtual) to complete */
3712 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, 3714 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
3713 WAIT_HOST)) { 3715 WAIT_HOST) == QLA_SUCCESS) {
3714 ql_dbg(ql_dbg_init, vha, 0x00b3, 3716 ql_dbg(ql_dbg_init, vha, 0x00b3,
3715 "Done wait for " 3717 "Done wait for "
3716 "pending commands.\n"); 3718 "pending commands.\n");
3719 } else {
3720 WARN_ON_ONCE(true);
3717 } 3721 }
3718 } 3722 }
3719} 3723}
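Note: qla2x00_eh_wait_for_pending_commands() returns a QLA_* status code, so the old truthiness test logged "Done wait" on failure; the fix compares against QLA_SUCCESS and makes the timeout path loud but non-fatal. Sketch (the QLA_SUCCESS value of 0 is assumed from the driver's convention):

#include <linux/kernel.h>

#define QLA_SUCCESS 0                   /* assumed; qla_def.h convention */

static void wait_and_check(int status)
{
        if (status == QLA_SUCCESS)      /* old code tested plain truthiness, */
                pr_debug("done waiting for pending commands\n");
        else                            /* ...which fired on failure (non-0) */
                WARN_ON_ONCE(true);     /* one-shot stack trace, keep going  */
}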
@@ -4232,7 +4236,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
4232 goto md_failed; 4236 goto md_failed;
4233 } 4237 }
4234 4238
4235 entry_hdr = (qla82xx_md_entry_hdr_t *) \ 4239 entry_hdr = (qla82xx_md_entry_hdr_t *)
4236 (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); 4240 (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
4237 4241
4238 /* Walk through the entry headers */ 4242 /* Walk through the entry headers */
@@ -4339,7 +4343,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
4339 data_collected = (uint8_t *)data_ptr - 4343 data_collected = (uint8_t *)data_ptr -
4340 (uint8_t *)ha->md_dump; 4344 (uint8_t *)ha->md_dump;
4341skip_nxt_entry: 4345skip_nxt_entry:
4342 entry_hdr = (qla82xx_md_entry_hdr_t *) \ 4346 entry_hdr = (qla82xx_md_entry_hdr_t *)
4343 (((uint8_t *)entry_hdr) + entry_hdr->entry_size); 4347 (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
4344 } 4348 }
4345 4349
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 3c7beef92c35..230abee10598 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -486,13 +486,13 @@
486#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) 486#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
487#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) 487#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
488 488
489#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 489#define QLA82XX_PCI_CRBSPACE 0x06000000UL
490#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000 490#define QLA82XX_PCI_DIRECT_CRB 0x04400000UL
491#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000 491#define QLA82XX_PCI_CAMQM 0x04800000UL
492#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff 492#define QLA82XX_PCI_CAMQM_MAX 0x04ffffffUL
493#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000 493#define QLA82XX_PCI_DDR_NET 0x00000000UL
494#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000 494#define QLA82XX_PCI_QDR_NET 0x04000000UL
495#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff 495#define QLA82XX_PCI_QDR_NET_MAX 0x043fffffUL
496 496
497/* 497/*
498 * Register offsets for MN 498 * Register offsets for MN
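Note: replacing the (unsigned long) casts with a UL suffix yields the same constant type but keeps the macros usable where casts are not allowed, e.g. in preprocessor #if arithmetic:

#define CRB_SPACE_CAST   (unsigned long)0x06000000   /* old: invalid in #if */
#define CRB_SPACE_SUFFIX 0x06000000UL                /* new: works everywhere */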
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 369ac04d0454..c056f466f1f4 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -2810,7 +2810,7 @@ error:
2810 2810
2811#define ISP8044_PEX_DMA_ENGINE_INDEX 8 2811#define ISP8044_PEX_DMA_ENGINE_INDEX 8
2812#define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000 2812#define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000
2813#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000 2813#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000UL
2814#define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0 2814#define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0
2815#define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04 2815#define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04
2816#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08 2816#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 98e60a34afd9..73db01e3b4e4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -69,7 +69,7 @@ MODULE_PARM_DESC(ql2xplogiabsentdevice,
69 "a Fabric scan. This is needed for several broken switches. " 69 "a Fabric scan. This is needed for several broken switches. "
70 "Default is 0 - no PLOGI. 1 - perform PLOGI."); 70 "Default is 0 - no PLOGI. 1 - perform PLOGI.");
71 71
72int ql2xloginretrycount = 0; 72int ql2xloginretrycount;
73module_param(ql2xloginretrycount, int, S_IRUGO); 73module_param(ql2xloginretrycount, int, S_IRUGO);
74MODULE_PARM_DESC(ql2xloginretrycount, 74MODULE_PARM_DESC(ql2xloginretrycount,
75 "Specify an alternate value for the NVRAM login retry count."); 75 "Specify an alternate value for the NVRAM login retry count.");
@@ -234,7 +234,7 @@ MODULE_PARM_DESC(ql2xmdenable,
234 "0 - MiniDump disabled. " 234 "0 - MiniDump disabled. "
235 "1 (Default) - MiniDump enabled."); 235 "1 (Default) - MiniDump enabled.");
236 236
237int ql2xexlogins = 0; 237int ql2xexlogins;
238module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); 238module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
239MODULE_PARM_DESC(ql2xexlogins, 239MODULE_PARM_DESC(ql2xexlogins,
240 "Number of extended Logins. " 240 "Number of extended Logins. "
@@ -250,7 +250,7 @@ module_param(ql2xiniexchg, uint, 0644);
250MODULE_PARM_DESC(ql2xiniexchg, 250MODULE_PARM_DESC(ql2xiniexchg,
251 "Number of initiator exchanges."); 251 "Number of initiator exchanges.");
252 252
253int ql2xfwholdabts = 0; 253int ql2xfwholdabts;
254module_param(ql2xfwholdabts, int, S_IRUGO); 254module_param(ql2xfwholdabts, int, S_IRUGO);
255MODULE_PARM_DESC(ql2xfwholdabts, 255MODULE_PARM_DESC(ql2xfwholdabts,
256 "Allow FW to hold status IOCB until ABTS rsp received. " 256 "Allow FW to hold status IOCB until ABTS rsp received. "
@@ -536,80 +536,70 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
536} 536}
537 537
538static char * 538static char *
539qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str) 539qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
540{ 540{
541 struct qla_hw_data *ha = vha->hw; 541 struct qla_hw_data *ha = vha->hw;
542 static char *pci_bus_modes[] = { 542 static const char *const pci_bus_modes[] = {
543 "33", "66", "100", "133", 543 "33", "66", "100", "133",
544 }; 544 };
545 uint16_t pci_bus; 545 uint16_t pci_bus;
546 546
547 strcpy(str, "PCI");
548 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; 547 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
549 if (pci_bus) { 548 if (pci_bus) {
550 strcat(str, "-X ("); 549 snprintf(str, str_len, "PCI-X (%s MHz)",
551 strcat(str, pci_bus_modes[pci_bus]); 550 pci_bus_modes[pci_bus]);
552 } else { 551 } else {
553 pci_bus = (ha->pci_attr & BIT_8) >> 8; 552 pci_bus = (ha->pci_attr & BIT_8) >> 8;
554 strcat(str, " ("); 553 snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
555 strcat(str, pci_bus_modes[pci_bus]);
556 } 554 }
557 strcat(str, " MHz)");
558 555
559 return (str); 556 return str;
560} 557}
561 558
562static char * 559static char *
563qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) 560qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
564{ 561{
565 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 562 static const char *const pci_bus_modes[] = {
563 "33", "66", "100", "133",
564 };
566 struct qla_hw_data *ha = vha->hw; 565 struct qla_hw_data *ha = vha->hw;
567 uint32_t pci_bus; 566 uint32_t pci_bus;
568 567
569 if (pci_is_pcie(ha->pdev)) { 568 if (pci_is_pcie(ha->pdev)) {
570 char lwstr[6];
571 uint32_t lstat, lspeed, lwidth; 569 uint32_t lstat, lspeed, lwidth;
570 const char *speed_str;
572 571
573 pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); 572 pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
574 lspeed = lstat & PCI_EXP_LNKCAP_SLS; 573 lspeed = lstat & PCI_EXP_LNKCAP_SLS;
575 lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; 574 lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
576 575
577 strcpy(str, "PCIe (");
578 switch (lspeed) { 576 switch (lspeed) {
579 case 1: 577 case 1:
580 strcat(str, "2.5GT/s "); 578 speed_str = "2.5GT/s";
581 break; 579 break;
582 case 2: 580 case 2:
583 strcat(str, "5.0GT/s "); 581 speed_str = "5.0GT/s";
584 break; 582 break;
585 case 3: 583 case 3:
586 strcat(str, "8.0GT/s "); 584 speed_str = "8.0GT/s";
587 break; 585 break;
588 default: 586 default:
589 strcat(str, "<unknown> "); 587 speed_str = "<unknown>";
590 break; 588 break;
591 } 589 }
592 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); 590 snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
593 strcat(str, lwstr);
594 591
595 return str; 592 return str;
596 } 593 }
597 594
598 strcpy(str, "PCI");
599 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; 595 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
600 if (pci_bus == 0 || pci_bus == 8) { 596 if (pci_bus == 0 || pci_bus == 8)
601 strcat(str, " ("); 597 snprintf(str, str_len, "PCI (%s MHz)",
602 strcat(str, pci_bus_modes[pci_bus >> 3]); 598 pci_bus_modes[pci_bus >> 3]);
603 } else { 599 else
604 strcat(str, "-X "); 600 snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
605 if (pci_bus & BIT_2) 601 pci_bus & 4 ? 2 : 1,
606 strcat(str, "Mode 2"); 602 pci_bus_modes[pci_bus & 3]);
607 else
608 strcat(str, "Mode 1");
609 strcat(str, " (");
610 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
611 }
612 strcat(str, " MHz)");
613 603
614 return str; 604 return str;
615} 605}
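Note: both pci_info_str helpers now take the buffer length and build the string with one bounded snprintf() instead of an unchecked strcpy()/strcat() chain. A standalone sketch of the pattern:

#include <stdio.h>

static const char *const bus_modes[] = { "33", "66", "100", "133" };

/* Caller passes the buffer size, so the result can never overrun str. */
static char *bus_info_str(char *str, size_t str_len, unsigned int mode)
{
        snprintf(str, str_len, "PCI-X (%s MHz)", bus_modes[mode & 3]);
        return str;
}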
@@ -662,13 +652,10 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
662 return str; 652 return str;
663} 653}
664 654
665void 655void qla2x00_sp_free_dma(srb_t *sp)
666qla2x00_sp_free_dma(void *ptr)
667{ 656{
668 srb_t *sp = ptr;
669 struct qla_hw_data *ha = sp->vha->hw; 657 struct qla_hw_data *ha = sp->vha->hw;
670 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 658 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
671 void *ctx = GET_CMD_CTX_SP(sp);
672 659
673 if (sp->flags & SRB_DMA_VALID) { 660 if (sp->flags & SRB_DMA_VALID) {
674 scsi_dma_unmap(cmd); 661 scsi_dma_unmap(cmd);
@@ -681,24 +668,21 @@ qla2x00_sp_free_dma(void *ptr)
681 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 668 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
682 } 669 }
683 670
684 if (!ctx)
685 return;
686
687 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 671 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
688 /* List assured to be having elements */ 672 /* List assured to be having elements */
689 qla2x00_clean_dsd_pool(ha, ctx); 673 qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
690 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 674 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
691 } 675 }
692 676
693 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 677 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
694 struct crc_context *ctx0 = ctx; 678 struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
695 679
696 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); 680 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
697 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 681 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
698 } 682 }
699 683
700 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 684 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
701 struct ct6_dsd *ctx1 = ctx; 685 struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
702 686
703 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 687 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
704 ctx1->fcp_cmnd_dma); 688 ctx1->fcp_cmnd_dma);
@@ -709,10 +693,8 @@ qla2x00_sp_free_dma(void *ptr)
709 } 693 }
710} 694}
711 695
712void 696void qla2x00_sp_compl(srb_t *sp, int res)
713qla2x00_sp_compl(void *ptr, int res)
714{ 697{
715 srb_t *sp = ptr;
716 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 698 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
717 struct completion *comp = sp->comp; 699 struct completion *comp = sp->comp;
718 700
@@ -727,16 +709,12 @@ qla2x00_sp_compl(void *ptr, int res)
727 cmd->scsi_done(cmd); 709 cmd->scsi_done(cmd);
728 if (comp) 710 if (comp)
729 complete(comp); 711 complete(comp);
730 qla2x00_rel_sp(sp);
731} 712}
732 713
733void 714void qla2xxx_qpair_sp_free_dma(srb_t *sp)
734qla2xxx_qpair_sp_free_dma(void *ptr)
735{ 715{
736 srb_t *sp = (srb_t *)ptr;
737 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 716 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
738 struct qla_hw_data *ha = sp->fcport->vha->hw; 717 struct qla_hw_data *ha = sp->fcport->vha->hw;
739 void *ctx = GET_CMD_CTX_SP(sp);
740 718
741 if (sp->flags & SRB_DMA_VALID) { 719 if (sp->flags & SRB_DMA_VALID) {
742 scsi_dma_unmap(cmd); 720 scsi_dma_unmap(cmd);
@@ -749,17 +727,14 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
749 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 727 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
750 } 728 }
751 729
752 if (!ctx)
753 return;
754
755 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 730 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
756 /* List assured to be having elements */ 731 /* List assured to be having elements */
757 qla2x00_clean_dsd_pool(ha, ctx); 732 qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
758 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 733 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
759 } 734 }
760 735
761 if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) { 736 if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
762 struct crc_context *difctx = ctx; 737 struct crc_context *difctx = sp->u.scmd.crc_ctx;
763 struct dsd_dma *dif_dsd, *nxt_dsd; 738 struct dsd_dma *dif_dsd, *nxt_dsd;
764 739
765 list_for_each_entry_safe(dif_dsd, nxt_dsd, 740 list_for_each_entry_safe(dif_dsd, nxt_dsd,
@@ -795,7 +770,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
795 } 770 }
796 771
797 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 772 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
798 struct ct6_dsd *ctx1 = ctx; 773 struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
799 774
800 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 775 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
801 ctx1->fcp_cmnd_dma); 776 ctx1->fcp_cmnd_dma);
@@ -807,17 +782,15 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
807 } 782 }
808 783
809 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 784 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
810 struct crc_context *ctx0 = ctx; 785 struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
811 786
812 dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma); 787 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
813 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 788 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
814 } 789 }
815} 790}
816 791
817void 792void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
818qla2xxx_qpair_sp_compl(void *ptr, int res)
819{ 793{
820 srb_t *sp = ptr;
821 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 794 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
822 struct completion *comp = sp->comp; 795 struct completion *comp = sp->comp;
823 796
@@ -832,7 +805,6 @@ qla2xxx_qpair_sp_compl(void *ptr, int res)
832 cmd->scsi_done(cmd); 805 cmd->scsi_done(cmd);
833 if (comp) 806 if (comp)
834 complete(comp); 807 complete(comp);
835 qla2xxx_rel_qpair_sp(sp->qpair, sp);
836} 808}
837 809
838static int 810static int
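Note: the done/free callbacks move from void * to the real srb_t *, removing a cast from every handler and letting the compiler type-check them. A sketch of the change in shape:

typedef struct srb srb_t;

struct srb {
        void (*done)(srb_t *sp, int res);   /* was: void (*)(void *, int) */
        void (*free)(srb_t *sp);            /* was: void (*)(void *)      */
};

static void sp_compl(srb_t *sp, int res)    /* no 'srb_t *sp = ptr;' cast */
{
        if (sp->free)
                sp->free(sp);
}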
@@ -845,9 +817,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
845 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 817 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
846 srb_t *sp; 818 srb_t *sp;
847 int rval; 819 int rval;
848 struct qla_qpair *qpair = NULL;
849 uint32_t tag;
850 uint16_t hwq;
851 820
852 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) || 821 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
853 WARN_ON_ONCE(!rport)) { 822 WARN_ON_ONCE(!rport)) {
@@ -856,6 +825,10 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
856 } 825 }
857 826
858 if (ha->mqenable) { 827 if (ha->mqenable) {
828 uint32_t tag;
829 uint16_t hwq;
830 struct qla_qpair *qpair = NULL;
831
859 tag = blk_mq_unique_tag(cmd->request); 832 tag = blk_mq_unique_tag(cmd->request);
860 hwq = blk_mq_unique_tag_to_hwq(tag); 833 hwq = blk_mq_unique_tag_to_hwq(tag);
861 qpair = ha->queue_pair_map[hwq]; 834 qpair = ha->queue_pair_map[hwq];
@@ -925,9 +898,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
925 else 898 else
926 goto qc24_target_busy; 899 goto qc24_target_busy;
927 900
928 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 901 sp = scsi_cmd_priv(cmd);
929 if (!sp) 902 qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
930 goto qc24_host_busy;
931 903
932 sp->u.scmd.cmd = cmd; 904 sp->u.scmd.cmd = cmd;
933 sp->type = SRB_SCSI_CMD; 905 sp->type = SRB_SCSI_CMD;
@@ -948,9 +920,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
948qc24_host_busy_free_sp: 920qc24_host_busy_free_sp:
949 sp->free(sp); 921 sp->free(sp);
950 922
951qc24_host_busy:
952 return SCSI_MLQUEUE_HOST_BUSY;
953
954qc24_target_busy: 923qc24_target_busy:
955 return SCSI_MLQUEUE_TARGET_BUSY; 924 return SCSI_MLQUEUE_TARGET_BUSY;
956 925
@@ -1011,9 +980,8 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
1011 else 980 else
1012 goto qc24_target_busy; 981 goto qc24_target_busy;
1013 982
1014 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC); 983 sp = scsi_cmd_priv(cmd);
1015 if (!sp) 984 qla2xxx_init_sp(sp, vha, qpair, fcport);
1016 goto qc24_host_busy;
1017 985
1018 sp->u.scmd.cmd = cmd; 986 sp->u.scmd.cmd = cmd;
1019 sp->type = SRB_SCSI_CMD; 987 sp->type = SRB_SCSI_CMD;
@@ -1037,9 +1005,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
1037qc24_host_busy_free_sp: 1005qc24_host_busy_free_sp:
1038 sp->free(sp); 1006 sp->free(sp);
1039 1007
1040qc24_host_busy:
1041 return SCSI_MLQUEUE_HOST_BUSY;
1042
1043qc24_target_busy: 1008qc24_target_busy:
1044 return SCSI_MLQUEUE_TARGET_BUSY; 1009 return SCSI_MLQUEUE_TARGET_BUSY;
1045 1010
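Note: both queuecommand paths drop their private sp allocator. With .cmd_size set in the host template (see the qla2xxx_driver_template hunk below), the SCSI midlayer embeds the per-command data in each scsi_cmnd, so allocation can no longer fail and the qc24_host_busy label goes away. A minimal sketch of the mechanism — struct my_priv is illustrative; qla2xxx embeds srb_t:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_priv {
        int state;
};

static int my_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
        struct my_priv *priv = scsi_cmd_priv(cmd);  /* preallocated, never NULL */

        priv->state = 1;
        /* ... build and post the request ... */
        return 0;
}

static struct scsi_host_template my_sht = {
        .queuecommand = my_queuecommand,
        .cmd_size     = sizeof(struct my_priv),     /* midlayer allocates this */
};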
@@ -1058,8 +1023,8 @@ qc24_fail_command:
1058 * cmd = Scsi Command to wait on. 1023 * cmd = Scsi Command to wait on.
1059 * 1024 *
1060 * Return: 1025 * Return:
1061 * Not Found : 0 1026 * Completed in time : QLA_SUCCESS
1062 * Found : 1 1027 * Did not complete in time : QLA_FUNCTION_FAILED
1063 */ 1028 */
1064static int 1029static int
1065qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) 1030qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
@@ -1269,14 +1234,13 @@ static int
1269qla2xxx_eh_abort(struct scsi_cmnd *cmd) 1234qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1270{ 1235{
1271 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 1236 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1237 DECLARE_COMPLETION_ONSTACK(comp);
1272 srb_t *sp; 1238 srb_t *sp;
1273 int ret; 1239 int ret;
1274 unsigned int id; 1240 unsigned int id;
1275 uint64_t lun; 1241 uint64_t lun;
1276 unsigned long flags;
1277 int rval; 1242 int rval;
1278 struct qla_hw_data *ha = vha->hw; 1243 struct qla_hw_data *ha = vha->hw;
1279 struct qla_qpair *qpair;
1280 1244
1281 if (qla2x00_isp_reg_stat(ha)) { 1245 if (qla2x00_isp_reg_stat(ha)) {
1282 ql_log(ql_log_info, vha, 0x8042, 1246 ql_log(ql_log_info, vha, 0x8042,
@@ -1288,28 +1252,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1288 if (ret != 0) 1252 if (ret != 0)
1289 return ret; 1253 return ret;
1290 1254
1291 sp = (srb_t *) CMD_SP(cmd); 1255 sp = scsi_cmd_priv(cmd);
1292 if (!sp)
1293 return SUCCESS;
1294
1295 qpair = sp->qpair;
1296 if (!qpair)
1297 return SUCCESS;
1298 1256
1299 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 1257 if (sp->fcport && sp->fcport->deleted)
1300 if (sp->type != SRB_SCSI_CMD || GET_CMD_SP(sp) != cmd) {
1301 /* there's a chance an interrupt could clear
1302 the ptr as part of done & free */
1303 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1304 return SUCCESS; 1258 return SUCCESS;
1305 }
1306 1259
1307 if (sp_get(sp)){ 1260 /* Return if the command has already finished. */
1308 /* ref_count is already 0 */ 1261 if (sp_get(sp))
1309 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1310 return SUCCESS; 1262 return SUCCESS;
1311 }
1312 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1313 1263
1314 id = cmd->device->id; 1264 id = cmd->device->id;
1315 lun = cmd->device->lun; 1265 lun = cmd->device->lun;
@@ -1331,6 +1281,23 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1331 sp->done(sp, DID_ABORT << 16); 1281 sp->done(sp, DID_ABORT << 16);
1332 ret = SUCCESS; 1282 ret = SUCCESS;
1333 break; 1283 break;
1284 case QLA_FUNCTION_PARAMETER_ERROR: {
1285 /* Wait for the command completion. */
1286 uint32_t ratov = ha->r_a_tov/10;
1287 uint32_t ratov_j = msecs_to_jiffies(4 * ratov * 1000);
1288
1289 WARN_ON_ONCE(sp->comp);
1290 sp->comp = &comp;
1291 if (!wait_for_completion_timeout(&comp, ratov_j)) {
1292 ql_dbg(ql_dbg_taskm, vha, 0xffff,
1293 "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
1294 __func__, ha->r_a_tov);
1295 ret = FAILED;
1296 } else {
1297 ret = SUCCESS;
1298 }
1299 break;
1300 }
1334 default: 1301 default:
1335 /* 1302 /*
1336 * Either abort failed or abort and completion raced. Let 1303 * Either abort failed or abort and completion raced. Let
@@ -1340,6 +1307,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1340 break; 1307 break;
1341 } 1308 }
1342 1309
1310 sp->comp = NULL;
1311 atomic_dec(&sp->ref_count);
1343 ql_log(ql_log_info, vha, 0x801c, 1312 ql_log(ql_log_info, vha, 0x801c,
1344 "Abort command issued nexus=%ld:%d:%llu -- %x.\n", 1313 "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
1345 vha->host_no, id, lun, ret); 1314 vha->host_no, id, lun, ret);
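Note: the new QLA_FUNCTION_PARAMETER_ERROR case handles an abort that raced with normal completion: instead of failing, eh_abort parks on an on-stack completion (DECLARE_COMPLETION_ONSTACK above) for up to 4 * R_A_TOV. Sketch of the wait; r_a_tov is assumed to be in 100 ms units, as the hunk's /10 implies:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int wait_abort_race(struct completion *comp, u32 r_a_tov)
{
        unsigned long to = msecs_to_jiffies(4 * (r_a_tov / 10) * 1000);

        if (!wait_for_completion_timeout(comp, to))
                return -ETIMEDOUT;      /* eh_abort maps this to FAILED */
        return 0;                       /* command finished: SUCCESS    */
}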
@@ -1347,6 +1316,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
1347 return ret; 1316 return ret;
1348} 1317}
1349 1318
1319/*
1320 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
1321 */
1350int 1322int
1351qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, 1323qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
1352 uint64_t l, enum nexus_wait_type type) 1324 uint64_t l, enum nexus_wait_type type)
@@ -1420,6 +1392,9 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
1420 if (err != 0) 1392 if (err != 0)
1421 return err; 1393 return err;
1422 1394
1395 if (fcport->deleted)
1396 return SUCCESS;
1397
1423 ql_log(ql_log_info, vha, 0x8009, 1398 ql_log(ql_log_info, vha, 0x8009,
1424 "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no, 1399 "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
1425 cmd->device->id, cmd->device->lun, cmd); 1400 cmd->device->id, cmd->device->lun, cmd);
@@ -1534,6 +1509,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1534 return ret; 1509 return ret;
1535 ret = FAILED; 1510 ret = FAILED;
1536 1511
1512 if (qla2x00_chip_is_down(vha))
1513 return ret;
1514
1537 ql_log(ql_log_info, vha, 0x8012, 1515 ql_log(ql_log_info, vha, 0x8012,
1538 "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); 1516 "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
1539 1517
@@ -1746,6 +1724,8 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
1746 spin_lock_irqsave(qp->qp_lock_ptr, *flags); 1724 spin_lock_irqsave(qp->qp_lock_ptr, *flags);
1747 sp->comp = NULL; 1725 sp->comp = NULL;
1748 } 1726 }
1727
1728 atomic_dec(&sp->ref_count);
1749} 1729}
1750 1730
1751static void 1731static void
@@ -1800,8 +1780,13 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1800 int que; 1780 int que;
1801 struct qla_hw_data *ha = vha->hw; 1781 struct qla_hw_data *ha = vha->hw;
1802 1782
1783 /* Continue only if initialization complete. */
1784 if (!ha->base_qpair)
1785 return;
1803 __qla2x00_abort_all_cmds(ha->base_qpair, res); 1786 __qla2x00_abort_all_cmds(ha->base_qpair, res);
1804 1787
1788 if (!ha->queue_pair_map)
1789 return;
1805 for (que = 0; que < ha->max_qpairs; que++) { 1790 for (que = 0; que < ha->max_qpairs; que++) {
1806 if (!ha->queue_pair_map[que]) 1791 if (!ha->queue_pair_map[que])
1807 continue; 1792 continue;
@@ -2477,7 +2462,7 @@ static struct isp_operations qla27xx_isp_ops = {
2477 .config_rings = qla24xx_config_rings, 2462 .config_rings = qla24xx_config_rings,
2478 .reset_adapter = qla24xx_reset_adapter, 2463 .reset_adapter = qla24xx_reset_adapter,
2479 .nvram_config = qla81xx_nvram_config, 2464 .nvram_config = qla81xx_nvram_config,
2480 .update_fw_options = qla81xx_update_fw_options, 2465 .update_fw_options = qla24xx_update_fw_options,
2481 .load_risc = qla81xx_load_risc, 2466 .load_risc = qla81xx_load_risc,
2482 .pci_info_str = qla24xx_pci_info_str, 2467 .pci_info_str = qla24xx_pci_info_str,
2483 .fw_version_str = qla24xx_fw_version_str, 2468 .fw_version_str = qla24xx_fw_version_str,
@@ -3154,6 +3139,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3154 ql_log(ql_log_fatal, base_vha, 0x003d, 3139 ql_log(ql_log_fatal, base_vha, 0x003d,
3155 "Failed to allocate memory for queue pointers..." 3140 "Failed to allocate memory for queue pointers..."
3156 "aborting.\n"); 3141 "aborting.\n");
3142 ret = -ENODEV;
3157 goto probe_failed; 3143 goto probe_failed;
3158 } 3144 }
3159 3145
@@ -3418,7 +3404,8 @@ skip_dpc:
3418 "QLogic %s - %s.\n", ha->model_number, ha->model_desc); 3404 "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
3419 ql_log(ql_log_info, base_vha, 0x00fc, 3405 ql_log(ql_log_info, base_vha, 0x00fc,
3420 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", 3406 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
3421 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info), 3407 pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
3408 sizeof(pci_info)),
3422 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', 3409 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
3423 base_vha->host_no, 3410 base_vha->host_no,
3424 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); 3411 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
@@ -4598,6 +4585,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
4598 4585
4599 ha->fce = NULL; 4586 ha->fce = NULL;
4600 ha->fce_dma = 0; 4587 ha->fce_dma = 0;
4588 ha->flags.fce_enabled = 0;
4601 ha->eft = NULL; 4589 ha->eft = NULL;
4602 ha->eft_dma = 0; 4590 ha->eft_dma = 0;
4603 ha->fw_dumped = 0; 4591 ha->fw_dumped = 0;
@@ -4716,7 +4704,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
4716 mempool_destroy(ha->ctx_mempool); 4704 mempool_destroy(ha->ctx_mempool);
4717 ha->ctx_mempool = NULL; 4705 ha->ctx_mempool = NULL;
4718 4706
4719 if (ql2xenabledif) { 4707 if (ql2xenabledif && ha->dif_bundl_pool) {
4720 struct dsd_dma *dsd, *nxt; 4708 struct dsd_dma *dsd, *nxt;
4721 4709
4722 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, 4710 list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
@@ -4739,8 +4727,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
4739 } 4727 }
4740 } 4728 }
4741 4729
4742 if (ha->dif_bundl_pool) 4730 dma_pool_destroy(ha->dif_bundl_pool);
4743 dma_pool_destroy(ha->dif_bundl_pool);
4744 ha->dif_bundl_pool = NULL; 4731 ha->dif_bundl_pool = NULL;
4745 4732
4746 qlt_mem_free(ha); 4733 qlt_mem_free(ha);
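Note: like kfree(), dma_pool_destroy() accepts NULL and does nothing, so the surrounding if was dead weight:

dma_pool_destroy(ha->dif_bundl_pool);   /* no-op when the pool is NULL  */
ha->dif_bundl_pool = NULL;              /* guard against double destroy */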
@@ -4812,7 +4799,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4812 if (!vha->gnl.l) { 4799 if (!vha->gnl.l) {
4813 ql_log(ql_log_fatal, vha, 0xd04a, 4800 ql_log(ql_log_fatal, vha, 0xd04a,
4814 "Alloc failed for name list.\n"); 4801 "Alloc failed for name list.\n");
4815 scsi_remove_host(vha->host); 4802 scsi_host_put(vha->host);
4816 return NULL; 4803 return NULL;
4817 } 4804 }
4818 4805
@@ -4825,7 +4812,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4825 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, 4812 dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
4826 vha->gnl.l, vha->gnl.ldma); 4813 vha->gnl.l, vha->gnl.ldma);
4827 vha->gnl.l = NULL; 4814 vha->gnl.l = NULL;
4828 scsi_remove_host(vha->host); 4815 scsi_host_put(vha->host);
4829 return NULL; 4816 return NULL;
4830 } 4817 }
4831 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); 4818 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
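Note: both failure paths run before scsi_add_host(), where scsi_remove_host() is not yet valid; dropping the reference taken by scsi_host_alloc() is the correct undo. Sketch, with setup_lists() standing in hypothetically for the gnl allocations:

#include <scsi/scsi_host.h>

static struct Scsi_Host *create_host(struct scsi_host_template *sht)
{
        struct Scsi_Host *host = scsi_host_alloc(sht, 0);

        if (!host)
                return NULL;

        if (setup_lists(host)) {        /* hypothetical allocation step  */
                scsi_host_put(host);    /* undo scsi_host_alloc() only;  */
                return NULL;            /* scsi_remove_host() was the bug */
        }
        return host;
}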
@@ -5054,8 +5041,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
5054 "%s %8phC mem alloc fail.\n", 5041 "%s %8phC mem alloc fail.\n",
5055 __func__, e->u.new_sess.port_name); 5042 __func__, e->u.new_sess.port_name);
5056 5043
5057 if (pla) 5044 if (pla) {
5045 list_del(&pla->list);
5058 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5046 kmem_cache_free(qla_tgt_plogi_cachep, pla);
5047 }
5059 return; 5048 return;
5060 } 5049 }
5061 5050
@@ -5086,6 +5075,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
5086 if (fcport) { 5075 if (fcport) {
5087 fcport->id_changed = 1; 5076 fcport->id_changed = 1;
5088 fcport->scan_state = QLA_FCPORT_FOUND; 5077 fcport->scan_state = QLA_FCPORT_FOUND;
5078 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5089 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); 5079 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
5090 5080
5091 if (pla) { 5081 if (pla) {
@@ -5165,8 +5155,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
5165 5155
5166 if (free_fcport) { 5156 if (free_fcport) {
5167 qla2x00_free_fcport(fcport); 5157 qla2x00_free_fcport(fcport);
5168 if (pla) 5158 if (pla) {
5159 list_del(&pla->list);
5169 kmem_cache_free(qla_tgt_plogi_cachep, pla); 5160 kmem_cache_free(qla_tgt_plogi_cachep, pla);
5161 }
5170 } 5162 }
5171} 5163}
5172 5164
@@ -5346,9 +5338,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
5346 } else { 5338 } else {
5347 if (vha->hw->current_topology != ISP_CFG_NL) { 5339 if (vha->hw->current_topology != ISP_CFG_NL) {
5348 memset(&ea, 0, sizeof(ea)); 5340 memset(&ea, 0, sizeof(ea));
5349 ea.event = FCME_RELOGIN;
5350 ea.fcport = fcport; 5341 ea.fcport = fcport;
5351 qla2x00_fcport_event_handler(vha, &ea); 5342 qla24xx_handle_relogin_event(vha, &ea);
5352 } else if (vha->hw->current_topology == 5343 } else if (vha->hw->current_topology ==
5353 ISP_CFG_NL) { 5344 ISP_CFG_NL) {
5354 fcport->login_retry--; 5345 fcport->login_retry--;
@@ -5686,7 +5677,6 @@ exit:
5686void 5677void
5687qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) 5678qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
5688{ 5679{
5689 uint16_t options = (requester_id << 15) | BIT_6;
5690 uint32_t data; 5680 uint32_t data;
5691 uint32_t lock_owner; 5681 uint32_t lock_owner;
5692 struct qla_hw_data *ha = base_vha->hw; 5682 struct qla_hw_data *ha = base_vha->hw;
@@ -5719,22 +5709,6 @@ retry_lock:
5719 } 5709 }
5720 5710
5721 return; 5711 return;
5722
5723 /* XXX: IDC-lock implementation using access-control mbx */
5724retry_lock2:
5725 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
5726 ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
5727 "Failed to acquire IDC lock. retrying...\n");
5728 /* Retry/Perform IDC-Lock recovery */
5729 if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
5730 qla83xx_wait_logic();
5731 goto retry_lock2;
5732 } else
5733 ql_log(ql_log_warn, base_vha, 0xb076,
5734 "IDC Lock recovery FAILED.\n");
5735 }
5736
5737 return;
5738} 5712}
5739 5713
5740void 5714void
@@ -7156,6 +7130,7 @@ struct scsi_host_template qla2xxx_driver_template = {
7156 7130
7157 .supported_mode = MODE_INITIATOR, 7131 .supported_mode = MODE_INITIATOR,
7158 .track_queue_depth = 1, 7132 .track_queue_depth = 1,
7133 .cmd_size = sizeof(srb_t),
7159}; 7134};
7160 7135
7161static const struct pci_error_handlers qla2xxx_err_handler = { 7136static const struct pci_error_handlers qla2xxx_err_handler = {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1eb82384d933..f2d5115b2d8d 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -473,22 +473,24 @@ qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
473 return QLA_FUNCTION_TIMEOUT; 473 return QLA_FUNCTION_TIMEOUT;
474} 474}
475 475
476uint32_t * 476int
477qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, 477qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
478 uint32_t dwords) 478 uint32_t dwords)
479{ 479{
480 ulong i; 480 ulong i;
481 int ret = QLA_SUCCESS;
481 struct qla_hw_data *ha = vha->hw; 482 struct qla_hw_data *ha = vha->hw;
482 483
483 /* Dword reads to flash. */ 484 /* Dword reads to flash. */
484 faddr = flash_data_addr(ha, faddr); 485 faddr = flash_data_addr(ha, faddr);
485 for (i = 0; i < dwords; i++, faddr++, dwptr++) { 486 for (i = 0; i < dwords; i++, faddr++, dwptr++) {
486 if (qla24xx_read_flash_dword(ha, faddr, dwptr)) 487 ret = qla24xx_read_flash_dword(ha, faddr, dwptr);
488 if (ret != QLA_SUCCESS)
487 break; 489 break;
488 cpu_to_le32s(dwptr); 490 cpu_to_le32s(dwptr);
489 } 491 }
490 492
491 return dwptr; 493 return ret;
492} 494}
493 495
494static int 496static int
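Note: qla24xx_read_flash_data() now returns the status of the first failing dword read instead of an advanced destination pointer that no caller used. Sketch of the reshaped loop, with read_dword() as an illustrative helper:

static int read_flash_data(u32 *dst, u32 faddr, u32 dwords)
{
        int ret = 0;                    /* QLA_SUCCESS */
        u32 i;

        for (i = 0; i < dwords; i++, faddr++, dst++) {
                ret = read_dword(faddr, dst);   /* illustrative helper */
                if (ret)
                        break;          /* propagate error; partial data stands */
        }
        return ret;
}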
@@ -680,8 +682,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
680 682
681 ha->flt_region_flt = flt_addr; 683 ha->flt_region_flt = flt_addr;
682 wptr = (uint16_t *)ha->flt; 684 wptr = (uint16_t *)ha->flt;
683 qla24xx_read_flash_data(vha, (void *)flt, flt_addr, 685 ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2,
684 (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE) >> 2); 686 (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE));
685 687
686 if (le16_to_cpu(*wptr) == 0xffff) 688 if (le16_to_cpu(*wptr) == 0xffff)
687 goto no_flash_data; 689 goto no_flash_data;
@@ -948,11 +950,11 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
948 struct req_que *req = ha->req_q_map[0]; 950 struct req_que *req = ha->req_q_map[0];
949 uint16_t cnt, chksum; 951 uint16_t cnt, chksum;
950 uint16_t *wptr = (void *)req->ring; 952 uint16_t *wptr = (void *)req->ring;
951 struct qla_fdt_layout *fdt = (void *)req->ring; 953 struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring;
952 uint8_t man_id, flash_id; 954 uint8_t man_id, flash_id;
953 uint16_t mid = 0, fid = 0; 955 uint16_t mid = 0, fid = 0;
954 956
955 qla24xx_read_flash_data(vha, (void *)fdt, ha->flt_region_fdt, 957 ha->isp_ops->read_optrom(vha, fdt, ha->flt_region_fdt << 2,
956 OPTROM_BURST_DWORDS); 958 OPTROM_BURST_DWORDS);
957 if (le16_to_cpu(*wptr) == 0xffff) 959 if (le16_to_cpu(*wptr) == 0xffff)
958 goto no_flash_data; 960 goto no_flash_data;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 1c1f63be6eed..0ffda6171614 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -188,18 +188,17 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
188 188
189static inline 189static inline
190struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, 190struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
191 uint8_t *d_id) 191 be_id_t d_id)
192{ 192{
193 struct scsi_qla_host *host; 193 struct scsi_qla_host *host;
194 uint32_t key = 0; 194 uint32_t key;
195 195
196 if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) && 196 if (vha->d_id.b.area == d_id.area &&
197 (vha->d_id.b.al_pa == d_id[2])) 197 vha->d_id.b.domain == d_id.domain &&
198 vha->d_id.b.al_pa == d_id.al_pa)
198 return vha; 199 return vha;
199 200
200 key = (uint32_t)d_id[0] << 16; 201 key = be_to_port_id(d_id).b24;
201 key |= (uint32_t)d_id[1] << 8;
202 key |= (uint32_t)d_id[2];
203 202
204 host = btree_lookup32(&vha->hw->tgt.host_map, key); 203 host = btree_lookup32(&vha->hw->tgt.host_map, key);
205 if (!host) 204 if (!host)
@@ -357,9 +356,9 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
357 ql_dbg(ql_dbg_tgt, vha, 0xe03e, 356 ql_dbg(ql_dbg_tgt, vha, 0xe03e,
358 "qla_target(%d): Received ATIO_TYPE7 " 357 "qla_target(%d): Received ATIO_TYPE7 "
359 "with unknown d_id %x:%x:%x\n", vha->vp_idx, 358 "with unknown d_id %x:%x:%x\n", vha->vp_idx,
360 atio->u.isp24.fcp_hdr.d_id[0], 359 atio->u.isp24.fcp_hdr.d_id.domain,
361 atio->u.isp24.fcp_hdr.d_id[1], 360 atio->u.isp24.fcp_hdr.d_id.area,
362 atio->u.isp24.fcp_hdr.d_id[2]); 361 atio->u.isp24.fcp_hdr.d_id.al_pa);
363 362
364 363
365 qlt_queue_unknown_atio(vha, atio, ha_locked); 364 qlt_queue_unknown_atio(vha, atio, ha_locked);
@@ -560,10 +559,8 @@ static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
560 return qla2x00_post_work(vha, e); 559 return qla2x00_post_work(vha, e);
561} 560}
562 561
563static 562static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
564void qla2x00_async_nack_sp_done(void *s, int res)
565{ 563{
566 struct srb *sp = (struct srb *)s;
567 struct scsi_qla_host *vha = sp->vha; 564 struct scsi_qla_host *vha = sp->vha;
568 unsigned long flags; 565 unsigned long flags;
569 566
@@ -789,6 +786,8 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
789{ 786{
790 struct qlt_plogi_ack_t *pla; 787 struct qlt_plogi_ack_t *pla;
791 788
789 lockdep_assert_held(&vha->hw->hardware_lock);
790
792 list_for_each_entry(pla, &vha->plogi_ack_list, list) { 791 list_for_each_entry(pla, &vha->plogi_ack_list, list) {
793 if (pla->id.b24 == id->b24) { 792 if (pla->id.b24 == id->b24) {
794 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d, 793 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
@@ -1209,7 +1208,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
1209 sess->logout_on_delete = 0; 1208 sess->logout_on_delete = 0;
1210 sess->logo_ack_needed = 0; 1209 sess->logo_ack_needed = 0;
1211 sess->fw_login_state = DSC_LS_PORT_UNAVAIL; 1210 sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
1212 sess->scan_state = 0;
1213 } 1211 }
1214} 1212}
1215 1213
@@ -1284,13 +1282,12 @@ static void qlt_clear_tgt_db(struct qla_tgt *tgt)
1284 /* At this point tgt could be already dead */ 1282 /* At this point tgt could be already dead */
1285} 1283}
1286 1284
1287static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, 1285static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
1288 uint16_t *loop_id) 1286 uint16_t *loop_id)
1289{ 1287{
1290 struct qla_hw_data *ha = vha->hw; 1288 struct qla_hw_data *ha = vha->hw;
1291 dma_addr_t gid_list_dma; 1289 dma_addr_t gid_list_dma;
1292 struct gid_list_info *gid_list; 1290 struct gid_list_info *gid_list, *gid;
1293 char *id_iter;
1294 int res, rc, i; 1291 int res, rc, i;
1295 uint16_t entries; 1292 uint16_t entries;
1296 1293
@@ -1313,19 +1310,17 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
1313 goto out_free_id_list; 1310 goto out_free_id_list;
1314 } 1311 }
1315 1312
1316 id_iter = (char *)gid_list; 1313 gid = gid_list;
1317 res = -ENOENT; 1314 res = -ENOENT;
1318 for (i = 0; i < entries; i++) { 1315 for (i = 0; i < entries; i++) {
1319 struct gid_list_info *gid = (struct gid_list_info *)id_iter; 1316 if (gid->al_pa == s_id.al_pa &&
1320 1317 gid->area == s_id.area &&
1321 if ((gid->al_pa == s_id[2]) && 1318 gid->domain == s_id.domain) {
1322 (gid->area == s_id[1]) &&
1323 (gid->domain == s_id[0])) {
1324 *loop_id = le16_to_cpu(gid->loop_id); 1319 *loop_id = le16_to_cpu(gid->loop_id);
1325 res = 0; 1320 res = 0;
1326 break; 1321 break;
1327 } 1322 }
1328 id_iter += ha->gid_list_info_size; 1323 gid = (void *)gid + ha->gid_list_info_size;
1329 } 1324 }
1330 1325
1331out_free_id_list: 1326out_free_id_list:
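Note: the rewritten loop drops the char * shadow iterator but must still advance by ha->gid_list_info_size, since the firmware's entry stride can differ from sizeof(*gid). Sketch of the variable-stride walk (struct gid_entry is an illustrative stand-in):

struct gid_entry { u8 al_pa, area, domain; __le16 loop_id; };   /* sketch */

static struct gid_entry *find_entry(struct gid_entry *gid, int entries,
                                    size_t stride, u8 al_pa)
{
        int i;

        for (i = 0; i < entries; i++) {
                if (gid->al_pa == al_pa)
                        return gid;
                gid = (void *)gid + stride;     /* stride != sizeof(*gid) */
        }
        return NULL;
}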
@@ -1582,11 +1577,10 @@ static void qlt_release(struct qla_tgt *tgt)
1582 struct qla_qpair_hint *h; 1577 struct qla_qpair_hint *h;
1583 struct qla_hw_data *ha = vha->hw; 1578 struct qla_hw_data *ha = vha->hw;
1584 1579
1585 if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop && 1580 if (!tgt->tgt_stop && !tgt->tgt_stopped)
1586 !tgt->tgt_stopped)
1587 qlt_stop_phase1(tgt); 1581 qlt_stop_phase1(tgt);
1588 1582
1589 if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) 1583 if (!tgt->tgt_stopped)
1590 qlt_stop_phase2(tgt); 1584 qlt_stop_phase2(tgt);
1591 1585
1592 for (i = 0; i < vha->hw->max_qpairs + 1; i++) { 1586 for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
@@ -1772,12 +1766,8 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
1772 resp->fcp_hdr_le.f_ctl[1] = *p++; 1766 resp->fcp_hdr_le.f_ctl[1] = *p++;
1773 resp->fcp_hdr_le.f_ctl[2] = *p; 1767 resp->fcp_hdr_le.f_ctl[2] = *p;
1774 1768
1775 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; 1769 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1776 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; 1770 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1777 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1778 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1779 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1780 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1781 1771
1782 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; 1772 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1783 if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) { 1773 if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
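Note: with d_id/s_id struct-typed, crossing over the source and destination IDs collapses six indexed byte stores into two struct assignments. Sketch (layout assumed):

typedef struct { u8 domain, area, al_pa; } le_id_t;     /* layout assumed */
struct frame_hdr { le_id_t d_id, s_id; };

static void cross_ids(struct frame_hdr *resp, const struct frame_hdr *abts)
{
        resp->d_id = abts->s_id;        /* struct copy: all three bytes */
        resp->s_id = abts->d_id;        /* replaces six indexed stores  */
}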
@@ -1848,19 +1838,11 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
1848 resp->fcp_hdr_le.f_ctl[1] = *p++; 1838 resp->fcp_hdr_le.f_ctl[1] = *p++;
1849 resp->fcp_hdr_le.f_ctl[2] = *p; 1839 resp->fcp_hdr_le.f_ctl[2] = *p;
1850 if (ids_reversed) { 1840 if (ids_reversed) {
1851 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0]; 1841 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
1852 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1]; 1842 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
1853 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
1854 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
1855 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
1856 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
1857 } else { 1843 } else {
1858 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; 1844 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1859 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; 1845 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1860 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1861 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1862 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1863 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1864 } 1846 }
1865 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; 1847 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1866 if (status == FCP_TMF_CMPL) { 1848 if (status == FCP_TMF_CMPL) {
@@ -1927,18 +1909,14 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1927 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); 1909 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1928 1910
1929 if (mcmd) { 1911 if (mcmd) {
1930 ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0]; 1912 ctio->initiator_id = entry->fcp_hdr_le.s_id;
1931 ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1];
1932 ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2];
1933 1913
1934 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) 1914 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
1935 tmp |= (mcmd->abort_io_attr << 9); 1915 tmp |= (mcmd->abort_io_attr << 9);
1936 else if (qpair->retry_term_cnt & 1) 1916 else if (qpair->retry_term_cnt & 1)
1937 tmp |= (0x4 << 9); 1917 tmp |= (0x4 << 9);
1938 } else { 1918 } else {
1939 ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0]; 1919 ctio->initiator_id = entry->fcp_hdr_le.d_id;
1940 ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1941 ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1942 1920
1943 if (qpair->retry_term_cnt & 1) 1921 if (qpair->retry_term_cnt & 1)
1944 tmp |= (0x4 << 9); 1922 tmp |= (0x4 << 9);
@@ -1972,8 +1950,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1972 * XXX does not go through the list of other port (which may have cmds 1950 * XXX does not go through the list of other port (which may have cmds
1973 * for the same lun) 1951 * for the same lun)
1974 */ 1952 */
1975static void abort_cmds_for_lun(struct scsi_qla_host *vha, 1953static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
1976 u64 lun, uint8_t *s_id)
1977{ 1954{
1978 struct qla_tgt_sess_op *op; 1955 struct qla_tgt_sess_op *op;
1979 struct qla_tgt_cmd *cmd; 1956 struct qla_tgt_cmd *cmd;
@@ -2149,7 +2126,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2149 struct qla_hw_data *ha = vha->hw; 2126 struct qla_hw_data *ha = vha->hw;
2150 struct fc_port *sess; 2127 struct fc_port *sess;
2151 uint32_t tag = abts->exchange_addr_to_abort; 2128 uint32_t tag = abts->exchange_addr_to_abort;
2152 uint8_t s_id[3]; 2129 be_id_t s_id;
2153 int rc; 2130 int rc;
2154 unsigned long flags; 2131 unsigned long flags;
2155 2132
@@ -2173,13 +2150,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2173 2150
2174 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, 2151 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2175 "qla_target(%d): task abort (s_id=%x:%x:%x, " 2152 "qla_target(%d): task abort (s_id=%x:%x:%x, "
2176 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2], 2153 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2177 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag, 2154 abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
2178 le32_to_cpu(abts->fcp_hdr_le.parameter)); 2155 le32_to_cpu(abts->fcp_hdr_le.parameter));
2179 2156
2180 s_id[0] = abts->fcp_hdr_le.s_id[2]; 2157 s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
2181 s_id[1] = abts->fcp_hdr_le.s_id[1];
2182 s_id[2] = abts->fcp_hdr_le.s_id[0];
2183 2158
2184 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 2159 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2185 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 2160 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -2243,9 +2218,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
2243 ctio->nport_handle = mcmd->sess->loop_id; 2218 ctio->nport_handle = mcmd->sess->loop_id;
2244 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2219 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2245 ctio->vp_index = ha->vp_idx; 2220 ctio->vp_index = ha->vp_idx;
2246 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2221 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2247 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2248 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2249 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2222 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2250 temp = (atio->u.isp24.attr << 9)| 2223 temp = (atio->u.isp24.attr << 9)|
2251 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2224 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
@@ -2302,9 +2275,7 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
2302 ctio->nport_handle = cmd->sess->loop_id; 2275 ctio->nport_handle = cmd->sess->loop_id;
2303 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2276 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2304 ctio->vp_index = vha->vp_idx; 2277 ctio->vp_index = vha->vp_idx;
2305 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2278 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2306 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2307 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2308 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2279 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2309 temp = (atio->u.isp24.attr << 9) | 2280 temp = (atio->u.isp24.attr << 9) |
2310 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2281 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
@@ -2605,9 +2576,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
2605 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 2576 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
2606 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 2577 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2607 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2578 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2608 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2579 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2609 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2610 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2611 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2580 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2612 temp = atio->u.isp24.attr << 9; 2581 temp = atio->u.isp24.attr << 9;
2613 pkt->u.status0.flags |= cpu_to_le16(temp); 2582 pkt->u.status0.flags |= cpu_to_le16(temp);
@@ -3120,9 +3089,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3120 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 3089 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
3121 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 3090 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
3122 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3091 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3123 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3092 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3124 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3125 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3126 pkt->exchange_addr = atio->u.isp24.exchange_addr; 3093 pkt->exchange_addr = atio->u.isp24.exchange_addr;
3127 3094
3128 /* silence compile warning */ 3095 /* silence compile warning */
@@ -3164,7 +3131,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3164 pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 3131 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
3165 3132
3166 if (!bundling) { 3133 if (!bundling) {
3167 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd; 3134 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
3168 } else { 3135 } else {
3169 /* 3136 /*
3170 * Configure Bundling if we need to fetch interlaving 3137 * Configure Bundling if we need to fetch interlaving
@@ -3174,7 +3141,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3174 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 3141 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
3175 crc_ctx_pkt->u.bundling.dseg_count = 3142 crc_ctx_pkt->u.bundling.dseg_count =
3176 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); 3143 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
3177 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd; 3144 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
3178 } 3145 }
3179 3146
3180 /* Finish the common fields of CRC pkt */ 3147 /* Finish the common fields of CRC pkt */
@@ -3239,7 +3206,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3239 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || 3206 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3240 (cmd->sess && cmd->sess->deleted)) { 3207 (cmd->sess && cmd->sess->deleted)) {
3241 cmd->state = QLA_TGT_STATE_PROCESSED; 3208 cmd->state = QLA_TGT_STATE_PROCESSED;
3242 return 0; 3209 res = 0;
3210 goto free;
3243 } 3211 }
3244 3212
3245 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, 3213 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
@@ -3250,9 +3218,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3250 3218
3251 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 3219 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3252 &full_req_cnt); 3220 &full_req_cnt);
3253 if (unlikely(res != 0)) { 3221 if (unlikely(res != 0))
3254 return res; 3222 goto free;
3255 }
3256 3223
3257 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3224 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3258 3225
@@ -3272,7 +3239,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3272 vha->flags.online, qla2x00_reset_active(vha), 3239 vha->flags.online, qla2x00_reset_active(vha),
3273 cmd->reset_count, qpair->chip_reset); 3240 cmd->reset_count, qpair->chip_reset);
3274 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3241 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3275 return 0; 3242 res = 0;
3243 goto free;
3276 } 3244 }
3277 3245
3278 /* Does F/W have an IOCBs for this request */ 3246 /* Does F/W have an IOCBs for this request */
@@ -3375,6 +3343,8 @@ out_unmap_unlock:
3375 qlt_unmap_sg(vha, cmd); 3343 qlt_unmap_sg(vha, cmd);
3376 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3344 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3377 3345
3346free:
3347 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3378 return res; 3348 return res;
3379} 3349}
3380EXPORT_SYMBOL(qlt_xmit_response); 3350EXPORT_SYMBOL(qlt_xmit_response);
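Note: the early returns in qlt_xmit_response() apparently dropped the command without releasing it on reset/teardown; they now funnel through one free: label so ownership is released exactly once. A sketch of the shape, with illustrative types and helpers:

static int xmit_response(struct tgt_cmd *cmd)   /* illustrative type */
{
        int res = 0;

        if (chip_reset_pending(cmd))
                goto free;              /* was: return 0 (cmd never freed) */

        res = post_ctio(cmd);
        if (res)
                goto free;
        return 0;                       /* hardware now owns cmd */
free:
        free_cmd(cmd);
        return res;
}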
@@ -3672,9 +3642,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
3672 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; 3642 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
3673 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3643 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3674 ctio24->vp_index = vha->vp_idx; 3644 ctio24->vp_index = vha->vp_idx;
3675 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3645 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3676 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3677 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3678 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 3646 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3679 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | 3647 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
3680 CTIO7_FLAGS_TERMINATE; 3648 CTIO7_FLAGS_TERMINATE;
@@ -4107,8 +4075,6 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4107 return fcp_task_attr; 4075 return fcp_task_attr;
4108} 4076}
4109 4077
4110static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
4111 uint8_t *);
4112/* 4078/*
4113 * Process context for I/O path into tcm_qla2xxx code 4079 * Process context for I/O path into tcm_qla2xxx code
4114 */ 4080 */
@@ -4352,9 +4318,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4352 return -ENODEV; 4318 return -ENODEV;
4353 } 4319 }
4354 4320
4355 id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2]; 4321 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
4356 id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
4357 id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
4358 if (IS_SW_RESV_ADDR(id)) 4322 if (IS_SW_RESV_ADDR(id))
4359 return -EBUSY; 4323 return -EBUSY;
4360 4324
@@ -4716,6 +4680,8 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
4716 struct qlt_plogi_ack_t *pla; 4680 struct qlt_plogi_ack_t *pla;
4717 unsigned long flags; 4681 unsigned long flags;
4718 4682
4683 lockdep_assert_held(&vha->hw->hardware_lock);
4684
4719 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4685 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4720 4686
4721 port_id.b.domain = iocb->u.isp24.port_id[2]; 4687 port_id.b.domain = iocb->u.isp24.port_id[2];
@@ -4799,8 +4765,10 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
4799 __func__, sess->port_name, sec); 4765 __func__, sess->port_name, sec);
4800 } 4766 }
4801 4767
4802 if (!conflict_sess) 4768 if (!conflict_sess) {
4769 list_del(&pla->list);
4803 kmem_cache_free(qla_tgt_plogi_cachep, pla); 4770 kmem_cache_free(qla_tgt_plogi_cachep, pla);
4771 }
4804 4772
4805 qlt_send_term_imm_notif(vha, iocb, 1); 4773 qlt_send_term_imm_notif(vha, iocb, 1);
4806 goto out; 4774 goto out;
@@ -4889,6 +4857,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4889 int res = 0; 4857 int res = 0;
4890 unsigned long flags; 4858 unsigned long flags;
4891 4859
4860 lockdep_assert_held(&ha->hardware_lock);
4861
4892 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4862 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4893 4863
4894 port_id.b.domain = iocb->u.isp24.port_id[2]; 4864 port_id.b.domain = iocb->u.isp24.port_id[2];
@@ -5165,6 +5135,8 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5165 int send_notify_ack = 1; 5135 int send_notify_ack = 1;
5166 uint16_t status; 5136 uint16_t status;
5167 5137
5138 lockdep_assert_held(&ha->hardware_lock);
5139
5168 status = le16_to_cpu(iocb->u.isp2x.status); 5140 status = le16_to_cpu(iocb->u.isp2x.status);
5169 switch (status) { 5141 switch (status) {
5170 case IMM_NTFY_LIP_RESET: 5142 case IMM_NTFY_LIP_RESET:
@@ -5302,10 +5274,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
5302 u16 temp; 5274 u16 temp;
5303 port_id_t id; 5275 port_id_t id;
5304 5276
5305 id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2]; 5277 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
5306 id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
5307 id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
5308 id.b.rsvd_1 = 0;
5309 5278
5310 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5279 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5311 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1); 5280 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
@@ -5333,9 +5302,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
5333 ctio24->nport_handle = sess->loop_id; 5302 ctio24->nport_handle = sess->loop_id;
5334 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 5303 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5335 ctio24->vp_index = vha->vp_idx; 5304 ctio24->vp_index = vha->vp_idx;
5336 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 5305 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
5337 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
5338 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
5339 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 5306 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5340 temp = (atio->u.isp24.attr << 9) | 5307 temp = (atio->u.isp24.attr << 9) |
5341 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 5308 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
@@ -5767,7 +5734,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5767 entry->error_subcode2); 5734 entry->error_subcode2);
5768 ha->tgt.tgt_ops->free_mcmd(mcmd); 5735 ha->tgt.tgt_ops->free_mcmd(mcmd);
5769 } 5736 }
5770 } else { 5737 } else if (mcmd) {
5771 ha->tgt.tgt_ops->free_mcmd(mcmd); 5738 ha->tgt.tgt_ops->free_mcmd(mcmd);
5772 } 5739 }
5773} 5740}
@@ -6121,21 +6088,21 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6121 6088
6122/* Must be called under tgt_mutex */ 6089/* Must be called under tgt_mutex */
6123static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, 6090static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
6124 uint8_t *s_id) 6091 be_id_t s_id)
6125{ 6092{
6126 struct fc_port *sess = NULL; 6093 struct fc_port *sess = NULL;
6127 fc_port_t *fcport = NULL; 6094 fc_port_t *fcport = NULL;
6128 int rc, global_resets; 6095 int rc, global_resets;
6129 uint16_t loop_id = 0; 6096 uint16_t loop_id = 0;
6130 6097
6131 if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { 6098 if (s_id.domain == 0xFF && s_id.area == 0xFC) {
6132 /* 6099 /*
6133 * This is the Domain Controller, so it should be 6100 * This is the Domain Controller, so it should be
6134 * OK to drop SCSI commands from it. 6101 * OK to drop SCSI commands from it.
6135 */ 6102 */
6136 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 6103 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
6137 "Unable to find initiator with S_ID %x:%x:%x", 6104 "Unable to find initiator with S_ID %x:%x:%x",
6138 s_id[0], s_id[1], s_id[2]); 6105 s_id.domain, s_id.area, s_id.al_pa);
6139 return NULL; 6106 return NULL;
6140 } 6107 }
6141 6108
@@ -6152,13 +6119,12 @@ retry:
6152 ql_log(ql_log_info, vha, 0xf071, 6119 ql_log(ql_log_info, vha, 0xf071,
6153 "qla_target(%d): Unable to find " 6120 "qla_target(%d): Unable to find "
6154 "initiator with S_ID %x:%x:%x", 6121 "initiator with S_ID %x:%x:%x",
6155 vha->vp_idx, s_id[0], s_id[1], 6122 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
6156 s_id[2]);
6157 6123
6158 if (rc == -ENOENT) { 6124 if (rc == -ENOENT) {
6159 qlt_port_logo_t logo; 6125 qlt_port_logo_t logo;
6160 6126
6161 sid_to_portid(s_id, &logo.id); 6127 logo.id = be_to_port_id(s_id);
6162 logo.cmd_count = 1; 6128 logo.cmd_count = 1;
6163 qlt_send_first_logo(vha, &logo); 6129 qlt_send_first_logo(vha, &logo);
6164 } 6130 }
@@ -6197,8 +6163,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
6197 struct qla_hw_data *ha = vha->hw; 6163 struct qla_hw_data *ha = vha->hw;
6198 struct fc_port *sess = NULL; 6164 struct fc_port *sess = NULL;
6199 unsigned long flags = 0, flags2 = 0; 6165 unsigned long flags = 0, flags2 = 0;
6200 uint32_t be_s_id; 6166 be_id_t s_id;
6201 uint8_t s_id[3];
6202 int rc; 6167 int rc;
6203 6168
6204 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 6169 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
@@ -6206,12 +6171,9 @@ static void qlt_abort_work(struct qla_tgt *tgt,
6206 if (tgt->tgt_stop) 6171 if (tgt->tgt_stop)
6207 goto out_term2; 6172 goto out_term2;
6208 6173
6209 s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; 6174 s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);
6210 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
6211 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
6212 6175
6213 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 6176 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6214 (unsigned char *)&be_s_id);
6215 if (!sess) { 6177 if (!sess) {
6216 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6178 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6217 6179
@@ -6248,9 +6210,6 @@ static void qlt_abort_work(struct qla_tgt *tgt,
6248out_term2: 6210out_term2:
6249 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6211 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6250 6212
6251 if (sess)
6252 ha->tgt.tgt_ops->put_sess(sess);
6253
6254out_term: 6213out_term:
6255 spin_lock_irqsave(&ha->hardware_lock, flags); 6214 spin_lock_irqsave(&ha->hardware_lock, flags);
6256 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, 6215 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
@@ -6266,7 +6225,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
6266 struct qla_hw_data *ha = vha->hw; 6225 struct qla_hw_data *ha = vha->hw;
6267 struct fc_port *sess; 6226 struct fc_port *sess;
6268 unsigned long flags; 6227 unsigned long flags;
6269 uint8_t *s_id = NULL; /* to hide compiler warnings */ 6228 be_id_t s_id;
6270 int rc; 6229 int rc;
6271 u64 unpacked_lun; 6230 u64 unpacked_lun;
6272 int fn; 6231 int fn;
@@ -6495,22 +6454,10 @@ void qlt_remove_target_resources(struct qla_hw_data *ha)
6495static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, 6454static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6496 unsigned char *b) 6455 unsigned char *b)
6497{ 6456{
6498 int i; 6457 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
6499 6458 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
6500 pr_debug("qla2xxx HW vha->node_name: ");
6501 for (i = 0; i < WWN_SIZE; i++)
6502 pr_debug("%02x ", vha->node_name[i]);
6503 pr_debug("\n");
6504 pr_debug("qla2xxx HW vha->port_name: ");
6505 for (i = 0; i < WWN_SIZE; i++)
6506 pr_debug("%02x ", vha->port_name[i]);
6507 pr_debug("\n");
6508
6509 pr_debug("qla2xxx passed configfs WWPN: ");
6510 put_unaligned_be64(wwpn, b); 6459 put_unaligned_be64(wwpn, b);
6511 for (i = 0; i < WWN_SIZE; i++) 6460 pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
6512 pr_debug("%02x ", b[i]);
6513 pr_debug("\n");
6514} 6461}
6515 6462
6516/** 6463/**
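The qlt_lport_dump() rewrite above leans on the kernel's %ph printk extension: the field width selects the byte count and the C suffix picks ':' as separator, so "%8phC" prints eight bytes as colon-separated hex and replaces the open-coded per-byte loops. A small sketch:

  #include <linux/printk.h>
  #include <linux/types.h>

  /* Dump an 8-byte WWN as "aa:bb:cc:dd:ee:ff:00:11". */
  static void dump_wwn(const u8 *wwn)
  {
          pr_debug("wwn: %8phC\n", wwn);
  }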
@@ -6671,6 +6618,8 @@ qlt_enable_vha(struct scsi_qla_host *vha)
6671 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) 6618 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6672 return; 6619 return;
6673 6620
6621 if (ha->tgt.num_act_qpairs > ha->max_qpairs)
6622 ha->tgt.num_act_qpairs = ha->max_qpairs;
6674 spin_lock_irqsave(&ha->hardware_lock, flags); 6623 spin_lock_irqsave(&ha->hardware_lock, flags);
6675 tgt->tgt_stopped = 0; 6624 tgt->tgt_stopped = 0;
6676 qlt_set_mode(vha); 6625 qlt_set_mode(vha);
@@ -6685,7 +6634,8 @@ qlt_enable_vha(struct scsi_qla_host *vha)
6685 } else { 6634 } else {
6686 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6635 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6687 qla2xxx_wake_dpc(base_vha); 6636 qla2xxx_wake_dpc(base_vha);
6688 qla2x00_wait_for_hba_online(base_vha); 6637 WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
6638 QLA_SUCCESS);
6689 } 6639 }
6690 mutex_unlock(&ha->optrom_mutex); 6640 mutex_unlock(&ha->optrom_mutex);
6691} 6641}
@@ -6716,7 +6666,9 @@ static void qlt_disable_vha(struct scsi_qla_host *vha)
6716 6666
6717 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 6667 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6718 qla2xxx_wake_dpc(vha); 6668 qla2xxx_wake_dpc(vha);
6719 qla2x00_wait_for_hba_online(vha); 6669 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6670 ql_dbg(ql_dbg_tgt, vha, 0xe081,
6671 "qla2x00_wait_for_hba_online() failed\n");
6720} 6672}
6721 6673
6722/* 6674/*
@@ -6815,7 +6767,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6815 */ 6767 */
6816 ql_log(ql_log_warn, vha, 0xd03c, 6768 ql_log(ql_log_warn, vha, 0xd03c,
6817 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", 6769 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6818 pkt->u.isp24.fcp_hdr.s_id, 6770 &pkt->u.isp24.fcp_hdr.s_id,
6819 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), 6771 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6820 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); 6772 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
6821 6773
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index b8d244f1e189..d006f0a97b8c 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -247,9 +247,9 @@ struct ctio_to_2xxx {
247 247
248struct fcp_hdr { 248struct fcp_hdr {
249 uint8_t r_ctl; 249 uint8_t r_ctl;
250 uint8_t d_id[3]; 250 be_id_t d_id;
251 uint8_t cs_ctl; 251 uint8_t cs_ctl;
252 uint8_t s_id[3]; 252 be_id_t s_id;
253 uint8_t type; 253 uint8_t type;
254 uint8_t f_ctl[3]; 254 uint8_t f_ctl[3];
255 uint8_t seq_id; 255 uint8_t seq_id;
@@ -261,9 +261,9 @@ struct fcp_hdr {
261} __packed; 261} __packed;
262 262
263struct fcp_hdr_le { 263struct fcp_hdr_le {
264 uint8_t d_id[3]; 264 le_id_t d_id;
265 uint8_t r_ctl; 265 uint8_t r_ctl;
266 uint8_t s_id[3]; 266 le_id_t s_id;
267 uint8_t cs_ctl; 267 uint8_t cs_ctl;
268 uint8_t f_ctl[3]; 268 uint8_t f_ctl[3];
269 uint8_t type; 269 uint8_t type;
@@ -402,7 +402,7 @@ struct ctio7_to_24xx {
402 uint16_t dseg_count; /* Data segment count. */ 402 uint16_t dseg_count; /* Data segment count. */
403 uint8_t vp_index; 403 uint8_t vp_index;
404 uint8_t add_flags; 404 uint8_t add_flags;
405 uint8_t initiator_id[3]; 405 le_id_t initiator_id;
406 uint8_t reserved; 406 uint8_t reserved;
407 uint32_t exchange_addr; 407 uint32_t exchange_addr;
408 union { 408 union {
@@ -498,7 +498,7 @@ struct ctio_crc2_to_fw {
498 uint8_t add_flags; /* additional flags */ 498 uint8_t add_flags; /* additional flags */
499#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 499#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
500 500
501 uint8_t initiator_id[3]; /* initiator ID */ 501 le_id_t initiator_id; /* initiator ID */
502 uint8_t reserved1; 502 uint8_t reserved1;
503 uint32_t exchange_addr; /* rcv exchange address */ 503 uint32_t exchange_addr; /* rcv exchange address */
504 uint16_t reserved2; 504 uint16_t reserved2;
@@ -682,7 +682,7 @@ struct qla_tgt_func_tmpl {
682 struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *, 682 struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *,
683 const uint16_t); 683 const uint16_t);
684 struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *, 684 struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *,
685 const uint8_t *); 685 const be_id_t);
686 void (*clear_nacl_from_fcport_map)(struct fc_port *); 686 void (*clear_nacl_from_fcport_map)(struct fc_port *);
687 void (*put_sess)(struct fc_port *); 687 void (*put_sess)(struct fc_port *);
688 void (*shutdown_sess)(struct fc_port *); 688 void (*shutdown_sess)(struct fc_port *);
@@ -912,7 +912,7 @@ struct qla_tgt_cmd {
912 uint8_t scsi_status, sense_key, asc, ascq; 912 uint8_t scsi_status, sense_key, asc, ascq;
913 913
914 struct crc_context *ctx; 914 struct crc_context *ctx;
915 uint8_t *cdb; 915 const uint8_t *cdb;
916 uint64_t lba; 916 uint64_t lba;
917 uint16_t a_guard, e_guard, a_app_tag, e_app_tag; 917 uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
918 uint32_t a_ref_tag, e_ref_tag; 918 uint32_t a_ref_tag, e_ref_tag;
@@ -1030,22 +1030,11 @@ static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha)
1030 return (ha->host->active_mode == MODE_DUAL); 1030 return (ha->host->active_mode == MODE_DUAL);
1031} 1031}
1032 1032
1033static inline uint32_t sid_to_key(const uint8_t *s_id) 1033static inline uint32_t sid_to_key(const be_id_t s_id)
1034{ 1034{
1035 uint32_t key; 1035 return s_id.domain << 16 |
1036 1036 s_id.area << 8 |
1037 key = (((unsigned long)s_id[0] << 16) | 1037 s_id.al_pa;
1038 ((unsigned long)s_id[1] << 8) |
1039 (unsigned long)s_id[2]);
1040 return key;
1041}
1042
1043static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
1044{
1045 memset(p, 0, sizeof(*p));
1046 p->b.domain = s_id[0];
1047 p->b.area = s_id[1];
1048 p->b.al_pa = s_id[2];
1049} 1038}
1050 1039
1051/* 1040/*
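The header hunks replace raw uint8_t[3] S_ID handling with be_id_t/le_id_t and a family of converters (be_to_port_id(), be_id_to_le(), le_id_to_be(), port_id_to_be_id()) used throughout the qla_target.c hunks above, retiring the open-coded byte shuffles and sid_to_portid(). A sketch of what that layer looks like; the struct layouts here are assumed for illustration, not quoted from the series:

  #include <linux/types.h>

  /* Assumed: FC address in wire (big-endian) order and its LE twin. */
  typedef struct { u8 domain, area, al_pa; } be_id_t;
  typedef struct { u8 al_pa, area, domain; } le_id_t;

  /* Pack the three S_ID bytes into a 24-bit lookup key. */
  static inline u32 sid_to_key(const be_id_t s_id)
  {
          return s_id.domain << 16 | s_id.area << 8 | s_id.al_pa;
  }

  static inline le_id_t be_id_to_le(be_id_t id)
  {
          le_id_t res = { .al_pa = id.al_pa, .area = id.area,
                          .domain = id.domain };
          return res;
  }

Typed IDs let the compiler catch byte-order mixups that the old uint8_t pointers allowed silently.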
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index de696a07532e..294d77c02cdf 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -429,7 +429,7 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
429 ql_dbg(ql_dbg_misc, vha, 0xd20a, 429 ql_dbg(ql_dbg_misc, vha, 0xd20a,
430 "%s: reset risc [%lx]\n", __func__, *len); 430 "%s: reset risc [%lx]\n", __func__, *len);
431 if (buf) 431 if (buf)
432 qla24xx_soft_reset(vha->hw); 432 WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
433 433
434 return qla27xx_next_entry(ent); 434 return qla27xx_next_entry(ent);
435} 435}
@@ -860,8 +860,9 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
860{ 860{
861 uint8_t v[] = { 0, 0, 0, 0, 0, 0 }; 861 uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
862 862
863 sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu", 863 WARN_ON_ONCE(sscanf(qla2x00_version_str,
864 v+0, v+1, v+2, v+3, v+4, v+5); 864 "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
865 v+0, v+1, v+2, v+3, v+4, v+5) != 6);
865 866
866 tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]; 867 tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
867 tmp->driver_info[1] = v[5] << 8 | v[4]; 868 tmp->driver_info[1] = v[5] << 8 | v[4];
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index cd6bdf71e533..a8f2a953ceff 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "10.01.00.16-k" 10#define QLA2XXX_VERSION "10.01.00.19-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 10 12#define QLA_DRIVER_MAJOR_VER 10
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d15412d3d9bd..042a24314edc 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -620,6 +620,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
620{ 620{
621 struct qla_tgt_cmd *cmd = container_of(se_cmd, 621 struct qla_tgt_cmd *cmd = container_of(se_cmd,
622 struct qla_tgt_cmd, se_cmd); 622 struct qla_tgt_cmd, se_cmd);
623 struct scsi_qla_host *vha = cmd->vha;
623 624
624 if (cmd->aborted) { 625 if (cmd->aborted) {
625 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task 626 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
@@ -632,6 +633,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
632 cmd->se_cmd.transport_state, 633 cmd->se_cmd.transport_state,
633 cmd->se_cmd.t_state, 634 cmd->se_cmd.t_state,
634 cmd->se_cmd.se_cmd_flags); 635 cmd->se_cmd.se_cmd_flags);
636 vha->hw->tgt.tgt_ops->free_cmd(cmd);
635 return 0; 637 return 0;
636 } 638 }
637 639
@@ -659,6 +661,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
659{ 661{
660 struct qla_tgt_cmd *cmd = container_of(se_cmd, 662 struct qla_tgt_cmd *cmd = container_of(se_cmd,
661 struct qla_tgt_cmd, se_cmd); 663 struct qla_tgt_cmd, se_cmd);
664 struct scsi_qla_host *vha = cmd->vha;
662 int xmit_type = QLA_TGT_XMIT_STATUS; 665 int xmit_type = QLA_TGT_XMIT_STATUS;
663 666
664 if (cmd->aborted) { 667 if (cmd->aborted) {
@@ -672,6 +675,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
672 cmd, kref_read(&cmd->se_cmd.cmd_kref), 675 cmd, kref_read(&cmd->se_cmd.cmd_kref),
673 cmd->se_cmd.transport_state, cmd->se_cmd.t_state, 676 cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
674 cmd->se_cmd.se_cmd_flags); 677 cmd->se_cmd.se_cmd_flags);
678 vha->hw->tgt.tgt_ops->free_cmd(cmd);
675 return 0; 679 return 0;
676 } 680 }
677 cmd->bufflen = se_cmd->data_length; 681 cmd->bufflen = se_cmd->data_length;
@@ -1136,9 +1140,8 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn,
1136/* 1140/*
1137 * Expected to be called with struct qla_hw_data->tgt.sess_lock held 1141 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
1138 */ 1142 */
1139static struct fc_port *tcm_qla2xxx_find_sess_by_s_id( 1143static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(scsi_qla_host_t *vha,
1140 scsi_qla_host_t *vha, 1144 const be_id_t s_id)
1141 const uint8_t *s_id)
1142{ 1145{
1143 struct tcm_qla2xxx_lport *lport; 1146 struct tcm_qla2xxx_lport *lport;
1144 struct se_node_acl *se_nacl; 1147 struct se_node_acl *se_nacl;
@@ -1181,7 +1184,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
1181 struct tcm_qla2xxx_nacl *nacl, 1184 struct tcm_qla2xxx_nacl *nacl,
1182 struct se_session *se_sess, 1185 struct se_session *se_sess,
1183 struct fc_port *fc_port, 1186 struct fc_port *fc_port,
1184 uint8_t *s_id) 1187 be_id_t s_id)
1185{ 1188{
1186 u32 key; 1189 u32 key;
1187 void *slot; 1190 void *slot;
@@ -1348,14 +1351,9 @@ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
1348 struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess) 1351 struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess)
1349{ 1352{
1350 struct se_session *se_sess = sess->se_sess; 1353 struct se_session *se_sess = sess->se_sess;
1351 unsigned char be_sid[3];
1352
1353 be_sid[0] = sess->d_id.b.domain;
1354 be_sid[1] = sess->d_id.b.area;
1355 be_sid[2] = sess->d_id.b.al_pa;
1356 1354
1357 tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, 1355 tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
1358 sess, be_sid); 1356 sess, port_id_to_be_id(sess->d_id));
1359 tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, 1357 tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
1360 sess, sess->loop_id); 1358 sess, sess->loop_id);
1361} 1359}
@@ -1401,19 +1399,14 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
1401 struct fc_port *qlat_sess = p; 1399 struct fc_port *qlat_sess = p;
1402 uint16_t loop_id = qlat_sess->loop_id; 1400 uint16_t loop_id = qlat_sess->loop_id;
1403 unsigned long flags; 1401 unsigned long flags;
1404 unsigned char be_sid[3];
1405
1406 be_sid[0] = qlat_sess->d_id.b.domain;
1407 be_sid[1] = qlat_sess->d_id.b.area;
1408 be_sid[2] = qlat_sess->d_id.b.al_pa;
1409 1402
1410 /* 1403 /*
1411 * And now setup se_nacl and session pointers into HW lport internal 1404 * And now setup se_nacl and session pointers into HW lport internal
1412 * mappings for fabric S_ID and LOOP_ID. 1405 * mappings for fabric S_ID and LOOP_ID.
1413 */ 1406 */
1414 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1407 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1415 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, 1408 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, qlat_sess,
1416 se_sess, qlat_sess, be_sid); 1409 port_id_to_be_id(qlat_sess->d_id));
1417 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, 1410 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
1418 se_sess, qlat_sess, loop_id); 1411 se_sess, qlat_sess, loop_id);
1419 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1412 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 9335849f6bea..d539beef3ce8 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -200,10 +200,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
200 /* Write mailbox command registers. */ 200 /* Write mailbox command registers. */
201 switch (mbox_param[param[0]] >> 4) { 201 switch (mbox_param[param[0]] >> 4) {
202 case 6: sbus_writew(param[5], qpti->qregs + MBOX5); 202 case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
203 /* Fall through */
203 case 5: sbus_writew(param[4], qpti->qregs + MBOX4); 204 case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
205 /* Fall through */
204 case 4: sbus_writew(param[3], qpti->qregs + MBOX3); 206 case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
207 /* Fall through */
205 case 3: sbus_writew(param[2], qpti->qregs + MBOX2); 208 case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
209 /* Fall through */
206 case 2: sbus_writew(param[1], qpti->qregs + MBOX1); 210 case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
211 /* Fall through */
207 case 1: sbus_writew(param[0], qpti->qregs + MBOX0); 212 case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
208 } 213 }
209 214
@@ -254,10 +259,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
254 /* Read back output parameters. */ 259 /* Read back output parameters. */
255 switch (mbox_param[param[0]] & 0xf) { 260 switch (mbox_param[param[0]] & 0xf) {
256 case 6: param[5] = sbus_readw(qpti->qregs + MBOX5); 261 case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
262 /* Fall through */
257 case 5: param[4] = sbus_readw(qpti->qregs + MBOX4); 263 case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
264 /* Fall through */
258 case 4: param[3] = sbus_readw(qpti->qregs + MBOX3); 265 case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
266 /* Fall through */
259 case 3: param[2] = sbus_readw(qpti->qregs + MBOX2); 267 case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
268 /* Fall through */
260 case 2: param[1] = sbus_readw(qpti->qregs + MBOX1); 269 case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
270 /* Fall through */
261 case 1: param[0] = sbus_readw(qpti->qregs + MBOX0); 271 case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
262 } 272 }
263 273
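These qlogicpti hunks annotate deliberate case cascades so -Wimplicit-fallthrough stays quiet; at this point the kernel used the /* Fall through */ comment form that GCC's heuristics accept (the fallthrough; pseudo-keyword came later). The idiom in sketch form, with a hypothetical write_mbox():

  void write_mbox(int idx, unsigned short val);   /* hypothetical */

  /* Write the top N mailboxes; each case intentionally falls through. */
  static void write_mboxes(unsigned int count, const unsigned short *param)
  {
          switch (count) {
          case 3:
                  write_mbox(2, param[2]);
                  /* Fall through */
          case 2:
                  write_mbox(1, param[1]);
                  /* Fall through */
          case 1:
                  write_mbox(0, param[0]);
          }
  }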
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index c5a8756384bc..c19ea7ab54cb 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -1,4 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/bitops.h>
2#include <linux/seq_file.h> 3#include <linux/seq_file.h>
3#include <scsi/scsi_cmnd.h> 4#include <scsi/scsi_cmnd.h>
4#include <scsi/scsi_dbg.h> 5#include <scsi/scsi_dbg.h>
@@ -18,9 +19,7 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags,
18 bool sep = false; 19 bool sep = false;
19 int i; 20 int i;
20 21
21 for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) { 22 for_each_set_bit(i, &flags, BITS_PER_LONG) {
22 if (!(flags & BIT(i)))
23 continue;
24 if (sep) 23 if (sep)
25 seq_puts(m, "|"); 24 seq_puts(m, "|");
26 sep = true; 25 sep = true;
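for_each_set_bit() from <linux/bitops.h> iterates only over the bits that are set, so the rewritten loop drops the explicit BIT(i) test. The same shape in sketch form, with the per-bit names assumed:

  #include <linux/bitops.h>
  #include <linux/seq_file.h>
  #include <linux/types.h>

  /* Print set flags as "A|B|C" given one name per bit position. */
  static void show_flags(struct seq_file *m, unsigned long flags,
                         const char *const *names)
  {
          bool sep = false;
          int i;

          for_each_set_bit(i, &flags, BITS_PER_LONG) {
                  if (sep)
                          seq_puts(m, "|");
                  sep = true;
                  seq_puts(m, names[i]);
          }
  }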
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4e88d7e9cf9a..dc210b9d4896 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1678,10 +1678,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1678 blk_mq_start_request(req); 1678 blk_mq_start_request(req);
1679 } 1679 }
1680 1680
1681 cmd->flags &= SCMD_PRESERVED_FLAGS;
1681 if (sdev->simple_tags) 1682 if (sdev->simple_tags)
1682 cmd->flags |= SCMD_TAGGED; 1683 cmd->flags |= SCMD_TAGGED;
1683 else 1684 if (bd->last)
1684 cmd->flags &= ~SCMD_TAGGED; 1685 cmd->flags |= SCMD_LAST;
1685 1686
1686 scsi_init_cmd_errh(cmd); 1687 scsi_init_cmd_errh(cmd);
1687 cmd->scsi_done = scsi_mq_done; 1688 cmd->scsi_done = scsi_mq_done;
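Rather than toggling SCMD_TAGGED by hand, scsi_queue_rq() now masks the command's flags down to SCMD_PRESERVED_FLAGS and re-ORs the per-dispatch bits, including the new SCMD_LAST that marks the final request of a blk-mq batch (bd->last) for hosts that implement commit_rqs. The masking idiom in sketch form, with the flag values assumed:

  #define F_PRESERVED     0x00ffu   /* assumed: bits that survive request reuse */
  #define F_TAGGED        0x0100u
  #define F_LAST          0x0200u

  /* Recompute per-dispatch flags while keeping long-lived ones. */
  static void set_dispatch_flags(unsigned int *flags, int tagged, int last)
  {
          *flags &= F_PRESERVED;
          if (tagged)
                  *flags |= F_TAGGED;
          if (last)
                  *flags |= F_LAST;
  }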
@@ -1821,10 +1822,37 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1821} 1822}
1822EXPORT_SYMBOL_GPL(__scsi_init_queue); 1823EXPORT_SYMBOL_GPL(__scsi_init_queue);
1823 1824
1825static const struct blk_mq_ops scsi_mq_ops_no_commit = {
1826 .get_budget = scsi_mq_get_budget,
1827 .put_budget = scsi_mq_put_budget,
1828 .queue_rq = scsi_queue_rq,
1829 .complete = scsi_softirq_done,
1830 .timeout = scsi_timeout,
1831#ifdef CONFIG_BLK_DEBUG_FS
1832 .show_rq = scsi_show_rq,
1833#endif
1834 .init_request = scsi_mq_init_request,
1835 .exit_request = scsi_mq_exit_request,
1836 .initialize_rq_fn = scsi_initialize_rq,
1837 .busy = scsi_mq_lld_busy,
1838 .map_queues = scsi_map_queues,
1839};
1840
1841
1842static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
1843{
1844 struct request_queue *q = hctx->queue;
1845 struct scsi_device *sdev = q->queuedata;
1846 struct Scsi_Host *shost = sdev->host;
1847
1848 shost->hostt->commit_rqs(shost, hctx->queue_num);
1849}
1850
1824static const struct blk_mq_ops scsi_mq_ops = { 1851static const struct blk_mq_ops scsi_mq_ops = {
1825 .get_budget = scsi_mq_get_budget, 1852 .get_budget = scsi_mq_get_budget,
1826 .put_budget = scsi_mq_put_budget, 1853 .put_budget = scsi_mq_put_budget,
1827 .queue_rq = scsi_queue_rq, 1854 .queue_rq = scsi_queue_rq,
1855 .commit_rqs = scsi_commit_rqs,
1828 .complete = scsi_softirq_done, 1856 .complete = scsi_softirq_done,
1829 .timeout = scsi_timeout, 1857 .timeout = scsi_timeout,
1830#ifdef CONFIG_BLK_DEBUG_FS 1858#ifdef CONFIG_BLK_DEBUG_FS
@@ -1861,7 +1889,10 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
1861 sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT; 1889 sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
1862 1890
1863 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); 1891 memset(&shost->tag_set, 0, sizeof(shost->tag_set));
1864 shost->tag_set.ops = &scsi_mq_ops; 1892 if (shost->hostt->commit_rqs)
1893 shost->tag_set.ops = &scsi_mq_ops;
1894 else
1895 shost->tag_set.ops = &scsi_mq_ops_no_commit;
1865 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1; 1896 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
1866 shost->tag_set.queue_depth = shost->can_queue; 1897 shost->tag_set.queue_depth = shost->can_queue;
1867 shost->tag_set.cmd_size = cmd_size; 1898 shost->tag_set.cmd_size = cmd_size;
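The two ops tables exist because scsi_commit_rqs() dereferences shost->hostt->commit_rqs unconditionally, and blk-mq invokes ->commit_rqs whenever the table provides one; hosts whose template lacks the hook therefore get the table where the member is NULL. Selecting the table once at tag-set setup avoids a per-batch NULL check. A driver-side sketch of the batching contract, names hypothetical:

  #include <scsi/scsi_host.h>

  struct my_queue;
  struct my_queue *my_hwq_from_shost(struct Scsi_Host *shost, u16 hwq); /* hypothetical */
  void ring_doorbell(struct my_queue *q);                               /* hypothetical */

  /* queue_rq() posts requests without ringing the doorbell while
   * bd->last is false; blk-mq then calls this once for the batch. */
  static void my_commit_rqs(struct Scsi_Host *shost, u16 hwq)
  {
          ring_doorbell(my_hwq_from_shost(shost, hwq));
  }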
@@ -2691,6 +2722,14 @@ void scsi_start_queue(struct scsi_device *sdev)
2691int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, 2722int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
2692 enum scsi_device_state new_state) 2723 enum scsi_device_state new_state)
2693{ 2724{
2725 switch (new_state) {
2726 case SDEV_RUNNING:
2727 case SDEV_TRANSPORT_OFFLINE:
2728 break;
2729 default:
2730 return -EINVAL;
2731 }
2732
2694 /* 2733 /*
2695 * Try to transition the scsi device to SDEV_RUNNING or one of the 2734 * Try to transition the scsi device to SDEV_RUNNING or one of the
2696 * offlined states and goose the device queue if successful. 2735 * offlined states and goose the device queue if successful.
@@ -2748,7 +2787,12 @@ static int scsi_internal_device_unblock(struct scsi_device *sdev,
2748static void 2787static void
2749device_block(struct scsi_device *sdev, void *data) 2788device_block(struct scsi_device *sdev, void *data)
2750{ 2789{
2751 scsi_internal_device_block(sdev); 2790 int ret;
2791
2792 ret = scsi_internal_device_block(sdev);
2793
2794 WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
2795 dev_name(&sdev->sdev_gendev), ret);
2752} 2796}
2753 2797
2754static int 2798static int
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index 39b8cc4574b4..c6ed0b12e807 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -15,57 +15,15 @@
15#include <scsi/scsi_eh.h> 15#include <scsi/scsi_eh.h>
16#include <scsi/scsi_dbg.h> 16#include <scsi/scsi_dbg.h>
17 17
18#define SCSI_LOG_SPOOLSIZE 4096
19
20#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
21#warning SCSI logging bitmask too large
22#endif
23
24struct scsi_log_buf {
25 char buffer[SCSI_LOG_SPOOLSIZE];
26 unsigned long map;
27};
28
29static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
30
31static char *scsi_log_reserve_buffer(size_t *len) 18static char *scsi_log_reserve_buffer(size_t *len)
32{ 19{
33 struct scsi_log_buf *buf; 20 *len = 128;
34 unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE; 21 return kmalloc(*len, GFP_ATOMIC);
35 unsigned long idx = 0;
36
37 preempt_disable();
38 buf = this_cpu_ptr(&scsi_format_log);
39 idx = find_first_zero_bit(&buf->map, map_bits);
40 if (likely(idx < map_bits)) {
41 while (test_and_set_bit(idx, &buf->map)) {
42 idx = find_next_zero_bit(&buf->map, map_bits, idx);
43 if (idx >= map_bits)
44 break;
45 }
46 }
47 if (WARN_ON(idx >= map_bits)) {
48 preempt_enable();
49 return NULL;
50 }
51 *len = SCSI_LOG_BUFSIZE;
52 return buf->buffer + idx * SCSI_LOG_BUFSIZE;
53} 22}
54 23
55static void scsi_log_release_buffer(char *bufptr) 24static void scsi_log_release_buffer(char *bufptr)
56{ 25{
57 struct scsi_log_buf *buf; 26 kfree(bufptr);
58 unsigned long idx;
59 int ret;
60
61 buf = this_cpu_ptr(&scsi_format_log);
62 if (bufptr >= buf->buffer &&
63 bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
64 idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
65 ret = test_and_clear_bit(idx, &buf->map);
66 WARN_ON(!ret);
67 }
68 preempt_enable();
69} 27}
70 28
71static inline const char *scmd_name(const struct scsi_cmnd *scmd) 29static inline const char *scmd_name(const struct scsi_cmnd *scmd)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4b925552458f..7623196de9e3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1981,6 +1981,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1981 sd_printk(KERN_INFO, sdkp, 1981 sd_printk(KERN_INFO, sdkp,
1982 "Unaligned partial completion (resid=%u, sector_sz=%u)\n", 1982 "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
1983 resid, sector_size); 1983 resid, sector_size);
1984 scsi_print_command(SCpnt);
1984 resid = min(scsi_bufflen(SCpnt), 1985 resid = min(scsi_bufflen(SCpnt),
1985 round_up(resid, sector_size)); 1986 round_up(resid, sector_size));
1986 scsi_set_resid(SCpnt, resid); 1987 scsi_set_resid(SCpnt, resid);
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 97e159c2cecd..bc6506884e3b 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,6 +1,8 @@
1# 1#
2# Kernel configuration file for the SMARTPQI 2# Kernel configuration file for the SMARTPQI
3# 3#
4# Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
5# Copyright (c) 2017-2018 Microsemi Corporation
4# Copyright (c) 2016 Microsemi Corporation 6# Copyright (c) 2016 Microsemi Corporation
5# Copyright (c) 2016 PMC-Sierra, Inc. 7# Copyright (c) 2016 PMC-Sierra, Inc.
6# (mailto:esc.storagedev@microsemi.com) 8# (mailto:esc.storagedev@microsemi.com)
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e8e768849c70..79d2af36f655 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -822,6 +822,7 @@ union pqi_reset_register {
822#define PQI_HBA_BUS 2 822#define PQI_HBA_BUS 2
823#define PQI_EXTERNAL_RAID_VOLUME_BUS 3 823#define PQI_EXTERNAL_RAID_VOLUME_BUS 3
824#define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS 824#define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS
825#define PQI_VSEP_CISS_BTL 379
825 826
826struct report_lun_header { 827struct report_lun_header {
827 __be32 list_length; 828 __be32 list_length;
@@ -930,6 +931,9 @@ struct pqi_scsi_dev {
930 u8 active_path_index; 931 u8 active_path_index;
931 u8 path_map; 932 u8 path_map;
932 u8 bay; 933 u8 bay;
934 u8 box_index;
935 u8 phys_box_on_bus;
936 u8 phy_connected_dev_type;
933 u8 box[8]; 937 u8 box[8];
934 u16 phys_connector[8]; 938 u16 phys_connector[8];
935 bool raid_bypass_configured; /* RAID bypass configured */ 939 bool raid_bypass_configured; /* RAID bypass configured */
@@ -1073,6 +1077,9 @@ struct pqi_ctrl_info {
1073 unsigned int ctrl_id; 1077 unsigned int ctrl_id;
1074 struct pci_dev *pci_dev; 1078 struct pci_dev *pci_dev;
1075 char firmware_version[11]; 1079 char firmware_version[11];
1080 char serial_number[17];
1081 char model[17];
1082 char vendor[9];
1076 void __iomem *iomem_base; 1083 void __iomem *iomem_base;
1077 struct pqi_ctrl_registers __iomem *registers; 1084 struct pqi_ctrl_registers __iomem *registers;
1078 struct pqi_device_registers __iomem *pqi_registers; 1085 struct pqi_device_registers __iomem *pqi_registers;
@@ -1224,12 +1231,21 @@ struct bmic_identify_controller {
1224 __le16 extended_logical_unit_count; 1231 __le16 extended_logical_unit_count;
1225 u8 reserved1[34]; 1232 u8 reserved1[34];
1226 __le16 firmware_build_number; 1233 __le16 firmware_build_number;
1227 u8 reserved2[100]; 1234 u8 reserved2[8];
1235 u8 vendor_id[8];
1236 u8 product_id[16];
1237 u8 reserved3[68];
1228 u8 controller_mode; 1238 u8 controller_mode;
1229 u8 reserved3[32]; 1239 u8 reserved4[32];
1240};
1241
1242struct bmic_sense_subsystem_info {
1243 u8 reserved[44];
1244 u8 ctrl_serial_number[16];
1230}; 1245};
1231 1246
1232#define SA_EXPANDER_SMP_DEVICE 0x05 1247#define SA_EXPANDER_SMP_DEVICE 0x05
1248#define SA_CONTROLLER_DEVICE 0x07
1233/* SCSI Invalid Device Type for SAS devices */ 1249/* SCSI Invalid Device Type for SAS devices */
1234#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff 1250#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff
1235 1251
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8fd5ffc55792..ea5409bebf57 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
33#define BUILD_TIMESTAMP 33#define BUILD_TIMESTAMP
34#endif 34#endif
35 35
36#define DRIVER_VERSION "1.2.6-015" 36#define DRIVER_VERSION "1.2.8-026"
37#define DRIVER_MAJOR 1 37#define DRIVER_MAJOR 1
38#define DRIVER_MINOR 2 38#define DRIVER_MINOR 2
39#define DRIVER_RELEASE 6 39#define DRIVER_RELEASE 8
40#define DRIVER_REVISION 15 40#define DRIVER_REVISION 26
41 41
42#define DRIVER_NAME "Microsemi PQI Driver (v" \ 42#define DRIVER_NAME "Microsemi PQI Driver (v" \
43 DRIVER_VERSION BUILD_TIMESTAMP ")" 43 DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -145,6 +145,18 @@ MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
145 "\t\tSupported: none, reboot, panic\n" 145 "\t\tSupported: none, reboot, panic\n"
146 "\t\tDefault: none"); 146 "\t\tDefault: none");
147 147
148static int pqi_expose_ld_first;
149module_param_named(expose_ld_first,
150 pqi_expose_ld_first, int, 0644);
151MODULE_PARM_DESC(expose_ld_first,
152 "Expose logical drives before physical drives.");
153
154static int pqi_hide_vsep;
155module_param_named(hide_vsep,
156 pqi_hide_vsep, int, 0644);
157MODULE_PARM_DESC(hide_vsep,
158 "Hide the virtual SEP for direct attached drives.");
159
148static char *raid_levels[] = { 160static char *raid_levels[] = {
149 "RAID-0", 161 "RAID-0",
150 "RAID-4", 162 "RAID-4",
@@ -472,6 +484,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
472 /* fall through */ 484 /* fall through */
473 case BMIC_IDENTIFY_CONTROLLER: 485 case BMIC_IDENTIFY_CONTROLLER:
474 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 486 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
487 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
475 request->data_direction = SOP_READ_FLAG; 488 request->data_direction = SOP_READ_FLAG;
476 cdb[0] = BMIC_READ; 489 cdb[0] = BMIC_READ;
477 cdb[6] = cmd; 490 cdb[6] = cmd;
@@ -600,6 +613,14 @@ static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
600 buffer, sizeof(*buffer)); 613 buffer, sizeof(*buffer));
601} 614}
602 615
616static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
617 struct bmic_sense_subsystem_info *sense_info)
618{
619 return pqi_send_ctrl_raid_request(ctrl_info,
620 BMIC_SENSE_SUBSYSTEM_INFORMATION,
621 sense_info, sizeof(*sense_info));
622}
623
603static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 624static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
604 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 625 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
605{ 626{
@@ -1392,7 +1413,9 @@ static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1392 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1413 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1393 return; 1414 return;
1394 } 1415 }
1395 1416 device->box_index = id_phys->box_index;
1417 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1418 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1396 device->queue_depth = 1419 device->queue_depth =
1397 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1420 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1398 device->device_type = id_phys->device_type; 1421 device->device_type = id_phys->device_type;
@@ -1719,6 +1742,10 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1719 existing_device->active_path_index = new_device->active_path_index; 1742 existing_device->active_path_index = new_device->active_path_index;
1720 existing_device->path_map = new_device->path_map; 1743 existing_device->path_map = new_device->path_map;
1721 existing_device->bay = new_device->bay; 1744 existing_device->bay = new_device->bay;
1745 existing_device->box_index = new_device->box_index;
1746 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
1747 existing_device->phy_connected_dev_type =
1748 new_device->phy_connected_dev_type;
1722 memcpy(existing_device->box, new_device->box, 1749 memcpy(existing_device->box, new_device->box,
1723 sizeof(existing_device->box)); 1750 sizeof(existing_device->box));
1724 memcpy(existing_device->phys_connector, new_device->phys_connector, 1751 memcpy(existing_device->phys_connector, new_device->phys_connector,
@@ -1945,6 +1972,11 @@ static inline bool pqi_skip_device(u8 *scsi3addr)
1945 return false; 1972 return false;
1946} 1973}
1947 1974
1975static inline void pqi_mask_device(u8 *scsi3addr)
1976{
1977 scsi3addr[3] |= 0xc0;
1978}
1979
1948static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device) 1980static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
1949{ 1981{
1950 if (!device->is_physical_device) 1982 if (!device->is_physical_device)
@@ -1988,6 +2020,8 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1988 unsigned int num_valid_devices; 2020 unsigned int num_valid_devices;
1989 bool is_physical_device; 2021 bool is_physical_device;
1990 u8 *scsi3addr; 2022 u8 *scsi3addr;
2023 unsigned int physical_index;
2024 unsigned int logical_index;
1991 static char *out_of_memory_msg = 2025 static char *out_of_memory_msg =
1992 "failed to allocate memory, device discovery stopped"; 2026 "failed to allocate memory, device discovery stopped";
1993 2027
@@ -2023,6 +2057,21 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2023 rc = -ENOMEM; 2057 rc = -ENOMEM;
2024 goto out; 2058 goto out;
2025 } 2059 }
2060 if (pqi_hide_vsep) {
2061 int i;
2062
2063 for (i = num_physicals - 1; i >= 0; i--) {
2064 phys_lun_ext_entry =
2065 &physdev_list->lun_entries[i];
2066 if (CISS_GET_DRIVE_NUMBER(
2067 phys_lun_ext_entry->lunid) ==
2068 PQI_VSEP_CISS_BTL) {
2069 pqi_mask_device(
2070 phys_lun_ext_entry->lunid);
2071 break;
2072 }
2073 }
2074 }
2026 } 2075 }
2027 2076
2028 num_new_devices = num_physicals + num_logicals; 2077 num_new_devices = num_physicals + num_logicals;
@@ -2050,19 +2099,23 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2050 2099
2051 device = NULL; 2100 device = NULL;
2052 num_valid_devices = 0; 2101 num_valid_devices = 0;
2102 physical_index = 0;
2103 logical_index = 0;
2053 2104
2054 for (i = 0; i < num_new_devices; i++) { 2105 for (i = 0; i < num_new_devices; i++) {
2055 2106
2056 if (i < num_physicals) { 2107 if ((!pqi_expose_ld_first && i < num_physicals) ||
2108 (pqi_expose_ld_first && i >= num_logicals)) {
2057 is_physical_device = true; 2109 is_physical_device = true;
2058 phys_lun_ext_entry = &physdev_list->lun_entries[i]; 2110 phys_lun_ext_entry =
2111 &physdev_list->lun_entries[physical_index++];
2059 log_lun_ext_entry = NULL; 2112 log_lun_ext_entry = NULL;
2060 scsi3addr = phys_lun_ext_entry->lunid; 2113 scsi3addr = phys_lun_ext_entry->lunid;
2061 } else { 2114 } else {
2062 is_physical_device = false; 2115 is_physical_device = false;
2063 phys_lun_ext_entry = NULL; 2116 phys_lun_ext_entry = NULL;
2064 log_lun_ext_entry = 2117 log_lun_ext_entry =
2065 &logdev_list->lun_entries[i - num_physicals]; 2118 &logdev_list->lun_entries[logical_index++];
2066 scsi3addr = log_lun_ext_entry->lunid; 2119 scsi3addr = log_lun_ext_entry->lunid;
2067 } 2120 }
2068 2121
@@ -2122,11 +2175,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2122 device->aio_handle = 2175 device->aio_handle =
2123 phys_lun_ext_entry->aio_handle; 2176 phys_lun_ext_entry->aio_handle;
2124 } 2177 }
2125 if (device->devtype == TYPE_DISK || 2178
2126 device->devtype == TYPE_ZBC) {
2127 pqi_get_physical_disk_info(ctrl_info, 2179 pqi_get_physical_disk_info(ctrl_info,
2128 device, id_phys); 2180 device, id_phys);
2129 } 2181
2130 } else { 2182 } else {
2131 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 2183 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2132 sizeof(device->volume_id)); 2184 sizeof(device->volume_id));
@@ -2184,18 +2236,20 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2184 2236
2185static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2237static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2186{ 2238{
2187 int rc; 2239 int rc = 0;
2188 2240
2189 if (pqi_ctrl_offline(ctrl_info)) 2241 if (pqi_ctrl_offline(ctrl_info))
2190 return -ENXIO; 2242 return -ENXIO;
2191 2243
2192 mutex_lock(&ctrl_info->scan_mutex); 2244 if (!mutex_trylock(&ctrl_info->scan_mutex)) {
2193
2194 rc = pqi_update_scsi_devices(ctrl_info);
2195 if (rc)
2196 pqi_schedule_rescan_worker_delayed(ctrl_info); 2245 pqi_schedule_rescan_worker_delayed(ctrl_info);
2197 2246 rc = -EINPROGRESS;
2198 mutex_unlock(&ctrl_info->scan_mutex); 2247 } else {
2248 rc = pqi_update_scsi_devices(ctrl_info);
2249 if (rc)
2250 pqi_schedule_rescan_worker_delayed(ctrl_info);
2251 mutex_unlock(&ctrl_info->scan_mutex);
2252 }
2199 2253
2200 return rc; 2254 return rc;
2201} 2255}
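mutex_trylock() makes the rescan path non-blocking: if another scan already holds scan_mutex, the request is deferred to the delayed rescan worker and -EINPROGRESS is returned instead of sleeping on the lock. The shape of the pattern, with hypothetical helpers:

  #include <linux/mutex.h>
  #include <linux/errno.h>

  static DEFINE_MUTEX(scan_mutex);

  void schedule_rescan_later(void);   /* hypothetical */
  int do_scan(void);                  /* hypothetical */

  int try_scan(void)
  {
          int rc;

          if (!mutex_trylock(&scan_mutex)) {
                  /* Someone else is scanning; retry via the worker. */
                  schedule_rescan_later();
                  return -EINPROGRESS;
          }
          rc = do_scan();
          mutex_unlock(&scan_mutex);
          return rc;
  }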
@@ -6091,23 +6145,65 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6091 return rc; 6145 return rc;
6092} 6146}
6093 6147
6094static ssize_t pqi_version_show(struct device *dev, 6148static ssize_t pqi_firmware_version_show(struct device *dev,
6149 struct device_attribute *attr, char *buffer)
6150{
6151 struct Scsi_Host *shost;
6152 struct pqi_ctrl_info *ctrl_info;
6153
6154 shost = class_to_shost(dev);
6155 ctrl_info = shost_to_hba(shost);
6156
6157 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6158}
6159
6160static ssize_t pqi_driver_version_show(struct device *dev,
6161 struct device_attribute *attr, char *buffer)
6162{
6163 struct Scsi_Host *shost;
6164 struct pqi_ctrl_info *ctrl_info;
6165
6166 shost = class_to_shost(dev);
6167 ctrl_info = shost_to_hba(shost);
6168
6169 return snprintf(buffer, PAGE_SIZE,
6170 "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6171}
6172
6173static ssize_t pqi_serial_number_show(struct device *dev,
6095 struct device_attribute *attr, char *buffer) 6174 struct device_attribute *attr, char *buffer)
6096{ 6175{
6097 ssize_t count = 0;
6098 struct Scsi_Host *shost; 6176 struct Scsi_Host *shost;
6099 struct pqi_ctrl_info *ctrl_info; 6177 struct pqi_ctrl_info *ctrl_info;
6100 6178
6101 shost = class_to_shost(dev); 6179 shost = class_to_shost(dev);
6102 ctrl_info = shost_to_hba(shost); 6180 ctrl_info = shost_to_hba(shost);
6103 6181
6104 count += snprintf(buffer + count, PAGE_SIZE - count, 6182 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6105 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP); 6183}
6184
6185static ssize_t pqi_model_show(struct device *dev,
6186 struct device_attribute *attr, char *buffer)
6187{
6188 struct Scsi_Host *shost;
6189 struct pqi_ctrl_info *ctrl_info;
6106 6190
6107 count += snprintf(buffer + count, PAGE_SIZE - count, 6191 shost = class_to_shost(dev);
6108 "firmware: %s\n", ctrl_info->firmware_version); 6192 ctrl_info = shost_to_hba(shost);
6109 6193
6110 return count; 6194 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6195}
6196
6197static ssize_t pqi_vendor_show(struct device *dev,
6198 struct device_attribute *attr, char *buffer)
6199{
6200 struct Scsi_Host *shost;
6201 struct pqi_ctrl_info *ctrl_info;
6202
6203 shost = class_to_shost(dev);
6204 ctrl_info = shost_to_hba(shost);
6205
6206 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6111} 6207}
6112 6208
6113static ssize_t pqi_host_rescan_store(struct device *dev, 6209static ssize_t pqi_host_rescan_store(struct device *dev,
@@ -6160,13 +6256,21 @@ static ssize_t pqi_lockup_action_store(struct device *dev,
6160 return -EINVAL; 6256 return -EINVAL;
6161} 6257}
6162 6258
6163static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); 6259static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6260static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6261static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6262static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6263static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6164static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 6264static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6165static DEVICE_ATTR(lockup_action, 0644, 6265static DEVICE_ATTR(lockup_action, 0644,
6166 pqi_lockup_action_show, pqi_lockup_action_store); 6266 pqi_lockup_action_show, pqi_lockup_action_store);
6167 6267
6168static struct device_attribute *pqi_shost_attrs[] = { 6268static struct device_attribute *pqi_shost_attrs[] = {
6169 &dev_attr_version, 6269 &dev_attr_driver_version,
6270 &dev_attr_firmware_version,
6271 &dev_attr_model,
6272 &dev_attr_serial_number,
6273 &dev_attr_vendor,
6170 &dev_attr_rescan, 6274 &dev_attr_rescan,
6171 &dev_attr_lockup_action, 6275 &dev_attr_lockup_action,
6172 NULL 6276 NULL
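Each of the new entries follows the stock sysfs pattern: a read-only show() callback snprintf()s into the page-sized buffer, DEVICE_ATTR(name, 0444, ..._show, NULL) generates dev_attr_<name>, and the pointer goes into the host attribute array. One attribute in sketch form, with the controller lookup hypothetical:

  #include <linux/device.h>

  struct my_ctrl {
          char serial_number[17];
  };

  struct my_ctrl *dev_to_ctrl(struct device *dev);   /* hypothetical */

  static ssize_t serial_number_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
  {
          struct my_ctrl *ctrl = dev_to_ctrl(dev);

          return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->serial_number);
  }
  static DEVICE_ATTR(serial_number, 0444, serial_number_show, NULL);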
@@ -6558,7 +6662,30 @@ static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6558 return rc; 6662 return rc;
6559} 6663}
6560 6664
6561static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) 6665static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
6666{
6667 int rc;
6668 struct bmic_sense_subsystem_info *sense_info;
6669
6670 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
6671 if (!sense_info)
6672 return -ENOMEM;
6673
6674 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
6675 if (rc)
6676 goto out;
6677
6678 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
6679 sizeof(sense_info->ctrl_serial_number));
6680 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
6681
6682out:
6683 kfree(sense_info);
6684
6685 return rc;
6686}
6687
6688static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6562{ 6689{
6563 int rc; 6690 int rc;
6564 struct bmic_identify_controller *identify; 6691 struct bmic_identify_controller *identify;
@@ -6579,6 +6706,14 @@ static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
6579 sizeof(ctrl_info->firmware_version), 6706 sizeof(ctrl_info->firmware_version),
6580 "-%u", get_unaligned_le16(&identify->firmware_build_number)); 6707 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6581 6708
6709 memcpy(ctrl_info->model, identify->product_id,
6710 sizeof(identify->product_id));
6711 ctrl_info->model[sizeof(identify->product_id)] = '\0';
6712
6713 memcpy(ctrl_info->vendor, identify->vendor_id,
6714 sizeof(identify->vendor_id));
6715 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
6716
6582out: 6717out:
6583 kfree(identify); 6718 kfree(identify);
6584 6719
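Note the sizing convention feeding these copies: the BMIC reply fields (product_id[16], vendor_id[8], ctrl_serial_number[16]) are fixed-width and not NUL-terminated, so the pqi_ctrl_info buffers are declared one byte larger and terminated by hand after memcpy(). The idiom in sketch form:

  #include <linux/string.h>

  struct fw_reply {
          unsigned char product_id[16];   /* fixed width, no terminator */
  };

  struct ctrl {
          char model[17];                 /* 16 bytes + NUL */
  };

  static void copy_model(struct ctrl *c, const struct fw_reply *r)
  {
          memcpy(c->model, r->product_id, sizeof(r->product_id));
          c->model[sizeof(r->product_id)] = '\0';
  }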
@@ -7098,10 +7233,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7098 if (rc) 7233 if (rc)
7099 return rc; 7234 return rc;
7100 7235
7101 rc = pqi_get_ctrl_firmware_version(ctrl_info); 7236 rc = pqi_get_ctrl_product_details(ctrl_info);
7102 if (rc) { 7237 if (rc) {
7103 dev_err(&ctrl_info->pci_dev->dev, 7238 dev_err(&ctrl_info->pci_dev->dev,
7104 "error obtaining firmware version\n"); 7239 "error obtaining product details\n");
7240 return rc;
7241 }
7242
7243 rc = pqi_get_ctrl_serial_number(ctrl_info);
7244 if (rc) {
7245 dev_err(&ctrl_info->pci_dev->dev,
7246 "error obtaining ctrl serial number\n");
7105 return rc; 7247 return rc;
7106 } 7248 }
7107 7249
@@ -7241,10 +7383,10 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7241 return rc; 7383 return rc;
7242 } 7384 }
7243 7385
7244 rc = pqi_get_ctrl_firmware_version(ctrl_info); 7386 rc = pqi_get_ctrl_product_details(ctrl_info);
7245 if (rc) { 7387 if (rc) {
7246 dev_err(&ctrl_info->pci_dev->dev, 7388 dev_err(&ctrl_info->pci_dev->dev,
7247 "error obtaining firmware version\n"); 7389 "error obtaining product detail\n");
7248 return rc; 7390 return rc;
7249 } 7391 }
7250 7392
@@ -8024,6 +8166,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
8024 }, 8166 },
8025 { 8167 {
8026 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8168 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8169 0x1bd4, 0x004f)
8170 },
8171 {
8172 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8027 0x19e5, 0xd227) 8173 0x19e5, 0xd227)
8028 }, 8174 },
8029 { 8175 {
@@ -8088,6 +8234,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
8088 }, 8234 },
8089 { 8235 {
8090 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8236 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8237 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
8238 },
8239 {
8240 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8241 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
8242 },
8243 {
8244 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8091 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 8245 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
8092 }, 8246 },
8093 { 8247 {
@@ -8244,6 +8398,26 @@ static const struct pci_device_id pqi_pci_id_table[] = {
8244 }, 8398 },
8245 { 8399 {
8246 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8400 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8401 0x1d8d, 0x0800)
8402 },
8403 {
8404 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8405 0x1d8d, 0x0908)
8406 },
8407 {
8408 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8409 0x1d8d, 0x0806)
8410 },
8411 {
8412 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8413 0x1d8d, 0x0916)
8414 },
8415 {
8416 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8417 PCI_VENDOR_ID_GIGABYTE, 0x1000)
8418 },
8419 {
8420 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8247 PCI_ANY_ID, PCI_ANY_ID) 8421 PCI_ANY_ID, PCI_ANY_ID)
8248 }, 8422 },
8249 { 0 } 8423 { 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 5cca1b9ef1f1..6776dfc1d317 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -312,12 +312,110 @@ static int pqi_sas_get_linkerrors(struct sas_phy *phy)
312static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy, 312static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
313 u64 *identifier) 313 u64 *identifier)
314{ 314{
315 return 0; 315
316 int rc;
317 unsigned long flags;
318 struct Scsi_Host *shost;
319 struct pqi_ctrl_info *ctrl_info;
320 struct pqi_scsi_dev *found_device;
321 struct pqi_scsi_dev *device;
322
323 if (!rphy)
324 return -ENODEV;
325
326 shost = rphy_to_shost(rphy);
327 ctrl_info = shost_to_hba(shost);
328 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
329 found_device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
330
331 if (!found_device) {
332 rc = -ENODEV;
333 goto out;
334 }
335
336 if (found_device->devtype == TYPE_ENCLOSURE) {
337 *identifier = get_unaligned_be64(&found_device->wwid);
338 rc = 0;
339 goto out;
340 }
341
342 if (found_device->box_index == 0xff ||
343 found_device->phys_box_on_bus == 0 ||
344 found_device->bay == 0xff) {
345 rc = -EINVAL;
346 goto out;
347 }
348
349 list_for_each_entry(device, &ctrl_info->scsi_device_list,
350 scsi_device_list_entry) {
351 if (device->devtype == TYPE_ENCLOSURE &&
352 device->box_index == found_device->box_index &&
353 device->phys_box_on_bus ==
354 found_device->phys_box_on_bus &&
355 memcmp(device->phys_connector,
356 found_device->phys_connector, 2) == 0) {
357 *identifier =
358 get_unaligned_be64(&device->wwid);
359 rc = 0;
360 goto out;
361 }
362 }
363
364 if (found_device->phy_connected_dev_type != SA_CONTROLLER_DEVICE) {
365 rc = -EINVAL;
366 goto out;
367 }
368
369 list_for_each_entry(device, &ctrl_info->scsi_device_list,
370 scsi_device_list_entry) {
371 if (device->devtype == TYPE_ENCLOSURE &&
372 CISS_GET_DRIVE_NUMBER(device->scsi3addr) ==
373 PQI_VSEP_CISS_BTL) {
374 *identifier = get_unaligned_be64(&device->wwid);
375 rc = 0;
376 goto out;
377 }
378 }
379
380 rc = -EINVAL;
381out:
382 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
383
384 return rc;
385
316} 386}
317 387
318static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy) 388static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
319{ 389{
320 return -ENXIO; 390
391 int rc;
392 unsigned long flags;
393 struct pqi_ctrl_info *ctrl_info;
394 struct pqi_scsi_dev *device;
395 struct Scsi_Host *shost;
396
397 if (!rphy)
398 return -ENODEV;
399
400 shost = rphy_to_shost(rphy);
401 ctrl_info = shost_to_hba(shost);
402 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
403 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
404
405 if (!device) {
406 rc = -ENODEV;
407 goto out;
408 }
409
410 if (device->bay == 0xff)
411 rc = -EINVAL;
412 else
413 rc = device->bay;
414
415out:
416 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
417
418 return rc;
321} 419}
322 420
323static int pqi_sas_phy_reset(struct sas_phy *phy, int hard_reset) 421static int pqi_sas_phy_reset(struct sas_phy *phy, int hard_reset)
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3d80ab67a626..955e4c938d49 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -397,10 +397,12 @@ static int sun3scsi_dma_finish(int write_flag)
397 case CSR_LEFT_3: 397 case CSR_LEFT_3:
398 *vaddr = (dregs->bpack_lo & 0xff00) >> 8; 398 *vaddr = (dregs->bpack_lo & 0xff00) >> 8;
399 vaddr--; 399 vaddr--;
400 /* Fall through */
400 401
401 case CSR_LEFT_2: 402 case CSR_LEFT_2:
402 *vaddr = (dregs->bpack_hi & 0x00ff); 403 *vaddr = (dregs->bpack_hi & 0x00ff);
403 vaddr--; 404 vaddr--;
405 /* Fall through */
404 406
405 case CSR_LEFT_1: 407 case CSR_LEFT_1:
406 *vaddr = (dregs->bpack_hi & 0xff00) >> 8; 408 *vaddr = (dregs->bpack_hi & 0xff00) >> 8;
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index dd3f07b31612..9dc17f1288f9 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -648,7 +648,7 @@ static int sym_read_T93C46_nvram(struct sym_device *np, Tekram_nvram *nvram)
648{ 648{
649 u_char gpcntl, gpreg; 649 u_char gpcntl, gpreg;
650 u_char old_gpcntl, old_gpreg; 650 u_char old_gpcntl, old_gpreg;
651 int retv = 1; 651 int retv;
652 652
653 /* save current state of GPCNTL and GPREG */ 653 /* save current state of GPCNTL and GPREG */
654 old_gpreg = INB(np, nc_gpreg); 654 old_gpreg = INB(np, nc_gpreg);
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 86dbb723f3ac..b2af04c57a39 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -62,23 +62,47 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
62} 62}
63 63
64/** 64/**
65 * Sets clocks used by the controller 65 * Called before and after HCE enable bit is set.
66 * @hba: host controller instance 66 * @hba: host controller instance
67 * @on: if true, enable clocks, otherwise disable
68 * @status: notify stage (pre, post change) 67 * @status: notify stage (pre, post change)
69 * 68 *
70 * Return zero for success and non-zero for failure 69 * Return zero for success and non-zero for failure
71 */ 70 */
72static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on, 71static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
73 enum ufs_notify_change_status status) 72 enum ufs_notify_change_status status)
74{ 73{
75 if ((!on) || (status == PRE_CHANGE)) 74 if (status != PRE_CHANGE)
76 return 0; 75 return 0;
77 76
78 return cdns_ufs_set_hclkdiv(hba); 77 return cdns_ufs_set_hclkdiv(hba);
79} 78}
80 79
81/** 80/**
81 * Called before and after Link startup is carried out.
82 * @hba: host controller instance
83 * @status: notify stage (pre, post change)
84 *
85 * Return zero for success and non-zero for failure
86 */
87static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
88 enum ufs_notify_change_status status)
89{
90 if (status != PRE_CHANGE)
91 return 0;
92
93 /*
94 * Some UFS devices have issues if LCC is enabled.
 95 * So we set PA_Local_TX_LCC_Enable to 0 before
 96 * link startup, which ensures that both host and
 97 * device TX LCC are disabled once link startup
 98 * completes.

99 */
100 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
101
102 return 0;
103}
104
105/**
82 * cdns_ufs_init - performs additional ufs initialization 106 * cdns_ufs_init - performs additional ufs initialization
83 * @hba: host controller instance 107 * @hba: host controller instance
84 * 108 *
@@ -114,13 +138,15 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
114 138
115static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = { 139static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
116 .name = "cdns-ufs-pltfm", 140 .name = "cdns-ufs-pltfm",
117 .setup_clocks = cdns_ufs_setup_clocks, 141 .hce_enable_notify = cdns_ufs_hce_enable_notify,
142 .link_startup_notify = cdns_ufs_link_startup_notify,
118}; 143};
119 144
120static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = { 145static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
121 .name = "cdns-ufs-pltfm", 146 .name = "cdns-ufs-pltfm",
122 .init = cdns_ufs_init, 147 .init = cdns_ufs_init,
123 .setup_clocks = cdns_ufs_setup_clocks, 148 .hce_enable_notify = cdns_ufs_hce_enable_notify,
149 .link_startup_notify = cdns_ufs_link_startup_notify,
124 .phy_initialization = cdns_ufs_m31_16nm_phy_initialization, 150 .phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
125}; 151};
126 152
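
Both new callbacks follow the ufshcd notify convention: the core invokes each hook twice per operation, once with PRE_CHANGE before it touches the hardware and once with POST_CHANGE afterwards, and a vop that only cares about one stage simply returns 0 for the other. Skeleton of that convention (my_setup_before/my_verify_after are hypothetical helpers; the enum comes from ufshcd.h):

static int my_link_startup_notify(struct ufs_hba *hba,
				  enum ufs_notify_change_status status)
{
	switch (status) {
	case PRE_CHANGE:
		return my_setup_before(hba);	/* before DME_LINKSTARTUP */
	case POST_CHANGE:
		return my_verify_after(hba);	/* link is (maybe) up */
	}
	return 0;
}
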
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index f4d1dca962c4..6bbb1679bb91 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -447,13 +447,11 @@ static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
447 447
448static int ufs_hisi_get_resource(struct ufs_hisi_host *host) 448static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
449{ 449{
450 struct resource *mem_res;
451 struct device *dev = host->hba->dev; 450 struct device *dev = host->hba->dev;
452 struct platform_device *pdev = to_platform_device(dev); 451 struct platform_device *pdev = to_platform_device(dev);
453 452
454 /* get resource of ufs sys ctrl */ 453 /* get resource of ufs sys ctrl */
455 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 454 host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
456 host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res);
457 if (IS_ERR(host->ufs_sys_ctrl)) 455 if (IS_ERR(host->ufs_sys_ctrl))
458 return PTR_ERR(host->ufs_sys_ctrl); 456 return PTR_ERR(host->ufs_sys_ctrl);
459 457
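
devm_platform_ioremap_resource(pdev, index) is a one-call replacement for the get-resource/ioremap pair removed above, so the conversion is behavior-preserving. Roughly, the helper does the following (a condensed sketch of the implementation in drivers/base/platform.c, not a verbatim copy):

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}
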
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index ee4b1da1e223..a5b71487a206 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -8,6 +8,7 @@
8#include <linux/of.h> 8#include <linux/of.h>
9#include <linux/platform_device.h> 9#include <linux/platform_device.h>
10#include <linux/phy/phy.h> 10#include <linux/phy/phy.h>
11#include <linux/gpio/consumer.h>
11#include <linux/reset-controller.h> 12#include <linux/reset-controller.h>
12 13
13#include "ufshcd.h" 14#include "ufshcd.h"
@@ -800,7 +801,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
800 struct ufs_pa_layer_attr *dev_max_params, 801 struct ufs_pa_layer_attr *dev_max_params,
801 struct ufs_pa_layer_attr *dev_req_params) 802 struct ufs_pa_layer_attr *dev_req_params)
802{ 803{
803 u32 val;
804 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 804 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
805 struct ufs_dev_params ufs_qcom_cap; 805 struct ufs_dev_params ufs_qcom_cap;
806 int ret = 0; 806 int ret = 0;
@@ -869,8 +869,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
869 ret = -EINVAL; 869 ret = -EINVAL;
870 } 870 }
871 871
872 val = ~(MAX_U32 << dev_req_params->lane_tx);
873
874 /* cache the power mode parameters to use internally */ 872 /* cache the power mode parameters to use internally */
875 memcpy(&host->dev_req_params, 873 memcpy(&host->dev_req_params,
876 dev_req_params, sizeof(*dev_req_params)); 874 dev_req_params, sizeof(*dev_req_params));
@@ -1140,6 +1138,15 @@ static int ufs_qcom_init(struct ufs_hba *hba)
1140 } 1138 }
1141 } 1139 }
1142 1140
1141 host->device_reset = devm_gpiod_get_optional(dev, "reset",
1142 GPIOD_OUT_HIGH);
1143 if (IS_ERR(host->device_reset)) {
1144 err = PTR_ERR(host->device_reset);
1145 if (err != -EPROBE_DEFER)
1146 dev_err(dev, "failed to acquire reset gpio: %d\n", err);
1147 goto out_variant_clear;
1148 }
1149
1143 err = ufs_qcom_bus_register(host); 1150 err = ufs_qcom_bus_register(host);
1144 if (err) 1151 if (err)
1145 goto out_variant_clear; 1152 goto out_variant_clear;
@@ -1546,12 +1553,37 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
1546} 1553}
1547 1554
1548/** 1555/**
1556 * ufs_qcom_device_reset() - toggle the (optional) device reset line
1557 * @hba: per-adapter instance
1558 *
1559 * Toggles the (optional) reset line to reset the attached device.
1560 */
1561static void ufs_qcom_device_reset(struct ufs_hba *hba)
1562{
1563 struct ufs_qcom_host *host = ufshcd_get_variant(hba);
1564
1565 /* reset gpio is optional */
1566 if (!host->device_reset)
1567 return;
1568
1569 /*
 1570 * The UFS device shall detect reset pulses of 1 us; sleep for 10 us to
1571 * be on the safe side.
1572 */
1573 gpiod_set_value_cansleep(host->device_reset, 1);
1574 usleep_range(10, 15);
1575
1576 gpiod_set_value_cansleep(host->device_reset, 0);
1577 usleep_range(10, 15);
1578}
1579
1580/**
1549 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations 1581 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
1550 * 1582 *
1551 * The variant operations configure the necessary controller and PHY 1583 * The variant operations configure the necessary controller and PHY
1552 * handshake during initialization. 1584 * handshake during initialization.
1553 */ 1585 */
1554static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { 1586static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1555 .name = "qcom", 1587 .name = "qcom",
1556 .init = ufs_qcom_init, 1588 .init = ufs_qcom_init,
1557 .exit = ufs_qcom_exit, 1589 .exit = ufs_qcom_exit,
@@ -1565,6 +1597,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
1565 .suspend = ufs_qcom_suspend, 1597 .suspend = ufs_qcom_suspend,
1566 .resume = ufs_qcom_resume, 1598 .resume = ufs_qcom_resume,
1567 .dbg_register_dump = ufs_qcom_dump_dbg_regs, 1599 .dbg_register_dump = ufs_qcom_dump_dbg_regs,
1600 .device_reset = ufs_qcom_device_reset,
1568}; 1601};
1569 1602
1570/** 1603/**
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 001915d1e0e4..d401f174bb70 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -195,6 +195,8 @@ struct ufs_qcom_testbus {
195 u8 select_minor; 195 u8 select_minor;
196}; 196};
197 197
198struct gpio_desc;
199
198struct ufs_qcom_host { 200struct ufs_qcom_host {
199 /* 201 /*
200 * Set this capability if host controller supports the QUniPro mode 202 * Set this capability if host controller supports the QUniPro mode
@@ -232,6 +234,8 @@ struct ufs_qcom_host {
232 struct ufs_qcom_testbus testbus; 234 struct ufs_qcom_testbus testbus;
233 235
234 struct reset_controller_dev rcdev; 236 struct reset_controller_dev rcdev;
237
238 struct gpio_desc *device_reset;
235}; 239};
236 240
237static inline u32 241static inline u32
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index f478685122ff..969a36b15897 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -571,9 +571,10 @@ static ssize_t _name##_show(struct device *dev, \
571 int ret; \ 571 int ret; \
572 int desc_len = QUERY_DESC_MAX_SIZE; \ 572 int desc_len = QUERY_DESC_MAX_SIZE; \
573 u8 *desc_buf; \ 573 u8 *desc_buf; \
574 \
574 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \ 575 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
575 if (!desc_buf) \ 576 if (!desc_buf) \
576 return -ENOMEM; \ 577 return -ENOMEM; \
577 ret = ufshcd_query_descriptor_retry(hba, \ 578 ret = ufshcd_query_descriptor_retry(hba, \
578 UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \ 579 UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
579 0, 0, desc_buf, &desc_len); \ 580 0, 0, desc_buf, &desc_len); \
@@ -582,14 +583,13 @@ static ssize_t _name##_show(struct device *dev, \
582 goto out; \ 583 goto out; \
583 } \ 584 } \
584 index = desc_buf[DEVICE_DESC_PARAM##_pname]; \ 585 index = desc_buf[DEVICE_DESC_PARAM##_pname]; \
585 memset(desc_buf, 0, QUERY_DESC_MAX_SIZE); \ 586 kfree(desc_buf); \
586 if (ufshcd_read_string_desc(hba, index, desc_buf, \ 587 desc_buf = NULL; \
587 QUERY_DESC_MAX_SIZE, true)) { \ 588 ret = ufshcd_read_string_desc(hba, index, &desc_buf, \
588 ret = -EINVAL; \ 589 SD_ASCII_STD); \
590 if (ret < 0) \
589 goto out; \ 591 goto out; \
590 } \ 592 ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \
591 ret = snprintf(buf, PAGE_SIZE, "%s\n", \
592 desc_buf + QUERY_DESC_HDR_SIZE); \
593out: \ 593out: \
594 kfree(desc_buf); \ 594 kfree(desc_buf); \
595 return ret; \ 595 return ret; \
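
The macro now tracks the reworked ufshcd_read_string_desc() contract: the function allocates the string itself and hands ownership to the caller, a negative return is an errno, and a non-negative return is the string size. The caller-side flow, distilled out of the macro (a sketch; SD_ASCII_STD comes from the ufshcd.h hunk later in this patch):

	u8 *model = NULL;
	int ret;

	ret = ufshcd_read_string_desc(hba, index, &model, SD_ASCII_STD);
	if (ret < 0)
		return ret;	/* nothing to free: the pointer is NULL on error */

	/* ret == 0 with model == NULL means an empty descriptor */
	ret = model ? snprintf(buf, PAGE_SIZE, "%s\n", model) : 0;
	kfree(model);		/* caller owns the buffer */
	return ret;
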
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 99a9c4d16f6b..3327981ef894 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -541,7 +541,7 @@ struct ufs_dev_info {
541 */ 541 */
542struct ufs_dev_desc { 542struct ufs_dev_desc {
543 u16 wmanufacturerid; 543 u16 wmanufacturerid;
544 char model[MAX_MODEL_LEN + 1]; 544 u8 *model;
545}; 545};
546 546
547/** 547/**
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index d7d521b394c3..8d40dc918f4e 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -391,12 +391,10 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
391{ 391{
392 struct ufs_hba *hba; 392 struct ufs_hba *hba;
393 void __iomem *mmio_base; 393 void __iomem *mmio_base;
394 struct resource *mem_res;
395 int irq, err; 394 int irq, err;
396 struct device *dev = &pdev->dev; 395 struct device *dev = &pdev->dev;
397 396
398 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 397 mmio_base = devm_platform_ioremap_resource(pdev, 0);
399 mmio_base = devm_ioremap_resource(dev, mem_res);
400 if (IS_ERR(mmio_base)) { 398 if (IS_ERR(mmio_base)) {
401 err = PTR_ERR(mmio_base); 399 err = PTR_ERR(mmio_base);
402 goto out; 400 goto out;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 029da74bb2f5..034dd9cb9ec8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -299,16 +299,6 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
299 scsi_block_requests(hba->host); 299 scsi_block_requests(hba->host);
300} 300}
301 301
302/* replace non-printable or non-ASCII characters with spaces */
303static inline void ufshcd_remove_non_printable(char *val)
304{
305 if (!val)
306 return;
307
308 if (*val < 0x20 || *val > 0x7e)
309 *val = ' ';
310}
311
312static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, 302static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
313 const char *str) 303 const char *str)
314{ 304{
@@ -390,24 +380,25 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
390 } 380 }
391} 381}
392 382
393static void ufshcd_print_uic_err_hist(struct ufs_hba *hba, 383static void ufshcd_print_err_hist(struct ufs_hba *hba,
394 struct ufs_uic_err_reg_hist *err_hist, char *err_name) 384 struct ufs_err_reg_hist *err_hist,
385 char *err_name)
395{ 386{
396 int i; 387 int i;
397 bool found = false; 388 bool found = false;
398 389
399 for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) { 390 for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
400 int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH; 391 int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
401 392
402 if (err_hist->reg[p] == 0) 393 if (err_hist->reg[p] == 0)
403 continue; 394 continue;
404 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i, 395 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
405 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p])); 396 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
406 found = true; 397 found = true;
407 } 398 }
408 399
409 if (!found) 400 if (!found)
410 dev_err(hba->dev, "No record of %s uic errors\n", err_name); 401 dev_err(hba->dev, "No record of %s errors\n", err_name);
411} 402}
412 403
413static void ufshcd_print_host_regs(struct ufs_hba *hba) 404static void ufshcd_print_host_regs(struct ufs_hba *hba)
@@ -423,11 +414,22 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
423 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), 414 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
424 hba->ufs_stats.hibern8_exit_cnt); 415 hba->ufs_stats.hibern8_exit_cnt);
425 416
426 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err"); 417 ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
427 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err"); 418 ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
428 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err"); 419 ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
429 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err"); 420 ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
430 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err"); 421 ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
422 ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
423 "auto_hibern8_err");
424 ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
425 ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
426 "link_startup_fail");
427 ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
428 ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
429 "suspend_fail");
430 ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
431 ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
432 ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
431 433
432 ufshcd_print_clk_freqs(hba); 434 ufshcd_print_clk_freqs(hba);
433 435
@@ -3199,7 +3201,7 @@ out:
3199static inline int ufshcd_read_desc(struct ufs_hba *hba, 3201static inline int ufshcd_read_desc(struct ufs_hba *hba,
3200 enum desc_idn desc_id, 3202 enum desc_idn desc_id,
3201 int desc_index, 3203 int desc_index,
3202 u8 *buf, 3204 void *buf,
3203 u32 size) 3205 u32 size)
3204{ 3206{
3205 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); 3207 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
@@ -3218,48 +3220,77 @@ static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3218} 3220}
3219 3221
3220/** 3222/**
3223 * struct uc_string_id - unicode string
3224 *
 3225 * @len: size of this descriptor, header included
3226 * @type: descriptor type
3227 * @uc: unicode string character
3228 */
3229struct uc_string_id {
3230 u8 len;
3231 u8 type;
3232 wchar_t uc[0];
3233} __packed;
3234
3235/* replace non-printable or non-ASCII characters with spaces */
3236static inline char ufshcd_remove_non_printable(u8 ch)
3237{
3238 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3239}
3240
3241/**
3221 * ufshcd_read_string_desc - read string descriptor 3242 * ufshcd_read_string_desc - read string descriptor
3222 * @hba: pointer to adapter instance 3243 * @hba: pointer to adapter instance
3223 * @desc_index: descriptor index 3244 * @desc_index: descriptor index
3224 * @buf: pointer to buffer where descriptor would be read 3245 * @buf: pointer to buffer where descriptor would be read,
3225 * @size: size of buf 3246 * the caller should free the memory.
3226 * @ascii: if true convert from unicode to ascii characters 3247 * @ascii: if true convert from unicode to ascii characters
3248 * null terminated string.
3227 * 3249 *
3228 * Return 0 in case of success, non-zero otherwise 3250 * Return:
3251 * * string size on success.
3252 * * -ENOMEM: on allocation failure
3253 * * -EINVAL: on a wrong parameter
3229 */ 3254 */
3230int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, 3255int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3231 u8 *buf, u32 size, bool ascii) 3256 u8 **buf, bool ascii)
3232{ 3257{
3233 int err = 0; 3258 struct uc_string_id *uc_str;
3259 u8 *str;
3260 int ret;
3234 3261
3235 err = ufshcd_read_desc(hba, 3262 if (!buf)
3236 QUERY_DESC_IDN_STRING, desc_index, buf, size); 3263 return -EINVAL;
3237 3264
3238 if (err) { 3265 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3239 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n", 3266 if (!uc_str)
3240 __func__, QUERY_REQ_RETRIES, err); 3267 return -ENOMEM;
3268
3269 ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
3270 desc_index, uc_str,
3271 QUERY_DESC_MAX_SIZE);
3272 if (ret < 0) {
3273 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3274 QUERY_REQ_RETRIES, ret);
3275 str = NULL;
3276 goto out;
3277 }
3278
3279 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3280 dev_dbg(hba->dev, "String Desc is of zero length\n");
3281 str = NULL;
3282 ret = 0;
3241 goto out; 3283 goto out;
3242 } 3284 }
3243 3285
3244 if (ascii) { 3286 if (ascii) {
3245 int desc_len; 3287 ssize_t ascii_len;
3246 int ascii_len;
3247 int i; 3288 int i;
3248 char *buff_ascii;
3249
3250 desc_len = buf[0];
3251 /* remove header and divide by 2 to move from UTF16 to UTF8 */ 3289 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3252 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; 3290 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3253 if (size < ascii_len + QUERY_DESC_HDR_SIZE) { 3291 str = kzalloc(ascii_len, GFP_KERNEL);
3254 dev_err(hba->dev, "%s: buffer allocated size is too small\n", 3292 if (!str) {
3255 __func__); 3293 ret = -ENOMEM;
3256 err = -ENOMEM;
3257 goto out;
3258 }
3259
3260 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3261 if (!buff_ascii) {
3262 err = -ENOMEM;
3263 goto out; 3294 goto out;
3264 } 3295 }
3265 3296
@@ -3267,22 +3298,28 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3267 * the descriptor contains string in UTF16 format 3298 * the descriptor contains string in UTF16 format
3268 * we need to convert to utf-8 so it can be displayed 3299 * we need to convert to utf-8 so it can be displayed
3269 */ 3300 */
3270 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], 3301 ret = utf16s_to_utf8s(uc_str->uc,
3271 desc_len - QUERY_DESC_HDR_SIZE, 3302 uc_str->len - QUERY_DESC_HDR_SIZE,
3272 UTF16_BIG_ENDIAN, buff_ascii, ascii_len); 3303 UTF16_BIG_ENDIAN, str, ascii_len);
3273 3304
3274 /* replace non-printable or non-ASCII characters with spaces */ 3305 /* replace non-printable or non-ASCII characters with spaces */
3275 for (i = 0; i < ascii_len; i++) 3306 for (i = 0; i < ret; i++)
3276 ufshcd_remove_non_printable(&buff_ascii[i]); 3307 str[i] = ufshcd_remove_non_printable(str[i]);
3277 3308
3278 memset(buf + QUERY_DESC_HDR_SIZE, 0, 3309 str[ret++] = '\0';
3279 size - QUERY_DESC_HDR_SIZE); 3310
3280 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); 3311 } else {
3281 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; 3312 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3282 kfree(buff_ascii); 3313 if (!str) {
3314 ret = -ENOMEM;
3315 goto out;
3316 }
3317 ret = uc_str->len;
3283 } 3318 }
3284out: 3319out:
3285 return err; 3320 *buf = str;
3321 kfree(uc_str);
3322 return ret;
3286} 3323}
3287 3324
3288/** 3325/**
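
In ASCII mode the helper strips the 2-byte header, halves the UTF-16 payload with utf16s_to_utf8s(), and then maps anything outside 0x20..0x7e to a space via the rewritten ufshcd_remove_non_printable(). A stand-alone model of that sanitising step (plain userspace C; the UTF-16 conversion itself comes from linux/nls.h):

#include <stdio.h>

/* Mirror of ufshcd_remove_non_printable() in the hunk above. */
static char sanitize(unsigned char ch)
{
	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}

int main(void)
{
	const char s[] = "UFS\x01model\xff";

	for (int i = 0; s[i]; i++)
		putchar(sanitize((unsigned char)s[i]));
	putchar('\n');		/* prints "UFS model " */
	return 0;
}
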
@@ -4214,12 +4251,6 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4214{ 4251{
4215 int retry; 4252 int retry;
4216 4253
4217 /*
4218 * msleep of 1 and 5 used in this function might result in msleep(20),
4219 * but it was necessary to send the UFS FPGA to reset mode during
4220 * development and testing of this driver. msleep can be changed to
4221 * mdelay and retry count can be reduced based on the controller.
4222 */
4223 if (!ufshcd_is_hba_active(hba)) 4254 if (!ufshcd_is_hba_active(hba))
4224 /* change controller state to "reset state" */ 4255 /* change controller state to "reset state" */
4225 ufshcd_hba_stop(hba, true); 4256 ufshcd_hba_stop(hba, true);
@@ -4242,7 +4273,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4242 * instruction might be read back. 4273 * instruction might be read back.
4243 * This delay can be changed based on the controller. 4274 * This delay can be changed based on the controller.
4244 */ 4275 */
4245 msleep(1); 4276 usleep_range(1000, 1100);
4246 4277
4247 /* wait for the host controller to complete initialization */ 4278 /* wait for the host controller to complete initialization */
4248 retry = 10; 4279 retry = 10;
@@ -4254,7 +4285,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4254 "Controller enable failed\n"); 4285 "Controller enable failed\n");
4255 return -EIO; 4286 return -EIO;
4256 } 4287 }
4257 msleep(5); 4288 usleep_range(5000, 5100);
4258 } 4289 }
4259 4290
4260 /* enable UIC related interrupts */ 4291 /* enable UIC related interrupts */
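
The msleep() to usleep_range() swaps matter because msleep() rounds up to jiffies: on a HZ=100 kernel, msleep(1) can sleep for roughly 20 ms, while usleep_range() is hrtimer-backed and the range gives the scheduler room to coalesce wakeups. The usual rule of thumb, per Documentation/timers/timers-howto.rst (a sketch, not a real API; the primitives are from linux/delay.h):

/* Rule-of-thumb sketch: pick a delay primitive by duration. */
static void sleep_about(unsigned int us)
{
	if (us < 10)
		udelay(us);			/* too short to schedule */
	else if (us < 20 * 1000)
		usleep_range(us, us + us / 10);	/* hrtimer-backed */
	else
		msleep(us / 1000);		/* jiffies granularity is fine */
}
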
@@ -4326,6 +4357,14 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4326 return ufshcd_disable_tx_lcc(hba, true); 4357 return ufshcd_disable_tx_lcc(hba, true);
4327} 4358}
4328 4359
4360static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
4361 u32 reg)
4362{
4363 reg_hist->reg[reg_hist->pos] = reg;
4364 reg_hist->tstamp[reg_hist->pos] = ktime_get();
4365 reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
4366}
4367
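
ufshcd_update_reg_hist() generalises the old UIC-only helper: every tracked event class now shares one fixed-length ring that silently overwrites its oldest entry. A stand-alone model of the ring, including the oldest-first dump done by ufshcd_print_err_hist() (userspace C; ktime_get() replaced by a counter):

#include <stdio.h>

#define HIST_LEN 8

struct err_hist {
	int pos;
	unsigned int reg[HIST_LEN];
	unsigned long tstamp[HIST_LEN];
};

static unsigned long fake_clock;

static void update_hist(struct err_hist *h, unsigned int reg)
{
	h->reg[h->pos] = reg;
	h->tstamp[h->pos] = fake_clock++;
	h->pos = (h->pos + 1) % HIST_LEN;	/* wrap: oldest is overwritten */
}

int main(void)
{
	struct err_hist h = { 0 };

	for (unsigned int i = 1; i <= 10; i++)	/* 10 events into 8 slots */
		update_hist(&h, i);

	/* dump oldest-first, as ufshcd_print_err_hist() does */
	for (int i = 0; i < HIST_LEN; i++) {
		int p = (i + h.pos) % HIST_LEN;

		if (h.reg[p])
			printf("[%d] = 0x%x at %lu\n", p, h.reg[p], h.tstamp[p]);
	}
	return 0;
}
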
4329/** 4368/**
4330 * ufshcd_link_startup - Initialize unipro link startup 4369 * ufshcd_link_startup - Initialize unipro link startup
4331 * @hba: per adapter instance 4370 * @hba: per adapter instance
@@ -4353,6 +4392,8 @@ link_startup:
4353 4392
4354 /* check if device is detected by inter-connect layer */ 4393 /* check if device is detected by inter-connect layer */
4355 if (!ret && !ufshcd_is_device_present(hba)) { 4394 if (!ret && !ufshcd_is_device_present(hba)) {
4395 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4396 0);
4356 dev_err(hba->dev, "%s: Device not present\n", __func__); 4397 dev_err(hba->dev, "%s: Device not present\n", __func__);
4357 ret = -ENXIO; 4398 ret = -ENXIO;
4358 goto out; 4399 goto out;
@@ -4363,13 +4404,19 @@ link_startup:
4363 * but we can't be sure if the link is up until link startup 4404 * but we can't be sure if the link is up until link startup
4364 * succeeds. So reset the local Uni-Pro and try again. 4405 * succeeds. So reset the local Uni-Pro and try again.
4365 */ 4406 */
4366 if (ret && ufshcd_hba_enable(hba)) 4407 if (ret && ufshcd_hba_enable(hba)) {
4408 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4409 (u32)ret);
4367 goto out; 4410 goto out;
4411 }
4368 } while (ret && retries--); 4412 } while (ret && retries--);
4369 4413
4370 if (ret) 4414 if (ret) {
4371 /* failed to get the link up... retire */ 4415 /* failed to get the link up... retire */
4416 ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
4417 (u32)ret);
4372 goto out; 4418 goto out;
4419 }
4373 4420
4374 if (link_startup_again) { 4421 if (link_startup_again) {
4375 link_startup_again = false; 4422 link_startup_again = false;
@@ -5345,14 +5392,6 @@ out:
5345 pm_runtime_put_sync(hba->dev); 5392 pm_runtime_put_sync(hba->dev);
5346} 5393}
5347 5394
5348static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5349 u32 reg)
5350{
5351 reg_hist->reg[reg_hist->pos] = reg;
5352 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5353 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5354}
5355
5356/** 5395/**
5357 * ufshcd_update_uic_error - check and set fatal UIC error flags. 5396 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5358 * @hba: per-adapter instance 5397 * @hba: per-adapter instance
@@ -5371,13 +5410,13 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
5371 * must be checked but this error is handled separately. 5410 * must be checked but this error is handled separately.
5372 */ 5411 */
5373 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__); 5412 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5374 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg); 5413 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
5375 } 5414 }
5376 5415
5377 /* PA_INIT_ERROR is fatal and needs UIC reset */ 5416 /* PA_INIT_ERROR is fatal and needs UIC reset */
5378 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); 5417 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5379 if (reg) 5418 if (reg)
5380 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg); 5419 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
5381 5420
5382 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) 5421 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5383 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; 5422 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
@@ -5393,19 +5432,19 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
5393 /* UIC NL/TL/DME errors needs software retry */ 5432 /* UIC NL/TL/DME errors needs software retry */
5394 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); 5433 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5395 if (reg) { 5434 if (reg) {
5396 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg); 5435 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
5397 hba->uic_error |= UFSHCD_UIC_NL_ERROR; 5436 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5398 } 5437 }
5399 5438
5400 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); 5439 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5401 if (reg) { 5440 if (reg) {
5402 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg); 5441 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
5403 hba->uic_error |= UFSHCD_UIC_TL_ERROR; 5442 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5404 } 5443 }
5405 5444
5406 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); 5445 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5407 if (reg) { 5446 if (reg) {
5408 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg); 5447 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
5409 hba->uic_error |= UFSHCD_UIC_DME_ERROR; 5448 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5410 } 5449 }
5411 5450
@@ -5438,8 +5477,10 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
5438{ 5477{
5439 bool queue_eh_work = false; 5478 bool queue_eh_work = false;
5440 5479
5441 if (hba->errors & INT_FATAL_ERRORS) 5480 if (hba->errors & INT_FATAL_ERRORS) {
5481 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
5442 queue_eh_work = true; 5482 queue_eh_work = true;
5483 }
5443 5484
5444 if (hba->errors & UIC_ERROR) { 5485 if (hba->errors & UIC_ERROR) {
5445 hba->uic_error = 0; 5486 hba->uic_error = 0;
@@ -5454,6 +5495,8 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
5454 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? 5495 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5455 "Enter" : "Exit", 5496 "Enter" : "Exit",
5456 hba->errors, ufshcd_get_upmcrs(hba)); 5497 hba->errors, ufshcd_get_upmcrs(hba));
5498 ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
5499 hba->errors);
5457 queue_eh_work = true; 5500 queue_eh_work = true;
5458 } 5501 }
5459 5502
@@ -5652,13 +5695,12 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5652 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq)); 5695 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
5653 5696
5654 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete"); 5697 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5655
5656 spin_lock_irqsave(hba->host->host_lock, flags);
5657 __clear_bit(free_slot, &hba->outstanding_tasks);
5658 spin_unlock_irqrestore(hba->host->host_lock, flags);
5659
5660 } 5698 }
5661 5699
5700 spin_lock_irqsave(hba->host->host_lock, flags);
5701 __clear_bit(free_slot, &hba->outstanding_tasks);
5702 spin_unlock_irqrestore(hba->host->host_lock, flags);
5703
5662 clear_bit(free_slot, &hba->tm_condition); 5704 clear_bit(free_slot, &hba->tm_condition);
5663 ufshcd_put_tm_slot(hba, free_slot); 5705 ufshcd_put_tm_slot(hba, free_slot);
5664 wake_up(&hba->tm_tag_wq); 5706 wake_up(&hba->tm_tag_wq);
@@ -5941,6 +5983,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5941 5983
5942out: 5984out:
5943 hba->req_abort_count = 0; 5985 hba->req_abort_count = 0;
5986 ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
5944 if (!err) { 5987 if (!err) {
5945 err = SUCCESS; 5988 err = SUCCESS;
5946 } else { 5989 } else {
@@ -6034,6 +6077,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
6034 */ 6077 */
6035 scsi_print_command(hba->lrb[tag].cmd); 6078 scsi_print_command(hba->lrb[tag].cmd);
6036 if (!hba->req_abort_count) { 6079 if (!hba->req_abort_count) {
6080 ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
6037 ufshcd_print_host_regs(hba); 6081 ufshcd_print_host_regs(hba);
6038 ufshcd_print_host_state(hba); 6082 ufshcd_print_host_state(hba);
6039 ufshcd_print_pwr_info(hba); 6083 ufshcd_print_pwr_info(hba);
@@ -6169,7 +6213,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6169out: 6213out:
6170 if (err) 6214 if (err)
6171 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); 6215 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6172 6216 ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
6173 return err; 6217 return err;
6174} 6218}
6175 6219
@@ -6189,6 +6233,9 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6189 int retries = MAX_HOST_RESET_RETRIES; 6233 int retries = MAX_HOST_RESET_RETRIES;
6190 6234
6191 do { 6235 do {
6236 /* Reset the attached device */
6237 ufshcd_vops_device_reset(hba);
6238
6192 err = ufshcd_host_reset_and_restore(hba); 6239 err = ufshcd_host_reset_and_restore(hba);
6193 } while (err && --retries); 6240 } while (err && --retries);
6194 6241
@@ -6453,6 +6500,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
6453 u8 model_index; 6500 u8 model_index;
6454 u8 *desc_buf; 6501 u8 *desc_buf;
6455 6502
6503 if (!dev_desc)
6504 return -EINVAL;
6505
6456 buff_len = max_t(size_t, hba->desc_size.dev_desc, 6506 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6457 QUERY_DESC_MAX_SIZE + 1); 6507 QUERY_DESC_MAX_SIZE + 1);
6458 desc_buf = kmalloc(buff_len, GFP_KERNEL); 6508 desc_buf = kmalloc(buff_len, GFP_KERNEL);
@@ -6476,31 +6526,31 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
6476 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; 6526 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6477 6527
6478 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 6528 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6479 6529 err = ufshcd_read_string_desc(hba, model_index,
6480 /* Zero-pad entire buffer for string termination. */ 6530 &dev_desc->model, SD_ASCII_STD);
6481 memset(desc_buf, 0, buff_len); 6531 if (err < 0) {
6482
6483 err = ufshcd_read_string_desc(hba, model_index, desc_buf,
6484 QUERY_DESC_MAX_SIZE, true/*ASCII*/);
6485 if (err) {
6486 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", 6532 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6487 __func__, err); 6533 __func__, err);
6488 goto out; 6534 goto out;
6489 } 6535 }
6490 6536
6491 desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; 6537 /*
6492 strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE), 6538 * ufshcd_read_string_desc returns size of the string
6493 min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], 6539 * reset the error value
6494 MAX_MODEL_LEN)); 6540 */
6495 6541 err = 0;
6496 /* Null terminate the model string */
6497 dev_desc->model[MAX_MODEL_LEN] = '\0';
6498 6542
6499out: 6543out:
6500 kfree(desc_buf); 6544 kfree(desc_buf);
6501 return err; 6545 return err;
6502} 6546}
6503 6547
6548static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
6549{
6550 kfree(dev_desc->model);
6551 dev_desc->model = NULL;
6552}
6553
6504static void ufs_fixup_device_setup(struct ufs_hba *hba, 6554static void ufs_fixup_device_setup(struct ufs_hba *hba,
6505 struct ufs_dev_desc *dev_desc) 6555 struct ufs_dev_desc *dev_desc)
6506{ 6556{
@@ -6509,8 +6559,9 @@ static void ufs_fixup_device_setup(struct ufs_hba *hba,
6509 for (f = ufs_fixups; f->quirk; f++) { 6559 for (f = ufs_fixups; f->quirk; f++) {
6510 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid || 6560 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6511 f->card.wmanufacturerid == UFS_ANY_VENDOR) && 6561 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6512 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) || 6562 ((dev_desc->model &&
6513 !strcmp(f->card.model, UFS_ANY_MODEL))) 6563 STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
6564 !strcmp(f->card.model, UFS_ANY_MODEL)))
6514 hba->dev_quirks |= f->quirk; 6565 hba->dev_quirks |= f->quirk;
6515 } 6566 }
6516} 6567}
@@ -6681,17 +6732,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6681 6732
6682static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) 6733static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6683{ 6734{
6684 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6685
6686 hba->ufs_stats.hibern8_exit_cnt = 0; 6735 hba->ufs_stats.hibern8_exit_cnt = 0;
6687 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); 6736 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6688
6689 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6690 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6691 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6692 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6693 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6694
6695 hba->req_abort_count = 0; 6737 hba->req_abort_count = 0;
6696} 6738}
6697 6739
@@ -6861,6 +6903,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
6861 } 6903 }
6862 6904
6863 ufs_fixup_device_setup(hba, &card); 6905 ufs_fixup_device_setup(hba, &card);
6906 ufs_put_device_desc(&card);
6907
6864 ufshcd_tune_unipro_params(hba); 6908 ufshcd_tune_unipro_params(hba);
6865 6909
6866 /* UFS device is also active now */ 6910 /* UFS device is also active now */
@@ -7823,6 +7867,8 @@ enable_gating:
7823 ufshcd_release(hba); 7867 ufshcd_release(hba);
7824out: 7868out:
7825 hba->pm_op_in_progress = 0; 7869 hba->pm_op_in_progress = 0;
7870 if (ret)
7871 ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
7826 return ret; 7872 return ret;
7827} 7873}
7828 7874
@@ -7925,6 +7971,8 @@ disable_irq_and_vops_clks:
7925 ufshcd_setup_clocks(hba, false); 7971 ufshcd_setup_clocks(hba, false);
7926out: 7972out:
7927 hba->pm_op_in_progress = 0; 7973 hba->pm_op_in_progress = 0;
7974 if (ret)
7975 ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
7928 return ret; 7976 return ret;
7929} 7977}
7930 7978
@@ -8324,6 +8372,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8324 goto exit_gating; 8372 goto exit_gating;
8325 } 8373 }
8326 8374
8375 /* Reset the attached device */
8376 ufshcd_vops_device_reset(hba);
8377
8327 /* Host controller enable */ 8378 /* Host controller enable */
8328 err = ufshcd_hba_enable(hba); 8379 err = ufshcd_hba_enable(hba);
8329 if (err) { 8380 if (err) {
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 994d73d03207..c94cfda52829 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -298,6 +298,7 @@ struct ufs_pwr_mode_info {
298 * @resume: called during host controller PM callback 298 * @resume: called during host controller PM callback
299 * @dbg_register_dump: used to dump controller debug information 299 * @dbg_register_dump: used to dump controller debug information
300 * @phy_initialization: used to initialize phys 300 * @phy_initialization: used to initialize phys
301 * @device_reset: called to issue a reset pulse on the UFS device
301 */ 302 */
302struct ufs_hba_variant_ops { 303struct ufs_hba_variant_ops {
303 const char *name; 304 const char *name;
@@ -326,6 +327,7 @@ struct ufs_hba_variant_ops {
326 int (*resume)(struct ufs_hba *, enum ufs_pm_op); 327 int (*resume)(struct ufs_hba *, enum ufs_pm_op);
327 void (*dbg_register_dump)(struct ufs_hba *hba); 328 void (*dbg_register_dump)(struct ufs_hba *hba);
328 int (*phy_initialization)(struct ufs_hba *); 329 int (*phy_initialization)(struct ufs_hba *);
330 void (*device_reset)(struct ufs_hba *hba);
329}; 331};
330 332
331/* clock gating state */ 333/* clock gating state */
@@ -412,17 +414,17 @@ struct ufs_init_prefetch {
412 u32 icc_level; 414 u32 icc_level;
413}; 415};
414 416
415#define UIC_ERR_REG_HIST_LENGTH 8 417#define UFS_ERR_REG_HIST_LENGTH 8
416/** 418/**
417 * struct ufs_uic_err_reg_hist - keeps history of uic errors 419 * struct ufs_err_reg_hist - keeps history of errors
418 * @pos: index to indicate cyclic buffer position 420 * @pos: index to indicate cyclic buffer position
419 * @reg: cyclic buffer for registers value 421 * @reg: cyclic buffer for registers value
420 * @tstamp: cyclic buffer for time stamp 422 * @tstamp: cyclic buffer for time stamp
421 */ 423 */
422struct ufs_uic_err_reg_hist { 424struct ufs_err_reg_hist {
423 int pos; 425 int pos;
424 u32 reg[UIC_ERR_REG_HIST_LENGTH]; 426 u32 reg[UFS_ERR_REG_HIST_LENGTH];
425 ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH]; 427 ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
426}; 428};
427 429
428/** 430/**
@@ -436,15 +438,37 @@ struct ufs_uic_err_reg_hist {
436 * @nl_err: tracks nl-uic errors 438 * @nl_err: tracks nl-uic errors
437 * @tl_err: tracks tl-uic errors 439 * @tl_err: tracks tl-uic errors
438 * @dme_err: tracks dme errors 440 * @dme_err: tracks dme errors
441 * @auto_hibern8_err: tracks auto-hibernate errors
442 * @fatal_err: tracks fatal errors
 443 * @link_startup_err: tracks link-startup errors
444 * @resume_err: tracks resume errors
445 * @suspend_err: tracks suspend errors
446 * @dev_reset: tracks device reset events
447 * @host_reset: tracks host reset events
 448 * @task_abort: tracks task abort events
439 */ 449 */
440struct ufs_stats { 450struct ufs_stats {
441 u32 hibern8_exit_cnt; 451 u32 hibern8_exit_cnt;
442 ktime_t last_hibern8_exit_tstamp; 452 ktime_t last_hibern8_exit_tstamp;
443 struct ufs_uic_err_reg_hist pa_err; 453
444 struct ufs_uic_err_reg_hist dl_err; 454 /* uic specific errors */
445 struct ufs_uic_err_reg_hist nl_err; 455 struct ufs_err_reg_hist pa_err;
446 struct ufs_uic_err_reg_hist tl_err; 456 struct ufs_err_reg_hist dl_err;
447 struct ufs_uic_err_reg_hist dme_err; 457 struct ufs_err_reg_hist nl_err;
458 struct ufs_err_reg_hist tl_err;
459 struct ufs_err_reg_hist dme_err;
460
461 /* fatal errors */
462 struct ufs_err_reg_hist auto_hibern8_err;
463 struct ufs_err_reg_hist fatal_err;
464 struct ufs_err_reg_hist link_startup_err;
465 struct ufs_err_reg_hist resume_err;
466 struct ufs_err_reg_hist suspend_err;
467
468 /* abnormal events */
469 struct ufs_err_reg_hist dev_reset;
470 struct ufs_err_reg_hist host_reset;
471 struct ufs_err_reg_hist task_abort;
448}; 472};
449 473
450/** 474/**
@@ -891,8 +915,11 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
891 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val); 915 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
892int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, 916int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
893 enum flag_idn idn, bool *flag_res); 917 enum flag_idn idn, bool *flag_res);
894int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, 918
895 u8 *buf, u32 size, bool ascii); 919#define SD_ASCII_STD true
920#define SD_RAW false
921int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
922 u8 **buf, bool ascii);
896 923
897int ufshcd_hold(struct ufs_hba *hba, bool async); 924int ufshcd_hold(struct ufs_hba *hba, bool async);
898void ufshcd_release(struct ufs_hba *hba); 925void ufshcd_release(struct ufs_hba *hba);
@@ -1045,6 +1072,12 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
1045 hba->vops->dbg_register_dump(hba); 1072 hba->vops->dbg_register_dump(hba);
1046} 1073}
1047 1074
1075static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
1076{
1077 if (hba->vops && hba->vops->device_reset)
1078 hba->vops->device_reset(hba);
1079}
1080
1048extern struct ufs_pm_lvl_states ufs_pm_lvl_states[]; 1081extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
1049 1082
1050/* 1083/*
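
device_reset joins the variant-ops table as an optional hook: the ufshcd_vops_device_reset() wrapper above turns a missing implementation into a silent no-op, so platforms without a reset line need no stub. The same guarded-dispatch idiom in isolation (invented widget example, plain userspace C):

#include <stdio.h>

struct widget;

struct widget_ops {
	const char *name;
	void (*device_reset)(struct widget *w);	/* optional */
};

struct widget {
	const struct widget_ops *ops;
};

static void widget_device_reset(struct widget *w)
{
	if (w->ops && w->ops->device_reset)	/* guard the optional hook */
		w->ops->device_reset(w);
}

static void my_reset(struct widget *w)
{
	puts("pulse reset line");
}

static const struct widget_ops with_reset = { "a", my_reset };
static const struct widget_ops without_reset = { "b", NULL };

int main(void)
{
	struct widget a = { &with_reset }, b = { &without_reset };

	widget_device_reset(&a);	/* prints */
	widget_device_reset(&b);	/* silently does nothing */
	return 0;
}
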
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 297e1076e571..bfec84aacd90 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -30,6 +30,8 @@
30#include <linux/seqlock.h> 30#include <linux/seqlock.h>
31#include <linux/blk-mq-virtio.h> 31#include <linux/blk-mq-virtio.h>
32 32
33#include "sd.h"
34
33#define VIRTIO_SCSI_MEMPOOL_SZ 64 35#define VIRTIO_SCSI_MEMPOOL_SZ 64
34#define VIRTIO_SCSI_EVENT_LEN 8 36#define VIRTIO_SCSI_EVENT_LEN 8
35#define VIRTIO_SCSI_VQ_BASE 2 37#define VIRTIO_SCSI_VQ_BASE 2
@@ -324,6 +326,36 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
324 scsi_device_put(sdev); 326 scsi_device_put(sdev);
325} 327}
326 328
329static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
330{
331 struct scsi_device *sdev;
332 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
333 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
334 int result, inquiry_len, inq_result_len = 256;
335 char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
336
337 shost_for_each_device(sdev, shost) {
338 inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
339
340 memset(scsi_cmd, 0, sizeof(scsi_cmd));
341 scsi_cmd[0] = INQUIRY;
342 scsi_cmd[4] = (unsigned char) inquiry_len;
343
344 memset(inq_result, 0, inq_result_len);
345
346 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
347 inq_result, inquiry_len, NULL,
348 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
349
350 if (result == 0 && inq_result[0] >> 5) {
351 /* PQ indicates the LUN is not attached */
352 scsi_remove_device(sdev);
353 }
354 }
355
356 kfree(inq_result);
357}
358
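
The PQ test above reads byte 0 of the INQUIRY data: per SPC, bits 7..5 are the peripheral qualifier and 0 means the LUN is attached, so any non-zero qualifier after a successful INQUIRY marks the device for removal. (Note the kmalloc() result is used unchecked here; a hardened variant would return early when inq_result is NULL.) Stand-alone decoding of that byte:

#include <stdio.h>

/* INQUIRY byte 0 (SPC): bits 7..5 = peripheral qualifier,
 * bits 4..0 = peripheral device type. */
static void decode_inquiry_byte0(unsigned char b0)
{
	unsigned int pq  = b0 >> 5;
	unsigned int pdt = b0 & 0x1f;

	printf("PQ=%u (%s), PDT=0x%02x\n", pq,
	       pq == 0 ? "attached" : "not attached/supported", pdt);
}

int main(void)
{
	decode_inquiry_byte0(0x00);	/* PQ=0: disk present */
	decode_inquiry_byte0(0x7f);	/* PQ=3, PDT=0x1f: no device */
	return 0;
}
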
327static void virtscsi_handle_event(struct work_struct *work) 359static void virtscsi_handle_event(struct work_struct *work)
328{ 360{
329 struct virtio_scsi_event_node *event_node = 361 struct virtio_scsi_event_node *event_node =
@@ -335,6 +367,7 @@ static void virtscsi_handle_event(struct work_struct *work)
335 cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) { 367 cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
336 event->event &= ~cpu_to_virtio32(vscsi->vdev, 368 event->event &= ~cpu_to_virtio32(vscsi->vdev,
337 VIRTIO_SCSI_T_EVENTS_MISSED); 369 VIRTIO_SCSI_T_EVENTS_MISSED);
370 virtscsi_rescan_hotunplug(vscsi);
338 scsi_scan_host(virtio_scsi_host(vscsi->vdev)); 371 scsi_scan_host(virtio_scsi_host(vscsi->vdev));
339 } 372 }
340 373
@@ -369,14 +402,7 @@ static void virtscsi_event_done(struct virtqueue *vq)
369 virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event); 402 virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
370}; 403};
371 404
372/** 405static int __virtscsi_add_cmd(struct virtqueue *vq,
373 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
374 * @vq : the struct virtqueue we're talking about
375 * @cmd : command structure
376 * @req_size : size of the request buffer
377 * @resp_size : size of the response buffer
378 */
379static int virtscsi_add_cmd(struct virtqueue *vq,
380 struct virtio_scsi_cmd *cmd, 406 struct virtio_scsi_cmd *cmd,
381 size_t req_size, size_t resp_size) 407 size_t req_size, size_t resp_size)
382{ 408{
@@ -421,17 +447,39 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
421 return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); 447 return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
422} 448}
423 449
424static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, 450static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
451{
452 bool needs_kick;
453 unsigned long flags;
454
455 spin_lock_irqsave(&vq->vq_lock, flags);
456 needs_kick = virtqueue_kick_prepare(vq->vq);
457 spin_unlock_irqrestore(&vq->vq_lock, flags);
458
459 if (needs_kick)
460 virtqueue_notify(vq->vq);
461}
462
463/**
464 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
465 * @vq : the struct virtqueue we're talking about
466 * @cmd : command structure
467 * @req_size : size of the request buffer
468 * @resp_size : size of the response buffer
469 * @kick : whether to kick the virtqueue immediately
470 */
471static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
425 struct virtio_scsi_cmd *cmd, 472 struct virtio_scsi_cmd *cmd,
426 size_t req_size, size_t resp_size) 473 size_t req_size, size_t resp_size,
474 bool kick)
427{ 475{
428 unsigned long flags; 476 unsigned long flags;
429 int err; 477 int err;
430 bool needs_kick = false; 478 bool needs_kick = false;
431 479
432 spin_lock_irqsave(&vq->vq_lock, flags); 480 spin_lock_irqsave(&vq->vq_lock, flags);
433 err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size); 481 err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
434 if (!err) 482 if (!err && kick)
435 needs_kick = virtqueue_kick_prepare(vq->vq); 483 needs_kick = virtqueue_kick_prepare(vq->vq);
436 484
437 spin_unlock_irqrestore(&vq->vq_lock, flags); 485 spin_unlock_irqrestore(&vq->vq_lock, flags);
@@ -496,6 +544,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *shost,
496 struct virtio_scsi *vscsi = shost_priv(shost); 544 struct virtio_scsi *vscsi = shost_priv(shost);
497 struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc); 545 struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
498 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); 546 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
547 bool kick;
499 unsigned long flags; 548 unsigned long flags;
500 int req_size; 549 int req_size;
501 int ret; 550 int ret;
@@ -525,7 +574,8 @@ static int virtscsi_queuecommand(struct Scsi_Host *shost,
525 req_size = sizeof(cmd->req.cmd); 574 req_size = sizeof(cmd->req.cmd);
526 } 575 }
527 576
528 ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); 577 kick = (sc->flags & SCMD_LAST) != 0;
578 ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
529 if (ret == -EIO) { 579 if (ret == -EIO) {
530 cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; 580 cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
531 spin_lock_irqsave(&req_vq->vq_lock, flags); 581 spin_lock_irqsave(&req_vq->vq_lock, flags);
@@ -543,8 +593,8 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
543 int ret = FAILED; 593 int ret = FAILED;
544 594
545 cmd->comp = &comp; 595 cmd->comp = &comp;
546 if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, 596 if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
547 sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0) 597 sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
548 goto out; 598 goto out;
549 599
550 wait_for_completion(&comp); 600 wait_for_completion(&comp);
@@ -658,6 +708,13 @@ static int virtscsi_map_queues(struct Scsi_Host *shost)
658 return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2); 708 return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
659} 709}
660 710
711static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
712{
713 struct virtio_scsi *vscsi = shost_priv(shost);
714
715 virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
716}
717
661/* 718/*
662 * The host guarantees to respond to each command, although I/O 719 * The host guarantees to respond to each command, although I/O
663 * latencies might be higher than on bare metal. Reset the timer 720 * latencies might be higher than on bare metal. Reset the timer
@@ -675,6 +732,7 @@ static struct scsi_host_template virtscsi_host_template = {
675 .this_id = -1, 732 .this_id = -1,
676 .cmd_size = sizeof(struct virtio_scsi_cmd), 733 .cmd_size = sizeof(struct virtio_scsi_cmd),
677 .queuecommand = virtscsi_queuecommand, 734 .queuecommand = virtscsi_queuecommand,
735 .commit_rqs = virtscsi_commit_rqs,
678 .change_queue_depth = virtscsi_change_queue_depth, 736 .change_queue_depth = virtscsi_change_queue_depth,
679 .eh_abort_handler = virtscsi_abort, 737 .eh_abort_handler = virtscsi_abort,
680 .eh_device_reset_handler = virtscsi_device_reset, 738 .eh_device_reset_handler = virtscsi_device_reset,
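
Together with SCMD_LAST and commit_rqs, this is the whole batching protocol mentioned in the pull message: queuecommand() rings the doorbell only when the midlayer marks a command as the last of a batch, and commit_rqs() is the catch-up kick for a batch cut short by an error or requeue. The driver-side shape, reduced to a skeleton (my_enqueue/my_kick_hw_queue are hypothetical stand-ins for the virtqueue plumbing above):

static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	bool kick = (sc->flags & SCMD_LAST) != 0;

	/* always enqueue; only ring the doorbell on the batch boundary */
	return my_enqueue(shost, sc, kick);
}

static void my_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	/*
	 * blk-mq dispatched part of a batch but the command carrying
	 * SCMD_LAST was never issued (error/requeue), so flush whatever
	 * is already sitting in hardware queue hwq.
	 */
	my_kick_hw_queue(shost, hwq);
}
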
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index fb7b289fa09f..f81046f0e68a 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1854,6 +1854,7 @@ round_4(unsigned int x)
1854 case 1: --x; 1854 case 1: --x;
1855 break; 1855 break;
1856 case 2: ++x; 1856 case 2: ++x;
1857 /* fall through */
1857 case 3: ++x; 1858 case 3: ++x;
1858 } 1859 }
1859 return x; 1860 return x;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 661bb9358364..35be1be87d2a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1712,6 +1712,24 @@ static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1712 return 0; 1712 return 0;
1713} 1713}
1714 1714
1715static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
1716{
1717 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1718
1719 if (!tcmu_kern_cmd_reply_supported)
1720 return;
1721
1722 if (udev->nl_reply_supported <= 0)
1723 return;
1724
1725 mutex_lock(&tcmu_nl_cmd_mutex);
1726
1727 list_del(&nl_cmd->nl_list);
1728 memset(nl_cmd, 0, sizeof(*nl_cmd));
1729
1730 mutex_unlock(&tcmu_nl_cmd_mutex);
1731}
1732
1715static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) 1733static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1716{ 1734{
1717 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1735 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
@@ -1792,6 +1810,8 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
1792 if (ret == 0 || 1810 if (ret == 0 ||
1793 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE)) 1811 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
1794 return tcmu_wait_genl_cmd_reply(udev); 1812 return tcmu_wait_genl_cmd_reply(udev);
1813 else
1814 tcmu_destroy_genl_cmd_reply(udev);
1795 1815
1796 return ret; 1816 return ret;
1797} 1817}
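
The fix makes the error path symmetric with tcmu_init_genl_cmd_reply(): a nl_cmd left on the global list after a failed netlink send would otherwise be matched against a reply that can never arrive. The shape of that pairing (hypothetical helper names standing in for the tcmu functions above):

static int send_with_reply(struct tcmu_dev *udev, int cmd)
{
	int ret;

	ret = init_reply_slot(udev, cmd);	/* puts nl_cmd on the list */
	if (ret)
		return ret;

	ret = send_event(udev, cmd);
	if (ret == 0)
		return wait_for_reply(udev);	/* slot consumed by reply */

	destroy_reply_slot(udev);		/* send failed: unlink it */
	return ret;
}
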
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 98d904961b33..10f81629b9ce 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -6,6 +6,8 @@
6#ifndef _NVME_FC_DRIVER_H 6#ifndef _NVME_FC_DRIVER_H
7#define _NVME_FC_DRIVER_H 1 7#define _NVME_FC_DRIVER_H 1
8 8
9#include <linux/scatterlist.h>
10
9 11
10/* 12/*
11 * ********************** LLDD FC-NVME Host API ******************** 13 * ********************** LLDD FC-NVME Host API ********************
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 76ed5e4acd38..91bd749a02f7 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -57,6 +57,7 @@ struct scsi_pointer {
57#define SCMD_TAGGED (1 << 0) 57#define SCMD_TAGGED (1 << 0)
58#define SCMD_UNCHECKED_ISA_DMA (1 << 1) 58#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
59#define SCMD_INITIALIZED (1 << 2) 59#define SCMD_INITIALIZED (1 << 2)
60#define SCMD_LAST (1 << 3)
60/* flags preserved across unprep / reprep */ 61/* flags preserved across unprep / reprep */
61#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED) 62#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
62 63
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index e03bd9d41fa8..7b196d234626 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -6,8 +6,6 @@ struct scsi_cmnd;
6struct scsi_device; 6struct scsi_device;
7struct scsi_sense_hdr; 7struct scsi_sense_hdr;
8 8
9#define SCSI_LOG_BUFSIZE 128
10
11extern void scsi_print_command(struct scsi_cmnd *); 9extern void scsi_print_command(struct scsi_cmnd *);
12extern size_t __scsi_format_command(char *, size_t, 10extern size_t __scsi_format_command(char *, size_t,
13 const unsigned char *, size_t); 11 const unsigned char *, size_t);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index cc139dbd71e5..31e0d6ca1eba 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -80,8 +80,10 @@ struct scsi_host_template {
80 * command block to the LLDD. When the driver finished 80 * command block to the LLDD. When the driver finished
81 * processing the command the done callback is invoked. 81 * processing the command the done callback is invoked.
82 * 82 *
83 * If queuecommand returns 0, then the HBA has accepted the 83 * If queuecommand returns 0, then the driver has accepted the
84 * command. The done() function must be called on the command 84 * command. It must also push it to the HBA if the scsi_cmnd
 85 * flag SCMD_LAST is set, or if the driver does not implement
 86 * commit_rqs. The done() function must be called on the command
85 * when the driver has finished with it. (you may call done on the 87 * when the driver has finished with it. (you may call done on the
86 * command before queuecommand returns, but in this case you 88 * command before queuecommand returns, but in this case you
87 * *must* return 0 from queuecommand). 89 * *must* return 0 from queuecommand).
@@ -110,6 +112,16 @@ struct scsi_host_template {
110 int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); 112 int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
111 113
112 /* 114 /*
115 * The commit_rqs function is used to trigger a hardware
116 * doorbell after some requests have been queued with
117 * queuecommand, when an error is encountered before sending
118 * the request with SCMD_LAST set.
119 *
120 * STATUS: OPTIONAL
121 */
122 void (*commit_rqs)(struct Scsi_Host *, u16);
123
124 /*
113 * This is an error handling strategy routine. You don't need to 125 * This is an error handling strategy routine. You don't need to
114 * define one of these if you don't want to - there is a default 126 * define one of these if you don't want to - there is a default
115 * routine that is present that should work in most cases. For those 127 * routine that is present that should work in most cases. For those
diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
index 52f32a60d056..3ae65e93235c 100644
--- a/include/uapi/scsi/scsi_bsg_fc.h
+++ b/include/uapi/scsi/scsi_bsg_fc.h
@@ -8,6 +8,8 @@
8#ifndef SCSI_BSG_FC_H 8#ifndef SCSI_BSG_FC_H
9#define SCSI_BSG_FC_H 9#define SCSI_BSG_FC_H
10 10
11#include <linux/types.h>
12
11/* 13/*
12 * This file intended to be included by both kernel and user space 14 * This file intended to be included by both kernel and user space
13 */ 15 */
@@ -66,10 +68,10 @@
66 * with the transport upon completion of the login. 68 * with the transport upon completion of the login.
67 */ 69 */
68struct fc_bsg_host_add_rport { 70struct fc_bsg_host_add_rport {
69 uint8_t reserved; 71 __u8 reserved;
70 72
 71 /* FC Address Identifier of the remote port to login to */ 73 /* FC Address Identifier of the remote port to login to */
72 uint8_t port_id[3]; 74 __u8 port_id[3];
73}; 75};
74 76
75/* Response: 77/* Response:
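
The uint8_t/uint32_t to __u8/__u32 conversion running through this file is the standard rule for UAPI headers: they must compile in userspace without assuming any libc header, so fixed-width types come from linux/types.h (hence the new include at the top of the file). Minimal shape of a UAPI-safe struct:

/* A userspace consumer needs nothing beyond linux/types.h. */
#include <linux/types.h>

struct example_bsg_request {
	__u8  reserved;
	__u8  port_id[3];	/* same layout on every ABI */
	__u32 status;
};
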
@@ -87,10 +89,10 @@ struct fc_bsg_host_add_rport {
87 * remain logged in with the remote port. 89 * remain logged in with the remote port.
88 */ 90 */
89struct fc_bsg_host_del_rport { 91struct fc_bsg_host_del_rport {
90 uint8_t reserved; 92 __u8 reserved;
91 93
 92 /* FC Address Identifier of the remote port to logout of */ 94 /* FC Address Identifier of the remote port to logout of */
93 uint8_t port_id[3]; 95 __u8 port_id[3];
94}; 96};
95 97
96/* Response: 98/* Response:
@@ -111,10 +113,10 @@ struct fc_bsg_host_els {
111 * ELS Command Code being sent (must be the same as byte 0 113 * ELS Command Code being sent (must be the same as byte 0
112 * of the payload) 114 * of the payload)
113 */ 115 */
114 uint8_t command_code; 116 __u8 command_code;
115 117
 116 /* FC Address Identifier of the remote port to send the ELS to */ 118 /* FC Address Identifier of the remote port to send the ELS to */
117 uint8_t port_id[3]; 119 __u8 port_id[3];
118}; 120};
119 121
120/* Response: 122/* Response:
@@ -151,14 +153,14 @@ struct fc_bsg_ctels_reply {
 151 * Note: x_RJT/BSY status will indicate that the rjt_data field 153 * Note: x_RJT/BSY status will indicate that the rjt_data field
152 * is valid and contains the reason/explanation values. 154 * is valid and contains the reason/explanation values.
153 */ 155 */
154 uint32_t status; /* See FC_CTELS_STATUS_xxx */ 156 __u32 status; /* See FC_CTELS_STATUS_xxx */
155 157
156 /* valid if status is not FC_CTELS_STATUS_OK */ 158 /* valid if status is not FC_CTELS_STATUS_OK */
157 struct { 159 struct {
158 uint8_t action; /* fragment_id for CT REJECT */ 160 __u8 action; /* fragment_id for CT REJECT */
159 uint8_t reason_code; 161 __u8 reason_code;
160 uint8_t reason_explanation; 162 __u8 reason_explanation;
161 uint8_t vendor_unique; 163 __u8 vendor_unique;
162 } rjt_data; 164 } rjt_data;
163}; 165};
164 166
@@ -174,17 +176,17 @@ struct fc_bsg_ctels_reply {
  * and whether to tear it down after the request.
  */
 struct fc_bsg_host_ct {
-	uint8_t		reserved;
+	__u8	reserved;
 
 	/* FC Address Identier of the remote port to send the ELS to */
-	uint8_t		port_id[3];
+	__u8	port_id[3];
 
 	/*
 	 * We need words 0-2 of the generic preamble for the LLD's
 	 */
-	uint32_t	preamble_word0;	/* revision & IN_ID */
-	uint32_t	preamble_word1;	/* GS_Type, GS_SubType, Options, Rsvd */
-	uint32_t	preamble_word2;	/* Cmd Code, Max Size */
+	__u32	preamble_word0;	/* revision & IN_ID */
+	__u32	preamble_word1;	/* GS_Type, GS_SubType, Options, Rsvd */
+	__u32	preamble_word2;	/* Cmd Code, Max Size */
 
 };
 /* Response:
@@ -204,17 +206,17 @@ struct fc_bsg_host_vendor {
 	 * Identifies the vendor that the message is formatted for. This
 	 * should be the recipient of the message.
 	 */
-	uint64_t vendor_id;
+	__u64 vendor_id;
 
 	/* start of vendor command area */
-	uint32_t vendor_cmd[0];
+	__u32 vendor_cmd[0];
 };
 
 /* Response:
  */
 struct fc_bsg_host_vendor_reply {
 	/* start of vendor response area */
-	uint32_t vendor_rsp[0];
+	__u32 vendor_rsp[0];
 };
 
 
@@ -233,7 +235,7 @@ struct fc_bsg_rport_els {
 	 * ELS Command Code being sent (must be the same as
 	 * byte 0 of the payload)
 	 */
-	uint8_t els_code;
+	__u8 els_code;
 };
 
 /* Response:
@@ -251,9 +253,9 @@ struct fc_bsg_rport_ct {
 	/*
 	 * We need words 0-2 of the generic preamble for the LLD's
 	 */
-	uint32_t	preamble_word0;	/* revision & IN_ID */
-	uint32_t	preamble_word1;	/* GS_Type, GS_SubType, Options, Rsvd */
-	uint32_t	preamble_word2;	/* Cmd Code, Max Size */
+	__u32	preamble_word0;	/* revision & IN_ID */
+	__u32	preamble_word1;	/* GS_Type, GS_SubType, Options, Rsvd */
+	__u32	preamble_word2;	/* Cmd Code, Max Size */
 };
 /* Response:
  *
@@ -265,7 +267,7 @@ struct fc_bsg_rport_ct {
 
 /* request (CDB) structure of the sg_io_v4 */
 struct fc_bsg_request {
-	uint32_t msgcode;
+	__u32 msgcode;
 	union {
 		struct fc_bsg_host_add_rport	h_addrport;
 		struct fc_bsg_host_del_rport	h_delrport;
@@ -289,10 +291,10 @@ struct fc_bsg_reply {
 	 * msg and status fields. The per-msgcode reply structure
 	 * will contain valid data.
 	 */
-	uint32_t result;
+	__u32 result;
 
 	/* If there was reply_payload, how much was recevied ? */
-	uint32_t reply_payload_rcv_len;
+	__u32 reply_payload_rcv_len;
 
 	union {
 		struct fc_bsg_host_vendor_reply	vendor_reply;
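
Since scsi_bsg_fc.h is a UAPI header consumed by userspace, the switch to the __u types from <linux/types.h> keeps its layout identical on both sides of the ABI without dragging <stdint.h> into the kernel build. From userspace these structures are submitted through an sg_io_v4 on the FC host's bsg node. The sketch below shows the shape of such a request; the bsg device path, destination ID, and payload are hypothetical, and the ELS response buffer (din_xferp) is omitted for brevity.

```c
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>			/* SG_IO */
#include <scsi/scsi_bsg_fc.h>

/* Send an ELS frame via a bsg node such as /dev/bsg/fc_host0 (path varies). */
static int send_els(const char *bsg_path, unsigned char *payload, __u32 len)
{
	struct fc_bsg_request req;
	struct fc_bsg_reply rep;
	struct sg_io_v4 io;
	int fd = open(bsg_path, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.msgcode = FC_BSG_HST_ELS_NOLOGIN;
	req.rqst_data.h_els.command_code = payload[0];
	req.rqst_data.h_els.port_id[0] = 0x01;	/* hypothetical D_ID */
	req.rqst_data.h_els.port_id[1] = 0x02;
	req.rqst_data.h_els.port_id[2] = 0x03;

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)&req;		/* CDB area = fc_bsg_request */
	io.request_len = sizeof(req);
	io.dout_xferp = (uintptr_t)payload;	/* ELS payload out */
	io.dout_xfer_len = len;
	io.response = (uintptr_t)&rep;		/* sense area = fc_bsg_reply */
	io.max_response_len = sizeof(rep);

	return ioctl(fd, SG_IO, &io);
}
```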
diff --git a/include/uapi/scsi/scsi_netlink.h b/include/uapi/scsi/scsi_netlink.h
index 5dd382054e45..1b1737c3c9d8 100644
--- a/include/uapi/scsi/scsi_netlink.h
+++ b/include/uapi/scsi/scsi_netlink.h
@@ -26,12 +26,12 @@
 
 /* SCSI_TRANSPORT_MSG event message header */
 struct scsi_nl_hdr {
-	uint8_t version;
-	uint8_t transport;
-	uint16_t magic;
-	uint16_t msgtype;
-	uint16_t msglen;
-} __attribute__((aligned(sizeof(uint64_t))));
+	__u8 version;
+	__u8 transport;
+	__u16 magic;
+	__u16 msgtype;
+	__u16 msglen;
+} __attribute__((aligned(sizeof(__u64))));
 
 /* scsi_nl_hdr->version value */
 #define SCSI_NL_VERSION				1
@@ -75,10 +75,10 @@ struct scsi_nl_hdr {
  */
 struct scsi_nl_host_vendor_msg {
 	struct scsi_nl_hdr snlh;		/* must be 1st element ! */
-	uint64_t vendor_id;
-	uint16_t host_no;
-	uint16_t vmsg_datalen;
-} __attribute__((aligned(sizeof(uint64_t))));
+	__u64 vendor_id;
+	__u16 host_no;
+	__u16 vmsg_datalen;
+} __attribute__((aligned(sizeof(__u64))));
 
 
 /*
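
The __attribute__((aligned(sizeof(__u64)))) on these messages pins their alignment (and hence padding) to 8 bytes regardless of architecture, so kernel and userspace agree on the layout. One way to sanity-check this, assuming a C11 compiler, is with compile-time asserts; the expected sizes below follow directly from the fixed-width members.

```c
#include <assert.h>
#include <stdalign.h>
#include <linux/types.h>
#include <scsi/scsi_netlink.h>

static_assert(sizeof(struct scsi_nl_hdr) == 8,
	      "2 x __u8 + 3 x __u16 pack to 8 bytes");
static_assert(alignof(struct scsi_nl_hdr) == sizeof(__u64),
	      "the aligned attribute forces 8-byte alignment");
static_assert(sizeof(struct scsi_nl_host_vendor_msg) % sizeof(__u64) == 0,
	      "vendor messages are padded to a multiple of 8 bytes");
```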
diff --git a/include/uapi/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h
index a39023579051..7535253f1a96 100644
--- a/include/uapi/scsi/scsi_netlink_fc.h
+++ b/include/uapi/scsi/scsi_netlink_fc.h
@@ -7,6 +7,7 @@
 #ifndef SCSI_NETLINK_FC_H
 #define SCSI_NETLINK_FC_H
 
+#include <linux/types.h>
 #include <scsi/scsi_netlink.h>
 
 /*
@@ -43,14 +44,14 @@
  */
 struct fc_nl_event {
 	struct scsi_nl_hdr snlh;		/* must be 1st element ! */
-	uint64_t seconds;
-	uint64_t vendor_id;
-	uint16_t host_no;
-	uint16_t event_datalen;
-	uint32_t event_num;
-	uint32_t event_code;
-	uint32_t event_data;
-} __attribute__((aligned(sizeof(uint64_t))));
+	__u64 seconds;
+	__u64 vendor_id;
+	__u16 host_no;
+	__u16 event_datalen;
+	__u32 event_num;
+	__u32 event_code;
+	__u32 event_data;
+} __attribute__((aligned(sizeof(__u64))));
 
 
 #endif /* SCSI_NETLINK_FC_H */
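
These fc_nl_event messages are delivered over the SCSI transport netlink family. A minimal userspace sketch of a listener follows, assuming only the NETLINK_SCSITRANSPORT family from <linux/netlink.h> and the group and structure definitions in the two headers above; error handling is pared down to the essentials.

```c
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <scsi/scsi_netlink.h>		/* SCSI_NL_GRP_FC_EVENTS */
#include <scsi/scsi_netlink_fc.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = SCSI_NL_GRP_FC_EVENTS,	/* FC event multicast group */
	};
	char buf[4096];
	int fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_SCSITRANSPORT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (n > 0 && NLMSG_OK(nlh, (unsigned int)n)) {
			struct fc_nl_event *ev = NLMSG_DATA(nlh);

			printf("host%u: event %u code 0x%x\n",
			       ev->host_no, ev->event_num, ev->event_code);
		}
	}
}
```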