author    Linus Torvalds <torvalds@linux-foundation.org>  2018-06-10 16:01:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-06-10 16:01:12 -0400
commit    5f85942c2ea2ed59d8f19c954bbb0f5c1a2ebdd1 (patch)
tree      ffd0c606829178dd0be28c557685203f760438d8 /drivers
parent    0c14e43a42e4e44f70963f8ccf89461290c4e4da (diff)
parent    1b5c2cb196684f1418fe82257a1b0a8cb0aabc9d (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates to the usual drivers: ufs, qedf, mpt3sas, lpfc,
  zfcp, hisi_sas, cxlflash, qla2xxx. In the absence of Nic, we're also
  taking target updates, which are mostly minor except for the tcmu
  refactor.

  The only real core change to worry about is the removal of high page
  bouncing (in sas, storvsc and iscsi). This has been well tested and no
  problems have shown up so far"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (268 commits)
  scsi: lpfc: update driver version to 12.0.0.4
  scsi: lpfc: Fix port initialization failure.
  scsi: lpfc: Fix 16gb hbas failing cq create.
  scsi: lpfc: Fix crash in blk_mq layer when executing modprobe -r lpfc
  scsi: lpfc: correct oversubscription of nvme io requests for an adapter
  scsi: lpfc: Fix MDS diagnostics failure (Rx < Tx)
  scsi: hisi_sas: Mark PHY as in reset for nexus reset
  scsi: hisi_sas: Fix return value when get_free_slot() failed
  scsi: hisi_sas: Terminate STP reject quickly for v2 hw
  scsi: hisi_sas: Add v2 hw force PHY function for internal ATA command
  scsi: hisi_sas: Include TMF elements in struct hisi_sas_slot
  scsi: hisi_sas: Try wait commands before controller reset
  scsi: hisi_sas: Init disks after controller reset
  scsi: hisi_sas: Create a scsi_host_template per HW module
  scsi: hisi_sas: Reset disks when discovered
  scsi: hisi_sas: Add LED feature for v3 hw
  scsi: hisi_sas: Change common allocation mode of device id
  scsi: hisi_sas: change slot index allocation mode
  scsi: hisi_sas: Introduce hisi_sas_phy_set_linkrate()
  scsi: hisi_sas: fix a typo in hisi_sas_task_prep()
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-scsi.c | 12
-rw-r--r--  drivers/hv/ring_buffer.c | 2
-rw-r--r--  drivers/message/fusion/lsi/mpi_cnfg.h | 4
-rw-r--r--  drivers/message/fusion/mptbase.c | 2
-rw-r--r--  drivers/message/fusion/mptfc.c | 2
-rw-r--r--  drivers/message/fusion/mptlan.c | 2
-rw-r--r--  drivers/message/fusion/mptsas.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 18
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 3
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 90
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 21
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 194
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 16
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 11
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h | 22
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 61
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 6
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 141
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 5
-rw-r--r--  drivers/scsi/3w-9xxx.c | 5
-rw-r--r--  drivers/scsi/3w-xxxx.c | 3
-rw-r--r--  drivers/scsi/Kconfig | 14
-rw-r--r--  drivers/scsi/Makefile | 3
-rw-r--r--  drivers/scsi/a100u2w.c | 13
-rw-r--r--  drivers/scsi/am53c974.c | 13
-rw-r--r--  drivers/scsi/cxlflash/Kconfig | 2
-rw-r--r--  drivers/scsi/cxlflash/Makefile | 4
-rw-r--r--  drivers/scsi/cxlflash/backend.h | 55
-rw-r--r--  drivers/scsi/cxlflash/common.h | 12
-rw-r--r--  drivers/scsi/cxlflash/cxl_hw.c | 13
-rw-r--r--  drivers/scsi/cxlflash/lunmgt.c | 4
-rw-r--r--  drivers/scsi/cxlflash/main.c | 97
-rw-r--r--  drivers/scsi/cxlflash/main.h | 21
-rw-r--r--  drivers/scsi/cxlflash/ocxl_hw.c | 1436
-rw-r--r--  drivers/scsi/cxlflash/ocxl_hw.h | 77
-rw-r--r--  drivers/scsi/cxlflash/sislite.h | 41
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 23
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 3
-rw-r--r--  drivers/scsi/dpt_i2o.c | 21
-rw-r--r--  drivers/scsi/esas2r/esas2r_init.c | 5
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_main.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 52
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 638
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 164
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 284
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 452
-rw-r--r--  drivers/scsi/ipr.c | 2
-rw-r--r--  drivers/scsi/ips.c | 78
-rw-r--r--  drivers/scsi/ips.h | 11
-rw-r--r--  drivers/scsi/isci/init.c | 3
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 1
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 5
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 124
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 98
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 153
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 35
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 238
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h | 17
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 116
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 4
-rw-r--r--  drivers/scsi/megaraid.c | 3
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 27
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 8
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2.h | 9
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 30
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_init.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 7
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 477
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 60
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 33
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.h | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 491
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_warpdrive.c | 3
-rw-r--r--  drivers/scsi/mvumi.c | 20
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 16
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 2
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.c | 2
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.h | 2
-rw-r--r--  drivers/scsi/qedf/drv_scsi_fw_funcs.c | 2
-rw-r--r--  drivers/scsi/qedf/drv_scsi_fw_funcs.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf.h | 6
-rw-r--r--  drivers/scsi/qedf/qedf_attr.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.c | 4
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf_debugfs.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 35
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c | 5
-rw-r--r--  drivers/scsi/qedf/qedf_hsi.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 87
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 130
-rw-r--r--  drivers/scsi/qedf/qedf_version.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 41
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 105
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 192
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 37
-rw-r--r--  drivers/scsi/qlogicpti.c | 6
-rw-r--r--  drivers/scsi/scsi_debugfs.c | 2
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 31
-rw-r--r--  drivers/scsi/scsi_dh.c | 5
-rw-r--r--  drivers/scsi/scsi_error.c | 7
-rw-r--r--  drivers/scsi/scsi_lib.c | 4
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--  drivers/scsi/sd.h | 12
-rw-r--r--  drivers/scsi/sd_zbc.c | 10
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 6
-rw-r--r--  drivers/scsi/st.c | 6
-rw-r--r--  drivers/scsi/storvsc_drv.c | 85
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 308
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 21
-rw-r--r--  drivers/scsi/wd719x.c | 13
-rw-r--r--  drivers/scsi/zorro_esp.c | 1172
-rw-r--r--  drivers/target/target_core_configfs.c | 25
-rw-r--r--  drivers/target/target_core_file.c | 137
-rw-r--r--  drivers/target/target_core_file.h | 1
-rw-r--r--  drivers/target/target_core_internal.h | 1
-rw-r--r--  drivers/target/target_core_pscsi.c | 26
-rw-r--r--  drivers/target/target_core_transport.c | 64
-rw-r--r--  drivers/target/target_core_user.c | 160
139 files changed, 7070 insertions, 1972 deletions
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ce5019db50fd..6a91d04351d9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -5054,6 +5054,18 @@ int ata_sas_port_init(struct ata_port *ap)
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_init);
 
+int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
+{
+	return ata_tport_add(parent, ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_add);
+
+void ata_sas_tport_delete(struct ata_port *ap)
+{
+	ata_tport_delete(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
+
 /**
  *	ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
  *	@ap: SATA port to destroy
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 8699bb969e7e..3c836c099a8f 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -227,6 +227,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 	ring_info->ring_buffer->feature_bits.value = 1;
 
 	ring_info->ring_size = page_cnt << PAGE_SHIFT;
+	ring_info->ring_size_div10_reciprocal =
+		reciprocal_value(ring_info->ring_size / 10);
 	ring_info->ring_datasize = ring_info->ring_size -
 		sizeof(struct hv_ring_buffer);
 
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 4e9c0ce94f27..059997f8ebce 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1802,13 +1802,13 @@ typedef struct _CONFIG_PAGE_FC_PORT_0
 #define MPI_FCPORTPAGE0_SUPPORT_CLASS_2                 (0x00000002)
 #define MPI_FCPORTPAGE0_SUPPORT_CLASS_3                 (0x00000004)
 
-#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN            (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0 Unknown - transceiver incapable of reporting */
+#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UNKNOWN           (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0 Unknown - transceiver incapable of reporting */
 #define MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED             (0x00000001) /* (SNIA)HBA_PORTSPEED_1GBIT 1 1 GBit/sec */
 #define MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED             (0x00000002) /* (SNIA)HBA_PORTSPEED_2GBIT 2 2 GBit/sec */
 #define MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED            (0x00000004) /* (SNIA)HBA_PORTSPEED_10GBIT 4 10 GBit/sec */
 #define MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED             (0x00000008) /* (SNIA)HBA_PORTSPEED_4GBIT 8 4 GBit/sec */
 
-#define MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN            MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN           MPI_FCPORTPAGE0_SUPPORT_SPEED_UNKNOWN
 #define MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT             MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED
 #define MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT             MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED
 #define MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT            MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a746ccdd630a..a625ac4e2872 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7600,7 +7600,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
 
 		snprintf(evStr, EVENT_DESCR_STR_SZ,
 		    "SAS Initiator Device Table Overflow: max initiators=%02d "
-		    "current initators=%02d",
+		    "current initiators=%02d",
 		    max_init, current_init);
 		break;
 	}
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 6d461ca97150..06b175420be9 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -693,7 +693,7 @@ mptfc_display_port_link_speed(MPT_ADAPTER *ioc, int portnum, FCPortPage0_t *pp0d
 	state = pp0dest->PortState;
 
 	if (state != MPI_FCPORTPAGE0_PORTSTATE_OFFLINE &&
-	    new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN) {
+	    new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN) {
 
 		old = old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" :
 		    old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" :
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 55dd71bbdc2a..4cbed4d06aa7 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -670,7 +670,7 @@ out:
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static int
+static netdev_tx_t
 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
 {
 	struct mpt_lan_priv *priv = netdev_priv(dev);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 19a5aa70ecda..76a66da33996 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -4320,7 +4320,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
 			if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
 			    hot_plug_info->id) {
 				printk(MYIOC_s_WARN_FMT "firmware bug: unable "
-				    "to add hidden disk - target_id matchs "
+				    "to add hidden disk - target_id matches "
 				    "volume_id\n", ioc->name);
 				mptsas_free_fw_event(ioc, fw_event);
 				return;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 99d8e7398a5b..23304aca25f9 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -189,7 +189,6 @@ struct netvsc_device;
 struct net_device_context;
 
 extern u32 netvsc_ring_bytes;
-extern struct reciprocal_value netvsc_ring_reciprocal;
 
 struct netvsc_device *netvsc_device_add(struct hv_device *device,
 					const struct netvsc_device_info *info);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d2ee66c259a7..5d5bd513847f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -31,7 +31,6 @@
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 #include <linux/prefetch.h>
-#include <linux/reciprocal_div.h>
 
 #include <asm/sync_bitops.h>
 
@@ -635,17 +634,6 @@ void netvsc_device_remove(struct hv_device *device)
 #define RING_AVAIL_PERCENT_HIWATER 20
 #define RING_AVAIL_PERCENT_LOWATER 10
 
-/*
- * Get the percentage of available bytes to write in the ring.
- * The return value is in range from 0 to 100.
- */
-static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
-{
-	u32 avail_write = hv_get_bytes_to_write(ring_info);
-
-	return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
-}
-
 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
 					 u32 index)
 {
@@ -694,8 +682,8 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
 		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
 		if (netif_tx_queue_stopped(txq) &&
-		    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
-		     queue_sends < 1)) {
+		    (hv_get_avail_to_write_percent(&channel->outbound) >
+		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
 			netif_tx_wake_queue(txq);
 			ndev_ctx->eth_stats.wake_queue++;
 		}
@@ -802,7 +790,7 @@ static inline int netvsc_send_pkt(
 	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
 	u64 req_id;
 	int ret;
-	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
+	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
 
 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
 	if (skb)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index bef4d55a108c..1e9bde68698d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -35,7 +35,6 @@
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/netpoll.h>
-#include <linux/reciprocal_div.h>
 
 #include <net/arp.h>
 #include <net/route.h>
@@ -59,7 +58,6 @@ static unsigned int ring_size __ro_after_init = 128;
 module_param(ring_size, uint, 0444);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 unsigned int netvsc_ring_bytes __ro_after_init;
-struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
 
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 				NETIF_MSG_LINK | NETIF_MSG_IFUP |
@@ -2130,7 +2128,6 @@ static int __init netvsc_drv_init(void)
 			 ring_size);
 	}
 	netvsc_ring_bytes = ring_size * PAGE_SIZE;
-	netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
 
 	ret = vmbus_driver_register(&netvsc_drv);
 	if (ret)
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 18c4f933e8b9..3b368fcf13f4 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -285,6 +285,8 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 	struct list_head *entry;
 	unsigned long flags;
 
+	lockdep_assert_held(&adapter->erp_lock);
+
 	if (unlikely(!debug_level_enabled(dbf->rec, level)))
 		return;
 
@@ -599,16 +601,18 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
 }
 
 /**
- * zfcp_dbf_scsi - trace event for scsi commands
- * @tag: identifier for event
- * @sc: pointer to struct scsi_cmnd
- * @fsf: pointer to struct zfcp_fsf_req
+ * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
+ * @tag: Identifier for event.
+ * @level: trace level of event.
+ * @sdev: Pointer to SCSI device as context for this event.
+ * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
+ * @fsf: Pointer to FSF request, or NULL.
  */
-void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
-		   struct zfcp_fsf_req *fsf)
+void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
+			  struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
 {
 	struct zfcp_adapter *adapter =
-		(struct zfcp_adapter *) sc->device->host->hostdata[0];
+		(struct zfcp_adapter *) sdev->host->hostdata[0];
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
 	struct fcp_resp_with_ext *fcp_rsp;
@@ -620,16 +624,28 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
 
 	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
 	rec->id = ZFCP_DBF_SCSI_CMND;
-	rec->scsi_result = sc->result;
-	rec->scsi_retries = sc->retries;
-	rec->scsi_allowed = sc->allowed;
-	rec->scsi_id = sc->device->id;
-	rec->scsi_lun = (u32)sc->device->lun;
-	rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
-	rec->host_scribble = (unsigned long)sc->host_scribble;
-
-	memcpy(rec->scsi_opcode, sc->cmnd,
-	       min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
+	if (sc) {
+		rec->scsi_result = sc->result;
+		rec->scsi_retries = sc->retries;
+		rec->scsi_allowed = sc->allowed;
+		rec->scsi_id = sc->device->id;
+		rec->scsi_lun = (u32)sc->device->lun;
+		rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
+		rec->host_scribble = (unsigned long)sc->host_scribble;
+
+		memcpy(rec->scsi_opcode, sc->cmnd,
+		       min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
+	} else {
+		rec->scsi_result = ~0;
+		rec->scsi_retries = ~0;
+		rec->scsi_allowed = ~0;
+		rec->scsi_id = sdev->id;
+		rec->scsi_lun = (u32)sdev->lun;
+		rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
+		rec->host_scribble = ~0;
+
+		memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
+	}
 
 	if (fsf) {
 		rec->fsf_req_id = fsf->req_id;
@@ -664,6 +680,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
 	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
 }
 
+/**
+ * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
+ * @tag: Identifier for event.
+ * @adapter: Pointer to zfcp adapter as context for this event.
+ * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
+ * @ret: Return value of calling function.
+ *
+ * This SCSI trace variant does not depend on any of:
+ * scsi_cmnd, zfcp_fsf_req, scsi_device.
+ */
+void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
+		      unsigned int scsi_id, int ret)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
+	unsigned long flags;
+	static int const level = 1;
+
+	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
+		return;
+
+	spin_lock_irqsave(&dbf->scsi_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_SCSI_CMND;
+	rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
+	rec->scsi_retries = ~0;
+	rec->scsi_allowed = ~0;
+	rec->fcp_rsp_info = ~0;
+	rec->scsi_id = scsi_id;
+	rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
+	rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
+	rec->host_scribble = ~0;
+	memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
+
+	debug_event(dbf->scsi, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
+}
+
 static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
 {
 	struct debug_info *d;
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index e2a973cd2573..d116c07ed77a 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -359,7 +359,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
 			scmd->device->host->hostdata[0];
 
 	if (debug_level_enabled(adapter->dbf->scsi, level))
-		zfcp_dbf_scsi(tag, level, scmd, req);
+		zfcp_dbf_scsi_common(tag, level, scmd->device, scmd, req);
 }
 
 /**
@@ -402,16 +402,23 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
 }
 
 /**
- * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
- * @tag: tag indicating success or failure of reset operation
- * @scmnd: SCSI command which caused this error recovery
- * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
+ * zfcp_dbf_scsi_devreset() - Trace event for Logical Unit or Target Reset.
+ * @tag: Tag indicating success or failure of reset operation.
+ * @sdev: Pointer to SCSI device as context for this event.
+ * @flag: Indicates type of reset (Target Reset, Logical Unit Reset).
+ * @fsf_req: Pointer to FSF request representing the TMF, or NULL.
  */
 static inline
-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_device *sdev, u8 flag,
 			    struct zfcp_fsf_req *fsf_req)
 {
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)
+					sdev->host->hostdata[0];
 	char tmp_tag[ZFCP_DBF_TAG_LEN];
+	static int const level = 1;
+
+	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
+		return;
 
 	if (flag == FCP_TMF_TGT_RESET)
 		memcpy(tmp_tag, "tr_", 3);
@@ -419,7 +426,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
 		memcpy(tmp_tag, "lr_", 3);
 
 	memcpy(&tmp_tag[3], tag, 4);
-	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
+	zfcp_dbf_scsi_common(tmp_tag, level, sdev, NULL, fsf_req);
 }
 
 /**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 1d91a32db08e..e7e6b63905e2 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -19,7 +19,6 @@
 enum zfcp_erp_act_flags {
 	ZFCP_STATUS_ERP_TIMEDOUT	= 0x10000000,
 	ZFCP_STATUS_ERP_CLOSE_ONLY	= 0x01000000,
-	ZFCP_STATUS_ERP_DISMISSING	= 0x00100000,
 	ZFCP_STATUS_ERP_DISMISSED	= 0x00200000,
 	ZFCP_STATUS_ERP_LOWMEM		= 0x00400000,
 	ZFCP_STATUS_ERP_NO_REF		= 0x00800000,
@@ -27,7 +26,6 @@ enum zfcp_erp_act_flags {
 
 enum zfcp_erp_steps {
 	ZFCP_ERP_STEP_UNINITIALIZED	= 0x0000,
-	ZFCP_ERP_STEP_FSF_XCONFIG	= 0x0001,
 	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
 	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
 	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
@@ -35,16 +33,28 @@ enum zfcp_erp_steps {
 	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
 };
 
+/**
+ * enum zfcp_erp_act_type - Type of ERP action object.
+ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
+ * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
+ *			  either of the first four enum values.
+ *			  Used to indicate that an ERP action could not be
+ *			  set up despite a detected need for some recovery.
+ * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
+ *			    either of the first four enum values.
+ *			    Used to indicate that ERP not needed because
+ *			    the object has ZFCP_STATUS_COMMON_ERP_FAILED.
+ */
 enum zfcp_erp_act_type {
 	ZFCP_ERP_ACTION_REOPEN_LUN	= 1,
 	ZFCP_ERP_ACTION_REOPEN_PORT	= 2,
 	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
 	ZFCP_ERP_ACTION_REOPEN_ADAPTER	= 4,
-};
-
-enum zfcp_erp_act_state {
-	ZFCP_ERP_ACTION_RUNNING = 1,
-	ZFCP_ERP_ACTION_READY = 2,
+	ZFCP_ERP_ACTION_NONE		= 0xc0,
+	ZFCP_ERP_ACTION_FAILED		= 0xe0,
 };
 
 enum zfcp_erp_act_result {
@@ -62,14 +72,14 @@ static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
 			       ZFCP_STATUS_COMMON_UNBLOCKED | mask);
 }
 
-static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
+static bool zfcp_erp_action_is_running(struct zfcp_erp_action *act)
 {
 	struct zfcp_erp_action *curr_act;
 
 	list_for_each_entry(curr_act, &act->adapter->erp_running_head, list)
 		if (act == curr_act)
-			return ZFCP_ERP_ACTION_RUNNING;
-	return 0;
+			return true;
+	return false;
 }
 
 static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
@@ -85,7 +95,7 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act)
 static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
 {
 	act->status |= ZFCP_STATUS_ERP_DISMISSED;
-	if (zfcp_erp_action_exists(act) == ZFCP_ERP_ACTION_RUNNING)
+	if (zfcp_erp_action_is_running(act))
 		zfcp_erp_action_ready(act);
 }
 
@@ -126,6 +136,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
 	}
 }
 
+static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
+				  struct zfcp_port *port,
+				  struct scsi_device *sdev)
+{
+	int need = want;
+	struct zfcp_scsi_dev *zsdev;
+
+	switch (want) {
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zsdev = sdev_to_zfcp(sdev);
+		if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+			need = 0;
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+		if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+			need = 0;
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_PORT:
+		if (atomic_read(&port->status) &
+		    ZFCP_STATUS_COMMON_ERP_FAILED) {
+			need = 0;
+			/* ensure propagation of failed status to new devices */
+			zfcp_erp_set_port_status(
+				port, ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+		if (atomic_read(&adapter->status) &
+		    ZFCP_STATUS_COMMON_ERP_FAILED) {
+			need = 0;
+			/* ensure propagation of failed status to new devices */
+			zfcp_erp_set_adapter_status(
+				adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+		}
+		break;
+	default:
+		need = 0;
+		break;
+	}
+
+	return need;
+}
+
 static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
 				 struct zfcp_port *port,
 				 struct scsi_device *sdev)
@@ -241,48 +294,70 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 	return erp_action;
 }
 
-static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
-				   struct zfcp_port *port,
-				   struct scsi_device *sdev,
-				   char *id, u32 act_status)
+static void zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+				    struct zfcp_port *port,
+				    struct scsi_device *sdev,
+				    char *id, u32 act_status)
 {
-	int retval = 1, need;
+	int need;
 	struct zfcp_erp_action *act;
 
-	if (!adapter->erp_thread)
-		return -EIO;
+	need = zfcp_erp_handle_failed(want, adapter, port, sdev);
+	if (!need) {
+		need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
+		goto out;
+	}
+
+	if (!adapter->erp_thread) {
+		need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
+		goto out;
+	}
 
 	need = zfcp_erp_required_act(want, adapter, port, sdev);
 	if (!need)
 		goto out;
 
 	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
-	if (!act)
+	if (!act) {
+		need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
 		goto out;
+	}
 	atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
-	retval = 0;
  out:
 	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
-	return retval;
 }
 
-static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
+void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
+				      u64 port_name, u32 port_id)
+{
+	unsigned long flags;
+	static /* don't waste stack */ struct zfcp_port tmpport;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	/* Stand-in zfcp port with fields just good enough for
+	 * zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
+	 * Under lock because tmpport is static.
+	 */
+	atomic_set(&tmpport.status, -1); /* unknown */
+	tmpport.wwpn = port_name;
+	tmpport.d_id = port_id;
+	zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
+			  ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
+			  ZFCP_ERP_ACTION_NONE);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
+static void _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
 				    int clear_mask, char *id)
 {
 	zfcp_erp_adapter_block(adapter, clear_mask);
 	zfcp_scsi_schedule_rports_block(adapter);
 
-	/* ensure propagation of failed status to new devices */
-	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
-		zfcp_erp_set_adapter_status(adapter,
-					    ZFCP_STATUS_COMMON_ERP_FAILED);
-		return -EIO;
-	}
-	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
-				       adapter, NULL, NULL, id, 0);
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
+				adapter, NULL, NULL, id, 0);
 }
 
 /**
@@ -299,12 +374,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
 	zfcp_scsi_schedule_rports_block(adapter);
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
-		zfcp_erp_set_adapter_status(adapter,
-					    ZFCP_STATUS_COMMON_ERP_FAILED);
-	else
-		zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
-					NULL, NULL, id, 0);
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+				NULL, NULL, id, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -345,9 +416,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
 
-	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
-		return;
-
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
 				port->adapter, port, NULL, id, 0);
 }
@@ -368,19 +436,13 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+static void _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
 
-	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
-		/* ensure propagation of failed status to new devices */
-		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
-		return -EIO;
-	}
-
-	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
-				       port->adapter, port, NULL, id, 0);
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
+				port->adapter, port, NULL, id, 0);
 }
 
 /**
@@ -388,20 +450,15 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
  * @port: port to recover
  * @clear_mask: flags in port status to be cleared
  * @id: Id for debug trace event.
- *
- * Returns 0 if recovery has been triggered, < 0 if not.
  */
-int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
 {
-	int retval;
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	retval = _zfcp_erp_port_reopen(port, clear, id);
+	_zfcp_erp_port_reopen(port, clear, id);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
-
-	return retval;
 }
 
 static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
@@ -418,9 +475,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
 
 	zfcp_erp_lun_block(sdev, clear);
 
-	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
-		return;
-
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
 				zfcp_sdev->port, sdev, id, act_status);
 }
@@ -482,21 +536,23 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
 	zfcp_erp_wait(adapter);
 }
 
-static int status_change_set(unsigned long mask, atomic_t *status)
+static int zfcp_erp_status_change_set(unsigned long mask, atomic_t *status)
 {
 	return (atomic_read(status) ^ mask) & mask;
 }
 
 static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
 {
-	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
+	if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
+				       &adapter->status))
 		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
 	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
 static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
-	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
+	if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
+				       &port->status))
 		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
 	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
@@ -505,7 +561,8 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
+	if (zfcp_erp_status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED,
+				       &zfcp_sdev->status))
 		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
 	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
 }
@@ -553,7 +610,7 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
 	unsigned long flags;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
+	if (zfcp_erp_action_is_running(erp_action)) {
 		erp_action->status |= set_mask;
 		zfcp_erp_action_ready(erp_action);
 	}
@@ -1634,3 +1691,14 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
 	atomic_set(&zfcp_sdev->erp_counter, 0);
 }
 
+/**
+ * zfcp_erp_adapter_reset_sync() - Really reopen adapter and wait.
+ * @adapter: Pointer to zfcp_adapter to reopen.
+ * @id: Trace tag string of length %ZFCP_DBF_TAG_LEN.
+ */
+void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id)
+{
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, id);
+	zfcp_erp_wait(adapter);
+}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index e5eed8aac0ce..bd0c5a9f04cb 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -50,17 +50,23 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
 extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
 extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
-			  struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
+				 struct scsi_cmnd *sc,
+				 struct zfcp_fsf_req *fsf);
+extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
+			     unsigned int scsi_id, int ret);
 
 /* zfcp_erp.c */
 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
 extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_port_forced_no_port_dbf(char *id,
+					     struct zfcp_adapter *adapter,
+					     u64 port_name, u32 port_id);
 extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
 extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
 extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
 extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
-extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
@@ -73,6 +79,7 @@ extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
 extern void zfcp_erp_wait(struct zfcp_adapter *);
 extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
 extern void zfcp_erp_timeout_handler(struct timer_list *t);
+extern void zfcp_erp_adapter_reset_sync(struct zfcp_adapter *adapter, char *id);
 
 /* zfcp_fc.c */
 extern struct kmem_cache *zfcp_fc_req_cache;
@@ -120,7 +127,8 @@ extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
 			     struct zfcp_fsf_ct_els *, unsigned int);
 extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
 extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
-extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
+extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
+						   u8 tm_flags);
 extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
 extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
 
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 6162cf57a20a..f6c415d6ef48 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -111,11 +111,10 @@ void zfcp_fc_post_event(struct work_struct *work)
 
 	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
 		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
 				   event->code, event->data);
 		list_del(&event->list);
 		kfree(event);
 	}
-
 }
 
 /**
@@ -126,7 +125,7 @@ void zfcp_fc_post_event(struct work_struct *work)
  * @event_data: The event data (e.g. n_port page in case of els)
  */
 void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
-			   enum fc_host_event_code event_code, u32 event_data)
+			  enum fc_host_event_code event_code, u32 event_data)
 {
 	struct zfcp_fc_event *event;
 
@@ -425,6 +424,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
 	struct zfcp_port *port = container_of(work, struct zfcp_port,
 					      gid_pn_work);
 
+	set_worker_desc("zgidpn%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
 	ret = zfcp_fc_ns_gid_pn(port);
 	if (ret) {
 		/* could not issue gid_pn for some reason */
@@ -559,6 +559,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
 		container_of(work, struct zfcp_port, test_link_work);
 	int retval;
 
+	set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
 	get_device(&port->dev);
 	port->rport_task = RPORT_DEL;
 	zfcp_scsi_rport_work(&port->rport_work);
@@ -596,7 +597,7 @@ void zfcp_fc_test_link(struct zfcp_port *port)
 	put_device(&port->dev);
 }
 
-static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
+static struct zfcp_fc_req *zfcp_fc_alloc_sg_env(int buf_num)
 {
 	struct zfcp_fc_req *fc_req;
 
@@ -748,7 +749,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
 	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
 		return;
 
-	fc_req = zfcp_alloc_sg_env(buf_num);
+	fc_req = zfcp_fc_alloc_sg_env(buf_num);
 	if (!fc_req)
 		goto out;
 
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 6a397ddaadf0..3cd74729cfb9 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -207,21 +207,14 @@ struct zfcp_fc_wka_ports {
  * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
  * @fcp: fcp_cmnd to setup
  * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
- * @tm: task management flags to setup task management command
  */
 static inline
-void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
-			 u8 tm_flags)
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
 {
 	u32 datalen;
 
 	int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
 
-	if (unlikely(tm_flags)) {
-		fcp->fc_tm_flags = tm_flags;
-		return;
-	}
-
 	fcp->fc_pri_ta = FCP_PTA_SIMPLE;
 
 	if (scsi->sc_data_direction == DMA_FROM_DEVICE)
@@ -241,6 +234,19 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
 }
 
 /**
+ * zfcp_fc_fcp_tm() - Setup FCP command as task management command.
+ * @fcp: Pointer to FCP_CMND IU to set up.
+ * @dev: Pointer to SCSI_device where to send the task management command.
+ * @tm_flags: Task management flags to setup tm command.
+ */
+static inline
+void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
+{
+	int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
+	fcp->fc_tm_flags = tm_flags;
+}
+
+/**
  * zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
  * @fcp_rsp: FCP RSP IU to evaluate
  * @scsi: SCSI command where to update status and sense buffer
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index b12cb81ad8a2..3c86e27f094d 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4,7 +4,7 @@
  *
  * Implementation of FSF commands.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -437,6 +437,9 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
 #define ZFCP_FSF_PORTSPEED_10GBIT	(1 << 3)
 #define ZFCP_FSF_PORTSPEED_8GBIT	(1 << 4)
 #define ZFCP_FSF_PORTSPEED_16GBIT	(1 << 5)
+#define ZFCP_FSF_PORTSPEED_32GBIT	(1 << 6)
+#define ZFCP_FSF_PORTSPEED_64GBIT	(1 << 7)
+#define ZFCP_FSF_PORTSPEED_128GBIT	(1 << 8)
 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
 
 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
@@ -454,6 +457,12 @@ static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
 		fdmi_speed |= FC_PORTSPEED_8GBIT;
 	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
 		fdmi_speed |= FC_PORTSPEED_16GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
+		fdmi_speed |= FC_PORTSPEED_32GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
+		fdmi_speed |= FC_PORTSPEED_64GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
+		fdmi_speed |= FC_PORTSPEED_128GBIT;
 	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
 		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
 	return fdmi_speed;
@@ -662,7 +671,7 @@ static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
 	return req;
 }
 
-static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
+static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
 {
 	struct fsf_qtcb *qtcb;
 
@@ -701,9 +710,10 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
 
 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
 		if (likely(pool))
-			req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
+			req->qtcb = zfcp_fsf_qtcb_alloc(
+				adapter->pool.qtcb_pool);
 		else
-			req->qtcb = zfcp_qtcb_alloc(NULL);
+			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
 
 		if (unlikely(!req->qtcb)) {
 			zfcp_fsf_req_free(req);
@@ -2036,10 +2046,14 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 		       sizeof(blktrc));
 }
 
-static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
+/**
+ * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
+ * @req: Pointer to FSF request.
+ * @sdev: Pointer to SCSI device as request context.
+ */
+static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
+					struct scsi_device *sdev)
 {
-	struct scsi_cmnd *scmnd = req->data;
-	struct scsi_device *sdev = scmnd->device;
 	struct zfcp_scsi_dev *zfcp_sdev;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
@@ -2051,7 +2065,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 	switch (header->fsf_status) {
 	case FSF_HANDLE_MISMATCH:
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
+		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_FCPLUN_NOT_VALID:
@@ -2069,8 +2083,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 			req->qtcb->bottom.io.data_direction,
 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
-		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
-					  "fssfch3");
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_CMND_LENGTH_NOT_VALID:
@@ -2080,8 +2093,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 			req->qtcb->bottom.io.fcp_cmnd_length,
 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
-		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
-					  "fssfch4");
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_BOXED:
@@ -2120,7 +2132,7 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
 		return;
 	}
 
-	zfcp_fsf_fcp_handler_common(req);
+	zfcp_fsf_fcp_handler_common(req, scpnt->device);
 
 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
 		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
@@ -2258,7 +2270,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
 
 	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
-	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
+	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
 
 	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
 	    scsi_prot_sg_count(scsi_cmnd)) {
@@ -2297,10 +2309,11 @@ out:
 
 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
 {
+	struct scsi_device *sdev = req->data;
 	struct fcp_resp_with_ext *fcp_rsp;
 	struct fcp_resp_rsp_info *rsp_info;
 
-	zfcp_fsf_fcp_handler_common(req);
+	zfcp_fsf_fcp_handler_common(req, sdev);
 
 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
 	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
@@ -2311,17 +2324,18 @@ static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
 }
 
 /**
- * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
- * @scmnd: SCSI command to send the task management command for
- * @tm_flags: unsigned byte for task management flags
- * Returns: on success pointer to struct fsf_req, NULL otherwise
+ * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
+ * @sdev: Pointer to SCSI device to send the task management command to.
+ * @tm_flags: Unsigned byte for task management flags.
+ *
+ * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
  */
-struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
+struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
 					    u8 tm_flags)
 {
 	struct zfcp_fsf_req *req = NULL;
 	struct fcp_cmnd *fcp_cmnd;
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
 
 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
@@ -2341,7 +2355,8 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
 		goto out;
 	}
 
-	req->data = scmnd;
+	req->data = sdev;
+
 	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
@@ -2352,7 +2367,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
 
 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
-	zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
+	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
 
 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
 	if (!zfcp_fsf_req_send(req))
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 4baca67aba6d..535628b92f0a 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -4,7 +4,7 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #ifndef FSF_H
@@ -356,7 +356,7 @@ struct fsf_qtcb_bottom_config {
 	u32 adapter_features;
 	u32 connection_features;
 	u32 fc_topology;
-	u32 fc_link_speed;
+	u32 fc_link_speed;	/* one of ZFCP_FSF_PORTSPEED_* */
 	u32 adapter_type;
 	u8 res0;
 	u8 peer_d_id[3];
@@ -382,7 +382,7 @@ struct fsf_qtcb_bottom_port {
 	u32 class_of_service;	/* should be 0x00000006 for class 2 and 3 */
 	u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */
 	u8 active_fc4_types[32];
-	u32 supported_speed;	/* 0x0001 for 1 GBit/s or 0x0002 for 2 GBit/s */
+	u32 supported_speed;	/* any combination of ZFCP_FSF_PORTSPEED_* */
 	u32 maximum_frame_size;	/* fixed value of 2112 */
 	u64 seconds_since_last_reset;
 	u64 tx_frames;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 22f9562f415c..a8efcb330bc1 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -181,6 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 		if (abrt_req)
 			break;
 
+		zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
 		zfcp_erp_wait(adapter);
 		ret = fc_block_scsi_eh(scpnt);
 		if (ret) {
@@ -264,44 +265,52 @@ static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
 	write_unlock_irqrestore(&adapter->abort_lock, flags);
 }
 
-static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+/**
+ * zfcp_scsi_task_mgmt_function() - Send a task management function (sync).
+ * @sdev: Pointer to SCSI device to send the task management command to.
+ * @tm_flags: Task management flags,
+ *	      here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET.
+ */
+static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags)
 {
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
 	struct zfcp_fsf_req *fsf_req = NULL;
 	int retval = SUCCESS, ret;
 	int retry = 3;
 
 	while (retry--) {
-		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
+		fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
 		if (fsf_req)
 			break;
 
+		zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL);
 		zfcp_erp_wait(adapter);
-		ret = fc_block_scsi_eh(scpnt);
+		ret = fc_block_rport(rport);
 		if (ret) {
-			zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
+			zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL);
 			return ret;
 		}
 
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
+			zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL);
 			return SUCCESS;
 		}
 	}
 	if (!fsf_req) {
-		zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
+		zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL);
 		return FAILED;
 	}
 
 	wait_for_completion(&fsf_req->completion);
 
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
-		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
+		zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req);
 		retval = FAILED;
 	} else {
-		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
+		zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req);
 		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
 	}
 
@@ -311,27 +320,81 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 
 static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
 {
-	return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
+	struct scsi_device *sdev = scpnt->device;
+
+	return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET);
 }
 
 static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
 {
-	return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
+	struct scsi_target *starget = scsi_target(scpnt->device);
+	struct fc_rport *rport = starget_to_rport(starget);
+	struct Scsi_Host *shost = rport_to_shost(rport);
+	struct scsi_device *sdev = NULL, *tmp_sdev;
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *)shost->hostdata[0];
+	int ret;
+
+	shost_for_each_device(tmp_sdev, shost) {
+		if (tmp_sdev->id == starget->id) {
+			sdev = tmp_sdev;
+			break;
+		}
+	}
+	if (!sdev) {
+		ret = FAILED;
+		zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret);
+		return ret;
+	}
+
+	ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET);
+
+	/* release reference from above shost_for_each_device */
+	if (sdev)
+		scsi_device_put(tmp_sdev);
+
+	return ret;
 }
 
 static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
-	int ret;
+	int ret = SUCCESS, fc_ret;
 
 	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
 	zfcp_erp_wait(adapter);
-	ret = fc_block_scsi_eh(scpnt);
-	if (ret)
+	fc_ret = fc_block_scsi_eh(scpnt);
+	if (fc_ret)
+		ret = fc_ret;
+
+	zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
+	return ret;
+}
+
+/**
+ * zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset.
+ * @shost: Pointer to Scsi_Host to perform action on.
+ * @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET.
+ *
+ * Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise.
+ *
+ * This is similar to zfcp_sysfs_adapter_failed_store().
+ */
+static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *)shost->hostdata[0];
+	int ret = 0;
+
+	if (reset_type != SCSI_ADAPTER_RESET) {
+		ret = -EOPNOTSUPP;
+		zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret);
 		return ret;
+	}
 
-	return SUCCESS;
+	zfcp_erp_adapter_reset_sync(adapter, "scshr_y");
+	return ret;
 }
 
 struct scsi_transport_template *zfcp_scsi_transport_template;
@@ -349,6 +412,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 	.slave_configure = zfcp_scsi_slave_configure,
 	.slave_destroy = zfcp_scsi_slave_destroy,
 	.change_queue_depth = scsi_change_queue_depth,
+	.host_reset = zfcp_scsi_sysfs_host_reset,
 	.proc_name = "zfcp",
 	.can_queue = 4096,
 	.this_id = -1,
@@ -363,6 +427,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 	.shost_attrs = zfcp_sysfs_shost_attrs,
 	.sdev_attrs = zfcp_sysfs_sdev_attrs,
 	.track_queue_depth = 1,
+	.supported_mode = MODE_INITIATOR,
 };
 
 /**
@@ -430,7 +495,7 @@ void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
 }
 
 static struct fc_host_statistics*
-zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
+zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter)
 {
 	struct fc_host_statistics *fc_stats;
 
@@ -444,9 +509,9 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
 	return adapter->fc_stats;
 }
 
-static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
-				      struct fsf_qtcb_bottom_port *data,
-				      struct fsf_qtcb_bottom_port *old)
+static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
+					   struct fsf_qtcb_bottom_port *data,
+					   struct fsf_qtcb_bottom_port *old)
 {
 	fc_stats->seconds_since_last_reset =
 		data->seconds_since_last_reset - old->seconds_since_last_reset;
@@ -477,8 +542,8 @@ static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
 	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
 }
 
-static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
-				   struct fsf_qtcb_bottom_port *data)
+static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats,
+					struct fsf_qtcb_bottom_port *data)
 {
 	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
 	fc_stats->tx_frames = data->tx_frames;
@@ -502,7 +567,8 @@ static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
 	fc_stats->fcp_output_megabytes = data->output_mb;
 }
 
-static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
+static struct fc_host_statistics *
+zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
 {
 	struct zfcp_adapter *adapter;
 	struct fc_host_statistics *fc_stats;
@@ -510,7 +576,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
 	int ret;
 
 	adapter = (struct zfcp_adapter *)host->hostdata[0];
-	fc_stats = zfcp_init_fc_host_stats(adapter);
+	fc_stats = zfcp_scsi_init_fc_host_stats(adapter);
 	if (!fc_stats)
 		return NULL;
 
@@ -527,16 +593,16 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host)
 	if (adapter->stats_reset &&
 	    ((jiffies/HZ - adapter->stats_reset) <
 	     data->seconds_since_last_reset))
-		zfcp_adjust_fc_host_stats(fc_stats, data,
-					  adapter->stats_reset_data);
+		zfcp_scsi_adjust_fc_host_stats(fc_stats, data,
+					       adapter->stats_reset_data);
 	else
-		zfcp_set_fc_host_stats(fc_stats, data);
+		zfcp_scsi_set_fc_host_stats(fc_stats, data);
 
 	kfree(data);
 	return fc_stats;
 }
 
-static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
+static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
 {
 	struct zfcp_adapter *adapter;
 	struct fsf_qtcb_bottom_port *data;
@@ -558,7 +624,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
 	}
 }
 
-static void zfcp_get_host_port_state(struct Scsi_Host *shost)
+static void zfcp_scsi_get_host_port_state(struct Scsi_Host *shost)
 {
 	struct zfcp_adapter *adapter =
 		(struct zfcp_adapter *)shost->hostdata[0];
@@ -575,7 +641,8 @@ static void zfcp_get_host_port_state(struct Scsi_Host *shost)
 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
 }
 
-static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
+static void zfcp_scsi_set_rport_dev_loss_tmo(struct fc_rport *rport,
+					     u32 timeout)
 {
 	rport->dev_loss_tmo = timeout;
 }
@@ -602,6 +669,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
 	if (port) {
 		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
 		put_device(&port->dev);
+	} else {
+		zfcp_erp_port_forced_no_port_dbf(
+			"sctrpin", adapter,
+			rport->port_name /* zfcp_scsi_rport_register */,
+			rport->port_id /* zfcp_scsi_rport_register */);
 	}
 }
 
@@ -687,6 +759,9 @@ void zfcp_scsi_rport_work(struct work_struct *work)
 	struct zfcp_port *port = container_of(work, struct zfcp_port,
 					      rport_work);
 
+	set_worker_desc("zrp%c-%16llx",
+			(port->rport_task == RPORT_ADD) ? 'a' : 'd',
+			port->wwpn); /* < WORKER_DESC_LEN=24 */
 	while (port->rport_task) {
 		if (port->rport_task == RPORT_ADD) {
 			port->rport_task = RPORT_NONE;
@@ -761,10 +836,10 @@ struct fc_function_template zfcp_transport_functions = {
 	.show_host_supported_speeds = 1,
 	.show_host_maxframe_size = 1,
 	.show_host_serial_number = 1,
-	.get_fc_host_stats = zfcp_get_fc_host_stats,
-	.reset_fc_host_stats = zfcp_reset_fc_host_stats,
-	.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
-	.get_host_port_state = zfcp_get_host_port_state,
+	.get_fc_host_stats = zfcp_scsi_get_fc_host_stats,
+	.reset_fc_host_stats = zfcp_scsi_reset_fc_host_stats,
+	.set_rport_dev_loss_tmo = zfcp_scsi_set_rport_dev_loss_tmo,
+	.get_host_port_state = zfcp_scsi_get_host_port_state,
 	.terminate_rport_io = zfcp_scsi_terminate_rport_io,
 	.show_host_port_state = 1,
 	.show_host_active_fc4s = 1,
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3ac823f2540f..b277be6f7611 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -200,10 +200,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
 		goto out;
 	}
 
-	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
-	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"syafai2");
-	zfcp_erp_wait(adapter);
+	zfcp_erp_adapter_reset_sync(adapter, "syafai2");
 out:
 	zfcp_ccw_adapter_put(adapter);
 	return retval ? retval : (ssize_t) count;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b42c9c479d4b..99ba4a770406 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -882,6 +882,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file)
 	unsigned int minor_number;
 	int retval = TW_IOCTL_ERROR_OS_ENODEV;
 
+	if (!capable(CAP_SYS_ADMIN)) {
+		retval = -EACCES;
+		goto out;
+	}
+
 	minor_number = iminor(inode);
 	if (minor_number >= twa_device_extension_count)
 		goto out;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 33261b690774..f6179e3d6953 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1033,6 +1033,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
 
 	dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	minor_number = iminor(inode);
 	if (minor_number >= tw_device_extension_count)
 		return -ENODEV;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 11e89e56b865..35c909bbf8ba 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1351,6 +1351,20 @@ config SCSI_ZORRO7XX
 	  accelerator card for the Amiga 1200,
 	  - the SCSI controller on the GVP Turbo 040/060 accelerator.
 
+config SCSI_ZORRO_ESP
+	tristate "Zorro ESP SCSI support"
+	depends on ZORRO && SCSI
+	select SCSI_SPI_ATTRS
+	help
+	  Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
+	  expansion boards for the Amiga.
+	  This includes:
+	  - the Phase5 Blizzard 1230 II and IV SCSI controllers,
+	  - the Phase5 Blizzard 2060 SCSI controller,
+	  - the Phase5 Blizzard Cyberstorm and Cyberstorm II SCSI
+	    controllers,
+	  - the Fastlane Zorro III SCSI controller.
+
 config ATARI_SCSI
 	tristate "Atari native SCSI support"
 	depends on ATARI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 56c940394729..80aca2456353 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
 obj-$(CONFIG_ISCSI_BOOT_SYSFS)	+= iscsi_boot_sysfs.o
 obj-$(CONFIG_SCSI_A4000T)	+= 53c700.o a4000t.o
 obj-$(CONFIG_SCSI_ZORRO7XX)	+= 53c700.o zorro7xx.o
+obj-$(CONFIG_SCSI_ZORRO_ESP)	+= esp_scsi.o zorro_esp.o
 obj-$(CONFIG_A3000_SCSI)	+= a3000.o wd33c93.o
 obj-$(CONFIG_A2091_SCSI)	+= a2091.o wd33c93.o
 obj-$(CONFIG_GVP11_SCSI)	+= gvp11.o wd33c93.o
@@ -189,7 +190,7 @@ $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
 $(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
 
 quiet_cmd_bflags = GEN     $@
-	cmd_bflags = sed -n 's/.*BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
+	cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
 
 $(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
 	$(call if_changed,bflags)
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 8086bd0ac9fd..b2942ec3d455 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1222,19 +1222,8 @@ static struct pci_driver inia100_pci_driver = {
 	.remove		= inia100_remove_one,
 };
 
-static int __init inia100_init(void)
-{
-	return pci_register_driver(&inia100_pci_driver);
-}
-
-static void __exit inia100_exit(void)
-{
-	pci_unregister_driver(&inia100_pci_driver);
-}
+module_pci_driver(inia100_pci_driver);
 
 MODULE_DESCRIPTION("Initio A100U2W SCSI driver");
 MODULE_AUTHOR("Initio Corporation");
 MODULE_LICENSE("Dual BSD/GPL");
-
-module_init(inia100_init);
-module_exit(inia100_exit);
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index beea30e5a34a..d81ca66e24d6 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -556,15 +556,7 @@ static struct pci_driver am53c974_driver = {
 	.remove		= pci_esp_remove_one,
 };
 
-static int __init am53c974_module_init(void)
-{
-	return pci_register_driver(&am53c974_driver);
-}
-
-static void __exit am53c974_module_exit(void)
-{
-	pci_unregister_driver(&am53c974_driver);
-}
+module_pci_driver(am53c974_driver);
 
 MODULE_DESCRIPTION("AM53C974 SCSI driver");
 MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
@@ -577,6 +569,3 @@ MODULE_PARM_DESC(am53c974_debug, "Enable debugging");
 
 module_param(am53c974_fenab, bool, 0444);
 MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");
-
-module_init(am53c974_module_init);
-module_exit(am53c974_module_exit);
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
index a011c5dbf214..f1b17e3efb3f 100644
--- a/drivers/scsi/cxlflash/Kconfig
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -4,7 +4,7 @@
 
 config CXLFLASH
 	tristate "Support for IBM CAPI Flash"
-	depends on PCI && SCSI && CXL && EEH
+	depends on PCI && SCSI && (CXL || OCXL) && EEH
 	select IRQ_POLL
 	default m
 	help
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 7ec3f6b55dde..283377d8f6fb 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,2 +1,4 @@
 obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
+cxlflash-$(CONFIG_CXL) += cxl_hw.o
+cxlflash-$(CONFIG_OCXL) += ocxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
index 339e42b03c49..55638d19c2fd 100644
--- a/drivers/scsi/cxlflash/backend.h
+++ b/drivers/scsi/cxlflash/backend.h
@@ -12,30 +12,41 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#ifndef _CXLFLASH_BACKEND_H
+#define _CXLFLASH_BACKEND_H
+
 extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
+extern const struct cxlflash_backend_ops cxlflash_ocxl_ops;
 
 struct cxlflash_backend_ops {
 	struct module *module;
-	void __iomem * (*psa_map)(void *);
-	void (*psa_unmap)(void __iomem *);
-	int (*process_element)(void *);
-	int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
-	void (*unmap_afu_irq)(void *, int, void *);
-	int (*start_context)(void *);
-	int (*stop_context)(void *);
-	int (*afu_reset)(void *);
-	void (*set_master)(void *);
-	void * (*get_context)(struct pci_dev *, void *);
-	void * (*dev_context_init)(struct pci_dev *, void *);
-	int (*release_context)(void *);
-	void (*perst_reloads_same_image)(void *, bool);
-	ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
-	int (*allocate_afu_irqs)(void *, int);
-	void (*free_afu_irqs)(void *);
-	void * (*create_afu)(struct pci_dev *);
-	struct file * (*get_fd)(void *, struct file_operations *, int *);
-	void * (*fops_get_context)(struct file *);
-	int (*start_work)(void *, u64);
-	int (*fd_mmap)(struct file *, struct vm_area_struct *);
-	int (*fd_release)(struct inode *, struct file *);
+	void __iomem * (*psa_map)(void *ctx_cookie);
+	void (*psa_unmap)(void __iomem *addr);
+	int (*process_element)(void *ctx_cookie);
+	int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler,
+			   void *cookie, char *name);
+	void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie);
+	u64 (*get_irq_objhndl)(void *ctx_cookie, int irq);
+	int (*start_context)(void *ctx_cookie);
+	int (*stop_context)(void *ctx_cookie);
+	int (*afu_reset)(void *ctx_cookie);
+	void (*set_master)(void *ctx_cookie);
+	void * (*get_context)(struct pci_dev *dev, void *afu_cookie);
+	void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
+	int (*release_context)(void *ctx_cookie);
+	void (*perst_reloads_same_image)(void *afu_cookie, bool image);
+	ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf,
+				    size_t count);
+	int (*allocate_afu_irqs)(void *ctx_cookie, int num);
+	void (*free_afu_irqs)(void *ctx_cookie);
+	void * (*create_afu)(struct pci_dev *dev);
+	void (*destroy_afu)(void *afu_cookie);
+	struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops,
+				int *fd);
+	void * (*fops_get_context)(struct file *file);
+	int (*start_work)(void *ctx_cookie, u64 irqs);
+	int (*fd_mmap)(struct file *file, struct vm_area_struct *vm);
+	int (*fd_release)(struct inode *inode, struct file *file);
 };
+
+#endif /* _CXLFLASH_BACKEND_H */
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 102fd26ca886..8908a20065c8 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -211,6 +211,7 @@ struct hwq {
 	struct sisl_ctrl_map __iomem *ctrl_map;	/* MC control map */
 	ctx_hndl_t ctx_hndl;	/* master's context handle */
 	u32 index;		/* Index of this hwq */
+	int num_irqs;		/* Number of interrupts requested for context */
 	struct list_head pending_cmds;	/* Commands pending completion */
 
 	atomic_t hsq_credits;
@@ -223,6 +224,7 @@ struct hwq {
 	u64 *hrrq_end;
 	u64 *hrrq_curr;
 	bool toggle;
+	bool hrrq_online;
 
 	s64 room;
 
@@ -231,13 +233,14 @@ struct hwq {
 
 struct afu {
 	struct hwq hwqs[CXLFLASH_MAX_HWQS];
-	int (*send_cmd)(struct afu *, struct afu_cmd *);
-	int (*context_reset)(struct hwq *);
+	int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd);
+	int (*context_reset)(struct hwq *hwq);
 
 	/* AFU HW */
 	struct cxlflash_afu_map __iomem *afu_map;	/* entire MMIO map */
 
 	atomic_t cmds_active;	/* Number of currently active AFU commands */
+	struct mutex sync_active;	/* Mutex to serialize AFU commands */
 	u64 hb;
 	u32 internal_lun;	/* User-desired LUN mode for this AFU */
 
@@ -272,6 +275,11 @@ static inline bool afu_has_cap(struct afu *afu, u64 cap)
 	return afu_cap & cap;
 }
 
+static inline bool afu_is_ocxl_lisn(struct afu *afu)
+{
+	return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN);
+}
+
 static inline bool afu_is_afu_debug(struct afu *afu)
 {
 	return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
index db1cadad5c5d..b42da88386bd 100644
--- a/drivers/scsi/cxlflash/cxl_hw.c
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -49,6 +49,12 @@ static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
 	cxl_unmap_afu_irq(ctx_cookie, num, cookie);
 }
 
+static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
+{
+	/* Dummy fop for cxl */
+	return 0;
+}
+
 static int cxlflash_start_context(void *ctx_cookie)
 {
 	return cxl_start_context(ctx_cookie, 0, NULL);
@@ -110,6 +116,11 @@ static void *cxlflash_create_afu(struct pci_dev *dev)
 	return cxl_pci_to_afu(dev);
 }
 
+static void cxlflash_destroy_afu(void *afu)
+{
+	/* Dummy fop for cxl */
+}
+
 static struct file *cxlflash_get_fd(void *ctx_cookie,
 				    struct file_operations *fops, int *fd)
 {
@@ -148,6 +159,7 @@ const struct cxlflash_backend_ops cxlflash_cxl_ops = {
 	.process_element = cxlflash_process_element,
 	.map_afu_irq = cxlflash_map_afu_irq,
 	.unmap_afu_irq = cxlflash_unmap_afu_irq,
+	.get_irq_objhndl = cxlflash_get_irq_objhndl,
 	.start_context = cxlflash_start_context,
 	.stop_context = cxlflash_stop_context,
 	.afu_reset = cxlflash_afu_reset,
@@ -160,6 +172,7 @@ const struct cxlflash_backend_ops cxlflash_cxl_ops = {
 	.allocate_afu_irqs = cxlflash_allocate_afu_irqs,
 	.free_afu_irqs = cxlflash_free_afu_irqs,
 	.create_afu = cxlflash_create_afu,
+	.destroy_afu = cxlflash_destroy_afu,
 	.get_fd = cxlflash_get_fd,
 	.fops_get_context = cxlflash_fops_get_context,
 	.start_work = cxlflash_start_work,
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index 4d232e271af6..edea1255fdab 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -12,9 +12,11 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <misc/cxl.h>
 #include <asm/unaligned.h>
 
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
 #include <scsi/scsi_host.h>
 #include <uapi/scsi/cxlflash_ioctl.h>
 
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index d8fe7ab870b8..6637116529aa 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -19,8 +19,6 @@
 
 #include <asm/unaligned.h>
 
-#include <misc/cxl.h>
-
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
 #include <uapi/scsi/cxlflash_ioctl.h>
@@ -339,8 +337,8 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
 	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
 out:
 	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
-		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
+	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
+		__func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
 	return rc;
 }
 
@@ -473,6 +471,7 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
 	struct afu_cmd *cmd = NULL;
 	struct device *dev = &cfg->dev->dev;
 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
+	bool needs_deletion = false;
 	char *buf = NULL;
 	ulong lock_flags;
 	int rc = 0;
@@ -527,6 +526,7 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
 	if (!to) {
 		dev_err(dev, "%s: TMF timed out\n", __func__);
 		rc = -ETIMEDOUT;
+		needs_deletion = true;
 	} else if (cmd->cmd_aborted) {
 		dev_err(dev, "%s: TMF aborted\n", __func__);
 		rc = -EAGAIN;
@@ -537,6 +537,12 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
 	}
 	cfg->tmf_active = false;
 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
+
+	if (needs_deletion) {
+		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+		list_del(&cmd->list);
+		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
+	}
 out:
 	kfree(buf);
 	return rc;
@@ -608,6 +614,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 		rc = 0;
 		goto out;
 	default:
+		atomic_inc(&afu->cmds_active);
 		break;
 	}
 
@@ -633,6 +640,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
 
 	rc = afu->send_cmd(afu, cmd);
+	atomic_dec(&afu->cmds_active);
 out:
 	return rc;
 }
@@ -793,6 +801,10 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
 	WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
 	hwq->ctx_cookie = NULL;
 
+	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
+	hwq->hrrq_online = false;
+	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
+
 	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
 	flush_pending_cmds(hwq);
 	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
@@ -946,9 +958,9 @@ static void cxlflash_remove(struct pci_dev *pdev)
 		return;
 	}
 
-	/* If a Task Management Function is active, wait for it to complete
-	 * before continuing with remove.
-	 */
+	/* Yield to running recovery threads before continuing with remove */
+	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
+				     cfg->state != STATE_PROBING);
 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 	if (cfg->tmf_active)
 		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
@@ -971,6 +983,7 @@ static void cxlflash_remove(struct pci_dev *pdev)
 	case INIT_STATE_AFU:
 		term_afu(cfg);
 	case INIT_STATE_PCI:
+		cfg->ops->destroy_afu(cfg->afu_cookie);
 		pci_disable_device(pdev);
 	case INIT_STATE_NONE:
 		free_mem(cfg);
@@ -1303,7 +1316,10 @@ static void afu_err_intr_init(struct afu *afu)
 	for (i = 0; i < afu->num_hwqs; i++) {
 		hwq = get_hwq(afu, i);
 
-		writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
+		reg = readq_be(&hwq->host_map->ctx_ctrl);
+		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
+		reg |= SISL_MSI_SYNC_ERROR;
+		writeq_be(reg, &hwq->host_map->ctx_ctrl);
 		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
 	}
 }
@@ -1463,6 +1479,12 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
 
 	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
 
+	/* Silently drop spurious interrupts when queue is not online */
+	if (!hwq->hrrq_online) {
+		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
+		return IRQ_HANDLED;
+	}
+
 	if (afu_is_irqpoll_enabled(afu)) {
 		irq_poll_sched(&hwq->irqpoll);
 		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
@@ -1752,6 +1774,8 @@ static int init_global(struct cxlflash_cfg *cfg)
 	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
 	int i = 0, num_ports = 0;
 	int rc = 0;
+	int j;
+	void *ctx;
 	u64 reg;
 
 	rc = read_vpd(cfg, &wwpn[0]);
@@ -1767,6 +1791,7 @@ static int init_global(struct cxlflash_cfg *cfg)
 
 		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
 		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
+		hwq->hrrq_online = true;
 
 		if (afu_is_sq_cmd_mode(afu)) {
 			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
@@ -1812,6 +1837,25 @@ static int init_global(struct cxlflash_cfg *cfg)
 		msleep(100);
 	}
 
+	if (afu_is_ocxl_lisn(afu)) {
+		/* Set up the LISN effective address for each master */
+		for (i = 0; i < afu->num_hwqs; i++) {
+			hwq = get_hwq(afu, i);
+			ctx = hwq->ctx_cookie;
+
+			for (j = 0; j < hwq->num_irqs; j++) {
+				reg = cfg->ops->get_irq_objhndl(ctx, j);
+				writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
+			}
+
+			reg = hwq->ctx_hndl;
+			writeq_be(SISL_LISN_PASID(reg, reg),
+				  &hwq->ctrl_map->lisn_pasid[0]);
+			writeq_be(SISL_LISN_PASID(0UL, reg),
+				  &hwq->ctrl_map->lisn_pasid[1]);
+		}
+	}
+
 	/* Set up master's own CTX_CAP to allow real mode, host translation */
 	/* tables, afu cmds and read/write GSCSI cmds. */
 	/* First, unlock ctx_cap write by reading mbox */
@@ -1911,7 +1955,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
 	int rc = 0;
 	enum undo_level level = UNDO_NOOP;
 	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
-	int num_irqs = is_primary_hwq ? 3 : 2;
+	int num_irqs = hwq->num_irqs;
 
 	rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
 	if (unlikely(rc)) {
@@ -1965,16 +2009,20 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
 	struct device *dev = &cfg->dev->dev;
 	struct hwq *hwq = get_hwq(cfg->afu, index);
 	int rc = 0;
+	int num_irqs;
 	enum undo_level level;
 
 	hwq->afu = cfg->afu;
 	hwq->index = index;
 	INIT_LIST_HEAD(&hwq->pending_cmds);
 
-	if (index == PRIMARY_HWQ)
+	if (index == PRIMARY_HWQ) {
 		ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
-	else
+		num_irqs = 3;
+	} else {
 		ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
+		num_irqs = 2;
+	}
 	if (IS_ERR_OR_NULL(ctx)) {
 		rc = -ENOMEM;
 		goto err1;
@@ -1982,6 +2030,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
 
 	WARN_ON(hwq->ctx_cookie);
 	hwq->ctx_cookie = ctx;
+	hwq->num_irqs = num_irqs;
 
 	/* Set it up as a master with the CXL */
 	cfg->ops->set_master(ctx);
@@ -2075,6 +2124,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
 
 	cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
 
+	mutex_init(&afu->sync_active);
 	afu->num_hwqs = afu->desired_hwqs;
 	for (i = 0; i < afu->num_hwqs; i++) {
 		rc = init_mc(cfg, i);
@@ -2254,10 +2304,10 @@ static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
 	struct device *dev = &cfg->dev->dev;
 	struct afu_cmd *cmd = NULL;
 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
+	ulong lock_flags;
 	char *buf = NULL;
 	int rc = 0;
 	int nretry = 0;
-	static DEFINE_MUTEX(sync_active);
 
 	if (cfg->state != STATE_NORMAL) {
 		dev_dbg(dev, "%s: Sync not required state=%u\n",
@@ -2265,7 +2315,7 @@ static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
 		return 0;
 	}
 
-	mutex_lock(&sync_active);
+	mutex_lock(&afu->sync_active);
 	atomic_inc(&afu->cmds_active);
 	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
 	if (unlikely(!buf)) {
@@ -2299,6 +2349,11 @@ retry:
 	case -ETIMEDOUT:
 		rc = afu->context_reset(hwq);
 		if (rc) {
+			/* Delete the command from pending_cmds list */
+			spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+			list_del(&cmd->list);
+			spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
+
 			cxlflash_schedule_async_reset(cfg);
 			break;
 		}
@@ -2315,7 +2370,7 @@ retry:
 		*rcb->ioasa = cmd->sa;
 out:
 	atomic_dec(&afu->cmds_active);
-	mutex_unlock(&sync_active);
+	mutex_unlock(&afu->sync_active);
 	kfree(buf);
 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
 	return rc;
@@ -3138,7 +3193,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
 					CXLFLASH_NOTIFY_SHUTDOWN };
 static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
-					CXLFLASH_NOTIFY_SHUTDOWN };
+					(CXLFLASH_NOTIFY_SHUTDOWN |
+					CXLFLASH_OCXL_DEV) };
 
 /*
  * PCI device binding table
@@ -3649,8 +3705,9 @@ static int cxlflash_probe(struct pci_dev *pdev,
 
 	cfg->init_state = INIT_STATE_NONE;
 	cfg->dev = pdev;
-	cfg->ops = &cxlflash_cxl_ops;
 	cfg->cxl_fops = cxlflash_cxl_fops;
+	cfg->ops = cxlflash_assign_ops(ddv);
+	WARN_ON_ONCE(!cfg->ops);
 
 	/*
 	 * Promoted LUNs move to the top of the LUN table. The rest stay on
@@ -3681,8 +3738,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, cfg);
 
-	cfg->afu_cookie = cfg->ops->create_afu(pdev);
-
 	rc = init_pci(cfg);
 	if (rc) {
 		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
@@ -3690,6 +3745,12 @@ static int cxlflash_probe(struct pci_dev *pdev,
 	}
 	cfg->init_state = INIT_STATE_PCI;
 
+	cfg->afu_cookie = cfg->ops->create_afu(pdev);
+	if (unlikely(!cfg->afu_cookie)) {
+		dev_err(dev, "%s: create_afu failed\n", __func__);
+		goto out_remove;
+	}
+
 	rc = init_afu(cfg);
 	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
 		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index ba0108a7a9c2..2a3977823812 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -20,6 +20,8 @@
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 
+#include "backend.h"
+
 #define CXLFLASH_NAME		"cxlflash"
 #define CXLFLASH_ADAPTER_NAME	"IBM POWER CXL Flash Adapter"
 #define CXLFLASH_MAX_ADAPTERS	32
@@ -97,8 +99,27 @@ struct dev_dependent_vals {
 	u64 flags;
 #define CXLFLASH_NOTIFY_SHUTDOWN	0x0000000000000001ULL
 #define CXLFLASH_WWPN_VPD_REQUIRED	0x0000000000000002ULL
+#define CXLFLASH_OCXL_DEV		0x0000000000000004ULL
 };
 
+static inline const struct cxlflash_backend_ops *
+cxlflash_assign_ops(struct dev_dependent_vals *ddv)
+{
+	const struct cxlflash_backend_ops *ops = NULL;
+
+#ifdef CONFIG_OCXL
+	if (ddv->flags & CXLFLASH_OCXL_DEV)
+		ops = &cxlflash_ocxl_ops;
+#endif
+
+#ifdef CONFIG_CXL
+	if (!(ddv->flags & CXLFLASH_OCXL_DEV))
+		ops = &cxlflash_cxl_ops;
+#endif
+
+	return ops;
+}
+
 struct asyc_intr_info {
 	u64 status;
 	char *desc;
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
new file mode 100644
index 000000000000..0a95b5f25380
--- /dev/null
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -0,0 +1,1436 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
5 * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2018 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/file.h>
16#include <linux/idr.h>
17#include <linux/module.h>
18#include <linux/mount.h>
19#include <linux/poll.h>
20#include <linux/sched/signal.h>
21
22#include <misc/ocxl.h>
23
24#include <uapi/misc/cxl.h>
25
26#include "backend.h"
27#include "ocxl_hw.h"
28
29/*
30 * Pseudo-filesystem to allocate inodes.
31 */
32
33#define OCXLFLASH_FS_MAGIC 0x1697698f
34
35static int ocxlflash_fs_cnt;
36static struct vfsmount *ocxlflash_vfs_mount;
37
38static const struct dentry_operations ocxlflash_fs_dops = {
39 .d_dname = simple_dname,
40};
41
42/*
43 * ocxlflash_fs_mount() - mount the pseudo-filesystem
44 * @fs_type: File system type.
45 * @flags: Flags for the filesystem.
46 * @dev_name: Device name associated with the filesystem.
47 * @data: Data pointer.
48 *
49 * Return: pointer to the directory entry structure
50 */
51static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
52 int flags, const char *dev_name,
53 void *data)
54{
55 return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
56 OCXLFLASH_FS_MAGIC);
57}
58
59static struct file_system_type ocxlflash_fs_type = {
60 .name = "ocxlflash",
61 .owner = THIS_MODULE,
62 .mount = ocxlflash_fs_mount,
63 .kill_sb = kill_anon_super,
64};
65
66/*
67 * ocxlflash_release_mapping() - release the memory mapping
68 * @ctx: Context whose mapping is to be released.
69 */
70static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
71{
72 if (ctx->mapping)
73 simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
74 ctx->mapping = NULL;
75}
76
77/*
78 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
79 * @dev: Generic device of the host.
80 * @name: Name of the pseudo filesystem.
81 * @fops: File operations.
82 * @priv: Private data.
83 * @flags: Flags for the file.
84 *
85 * Return: pointer to the file on success, ERR_PTR on failure
86 */
87static struct file *ocxlflash_getfile(struct device *dev, const char *name,
88 const struct file_operations *fops,
89 void *priv, int flags)
90{
91 struct qstr this;
92 struct path path;
93 struct file *file;
94 struct inode *inode = NULL;
95 int rc;
96
97 if (fops->owner && !try_module_get(fops->owner)) {
98 dev_err(dev, "%s: Owner does not exist\n", __func__);
99 rc = -ENOENT;
100 goto err1;
101 }
102
103 rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
104 &ocxlflash_fs_cnt);
105 if (unlikely(rc < 0)) {
106 dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
107 __func__, rc);
108 goto err2;
109 }
110
111 inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
112 if (IS_ERR(inode)) {
113 rc = PTR_ERR(inode);
114 dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
115 __func__, rc);
116 goto err3;
117 }
118
119 this.name = name;
120 this.len = strlen(name);
121 this.hash = 0;
122 path.dentry = d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this);
123 if (!path.dentry) {
124 dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__);
125 rc = -ENOMEM;
126 goto err4;
127 }
128
129 path.mnt = mntget(ocxlflash_vfs_mount);
130 d_instantiate(path.dentry, inode);
131
132 file = alloc_file(&path, OPEN_FMODE(flags), fops);
133 if (IS_ERR(file)) {
134 rc = PTR_ERR(file);
135 dev_err(dev, "%s: alloc_file failed rc=%d\n",
136 __func__, rc);
137 goto err5;
138 }
139
140 file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
141 file->private_data = priv;
142out:
143 return file;
144err5:
145 path_put(&path);
146err4:
147 iput(inode);
148err3:
149 simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
150err2:
151 module_put(fops->owner);
152err1:
153 file = ERR_PTR(rc);
154 goto out;
155}
156
157/**
158 * ocxlflash_psa_map() - map the process specific MMIO space
159 * @ctx_cookie: Adapter context for which the mapping needs to be done.
160 *
161 * Return: MMIO pointer of the mapped region
162 */
163static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
164{
165 struct ocxlflash_context *ctx = ctx_cookie;
166 struct device *dev = ctx->hw_afu->dev;
167
168 mutex_lock(&ctx->state_mutex);
169 if (ctx->state != STARTED) {
170 dev_err(dev, "%s: Context not started, state=%d\n", __func__,
171 ctx->state);
172 mutex_unlock(&ctx->state_mutex);
173 return NULL;
174 }
175 mutex_unlock(&ctx->state_mutex);
176
177 return ioremap(ctx->psn_phys, ctx->psn_size);
178}
179
180/**
181 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
182 * @addr: MMIO pointer to unmap.
183 */
184static void ocxlflash_psa_unmap(void __iomem *addr)
185{
186 iounmap(addr);
187}
188
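Callers reach this pair through the backend ops table; a sketch of the intended usage (ctx and the register offset are illustrative, readq_be() as used elsewhere in the driver):

	/* Sketch: map the started context's PSA, read one register, unmap */
	void __iomem *map = cfg->ops->psa_map(ctx);	/* NULL unless state == STARTED */

	if (!map)
		return -ENXIO;
	val = readq_be(map + 0x8);			/* 0x8 is an illustrative offset */
	cfg->ops->psa_unmap(map);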
189/**
190 * ocxlflash_process_element() - get process element of the adapter context
191 * @ctx_cookie: Adapter context associated with the process element.
192 *
193 * Return: process element of the adapter context
194 */
195static int ocxlflash_process_element(void *ctx_cookie)
196{
197 struct ocxlflash_context *ctx = ctx_cookie;
198
199 return ctx->pe;
200}
201
202/**
203 * afu_map_irq() - map the interrupt of the adapter context
204 * @flags: Flags.
205 * @ctx: Adapter context.
206 * @num: Per-context AFU interrupt number.
207 * @handler: Interrupt handler to register.
208 * @cookie: Interrupt handler private data.
209 * @name: Name of the interrupt.
210 *
211 * Return: 0 on success, -errno on failure
212 */
213static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
214 irq_handler_t handler, void *cookie, char *name)
215{
216 struct ocxl_hw_afu *afu = ctx->hw_afu;
217 struct device *dev = afu->dev;
218 struct ocxlflash_irqs *irq;
219 void __iomem *vtrig;
220 u32 virq;
221 int rc = 0;
222
223 if (num < 0 || num >= ctx->num_irqs) {
224 dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
225 rc = -ENOENT;
226 goto out;
227 }
228
229 irq = &ctx->irqs[num];
230 virq = irq_create_mapping(NULL, irq->hwirq);
231 if (unlikely(!virq)) {
232 dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
233 rc = -ENOMEM;
234 goto out;
235 }
236
237 rc = request_irq(virq, handler, 0, name, cookie);
238 if (unlikely(rc)) {
239 dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
240 goto err1;
241 }
242
243 vtrig = ioremap(irq->ptrig, PAGE_SIZE);
244 if (unlikely(!vtrig)) {
245 dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
246 rc = -ENOMEM;
247 goto err2;
248 }
249
250 irq->virq = virq;
251 irq->vtrig = vtrig;
252out:
253 return rc;
254err2:
255 free_irq(virq, cookie);
256err1:
257 irq_dispose_mapping(virq);
258 goto out;
259}
260
261/**
262 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
263 * @ctx_cookie: Adapter context.
264 * @num: Per-context AFU interrupt number.
265 * @handler: Interrupt handler to register.
266 * @cookie: Interrupt handler private data.
267 * @name: Name of the interrupt.
268 *
269 * Return: 0 on success, -errno on failure
270 */
271static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
272 irq_handler_t handler, void *cookie,
273 char *name)
274{
275 return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
276}
277
278/**
279 * afu_unmap_irq() - unmap the interrupt
280 * @flags: Flags.
281 * @ctx: Adapter context.
282 * @num: Per-context AFU interrupt number.
283 * @cookie: Interrupt handler private data.
284 */
285static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
286 void *cookie)
287{
288 struct ocxl_hw_afu *afu = ctx->hw_afu;
289 struct device *dev = afu->dev;
290 struct ocxlflash_irqs *irq;
291
292 if (num < 0 || num >= ctx->num_irqs) {
293 dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
294 return;
295 }
296
297 irq = &ctx->irqs[num];
298 if (irq->vtrig)
299 iounmap(irq->vtrig);
300
301 if (irq_find_mapping(NULL, irq->hwirq)) {
302 free_irq(irq->virq, cookie);
303 irq_dispose_mapping(irq->virq);
304 }
305
306 memset(irq, 0, sizeof(*irq));
307}
308
309/**
310 * ocxlflash_unmap_afu_irq() - unmap the interrupt
311 * @ctx_cookie: Adapter context.
312 * @num: Per-context AFU interrupt number.
313 * @cookie: Interrupt handler private data.
314 */
315static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
316{
317 return afu_unmap_irq(0, ctx_cookie, num, cookie);
318}
319
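Map and unmap are symmetric per interrupt number and must use the same cookie; a sketch of one consumer (the handler, cookie and name are hypothetical):

	/* Sketch: wire and later tear down per-context AFU interrupt 0 */
	rc = cfg->ops->map_afu_irq(ctx, 0, my_handler, my_cookie, "cxlflash-irq0");
	if (unlikely(rc))
		return rc;
	/* ... interrupt is live here ... */
	cfg->ops->unmap_afu_irq(ctx, 0, my_cookie);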
320/**
321 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
322 * @ctx_cookie: Context associated with the interrupt.
323 * @irq: Interrupt number.
324 *
325 * Return: effective address of the mapped region
326 */
327static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
328{
329 struct ocxlflash_context *ctx = ctx_cookie;
330
331 if (irq < 0 || irq >= ctx->num_irqs)
332 return 0;
333
334 return (__force u64)ctx->irqs[irq].vtrig;
335}
336
337/**
338 * ocxlflash_xsl_fault() - callback when translation error is triggered
339 * @data: Private data provided at callback registration, the context.
340 * @addr: Address that triggered the error.
341 * @dsisr: Value of dsisr register.
342 */
343static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
344{
345 struct ocxlflash_context *ctx = data;
346
347 spin_lock(&ctx->slock);
348 ctx->fault_addr = addr;
349 ctx->fault_dsisr = dsisr;
350 ctx->pending_fault = true;
351 spin_unlock(&ctx->slock);
352
353 wake_up_all(&ctx->wq);
354}
355
356/**
357 * start_context() - local routine to start a context
358 * @ctx: Adapter context to be started.
359 *
360 * Assign the context specific MMIO space, add and enable the PE.
361 *
362 * Return: 0 on success, -errno on failure
363 */
364static int start_context(struct ocxlflash_context *ctx)
365{
366 struct ocxl_hw_afu *afu = ctx->hw_afu;
367 struct ocxl_afu_config *acfg = &afu->acfg;
368 void *link_token = afu->link_token;
369 struct device *dev = afu->dev;
370 bool master = ctx->master;
371 struct mm_struct *mm;
372 int rc = 0;
373 u32 pid;
374
375 mutex_lock(&ctx->state_mutex);
376 if (ctx->state != OPENED) {
377 dev_err(dev, "%s: Context state invalid, state=%d\n",
378 __func__, ctx->state);
379 rc = -EINVAL;
380 goto out;
381 }
382
383 if (master) {
384 ctx->psn_size = acfg->global_mmio_size;
385 ctx->psn_phys = afu->gmmio_phys;
386 } else {
387 ctx->psn_size = acfg->pp_mmio_stride;
388 ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
389 }
390
391 /* pid and mm not set for master contexts */
392 if (master) {
393 pid = 0;
394 mm = NULL;
395 } else {
396 pid = current->mm->context.id;
397 mm = current->mm;
398 }
399
400 rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
401 ocxlflash_xsl_fault, ctx);
402 if (unlikely(rc)) {
403 dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
404 __func__, rc);
405 goto out;
406 }
407
408 ctx->state = STARTED;
409out:
410 mutex_unlock(&ctx->state_mutex);
411 return rc;
412}
413
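For slave contexts the PSA is carved out of the per-process MMIO BAR by process element; for instance, with the stride at 64KB, PE 5 lands 0x50000 bytes into the region (numbers illustrative, mirroring the else branch above):

	/* Sketch: slave PSA placement by process element */
	u64 stride = 0x10000;					/* acfg->pp_mmio_stride */
	phys_addr_t psa = afu->ppmmio_phys + (5 * stride);	/* PE 5 -> +0x50000 */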
414/**
415 * ocxlflash_start_context() - start a kernel context
416 * @ctx_cookie: Adapter context to be started.
417 *
418 * Return: 0 on success, -errno on failure
419 */
420static int ocxlflash_start_context(void *ctx_cookie)
421{
422 struct ocxlflash_context *ctx = ctx_cookie;
423
424 return start_context(ctx);
425}
426
427/**
428 * ocxlflash_stop_context() - stop a context
429 * @ctx_cookie: Adapter context to be stopped.
430 *
431 * Return: 0 on success, -errno on failure
432 */
433static int ocxlflash_stop_context(void *ctx_cookie)
434{
435 struct ocxlflash_context *ctx = ctx_cookie;
436 struct ocxl_hw_afu *afu = ctx->hw_afu;
437 struct ocxl_afu_config *acfg = &afu->acfg;
438 struct pci_dev *pdev = afu->pdev;
439 struct device *dev = afu->dev;
440 enum ocxlflash_ctx_state state;
441 int rc = 0;
442
443 mutex_lock(&ctx->state_mutex);
444 state = ctx->state;
445 ctx->state = CLOSED;
446 mutex_unlock(&ctx->state_mutex);
447 if (state != STARTED)
448 goto out;
449
450 rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
451 ctx->pe);
452 if (unlikely(rc)) {
453 dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
454 __func__, rc);
455 /* If EBUSY, PE could be referenced in future by the AFU */
456 if (rc == -EBUSY)
457 goto out;
458 }
459
460 rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
461 if (unlikely(rc)) {
462 dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
463 __func__, rc);
464 goto out;
465 }
466out:
467 return rc;
468}
469
470/**
471 * ocxlflash_afu_reset() - reset the AFU
472 * @ctx_cookie: Adapter context.
473 */
474static int ocxlflash_afu_reset(void *ctx_cookie)
475{
476 struct ocxlflash_context *ctx = ctx_cookie;
477 struct device *dev = ctx->hw_afu->dev;
478
479 /* Pending implementation from OCXL transport services */
480 dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
481
482 /* Silently return success until it is implemented */
483 return 0;
484}
485
486/**
487 * ocxlflash_set_master() - sets the context as master
488 * @ctx_cookie: Adapter context to set as master.
489 */
490static void ocxlflash_set_master(void *ctx_cookie)
491{
492 struct ocxlflash_context *ctx = ctx_cookie;
493
494 ctx->master = true;
495}
496
497/**
498 * ocxlflash_get_context() - obtains the context associated with the host
499 * @pdev: PCI device associated with the host.
500 * @afu_cookie: Hardware AFU associated with the host.
501 *
502 * Return: pointer to the host adapter context
503 */
504static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
505{
506 struct ocxl_hw_afu *afu = afu_cookie;
507
508 return afu->ocxl_ctx;
509}
510
511/**
512 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
513 * @pdev: PCI device associated with the host.
514 * @afu_cookie: Hardware AFU associated with the host.
515 *
516 * Return: adapter context on success, ERR_PTR on failure
517 */
518static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
519{
520 struct ocxl_hw_afu *afu = afu_cookie;
521 struct device *dev = afu->dev;
522 struct ocxlflash_context *ctx;
523 int rc;
524
525 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
526 if (unlikely(!ctx)) {
527 dev_err(dev, "%s: Context allocation failed\n", __func__);
528 rc = -ENOMEM;
529 goto err1;
530 }
531
532 idr_preload(GFP_KERNEL);
533 rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
534 idr_preload_end();
535 if (unlikely(rc < 0)) {
536 dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
537 goto err2;
538 }
539
540 spin_lock_init(&ctx->slock);
541 init_waitqueue_head(&ctx->wq);
542 mutex_init(&ctx->state_mutex);
543
544 ctx->state = OPENED;
545 ctx->pe = rc;
546 ctx->master = false;
547 ctx->mapping = NULL;
548 ctx->hw_afu = afu;
549 ctx->irq_bitmap = 0;
550 ctx->pending_irq = false;
551 ctx->pending_fault = false;
552out:
553 return ctx;
554err2:
555 kfree(ctx);
556err1:
557 ctx = ERR_PTR(rc);
558 goto out;
559}
560
561/**
562 * ocxlflash_release_context() - releases an adapter context
563 * @ctx_cookie: Adapter context to be released.
564 *
565 * Return: 0 on success, -errno on failure
566 */
567static int ocxlflash_release_context(void *ctx_cookie)
568{
569 struct ocxlflash_context *ctx = ctx_cookie;
570 struct device *dev;
571 int rc = 0;
572
573 if (!ctx)
574 goto out;
575
576 dev = ctx->hw_afu->dev;
577 mutex_lock(&ctx->state_mutex);
578 if (ctx->state >= STARTED) {
579 dev_err(dev, "%s: Context in use, state=%d\n", __func__,
580 ctx->state);
581 mutex_unlock(&ctx->state_mutex);
582 rc = -EBUSY;
583 goto out;
584 }
585 mutex_unlock(&ctx->state_mutex);
586
587 idr_remove(&ctx->hw_afu->idr, ctx->pe);
588 ocxlflash_release_mapping(ctx);
589 kfree(ctx);
590out:
591 return rc;
592}
593
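Together with the start and stop entry points above, the expected lifecycle through the backend ops runs OPENED, STARTED, CLOSED, then free; a sketch with error handling elided:

	/* Sketch: full adapter context lifecycle via the ops table */
	ctx = cfg->ops->dev_context_init(pdev, afu_cookie);	/* state = OPENED */
	rc = cfg->ops->start_context(ctx);			/* state = STARTED */
	/* ... issue work on the context ... */
	cfg->ops->stop_context(ctx);				/* state = CLOSED */
	cfg->ops->release_context(ctx);				/* PE and memory freed */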
594/**
595 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
596 * @afu_cookie: Hardware AFU associated with the host.
597 * @image: Whether to load the same image on PERST.
598 */
599static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
600{
601 struct ocxl_hw_afu *afu = afu_cookie;
602
603 afu->perst_same_image = image;
604}
605
606/**
607 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
608 * @pdev: PCI device associated with the host.
609 * @buf: Buffer to get the VPD data.
610 * @count: Size of buffer (maximum bytes that can be read).
611 *
612 * Return: size of VPD on success, -errno on failure
613 */
614static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
615 size_t count)
616{
617 return pci_read_vpd(pdev, 0, count, buf);
618}
619
620/**
621 * free_afu_irqs() - internal service to free interrupts
622 * @ctx: Adapter context.
623 */
624static void free_afu_irqs(struct ocxlflash_context *ctx)
625{
626 struct ocxl_hw_afu *afu = ctx->hw_afu;
627 struct device *dev = afu->dev;
628 int i;
629
630 if (!ctx->irqs) {
631 dev_err(dev, "%s: Interrupts not allocated\n", __func__);
632 return;
633 }
634
635 for (i = ctx->num_irqs; i >= 0; i--)
636 ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
637
638 kfree(ctx->irqs);
639 ctx->irqs = NULL;
640}
641
642/**
643 * alloc_afu_irqs() - internal service to allocate interrupts
644 * @ctx: Context associated with the request.
645 * @num: Number of interrupts requested.
646 *
647 * Return: 0 on success, -errno on failure
648 */
649static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
650{
651 struct ocxl_hw_afu *afu = ctx->hw_afu;
652 struct device *dev = afu->dev;
653 struct ocxlflash_irqs *irqs;
654 u64 addr;
655 int rc = 0;
656 int hwirq;
657 int i;
658
659 if (ctx->irqs) {
660 dev_err(dev, "%s: Interrupts already allocated\n", __func__);
661 rc = -EEXIST;
662 goto out;
663 }
664
665 if (num > OCXL_MAX_IRQS) {
666 dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
667 rc = -EINVAL;
668 goto out;
669 }
670
671 irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
672 if (unlikely(!irqs)) {
673 dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
674 rc = -ENOMEM;
675 goto out;
676 }
677
678 for (i = 0; i < num; i++) {
679 rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
680 if (unlikely(rc)) {
681 dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
682 __func__, rc);
683 goto err;
684 }
685
686 irqs[i].hwirq = hwirq;
687 irqs[i].ptrig = addr;
688 }
689
690 ctx->irqs = irqs;
691 ctx->num_irqs = num;
692out:
693 return rc;
694err:
695 for (i = i-1; i >= 0; i--)
696 ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
697 kfree(irqs);
698 goto out;
699}
700
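On a mid-loop allocation failure the routine releases only the entries it managed to allocate, then falls back to the common exit; the same unwind idiom in isolation (alloc_one()/free_one() are hypothetical stand-ins):

	/* Sketch: partial-failure rollback as used by alloc_afu_irqs() */
	for (i = 0; i < num; i++) {
		rc = alloc_one(i);
		if (unlikely(rc))
			goto err;
	}
	return 0;
err:
	for (i = i - 1; i >= 0; i--)	/* entries i..num-1 were never allocated */
		free_one(i);
	return rc;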
701/**
702 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
703 * @ctx_cookie: Context associated with the request.
704 * @num: Number of interrupts requested.
705 *
706 * Return: 0 on success, -errno on failure
707 */
708static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
709{
710 return alloc_afu_irqs(ctx_cookie, num);
711}
712
713/**
714 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
715 * @ctx_cookie: Adapter context.
716 */
717static void ocxlflash_free_afu_irqs(void *ctx_cookie)
718{
719 free_afu_irqs(ctx_cookie);
720}
721
722/**
723 * ocxlflash_unconfig_afu() - unconfigure the AFU
724 * @afu: AFU associated with the host.
725 */
726static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
727{
728 if (afu->gmmio_virt) {
729 iounmap(afu->gmmio_virt);
730 afu->gmmio_virt = NULL;
731 }
732}
733
734/**
735 * ocxlflash_destroy_afu() - destroy the AFU structure
736 * @afu_cookie: AFU to be freed.
737 */
738static void ocxlflash_destroy_afu(void *afu_cookie)
739{
740 struct ocxl_hw_afu *afu = afu_cookie;
741 int pos;
742
743 if (!afu)
744 return;
745
746 ocxlflash_release_context(afu->ocxl_ctx);
747 idr_destroy(&afu->idr);
748
749 /* Disable the AFU */
750 pos = afu->acfg.dvsec_afu_control_pos;
751 ocxl_config_set_afu_state(afu->pdev, pos, 0);
752
753 ocxlflash_unconfig_afu(afu);
754 kfree(afu);
755}
756
757/**
758 * ocxlflash_config_fn() - configure the host function
759 * @pdev: PCI device associated with the host.
760 * @afu: AFU associated with the host.
761 *
762 * Return: 0 on success, -errno on failure
763 */
764static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
765{
766 struct ocxl_fn_config *fcfg = &afu->fcfg;
767 struct device *dev = &pdev->dev;
768 u16 base, enabled, supported;
769 int rc = 0;
770
771 /* Read DVSEC config of the function */
772 rc = ocxl_config_read_function(pdev, fcfg);
773 if (unlikely(rc)) {
774 dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
775 __func__, rc);
776 goto out;
777 }
778
779 /* Check if function has AFUs defined, only 1 per function supported */
780 if (fcfg->max_afu_index >= 0) {
781 afu->is_present = true;
782 if (fcfg->max_afu_index != 0)
783 dev_warn(dev, "%s: Unexpected AFU index value %d\n",
784 __func__, fcfg->max_afu_index);
785 }
786
787 rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
788 if (unlikely(rc)) {
789 dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
790 __func__, rc);
791 goto out;
792 }
793
794 afu->fn_actag_base = base;
795 afu->fn_actag_enabled = enabled;
796
797 ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
798 dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
799 __func__, base, enabled);
800
801 rc = ocxl_link_setup(pdev, 0, &afu->link_token);
802 if (unlikely(rc)) {
803 dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
804 __func__, rc);
805 goto out;
806 }
807
808 rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
809 if (unlikely(rc)) {
810 dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
811 __func__, rc);
812 goto err;
813 }
814out:
815 return rc;
816err:
817 ocxl_link_release(pdev, afu->link_token);
818 goto out;
819}
820
821/**
822 * ocxlflash_unconfig_fn() - unconfigure the host function
823 * @pdev: PCI device associated with the host.
824 * @afu: AFU associated with the host.
825 */
826static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
827{
828 ocxl_link_release(pdev, afu->link_token);
829}
830
831/**
832 * ocxlflash_map_mmio() - map the AFU MMIO space
833 * @afu: AFU associated with the host.
834 *
835 * Return: 0 on success, -errno on failure
836 */
837static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
838{
839 struct ocxl_afu_config *acfg = &afu->acfg;
840 struct pci_dev *pdev = afu->pdev;
841 struct device *dev = afu->dev;
842 phys_addr_t gmmio, ppmmio;
843 int rc = 0;
844
845 rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
846 if (unlikely(rc)) {
847 dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
848 __func__, rc);
849 goto out;
850 }
851 gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
852 gmmio += acfg->global_mmio_offset;
853
854 rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
855 if (unlikely(rc)) {
856 dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
857 __func__, rc);
858 goto err1;
859 }
860 ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
861 ppmmio += acfg->pp_mmio_offset;
862
863 afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
864 if (unlikely(!afu->gmmio_virt)) {
865 dev_err(dev, "%s: MMIO mapping failed\n", __func__);
866 rc = -ENOMEM;
867 goto err2;
868 }
869
870 afu->gmmio_phys = gmmio;
871 afu->ppmmio_phys = ppmmio;
872out:
873 return rc;
874err2:
875 pci_release_region(pdev, acfg->pp_mmio_bar);
876err1:
877 pci_release_region(pdev, acfg->global_mmio_bar);
878 goto out;
879}
880
881/**
882 * ocxlflash_config_afu() - configure the host AFU
883 * @pdev: PCI device associated with the host.
884 * @afu: AFU associated with the host.
885 *
886 * Must be called _after_ host function configuration.
887 *
888 * Return: 0 on success, -errno on failure
889 */
890static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
891{
892 struct ocxl_afu_config *acfg = &afu->acfg;
893 struct ocxl_fn_config *fcfg = &afu->fcfg;
894 struct device *dev = &pdev->dev;
895 int count;
896 int base;
897 int pos;
898 int rc = 0;
899
900 /* This HW AFU function does not have any AFUs defined */
901 if (!afu->is_present)
902 goto out;
903
904 /* Read AFU config at index 0 */
905 rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
906 if (unlikely(rc)) {
907 dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
908 __func__, rc);
909 goto out;
910 }
911
912 /* Only one AFU per function is supported, so actag_base is same */
913 base = afu->fn_actag_base;
914 count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
915 pos = acfg->dvsec_afu_control_pos;
916
917 ocxl_config_set_afu_actag(pdev, pos, base, count);
918 dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
919 afu->afu_actag_base = base;
920 afu->afu_actag_enabled = count;
921 afu->max_pasid = 1 << acfg->pasid_supported_log;
922
923 ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);
924
925 rc = ocxlflash_map_mmio(afu);
926 if (unlikely(rc)) {
927 dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
928 __func__, rc);
929 goto out;
930 }
931
932 /* Enable the AFU */
933 ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
934out:
935 return rc;
936}
937
938/**
939 * ocxlflash_create_afu() - create the AFU for OCXL
940 * @pdev: PCI device associated with the host.
941 *
942 * Return: AFU on success, NULL on failure
943 */
944static void *ocxlflash_create_afu(struct pci_dev *pdev)
945{
946 struct device *dev = &pdev->dev;
947 struct ocxlflash_context *ctx;
948 struct ocxl_hw_afu *afu;
949 int rc;
950
951 afu = kzalloc(sizeof(*afu), GFP_KERNEL);
952 if (unlikely(!afu)) {
953 dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
954 goto out;
955 }
956
957 afu->pdev = pdev;
958 afu->dev = dev;
959 idr_init(&afu->idr);
960
961 rc = ocxlflash_config_fn(pdev, afu);
962 if (unlikely(rc)) {
963 dev_err(dev, "%s: Function configuration failed rc=%d\n",
964 __func__, rc);
965 goto err1;
966 }
967
968 rc = ocxlflash_config_afu(pdev, afu);
969 if (unlikely(rc)) {
970 dev_err(dev, "%s: AFU configuration failed rc=%d\n",
971 __func__, rc);
972 goto err2;
973 }
974
975 ctx = ocxlflash_dev_context_init(pdev, afu);
976 if (IS_ERR(ctx)) {
977 rc = PTR_ERR(ctx);
978 dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
979 __func__, rc);
980 goto err3;
981 }
982
983 afu->ocxl_ctx = ctx;
984out:
985 return afu;
986err3:
987 ocxlflash_unconfig_afu(afu);
988err2:
989 ocxlflash_unconfig_fn(pdev, afu);
990err1:
991 idr_destroy(&afu->idr);
992 kfree(afu);
993 afu = NULL;
994 goto out;
995}
996
997/**
998 * ctx_event_pending() - check for any event pending on the context
999 * @ctx: Context to be checked.
1000 *
1001 * Return: true if there is an event pending, false if none pending
1002 */
1003static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
1004{
1005 if (ctx->pending_irq || ctx->pending_fault)
1006 return true;
1007
1008 return false;
1009}
1010
1011/**
1012 * afu_poll() - poll the AFU for events on the context
1013 * @file: File associated with the adapter context.
1014 * @poll: Poll structure from the user.
1015 *
1016 * Return: poll mask
1017 */
1018static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
1019{
1020 struct ocxlflash_context *ctx = file->private_data;
1021 struct device *dev = ctx->hw_afu->dev;
1022 ulong lock_flags;
1023 int mask = 0;
1024
1025 poll_wait(file, &ctx->wq, poll);
1026
1027 spin_lock_irqsave(&ctx->slock, lock_flags);
1028 if (ctx_event_pending(ctx))
1029 mask |= POLLIN | POLLRDNORM;
1030 else if (ctx->state == CLOSED)
1031 mask |= POLLERR;
1032 spin_unlock_irqrestore(&ctx->slock, lock_flags);
1033
1034 dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
1035 __func__, ctx->pe, mask);
1036
1037 return mask;
1038}
1039
1040/**
1041 * afu_read() - perform a read on the context for any event
1042 * @file: File associated with the adapter context.
1043 * @buf: Buffer to receive the data.
1044 * @count: Size of buffer (maximum bytes that can be read).
1045 * @off: Offset.
1046 *
1047 * Return: size of the data read on success, -errno on failure
1048 */
1049static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
1050 loff_t *off)
1051{
1052 struct ocxlflash_context *ctx = file->private_data;
1053 struct device *dev = ctx->hw_afu->dev;
1054 struct cxl_event event;
1055 ulong lock_flags;
1056 ssize_t esize;
1057 ssize_t rc;
1058 int bit;
1059 DEFINE_WAIT(event_wait);
1060
1061 if (*off != 0) {
1062 dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
1063 __func__, *off);
1064 rc = -EINVAL;
1065 goto out;
1066 }
1067
1068 spin_lock_irqsave(&ctx->slock, lock_flags);
1069
1070 for (;;) {
1071 prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
1072
1073 if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
1074 break;
1075
1076 if (file->f_flags & O_NONBLOCK) {
1077 dev_err(dev, "%s: File cannot be blocked on I/O\n",
1078 __func__);
1079 rc = -EAGAIN;
1080 goto err;
1081 }
1082
1083 if (signal_pending(current)) {
1084 dev_err(dev, "%s: Signal pending on the process\n",
1085 __func__);
1086 rc = -ERESTARTSYS;
1087 goto err;
1088 }
1089
1090 spin_unlock_irqrestore(&ctx->slock, lock_flags);
1091 schedule();
1092 spin_lock_irqsave(&ctx->slock, lock_flags);
1093 }
1094
1095 finish_wait(&ctx->wq, &event_wait);
1096
1097 memset(&event, 0, sizeof(event));
1098 event.header.process_element = ctx->pe;
1099 event.header.size = sizeof(struct cxl_event_header);
1100 if (ctx->pending_irq) {
1101 esize = sizeof(struct cxl_event_afu_interrupt);
1102 event.header.size += esize;
1103 event.header.type = CXL_EVENT_AFU_INTERRUPT;
1104
1105 bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
1106 clear_bit(bit, &ctx->irq_bitmap);
1107 event.irq.irq = bit + 1;
1108 if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
1109 ctx->pending_irq = false;
1110 } else if (ctx->pending_fault) {
1111 event.header.size += sizeof(struct cxl_event_data_storage);
1112 event.header.type = CXL_EVENT_DATA_STORAGE;
1113 event.fault.addr = ctx->fault_addr;
1114 event.fault.dsisr = ctx->fault_dsisr;
1115 ctx->pending_fault = false;
1116 }
1117
1118 spin_unlock_irqrestore(&ctx->slock, lock_flags);
1119
1120 if (copy_to_user(buf, &event, event.header.size)) {
1121 dev_err(dev, "%s: copy_to_user failed\n", __func__);
1122 rc = -EFAULT;
1123 goto out;
1124 }
1125
1126 rc = event.header.size;
1127out:
1128 return rc;
1129err:
1130 finish_wait(&ctx->wq, &event_wait);
1131 spin_unlock_irqrestore(&ctx->slock, lock_flags);
1132 goto out;
1133}
1134
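From userspace the context fd therefore behaves like the existing cxl event interface: poll for POLLIN, then read one cxl_event whose header describes the payload. A minimal consumer, assuming the exported uapi header:

/* Userspace sketch: block until the AFU raises an event, then decode it */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <misc/cxl.h>		/* struct cxl_event, CXL_EVENT_* */

static int wait_afu_event(int fd)
{
	struct cxl_event ev;
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) <= 0 || read(fd, &ev, sizeof(ev)) <= 0)
		return -1;

	if (ev.header.type == CXL_EVENT_AFU_INTERRUPT)
		printf("AFU interrupt %d\n", ev.irq.irq);	/* 1-based, per afu_read() */
	return ev.header.type;
}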
1135/**
1136 * afu_release() - release and free the context
1137 * @inode: File inode pointer.
1138 * @file: File associated with the context.
1139 *
1140 * Return: 0 on success, -errno on failure
1141 */
1142static int afu_release(struct inode *inode, struct file *file)
1143{
1144 struct ocxlflash_context *ctx = file->private_data;
1145 int i;
1146
1147 /* Unmap and free the interrupts associated with the context */
1148 for (i = ctx->num_irqs; i >= 0; i--)
1149 afu_unmap_irq(0, ctx, i, ctx);
1150 free_afu_irqs(ctx);
1151
1152 return ocxlflash_release_context(ctx);
1153}
1154
1155/**
1156 * ocxlflash_mmap_fault() - mmap fault handler
1157 * @vmf: VM fault associated with current fault.
1158 *
1159 * Return: 0 on success, -errno on failure
1160 */
1161static int ocxlflash_mmap_fault(struct vm_fault *vmf)
1162{
1163 struct vm_area_struct *vma = vmf->vma;
1164 struct ocxlflash_context *ctx = vma->vm_file->private_data;
1165 struct device *dev = ctx->hw_afu->dev;
1166 u64 mmio_area, offset;
1167
1168 offset = vmf->pgoff << PAGE_SHIFT;
1169 if (offset >= ctx->psn_size)
1170 return VM_FAULT_SIGBUS;
1171
1172 mutex_lock(&ctx->state_mutex);
1173 if (ctx->state != STARTED) {
1174 dev_err(dev, "%s: Context not started, state=%d\n",
1175 __func__, ctx->state);
1176 mutex_unlock(&ctx->state_mutex);
1177 return VM_FAULT_SIGBUS;
1178 }
1179 mutex_unlock(&ctx->state_mutex);
1180
1181 mmio_area = ctx->psn_phys;
1182 mmio_area += offset;
1183
1184 vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
1185 return VM_FAULT_NOPAGE;
1186}
1187
1188static const struct vm_operations_struct ocxlflash_vmops = {
1189 .fault = ocxlflash_mmap_fault,
1190};
1191
1192/**
1193 * afu_mmap() - map the fault handler operations
1194 * @file: File associated with the context.
1195 * @vma: VM area associated with mapping.
1196 *
1197 * Return: 0 on success, -errno on failure
1198 */
1199static int afu_mmap(struct file *file, struct vm_area_struct *vma)
1200{
1201 struct ocxlflash_context *ctx = file->private_data;
1202
1203 if ((vma_pages(vma) + vma->vm_pgoff) >
1204 (ctx->psn_size >> PAGE_SHIFT))
1205 return -EINVAL;
1206
1207 vma->vm_flags |= VM_IO | VM_PFNMAP;
1208 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1209 vma->vm_ops = &ocxlflash_vmops;
1210 return 0;
1211}
1212
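The fault handler above backs a plain shared mapping of the context fd; userspace only needs the PSA size it negotiated with the AFU (sketch):

/* Userspace sketch: map the per-context MMIO space through the fd */
#include <stdint.h>
#include <sys/mman.h>

static volatile uint64_t *map_psa(int fd, size_t psn_size)
{
	void *p = mmap(NULL, psn_size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	return (p == MAP_FAILED) ? NULL : (volatile uint64_t *)p;
}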
1213static const struct file_operations ocxl_afu_fops = {
1214 .owner = THIS_MODULE,
1215 .poll = afu_poll,
1216 .read = afu_read,
1217 .release = afu_release,
1218 .mmap = afu_mmap,
1219};
1220
1221#define PATCH_FOPS(NAME) \
1222 do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
1223
1224/**
1225 * ocxlflash_get_fd() - get file descriptor for an adapter context
1226 * @ctx_cookie: Adapter context.
1227 * @fops: File operations to be associated.
1228 * @fd: File descriptor to be returned back.
1229 *
1230 * Return: pointer to the file on success, ERR_PTR on failure
1231 */
1232static struct file *ocxlflash_get_fd(void *ctx_cookie,
1233 struct file_operations *fops, int *fd)
1234{
1235 struct ocxlflash_context *ctx = ctx_cookie;
1236 struct device *dev = ctx->hw_afu->dev;
1237 struct file *file;
1238 int flags, fdtmp;
1239 int rc = 0;
1240 char *name = NULL;
1241
1242 /* Only allow one fd per context */
1243 if (ctx->mapping) {
1244 dev_err(dev, "%s: Context is already mapped to an fd\n",
1245 __func__);
1246 rc = -EEXIST;
1247 goto err1;
1248 }
1249
1250 flags = O_RDWR | O_CLOEXEC;
1251
1252 /* This code is similar to anon_inode_getfd() */
1253 rc = get_unused_fd_flags(flags);
1254 if (unlikely(rc < 0)) {
1255 dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
1256 __func__, rc);
1257 goto err1;
1258 }
1259 fdtmp = rc;
1260
1261 /* Patch the file ops that are not defined */
1262 if (fops) {
1263 PATCH_FOPS(poll);
1264 PATCH_FOPS(read);
1265 PATCH_FOPS(release);
1266 PATCH_FOPS(mmap);
1267 } else /* Use default ops */
1268 fops = (struct file_operations *)&ocxl_afu_fops;
1269
1270 name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
1271 file = ocxlflash_getfile(dev, name, fops, ctx, flags);
1272 kfree(name);
1273 if (IS_ERR(file)) {
1274 rc = PTR_ERR(file);
1275 dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
1276 __func__, rc);
1277 goto err2;
1278 }
1279
1280 ctx->mapping = file->f_mapping;
1281 *fd = fdtmp;
1282out:
1283 return file;
1284err2:
1285 put_unused_fd(fdtmp);
1286err1:
1287 file = ERR_PTR(rc);
1288 goto out;
1289}
1290
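The file comes back with the descriptor still reserved rather than installed, so the caller can abandon the attach cleanly; a sketch of the expected consumption (the fops argument and surrounding setup are illustrative):

	/* Sketch: publish the fd only after attach processing succeeds */
	file = cfg->ops->get_fd(ctx, &my_fops, &fd);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* ... complete attach setup; on failure, put the file and the fd ... */
	fd_install(fd, file);		/* fd becomes live for userspace */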
1291/**
1292 * ocxlflash_fops_get_context() - get the context associated with the file
1293 * @file: File associated with the adapter context.
1294 *
1295 * Return: pointer to the context
1296 */
1297static void *ocxlflash_fops_get_context(struct file *file)
1298{
1299 return file->private_data;
1300}
1301
1302/**
1303 * ocxlflash_afu_irq() - interrupt handler for user contexts
1304 * @irq: Interrupt number.
1305 * @data: Private data provided at interrupt registration, the context.
1306 *
1307 * Return: Always returns IRQ_HANDLED.
1308 */
1309static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
1310{
1311 struct ocxlflash_context *ctx = data;
1312 struct device *dev = ctx->hw_afu->dev;
1313 int i;
1314
1315 dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
1316 __func__, ctx->pe, irq);
1317
1318 for (i = 0; i < ctx->num_irqs; i++) {
1319 if (ctx->irqs[i].virq == irq)
1320 break;
1321 }
1322 if (unlikely(i >= ctx->num_irqs)) {
1323 dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
1324 goto out;
1325 }
1326
1327 spin_lock(&ctx->slock);
1328 set_bit(i - 1, &ctx->irq_bitmap);
1329 ctx->pending_irq = true;
1330 spin_unlock(&ctx->slock);
1331
1332 wake_up_all(&ctx->wq);
1333out:
1334 return IRQ_HANDLED;
1335}
1336
1337/**
1338 * ocxlflash_start_work() - start a user context
1339 * @ctx_cookie: Context to be started.
1340 * @num_irqs: Number of interrupts requested.
1341 *
1342 * Return: 0 on success, -errno on failure
1343 */
1344static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
1345{
1346 struct ocxlflash_context *ctx = ctx_cookie;
1347 struct ocxl_hw_afu *afu = ctx->hw_afu;
1348 struct device *dev = afu->dev;
1349 char *name;
1350 int rc = 0;
1351 int i;
1352
1353 rc = alloc_afu_irqs(ctx, num_irqs);
1354 if (unlikely(rc < 0)) {
1355 dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
1356 goto out;
1357 }
1358
1359 for (i = 0; i < num_irqs; i++) {
1360 name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
1361 dev_name(dev), ctx->pe, i);
1362 rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
1363 kfree(name);
1364 if (unlikely(rc < 0)) {
1365 dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
1366 __func__, rc);
1367 goto err;
1368 }
1369 }
1370
1371 rc = start_context(ctx);
1372 if (unlikely(rc)) {
1373 dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
1374 goto err;
1375 }
1376out:
1377 return rc;
1378err:
1379 for (i = i-1; i >= 0; i--)
1380 afu_unmap_irq(0, ctx, i, ctx);
1381 free_afu_irqs(ctx);
1382 goto out;
1383};
1384
1385/**
1386 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
1387 * @file: File installed with adapter file descriptor.
1388 * @vma: VM area associated with mapping.
1389 *
1390 * Return: 0 on success, -errno on failure
1391 */
1392static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
1393{
1394 return afu_mmap(file, vma);
1395}
1396
1397/**
1398 * ocxlflash_fd_release() - release the context associated with the file
1399 * @inode: File inode pointer.
1400 * @file: File associated with the adapter context.
1401 *
1402 * Return: 0 on success, -errno on failure
1403 */
1404static int ocxlflash_fd_release(struct inode *inode, struct file *file)
1405{
1406 return afu_release(inode, file);
1407}
1408
1409/* Backend ops to ocxlflash services */
1410const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
1411 .module = THIS_MODULE,
1412 .psa_map = ocxlflash_psa_map,
1413 .psa_unmap = ocxlflash_psa_unmap,
1414 .process_element = ocxlflash_process_element,
1415 .map_afu_irq = ocxlflash_map_afu_irq,
1416 .unmap_afu_irq = ocxlflash_unmap_afu_irq,
1417 .get_irq_objhndl = ocxlflash_get_irq_objhndl,
1418 .start_context = ocxlflash_start_context,
1419 .stop_context = ocxlflash_stop_context,
1420 .afu_reset = ocxlflash_afu_reset,
1421 .set_master = ocxlflash_set_master,
1422 .get_context = ocxlflash_get_context,
1423 .dev_context_init = ocxlflash_dev_context_init,
1424 .release_context = ocxlflash_release_context,
1425 .perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
1426 .read_adapter_vpd = ocxlflash_read_adapter_vpd,
1427 .allocate_afu_irqs = ocxlflash_allocate_afu_irqs,
1428 .free_afu_irqs = ocxlflash_free_afu_irqs,
1429 .create_afu = ocxlflash_create_afu,
1430 .destroy_afu = ocxlflash_destroy_afu,
1431 .get_fd = ocxlflash_get_fd,
1432 .fops_get_context = ocxlflash_fops_get_context,
1433 .start_work = ocxlflash_start_work,
1434 .fd_mmap = ocxlflash_fd_mmap,
1435 .fd_release = ocxlflash_fd_release,
1436};
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
new file mode 100644
index 000000000000..9270d35c4620
--- /dev/null
+++ b/drivers/scsi/cxlflash/ocxl_hw.h
@@ -0,0 +1,77 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
5 * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2018 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#define OCXL_MAX_IRQS 4 /* Max interrupts per process */
16
17struct ocxlflash_irqs {
18 int hwirq;
19 u32 virq;
20 u64 ptrig;
21 void __iomem *vtrig;
22};
23
24/* OCXL hardware AFU associated with the host */
25struct ocxl_hw_afu {
26 struct ocxlflash_context *ocxl_ctx; /* Host context */
27 struct pci_dev *pdev; /* PCI device */
28 struct device *dev; /* Generic device */
29 bool perst_same_image; /* Same image loaded on perst */
30
31 struct ocxl_fn_config fcfg; /* DVSEC config of the function */
32 struct ocxl_afu_config acfg; /* AFU configuration data */
33
34 int fn_actag_base; /* Function acTag base */
35 int fn_actag_enabled; /* Function acTag number enabled */
36 int afu_actag_base; /* AFU acTag base */
37 int afu_actag_enabled; /* AFU acTag number enabled */
38
39 phys_addr_t ppmmio_phys; /* Per process MMIO space */
40 phys_addr_t gmmio_phys; /* Global AFU MMIO space */
41 void __iomem *gmmio_virt; /* Global MMIO map */
42
43 void *link_token; /* Link token for the SPA */
44 struct idr idr; /* IDR to manage contexts */
45 int max_pasid; /* Maximum number of contexts */
46 bool is_present; /* Function has AFUs defined */
47};
48
49enum ocxlflash_ctx_state {
50 CLOSED,
51 OPENED,
52 STARTED
53};
54
55struct ocxlflash_context {
56 struct ocxl_hw_afu *hw_afu; /* HW AFU back pointer */
57 struct address_space *mapping; /* Mapping for pseudo filesystem */
58 bool master; /* Whether this is a master context */
59 int pe; /* Process element */
60
61 phys_addr_t psn_phys; /* Process mapping */
62 u64 psn_size; /* Process mapping size */
63
64 spinlock_t slock; /* Protects irq/fault/event updates */
65 wait_queue_head_t wq; /* Wait queue for poll and interrupts */
66 struct mutex state_mutex; /* Mutex to update context state */
67 enum ocxlflash_ctx_state state; /* Context state */
68
69 struct ocxlflash_irqs *irqs; /* Array of per-interrupt state */
70 int num_irqs; /* Number of interrupts */
71 bool pending_irq; /* Pending interrupt on the context */
72 ulong irq_bitmap; /* Bits indicating pending irq num */
73
74 u64 fault_addr; /* Address that triggered the fault */
75 u64 fault_dsisr; /* Value of dsisr register at fault */
76 bool pending_fault; /* Pending translation fault */
77};
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index bedf1ce2f33c..874abce35ab4 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -258,23 +258,30 @@ struct sisl_host_map {
 					 * exit since there is no way to tell which
 					 * command caused the error.
 					 */
-#define SISL_ISTATUS_PERM_ERR_CMDROOM		0x0010ULL	/* b59, user error */
-#define SISL_ISTATUS_PERM_ERR_RCB_READ		0x0008ULL	/* b60, user error */
-#define SISL_ISTATUS_PERM_ERR_SA_WRITE		0x0004ULL	/* b61, user error */
-#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE		0x0002ULL	/* b62, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_3_EA		0x0400ULL	/* b53, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_2_EA		0x0200ULL	/* b54, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_1_EA		0x0100ULL	/* b55, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID	0x0080ULL	/* b56, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID	0x0040ULL	/* b57, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID	0x0020ULL	/* b58, user error */
+#define SISL_ISTATUS_PERM_ERR_CMDROOM		0x0010ULL	/* b59, user error */
+#define SISL_ISTATUS_PERM_ERR_RCB_READ		0x0008ULL	/* b60, user error */
+#define SISL_ISTATUS_PERM_ERR_SA_WRITE		0x0004ULL	/* b61, user error */
+#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE		0x0002ULL	/* b62, user error */
 	/* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
 	 * Same error in data/LXT/RHT access is reported via IOASA.
 	 */
-#define SISL_ISTATUS_TEMP_ERR_PAGEIN	0x0001ULL	/* b63, can be generated
-							 * only when AFU auto
-							 * retry is disabled.
-							 * If user can determine
-							 * the command that
-							 * caused the error, it
-							 * can be retried.
-							 */
-#define SISL_ISTATUS_UNMASK	(0x001FULL)		/* 1 means unmasked */
-#define SISL_ISTATUS_MASK	~(SISL_ISTATUS_UNMASK)	/* 1 means masked */
+#define SISL_ISTATUS_TEMP_ERR_PAGEIN	0x0001ULL	/* b63, can only be
+							 * generated when AFU
+							 * auto retry is
+							 * disabled. If user
+							 * can determine the
+							 * command that caused
+							 * the error, it can
+							 * be retried.
+							 */
+#define SISL_ISTATUS_UNMASK	(0x07FFULL)		/* 1 means unmasked */
+#define SISL_ISTATUS_MASK	~(SISL_ISTATUS_UNMASK)	/* 1 means masked */
 
 	__be64 intr_clear;
 	__be64 intr_mask;
@@ -284,6 +291,7 @@ struct sisl_host_map {
 	__be64 cmd_room;
 	__be64 ctx_ctrl;	/* least significant byte or b56:63 is LISN# */
 #define SISL_CTX_CTRL_UNMAP_SECTOR	0x8000000000000000ULL /* b0 */
+#define SISL_CTX_CTRL_LISN_MASK		(0xFFULL)
 	__be64 mbox_w;		/* restricted use */
 	__be64 sq_start;	/* Submission Queue (R/W): write sequence and */
 	__be64 sq_end;		/* inclusion semantics are the same as RRQ */
@@ -309,6 +317,10 @@ struct sisl_ctrl_map {
 #define SISL_CTX_CAP_WRITE_CMD	0x0000000000000002ULL /* afu_rc 0x21 */
 #define SISL_CTX_CAP_READ_CMD	0x0000000000000001ULL /* afu_rc 0x21 */
 	__be64 mbox_r;
+	__be64 lisn_pasid[2];
+	/* pasid _a arg must be ULL */
+#define SISL_LISN_PASID(_a, _b)	(((_a) << 32) | (_b))
+	__be64 lisn_ea[3];
 };
 
 /* single copy global regs */
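SISL_LISN_PASID() packs two 32-bit PASIDs into one big-endian register, first argument in the upper word; with all three LISNs steered to a single PASID the programming looks as follows (sketch, matching the afu_attach() change later in this merge; the PASID value is illustrative):

	/* Sketch: steer LISNs 1-3 to one PASID */
	u64 pasid = 0x12ULL;	/* must be ULL, per the comment above */

	writeq_be(SISL_LISN_PASID(pasid, pasid), &ctrl_map->lisn_pasid[0]); /* LISN 1, 2 */
	writeq_be(SISL_LISN_PASID(0UL, pasid), &ctrl_map->lisn_pasid[1]);   /* LISN 3 */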
@@ -415,6 +427,7 @@ struct sisl_global_regs {
 #define SISL_INTVER_CAP_RESERVED_CMD_MODE_B	0x100000000000ULL
 #define SISL_INTVER_CAP_LUN_PROVISION		0x080000000000ULL
 #define SISL_INTVER_CAP_AFU_DEBUG		0x040000000000ULL
+#define SISL_INTVER_CAP_OCXL_LISN		0x020000000000ULL
 };
 
 #define CXLFLASH_NUM_FC_PORTS_PER_BANK	2	/* fixed # of ports per bank */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 2fe79df5c73c..e489d89cbb45 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -14,8 +14,9 @@
 
 #include <linux/delay.h>
 #include <linux/file.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
 #include <linux/syscalls.h>
-#include <misc/cxl.h>
 #include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
@@ -269,6 +270,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
 	int rc = 0;
 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
 	u64 val;
+	int i;
 
 	/* Unlock cap and restrict user to read/write cmds in translated mode */
 	readq_be(&ctrl_map->mbox_r);
@@ -282,6 +284,19 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
 		goto out;
 	}
 
+	if (afu_is_ocxl_lisn(afu)) {
+		/* Set up the LISN effective address for each interrupt */
+		for (i = 0; i < ctxi->irqs; i++) {
+			val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
+			writeq_be(val, &ctrl_map->lisn_ea[i]);
+		}
+
+		/* Use primary HWQ PASID as identifier for all interrupts */
+		val = hwq->ctx_hndl;
+		writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
+		writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
+	}
+
 	/* Set up MMIO registers pointing to the RHT */
 	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
 	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
@@ -974,6 +989,10 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
 	 * theoretically never occur), every call into this routine results
 	 * in a complete freeing of a context.
 	 *
+	 * Detaching the LUN is typically an ioctl() operation and the underlying
+	 * code assumes that ioctl_rwsem has been acquired as a reader. To support
+	 * that design point, the semaphore is acquired and released around detach.
+	 *
 	 * Return: 0 on success
 	 */
 static int cxlflash_cxl_release(struct inode *inode, struct file *file)
@@ -1012,9 +1031,11 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
 
 	dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
 
+	down_read(&cfg->ioctl_rwsem);
 	detach.context_id = ctxi->ctxid;
 	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
 		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
+	up_read(&cfg->ioctl_rwsem);
 out_release:
 	cfg->ops->fd_release(inode, file);
 out:
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 5deef57a7834..66e445a17d6c 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -12,8 +12,9 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/interrupt.h>
+#include <linux/pci.h>
 #include <linux/syscalls.h>
-#include <misc/cxl.h>
 #include <asm/unaligned.h>
 #include <asm/bitsperlong.h>
 
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 5ceea8da7bb6..37de8fb186d7 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1706,7 +1706,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 	u32 reply_size = 0;
 	u32 __user *user_msg = arg;
 	u32 __user * user_reply = NULL;
-	void *sg_list[pHba->sg_tablesize];
+	void **sg_list = NULL;
 	u32 sg_offset = 0;
 	u32 sg_count = 0;
 	int sg_index = 0;
@@ -1748,19 +1748,23 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 	msg[2] = 0x40000000; // IOCTL context
 	msg[3] = adpt_ioctl_to_context(pHba, reply);
 	if (msg[3] == (u32)-1) {
-		kfree(reply);
-		return -EBUSY;
+		rcode = -EBUSY;
+		goto free;
 	}
 
-	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
+	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
+	if (!sg_list) {
+		rcode = -ENOMEM;
+		goto free;
+	}
 	if(sg_offset) {
 		// TODO add 64 bit API
 		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
 		if (sg_count > pHba->sg_tablesize){
 			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
-			kfree (reply);
-			return -EINVAL;
+			rcode = -EINVAL;
+			goto free;
 		}
 
 		for(i = 0; i < sg_count; i++) {
@@ -1879,7 +1883,6 @@ cleanup:
 	if (rcode != -ETIME && rcode != -EINTR) {
 		struct sg_simple_element *sg =
 		     (struct sg_simple_element*) (msg +sg_offset);
-		kfree (reply);
 		while(sg_index) {
 			if(sg_list[--sg_index]) {
 				dma_free_coherent(&pHba->pDev->dev,
@@ -1889,6 +1892,10 @@ cleanup:
 			}
 		}
 	}
+
+free:
+	kfree(sg_list);
+	kfree(reply);
 	return rcode;
 }
 
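The conversion above replaces a variable-length array on the kernel stack with a heap allocation and funnels every error through one exit, where kfree(NULL) makes early failures safe; the shape of the idiom in isolation (n, rcode and reply stand in for the function's locals):

	/* Sketch: heap buffer plus single-exit cleanup, as adopted above */
	void **sg_list = kcalloc(n, sizeof(*sg_list), GFP_KERNEL);

	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	/* ... any later failure sets rcode and jumps to free ... */
free:
	kfree(sg_list);		/* no-op when the allocation itself failed */
	kfree(reply);
	return rcode;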
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 9dffcb28c9b7..9db645dde35e 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -1202,8 +1202,6 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
 	case ESAS2R_INIT_MSG_START:
 	case ESAS2R_INIT_MSG_REINIT:
 	{
-		struct timeval now;
-		do_gettimeofday(&now);
 		esas2r_hdebug("CFG init");
 		esas2r_build_cfg_req(a,
 				     rq,
@@ -1212,7 +1210,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
 				     NULL);
 		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
 		ci->sgl_page_size = cpu_to_le32(sgl_page_size);
-		ci->epoch_time = cpu_to_le32(now.tv_sec);
+		/* firmware interface overflows in y2106 */
+		ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
 		rq->flags |= RF_FAILURE_OK;
 		a->init_msg = ESAS2R_INIT_MSG_INIT;
 		break;
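struct timeval carries a 32-bit seconds field on 32-bit systems and do_gettimeofday() is deprecated for that reason; ktime_get_real_seconds() returns a time64_t, so only the firmware field's own 32 bits limit the range (hence the y2106 comment). The conversion pattern in isolation:

	/* Sketch: y2038-safe wall-clock seconds for a 32-bit firmware field */
	time64_t now = ktime_get_real_seconds();

	ci->epoch_time = cpu_to_le32(now);	/* truncates; wraps in 2106 */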
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 97623002908f..34bcc8c04ff4 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1849,7 +1849,7 @@ int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
 	/* allocate a request */
 	rq = esas2r_alloc_request(a);
 	if (rq == NULL) {
-		esas2r_debug("esas2r_read_vda: out of requestss");
+		esas2r_debug("esas2r_read_vda: out of requests");
 		return -EBUSY;
 	}
 
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index e07eac5be087..c07118617d89 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -283,7 +283,7 @@ MODULE_PARM_DESC(num_requests,
 int num_ae_requests = 4;
 module_param(num_ae_requests, int, 0);
 MODULE_PARM_DESC(num_ae_requests,
-		 "Number of VDA asynchromous event requests. Default 4.");
+		 "Number of VDA asynchronous event requests. Default 4.");
 
 int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
 module_param(cmd_per_lun, int, 0);
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index d1153e8e846b..7052a5d45f7f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -136,12 +136,14 @@ struct hisi_sas_phy {
 	struct hisi_sas_port	*port;
 	struct asd_sas_phy	sas_phy;
 	struct sas_identify	identify;
+	struct completion *reset_completion;
+	spinlock_t lock;
 	u64		port_id; /* from hw */
-	u64		dev_sas_addr;
 	u64		frame_rcvd_size;
 	u8		frame_rcvd[32];
 	u8		phy_attached;
-	u8		reserved[3];
+	u8		in_reset;
+	u8		reserved[2];
 	u32		phy_type;
 	enum sas_linkrate	minimum_linkrate;
 	enum sas_linkrate	maximum_linkrate;
@@ -162,7 +164,7 @@ struct hisi_sas_cq {
 
 struct hisi_sas_dq {
 	struct hisi_hba *hisi_hba;
-	struct hisi_sas_slot	*slot_prep;
+	struct list_head list;
 	spinlock_t lock;
 	int	wr_point;
 	int	id;
@@ -174,15 +176,22 @@ struct hisi_sas_device {
174 struct completion *completion; 176 struct completion *completion;
175 struct hisi_sas_dq *dq; 177 struct hisi_sas_dq *dq;
176 struct list_head list; 178 struct list_head list;
177 u64 attached_phy;
178 enum sas_device_type dev_type; 179 enum sas_device_type dev_type;
179 int device_id; 180 int device_id;
180 int sata_idx; 181 int sata_idx;
181 u8 dev_status; 182 u8 dev_status;
182}; 183};
183 184
185struct hisi_sas_tmf_task {
186 int force_phy;
187 int phy_id;
188 u8 tmf;
189 u16 tag_of_task_to_be_managed;
190};
191
184struct hisi_sas_slot { 192struct hisi_sas_slot {
185 struct list_head entry; 193 struct list_head entry;
194 struct list_head delivery;
186 struct sas_task *task; 195 struct sas_task *task;
187 struct hisi_sas_port *port; 196 struct hisi_sas_port *port;
188 u64 n_elem; 197 u64 n_elem;
@@ -192,17 +201,15 @@ struct hisi_sas_slot {
192 int cmplt_queue_slot; 201 int cmplt_queue_slot;
193 int idx; 202 int idx;
194 int abort; 203 int abort;
204 int ready;
195 void *buf; 205 void *buf;
196 dma_addr_t buf_dma; 206 dma_addr_t buf_dma;
197 void *cmd_hdr; 207 void *cmd_hdr;
198 dma_addr_t cmd_hdr_dma; 208 dma_addr_t cmd_hdr_dma;
199 struct work_struct abort_slot; 209 struct work_struct abort_slot;
200 struct timer_list internal_abort_timer; 210 struct timer_list internal_abort_timer;
201}; 211 bool is_internal;
202 212 struct hisi_sas_tmf_task *tmf;
203struct hisi_sas_tmf_task {
204 u8 tmf;
205 u16 tag_of_task_to_be_managed;
206}; 213};
207 214
208struct hisi_sas_hw { 215struct hisi_sas_hw {
@@ -215,14 +222,13 @@ struct hisi_sas_hw {
215 void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); 222 void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
216 int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq); 223 int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
217 void (*start_delivery)(struct hisi_sas_dq *dq); 224 void (*start_delivery)(struct hisi_sas_dq *dq);
218 int (*prep_ssp)(struct hisi_hba *hisi_hba, 225 void (*prep_ssp)(struct hisi_hba *hisi_hba,
219 struct hisi_sas_slot *slot, int is_tmf, 226 struct hisi_sas_slot *slot);
220 struct hisi_sas_tmf_task *tmf); 227 void (*prep_smp)(struct hisi_hba *hisi_hba,
221 int (*prep_smp)(struct hisi_hba *hisi_hba,
222 struct hisi_sas_slot *slot); 228 struct hisi_sas_slot *slot);
223 int (*prep_stp)(struct hisi_hba *hisi_hba, 229 void (*prep_stp)(struct hisi_hba *hisi_hba,
224 struct hisi_sas_slot *slot); 230 struct hisi_sas_slot *slot);
225 int (*prep_abort)(struct hisi_hba *hisi_hba, 231 void (*prep_abort)(struct hisi_hba *hisi_hba,
226 struct hisi_sas_slot *slot, 232 struct hisi_sas_slot *slot,
227 int device_id, int abort_flag, int tag_to_abort); 233 int device_id, int abort_flag, int tag_to_abort);
228 int (*slot_complete)(struct hisi_hba *hisi_hba, 234 int (*slot_complete)(struct hisi_hba *hisi_hba,
@@ -245,8 +251,11 @@ struct hisi_sas_hw {
245 u32 (*get_phys_state)(struct hisi_hba *hisi_hba); 251 u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
246 int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type, 252 int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
247 u8 reg_index, u8 reg_count, u8 *write_data); 253 u8 reg_index, u8 reg_count, u8 *write_data);
254 void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
255 int delay_ms, int timeout_ms);
248 int max_command_entries; 256 int max_command_entries;
249 int complete_hdr_size; 257 int complete_hdr_size;
258 struct scsi_host_template *sht;
250}; 259};
251 260
252struct hisi_hba { 261struct hisi_hba {
@@ -273,6 +282,8 @@ struct hisi_hba {
273 struct workqueue_struct *wq; 282 struct workqueue_struct *wq;
274 283
275 int slot_index_count; 284 int slot_index_count;
285 int last_slot_index;
286 int last_dev_id;
276 unsigned long *slot_index_tags; 287 unsigned long *slot_index_tags;
277 unsigned long reject_stp_links_msk; 288 unsigned long reject_stp_links_msk;
278 289
@@ -411,7 +422,7 @@ struct hisi_sas_command_table_ssp {
411 union { 422 union {
412 struct { 423 struct {
413 struct ssp_command_iu task; 424 struct ssp_command_iu task;
414 u32 prot[6]; 425 u32 prot[7];
415 }; 426 };
416 struct ssp_tmf_iu ssp_task; 427 struct ssp_tmf_iu ssp_task;
417 struct xfer_rdy_iu xfer_rdy; 428 struct xfer_rdy_iu xfer_rdy;
@@ -437,10 +448,7 @@ struct hisi_sas_slot_buf_table {
437}; 448};
438 449
439extern struct scsi_transport_template *hisi_sas_stt; 450extern struct scsi_transport_template *hisi_sas_stt;
440extern struct scsi_host_template *hisi_sas_sht;
441
442extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba); 451extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
443extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
444extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost); 452extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
445extern void hisi_sas_free(struct hisi_hba *hisi_hba); 453extern void hisi_sas_free(struct hisi_hba *hisi_hba);
446extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, 454extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
@@ -454,6 +462,11 @@ extern int hisi_sas_probe(struct platform_device *pdev,
454 const struct hisi_sas_hw *ops); 462 const struct hisi_sas_hw *ops);
455extern int hisi_sas_remove(struct platform_device *pdev); 463extern int hisi_sas_remove(struct platform_device *pdev);
456 464
465extern int hisi_sas_slave_configure(struct scsi_device *sdev);
466extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
467extern void hisi_sas_scan_start(struct Scsi_Host *shost);
468extern struct device_attribute *host_attrs[];
469extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
457extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy); 470extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
458extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, 471extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
459 struct sas_task *task, 472 struct sas_task *task,
@@ -465,4 +478,5 @@ extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
465extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 478extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
466 enum hisi_sas_phy_event event); 479 enum hisi_sas_phy_event event);
467extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba); 480extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
481extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
468#endif 482#endif
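
The header reshuffle above moves hisi_sas_tmf_task ahead of hisi_sas_slot so the slot can carry its own TMF context (tmf, is_internal), which is why the prep_ssp/prep_smp/prep_stp hooks lose their extra parameters and become void. A sketch of that shape with simplified stand-in types, not the driver's real structures:

#include <stdio.h>

struct tmf_task { int force_phy; int phy_id; };

struct slot {
	int is_internal;          /* driver-generated command (TMF, abort) */
	struct tmf_task *tmf;     /* NULL for ordinary I/O */
};

static void prep_ssp(struct slot *slot)
{
	if (slot->tmf)
		printf("build TMF IU (force_phy=%d)\n", slot->tmf->force_phy);
	else
		printf("build normal command IU\n");
}

int main(void)
{
	struct tmf_task tmf = { .force_phy = 1, .phy_id = 2 };
	struct slot io  = { .is_internal = 0, .tmf = NULL };
	struct slot mgm = { .is_internal = 1, .tmf = &tmf };

	prep_ssp(&io);
	prep_ssp(&mgm);
	return 0;
}
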
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 49c1fa643803..6f562974f8f6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -24,6 +24,9 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
24static int hisi_sas_softreset_ata_disk(struct domain_device *device); 24static int hisi_sas_softreset_ata_disk(struct domain_device *device);
25static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 25static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
26 void *funcdata); 26 void *funcdata);
27static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
28 struct domain_device *device);
29static void hisi_sas_dev_gone(struct domain_device *device);
27 30
28u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 31u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
29{ 32{
@@ -78,22 +81,23 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
78 case ATA_CMD_STANDBYNOW1: 81 case ATA_CMD_STANDBYNOW1:
79 case ATA_CMD_ZAC_MGMT_OUT: 82 case ATA_CMD_ZAC_MGMT_OUT:
80 return HISI_SAS_SATA_PROTOCOL_NONDATA; 83 return HISI_SAS_SATA_PROTOCOL_NONDATA;
84
85 case ATA_CMD_SET_MAX:
86 switch (fis->features) {
87 case ATA_SET_MAX_PASSWD:
88 case ATA_SET_MAX_LOCK:
89 return HISI_SAS_SATA_PROTOCOL_PIO;
90
91 case ATA_SET_MAX_PASSWD_DMA:
92 case ATA_SET_MAX_UNLOCK_DMA:
93 return HISI_SAS_SATA_PROTOCOL_DMA;
94
95 default:
96 return HISI_SAS_SATA_PROTOCOL_NONDATA;
97 }
98
81 default: 99 default:
82 { 100 {
83 if (fis->command == ATA_CMD_SET_MAX) {
84 switch (fis->features) {
85 case ATA_SET_MAX_PASSWD:
86 case ATA_SET_MAX_LOCK:
87 return HISI_SAS_SATA_PROTOCOL_PIO;
88
89 case ATA_SET_MAX_PASSWD_DMA:
90 case ATA_SET_MAX_UNLOCK_DMA:
91 return HISI_SAS_SATA_PROTOCOL_DMA;
92
93 default:
94 return HISI_SAS_SATA_PROTOCOL_NONDATA;
95 }
96 }
97 if (direction == DMA_NONE) 101 if (direction == DMA_NONE)
98 return HISI_SAS_SATA_PROTOCOL_NONDATA; 102 return HISI_SAS_SATA_PROTOCOL_NONDATA;
99 return HISI_SAS_SATA_PROTOCOL_PIO; 103 return HISI_SAS_SATA_PROTOCOL_PIO;
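
The hunk above gives ATA_CMD_SET_MAX its own case label instead of a nested if under default:, dispatching PIO versus DMA on the FIS features field. A userspace sketch of the two-level dispatch; the numeric constants merely stand in for the ATA_* values and are assumptions here:

#include <stdio.h>

enum proto { PROTO_NONDATA, PROTO_PIO, PROTO_DMA };
enum { CMD_SET_MAX = 0xF9, FEAT_PASSWD = 0x01, FEAT_LOCK = 0x02,
       FEAT_PASSWD_DMA = 0x05, FEAT_UNLOCK_DMA = 0x06 };

static enum proto sata_protocol(unsigned char command, unsigned char features)
{
	switch (command) {
	case CMD_SET_MAX:
		switch (features) {
		case FEAT_PASSWD:
		case FEAT_LOCK:
			return PROTO_PIO;
		case FEAT_PASSWD_DMA:
		case FEAT_UNLOCK_DMA:
			return PROTO_DMA;
		default:
			return PROTO_NONDATA;
		}
	default:
		return PROTO_NONDATA;
	}
}

int main(void)
{
	printf("SET MAX PASSWD     -> %d (PIO)\n", sata_protocol(CMD_SET_MAX, FEAT_PASSWD));
	printf("SET MAX UNLOCK DMA -> %d (DMA)\n", sata_protocol(CMD_SET_MAX, FEAT_UNLOCK_DMA));
	return 0;
}
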
@@ -134,6 +138,22 @@ int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
134} 138}
135EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); 139EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
136 140
141/*
142 * This function assumes linkrate mask fits in 8 bits, which it
143 * does for all HW versions supported.
144 */
145u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
146{
147 u16 rate = 0;
148 int i;
149
150 max -= SAS_LINK_RATE_1_5_GBPS;
151 for (i = 0; i <= max; i++)
152 rate |= 1 << (i * 2);
153 return rate;
154}
155EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
156
137static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 157static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
138{ 158{
139 return device->port->ha->lldd_ha; 159 return device->port->ha->lldd_ha;
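
The new helper above packs the supported link rates into a bitfield, one set bit every two positions starting at 1.5 Gbps, as its comment says fits in 8 bits for all supported hardware. A userspace sketch of the same computation; the enum values mirror enum sas_linkrate and are assumptions here:

#include <stdint.h>
#include <stdio.h>

enum { RATE_1_5_GBPS = 8, RATE_3_0_GBPS = 9,
       RATE_6_0_GBPS = 10, RATE_12_0_GBPS = 11 };

static uint8_t prog_phy_linkrate_mask(int max)
{
	uint16_t rate = 0;
	int i;

	max -= RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);   /* 1.5G -> bit 0, 3G -> bit 2, 6G -> bit 4 ... */
	return rate;
}

int main(void)
{
	printf("max 6.0 Gbps  -> mask 0x%02x\n", prog_phy_linkrate_mask(RATE_6_0_GBPS));   /* 0x15 */
	printf("max 12.0 Gbps -> mask 0x%02x\n", prog_phy_linkrate_mask(RATE_12_0_GBPS));  /* 0x55 */
	return 0;
}
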
@@ -178,11 +198,18 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
178 unsigned int index; 198 unsigned int index;
179 void *bitmap = hisi_hba->slot_index_tags; 199 void *bitmap = hisi_hba->slot_index_tags;
180 200
181 index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count); 201 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
182 if (index >= hisi_hba->slot_index_count) 202 hisi_hba->last_slot_index + 1);
183 return -SAS_QUEUE_FULL; 203 if (index >= hisi_hba->slot_index_count) {
204 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
205 0);
206 if (index >= hisi_hba->slot_index_count)
207 return -SAS_QUEUE_FULL;
208 }
184 hisi_sas_slot_index_set(hisi_hba, index); 209 hisi_sas_slot_index_set(hisi_hba, index);
185 *slot_idx = index; 210 *slot_idx = index;
211 hisi_hba->last_slot_index = index;
212
186 return 0; 213 return 0;
187} 214}
188 215
@@ -197,6 +224,8 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
197void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 224void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
198 struct hisi_sas_slot *slot) 225 struct hisi_sas_slot *slot)
199{ 226{
227 struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
228 unsigned long flags;
200 229
201 if (task) { 230 if (task) {
202 struct device *dev = hisi_hba->dev; 231 struct device *dev = hisi_hba->dev;
@@ -216,40 +245,43 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
216 if (slot->buf) 245 if (slot->buf)
217 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); 246 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
218 247
248 spin_lock_irqsave(&dq->lock, flags);
219 list_del_init(&slot->entry); 249 list_del_init(&slot->entry);
250 spin_unlock_irqrestore(&dq->lock, flags);
220 slot->buf = NULL; 251 slot->buf = NULL;
221 slot->task = NULL; 252 slot->task = NULL;
222 slot->port = NULL; 253 slot->port = NULL;
254 spin_lock_irqsave(&hisi_hba->lock, flags);
223 hisi_sas_slot_index_free(hisi_hba, slot->idx); 255 hisi_sas_slot_index_free(hisi_hba, slot->idx);
256 spin_unlock_irqrestore(&hisi_hba->lock, flags);
224 257
225 /* slot memory is fully zeroed when it is reused */ 258 /* slot memory is fully zeroed when it is reused */
226} 259}
227EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 260EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
228 261
229static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 262static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
230 struct hisi_sas_slot *slot) 263 struct hisi_sas_slot *slot)
231{ 264{
232 return hisi_hba->hw->prep_smp(hisi_hba, slot); 265 hisi_hba->hw->prep_smp(hisi_hba, slot);
233} 266}
234 267
235static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 268static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
236 struct hisi_sas_slot *slot, int is_tmf, 269 struct hisi_sas_slot *slot)
237 struct hisi_sas_tmf_task *tmf)
238{ 270{
239 return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf); 271 hisi_hba->hw->prep_ssp(hisi_hba, slot);
240} 272}
241 273
242static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 274static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
243 struct hisi_sas_slot *slot) 275 struct hisi_sas_slot *slot)
244{ 276{
245 return hisi_hba->hw->prep_stp(hisi_hba, slot); 277 hisi_hba->hw->prep_stp(hisi_hba, slot);
246} 278}
247 279
248static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 280static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
249 struct hisi_sas_slot *slot, 281 struct hisi_sas_slot *slot,
250 int device_id, int abort_flag, int tag_to_abort) 282 int device_id, int abort_flag, int tag_to_abort)
251{ 283{
252 return hisi_hba->hw->prep_abort(hisi_hba, slot, 284 hisi_hba->hw->prep_abort(hisi_hba, slot,
253 device_id, abort_flag, tag_to_abort); 285 device_id, abort_flag, tag_to_abort);
254} 286}
255 287
@@ -269,7 +301,6 @@ static void hisi_sas_slot_abort(struct work_struct *work)
269 struct scsi_lun lun; 301 struct scsi_lun lun;
270 struct device *dev = hisi_hba->dev; 302 struct device *dev = hisi_hba->dev;
271 int tag = abort_slot->idx; 303 int tag = abort_slot->idx;
272 unsigned long flags;
273 304
274 if (!(task->task_proto & SAS_PROTOCOL_SSP)) { 305 if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
275 dev_err(dev, "cannot abort slot for non-ssp task\n"); 306 dev_err(dev, "cannot abort slot for non-ssp task\n");
@@ -283,27 +314,29 @@ static void hisi_sas_slot_abort(struct work_struct *work)
283 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task); 314 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
284out: 315out:
285 /* Do cleanup for this task */ 316 /* Do cleanup for this task */
286 spin_lock_irqsave(&hisi_hba->lock, flags);
287 hisi_sas_slot_task_free(hisi_hba, task, abort_slot); 317 hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
288 spin_unlock_irqrestore(&hisi_hba->lock, flags);
289 if (task->task_done) 318 if (task->task_done)
290 task->task_done(task); 319 task->task_done(task);
291} 320}
292 321
293static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq 322static int hisi_sas_task_prep(struct sas_task *task,
294 *dq, int is_tmf, struct hisi_sas_tmf_task *tmf, 323 struct hisi_sas_dq **dq_pointer,
295 int *pass) 324 bool is_tmf, struct hisi_sas_tmf_task *tmf,
325 int *pass)
296{ 326{
297 struct hisi_hba *hisi_hba = dq->hisi_hba;
298 struct domain_device *device = task->dev; 327 struct domain_device *device = task->dev;
328 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
299 struct hisi_sas_device *sas_dev = device->lldd_dev; 329 struct hisi_sas_device *sas_dev = device->lldd_dev;
300 struct hisi_sas_port *port; 330 struct hisi_sas_port *port;
301 struct hisi_sas_slot *slot; 331 struct hisi_sas_slot *slot;
302 struct hisi_sas_cmd_hdr *cmd_hdr_base; 332 struct hisi_sas_cmd_hdr *cmd_hdr_base;
303 struct asd_sas_port *sas_port = device->port; 333 struct asd_sas_port *sas_port = device->port;
304 struct device *dev = hisi_hba->dev; 334 struct device *dev = hisi_hba->dev;
305 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 335 int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
306 unsigned long flags; 336 int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
337 unsigned long flags, flags_dq;
338 struct hisi_sas_dq *dq;
339 int wr_q_index;
307 340
308 if (!sas_port) { 341 if (!sas_port) {
309 struct task_status_struct *ts = &task->task_status; 342 struct task_status_struct *ts = &task->task_status;
@@ -330,6 +363,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
330 return -ECOMM; 363 return -ECOMM;
331 } 364 }
332 365
366 *dq_pointer = dq = sas_dev->dq;
367
333 port = to_hisi_sas_port(sas_port); 368 port = to_hisi_sas_port(sas_port);
334 if (port && !port->port_attached) { 369 if (port && !port->port_attached) {
335 dev_info(dev, "task prep: %s port%d not attach device\n", 370 dev_info(dev, "task prep: %s port%d not attach device\n",
@@ -341,6 +376,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
341 } 376 }
342 377
343 if (!sas_protocol_ata(task->task_proto)) { 378 if (!sas_protocol_ata(task->task_proto)) {
379 unsigned int req_len, resp_len;
380
344 if (task->num_scatter) { 381 if (task->num_scatter) {
345 n_elem = dma_map_sg(dev, task->scatter, 382 n_elem = dma_map_sg(dev, task->scatter,
346 task->num_scatter, task->data_dir); 383 task->num_scatter, task->data_dir);
@@ -348,31 +385,74 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
348 rc = -ENOMEM; 385 rc = -ENOMEM;
349 goto prep_out; 386 goto prep_out;
350 } 387 }
388 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
389 n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
390 1, DMA_TO_DEVICE);
391 if (!n_elem_req) {
392 rc = -ENOMEM;
393 goto prep_out;
394 }
395 req_len = sg_dma_len(&task->smp_task.smp_req);
396 if (req_len & 0x3) {
397 rc = -EINVAL;
398 goto err_out_dma_unmap;
399 }
400 n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
401 1, DMA_FROM_DEVICE);
402 if (!n_elem_resp) {
403 rc = -ENOMEM;
404 goto err_out_dma_unmap;
405 }
406 resp_len = sg_dma_len(&task->smp_task.smp_resp);
407 if (resp_len & 0x3) {
408 rc = -EINVAL;
409 goto err_out_dma_unmap;
410 }
351 } 411 }
352 } else 412 } else
353 n_elem = task->num_scatter; 413 n_elem = task->num_scatter;
354 414
415 if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
416 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
417 n_elem);
418 rc = -EINVAL;
419 goto err_out_dma_unmap;
420 }
421
355 spin_lock_irqsave(&hisi_hba->lock, flags); 422 spin_lock_irqsave(&hisi_hba->lock, flags);
356 if (hisi_hba->hw->slot_index_alloc) 423 if (hisi_hba->hw->slot_index_alloc)
357 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx, 424 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
358 device); 425 device);
359 else 426 else
360 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 427 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
361 if (rc) {
362 spin_unlock_irqrestore(&hisi_hba->lock, flags);
363 goto err_out;
364 }
365 spin_unlock_irqrestore(&hisi_hba->lock, flags); 428 spin_unlock_irqrestore(&hisi_hba->lock, flags);
366
367 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
368 if (rc) 429 if (rc)
369 goto err_out_tag; 430 goto err_out_dma_unmap;
370 431
371 dlvry_queue = dq->id;
372 dlvry_queue_slot = dq->wr_point;
373 slot = &hisi_hba->slot_info[slot_idx]; 432 slot = &hisi_hba->slot_info[slot_idx];
374 memset(slot, 0, sizeof(struct hisi_sas_slot)); 433 memset(slot, 0, sizeof(struct hisi_sas_slot));
375 434
435 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
436 GFP_ATOMIC, &slot->buf_dma);
437 if (!slot->buf) {
438 rc = -ENOMEM;
439 goto err_out_tag;
440 }
441
442 spin_lock_irqsave(&dq->lock, flags_dq);
443 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
444 if (wr_q_index < 0) {
445 spin_unlock_irqrestore(&dq->lock, flags_dq);
446 rc = -EAGAIN;
447 goto err_out_buf;
448 }
449
450 list_add_tail(&slot->delivery, &dq->list);
451 spin_unlock_irqrestore(&dq->lock, flags_dq);
452
453 dlvry_queue = dq->id;
454 dlvry_queue_slot = wr_q_index;
455
376 slot->idx = slot_idx; 456 slot->idx = slot_idx;
377 slot->n_elem = n_elem; 457 slot->n_elem = n_elem;
378 slot->dlvry_queue = dlvry_queue; 458 slot->dlvry_queue = dlvry_queue;
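
The new SMP branch above maps the request and response scatterlists separately, rejects lengths that are not multiples of four, and unwinds through err_out_dma_unmap so every successful mapping is released on any later failure. A userspace sketch of that goto-based unwind, with map()/unmap() as hypothetical stand-ins for dma_map_sg()/dma_unmap_sg():

#include <stdio.h>
#include <stdlib.h>

static void *map(size_t len) { return malloc(len); }
static void unmap(void *p)   { free(p); }

static int prep_smp(size_t req_len, size_t resp_len)
{
	void *req, *resp = NULL;
	int rc;

	req = map(req_len);
	if (!req)
		return -1;                 /* like -ENOMEM */
	if (req_len & 0x3) {               /* SMP frames must be 4-byte multiples */
		rc = -2;                   /* like -EINVAL */
		goto err_unmap;
	}
	resp = map(resp_len);
	if (!resp) {
		rc = -1;
		goto err_unmap;
	}
	if (resp_len & 0x3) {
		rc = -2;
		goto err_unmap;
	}
	printf("mapped %zu-byte request, %zu-byte response\n", req_len, resp_len);
	unmap(resp);                       /* released here only to keep the sketch leak-free */
	unmap(req);
	return 0;

err_unmap:
	if (resp)
		unmap(resp);
	unmap(req);
	return rc;
}

int main(void)
{
	prep_smp(8, 1024);     /* ok */
	prep_smp(10, 1024);    /* rejected: request not 4-byte aligned */
	return 0;
}
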
@@ -381,99 +461,94 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
381 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 461 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
382 slot->task = task; 462 slot->task = task;
383 slot->port = port; 463 slot->port = port;
464 slot->tmf = tmf;
465 slot->is_internal = is_tmf;
384 task->lldd_task = slot; 466 task->lldd_task = slot;
385 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort); 467 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
386 468
387 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
388 GFP_ATOMIC, &slot->buf_dma);
389 if (!slot->buf) {
390 rc = -ENOMEM;
391 goto err_out_slot_buf;
392 }
393 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 469 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
394 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 470 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
395 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 471 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
396 472
397 switch (task->task_proto) { 473 switch (task->task_proto) {
398 case SAS_PROTOCOL_SMP: 474 case SAS_PROTOCOL_SMP:
399 rc = hisi_sas_task_prep_smp(hisi_hba, slot); 475 hisi_sas_task_prep_smp(hisi_hba, slot);
400 break; 476 break;
401 case SAS_PROTOCOL_SSP: 477 case SAS_PROTOCOL_SSP:
402 rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf); 478 hisi_sas_task_prep_ssp(hisi_hba, slot);
403 break; 479 break;
404 case SAS_PROTOCOL_SATA: 480 case SAS_PROTOCOL_SATA:
405 case SAS_PROTOCOL_STP: 481 case SAS_PROTOCOL_STP:
406 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 482 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
407 rc = hisi_sas_task_prep_ata(hisi_hba, slot); 483 hisi_sas_task_prep_ata(hisi_hba, slot);
408 break; 484 break;
409 default: 485 default:
410 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", 486 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
411 task->task_proto); 487 task->task_proto);
412 rc = -EINVAL;
413 break; 488 break;
414 } 489 }
415 490
416 if (rc) { 491 spin_lock_irqsave(&dq->lock, flags);
417 dev_err(dev, "task prep: rc = 0x%x\n", rc);
418 goto err_out_buf;
419 }
420
421 spin_lock_irqsave(&hisi_hba->lock, flags);
422 list_add_tail(&slot->entry, &sas_dev->list); 492 list_add_tail(&slot->entry, &sas_dev->list);
423 spin_unlock_irqrestore(&hisi_hba->lock, flags); 493 spin_unlock_irqrestore(&dq->lock, flags);
424 spin_lock_irqsave(&task->task_state_lock, flags); 494 spin_lock_irqsave(&task->task_state_lock, flags);
425 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 495 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
426 spin_unlock_irqrestore(&task->task_state_lock, flags); 496 spin_unlock_irqrestore(&task->task_state_lock, flags);
427 497
428 dq->slot_prep = slot;
429 ++(*pass); 498 ++(*pass);
499 slot->ready = 1;
430 500
431 return 0; 501 return 0;
432 502
433err_out_buf: 503err_out_buf:
434 dma_pool_free(hisi_hba->buffer_pool, slot->buf, 504 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
435 slot->buf_dma); 505 slot->buf_dma);
436err_out_slot_buf:
437 /* Nothing to be done */
438err_out_tag: 506err_out_tag:
439 spin_lock_irqsave(&hisi_hba->lock, flags); 507 spin_lock_irqsave(&hisi_hba->lock, flags);
440 hisi_sas_slot_index_free(hisi_hba, slot_idx); 508 hisi_sas_slot_index_free(hisi_hba, slot_idx);
441 spin_unlock_irqrestore(&hisi_hba->lock, flags); 509 spin_unlock_irqrestore(&hisi_hba->lock, flags);
442err_out: 510err_out_dma_unmap:
443 dev_err(dev, "task prep: failed[%d]!\n", rc); 511 if (!sas_protocol_ata(task->task_proto)) {
444 if (!sas_protocol_ata(task->task_proto)) 512 if (task->num_scatter) {
445 if (n_elem) 513 dma_unmap_sg(dev, task->scatter, task->num_scatter,
446 dma_unmap_sg(dev, task->scatter, 514 task->data_dir);
447 task->num_scatter, 515 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
448 task->data_dir); 516 if (n_elem_req)
517 dma_unmap_sg(dev, &task->smp_task.smp_req,
518 1, DMA_TO_DEVICE);
519 if (n_elem_resp)
520 dma_unmap_sg(dev, &task->smp_task.smp_resp,
521 1, DMA_FROM_DEVICE);
522 }
523 }
449prep_out: 524prep_out:
525 dev_err(dev, "task prep: failed[%d]!\n", rc);
450 return rc; 526 return rc;
451} 527}
452 528
453static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, 529static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
454 int is_tmf, struct hisi_sas_tmf_task *tmf) 530 bool is_tmf, struct hisi_sas_tmf_task *tmf)
455{ 531{
456 u32 rc; 532 u32 rc;
457 u32 pass = 0; 533 u32 pass = 0;
458 unsigned long flags; 534 unsigned long flags;
459 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 535 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
460 struct device *dev = hisi_hba->dev; 536 struct device *dev = hisi_hba->dev;
461 struct domain_device *device = task->dev; 537 struct hisi_sas_dq *dq = NULL;
462 struct hisi_sas_device *sas_dev = device->lldd_dev;
463 struct hisi_sas_dq *dq = sas_dev->dq;
464 538
465 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 539 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
466 return -EINVAL; 540 return -EINVAL;
467 541
468 /* protect task_prep and start_delivery sequence */ 542 /* protect task_prep and start_delivery sequence */
469 spin_lock_irqsave(&dq->lock, flags); 543 rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
470 rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
471 if (rc) 544 if (rc)
472 dev_err(dev, "task exec: failed[%d]!\n", rc); 545 dev_err(dev, "task exec: failed[%d]!\n", rc);
473 546
474 if (likely(pass)) 547 if (likely(pass)) {
548 spin_lock_irqsave(&dq->lock, flags);
475 hisi_hba->hw->start_delivery(dq); 549 hisi_hba->hw->start_delivery(dq);
476 spin_unlock_irqrestore(&dq->lock, flags); 550 spin_unlock_irqrestore(&dq->lock, flags);
551 }
477 552
478 return rc; 553 return rc;
479} 554}
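
hisi_sas_task_exec() above no longer holds the delivery-queue lock across all of task prep: preparation runs unlocked, and only the final start_delivery() doorbell is serialized per queue. A sketch of the narrowed critical section, assuming POSIX threads rather than the kernel's spinlocks:

#include <pthread.h>
#include <stdio.h>

struct dq {
	pthread_mutex_t lock;
	int wr_point;
};

static void start_delivery(struct dq *dq)
{
	dq->wr_point++;                    /* stand-in for ringing the hw doorbell */
}

static int task_exec(struct dq *dq)
{
	/* ... the task_prep() work happens here, outside dq->lock ... */

	pthread_mutex_lock(&dq->lock);
	start_delivery(dq);                /* only the delivery step is locked */
	pthread_mutex_unlock(&dq->lock);
	return 0;
}

int main(void)
{
	struct dq dq = { PTHREAD_MUTEX_INITIALIZER, 0 };

	task_exec(&dq);
	printf("wr_point=%d\n", dq.wr_point);
	return 0;
}
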
@@ -524,10 +599,12 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
524 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 599 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
525 struct hisi_sas_device *sas_dev = NULL; 600 struct hisi_sas_device *sas_dev = NULL;
526 unsigned long flags; 601 unsigned long flags;
602 int last = hisi_hba->last_dev_id;
603 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
527 int i; 604 int i;
528 605
529 spin_lock_irqsave(&hisi_hba->lock, flags); 606 spin_lock_irqsave(&hisi_hba->lock, flags);
530 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 607 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
531 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 608 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
532 int queue = i % hisi_hba->queue_count; 609 int queue = i % hisi_hba->queue_count;
533 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 610 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
@@ -542,18 +619,57 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
542 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 619 INIT_LIST_HEAD(&hisi_hba->devices[i].list);
543 break; 620 break;
544 } 621 }
622 i++;
545 } 623 }
624 hisi_hba->last_dev_id = i;
546 spin_unlock_irqrestore(&hisi_hba->lock, flags); 625 spin_unlock_irqrestore(&hisi_hba->lock, flags);
547 626
548 return sas_dev; 627 return sas_dev;
549} 628}
550 629
630#define HISI_SAS_SRST_ATA_DISK_CNT 3
631static int hisi_sas_init_device(struct domain_device *device)
632{
633 int rc = TMF_RESP_FUNC_COMPLETE;
634 struct scsi_lun lun;
635 struct hisi_sas_tmf_task tmf_task;
636 int retry = HISI_SAS_SRST_ATA_DISK_CNT;
637 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
638
639 switch (device->dev_type) {
640 case SAS_END_DEVICE:
641 int_to_scsilun(0, &lun);
642
643 tmf_task.tmf = TMF_CLEAR_TASK_SET;
644 rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
645 &tmf_task);
646 if (rc == TMF_RESP_FUNC_COMPLETE)
647 hisi_sas_release_task(hisi_hba, device);
648 break;
649 case SAS_SATA_DEV:
650 case SAS_SATA_PM:
651 case SAS_SATA_PM_PORT:
652 case SAS_SATA_PENDING:
653 while (retry-- > 0) {
654 rc = hisi_sas_softreset_ata_disk(device);
655 if (!rc)
656 break;
657 }
658 break;
659 default:
660 break;
661 }
662
663 return rc;
664}
665
551static int hisi_sas_dev_found(struct domain_device *device) 666static int hisi_sas_dev_found(struct domain_device *device)
552{ 667{
553 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 668 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
554 struct domain_device *parent_dev = device->parent; 669 struct domain_device *parent_dev = device->parent;
555 struct hisi_sas_device *sas_dev; 670 struct hisi_sas_device *sas_dev;
556 struct device *dev = hisi_hba->dev; 671 struct device *dev = hisi_hba->dev;
672 int rc;
557 673
558 if (hisi_hba->hw->alloc_dev) 674 if (hisi_hba->hw->alloc_dev)
559 sas_dev = hisi_hba->hw->alloc_dev(device); 675 sas_dev = hisi_hba->hw->alloc_dev(device);
@@ -576,10 +692,8 @@ static int hisi_sas_dev_found(struct domain_device *device)
576 for (phy_no = 0; phy_no < phy_num; phy_no++) { 692 for (phy_no = 0; phy_no < phy_num; phy_no++) {
577 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 693 phy = &parent_dev->ex_dev.ex_phy[phy_no];
578 if (SAS_ADDR(phy->attached_sas_addr) == 694 if (SAS_ADDR(phy->attached_sas_addr) ==
579 SAS_ADDR(device->sas_addr)) { 695 SAS_ADDR(device->sas_addr))
580 sas_dev->attached_phy = phy_no;
581 break; 696 break;
582 }
583 } 697 }
584 698
585 if (phy_no == phy_num) { 699 if (phy_no == phy_num) {
@@ -587,17 +701,25 @@ static int hisi_sas_dev_found(struct domain_device *device)
587 "dev:%016llx at ex:%016llx\n", 701 "dev:%016llx at ex:%016llx\n",
588 SAS_ADDR(device->sas_addr), 702 SAS_ADDR(device->sas_addr),
589 SAS_ADDR(parent_dev->sas_addr)); 703 SAS_ADDR(parent_dev->sas_addr));
590 return -EINVAL; 704 rc = -EINVAL;
705 goto err_out;
591 } 706 }
592 } 707 }
593 708
594 dev_info(dev, "dev[%d:%x] found\n", 709 dev_info(dev, "dev[%d:%x] found\n",
595 sas_dev->device_id, sas_dev->dev_type); 710 sas_dev->device_id, sas_dev->dev_type);
596 711
712 rc = hisi_sas_init_device(device);
713 if (rc)
714 goto err_out;
597 return 0; 715 return 0;
716
717err_out:
718 hisi_sas_dev_gone(device);
719 return rc;
598} 720}
599 721
600static int hisi_sas_slave_configure(struct scsi_device *sdev) 722int hisi_sas_slave_configure(struct scsi_device *sdev)
601{ 723{
602 struct domain_device *dev = sdev_to_domain_dev(sdev); 724 struct domain_device *dev = sdev_to_domain_dev(sdev);
603 int ret = sas_slave_configure(sdev); 725 int ret = sas_slave_configure(sdev);
@@ -609,15 +731,17 @@ static int hisi_sas_slave_configure(struct scsi_device *sdev)
609 731
610 return 0; 732 return 0;
611} 733}
734EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
612 735
613static void hisi_sas_scan_start(struct Scsi_Host *shost) 736void hisi_sas_scan_start(struct Scsi_Host *shost)
614{ 737{
615 struct hisi_hba *hisi_hba = shost_priv(shost); 738 struct hisi_hba *hisi_hba = shost_priv(shost);
616 739
617 hisi_hba->hw->phys_init(hisi_hba); 740 hisi_hba->hw->phys_init(hisi_hba);
618} 741}
742EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
619 743
620static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 744int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
621{ 745{
622 struct hisi_hba *hisi_hba = shost_priv(shost); 746 struct hisi_hba *hisi_hba = shost_priv(shost);
623 struct sas_ha_struct *sha = &hisi_hba->sha; 747 struct sas_ha_struct *sha = &hisi_hba->sha;
@@ -629,6 +753,7 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
629 sas_drain_work(sha); 753 sas_drain_work(sha);
630 return 1; 754 return 1;
631} 755}
756EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
632 757
633static void hisi_sas_phyup_work(struct work_struct *work) 758static void hisi_sas_phyup_work(struct work_struct *work)
634{ 759{
@@ -803,6 +928,33 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
803 return hisi_sas_task_exec(task, gfp_flags, 0, NULL); 928 return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
804} 929}
805 930
931static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
932 struct sas_phy_linkrates *r)
933{
934 struct sas_phy_linkrates _r;
935
936 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
937 struct asd_sas_phy *sas_phy = &phy->sas_phy;
938 enum sas_linkrate min, max;
939
940 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
941 max = sas_phy->phy->maximum_linkrate;
942 min = r->minimum_linkrate;
943 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
944 max = r->maximum_linkrate;
945 min = sas_phy->phy->minimum_linkrate;
946 } else
947 return;
948
949 _r.maximum_linkrate = max;
950 _r.minimum_linkrate = min;
951
952 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
953 msleep(100);
954 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
955 hisi_hba->hw->phy_start(hisi_hba, phy_no);
956}
957
806static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 958static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
807 void *funcdata) 959 void *funcdata)
808{ 960{
@@ -826,7 +978,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
826 break; 978 break;
827 979
828 case PHY_FUNC_SET_LINK_RATE: 980 case PHY_FUNC_SET_LINK_RATE:
829 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata); 981 hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
830 break; 982 break;
831 case PHY_FUNC_GET_EVENTS: 983 case PHY_FUNC_GET_EVENTS:
832 if (hisi_hba->hw->get_events) { 984 if (hisi_hba->hw->get_events) {
@@ -990,7 +1142,6 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
990 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1142 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
991 struct device *dev = hisi_hba->dev; 1143 struct device *dev = hisi_hba->dev;
992 int s = sizeof(struct host_to_dev_fis); 1144 int s = sizeof(struct host_to_dev_fis);
993 unsigned long flags;
994 1145
995 ata_for_each_link(link, ap, EDGE) { 1146 ata_for_each_link(link, ap, EDGE) {
996 int pmp = sata_srst_pmp(link); 1147 int pmp = sata_srst_pmp(link);
@@ -1015,11 +1166,8 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
1015 dev_err(dev, "ata disk reset failed\n"); 1166 dev_err(dev, "ata disk reset failed\n");
1016 } 1167 }
1017 1168
1018 if (rc == TMF_RESP_FUNC_COMPLETE) { 1169 if (rc == TMF_RESP_FUNC_COMPLETE)
1019 spin_lock_irqsave(&hisi_hba->lock, flags);
1020 hisi_sas_release_task(hisi_hba, device); 1170 hisi_sas_release_task(hisi_hba, device);
1021 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1022 }
1023 1171
1024 return rc; 1172 return rc;
1025} 1173}
@@ -1111,12 +1259,103 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1111 } 1259 }
1112} 1260}
1113 1261
1262static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
1263{
1264 struct hisi_sas_device *sas_dev;
1265 struct domain_device *device;
1266 int i;
1267
1268 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1269 sas_dev = &hisi_hba->devices[i];
1270 device = sas_dev->sas_device;
1271
1272 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1273 continue;
1274
1275 hisi_sas_init_device(device);
1276 }
1277}
1278
1279static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
1280 struct asd_sas_port *sas_port,
1281 struct domain_device *device)
1282{
1283 struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
1284 struct ata_port *ap = device->sata_dev.ap;
1285 struct device *dev = hisi_hba->dev;
1286 int s = sizeof(struct host_to_dev_fis);
1287 int rc = TMF_RESP_FUNC_FAILED;
1288 struct asd_sas_phy *sas_phy;
1289 struct ata_link *link;
1290 u8 fis[20] = {0};
1291 u32 state;
1292
1293 state = hisi_hba->hw->get_phys_state(hisi_hba);
1294 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
1295 if (!(state & BIT(sas_phy->id)))
1296 continue;
1297
1298 ata_for_each_link(link, ap, EDGE) {
1299 int pmp = sata_srst_pmp(link);
1300
1301 tmf_task.phy_id = sas_phy->id;
1302 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1303 rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
1304 &tmf_task);
1305 if (rc != TMF_RESP_FUNC_COMPLETE) {
1306 dev_err(dev, "phy%d ata reset failed rc=%d\n",
1307 sas_phy->id, rc);
1308 break;
1309 }
1310 }
1311 }
1312}
1313
1314static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
1315{
1316 struct device *dev = hisi_hba->dev;
1317 int port_no, rc, i;
1318
1319 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1320 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1321 struct domain_device *device = sas_dev->sas_device;
1322
1323 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1324 continue;
1325
1326 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1327 HISI_SAS_INT_ABT_DEV, 0);
1328 if (rc < 0)
1329 dev_err(dev, "STP reject: abort dev failed %d\n", rc);
1330 }
1331
1332 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
1333 struct hisi_sas_port *port = &hisi_hba->port[port_no];
1334 struct asd_sas_port *sas_port = &port->sas_port;
1335 struct domain_device *port_dev = sas_port->port_dev;
1336 struct domain_device *device;
1337
1338 if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
1339 continue;
1340
1341 /* Try to find a SATA device */
1342 list_for_each_entry(device, &sas_port->dev_list,
1343 dev_list_node) {
1344 if (dev_is_sata(device)) {
1345 hisi_sas_send_ata_reset_each_phy(hisi_hba,
1346 sas_port,
1347 device);
1348 break;
1349 }
1350 }
1351 }
1352}
1353
1114static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1354static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1115{ 1355{
1116 struct device *dev = hisi_hba->dev; 1356 struct device *dev = hisi_hba->dev;
1117 struct Scsi_Host *shost = hisi_hba->shost; 1357 struct Scsi_Host *shost = hisi_hba->shost;
1118 u32 old_state, state; 1358 u32 old_state, state;
1119 unsigned long flags;
1120 int rc; 1359 int rc;
1121 1360
1122 if (!hisi_hba->hw->soft_reset) 1361 if (!hisi_hba->hw->soft_reset)
@@ -1129,6 +1368,11 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1129 old_state = hisi_hba->hw->get_phys_state(hisi_hba); 1368 old_state = hisi_hba->hw->get_phys_state(hisi_hba);
1130 1369
1131 scsi_block_requests(shost); 1370 scsi_block_requests(shost);
1371 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
1372
1373 if (timer_pending(&hisi_hba->timer))
1374 del_timer_sync(&hisi_hba->timer);
1375
1132 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1376 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1133 rc = hisi_hba->hw->soft_reset(hisi_hba); 1377 rc = hisi_hba->hw->soft_reset(hisi_hba);
1134 if (rc) { 1378 if (rc) {
@@ -1137,9 +1381,6 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1137 scsi_unblock_requests(shost); 1381 scsi_unblock_requests(shost);
1138 goto out; 1382 goto out;
1139 } 1383 }
1140 spin_lock_irqsave(&hisi_hba->lock, flags);
1141 hisi_sas_release_tasks(hisi_hba);
1142 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1143 1384
1144 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1385 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1145 1386
@@ -1147,6 +1388,10 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1147 hisi_hba->hw->phys_init(hisi_hba); 1388 hisi_hba->hw->phys_init(hisi_hba);
1148 msleep(1000); 1389 msleep(1000);
1149 hisi_sas_refresh_port_id(hisi_hba); 1390 hisi_sas_refresh_port_id(hisi_hba);
1391
1392 if (hisi_hba->reject_stp_links_msk)
1393 hisi_sas_terminate_stp_reject(hisi_hba);
1394 hisi_sas_reset_init_all_devices(hisi_hba);
1150 scsi_unblock_requests(shost); 1395 scsi_unblock_requests(shost);
1151 1396
1152 state = hisi_hba->hw->get_phys_state(hisi_hba); 1397 state = hisi_hba->hw->get_phys_state(hisi_hba);
@@ -1165,20 +1410,25 @@ static int hisi_sas_abort_task(struct sas_task *task)
1165 struct hisi_sas_tmf_task tmf_task; 1410 struct hisi_sas_tmf_task tmf_task;
1166 struct domain_device *device = task->dev; 1411 struct domain_device *device = task->dev;
1167 struct hisi_sas_device *sas_dev = device->lldd_dev; 1412 struct hisi_sas_device *sas_dev = device->lldd_dev;
1168 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 1413 struct hisi_hba *hisi_hba;
1169 struct device *dev = hisi_hba->dev; 1414 struct device *dev;
1170 int rc = TMF_RESP_FUNC_FAILED; 1415 int rc = TMF_RESP_FUNC_FAILED;
1171 unsigned long flags; 1416 unsigned long flags;
1172 1417
1173 if (!sas_dev) { 1418 if (!sas_dev)
1174 dev_warn(dev, "Device has been removed\n");
1175 return TMF_RESP_FUNC_FAILED; 1419 return TMF_RESP_FUNC_FAILED;
1176 }
1177 1420
1421 hisi_hba = dev_to_hisi_hba(task->dev);
1422 dev = hisi_hba->dev;
1423
1424 spin_lock_irqsave(&task->task_state_lock, flags);
1178 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1425 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1426 spin_unlock_irqrestore(&task->task_state_lock, flags);
1179 rc = TMF_RESP_FUNC_COMPLETE; 1427 rc = TMF_RESP_FUNC_COMPLETE;
1180 goto out; 1428 goto out;
1181 } 1429 }
1430 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1431 spin_unlock_irqrestore(&task->task_state_lock, flags);
1182 1432
1183 sas_dev->dev_status = HISI_SAS_DEV_EH; 1433 sas_dev->dev_status = HISI_SAS_DEV_EH;
1184 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1434 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
@@ -1209,11 +1459,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
1209 * will have already been completed 1459 * will have already been completed
1210 */ 1460 */
1211 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1461 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1212 if (task->lldd_task) { 1462 if (task->lldd_task)
1213 spin_lock_irqsave(&hisi_hba->lock, flags);
1214 hisi_sas_do_release_task(hisi_hba, task, slot); 1463 hisi_sas_do_release_task(hisi_hba, task, slot);
1215 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1216 }
1217 } 1464 }
1218 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1465 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1219 task->task_proto & SAS_PROTOCOL_STP) { 1466 task->task_proto & SAS_PROTOCOL_STP) {
@@ -1235,11 +1482,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
1235 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1482 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1236 HISI_SAS_INT_ABT_CMD, tag); 1483 HISI_SAS_INT_ABT_CMD, tag);
1237 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1484 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1238 task->lldd_task) { 1485 task->lldd_task)
1239 spin_lock_irqsave(&hisi_hba->lock, flags);
1240 hisi_sas_do_release_task(hisi_hba, task, slot); 1486 hisi_sas_do_release_task(hisi_hba, task, slot);
1241 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1242 }
1243 } 1487 }
1244 1488
1245out: 1489out:
@@ -1254,7 +1498,6 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1254 struct device *dev = hisi_hba->dev; 1498 struct device *dev = hisi_hba->dev;
1255 struct hisi_sas_tmf_task tmf_task; 1499 struct hisi_sas_tmf_task tmf_task;
1256 int rc = TMF_RESP_FUNC_FAILED; 1500 int rc = TMF_RESP_FUNC_FAILED;
1257 unsigned long flags;
1258 1501
1259 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1502 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1260 HISI_SAS_INT_ABT_DEV, 0); 1503 HISI_SAS_INT_ABT_DEV, 0);
@@ -1267,11 +1510,8 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1267 tmf_task.tmf = TMF_ABORT_TASK_SET; 1510 tmf_task.tmf = TMF_ABORT_TASK_SET;
1268 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1511 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1269 1512
1270 if (rc == TMF_RESP_FUNC_COMPLETE) { 1513 if (rc == TMF_RESP_FUNC_COMPLETE)
1271 spin_lock_irqsave(&hisi_hba->lock, flags);
1272 hisi_sas_release_task(hisi_hba, device); 1514 hisi_sas_release_task(hisi_hba, device);
1273 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1274 }
1275 1515
1276 return rc; 1516 return rc;
1277} 1517}
@@ -1289,12 +1529,39 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1289 1529
1290static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1530static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1291{ 1531{
1292 struct sas_phy *phy = sas_get_local_phy(device); 1532 struct sas_phy *local_phy = sas_get_local_phy(device);
1293 int rc, reset_type = (device->dev_type == SAS_SATA_DEV || 1533 int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
1294 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; 1534 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1295 rc = sas_phy_reset(phy, reset_type); 1535 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1296 sas_put_local_phy(phy); 1536 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1297 msleep(2000); 1537 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
1538 struct hisi_sas_phy *phy = container_of(sas_phy,
1539 struct hisi_sas_phy, sas_phy);
1540 DECLARE_COMPLETION_ONSTACK(phyreset);
1541
1542 if (scsi_is_sas_phy_local(local_phy)) {
1543 phy->in_reset = 1;
1544 phy->reset_completion = &phyreset;
1545 }
1546
1547 rc = sas_phy_reset(local_phy, reset_type);
1548 sas_put_local_phy(local_phy);
1549
1550 if (scsi_is_sas_phy_local(local_phy)) {
1551 int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
1552 unsigned long flags;
1553
1554 spin_lock_irqsave(&phy->lock, flags);
1555 phy->reset_completion = NULL;
1556 phy->in_reset = 0;
1557 spin_unlock_irqrestore(&phy->lock, flags);
1558
1559 /* report PHY down if timed out */
1560 if (!ret)
1561 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
1562 } else
1563 msleep(2000);
1564
1298 return rc; 1565 return rc;
1299} 1566}
1300 1567
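
The reworked nexus reset above arms an on-stack completion before resetting a local phy, waits up to two seconds for the phy-up interrupt path to complete it, and reports the phy down on timeout. A userspace analogue using a condition variable with a deadline, assuming POSIX threads in place of DECLARE_COMPLETION_ONSTACK()/wait_for_completion_timeout():

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int completed;

static void *phy_up_irq(void *arg)     /* stands in for the phy-up handler */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	completed = 1;
	pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;
	int timed_out = 0;

	pthread_create(&t, NULL, phy_up_irq, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;              /* like wait_for_completion_timeout(.., 2 * HZ) */

	pthread_mutex_lock(&lock);
	while (!completed && !timed_out)
		if (pthread_cond_timedwait(&done, &lock, &deadline))
			timed_out = 1;     /* nonzero return: deadline passed */
	pthread_mutex_unlock(&lock);

	printf(timed_out ? "reset timed out: report phy down\n"
			 : "phy came back up\n");
	pthread_join(t, NULL);
	return 0;
}
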
@@ -1304,7 +1571,6 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1304 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1571 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1305 struct device *dev = hisi_hba->dev; 1572 struct device *dev = hisi_hba->dev;
1306 int rc = TMF_RESP_FUNC_FAILED; 1573 int rc = TMF_RESP_FUNC_FAILED;
1307 unsigned long flags;
1308 1574
1309 if (sas_dev->dev_status != HISI_SAS_DEV_EH) 1575 if (sas_dev->dev_status != HISI_SAS_DEV_EH)
1310 return TMF_RESP_FUNC_FAILED; 1576 return TMF_RESP_FUNC_FAILED;
@@ -1320,11 +1586,9 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1320 1586
1321 rc = hisi_sas_debug_I_T_nexus_reset(device); 1587 rc = hisi_sas_debug_I_T_nexus_reset(device);
1322 1588
1323 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) { 1589 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
1324 spin_lock_irqsave(&hisi_hba->lock, flags);
1325 hisi_sas_release_task(hisi_hba, device); 1590 hisi_sas_release_task(hisi_hba, device);
1326 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1591
1327 }
1328 return rc; 1592 return rc;
1329} 1593}
1330 1594
@@ -1333,7 +1597,6 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1333 struct hisi_sas_device *sas_dev = device->lldd_dev; 1597 struct hisi_sas_device *sas_dev = device->lldd_dev;
1334 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1598 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1335 struct device *dev = hisi_hba->dev; 1599 struct device *dev = hisi_hba->dev;
1336 unsigned long flags;
1337 int rc = TMF_RESP_FUNC_FAILED; 1600 int rc = TMF_RESP_FUNC_FAILED;
1338 1601
1339 sas_dev->dev_status = HISI_SAS_DEV_EH; 1602 sas_dev->dev_status = HISI_SAS_DEV_EH;
@@ -1353,11 +1616,8 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1353 1616
1354 rc = sas_phy_reset(phy, 1); 1617 rc = sas_phy_reset(phy, 1);
1355 1618
1356 if (rc == 0) { 1619 if (rc == 0)
1357 spin_lock_irqsave(&hisi_hba->lock, flags);
1358 hisi_sas_release_task(hisi_hba, device); 1620 hisi_sas_release_task(hisi_hba, device);
1359 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1360 }
1361 sas_put_local_phy(phy); 1621 sas_put_local_phy(phy);
1362 } else { 1622 } else {
1363 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; 1623 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
@@ -1371,11 +1631,8 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1371 hisi_sas_dereg_device(hisi_hba, device); 1631 hisi_sas_dereg_device(hisi_hba, device);
1372 1632
1373 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1633 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
1374 if (rc == TMF_RESP_FUNC_COMPLETE) { 1634 if (rc == TMF_RESP_FUNC_COMPLETE)
1375 spin_lock_irqsave(&hisi_hba->lock, flags);
1376 hisi_sas_release_task(hisi_hba, device); 1635 hisi_sas_release_task(hisi_hba, device);
1377 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1378 }
1379 } 1636 }
1380out: 1637out:
1381 if (rc != TMF_RESP_FUNC_COMPLETE) 1638 if (rc != TMF_RESP_FUNC_COMPLETE)
@@ -1445,7 +1702,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1445 struct hisi_sas_cmd_hdr *cmd_hdr_base; 1702 struct hisi_sas_cmd_hdr *cmd_hdr_base;
1446 struct hisi_sas_dq *dq = sas_dev->dq; 1703 struct hisi_sas_dq *dq = sas_dev->dq;
1447 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 1704 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
1448 unsigned long flags, flags_dq; 1705 unsigned long flags, flags_dq = 0;
1706 int wr_q_index;
1449 1707
1450 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 1708 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
1451 return -EINVAL; 1709 return -EINVAL;
@@ -1464,16 +1722,28 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1464 } 1722 }
1465 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1723 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1466 1724
1467 spin_lock_irqsave(&dq->lock, flags_dq); 1725 slot = &hisi_hba->slot_info[slot_idx];
1468 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq); 1726 memset(slot, 0, sizeof(struct hisi_sas_slot));
1469 if (rc) 1727
1728 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
1729 GFP_ATOMIC, &slot->buf_dma);
1730 if (!slot->buf) {
1731 rc = -ENOMEM;
1470 goto err_out_tag; 1732 goto err_out_tag;
1733 }
1471 1734
1472 dlvry_queue = dq->id; 1735 spin_lock_irqsave(&dq->lock, flags_dq);
1473 dlvry_queue_slot = dq->wr_point; 1736 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
1737 if (wr_q_index < 0) {
1738 spin_unlock_irqrestore(&dq->lock, flags_dq);
1739 rc = -EAGAIN;
1740 goto err_out_buf;
1741 }
1742 list_add_tail(&slot->delivery, &dq->list);
1743 spin_unlock_irqrestore(&dq->lock, flags_dq);
1474 1744
1475 slot = &hisi_hba->slot_info[slot_idx]; 1745 dlvry_queue = dq->id;
1476 memset(slot, 0, sizeof(struct hisi_sas_slot)); 1746 dlvry_queue_slot = wr_q_index;
1477 1747
1478 slot->idx = slot_idx; 1748 slot->idx = slot_idx;
1479 slot->n_elem = n_elem; 1749 slot->n_elem = n_elem;
@@ -1483,47 +1753,36 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1483 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 1753 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1484 slot->task = task; 1754 slot->task = task;
1485 slot->port = port; 1755 slot->port = port;
1756 slot->is_internal = true;
1486 task->lldd_task = slot; 1757 task->lldd_task = slot;
1487 1758
1488 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
1489 GFP_ATOMIC, &slot->buf_dma);
1490 if (!slot->buf) {
1491 rc = -ENOMEM;
1492 goto err_out_tag;
1493 }
1494
1495 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 1759 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1496 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 1760 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
1497 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 1761 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
1498 1762
1499 rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id, 1763 hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1500 abort_flag, task_tag); 1764 abort_flag, task_tag);
1501 if (rc)
1502 goto err_out_buf;
1503 1765
1504 spin_lock_irqsave(&hisi_hba->lock, flags);
1505 list_add_tail(&slot->entry, &sas_dev->list);
1506 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1507 spin_lock_irqsave(&task->task_state_lock, flags); 1766 spin_lock_irqsave(&task->task_state_lock, flags);
1508 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 1767 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1509 spin_unlock_irqrestore(&task->task_state_lock, flags); 1768 spin_unlock_irqrestore(&task->task_state_lock, flags);
1510 1769
1511 dq->slot_prep = slot; 1770 slot->ready = 1;
1512
1513 /* send abort command to the chip */ 1771 /* send abort command to the chip */
1772 spin_lock_irqsave(&dq->lock, flags);
1773 list_add_tail(&slot->entry, &sas_dev->list);
1514 hisi_hba->hw->start_delivery(dq); 1774 hisi_hba->hw->start_delivery(dq);
1515 spin_unlock_irqrestore(&dq->lock, flags_dq); 1775 spin_unlock_irqrestore(&dq->lock, flags);
1516 1776
1517 return 0; 1777 return 0;
1518 1778
1519err_out_buf: 1779err_out_buf:
1520 dma_pool_free(hisi_hba->buffer_pool, slot->buf, 1780 dma_pool_free(hisi_hba->buffer_pool, slot->buf,
1521 slot->buf_dma); 1781 slot->buf_dma);
1522err_out_tag: 1782err_out_tag:
1523 spin_lock_irqsave(&hisi_hba->lock, flags); 1783 spin_lock_irqsave(&hisi_hba->lock, flags);
1524 hisi_sas_slot_index_free(hisi_hba, slot_idx); 1784 hisi_sas_slot_index_free(hisi_hba, slot_idx);
1525 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1785 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1526 spin_unlock_irqrestore(&dq->lock, flags_dq);
1527err_out: 1786err_out:
1528 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); 1787 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
1529 1788
@@ -1651,6 +1910,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1651 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1910 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1652 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1911 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1653 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1912 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1913 struct device *dev = hisi_hba->dev;
1654 1914
1655 if (rdy) { 1915 if (rdy) {
1656 /* Phy down but ready */ 1916 /* Phy down but ready */
@@ -1659,6 +1919,10 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1659 } else { 1919 } else {
1660 struct hisi_sas_port *port = phy->port; 1920 struct hisi_sas_port *port = phy->port;
1661 1921
1922 if (phy->in_reset) {
1923 dev_info(dev, "ignore flutter phy%d down\n", phy_no);
1924 return;
1925 }
1662 /* Phy down and not ready */ 1926 /* Phy down and not ready */
1663 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); 1927 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1664 sas_phy_disconnected(sas_phy); 1928 sas_phy_disconnected(sas_phy);
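
hisi_sas_phy_down() above now drops "flutter" events — link drops that a deliberate phy reset is expected to cause — by checking the per-phy in_reset flag set by the reset path. A minimal sketch of the flag-guarded handler, not the driver's real code:

#include <stdio.h>

struct phy { int in_reset; };

static void phy_down(struct phy *phy, int phy_no)
{
	if (phy->in_reset) {
		printf("ignore flutter phy%d down\n", phy_no);
		return;
	}
	printf("phy%d really down: tear down the port\n", phy_no);
}

int main(void)
{
	struct phy phy = { 0 };

	phy.in_reset = 1;      /* reset path arms the flag */
	phy_down(&phy, 0);     /* suppressed */
	phy.in_reset = 0;
	phy_down(&phy, 0);     /* handled normally */
	return 0;
}
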
@@ -1693,34 +1957,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1693struct scsi_transport_template *hisi_sas_stt; 1957struct scsi_transport_template *hisi_sas_stt;
1694EXPORT_SYMBOL_GPL(hisi_sas_stt); 1958EXPORT_SYMBOL_GPL(hisi_sas_stt);
1695 1959
1696static struct device_attribute *host_attrs[] = { 1960struct device_attribute *host_attrs[] = {
1697 &dev_attr_phy_event_threshold, 1961 &dev_attr_phy_event_threshold,
1698 NULL, 1962 NULL,
1699}; 1963};
1700 1964EXPORT_SYMBOL_GPL(host_attrs);
1701static struct scsi_host_template _hisi_sas_sht = {
1702 .module = THIS_MODULE,
1703 .name = DRV_NAME,
1704 .queuecommand = sas_queuecommand,
1705 .target_alloc = sas_target_alloc,
1706 .slave_configure = hisi_sas_slave_configure,
1707 .scan_finished = hisi_sas_scan_finished,
1708 .scan_start = hisi_sas_scan_start,
1709 .change_queue_depth = sas_change_queue_depth,
1710 .bios_param = sas_bios_param,
1711 .can_queue = 1,
1712 .this_id = -1,
1713 .sg_tablesize = SG_ALL,
1714 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
1715 .use_clustering = ENABLE_CLUSTERING,
1716 .eh_device_reset_handler = sas_eh_device_reset_handler,
1717 .eh_target_reset_handler = sas_eh_target_reset_handler,
1718 .target_destroy = sas_target_destroy,
1719 .ioctl = sas_ioctl,
1720 .shost_attrs = host_attrs,
1721};
1722struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
1723EXPORT_SYMBOL_GPL(hisi_sas_sht);
1724 1965
1725static struct sas_domain_function_template hisi_sas_transport_ops = { 1966static struct sas_domain_function_template hisi_sas_transport_ops = {
1726 .lldd_dev_found = hisi_sas_dev_found, 1967 .lldd_dev_found = hisi_sas_dev_found,
@@ -1798,6 +2039,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
 
 	/* Delivery queue structure */
 	spin_lock_init(&dq->lock);
+	INIT_LIST_HEAD(&dq->list);
 	dq->id = i;
 	dq->hisi_hba = hisi_hba;
 
@@ -1822,13 +2064,11 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
 		goto err_out;
 
 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
-	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
+	hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
 					    GFP_KERNEL);
 	if (!hisi_hba->itct)
 		goto err_out;
 
-	memset(hisi_hba->itct, 0, s);
-
 	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
 					   sizeof(struct hisi_sas_slot),
 					   GFP_KERNEL);
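
dma_zalloc_coherent() returns memory that is already zeroed, so the explicit memset() after dma_alloc_coherent() can be dropped. The two forms are equivalent, sketched:

	/* Before: allocate, then zero by hand. */
	itct = dma_alloc_coherent(dev, s, &itct_dma, GFP_KERNEL);
	if (itct)
		memset(itct, 0, s);

	/* After: one call, memory arrives zeroed. */
	itct = dma_zalloc_coherent(dev, s, &itct_dma, GFP_KERNEL);
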
@@ -2031,7 +2271,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 	struct hisi_hba *hisi_hba;
 	struct device *dev = &pdev->dev;
 
-	shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
+	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
 	if (!shost) {
 		dev_err(dev, "scsi host alloc failed\n");
 		return NULL;
@@ -2080,19 +2320,8 @@ err_out:
 	return NULL;
 }
 
-void hisi_sas_init_add(struct hisi_hba *hisi_hba)
-{
-	int i;
-
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		memcpy(&hisi_hba->phy[i].dev_sas_addr,
-		       hisi_hba->sas_addr,
-		       SAS_ADDR_SIZE);
-}
-EXPORT_SYMBOL_GPL(hisi_sas_init_add);
-
 int hisi_sas_probe(struct platform_device *pdev,
 		   const struct hisi_sas_hw *hw)
 {
 	struct Scsi_Host *shost;
 	struct hisi_hba *hisi_hba;
@@ -2144,8 +2373,6 @@ int hisi_sas_probe(struct platform_device *pdev,
 		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
 	}
 
-	hisi_sas_init_add(hisi_hba);
-
 	rc = scsi_add_host(shost, &pdev->dev);
 	if (rc)
 		goto err_out_ha;
@@ -2177,6 +2404,9 @@ int hisi_sas_remove(struct platform_device *pdev)
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
 	struct Scsi_Host *shost = sha->core.shost;
 
+	if (timer_pending(&hisi_hba->timer))
+		del_timer(&hisi_hba->timer);
+
 	sas_unregister_ha(sha);
 	sas_remove_host(sha->core.shost);
 
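
The timer teardown that hisi_sas_v2_remove() used to do privately (removed near the end of the v2 diff below) now lives in the common remove path above, so every hardware generation stops a pending HBA timer before the host goes away:

	/* Common teardown: cancel a pending HBA timer before unregistering. */
	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

del_timer() here assumes the callback is not mid-flight on another CPU; del_timer_sync() would be the stricter variant, but the patch keeps the plain form.
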
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 84a0ccc4daf5..89ab18c1959c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -855,39 +855,12 @@ static enum sas_linkrate phy_get_max_linkrate_v1_hw(void)
 static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no,
 		struct sas_phy_linkrates *r)
 {
-	u32 prog_phy_link_rate =
-		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
-	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
-	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-	int i;
-	enum sas_linkrate min, max;
-	u32 rate_mask = 0;
-
-	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
-		max = sas_phy->phy->maximum_linkrate;
-		min = r->minimum_linkrate;
-	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
-		max = r->maximum_linkrate;
-		min = sas_phy->phy->minimum_linkrate;
-	} else
-		return;
-
-	sas_phy->phy->maximum_linkrate = max;
-	sas_phy->phy->minimum_linkrate = min;
-
-	max -= SAS_LINK_RATE_1_5_GBPS;
+	enum sas_linkrate max = r->maximum_linkrate;
+	u32 prog_phy_link_rate = 0x800;
 
-	for (i = 0; i <= max; i++)
-		rate_mask |= 1 << (i * 2);
-
-	prog_phy_link_rate &= ~0xff;
-	prog_phy_link_rate |= rate_mask;
-
-	disable_phy_v1_hw(hisi_hba, phy_no);
-	msleep(100);
+	prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
 			prog_phy_link_rate);
-	start_phy_v1_hw(hisi_hba, phy_no);
 }
 
 static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
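
All three HW versions now defer the rate-mask computation to hisi_sas_get_prog_phy_linkrate_mask(), a shared helper added elsewhere in this series. Judging by the loop it replaces, the helper presumably builds the two-bits-per-rate field that the old code computed inline; a sketch under that assumption only:

	/* Assumed shape of the shared helper, inferred from the removed loop. */
	u32 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
	{
		u32 rate_mask = 0;
		int i;

		for (i = 0; i <= max - SAS_LINK_RATE_1_5_GBPS; i++)
			rate_mask |= 1 << (i * 2);	/* one enable bit per rate */

		return rate_mask;
	}
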
@@ -921,37 +894,45 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
 		return -EAGAIN;
 	}
 
-	return 0;
+	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
+
+	return w;
 }
 
+/* DQ lock must be taken here */
 static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
 {
 	struct hisi_hba *hisi_hba = dq->hisi_hba;
-	int dlvry_queue = dq->slot_prep->dlvry_queue;
-	int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
+	struct hisi_sas_slot *s, *s1;
+	struct list_head *dq_list;
+	int dlvry_queue = dq->id;
+	int wp, count = 0;
+
+	dq_list = &dq->list;
+	list_for_each_entry_safe(s, s1, &dq->list, delivery) {
+		if (!s->ready)
+			break;
+		count++;
+		wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+		list_del(&s->delivery);
+	}
 
-	dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
-	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
-			 dq->wr_point);
+	if (!count)
+		return;
+
+	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
 }
 
-static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
+static void prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
 			      struct hisi_sas_slot *slot,
 			      struct hisi_sas_cmd_hdr *hdr,
 			      struct scatterlist *scatter,
 			      int n_elem)
 {
 	struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
-	struct device *dev = hisi_hba->dev;
 	struct scatterlist *sg;
 	int i;
 
-	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
-		dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
-			n_elem);
-		return -EINVAL;
-	}
-
 	for_each_sg(scatter, sg, n_elem, i) {
 		struct hisi_sas_sge *entry = &sge_page->sge[i];
 
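
The delivery model changes from "one slot per hardware doorbell write" to a per-queue list: get_free_slot() now advances dq->wr_point and returns the slot index w under the DQ lock, callers chain prepared slots onto dq->list, and start_delivery() walks that list, popping everything marked ready and issuing a single write of the final write pointer. A condensed sketch of the consumer side, using the slot fields introduced by this patch; write_doorbell() is a hypothetical stand-in for the MMIO write:

	/* Sketch: batch-commit every ready slot with one doorbell write. */
	static void start_delivery(struct hisi_sas_dq *dq)
	{
		struct hisi_sas_slot *s, *s1;
		int wp, count = 0;

		list_for_each_entry_safe(s, s1, &dq->list, delivery) {
			if (!s->ready)
				break;		/* stop at the first unprepared slot */
			count++;
			wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
			list_del(&s->delivery);
		}
		if (count)
			write_doorbell(dq->id, wp);	/* hypothetical helper */
	}
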
@@ -964,48 +945,25 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
 	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
 
 	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
-
-	return 0;
 }
 
-static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
+static void prep_smp_v1_hw(struct hisi_hba *hisi_hba,
 			  struct hisi_sas_slot *slot)
 {
 	struct sas_task *task = slot->task;
 	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
 	struct domain_device *device = task->dev;
-	struct device *dev = hisi_hba->dev;
 	struct hisi_sas_port *port = slot->port;
-	struct scatterlist *sg_req, *sg_resp;
+	struct scatterlist *sg_req;
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	dma_addr_t req_dma_addr;
-	unsigned int req_len, resp_len;
-	int elem, rc;
+	unsigned int req_len;
 
-	/*
-	 * DMA-map SMP request, response buffers
-	 */
 	/* req */
 	sg_req = &task->smp_task.smp_req;
-	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
-	if (!elem)
-		return -ENOMEM;
 	req_len = sg_dma_len(sg_req);
 	req_dma_addr = sg_dma_address(sg_req);
 
-	/* resp */
-	sg_resp = &task->smp_task.smp_resp;
-	elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
-	if (!elem) {
-		rc = -ENOMEM;
-		goto err_out_req;
-	}
-	resp_len = sg_dma_len(sg_resp);
-	if ((req_len & 0x3) || (resp_len & 0x3)) {
-		rc = -EINVAL;
-		goto err_out_resp;
-	}
-
 	/* create header */
 	/* dw0 */
 	hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
@@ -1025,21 +983,10 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
 
 	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
 	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
-
-	return 0;
-
-err_out_resp:
-	dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
-		     DMA_FROM_DEVICE);
-err_out_req:
-	dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
-		     DMA_TO_DEVICE);
-	return rc;
 }
 
-static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
-			  struct hisi_sas_slot *slot, int is_tmf,
-			  struct hisi_sas_tmf_task *tmf)
+static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
+			   struct hisi_sas_slot *slot)
 {
 	struct sas_task *task = slot->task;
 	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
@@ -1048,7 +995,8 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
 	struct hisi_sas_port *port = slot->port;
 	struct sas_ssp_task *ssp_task = &task->ssp_task;
 	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
-	int has_data = 0, rc, priority = is_tmf;
+	struct hisi_sas_tmf_task *tmf = slot->tmf;
+	int has_data = 0, priority = !!tmf;
 	u8 *buf_cmd, fburst = 0;
 	u32 dw1, dw2;
 
@@ -1062,7 +1010,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
 
 	dw1 = 1 << CMD_HDR_VERIFY_DTL_OFF;
 
-	if (is_tmf) {
+	if (tmf) {
 		dw1 |= 3 << CMD_HDR_SSP_FRAME_TYPE_OFF;
 	} else {
 		switch (scsi_cmnd->sc_data_direction) {
@@ -1083,7 +1031,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
 	dw1 |= sas_dev->device_id << CMD_HDR_DEVICE_ID_OFF;
 	hdr->dw1 = cpu_to_le32(dw1);
 
-	if (is_tmf) {
+	if (tmf) {
 		dw2 = ((sizeof(struct ssp_tmf_iu) +
 			sizeof(struct ssp_frame_hdr)+3)/4) <<
 			CMD_HDR_CFL_OFF;
@@ -1097,12 +1045,9 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
 
 	hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
 
-	if (has_data) {
-		rc = prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter,
-					slot->n_elem);
-		if (rc)
-			return rc;
-	}
+	if (has_data)
+		prep_prd_sge_v1_hw(hisi_hba, slot, hdr, task->scatter,
+				   slot->n_elem);
 
 	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
 	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -1117,7 +1062,7 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
 	hdr->dw2 = cpu_to_le32(dw2);
 
 	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
-	if (!is_tmf) {
+	if (!tmf) {
 		buf_cmd[9] = fburst | task->ssp_task.task_attr |
 				(task->ssp_task.task_prio << 3);
 		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
@@ -1136,8 +1081,6 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
 			break;
 		}
 	}
-
-	return 0;
 }
 
 /* by default, task resp is complete */
@@ -1430,6 +1373,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
 	u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
 	struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
 	irqreturn_t res = IRQ_HANDLED;
+	unsigned long flags;
 
 	irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
 	if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) {
@@ -1483,6 +1427,13 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
 					SAS_PROTOCOL_SMP;
 	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
 
+	spin_lock_irqsave(&phy->lock, flags);
+	if (phy->reset_completion) {
+		phy->in_reset = 0;
+		complete(phy->reset_completion);
+	}
+	spin_unlock_irqrestore(&phy->lock, flags);
+
 end:
 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
 			     CHL_INT2_SL_PHY_ENA_MSK);
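
The new reset_completion/in_reset pair gives the reset path a way to wait for the next phy-up: the initiator arms a completion under phy->lock and sets in_reset, and each phy-up interrupt handler (here, and the v2 handlers later in this patch) clears in_reset and completes it. A sketch of the waiting side; do_phy_reset() is hypothetical and the 2*HZ timeout is illustrative, not taken from this patch:

	/* Sketch of the waiter that pairs with the complete() above. */
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);
	phy->reset_completion = &completion;
	phy->in_reset = 1;
	spin_unlock_irqrestore(&phy->lock, flags);

	do_phy_reset(phy);	/* hypothetical: trigger the hard/soft reset */
	if (!wait_for_completion_timeout(&completion, 2 * HZ))
		dev_warn(dev, "phy reset: timed out waiting for phy up\n");

	spin_lock_irqsave(&phy->lock, flags);
	phy->reset_completion = NULL;
	spin_unlock_irqrestore(&phy->lock, flags);
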
@@ -1845,6 +1796,28 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
 	return 0;
 }
 
+static struct scsi_host_template sht_v1_hw = {
+	.name = DRV_NAME,
+	.module = THIS_MODULE,
+	.queuecommand = sas_queuecommand,
+	.target_alloc = sas_target_alloc,
+	.slave_configure = hisi_sas_slave_configure,
+	.scan_finished = hisi_sas_scan_finished,
+	.scan_start = hisi_sas_scan_start,
+	.change_queue_depth = sas_change_queue_depth,
+	.bios_param = sas_bios_param,
+	.can_queue = 1,
+	.this_id = -1,
+	.sg_tablesize = SG_ALL,
+	.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+	.use_clustering = ENABLE_CLUSTERING,
+	.eh_device_reset_handler = sas_eh_device_reset_handler,
+	.eh_target_reset_handler = sas_eh_target_reset_handler,
+	.target_destroy = sas_target_destroy,
+	.ioctl = sas_ioctl,
+	.shost_attrs = host_attrs,
+};
+
 static const struct hisi_sas_hw hisi_sas_v1_hw = {
 	.hw_init = hisi_sas_v1_init,
 	.setup_itct = setup_itct_v1_hw,
@@ -1864,6 +1837,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
 	.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
 	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
+	.sht = &sht_v1_hw,
 };
 
 static int hisi_sas_v1_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index f89fb9a49ea9..213c530e63f2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -144,6 +144,7 @@
 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF 19
 #define SAS_ECC_INTR_MSK 0x1ec
 #define HGC_ERR_STAT_EN 0x238
+#define CQE_SEND_CNT 0x248
 #define DLVRY_Q_0_BASE_ADDR_LO 0x260
 #define DLVRY_Q_0_BASE_ADDR_HI 0x264
 #define DLVRY_Q_0_DEPTH 0x268
@@ -295,6 +296,10 @@
 #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
 #define CMD_HDR_TLR_CTRL_OFF 6
 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PHY_ID_OFF 8
+#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF)
+#define CMD_HDR_FORCE_PHY_OFF 17
+#define CMD_HDR_FORCE_PHY_MSK (0x1 << CMD_HDR_FORCE_PHY_OFF)
 #define CMD_HDR_PORT_OFF 18
 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
 #define CMD_HDR_PRIORITY_OFF 27
@@ -1216,7 +1221,22 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
 	}
 
 	for (i = 0; i < hisi_hba->n_phy; i++) {
-		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
+		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+		u32 prog_phy_link_rate = 0x800;
+
+		if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
+				      SAS_LINK_RATE_1_5_GBPS)) {
+			prog_phy_link_rate = 0x855;
+		} else {
+			enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
+
+			prog_phy_link_rate =
+				hisi_sas_get_prog_phy_linkrate_mask(max) |
+				0x800;
+		}
+		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
+				     prog_phy_link_rate);
 		hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, sas_phy_ctrl);
 		hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
 		hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0);
@@ -1585,39 +1605,12 @@ static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
 static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
 		struct sas_phy_linkrates *r)
 {
-	u32 prog_phy_link_rate =
-		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
-	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
-	struct asd_sas_phy *sas_phy = &phy->sas_phy;
-	int i;
-	enum sas_linkrate min, max;
-	u32 rate_mask = 0;
-
-	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
-		max = sas_phy->phy->maximum_linkrate;
-		min = r->minimum_linkrate;
-	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
-		max = r->maximum_linkrate;
-		min = sas_phy->phy->minimum_linkrate;
-	} else
-		return;
-
-	sas_phy->phy->maximum_linkrate = max;
-	sas_phy->phy->minimum_linkrate = min;
-
-	max -= SAS_LINK_RATE_1_5_GBPS;
-
-	for (i = 0; i <= max; i++)
-		rate_mask |= 1 << (i * 2);
-
-	prog_phy_link_rate &= ~0xff;
-	prog_phy_link_rate |= rate_mask;
+	enum sas_linkrate max = r->maximum_linkrate;
+	u32 prog_phy_link_rate = 0x800;
 
-	disable_phy_v2_hw(hisi_hba, phy_no);
-	msleep(100);
+	prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
 			     prog_phy_link_rate);
-	start_phy_v2_hw(hisi_hba, phy_no);
 }
 
 static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
@@ -1658,42 +1651,50 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
 	r = hisi_sas_read32_relaxed(hisi_hba,
 				DLVRY_Q_0_RD_PTR + (queue * 0x14));
 	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
-		dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
+		dev_warn(dev, "full queue=%d r=%d w=%d\n",
 			 queue, r, w);
 		return -EAGAIN;
 	}
 
-	return 0;
+	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
+
+	return w;
 }
 
+/* DQ lock must be taken here */
 static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
 {
 	struct hisi_hba *hisi_hba = dq->hisi_hba;
-	int dlvry_queue = dq->slot_prep->dlvry_queue;
-	int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
+	struct hisi_sas_slot *s, *s1;
+	struct list_head *dq_list;
+	int dlvry_queue = dq->id;
+	int wp, count = 0;
+
+	dq_list = &dq->list;
+	list_for_each_entry_safe(s, s1, &dq->list, delivery) {
+		if (!s->ready)
+			break;
+		count++;
+		wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+		list_del(&s->delivery);
+	}
+
+	if (!count)
+		return;
 
-	dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
-	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
-			 dq->wr_point);
+	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
 }
 
-static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
+static void prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
 			      struct hisi_sas_slot *slot,
 			      struct hisi_sas_cmd_hdr *hdr,
 			      struct scatterlist *scatter,
 			      int n_elem)
 {
 	struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
-	struct device *dev = hisi_hba->dev;
 	struct scatterlist *sg;
 	int i;
 
-	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
-		dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
-			n_elem);
-		return -EINVAL;
-	}
-
 	for_each_sg(scatter, sg, n_elem, i) {
 		struct hisi_sas_sge *entry = &sge_page->sge[i];
 
@@ -1706,47 +1707,24 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
 	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
 
 	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
-
-	return 0;
 }
 
-static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
+static void prep_smp_v2_hw(struct hisi_hba *hisi_hba,
 			  struct hisi_sas_slot *slot)
 {
 	struct sas_task *task = slot->task;
 	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
 	struct domain_device *device = task->dev;
-	struct device *dev = hisi_hba->dev;
 	struct hisi_sas_port *port = slot->port;
-	struct scatterlist *sg_req, *sg_resp;
+	struct scatterlist *sg_req;
 	struct hisi_sas_device *sas_dev = device->lldd_dev;
 	dma_addr_t req_dma_addr;
-	unsigned int req_len, resp_len;
-	int elem, rc;
+	unsigned int req_len;
 
-	/*
-	 * DMA-map SMP request, response buffers
-	 */
 	/* req */
 	sg_req = &task->smp_task.smp_req;
-	elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
-	if (!elem)
-		return -ENOMEM;
-	req_len = sg_dma_len(sg_req);
 	req_dma_addr = sg_dma_address(sg_req);
-
-	/* resp */
-	sg_resp = &task->smp_task.smp_resp;
-	elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
-	if (!elem) {
-		rc = -ENOMEM;
-		goto err_out_req;
-	}
-	resp_len = sg_dma_len(sg_resp);
-	if ((req_len & 0x3) || (resp_len & 0x3)) {
-		rc = -EINVAL;
-		goto err_out_resp;
-	}
+	req_len = sg_dma_len(&task->smp_task.smp_req);
 
 	/* create header */
 	/* dw0 */
@@ -1768,21 +1746,10 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
 
 	hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
 	hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
-
-	return 0;
-
-err_out_resp:
-	dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
-		     DMA_FROM_DEVICE);
-err_out_req:
-	dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
-		     DMA_TO_DEVICE);
-	return rc;
 }
 
-static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
-			  struct hisi_sas_slot *slot, int is_tmf,
-			  struct hisi_sas_tmf_task *tmf)
+static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
+			   struct hisi_sas_slot *slot)
 {
 	struct sas_task *task = slot->task;
 	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
@@ -1791,7 +1758,8 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
 	struct hisi_sas_port *port = slot->port;
 	struct sas_ssp_task *ssp_task = &task->ssp_task;
 	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
-	int has_data = 0, rc, priority = is_tmf;
+	struct hisi_sas_tmf_task *tmf = slot->tmf;
+	int has_data = 0, priority = !!tmf;
 	u8 *buf_cmd;
 	u32 dw1 = 0, dw2 = 0;
 
@@ -1802,7 +1770,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
 			       (1 << CMD_HDR_CMD_OFF)); /* ssp */
 
 	dw1 = 1 << CMD_HDR_VDTL_OFF;
-	if (is_tmf) {
+	if (tmf) {
 		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
 		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
 	} else {
@@ -1833,12 +1801,9 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
 
 	hdr->transfer_tags = cpu_to_le32(slot->idx);
 
-	if (has_data) {
-		rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
-					slot->n_elem);
-		if (rc)
-			return rc;
-	}
+	if (has_data)
+		prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+				   slot->n_elem);
 
 	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
 	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -1848,7 +1813,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
 			sizeof(struct ssp_frame_hdr);
 
 	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
-	if (!is_tmf) {
+	if (!tmf) {
 		buf_cmd[9] = task->ssp_task.task_attr |
 				(task->ssp_task.task_prio << 3);
 		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
@@ -1867,8 +1832,6 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
 			break;
 		}
 	}
-
-	return 0;
 }
 
 #define TRANS_TX_ERR	0
@@ -2380,23 +2343,24 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	struct device *dev = hisi_hba->dev;
 	struct task_status_struct *ts;
 	struct domain_device *device;
+	struct sas_ha_struct *ha;
 	enum exec_status sts;
 	struct hisi_sas_complete_v2_hdr *complete_queue =
 			hisi_hba->complete_hdr[slot->cmplt_queue];
 	struct hisi_sas_complete_v2_hdr *complete_hdr =
 			&complete_queue[slot->cmplt_queue_slot];
 	unsigned long flags;
-	int aborted;
+	bool is_internal = slot->is_internal;
 
 	if (unlikely(!task || !task->lldd_task || !task->dev))
 		return -EINVAL;
 
 	ts = &task->task_status;
 	device = task->dev;
+	ha = device->port->ha;
 	sas_dev = device->lldd_dev;
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
-	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
 	task->task_state_flags &=
 		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2404,15 +2368,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	memset(ts, 0, sizeof(*ts));
 	ts->resp = SAS_TASK_COMPLETE;
 
-	if (unlikely(aborted)) {
-		dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
-		ts->stat = SAS_ABORTED_TASK;
-		spin_lock_irqsave(&hisi_hba->lock, flags);
-		hisi_sas_slot_task_free(hisi_hba, task, slot);
-		spin_unlock_irqrestore(&hisi_hba->lock, flags);
-		return ts->stat;
-	}
-
 	if (unlikely(!sas_dev)) {
 		dev_dbg(dev, "slot complete: port has no device\n");
 		ts->stat = SAS_PHY_DOWN;
@@ -2459,10 +2414,10 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 		slot_err_v2_hw(hisi_hba, task, slot, 2);
 
 		if (ts->stat != SAS_DATA_UNDERRUN)
-			dev_info(dev, "erroneous completion iptt=%d task=%p "
+			dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
 				"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
 				"Error info: 0x%x 0x%x 0x%x 0x%x\n",
-				slot->idx, task,
+				slot->idx, task, sas_dev->device_id,
 				complete_hdr->dw0, complete_hdr->dw1,
 				complete_hdr->act, complete_hdr->dw3,
 				error_info[0], error_info[1],
@@ -2523,13 +2478,27 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 	}
 
 out:
+	hisi_sas_slot_task_free(hisi_hba, task, slot);
+	sts = ts->stat;
 	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		dev_info(dev, "slot complete: task(%p) aborted\n", task);
+		return SAS_ABORTED_TASK;
+	}
 	task->task_state_flags |= SAS_TASK_STATE_DONE;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
-	spin_lock_irqsave(&hisi_hba->lock, flags);
-	hisi_sas_slot_task_free(hisi_hba, task, slot);
-	spin_unlock_irqrestore(&hisi_hba->lock, flags);
-	sts = ts->stat;
+
+	if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
+		spin_lock_irqsave(&device->done_lock, flags);
+		if (test_bit(SAS_HA_FROZEN, &ha->state)) {
+			spin_unlock_irqrestore(&device->done_lock, flags);
+			dev_info(dev, "slot complete: task(%p) ignored\n ",
+				 task);
+			return sts;
+		}
+		spin_unlock_irqrestore(&device->done_lock, flags);
+	}
 
 	if (task->task_done)
 		task->task_done(task);
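
slot_complete_v2_hw() now frees the slot first, samples ts->stat, and only then inspects SAS_TASK_STATE_ABORTED under the task lock, so an aborted task is reported without touching the already-freed slot; the !is_internal, non-SMP branch additionally drops the completion when the HA is frozen (SAS_HA_FROZEN), leaving those tasks to libsas after the reset. In outline, with aborted() as shorthand for the locked flag test above:

	/* Condensed control flow of the reworked completion tail. */
	hisi_sas_slot_task_free(hisi_hba, task, slot);	/* slot gone from here on */
	sts = ts->stat;
	if (aborted(task))			/* shorthand: checked under task_state_lock */
		return SAS_ABORTED_TASK;	/* error handler owns the task */
	if (!is_internal && task->task_proto != SAS_PROTOCOL_SMP &&
	    test_bit(SAS_HA_FROZEN, &ha->state))
		return sts;			/* frozen HA: skip task_done() */
	task->task_done(task);
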
@@ -2537,7 +2506,7 @@ out:
 	return sts;
 }
 
-static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
+static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
 			  struct hisi_sas_slot *slot)
 {
 	struct sas_task *task = slot->task;
@@ -2547,8 +2516,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
 	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
 	struct asd_sas_port *sas_port = device->port;
 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+	struct hisi_sas_tmf_task *tmf = slot->tmf;
 	u8 *buf_cmd;
-	int has_data = 0, rc = 0, hdr_tag = 0;
+	int has_data = 0, hdr_tag = 0;
 	u32 dw1 = 0, dw2 = 0;
 
 	/* create header */
@@ -2559,6 +2529,12 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
 	else
 		hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
 
+	if (tmf && tmf->force_phy) {
+		hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
+		hdr->dw0 |= cpu_to_le32((1 << tmf->phy_id)
+				<< CMD_HDR_PHY_ID_OFF);
+	}
+
 	/* dw1 */
 	switch (task->data_dir) {
 	case DMA_TO_DEVICE:
@@ -2596,12 +2572,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
 	/* dw3 */
 	hdr->transfer_tags = cpu_to_le32(slot->idx);
 
-	if (has_data) {
-		rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
-					slot->n_elem);
-		if (rc)
-			return rc;
-	}
+	if (has_data)
+		prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+				   slot->n_elem);
 
 	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
 	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -2613,8 +2586,6 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
 	task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
 	/* fill in command FIS */
 	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
-
-	return 0;
 }
 
 static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
@@ -2651,7 +2622,7 @@ static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
 	}
 }
 
-static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
+static void prep_abort_v2_hw(struct hisi_hba *hisi_hba,
 			    struct hisi_sas_slot *slot,
 			    int device_id, int abort_flag, int tag_to_abort)
 {
@@ -2679,8 +2650,6 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
 	/* dw7 */
 	hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
 	hdr->transfer_tags = cpu_to_le32(slot->idx);
-
-	return 0;
 }
 
 static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -2692,6 +2661,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 	struct device *dev = hisi_hba->dev;
 	u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
 	struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
+	unsigned long flags;
 
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
 
@@ -2744,6 +2714,12 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 		set_link_timer_quirk(hisi_hba);
 	}
 	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
+	spin_lock_irqsave(&phy->lock, flags);
+	if (phy->reset_completion) {
+		phy->in_reset = 0;
+		complete(phy->reset_completion);
+	}
+	spin_unlock_irqrestore(&phy->lock, flags);
 
 end:
 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -3151,14 +3127,12 @@ static void cq_tasklet_v2_hw(unsigned long val)
 	struct hisi_sas_complete_v2_hdr *complete_queue;
 	u32 rd_point = cq->rd_point, wr_point, dev_id;
 	int queue = cq->id;
-	struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
 
 	if (unlikely(hisi_hba->reject_stp_links_msk))
 		phys_try_accept_stp_links_v2_hw(hisi_hba);
 
 	complete_queue = hisi_hba->complete_hdr[queue];
 
-	spin_lock(&dq->lock);
 	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
 				   (0x14 * queue));
 
@@ -3208,7 +3182,6 @@ static void cq_tasklet_v2_hw(unsigned long val)
 	/* update rd_point */
 	cq->rd_point = rd_point;
 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
-	spin_unlock(&dq->lock);
 }
 
 static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
@@ -3235,6 +3208,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
 	u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
 	irqreturn_t res = IRQ_HANDLED;
 	u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+	unsigned long flags;
 	int phy_no, offset;
 
 	phy_no = sas_phy->id;
@@ -3295,6 +3269,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
 	sas_phy->oob_mode = SATA_OOB_MODE;
 	/* Make up some unique SAS address */
 	attached_sas_addr[0] = 0x50;
+	attached_sas_addr[6] = hisi_hba->shost->host_no;
 	attached_sas_addr[7] = phy_no;
 	memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
 	memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
@@ -3308,6 +3283,12 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
 	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
 	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
 
+	spin_lock_irqsave(&phy->lock, flags);
+	if (phy->reset_completion) {
+		phy->in_reset = 0;
+		complete(phy->reset_completion);
+	}
+	spin_unlock_irqrestore(&phy->lock, flags);
 end:
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);
@@ -3546,6 +3527,46 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
 	return 0;
 }
 
+static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
+					     int delay_ms, int timeout_ms)
+{
+	struct device *dev = hisi_hba->dev;
+	int entries, entries_old = 0, time;
+
+	for (time = 0; time < timeout_ms; time += delay_ms) {
+		entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
+		if (entries == entries_old)
+			break;
+
+		entries_old = entries;
+		msleep(delay_ms);
+	}
+
+	dev_dbg(dev, "wait commands complete %dms\n", time);
+}
+
+static struct scsi_host_template sht_v2_hw = {
+	.name = DRV_NAME,
+	.module = THIS_MODULE,
+	.queuecommand = sas_queuecommand,
+	.target_alloc = sas_target_alloc,
+	.slave_configure = hisi_sas_slave_configure,
+	.scan_finished = hisi_sas_scan_finished,
+	.scan_start = hisi_sas_scan_start,
+	.change_queue_depth = sas_change_queue_depth,
+	.bios_param = sas_bios_param,
+	.can_queue = 1,
+	.this_id = -1,
+	.sg_tablesize = SG_ALL,
+	.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+	.use_clustering = ENABLE_CLUSTERING,
+	.eh_device_reset_handler = sas_eh_device_reset_handler,
+	.eh_target_reset_handler = sas_eh_target_reset_handler,
+	.target_destroy = sas_target_destroy,
+	.ioctl = sas_ioctl,
+	.shost_attrs = host_attrs,
+};
+
 static const struct hisi_sas_hw hisi_sas_v2_hw = {
 	.hw_init = hisi_sas_v2_init,
 	.setup_itct = setup_itct_v2_hw,
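
wait_cmds_complete_timeout_v2_hw() polls CQE_SEND_CNT until the count stops moving, i.e. until the hardware has gone quiet, rather than sleeping for a fixed interval. It is published through the new .wait_cmds_complete_timeout op below; a plausible call site in the common reset path might look like the following, where the 100 ms delay and 5000 ms timeout are illustrative values, not taken from this patch:

	/* Sketch: give in-flight commands a chance to drain before reset. */
	if (hisi_hba->hw->wait_cmds_complete_timeout)
		hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
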
@@ -3574,6 +3595,8 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
 	.soft_reset = soft_reset_v2_hw,
 	.get_phys_state = get_phys_state_v2_hw,
 	.write_gpio = write_gpio_v2_hw,
+	.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v2_hw,
+	.sht = &sht_v2_hw,
 };
 
 static int hisi_sas_v2_probe(struct platform_device *pdev)
@@ -3598,9 +3621,6 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
 	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
 
-	if (timer_pending(&hisi_hba->timer))
-		del_timer(&hisi_hba->timer);
-
 	hisi_sas_kill_tasklets(hisi_hba);
 
 	return hisi_sas_remove(pdev);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 6f3e5ba6b472..9f1e2d03f914 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -92,6 +92,7 @@
 #define SAS_ECC_INTR 0x1e8
 #define SAS_ECC_INTR_MSK 0x1ec
 #define HGC_ERR_STAT_EN 0x238
+#define CQE_SEND_CNT 0x248
 #define DLVRY_Q_0_BASE_ADDR_LO 0x260
 #define DLVRY_Q_0_BASE_ADDR_HI 0x264
 #define DLVRY_Q_0_DEPTH 0x268
@@ -106,6 +107,11 @@
 #define COMPL_Q_0_RD_PTR 0x4f0
 #define AWQOS_AWCACHE_CFG 0xc84
 #define ARQOS_ARCACHE_CFG 0xc88
+#define HILINK_ERR_DFX 0xe04
+#define SAS_GPIO_CFG_0 0x1000
+#define SAS_GPIO_CFG_1 0x1004
+#define SAS_GPIO_TX_0_1 0x1040
+#define SAS_CFG_DRIVE_VLD 0x1070
 
 /* phy registers requiring init */
 #define PORT_BASE (0x2000)
@@ -167,6 +173,7 @@
 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
 #define CHL_INT2 (PORT_BASE + 0x1bc)
 #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
+#define CHL_INT2_RX_INVLD_DW_OFF 30
 #define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
 #define CHL_INT0_MSK (PORT_BASE + 0x1c0)
 #define CHL_INT1_MSK (PORT_BASE + 0x1c4)
@@ -216,6 +223,9 @@
 #define SAS_RAS_INTR1 (RAS_BASE + 0x04)
 #define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
 #define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
+#define CFG_SAS_RAS_INTR_MASK (RAS_BASE + 0x1c)
+#define SAS_RAS_INTR2 (RAS_BASE + 0x20)
+#define SAS_RAS_INTR2_MASK (RAS_BASE + 0x24)
 
 /* HW dma structures */
 /* Delivery queue header */
@@ -348,10 +358,11 @@ struct hisi_sas_err_record_v3 {
 #define DIR_TO_DEVICE 2
 #define DIR_RESERVED 3
 
-#define CMD_IS_UNCONSTRAINT(cmd) \
-	((cmd == ATA_CMD_READ_LOG_EXT) || \
-	(cmd == ATA_CMD_READ_LOG_DMA_EXT) || \
-	(cmd == ATA_CMD_DEV_RESET))
+#define FIS_CMD_IS_UNCONSTRAINED(fis) \
+	((fis.command == ATA_CMD_READ_LOG_EXT) || \
+	(fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
+	((fis.command == ATA_CMD_DEV_RESET) && \
+	((fis.control & ATA_SRST) != 0)))
 
 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
 {
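
The renamed macro now keys off the whole FIS rather than just the command byte: a DEV_RESET only counts as unconstrained when the FIS actually carries SRST in its control field. For example, under the new definition:

	/* Illustration of the new predicate (constants from include/linux/ata.h). */
	struct host_to_dev_fis fis = {
		.command = ATA_CMD_DEV_RESET,
		.control = ATA_SRST,	/* soft-reset bit set */
	};

	if (FIS_CMD_IS_UNCONSTRAINED(fis))
		;	/* taken: DEV_RESET with SRST is treated as unconstrained */
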
@@ -390,8 +401,23 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
 	return readl(regs);
 }
 
+#define hisi_sas_read32_poll_timeout(off, val, cond, delay_us,		\
+				     timeout_us)			\
+({									\
+	void __iomem *regs = hisi_hba->regs + off;			\
+	readl_poll_timeout(regs, val, cond, delay_us, timeout_us);	\
+})
+
+#define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us,	\
+					    timeout_us)			\
+({									\
+	void __iomem *regs = hisi_hba->regs + off;			\
+	readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
+})
+
 static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 {
+	struct pci_dev *pdev = hisi_hba->pci_dev;
 	int i;
 
 	/* Global registers init */
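
The two wrappers bake the register-base arithmetic into readl_poll_timeout()/readl_poll_timeout_atomic(), so callers pass a register offset instead of a raw address; note that they pick up hisi_hba from the calling scope. The reset path below converts to this form:

	/* Usage as in reset_hw_v3_hw() later in this file's diff:
	 * spin until AXI_CFG reads zero, checking every 20 ms for up to 1 s. */
	u32 val;
	int ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
					       20000, 1000000);
	if (ret)
		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
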
@@ -409,7 +435,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
+	if (pdev->revision >= 0x21)
+		hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
+	else
+		hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
 	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
@@ -422,13 +451,33 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
 
 	for (i = 0; i < hisi_hba->n_phy; i++) {
-		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
+		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+		u32 prog_phy_link_rate = 0x800;
+
+		if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
+				      SAS_LINK_RATE_1_5_GBPS)) {
+			prog_phy_link_rate = 0x855;
+		} else {
+			enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
+
+			prog_phy_link_rate =
+				hisi_sas_get_prog_phy_linkrate_mask(max) |
+				0x800;
+		}
+		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
+				     prog_phy_link_rate);
 		hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
-		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
+		if (pdev->revision >= 0x21)
+			hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
+					0xffffffff);
+		else
+			hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
+					0xff87ffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
 		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -503,6 +552,16 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	/* RAS registers init */
 	hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
 	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
+	hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
+	hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);
+
+	/* LED registers init */
+	hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
+	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
+	hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
+	/* Configure blink generator rate A to 1Hz and B to 4Hz */
+	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
+	hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
 }
 
 static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -654,8 +713,8 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
 	udelay(50);
 
 	/* Ensure axi bus idle */
-	ret = readl_poll_timeout(hisi_hba->regs + AXI_CFG, val, !val,
-				 20000, 1000000);
+	ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
+					   20000, 1000000);
 	if (ret) {
 		dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
 		return -EIO;
@@ -794,42 +853,49 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
 	r = hisi_sas_read32_relaxed(hisi_hba,
 				DLVRY_Q_0_RD_PTR + (queue * 0x14));
 	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
-		dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
+		dev_warn(dev, "full queue=%d r=%d w=%d\n",
 			 queue, r, w);
 		return -EAGAIN;
 	}
 
-	return 0;
+	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
+
+	return w;
 }
 
 static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
 {
 	struct hisi_hba *hisi_hba = dq->hisi_hba;
-	int dlvry_queue = dq->slot_prep->dlvry_queue;
-	int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
+	struct hisi_sas_slot *s, *s1;
+	struct list_head *dq_list;
+	int dlvry_queue = dq->id;
+	int wp, count = 0;
+
+	dq_list = &dq->list;
+	list_for_each_entry_safe(s, s1, &dq->list, delivery) {
+		if (!s->ready)
+			break;
+		count++;
+		wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
+		list_del(&s->delivery);
+	}
+
+	if (!count)
+		return;
 
-	dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
-	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
-			 dq->wr_point);
+	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
 }
 
-static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
+static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
 			      struct hisi_sas_slot *slot,
 			      struct hisi_sas_cmd_hdr *hdr,
 			      struct scatterlist *scatter,
 			      int n_elem)
 {
 	struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
-	struct device *dev = hisi_hba->dev;
 	struct scatterlist *sg;
 	int i;
 
-	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
-		dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
-			n_elem);
-		return -EINVAL;
-	}
-
 	for_each_sg(scatter, sg, n_elem, i) {
 		struct hisi_sas_sge *entry = &sge_page->sge[i];
 
@@ -842,13 +908,10 @@ static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
 	hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
 
 	hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
-
-	return 0;
 }
 
-static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
-			  struct hisi_sas_slot *slot, int is_tmf,
-			  struct hisi_sas_tmf_task *tmf)
+static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
+			   struct hisi_sas_slot *slot)
 {
 	struct sas_task *task = slot->task;
 	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
@@ -857,7 +920,8 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
 	struct hisi_sas_port *port = slot->port;
 	struct sas_ssp_task *ssp_task = &task->ssp_task;
 	struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
-	int has_data = 0, rc, priority = is_tmf;
+	struct hisi_sas_tmf_task *tmf = slot->tmf;
+	int has_data = 0, priority = !!tmf;
 	u8 *buf_cmd;
 	u32 dw1 = 0, dw2 = 0;
 
@@ -868,7 +932,7 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
 			       (1 << CMD_HDR_CMD_OFF)); /* ssp */
 
 	dw1 = 1 << CMD_HDR_VDTL_OFF;
-	if (is_tmf) {
+	if (tmf) {
 		dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
 		dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
 	} else {
@@ -898,12 +962,9 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
 	hdr->dw2 = cpu_to_le32(dw2);
 	hdr->transfer_tags = cpu_to_le32(slot->idx);
 
-	if (has_data) {
-		rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
-					slot->n_elem);
-		if (rc)
-			return rc;
-	}
+	if (has_data)
+		prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
+				   slot->n_elem);
 
 	hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
 	hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -913,7 +974,7 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
 			sizeof(struct ssp_frame_hdr);
 
 	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
-	if (!is_tmf) {
+	if (!tmf) {
 		buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
 		memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
919 } else { 980 } else {
@@ -930,48 +991,25 @@ static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
930 break; 991 break;
931 } 992 }
932 } 993 }
933
934 return 0;
935} 994}
936 995
937static int prep_smp_v3_hw(struct hisi_hba *hisi_hba, 996static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
938 struct hisi_sas_slot *slot) 997 struct hisi_sas_slot *slot)
939{ 998{
940 struct sas_task *task = slot->task; 999 struct sas_task *task = slot->task;
941 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; 1000 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
942 struct domain_device *device = task->dev; 1001 struct domain_device *device = task->dev;
943 struct device *dev = hisi_hba->dev;
944 struct hisi_sas_port *port = slot->port; 1002 struct hisi_sas_port *port = slot->port;
945 struct scatterlist *sg_req, *sg_resp; 1003 struct scatterlist *sg_req;
946 struct hisi_sas_device *sas_dev = device->lldd_dev; 1004 struct hisi_sas_device *sas_dev = device->lldd_dev;
947 dma_addr_t req_dma_addr; 1005 dma_addr_t req_dma_addr;
948 unsigned int req_len, resp_len; 1006 unsigned int req_len;
949 int elem, rc;
950 1007
951 /*
952 * DMA-map SMP request, response buffers
953 */
954 /* req */ 1008 /* req */
955 sg_req = &task->smp_task.smp_req; 1009 sg_req = &task->smp_task.smp_req;
956 elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
957 if (!elem)
958 return -ENOMEM;
959 req_len = sg_dma_len(sg_req); 1010 req_len = sg_dma_len(sg_req);
960 req_dma_addr = sg_dma_address(sg_req); 1011 req_dma_addr = sg_dma_address(sg_req);
961 1012
962 /* resp */
963 sg_resp = &task->smp_task.smp_resp;
964 elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
965 if (!elem) {
966 rc = -ENOMEM;
967 goto err_out_req;
968 }
969 resp_len = sg_dma_len(sg_resp);
970 if ((req_len & 0x3) || (resp_len & 0x3)) {
971 rc = -EINVAL;
972 goto err_out_resp;
973 }
974
975 /* create header */ 1013 /* create header */
976 /* dw0 */ 1014 /* dw0 */
977 hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | 1015 hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
@@ -993,18 +1031,9 @@ static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
993 hdr->cmd_table_addr = cpu_to_le64(req_dma_addr); 1031 hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
994 hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); 1032 hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
995 1033
996 return 0;
997
998err_out_resp:
999 dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
1000 DMA_FROM_DEVICE);
1001err_out_req:
1002 dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
1003 DMA_TO_DEVICE);
1004 return rc;
1005} 1034}
1006 1035
1007static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, 1036static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1008 struct hisi_sas_slot *slot) 1037 struct hisi_sas_slot *slot)
1009{ 1038{
1010 struct sas_task *task = slot->task; 1039 struct sas_task *task = slot->task;
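
[Editor's note: with the per-hw-version mapping deleted above, the SMP request/response buffers are presumably DMA-mapped once in the common hisi_sas task-prep path before prep_smp_v3_hw() runs. A sketch under that assumption — variable names are illustrative:]

/* Assumed common-code replacement for the mapping removed from
 * prep_smp_v3_hw(): map both SMP buffers up front, unwinding the
 * request mapping if the response mapping fails. */
if (task->task_proto == SAS_PROTOCOL_SMP) {
        int n_elem_req, n_elem_resp;

        n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, 1,
                                DMA_TO_DEVICE);
        if (!n_elem_req)
                return -ENOMEM;
        n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp, 1,
                                 DMA_FROM_DEVICE);
        if (!n_elem_resp) {
                dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
                             DMA_TO_DEVICE);
                return -ENOMEM;
        }
}
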
@@ -1015,7 +1044,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1015 struct asd_sas_port *sas_port = device->port; 1044 struct asd_sas_port *sas_port = device->port;
1016 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 1045 struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
1017 u8 *buf_cmd; 1046 u8 *buf_cmd;
1018 int has_data = 0, rc = 0, hdr_tag = 0; 1047 int has_data = 0, hdr_tag = 0;
1019 u32 dw1 = 0, dw2 = 0; 1048 u32 dw1 = 0, dw2 = 0;
1020 1049
1021 hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); 1050 hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
@@ -1046,7 +1075,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1046 << CMD_HDR_FRAME_TYPE_OFF; 1075 << CMD_HDR_FRAME_TYPE_OFF;
1047 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; 1076 dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
1048 1077
1049 if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command)) 1078 if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
1050 dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF; 1079 dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;
1051 1080
1052 hdr->dw1 = cpu_to_le32(dw1); 1081 hdr->dw1 = cpu_to_le32(dw1);
@@ -1064,12 +1093,9 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1064 /* dw3 */ 1093 /* dw3 */
1065 hdr->transfer_tags = cpu_to_le32(slot->idx); 1094 hdr->transfer_tags = cpu_to_le32(slot->idx);
1066 1095
1067 if (has_data) { 1096 if (has_data)
1068 rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, 1097 prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
1069 slot->n_elem); 1098 slot->n_elem);
1070 if (rc)
1071 return rc;
1072 }
1073 1099
1074 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len); 1100 hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
1075 hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); 1101 hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
@@ -1081,11 +1107,9 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1081 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ 1107 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1082 /* fill in command FIS */ 1108 /* fill in command FIS */
1083 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 1109 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1084
1085 return 0;
1086} 1110}
1087 1111
1088static int prep_abort_v3_hw(struct hisi_hba *hisi_hba, 1112static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
1089 struct hisi_sas_slot *slot, 1113 struct hisi_sas_slot *slot,
1090 int device_id, int abort_flag, int tag_to_abort) 1114 int device_id, int abort_flag, int tag_to_abort)
1091{ 1115{
@@ -1110,7 +1134,6 @@ static int prep_abort_v3_hw(struct hisi_hba *hisi_hba,
1110 hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF); 1134 hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
1111 hdr->transfer_tags = cpu_to_le32(slot->idx); 1135 hdr->transfer_tags = cpu_to_le32(slot->idx);
1112 1136
1113 return 0;
1114} 1137}
1115 1138
1116static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) 1139static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -1120,6 +1143,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1120 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1143 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1121 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1144 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1122 struct device *dev = hisi_hba->dev; 1145 struct device *dev = hisi_hba->dev;
1146 unsigned long flags;
1123 1147
1124 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); 1148 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
1125 1149
@@ -1188,6 +1212,12 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1188 phy->phy_attached = 1; 1212 phy->phy_attached = 1;
1189 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); 1213 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
1190 res = IRQ_HANDLED; 1214 res = IRQ_HANDLED;
1215 spin_lock_irqsave(&phy->lock, flags);
1216 if (phy->reset_completion) {
1217 phy->in_reset = 0;
1218 complete(phy->reset_completion);
1219 }
1220 spin_unlock_irqrestore(&phy->lock, flags);
1191end: 1221end:
1192 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 1222 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1193 CHL_INT0_SL_PHY_ENABLE_MSK); 1223 CHL_INT0_SL_PHY_ENABLE_MSK);
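
[Editor's note: the complete() call added to the phy-up handler pairs with a waiter in the common hisi_sas code (part of the nexus-reset rework in this series). A sketch of the assumed waiting side:]

/* Assumed shape of the waiter that the phy-up handler above
 * completes; the real code lives in hisi_sas_main.c. */
DECLARE_COMPLETION_ONSTACK(completion);
unsigned long flags;

spin_lock_irqsave(&phy->lock, flags);
phy->in_reset = 1;
phy->reset_completion = &completion;
spin_unlock_irqrestore(&phy->lock, flags);

/* ... disable and re-enable the phy ... */

wait_for_completion_timeout(&completion, 2 * HZ);

spin_lock_irqsave(&phy->lock, flags);
phy->reset_completion = NULL;
spin_unlock_irqrestore(&phy->lock, flags);
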
@@ -1301,14 +1331,10 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1301{ 1331{
1302 struct hisi_hba *hisi_hba = p; 1332 struct hisi_hba *hisi_hba = p;
1303 struct device *dev = hisi_hba->dev; 1333 struct device *dev = hisi_hba->dev;
1304 u32 ent_msk, ent_tmp, irq_msk; 1334 struct pci_dev *pci_dev = hisi_hba->pci_dev;
1335 u32 irq_msk;
1305 int phy_no = 0; 1336 int phy_no = 0;
1306 1337
1307 ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
1308 ent_tmp = ent_msk;
1309 ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
1310 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
1311
1312 irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) 1338 irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
1313 & 0xeeeeeeee; 1339 & 0xeeeeeeee;
1314 1340
@@ -1319,6 +1345,13 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1319 CHL_INT1); 1345 CHL_INT1);
1320 u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, 1346 u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
1321 CHL_INT2); 1347 CHL_INT2);
1348 u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no,
1349 CHL_INT1_MSK);
1350 u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no,
1351 CHL_INT2_MSK);
1352
1353 irq_value1 &= ~irq_msk1;
1354 irq_value2 &= ~irq_msk2;
1322 1355
1323 if ((irq_msk & (4 << (phy_no * 4))) && 1356 if ((irq_msk & (4 << (phy_no * 4))) &&
1324 irq_value1) { 1357 irq_value1) {
@@ -1364,8 +1397,28 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1364 1397
1365 hisi_sas_phy_write32(hisi_hba, phy_no, 1398 hisi_sas_phy_write32(hisi_hba, phy_no,
1366 CHL_INT2, irq_value2); 1399 CHL_INT2, irq_value2);
1367 }
1368 1400
1401 if ((irq_value2 & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
1402 (pci_dev->revision == 0x20)) {
1403 u32 reg_value;
1404 int rc;
1405
1406 rc = hisi_sas_read32_poll_timeout_atomic(
1407 HILINK_ERR_DFX, reg_value,
1408 !((reg_value >> 8) & BIT(phy_no)),
1409 1000, 10000);
1410 if (rc) {
1411 disable_phy_v3_hw(hisi_hba, phy_no);
1412 hisi_sas_phy_write32(hisi_hba, phy_no,
1413 CHL_INT2,
1414 BIT(CHL_INT2_RX_INVLD_DW_OFF));
1415 hisi_sas_phy_read32(hisi_hba, phy_no,
1416 ERR_CNT_INVLD_DW);
1417 mdelay(1);
1418 enable_phy_v3_hw(hisi_hba, phy_no);
1419 }
1420 }
1421 }
1369 1422
1370 if (irq_msk & (2 << (phy_no * 4)) && irq_value0) { 1423 if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
1371 hisi_sas_phy_write32(hisi_hba, phy_no, 1424 hisi_sas_phy_write32(hisi_hba, phy_no,
@@ -1378,8 +1431,6 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1378 phy_no++; 1431 phy_no++;
1379 } 1432 }
1380 1433
1381 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
1382
1383 return IRQ_HANDLED; 1434 return IRQ_HANDLED;
1384} 1435}
1385 1436
@@ -1448,6 +1499,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
1448 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00); 1499 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
1449 1500
1450 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 1501 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
1502 irq_value &= ~irq_msk;
1451 1503
1452 for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) { 1504 for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
1453 const struct hisi_sas_hw_error *error = &fatal_axi_error[i]; 1505 const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
@@ -1549,37 +1601,30 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1549 struct device *dev = hisi_hba->dev; 1601 struct device *dev = hisi_hba->dev;
1550 struct task_status_struct *ts; 1602 struct task_status_struct *ts;
1551 struct domain_device *device; 1603 struct domain_device *device;
1604 struct sas_ha_struct *ha;
1552 enum exec_status sts; 1605 enum exec_status sts;
1553 struct hisi_sas_complete_v3_hdr *complete_queue = 1606 struct hisi_sas_complete_v3_hdr *complete_queue =
1554 hisi_hba->complete_hdr[slot->cmplt_queue]; 1607 hisi_hba->complete_hdr[slot->cmplt_queue];
1555 struct hisi_sas_complete_v3_hdr *complete_hdr = 1608 struct hisi_sas_complete_v3_hdr *complete_hdr =
1556 &complete_queue[slot->cmplt_queue_slot]; 1609 &complete_queue[slot->cmplt_queue_slot];
1557 int aborted;
1558 unsigned long flags; 1610 unsigned long flags;
1611 bool is_internal = slot->is_internal;
1559 1612
1560 if (unlikely(!task || !task->lldd_task || !task->dev)) 1613 if (unlikely(!task || !task->lldd_task || !task->dev))
1561 return -EINVAL; 1614 return -EINVAL;
1562 1615
1563 ts = &task->task_status; 1616 ts = &task->task_status;
1564 device = task->dev; 1617 device = task->dev;
1618 ha = device->port->ha;
1565 sas_dev = device->lldd_dev; 1619 sas_dev = device->lldd_dev;
1566 1620
1567 spin_lock_irqsave(&task->task_state_lock, flags); 1621 spin_lock_irqsave(&task->task_state_lock, flags);
1568 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1569 task->task_state_flags &= 1622 task->task_state_flags &=
1570 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1623 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1571 spin_unlock_irqrestore(&task->task_state_lock, flags); 1624 spin_unlock_irqrestore(&task->task_state_lock, flags);
1572 1625
1573 memset(ts, 0, sizeof(*ts)); 1626 memset(ts, 0, sizeof(*ts));
1574 ts->resp = SAS_TASK_COMPLETE; 1627 ts->resp = SAS_TASK_COMPLETE;
1575 if (unlikely(aborted)) {
1576 dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
1577 ts->stat = SAS_ABORTED_TASK;
1578 spin_lock_irqsave(&hisi_hba->lock, flags);
1579 hisi_sas_slot_task_free(hisi_hba, task, slot);
1580 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1581 return ts->stat;
1582 }
1583 1628
1584 if (unlikely(!sas_dev)) { 1629 if (unlikely(!sas_dev)) {
1585 dev_dbg(dev, "slot complete: port has no device\n"); 1630
@@ -1619,10 +1664,10 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1619 1664
1620 slot_err_v3_hw(hisi_hba, task, slot); 1665 slot_err_v3_hw(hisi_hba, task, slot);
1621 if (ts->stat != SAS_DATA_UNDERRUN) 1666 if (ts->stat != SAS_DATA_UNDERRUN)
1622 dev_info(dev, "erroneous completion iptt=%d task=%p " 1667 dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
1623 "CQ hdr: 0x%x 0x%x 0x%x 0x%x " 1668 "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
1624 "Error info: 0x%x 0x%x 0x%x 0x%x\n", 1669 "Error info: 0x%x 0x%x 0x%x 0x%x\n",
1625 slot->idx, task, 1670 slot->idx, task, sas_dev->device_id,
1626 complete_hdr->dw0, complete_hdr->dw1, 1671 complete_hdr->dw0, complete_hdr->dw1,
1627 complete_hdr->act, complete_hdr->dw3, 1672 complete_hdr->act, complete_hdr->dw3,
1628 error_info[0], error_info[1], 1673 error_info[0], error_info[1],
@@ -1677,13 +1722,27 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1677 } 1722 }
1678 1723
1679out: 1724out:
1725 hisi_sas_slot_task_free(hisi_hba, task, slot);
1726 sts = ts->stat;
1680 spin_lock_irqsave(&task->task_state_lock, flags); 1727 spin_lock_irqsave(&task->task_state_lock, flags);
1728 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
1729 spin_unlock_irqrestore(&task->task_state_lock, flags);
1730 dev_info(dev, "slot complete: task(%p) aborted\n", task);
1731 return SAS_ABORTED_TASK;
1732 }
1681 task->task_state_flags |= SAS_TASK_STATE_DONE; 1733 task->task_state_flags |= SAS_TASK_STATE_DONE;
1682 spin_unlock_irqrestore(&task->task_state_lock, flags); 1734 spin_unlock_irqrestore(&task->task_state_lock, flags);
1683 spin_lock_irqsave(&hisi_hba->lock, flags); 1735
1684 hisi_sas_slot_task_free(hisi_hba, task, slot); 1736 if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
1685 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1737 spin_lock_irqsave(&device->done_lock, flags);
1686 sts = ts->stat; 1738 if (test_bit(SAS_HA_FROZEN, &ha->state)) {
1739 spin_unlock_irqrestore(&device->done_lock, flags);
1740 dev_info(dev, "slot complete: task(%p) ignored\n",
1741 task);
1742 return sts;
1743 }
1744 spin_unlock_irqrestore(&device->done_lock, flags);
1745 }
1687 1746
1688 if (task->task_done) 1747 if (task->task_done)
1689 task->task_done(task); 1748 task->task_done(task);
@@ -1699,25 +1758,27 @@ static void cq_tasklet_v3_hw(unsigned long val)
1699 struct hisi_sas_complete_v3_hdr *complete_queue; 1758 struct hisi_sas_complete_v3_hdr *complete_queue;
1700 u32 rd_point = cq->rd_point, wr_point; 1759 u32 rd_point = cq->rd_point, wr_point;
1701 int queue = cq->id; 1760 int queue = cq->id;
1702 struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
1703 1761
1704 complete_queue = hisi_hba->complete_hdr[queue]; 1762 complete_queue = hisi_hba->complete_hdr[queue];
1705 1763
1706 spin_lock(&dq->lock);
1707 wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + 1764 wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
1708 (0x14 * queue)); 1765 (0x14 * queue));
1709 1766
1710 while (rd_point != wr_point) { 1767 while (rd_point != wr_point) {
1711 struct hisi_sas_complete_v3_hdr *complete_hdr; 1768 struct hisi_sas_complete_v3_hdr *complete_hdr;
1769 struct device *dev = hisi_hba->dev;
1712 int iptt; 1770 int iptt;
1713 1771
1714 complete_hdr = &complete_queue[rd_point]; 1772 complete_hdr = &complete_queue[rd_point];
1715 1773
1716 iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK; 1774 iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
1717 slot = &hisi_hba->slot_info[iptt]; 1775 if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
1718 slot->cmplt_queue_slot = rd_point; 1776 slot = &hisi_hba->slot_info[iptt];
1719 slot->cmplt_queue = queue; 1777 slot->cmplt_queue_slot = rd_point;
1720 slot_complete_v3_hw(hisi_hba, slot); 1778 slot->cmplt_queue = queue;
1779 slot_complete_v3_hw(hisi_hba, slot);
1780 } else
1781 dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);
1721 1782
1722 if (++rd_point >= HISI_SAS_QUEUE_SLOTS) 1783 if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
1723 rd_point = 0; 1784 rd_point = 0;
@@ -1726,7 +1787,6 @@ static void cq_tasklet_v3_hw(unsigned long val)
1726 /* update rd_point */ 1787 /* update rd_point */
1727 cq->rd_point = rd_point; 1788 cq->rd_point = rd_point;
1728 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); 1789 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
1729 spin_unlock(&dq->lock);
1730} 1790}
1731 1791
1732static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) 1792static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
@@ -1839,39 +1899,12 @@ static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
1839static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no, 1899static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
1840 struct sas_phy_linkrates *r) 1900 struct sas_phy_linkrates *r)
1841{ 1901{
1842 u32 prog_phy_link_rate = 1902 enum sas_linkrate max = r->maximum_linkrate;
1843 hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); 1903 u32 prog_phy_link_rate = 0x800;
1844 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1845 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1846 int i;
1847 enum sas_linkrate min, max;
1848 u32 rate_mask = 0;
1849
1850 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1851 max = sas_phy->phy->maximum_linkrate;
1852 min = r->minimum_linkrate;
1853 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1854 max = r->maximum_linkrate;
1855 min = sas_phy->phy->minimum_linkrate;
1856 } else
1857 return;
1858
1859 sas_phy->phy->maximum_linkrate = max;
1860 sas_phy->phy->minimum_linkrate = min;
1861
1862 max -= SAS_LINK_RATE_1_5_GBPS;
1863
1864 for (i = 0; i <= max; i++)
1865 rate_mask |= 1 << (i * 2);
1866
1867 prog_phy_link_rate &= ~0xff;
1868 prog_phy_link_rate |= rate_mask;
1869 1904
1870 disable_phy_v3_hw(hisi_hba, phy_no); 1905 prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
1871 msleep(100);
1872 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, 1906 hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
1873 prog_phy_link_rate); 1907 prog_phy_link_rate);
1874 start_phy_v3_hw(hisi_hba, phy_no);
1875} 1908}
1876 1909
1877static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) 1910static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
@@ -1948,8 +1981,9 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
1948 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1); 1981 hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
1949 1982
1950 /* wait until bus idle */ 1983 /* wait until bus idle */
1951 rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE + 1984 rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
1952 AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100); 1985 AM_CURR_TRANS_RETURN, status,
1986 status == 0x3, 10, 100);
1953 if (rc) { 1987 if (rc) {
1954 dev_err(dev, "axi bus is not idle, rc = %d\n", rc); 1988 dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
1955 return rc; 1989 return rc;
@@ -1960,6 +1994,75 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
1960 return hw_init_v3_hw(hisi_hba); 1994 return hw_init_v3_hw(hisi_hba);
1961} 1995}
1962 1996
1997static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
1998 u8 reg_index, u8 reg_count, u8 *write_data)
1999{
2000 struct device *dev = hisi_hba->dev;
2001 u32 *data = (u32 *)write_data;
2002 int i;
2003
2004 switch (reg_type) {
2005 case SAS_GPIO_REG_TX:
2006 if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
2007 dev_err(dev, "write gpio: invalid reg range [%d, %d]\n",
2008 reg_index, reg_index + reg_count - 1);
2009 return -EINVAL;
2010 }
2011
2012 for (i = 0; i < reg_count; i++)
2013 hisi_sas_write32(hisi_hba,
2014 SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
2015 data[i]);
2016 break;
2017 default:
2018 dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
2019 reg_type);
2020 return -EINVAL;
2021 }
2022
2023 return 0;
2024}
2025
2026static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
2027 int delay_ms, int timeout_ms)
2028{
2029 struct device *dev = hisi_hba->dev;
2030 int entries, entries_old = 0, time;
2031
2032 for (time = 0; time < timeout_ms; time += delay_ms) {
2033 entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
2034 if (entries == entries_old)
2035 break;
2036
2037 entries_old = entries;
2038 msleep(delay_ms);
2039 }
2040
2041 dev_dbg(dev, "wait commands complete %dms\n", time);
2042}
2043
2044static struct scsi_host_template sht_v3_hw = {
2045 .name = DRV_NAME,
2046 .module = THIS_MODULE,
2047 .queuecommand = sas_queuecommand,
2048 .target_alloc = sas_target_alloc,
2049 .slave_configure = hisi_sas_slave_configure,
2050 .scan_finished = hisi_sas_scan_finished,
2051 .scan_start = hisi_sas_scan_start,
2052 .change_queue_depth = sas_change_queue_depth,
2053 .bios_param = sas_bios_param,
2054 .can_queue = 1,
2055 .this_id = -1,
2056 .sg_tablesize = SG_ALL,
2057 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
2058 .use_clustering = ENABLE_CLUSTERING,
2059 .eh_device_reset_handler = sas_eh_device_reset_handler,
2060 .eh_target_reset_handler = sas_eh_target_reset_handler,
2061 .target_destroy = sas_target_destroy,
2062 .ioctl = sas_ioctl,
2063 .shost_attrs = host_attrs,
2064};
2065
1963static const struct hisi_sas_hw hisi_sas_v3_hw = { 2066static const struct hisi_sas_hw hisi_sas_v3_hw = {
1964 .hw_init = hisi_sas_v3_init, 2067 .hw_init = hisi_sas_v3_init,
1965 .setup_itct = setup_itct_v3_hw, 2068 .setup_itct = setup_itct_v3_hw,
@@ -1985,6 +2088,8 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
1985 .soft_reset = soft_reset_v3_hw, 2088 .soft_reset = soft_reset_v3_hw,
1986 .get_phys_state = get_phys_state_v3_hw, 2089 .get_phys_state = get_phys_state_v3_hw,
1987 .get_events = phy_get_events_v3_hw, 2090 .get_events = phy_get_events_v3_hw,
2091 .write_gpio = write_gpio_v3_hw,
2092 .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
1988}; 2093};
1989 2094
1990static struct Scsi_Host * 2095static struct Scsi_Host *
@@ -1994,7 +2099,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
1994 struct hisi_hba *hisi_hba; 2099 struct hisi_hba *hisi_hba;
1995 struct device *dev = &pdev->dev; 2100 struct device *dev = &pdev->dev;
1996 2101
1997 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba)); 2102 shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba));
1998 if (!shost) { 2103 if (!shost) {
1999 dev_err(dev, "shost alloc failed\n"); 2104 dev_err(dev, "shost alloc failed\n");
2000 return NULL; 2105 return NULL;
@@ -2108,8 +2213,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2108 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2213 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2109 } 2214 }
2110 2215
2111 hisi_sas_init_add(hisi_hba);
2112
2113 rc = scsi_add_host(shost, dev); 2216 rc = scsi_add_host(shost, dev);
2114 if (rc) 2217 if (rc)
2115 goto err_out_ha; 2218 goto err_out_ha;
@@ -2161,6 +2264,9 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
2161 struct hisi_hba *hisi_hba = sha->lldd_ha; 2264 struct hisi_hba *hisi_hba = sha->lldd_ha;
2162 struct Scsi_Host *shost = sha->core.shost; 2265 struct Scsi_Host *shost = sha->core.shost;
2163 2266
2267 if (timer_pending(&hisi_hba->timer))
2268 del_timer(&hisi_hba->timer);
2269
2164 sas_unregister_ha(sha); 2270 sas_unregister_ha(sha);
2165 sas_remove_host(sha->core.shost); 2271 sas_remove_host(sha->core.shost);
2166 2272
@@ -2222,6 +2328,29 @@ static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
2222 { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" }, 2328 { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
2223}; 2329};
2224 2330
2331static const struct hisi_sas_hw_error sas_ras_intr2_nfe[] = {
2332 { .irq_msk = BIT(0), .msg = "DMAC0_AXI_BUS_ERR" },
2333 { .irq_msk = BIT(1), .msg = "DMAC1_AXI_BUS_ERR" },
2334 { .irq_msk = BIT(2), .msg = "DMAC2_AXI_BUS_ERR" },
2335 { .irq_msk = BIT(3), .msg = "DMAC3_AXI_BUS_ERR" },
2336 { .irq_msk = BIT(4), .msg = "DMAC4_AXI_BUS_ERR" },
2337 { .irq_msk = BIT(5), .msg = "DMAC5_AXI_BUS_ERR" },
2338 { .irq_msk = BIT(6), .msg = "DMAC6_AXI_BUS_ERR" },
2339 { .irq_msk = BIT(7), .msg = "DMAC7_AXI_BUS_ERR" },
2340 { .irq_msk = BIT(8), .msg = "DMAC0_FIFO_OMIT_ERR" },
2341 { .irq_msk = BIT(9), .msg = "DMAC1_FIFO_OMIT_ERR" },
2342 { .irq_msk = BIT(10), .msg = "DMAC2_FIFO_OMIT_ERR" },
2343 { .irq_msk = BIT(11), .msg = "DMAC3_FIFO_OMIT_ERR" },
2344 { .irq_msk = BIT(12), .msg = "DMAC4_FIFO_OMIT_ERR" },
2345 { .irq_msk = BIT(13), .msg = "DMAC5_FIFO_OMIT_ERR" },
2346 { .irq_msk = BIT(14), .msg = "DMAC6_FIFO_OMIT_ERR" },
2347 { .irq_msk = BIT(15), .msg = "DMAC7_FIFO_OMIT_ERR" },
2348 { .irq_msk = BIT(16), .msg = "HGC_RLSE_SLOT_UNMATCH" },
2349 { .irq_msk = BIT(17), .msg = "HGC_LM_ADD_FCH_LIST_ERR" },
2350 { .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" },
2351 { .irq_msk = BIT(19), .msg = "HGC_FIFO_OMIT_ERR" },
2352};
2353
2225static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba) 2354static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
2226{ 2355{
2227 struct device *dev = hisi_hba->dev; 2356 struct device *dev = hisi_hba->dev;
@@ -2252,6 +2381,17 @@ static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
2252 } 2381 }
2253 hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value); 2382 hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
2254 2383
2384 irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR2);
2385 for (i = 0; i < ARRAY_SIZE(sas_ras_intr2_nfe); i++) {
2386 ras_error = &sas_ras_intr2_nfe[i];
2387 if (ras_error->irq_msk & irq_value) {
2388 dev_warn(dev, "SAS_RAS_INTR2: %s(irq_value=0x%x) found.\n",
2389 ras_error->msg, irq_value);
2390 need_reset = true;
2391 }
2392 }
2393 hisi_sas_write32(hisi_hba, SAS_RAS_INTR2, irq_value);
2394
2255 return need_reset; 2395 return need_reset;
2256} 2396}
2257 2397
@@ -2307,7 +2447,6 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
2307 u32 device_state, status; 2447 u32 device_state, status;
2308 int rc; 2448 int rc;
2309 u32 reg_val; 2449 u32 reg_val;
2310 unsigned long flags;
2311 2450
2312 if (!pdev->pm_cap) { 2451 if (!pdev->pm_cap) {
2313 dev_err(dev, "PCI PM not supported\n"); 2452 dev_err(dev, "PCI PM not supported\n");
@@ -2332,8 +2471,9 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
2332 AM_CTRL_GLOBAL, reg_val); 2471 AM_CTRL_GLOBAL, reg_val);
2333 2472
2334 /* wait until bus idle */ 2473 /* wait until bus idle */
2335 rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE + 2474 rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
2336 AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100); 2475 AM_CURR_TRANS_RETURN, status,
2476 status == 0x3, 10, 100);
2337 if (rc) { 2477 if (rc) {
2338 dev_err(dev, "axi bus is not idle, rc = %d\n", rc); 2478 dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
2339 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 2479 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
@@ -2351,9 +2491,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
2351 pci_disable_device(pdev); 2491 pci_disable_device(pdev);
2352 pci_set_power_state(pdev, device_state); 2492 pci_set_power_state(pdev, device_state);
2353 2493
2354 spin_lock_irqsave(&hisi_hba->lock, flags);
2355 hisi_sas_release_tasks(hisi_hba); 2494 hisi_sas_release_tasks(hisi_hba);
2356 spin_unlock_irqrestore(&hisi_hba->lock, flags);
2357 2495
2358 sas_suspend_ha(sha); 2496 sas_suspend_ha(sha);
2359 return 0; 2497 return 0;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dda1a64ab89c..6615ad8754b8 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -435,6 +435,8 @@ struct ipr_error_table_t ipr_error_table[] = {
435 "4080: IOA exceeded maximum operating temperature"}, 435 "4080: IOA exceeded maximum operating temperature"},
436 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL, 436 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
437 "4085: Service required"}, 437 "4085: Service required"},
438 {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
439 "4086: SAS Adapter Hardware Configuration Error"},
438 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL, 440 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
439 "3140: Device bus not ready to ready transition"}, 441 "3140: Device bus not ready to ready transition"},
440 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL, 442 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index e3c8857741a1..bd6ac6b5980a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -291,7 +291,7 @@ static void ips_freescb(ips_ha_t *, ips_scb_t *);
291static void ips_setup_funclist(ips_ha_t *); 291static void ips_setup_funclist(ips_ha_t *);
292static void ips_statinit(ips_ha_t *); 292static void ips_statinit(ips_ha_t *);
293static void ips_statinit_memio(ips_ha_t *); 293static void ips_statinit_memio(ips_ha_t *);
294static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time_t); 294static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t);
295static void ips_ffdc_reset(ips_ha_t *, int); 295static void ips_ffdc_reset(ips_ha_t *, int);
296static void ips_ffdc_time(ips_ha_t *); 296static void ips_ffdc_time(ips_ha_t *);
297static uint32_t ips_statupd_copperhead(ips_ha_t *); 297static uint32_t ips_statupd_copperhead(ips_ha_t *);
@@ -985,10 +985,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
985 985
986 /* FFDC */ 986 /* FFDC */
987 if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) { 987 if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
988 struct timeval tv; 988 ha->last_ffdc = ktime_get_real_seconds();
989
990 do_gettimeofday(&tv);
991 ha->last_ffdc = tv.tv_sec;
992 ha->reset_count++; 989 ha->reset_count++;
993 ips_ffdc_reset(ha, IPS_INTR_IORL); 990 ips_ffdc_reset(ha, IPS_INTR_IORL);
994 } 991 }
@@ -2392,7 +2389,6 @@ static int
2392ips_hainit(ips_ha_t * ha) 2389ips_hainit(ips_ha_t * ha)
2393{ 2390{
2394 int i; 2391 int i;
2395 struct timeval tv;
2396 2392
2397 METHOD_TRACE("ips_hainit", 1); 2393 METHOD_TRACE("ips_hainit", 1);
2398 2394
@@ -2407,8 +2403,7 @@ ips_hainit(ips_ha_t * ha)
2407 2403
2408 /* Send FFDC */ 2404 /* Send FFDC */
2409 ha->reset_count = 1; 2405 ha->reset_count = 1;
2410 do_gettimeofday(&tv); 2406 ha->last_ffdc = ktime_get_real_seconds();
2411 ha->last_ffdc = tv.tv_sec;
2412 ips_ffdc_reset(ha, IPS_INTR_IORL); 2407 ips_ffdc_reset(ha, IPS_INTR_IORL);
2413 2408
2414 if (!ips_read_config(ha, IPS_INTR_IORL)) { 2409 if (!ips_read_config(ha, IPS_INTR_IORL)) {
@@ -2548,12 +2543,9 @@ ips_next(ips_ha_t * ha, int intr)
2548 2543
2549 if ((ha->subsys->param[3] & 0x300000) 2544 if ((ha->subsys->param[3] & 0x300000)
2550 && (ha->scb_activelist.count == 0)) { 2545 && (ha->scb_activelist.count == 0)) {
2551 struct timeval tv; 2546 time64_t now = ktime_get_real_seconds();
2552 2547 if (now - ha->last_ffdc > IPS_SECS_8HOURS) {
2553 do_gettimeofday(&tv); 2548 ha->last_ffdc = now;
2554
2555 if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) {
2556 ha->last_ffdc = tv.tv_sec;
2557 ips_ffdc_time(ha); 2549 ips_ffdc_time(ha);
2558 } 2550 }
2559 } 2551 }
@@ -5988,59 +5980,21 @@ ips_ffdc_time(ips_ha_t * ha)
5988/* */ 5980/* */
5989/****************************************************************************/ 5981/****************************************************************************/
5990static void 5982static void
5991ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time_t current_time) 5983ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time)
5992{ 5984{
5993 long days; 5985 struct tm tm;
5994 long rem;
5995 int i;
5996 int year;
5997 int yleap;
5998 int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR };
5999 int month_lengths[12][2] = { {31, 31},
6000 {28, 29},
6001 {31, 31},
6002 {30, 30},
6003 {31, 31},
6004 {30, 30},
6005 {31, 31},
6006 {31, 31},
6007 {30, 30},
6008 {31, 31},
6009 {30, 30},
6010 {31, 31}
6011 };
6012 5986
6013 METHOD_TRACE("ips_fix_ffdc_time", 1); 5987 METHOD_TRACE("ips_fix_ffdc_time", 1);
6014 5988
6015 days = current_time / IPS_SECS_DAY; 5989 time64_to_tm(current_time, 0, &tm);
6016 rem = current_time % IPS_SECS_DAY;
6017
6018 scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR);
6019 rem = rem % IPS_SECS_HOUR;
6020 scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN);
6021 scb->cmd.ffdc.second = (rem % IPS_SECS_MIN);
6022
6023 year = IPS_EPOCH_YEAR;
6024 while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) {
6025 int newy;
6026
6027 newy = year + (days / IPS_DAYS_NORMAL_YEAR);
6028 if (days < 0)
6029 --newy;
6030 days -= (newy - year) * IPS_DAYS_NORMAL_YEAR +
6031 IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) -
6032 IPS_NUM_LEAP_YEARS_THROUGH(year - 1);
6033 year = newy;
6034 }
6035
6036 scb->cmd.ffdc.yearH = year / 100;
6037 scb->cmd.ffdc.yearL = year % 100;
6038
6039 for (i = 0; days >= month_lengths[i][yleap]; ++i)
6040 days -= month_lengths[i][yleap];
6041 5990
6042 scb->cmd.ffdc.month = i + 1; 5991 scb->cmd.ffdc.hour = tm.tm_hour;
6043 scb->cmd.ffdc.day = days + 1; 5992 scb->cmd.ffdc.minute = tm.tm_min;
5993 scb->cmd.ffdc.second = tm.tm_sec;
5994 scb->cmd.ffdc.yearH = (tm.tm_year + 1900) / 100;
5995 scb->cmd.ffdc.yearL = tm.tm_year % 100;
5996 scb->cmd.ffdc.month = tm.tm_mon + 1;
5997 scb->cmd.ffdc.day = tm.tm_mday;
6044} 5998}
6045 5999
6046/**************************************************************************** 6000/****************************************************************************
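
[Editor's note: for reference, the y2038-safe pattern the ips change adopts — time64_to_tm() takes the seconds value and a UTC offset and fills a struct tm whose year counts from 1900 and month from 0. A minimal standalone sketch:]

#include <linux/time.h>
#include <linux/timekeeping.h>

/* Minimal illustration of the conversion used above. */
static void example_ffdc_timestamp(void)
{
        struct tm tm;
        time64_t now = ktime_get_real_seconds();

        time64_to_tm(now, 0, &tm);      /* 0 = no UTC offset */
        pr_debug("%04ld-%02d-%02d %02d:%02d:%02d\n",
                 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                 tm.tm_hour, tm.tm_min, tm.tm_sec);
}
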
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 366be3b2f9b4..db546171e97f 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -402,16 +402,7 @@
402 #define IPS_BIOS_HEADER 0xC0 402 #define IPS_BIOS_HEADER 0xC0
403 403
404 /* time oriented stuff */ 404 /* time oriented stuff */
405 #define IPS_IS_LEAP_YEAR(y) (((y % 4 == 0) && ((y % 100 != 0) || (y % 400 == 0))) ? 1 : 0)
406 #define IPS_NUM_LEAP_YEARS_THROUGH(y) ((y) / 4 - (y) / 100 + (y) / 400)
407
408 #define IPS_SECS_MIN 60
409 #define IPS_SECS_HOUR 3600
410 #define IPS_SECS_8HOURS 28800 405 #define IPS_SECS_8HOURS 28800
411 #define IPS_SECS_DAY 86400
412 #define IPS_DAYS_NORMAL_YEAR 365
413 #define IPS_DAYS_LEAP_YEAR 366
414 #define IPS_EPOCH_YEAR 1970
415 406
416 /* 407 /*
417 * Scsi_Host Template 408 * Scsi_Host Template
@@ -1054,7 +1045,7 @@ typedef struct ips_ha {
1054 uint8_t active; 1045 uint8_t active;
1055 int ioctl_reset; /* IOCTL Requested Reset Flag */ 1046 int ioctl_reset; /* IOCTL Requested Reset Flag */
1056 uint16_t reset_count; /* number of resets */ 1047 uint16_t reset_count; /* number of resets */
1057 time_t last_ffdc; /* last time we sent ffdc info*/ 1048 time64_t last_ffdc; /* last time we sent ffdc info*/
1058 uint8_t slot_num; /* PCI Slot Number */ 1049 uint8_t slot_num; /* PCI Slot Number */
1059 int ioctl_len; /* size of ioctl buffer */ 1050 int ioctl_len; /* size of ioctl buffer */
1060 dma_addr_t ioctl_busaddr; /* dma address of ioctl buffer*/ 1051 dma_addr_t ioctl_busaddr; /* dma address of ioctl buffer*/
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 922e3e56c90d..05cf4daf8788 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -433,9 +433,6 @@ static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
433 (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED))) 433 (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
434 return SCI_FAILURE_INVALID_PARAMETER_VALUE; 434 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
435 435
436 if (u->in_connection_align_insertion_frequency < 3)
437 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
438
439 if ((u->in_connection_align_insertion_frequency < 3) || 436 if ((u->in_connection_align_insertion_frequency < 3) ||
440 (u->align_insertion_frequency == 0) || 437 (u->align_insertion_frequency == 0) ||
441 (u->notify_enable_spin_up_insertion_frequency == 0)) 438 (u->notify_enable_spin_up_insertion_frequency == 0))
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 2ba4b68fdb73..b025a0b74341 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -962,7 +962,6 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
962 if (conn->datadgst_en) 962 if (conn->datadgst_en)
963 sdev->request_queue->backing_dev_info->capabilities 963 sdev->request_queue->backing_dev_info->capabilities
964 |= BDI_CAP_STABLE_WRITES; 964 |= BDI_CAP_STABLE_WRITES;
965 blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
966 blk_queue_dma_alignment(sdev->request_queue, 0); 965 blk_queue_dma_alignment(sdev->request_queue, 0);
967 return 0; 966 return 0;
968} 967}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 0cc1567eacc1..ff1d612f6fb9 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -577,6 +577,11 @@ int sas_ata_init(struct domain_device *found_dev)
577 ata_sas_port_destroy(ap); 577 ata_sas_port_destroy(ap);
578 return rc; 578 return rc;
579 } 579 }
580 rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap);
581 if (rc) {
582 ata_sas_port_destroy(ap);
583 return rc;
584 }
580 found_dev->sata_dev.ap = ap; 585 found_dev->sata_dev.ap = ap;
581 586
582 return 0; 587 return 0;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index a0fa7ef3a071..1ffca28fe6a8 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -314,6 +314,7 @@ void sas_free_device(struct kref *kref)
314 kfree(dev->ex_dev.ex_phy); 314 kfree(dev->ex_dev.ex_phy);
315 315
316 if (dev_is_sata(dev) && dev->sata_dev.ap) { 316 if (dev_is_sata(dev) && dev->sata_dev.ap) {
317 ata_sas_tport_delete(dev->sata_dev.ap);
317 ata_sas_port_destroy(dev->sata_dev.ap); 318 ata_sas_port_destroy(dev->sata_dev.ap);
318 dev->sata_dev.ap = NULL; 319 dev->sata_dev.ap = NULL;
319 } 320 }
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 6c0d351c0d0d..20b249a649dd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -64,8 +64,6 @@ struct lpfc_sli2_slim;
64#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 64#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
65#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 65#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
66#define LPFC_VNAME_LEN 100 /* vport symbolic name length */ 66#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
67#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
68 queue depth change in millisecs */
69#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */ 67#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
70#define LPFC_MIN_TGT_QDEPTH 10 68#define LPFC_MIN_TGT_QDEPTH 10
71#define LPFC_MAX_TGT_QDEPTH 0xFFFF 69#define LPFC_MAX_TGT_QDEPTH 0xFFFF
@@ -784,6 +782,7 @@ struct lpfc_hba {
784 uint32_t cfg_nvme_oas; 782 uint32_t cfg_nvme_oas;
785 uint32_t cfg_nvme_embed_cmd; 783 uint32_t cfg_nvme_embed_cmd;
786 uint32_t cfg_nvme_io_channel; 784 uint32_t cfg_nvme_io_channel;
785 uint32_t cfg_nvmet_mrq_post;
787 uint32_t cfg_nvmet_mrq; 786 uint32_t cfg_nvmet_mrq;
788 uint32_t cfg_enable_nvmet; 787 uint32_t cfg_enable_nvmet;
789 uint32_t cfg_nvme_enable_fb; 788 uint32_t cfg_nvme_enable_fb;
@@ -922,12 +921,6 @@ struct lpfc_hba {
922 atomic_t fc4ScsiOutputRequests; 921 atomic_t fc4ScsiOutputRequests;
923 atomic_t fc4ScsiControlRequests; 922 atomic_t fc4ScsiControlRequests;
924 atomic_t fc4ScsiIoCmpls; 923 atomic_t fc4ScsiIoCmpls;
925 atomic_t fc4NvmeInputRequests;
926 atomic_t fc4NvmeOutputRequests;
927 atomic_t fc4NvmeControlRequests;
928 atomic_t fc4NvmeIoCmpls;
929 atomic_t fc4NvmeLsRequests;
930 atomic_t fc4NvmeLsCmpls;
931 924
932 uint64_t bg_guard_err_cnt; 925 uint64_t bg_guard_err_cnt;
933 uint64_t bg_apptag_err_cnt; 926 uint64_t bg_apptag_err_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2ac1d21c553f..729d343861f4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
@@ -149,10 +149,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
149 struct lpfc_nvmet_tgtport *tgtp; 149 struct lpfc_nvmet_tgtport *tgtp;
150 struct nvme_fc_local_port *localport; 150 struct nvme_fc_local_port *localport;
151 struct lpfc_nvme_lport *lport; 151 struct lpfc_nvme_lport *lport;
152 struct lpfc_nvme_rport *rport;
152 struct lpfc_nodelist *ndlp; 153 struct lpfc_nodelist *ndlp;
153 struct nvme_fc_remote_port *nrport; 154 struct nvme_fc_remote_port *nrport;
154 uint64_t data1, data2, data3, tot; 155 struct lpfc_nvme_ctrl_stat *cstat;
156 uint64_t data1, data2, data3;
157 uint64_t totin, totout, tot;
155 char *statep; 158 char *statep;
159 int i;
156 int len = 0; 160 int len = 0;
157 161
158 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { 162 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
@@ -293,6 +297,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
293 len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n"); 297 len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
294 298
295 spin_lock_irq(shost->host_lock); 299 spin_lock_irq(shost->host_lock);
300 len += snprintf(buf + len, PAGE_SIZE - len,
301 "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
302 phba->brd_no,
303 phba->sli4_hba.max_cfg_param.max_xri,
304 phba->sli4_hba.nvme_xri_max,
305 phba->sli4_hba.scsi_xri_max,
306 lpfc_sli4_get_els_iocb_cnt(phba));
296 307
297 /* Port state is only one of two values for now. */ 308 /* Port state is only one of two values for now. */
298 if (localport->port_id) 309 if (localport->port_id)
@@ -309,11 +320,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
309 localport->port_id, statep); 320 localport->port_id, statep);
310 321
311 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 322 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
312 if (!ndlp->nrport) 323 rport = lpfc_ndlp_get_nrport(ndlp);
324 if (!rport)
313 continue; 325 continue;
314 326
315 /* local short-hand pointer. */ 327 /* local short-hand pointer. */
316 nrport = ndlp->nrport->remoteport; 328 nrport = rport->remoteport;
329 if (!nrport)
330 continue;
317 331
318 /* Port state is only one of two values for now. */ 332 /* Port state is only one of two values for now. */
319 switch (nrport->port_state) { 333 switch (nrport->port_state) {
@@ -364,11 +378,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
364 } 378 }
365 spin_unlock_irq(shost->host_lock); 379 spin_unlock_irq(shost->host_lock);
366 380
381 if (!lport)
382 return len;
383
367 len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n"); 384 len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
368 len += snprintf(buf+len, PAGE_SIZE-len, 385 len += snprintf(buf+len, PAGE_SIZE-len,
369 "LS: Xmt %010x Cmpl %010x Abort %08x\n", 386 "LS: Xmt %010x Cmpl %010x Abort %08x\n",
370 atomic_read(&phba->fc4NvmeLsRequests), 387 atomic_read(&lport->fc4NvmeLsRequests),
371 atomic_read(&phba->fc4NvmeLsCmpls), 388 atomic_read(&lport->fc4NvmeLsCmpls),
372 atomic_read(&lport->xmt_ls_abort)); 389 atomic_read(&lport->xmt_ls_abort));
373 390
374 len += snprintf(buf + len, PAGE_SIZE - len, 391 len += snprintf(buf + len, PAGE_SIZE - len,
@@ -377,28 +394,33 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
377 atomic_read(&lport->cmpl_ls_xb), 394 atomic_read(&lport->cmpl_ls_xb),
378 atomic_read(&lport->cmpl_ls_err)); 395 atomic_read(&lport->cmpl_ls_err));
379 396
380 tot = atomic_read(&phba->fc4NvmeIoCmpls); 397 totin = 0;
381 data1 = atomic_read(&phba->fc4NvmeInputRequests); 398 totout = 0;
382 data2 = atomic_read(&phba->fc4NvmeOutputRequests); 399 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
383 data3 = atomic_read(&phba->fc4NvmeControlRequests); 400 cstat = &lport->cstat[i];
401 tot = atomic_read(&cstat->fc4NvmeIoCmpls);
402 totin += tot;
403 data1 = atomic_read(&cstat->fc4NvmeInputRequests);
404 data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
405 data3 = atomic_read(&cstat->fc4NvmeControlRequests);
406 totout += (data1 + data2 + data3);
407 }
384 len += snprintf(buf+len, PAGE_SIZE-len, 408 len += snprintf(buf+len, PAGE_SIZE-len,
385 "FCP: Rd %016llx Wr %016llx IO %016llx\n", 409 "Total FCP Cmpl %016llx Issue %016llx "
386 data1, data2, data3); 410 "OutIO %016llx\n",
411 totin, totout, totout - totin);
387 412
388 len += snprintf(buf+len, PAGE_SIZE-len, 413 len += snprintf(buf+len, PAGE_SIZE-len,
389 " noxri %08x nondlp %08x qdepth %08x " 414 " abort %08x noxri %08x nondlp %08x qdepth %08x "
390 "wqerr %08x\n", 415 "wqerr %08x err %08x\n",
416 atomic_read(&lport->xmt_fcp_abort),
391 atomic_read(&lport->xmt_fcp_noxri), 417 atomic_read(&lport->xmt_fcp_noxri),
392 atomic_read(&lport->xmt_fcp_bad_ndlp), 418 atomic_read(&lport->xmt_fcp_bad_ndlp),
393 atomic_read(&lport->xmt_fcp_qdepth), 419 atomic_read(&lport->xmt_fcp_qdepth),
420 atomic_read(&lport->xmt_fcp_err),
394 atomic_read(&lport->xmt_fcp_wqerr)); 421 atomic_read(&lport->xmt_fcp_wqerr));
395 422
396 len += snprintf(buf + len, PAGE_SIZE - len, 423 len += snprintf(buf + len, PAGE_SIZE - len,
397 " Cmpl %016llx Outstanding %016llx Abort %08x\n",
398 tot, ((data1 + data2 + data3) - tot),
399 atomic_read(&lport->xmt_fcp_abort));
400
401 len += snprintf(buf + len, PAGE_SIZE - len,
402 "FCP CMPL: xb %08x Err %08x\n", 424 "FCP CMPL: xb %08x Err %08x\n",
403 atomic_read(&lport->cmpl_fcp_xb), 425 atomic_read(&lport->cmpl_fcp_xb),
404 atomic_read(&lport->cmpl_fcp_err)); 426 atomic_read(&lport->cmpl_fcp_err));
@@ -3280,6 +3302,9 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3280{ 3302{
3281 struct Scsi_Host *shost; 3303 struct Scsi_Host *shost;
3282 struct lpfc_nodelist *ndlp; 3304 struct lpfc_nodelist *ndlp;
3305#if (IS_ENABLED(CONFIG_NVME_FC))
3306 struct lpfc_nvme_rport *rport;
3307#endif
3283 3308
3284 shost = lpfc_shost_from_vport(vport); 3309 shost = lpfc_shost_from_vport(vport);
3285 spin_lock_irq(shost->host_lock); 3310 spin_lock_irq(shost->host_lock);
@@ -3289,8 +3314,9 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3289 if (ndlp->rport) 3314 if (ndlp->rport)
3290 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3315 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3291#if (IS_ENABLED(CONFIG_NVME_FC)) 3316#if (IS_ENABLED(CONFIG_NVME_FC))
3292 if (ndlp->nrport) 3317 rport = lpfc_ndlp_get_nrport(ndlp);
3293 nvme_fc_set_remoteport_devloss(ndlp->nrport->remoteport, 3318 if (rport)
3319 nvme_fc_set_remoteport_devloss(rport->remoteport,
3294 vport->cfg_devloss_tmo); 3320 vport->cfg_devloss_tmo);
3295#endif 3321#endif
3296 } 3322 }
@@ -3414,6 +3440,15 @@ LPFC_ATTR_R(nvmet_mrq,
3414 "Specify number of RQ pairs for processing NVMET cmds"); 3440 "Specify number of RQ pairs for processing NVMET cmds");
3415 3441
3416/* 3442/*
3443 * lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post
3444 * to each NVMET RQ. Range 64 to 2048, default is 512.
3445 */
3446LPFC_ATTR_R(nvmet_mrq_post,
3447 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3448 LPFC_NVMET_RQE_DEF_COUNT,
3449 "Specify number of RQ buffers to initially post");
3450
3451/*
3417 * lpfc_enable_fc4_type: Defines what FC4 types are supported. 3452 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3418 * Supported Values: 1 - register just FCP 3453 * Supported Values: 1 - register just FCP
3419 * 3 - register both FCP and NVME 3454 * 3 - register both FCP and NVME
@@ -3469,8 +3504,49 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
3469# tgt_queue_depth: This parameter is used to limit the number of outstanding 3504# tgt_queue_depth: This parameter is used to limit the number of outstanding
3470# commands per target port. Value range is [10,65535]. Default value is 65535. 3505# commands per target port. Value range is [10,65535]. Default value is 65535.
3471*/ 3506*/
3472LPFC_VPORT_ATTR_RW(tgt_queue_depth, 65535, 10, 65535, 3507static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
3473 "Max number of FCP commands we can queue to a specific target port"); 3508module_param(lpfc_tgt_queue_depth, uint, 0444);
3509MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
3510lpfc_vport_param_show(tgt_queue_depth);
3511lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
3512 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
3513
3514/**
3515 * lpfc_tgt_queue_depth_store: Sets an attribute value.
3516 * @phba: pointer to the adapter structure.
3517 * @val: integer attribute value.
3518 *
3519 * Description: Sets the parameter to the new value.
3520 *
3521 * Returns:
3522 * zero on success
3523 * -EINVAL if val is invalid
3524 */
3525static int
3526lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
3527{
3528 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3529 struct lpfc_nodelist *ndlp;
3530
3531 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
3532 return -EINVAL;
3533
3534 if (val == vport->cfg_tgt_queue_depth)
3535 return 0;
3536
3537 spin_lock_irq(shost->host_lock);
3538 vport->cfg_tgt_queue_depth = val;
3539
3540 /* Next loop thru nodelist and change cmd_qdepth */
3541 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3542 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3543
3544 spin_unlock_irq(shost->host_lock);
3545 return 0;
3546}
3547
3548lpfc_vport_param_store(tgt_queue_depth);
3549static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
3474 3550
3475/* 3551/*
3476# hba_queue_depth: This parameter is used to limit the number of outstanding 3552# hba_queue_depth: This parameter is used to limit the number of outstanding
@@ -5302,6 +5378,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
5302 &dev_attr_lpfc_suppress_rsp, 5378 &dev_attr_lpfc_suppress_rsp,
5303 &dev_attr_lpfc_nvme_io_channel, 5379 &dev_attr_lpfc_nvme_io_channel,
5304 &dev_attr_lpfc_nvmet_mrq, 5380 &dev_attr_lpfc_nvmet_mrq,
5381 &dev_attr_lpfc_nvmet_mrq_post,
5305 &dev_attr_lpfc_nvme_enable_fb, 5382 &dev_attr_lpfc_nvme_enable_fb,
5306 &dev_attr_lpfc_nvmet_fb_size, 5383 &dev_attr_lpfc_nvmet_fb_size,
5307 &dev_attr_lpfc_enable_bg, 5384 &dev_attr_lpfc_enable_bg,
@@ -6352,6 +6429,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
6352 6429
6353 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type); 6430 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
6354 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq); 6431 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
6432 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
6355 6433
6356 /* Initialize first burst. Target vs Initiator are different. */ 6434 /* Initialize first burst. Target vs Initiator are different. */
6357 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); 6435 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 0f174ca80f67..edb1a18a6414 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3621,7 +3621,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3621 bsg_reply->result = 0; 3621 bsg_reply->result = 0;
3622 3622
3623 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3623 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3624 "2937 SLI_CONFIG ext-buffer maibox command " 3624 "2937 SLI_CONFIG ext-buffer mailbox command "
3625 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3625 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3626 phba->mbox_ext_buf_ctx.nembType, 3626 phba->mbox_ext_buf_ctx.nembType,
3627 phba->mbox_ext_buf_ctx.mboxType, size); 3627 phba->mbox_ext_buf_ctx.mboxType, size);
@@ -3632,7 +3632,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3632 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); 3632 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3633 } else { 3633 } else {
3634 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3634 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3635 "2938 SLI_CONFIG ext-buffer maibox " 3635 "2938 SLI_CONFIG ext-buffer mailbox "
3636 "command (x%x/x%x) failure, rc:x%x\n", 3636 "command (x%x/x%x) failure, rc:x%x\n",
3637 phba->mbox_ext_buf_ctx.nembType, 3637 phba->mbox_ext_buf_ctx.nembType,
3638 phba->mbox_ext_buf_ctx.mboxType, rc); 3638 phba->mbox_ext_buf_ctx.mboxType, rc);
@@ -3666,7 +3666,7 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3666 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3666 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3667 3667
3668 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3668 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3669 "2939 SLI_CONFIG ext-buffer rd maibox command " 3669 "2939 SLI_CONFIG ext-buffer rd mailbox command "
3670 "complete, ctxState:x%x, mbxStatus:x%x\n", 3670 "complete, ctxState:x%x, mbxStatus:x%x\n",
3671 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3671 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3672 3672
@@ -3706,7 +3706,7 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3706 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3706 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3707 3707
3708 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3708 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3709 "2940 SLI_CONFIG ext-buffer wr maibox command " 3709 "2940 SLI_CONFIG ext-buffer wr mailbox command "
3710 "complete, ctxState:x%x, mbxStatus:x%x\n", 3710 "complete, ctxState:x%x, mbxStatus:x%x\n",
3711 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3711 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3712 3712
@@ -3988,12 +3988,12 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3988 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 3988 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3989 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3989 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3990 "2947 Issued SLI_CONFIG ext-buffer " 3990 "2947 Issued SLI_CONFIG ext-buffer "
3991 "maibox command, rc:x%x\n", rc); 3991 "mailbox command, rc:x%x\n", rc);
3992 return SLI_CONFIG_HANDLED; 3992 return SLI_CONFIG_HANDLED;
3993 } 3993 }
3994 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3994 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3995 "2948 Failed to issue SLI_CONFIG ext-buffer " 3995 "2948 Failed to issue SLI_CONFIG ext-buffer "
3996 "maibox command, rc:x%x\n", rc); 3996 "mailbox command, rc:x%x\n", rc);
3997 rc = -EPIPE; 3997 rc = -EPIPE;
3998 3998
3999job_error: 3999job_error:
@@ -4147,12 +4147,12 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4147 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 4147 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4148 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4148 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4149 "2955 Issued SLI_CONFIG ext-buffer " 4149 "2955 Issued SLI_CONFIG ext-buffer "
4150 "maibox command, rc:x%x\n", rc); 4150 "mailbox command, rc:x%x\n", rc);
4151 return SLI_CONFIG_HANDLED; 4151 return SLI_CONFIG_HANDLED;
4152 } 4152 }
4153 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4153 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4154 "2956 Failed to issue SLI_CONFIG ext-buffer " 4154 "2956 Failed to issue SLI_CONFIG ext-buffer "
4155 "maibox command, rc:x%x\n", rc); 4155 "mailbox command, rc:x%x\n", rc);
4156 rc = -EPIPE; 4156 rc = -EPIPE;
4157 goto job_error; 4157 goto job_error;
4158 } 4158 }
@@ -4492,12 +4492,12 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4492 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 4492 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4493 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4493 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4494 "2969 Issued SLI_CONFIG ext-buffer " 4494 "2969 Issued SLI_CONFIG ext-buffer "
4495 "maibox command, rc:x%x\n", rc); 4495 "mailbox command, rc:x%x\n", rc);
4496 return SLI_CONFIG_HANDLED; 4496 return SLI_CONFIG_HANDLED;
4497 } 4497 }
4498 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 4498 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4499 "2970 Failed to issue SLI_CONFIG ext-buffer " 4499 "2970 Failed to issue SLI_CONFIG ext-buffer "
4500 "maibox command, rc:x%x\n", rc); 4500 "mailbox command, rc:x%x\n", rc);
4501 rc = -EPIPE; 4501 rc = -EPIPE;
4502 goto job_error; 4502 goto job_error;
4503 } 4503 }
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0617c8ea88c6..d4a200ae5a6f 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -471,6 +471,11 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
471 "Parse GID_FTrsp: did:x%x flg:x%x x%x", 471 "Parse GID_FTrsp: did:x%x flg:x%x x%x",
472 Did, ndlp->nlp_flag, vport->fc_flag); 472 Did, ndlp->nlp_flag, vport->fc_flag);
473 473
 474 /* Don't assume the rport kept its previous
 475 * FC4 type; re-derive it from this response.
 476 */
477 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
478
474 /* By default, the driver expects to support FCP FC4 */ 479 /* By default, the driver expects to support FCP FC4 */
475 if (fc4_type == FC_TYPE_FCP) 480 if (fc4_type == FC_TYPE_FCP)
476 ndlp->nlp_fc4_type |= NLP_FC4_FCP; 481 ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -691,6 +696,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
691 vport->fc_flag &= ~FC_RSCN_DEFERRED; 696 vport->fc_flag &= ~FC_RSCN_DEFERRED;
692 spin_unlock_irq(shost->host_lock); 697 spin_unlock_irq(shost->host_lock);
693 698
699 /* This is a GID_FT completing so the gidft_inp counter was
700 * incremented before the GID_FT was issued to the wire.
701 */
702 vport->gidft_inp--;
703
694 /* 704 /*
695 * Skip processing the NS response 705 * Skip processing the NS response
696 * Re-issue the NS cmd 706 * Re-issue the NS cmd
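The added decrement keeps the in-flight counter balanced: gidft_inp is incremented when each GID_FT goes on the wire, so even the deferred-RSCN early return must account for the completion or the counter leaks. A sketch of the invariant with C11 atomics (names are illustrative):

#include <stdatomic.h>

static atomic_uint gidft_inp;	/* GID_FTs issued, not yet completed */

static void gidft_issue(void)
{
	atomic_fetch_add(&gidft_inp, 1);	/* counted before send */
	/* ... build and send the CT request ... */
}

static void gidft_cmpl(int rscn_deferred)
{
	/* Every completion path drops the counter exactly once. */
	atomic_fetch_sub(&gidft_inp, 1);
	if (rscn_deferred)
		return;	/* skip parsing; the command is re-issued */
	/* ... parse the GID_FT response ... */
}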
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index fb0dc2aeed91..9df0c051349f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2007-2015 Emulex. All rights reserved. * 6 * Copyright (C) 2007-2015 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
@@ -544,7 +544,7 @@ static int
544lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) 544lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
545{ 545{
546 int len = 0; 546 int len = 0;
547 int cnt; 547 int i, iocnt, outio, cnt;
548 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 548 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
549 struct lpfc_hba *phba = vport->phba; 549 struct lpfc_hba *phba = vport->phba;
550 struct lpfc_nodelist *ndlp; 550 struct lpfc_nodelist *ndlp;
@@ -552,12 +552,15 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
552 struct nvme_fc_local_port *localport; 552 struct nvme_fc_local_port *localport;
553 struct lpfc_nvmet_tgtport *tgtp; 553 struct lpfc_nvmet_tgtport *tgtp;
554 struct nvme_fc_remote_port *nrport; 554 struct nvme_fc_remote_port *nrport;
555 struct lpfc_nvme_rport *rport;
555 556
556 cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); 557 cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
558 outio = 0;
557 559
558 len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n"); 560 len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
559 spin_lock_irq(shost->host_lock); 561 spin_lock_irq(shost->host_lock);
560 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 562 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
563 iocnt = 0;
561 if (!cnt) { 564 if (!cnt) {
562 len += snprintf(buf+len, size-len, 565 len += snprintf(buf+len, size-len,
563 "Missing Nodelist Entries\n"); 566 "Missing Nodelist Entries\n");
@@ -585,9 +588,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
585 break; 588 break;
586 case NLP_STE_UNMAPPED_NODE: 589 case NLP_STE_UNMAPPED_NODE:
587 statep = "UNMAP "; 590 statep = "UNMAP ";
591 iocnt = 1;
588 break; 592 break;
589 case NLP_STE_MAPPED_NODE: 593 case NLP_STE_MAPPED_NODE:
590 statep = "MAPPED"; 594 statep = "MAPPED";
595 iocnt = 1;
591 break; 596 break;
592 case NLP_STE_NPR_NODE: 597 case NLP_STE_NPR_NODE:
593 statep = "NPR "; 598 statep = "NPR ";
@@ -614,8 +619,10 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
614 len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); 619 len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
615 if (ndlp->nlp_type & NLP_FC_NODE) 620 if (ndlp->nlp_type & NLP_FC_NODE)
616 len += snprintf(buf+len, size-len, "FC_NODE "); 621 len += snprintf(buf+len, size-len, "FC_NODE ");
617 if (ndlp->nlp_type & NLP_FABRIC) 622 if (ndlp->nlp_type & NLP_FABRIC) {
618 len += snprintf(buf+len, size-len, "FABRIC "); 623 len += snprintf(buf+len, size-len, "FABRIC ");
624 iocnt = 0;
625 }
619 if (ndlp->nlp_type & NLP_FCP_TARGET) 626 if (ndlp->nlp_type & NLP_FCP_TARGET)
620 len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ", 627 len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
621 ndlp->nlp_sid); 628 ndlp->nlp_sid);
@@ -632,10 +639,20 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
632 ndlp->nlp_usg_map); 639 ndlp->nlp_usg_map);
633 len += snprintf(buf+len, size-len, "refcnt:%x", 640 len += snprintf(buf+len, size-len, "refcnt:%x",
634 kref_read(&ndlp->kref)); 641 kref_read(&ndlp->kref));
642 if (iocnt) {
643 i = atomic_read(&ndlp->cmd_pending);
644 len += snprintf(buf + len, size - len,
645 " OutIO:x%x Qdepth x%x",
646 i, ndlp->cmd_qdepth);
647 outio += i;
648 }
635 len += snprintf(buf+len, size-len, "\n"); 649 len += snprintf(buf+len, size-len, "\n");
636 } 650 }
637 spin_unlock_irq(shost->host_lock); 651 spin_unlock_irq(shost->host_lock);
638 652
653 len += snprintf(buf + len, size - len,
654 "\nOutstanding IO x%x\n", outio);
655
639 if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) { 656 if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
640 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 657 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
641 len += snprintf(buf + len, size - len, 658 len += snprintf(buf + len, size - len,
@@ -679,10 +696,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
679 len += snprintf(buf + len, size - len, "\tRport List:\n"); 696 len += snprintf(buf + len, size - len, "\tRport List:\n");
680 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 697 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
681 /* local short-hand pointer. */ 698 /* local short-hand pointer. */
682 if (!ndlp->nrport) 699 rport = lpfc_ndlp_get_nrport(ndlp);
700 if (!rport)
683 continue; 701 continue;
684 702
685 nrport = ndlp->nrport->remoteport; 703 nrport = rport->remoteport;
704 if (!nrport)
705 continue;
686 706
687 /* Port state is only one of two values for now. */ 707 /* Port state is only one of two values for now. */
688 switch (nrport->port_state) { 708 switch (nrport->port_state) {
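The nodelist dump now tallies outstanding IO only for nodes that can carry it: iocnt is set for MAPPED/UNMAPPED states and cleared again for fabric nodes, and each qualifying node's atomic cmd_pending feeds a vport-wide total. A compact model of that accounting (hypothetical types, not the driver's):

#include <stdatomic.h>

struct nl_node {
	int is_fabric;		/* NLP_FABRIC in the driver */
	int is_mapped;		/* MAPPED or UNMAPPED node state */
	atomic_int cmd_pending;
	struct nl_node *next;
};

static int total_outstanding(struct nl_node *head)
{
	int outio = 0;

	for (struct nl_node *n = head; n; n = n->next) {
		if (n->is_fabric || !n->is_mapped)
			continue;	/* fabric nodes carry no IO */
		outio += atomic_load(&n->cmd_pending);
	}
	return outio;	/* printed as "Outstanding IO" above */
}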
@@ -751,10 +771,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
751 struct lpfc_nvmet_tgtport *tgtp; 771 struct lpfc_nvmet_tgtport *tgtp;
752 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; 772 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
753 struct nvme_fc_local_port *localport; 773 struct nvme_fc_local_port *localport;
774 struct lpfc_nvme_ctrl_stat *cstat;
754 struct lpfc_nvme_lport *lport; 775 struct lpfc_nvme_lport *lport;
755 uint64_t tot, data1, data2, data3; 776 uint64_t data1, data2, data3;
777 uint64_t tot, totin, totout;
778 int cnt, i, maxch;
756 int len = 0; 779 int len = 0;
757 int cnt;
758 780
759 if (phba->nvmet_support) { 781 if (phba->nvmet_support) {
760 if (!phba->targetport) 782 if (!phba->targetport)
@@ -880,33 +902,52 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
880 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 902 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
881 return len; 903 return len;
882 904
905 localport = vport->localport;
906 if (!localport)
907 return len;
908 lport = (struct lpfc_nvme_lport *)localport->private;
909 if (!lport)
910 return len;
911
883 len += snprintf(buf + len, size - len, 912 len += snprintf(buf + len, size - len,
884 "\nNVME Lport Statistics\n"); 913 "\nNVME Lport Statistics\n");
885 914
886 len += snprintf(buf + len, size - len, 915 len += snprintf(buf + len, size - len,
887 "LS: Xmt %016x Cmpl %016x\n", 916 "LS: Xmt %016x Cmpl %016x\n",
888 atomic_read(&phba->fc4NvmeLsRequests), 917 atomic_read(&lport->fc4NvmeLsRequests),
889 atomic_read(&phba->fc4NvmeLsCmpls)); 918 atomic_read(&lport->fc4NvmeLsCmpls));
890
891 tot = atomic_read(&phba->fc4NvmeIoCmpls);
892 data1 = atomic_read(&phba->fc4NvmeInputRequests);
893 data2 = atomic_read(&phba->fc4NvmeOutputRequests);
894 data3 = atomic_read(&phba->fc4NvmeControlRequests);
895 919
896 len += snprintf(buf + len, size - len, 920 if (phba->cfg_nvme_io_channel < 32)
897 "FCP: Rd %016llx Wr %016llx IO %016llx\n", 921 maxch = phba->cfg_nvme_io_channel;
898 data1, data2, data3); 922 else
899 923 maxch = 32;
900 len += snprintf(buf + len, size - len, 924 totin = 0;
901 " Cmpl %016llx Outstanding %016llx\n", 925 totout = 0;
902 tot, (data1 + data2 + data3) - tot); 926 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
927 cstat = &lport->cstat[i];
928 tot = atomic_read(&cstat->fc4NvmeIoCmpls);
929 totin += tot;
930 data1 = atomic_read(&cstat->fc4NvmeInputRequests);
931 data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
932 data3 = atomic_read(&cstat->fc4NvmeControlRequests);
933 totout += (data1 + data2 + data3);
934
935 /* Limit to 32, debugfs display buffer limitation */
936 if (i >= 32)
937 continue;
903 938
904 localport = vport->localport; 939 len += snprintf(buf + len, PAGE_SIZE - len,
905 if (!localport) 940 "FCP (%d): Rd %016llx Wr %016llx "
906 return len; 941 "IO %016llx ",
907 lport = (struct lpfc_nvme_lport *)localport->private; 942 i, data1, data2, data3);
908 if (!lport) 943 len += snprintf(buf + len, PAGE_SIZE - len,
909 return len; 944 "Cmpl %016llx OutIO %016llx\n",
945 tot, ((data1 + data2 + data3) - tot));
946 }
947 len += snprintf(buf + len, PAGE_SIZE - len,
948 "Total FCP Cmpl %016llx Issue %016llx "
949 "OutIO %016llx\n",
950 totin, totout, totout - totin);
910 951
911 len += snprintf(buf + len, size - len, 952 len += snprintf(buf + len, size - len,
912 "LS Xmt Err: Abrt %08x Err %08x " 953 "LS Xmt Err: Abrt %08x Err %08x "
@@ -918,11 +959,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
918 959
919 len += snprintf(buf + len, size - len, 960 len += snprintf(buf + len, size - len,
920 "FCP Xmt Err: noxri %06x nondlp %06x " 961 "FCP Xmt Err: noxri %06x nondlp %06x "
921 "qdepth %06x wqerr %06x Abrt %06x\n", 962 "qdepth %06x wqerr %06x err %06x Abrt %06x\n",
922 atomic_read(&lport->xmt_fcp_noxri), 963 atomic_read(&lport->xmt_fcp_noxri),
923 atomic_read(&lport->xmt_fcp_bad_ndlp), 964 atomic_read(&lport->xmt_fcp_bad_ndlp),
924 atomic_read(&lport->xmt_fcp_qdepth), 965 atomic_read(&lport->xmt_fcp_qdepth),
925 atomic_read(&lport->xmt_fcp_wqerr), 966 atomic_read(&lport->xmt_fcp_wqerr),
967 atomic_read(&lport->xmt_fcp_err),
926 atomic_read(&lport->xmt_fcp_abort)); 968 atomic_read(&lport->xmt_fcp_abort));
927 969
928 len += snprintf(buf + len, size - len, 970 len += snprintf(buf + len, size - len,
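With the counters moved from the adapter to a per-IO-channel array on the lport, the debugfs dump sums every channel for the totals but caps the per-channel lines at 32 to respect the display buffer. A sketch of that loop shape (standalone types; not the driver's structures):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct cstat {
	atomic_uint_fast64_t in, out, ctrl, cmpl;
};

static void dump_stats(struct cstat *cs, int nchan, FILE *f)
{
	uint64_t totin = 0, totout = 0;

	for (int i = 0; i < nchan; i++) {
		uint64_t cmpl = atomic_load(&cs[i].cmpl);
		uint64_t issued = atomic_load(&cs[i].in) +
				  atomic_load(&cs[i].out) +
				  atomic_load(&cs[i].ctrl);

		totin += cmpl;
		totout += issued;
		if (i >= 32)	/* display cap; totals still counted */
			continue;
		fprintf(f, "FCP (%d): Cmpl %llx OutIO %llx\n", i,
			(unsigned long long)cmpl,
			(unsigned long long)(issued - cmpl));
	}
	fprintf(f, "Total Cmpl %llx Issue %llx OutIO %llx\n",
		(unsigned long long)totin, (unsigned long long)totout,
		(unsigned long long)(totout - totin));
}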
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 74895e62aaea..6d84a10fef07 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -6268,7 +6268,6 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
6268 * flush the RSCN. Otherwise, the outstanding requests 6268 * flush the RSCN. Otherwise, the outstanding requests
6269 * need to complete. 6269 * need to complete.
6270 */ 6270 */
6271 vport->gidft_inp = 0;
6272 if (lpfc_issue_gidft(vport) > 0) 6271 if (lpfc_issue_gidft(vport) > 0)
6273 return 1; 6272 return 1;
6274 } else { 6273 } else {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3e7712cd6c9a..2fef54fab86d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -708,8 +708,7 @@ lpfc_work_done(struct lpfc_hba *phba)
708 HA_RXMASK)); 708 HA_RXMASK));
709 } 709 }
710 } 710 }
711 if ((phba->sli_rev == LPFC_SLI_REV4) && 711 if (phba->sli_rev == LPFC_SLI_REV4)
712 (!list_empty(&pring->txq)))
713 lpfc_drain_txq(phba); 712 lpfc_drain_txq(phba);
714 /* 713 /*
715 * Turn on Ring interrupts 714 * Turn on Ring interrupts
@@ -3876,10 +3875,6 @@ int
3876lpfc_issue_gidft(struct lpfc_vport *vport) 3875lpfc_issue_gidft(struct lpfc_vport *vport)
3877{ 3876{
3878 struct lpfc_hba *phba = vport->phba; 3877 struct lpfc_hba *phba = vport->phba;
3879 struct lpfc_nodelist *ndlp;
3880
3881 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3882 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
3883 3878
3884 /* Good status, issue CT Request to NameServer */ 3879 /* Good status, issue CT Request to NameServer */
3885 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 3880 if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 98b80559c215..f43f0bacb77a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2009-2016 Emulex. All rights reserved. * 6 * Copyright (C) 2009-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
@@ -566,6 +566,7 @@ struct lpfc_register {
566 566
567/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */ 567/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
568#define LPFC_SLI_INTF 0x0058 568#define LPFC_SLI_INTF 0x0058
569#define LPFC_SLI_ASIC_VER 0x009C
569 570
570#define LPFC_CTL_PORT_SEM_OFFSET 0x400 571#define LPFC_CTL_PORT_SEM_OFFSET 0x400
571#define lpfc_port_smphr_perr_SHIFT 31 572#define lpfc_port_smphr_perr_SHIFT 31
@@ -3912,6 +3913,7 @@ struct lpfc_acqe_link {
3912#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 3913#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
3913#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 3914#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
3914#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 3915#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
3916#define LPFC_ASYNC_LINK_FAULT_LR_LRR 0x3
3915#define lpfc_acqe_logical_link_speed_SHIFT 16 3917#define lpfc_acqe_logical_link_speed_SHIFT 16
3916#define lpfc_acqe_logical_link_speed_MASK 0x0000FFFF 3918#define lpfc_acqe_logical_link_speed_MASK 0x0000FFFF
3917#define lpfc_acqe_logical_link_speed_WORD word1 3919#define lpfc_acqe_logical_link_speed_WORD word1
@@ -4616,6 +4618,9 @@ union lpfc_wqe128 {
4616 struct send_frame_wqe send_frame; 4618 struct send_frame_wqe send_frame;
4617}; 4619};
4618 4620
4621#define MAGIC_NUMER_G6 0xFEAA0003
4622#define MAGIC_NUMER_G7 0xFEAA0005
4623
4619struct lpfc_grp_hdr { 4624struct lpfc_grp_hdr {
4620 uint32_t size; 4625 uint32_t size;
4621 uint32_t magic_number; 4626 uint32_t magic_number;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7887468c71b4..7ae343b14630 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
@@ -1266,6 +1266,9 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1266 uint64_t tot, data1, data2, data3; 1266 uint64_t tot, data1, data2, data3;
1267 struct lpfc_nvmet_tgtport *tgtp; 1267 struct lpfc_nvmet_tgtport *tgtp;
1268 struct lpfc_register reg_data; 1268 struct lpfc_register reg_data;
1269 struct nvme_fc_local_port *localport;
1270 struct lpfc_nvme_lport *lport;
1271 struct lpfc_nvme_ctrl_stat *cstat;
1269 void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr; 1272 void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
1270 1273
1271 vports = lpfc_create_vport_work_array(phba); 1274 vports = lpfc_create_vport_work_array(phba);
@@ -1299,14 +1302,25 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1299 tot += atomic_read(&tgtp->xmt_fcp_release); 1302 tot += atomic_read(&tgtp->xmt_fcp_release);
1300 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; 1303 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
1301 } else { 1304 } else {
1302 tot = atomic_read(&phba->fc4NvmeIoCmpls); 1305 localport = phba->pport->localport;
1303 data1 = atomic_read( 1306 if (!localport || !localport->private)
1304 &phba->fc4NvmeInputRequests); 1307 goto skip_eqdelay;
1305 data2 = atomic_read( 1308 lport = (struct lpfc_nvme_lport *)
1306 &phba->fc4NvmeOutputRequests); 1309 localport->private;
1307 data3 = atomic_read( 1310 tot = 0;
1308 &phba->fc4NvmeControlRequests); 1311 for (i = 0;
1309 tot = (data1 + data2 + data3) - tot; 1312 i < phba->cfg_nvme_io_channel; i++) {
1313 cstat = &lport->cstat[i];
1314 data1 = atomic_read(
1315 &cstat->fc4NvmeInputRequests);
1316 data2 = atomic_read(
1317 &cstat->fc4NvmeOutputRequests);
1318 data3 = atomic_read(
1319 &cstat->fc4NvmeControlRequests);
1320 tot += (data1 + data2 + data3);
1321 tot -= atomic_read(
1322 &cstat->fc4NvmeIoCmpls);
1323 }
1310 } 1324 }
1311 } 1325 }
1312 1326
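The heartbeat path mirrors the same per-channel accounting, with a guard first: localport and its private lport may already be torn down, in which case EQ-delay tuning is simply skipped. Sketched with a hypothetical cstat layout:

#include <stdatomic.h>

struct cstat2 { atomic_long in, out, ctrl, cmpl; };
struct lport { struct cstat2 *cstat; };

static long hb_outstanding(struct lport *lp, int nchan)
{
	long tot = 0;

	if (!lp)	/* localport/private gone: skip EQ tuning */
		return 0;

	for (int i = 0; i < nchan; i++) {
		struct cstat2 *cs = &lp->cstat[i];

		tot += atomic_load(&cs->in) + atomic_load(&cs->out) +
		       atomic_load(&cs->ctrl);
		tot -= atomic_load(&cs->cmpl);	/* issued minus done */
	}
	return tot;
}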
@@ -4265,32 +4279,24 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
4265 * @phba: pointer to lpfc hba data structure. 4279 * @phba: pointer to lpfc hba data structure.
4266 * @acqe_link: pointer to the async link completion queue entry. 4280 * @acqe_link: pointer to the async link completion queue entry.
4267 * 4281 *
4268 * This routine is to parse the SLI4 link-attention link fault code and 4282 * This routine is to parse the SLI4 link-attention link fault code.
4269 * translate it into the base driver's read link attention mailbox command
4270 * status.
4271 *
4272 * Return: Link-attention status in terms of base driver's coding.
4273 **/ 4283 **/
4274static uint16_t 4284static void
4275lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 4285lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
4276 struct lpfc_acqe_link *acqe_link) 4286 struct lpfc_acqe_link *acqe_link)
4277{ 4287{
4278 uint16_t latt_fault;
4279
4280 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 4288 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
4281 case LPFC_ASYNC_LINK_FAULT_NONE: 4289 case LPFC_ASYNC_LINK_FAULT_NONE:
4282 case LPFC_ASYNC_LINK_FAULT_LOCAL: 4290 case LPFC_ASYNC_LINK_FAULT_LOCAL:
4283 case LPFC_ASYNC_LINK_FAULT_REMOTE: 4291 case LPFC_ASYNC_LINK_FAULT_REMOTE:
4284 latt_fault = 0; 4292 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
4285 break; 4293 break;
4286 default: 4294 default:
4287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4295 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4288 "0398 Invalid link fault code: x%x\n", 4296 "0398 Unknown link fault code: x%x\n",
4289 bf_get(lpfc_acqe_link_fault, acqe_link)); 4297 bf_get(lpfc_acqe_link_fault, acqe_link));
4290 latt_fault = MBXERR_ERROR;
4291 break; 4298 break;
4292 } 4299 }
4293 return latt_fault;
4294} 4300}
4295 4301
4296/** 4302/**
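After the refactor the fault parser is purely diagnostic: it logs unrecognized codes and never synthesizes a mailbox status, while both callers preset mbxStatus to MBX_SUCCESS before parsing. A minimal sketch of the new shape (enum values are stand-ins for the LPFC_ASYNC_LINK_FAULT_* codes):

#include <stdio.h>

enum { FAULT_NONE, FAULT_LOCAL, FAULT_REMOTE, FAULT_LR_LRR };

static void parse_latt_fault(unsigned int fault)
{
	switch (fault) {
	case FAULT_NONE:
	case FAULT_LOCAL:
	case FAULT_REMOTE:
	case FAULT_LR_LRR:
		break;	/* known faults: nothing to translate */
	default:
		fprintf(stderr, "unknown link fault code: 0x%x\n", fault);
		break;
	}
}
/* Caller pattern: status defaults to success, the fault is only logged:
 *	mb->mbxStatus = MBX_SUCCESS;
 *	parse_latt_fault(fault_code);
 */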
@@ -4565,9 +4571,12 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
4565 * the READ_TOPOLOGY completion routine to continue without actually 4571 * the READ_TOPOLOGY completion routine to continue without actually
4566 * sending the READ_TOPOLOGY mailbox command to the port. 4572 * sending the READ_TOPOLOGY mailbox command to the port.
4567 */ 4573 */
4568 /* Parse and translate status field */ 4574 /* Initialize completion status */
4569 mb = &pmb->u.mb; 4575 mb = &pmb->u.mb;
4570 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 4576 mb->mbxStatus = MBX_SUCCESS;
4577
4578 /* Parse port fault information field */
4579 lpfc_sli4_parse_latt_fault(phba, acqe_link);
4571 4580
4572 /* Parse and translate link attention fields */ 4581 /* Parse and translate link attention fields */
4573 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 4582 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
@@ -4695,10 +4704,12 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
4695 break; 4704 break;
4696 } 4705 }
4697 4706
4698 /* Parse and translate status field */ 4707 /* Initialize completion status */
4699 mb = &pmb->u.mb; 4708 mb = &pmb->u.mb;
4700 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, 4709 mb->mbxStatus = MBX_SUCCESS;
4701 (void *)acqe_fc); 4710
4711 /* Parse port fault information field */
4712 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
4702 4713
4703 /* Parse and translate link attention fields */ 4714 /* Parse and translate link attention fields */
4704 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 4715 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
@@ -5103,7 +5114,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
5103 if (rc) { 5114 if (rc) {
5104 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5115 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5105 LOG_DISCOVERY, 5116 LOG_DISCOVERY,
5106 "2772 Issue FCF rediscover mabilbox " 5117 "2772 Issue FCF rediscover mailbox "
5107 "command failed, fail through to FCF " 5118 "command failed, fail through to FCF "
5108 "dead event\n"); 5119 "dead event\n");
5109 spin_lock_irq(&phba->hbalock); 5120 spin_lock_irq(&phba->hbalock);
@@ -5195,7 +5206,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
5195 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 5206 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5196 LOG_DISCOVERY, 5207 LOG_DISCOVERY,
5197 "2774 Issue FCF rediscover " 5208 "2774 Issue FCF rediscover "
5198 "mabilbox command failed, " 5209 "mailbox command failed, "
5199 "through to CVL event\n"); 5210 "through to CVL event\n");
5200 spin_lock_irq(&phba->hbalock); 5211 spin_lock_irq(&phba->hbalock);
5201 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 5212 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
@@ -5839,6 +5850,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
5839 int fof_vectors = 0; 5850 int fof_vectors = 0;
5840 int extra; 5851 int extra;
5841 uint64_t wwn; 5852 uint64_t wwn;
5853 u32 if_type;
5854 u32 if_fam;
5842 5855
5843 phba->sli4_hba.num_online_cpu = num_online_cpus(); 5856 phba->sli4_hba.num_online_cpu = num_online_cpus();
5844 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 5857 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
@@ -6160,15 +6173,28 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6160 */ 6173 */
6161 rc = lpfc_get_sli4_parameters(phba, mboxq); 6174 rc = lpfc_get_sli4_parameters(phba, mboxq);
6162 if (rc) { 6175 if (rc) {
6176 if_type = bf_get(lpfc_sli_intf_if_type,
6177 &phba->sli4_hba.sli_intf);
6178 if_fam = bf_get(lpfc_sli_intf_sli_family,
6179 &phba->sli4_hba.sli_intf);
6163 if (phba->sli4_hba.extents_in_use && 6180 if (phba->sli4_hba.extents_in_use &&
6164 phba->sli4_hba.rpi_hdrs_in_use) { 6181 phba->sli4_hba.rpi_hdrs_in_use) {
6165 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6166 "2999 Unsupported SLI4 Parameters " 6183 "2999 Unsupported SLI4 Parameters "
6167 "Extents and RPI headers enabled.\n"); 6184 "Extents and RPI headers enabled.\n");
6185 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6186 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
6187 mempool_free(mboxq, phba->mbox_mem_pool);
6188 rc = -EIO;
6189 goto out_free_bsmbx;
6190 }
6191 }
6192 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
6193 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
6194 mempool_free(mboxq, phba->mbox_mem_pool);
6195 rc = -EIO;
6196 goto out_free_bsmbx;
6168 } 6197 }
6169 mempool_free(mboxq, phba->mbox_mem_pool);
6170 rc = -EIO;
6171 goto out_free_bsmbx;
6172 } 6198 }
6173 6199
6174 mempool_free(mboxq, phba->mbox_mem_pool); 6200 mempool_free(mboxq, phba->mbox_mem_pool);
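The restructured error path encodes a small decision table: when the extended parameters cannot be read, only IF_TYPE_0/BE2-family parts may continue in degraded mode, and never with extents and RPI headers both enabled. The same logic as a standalone predicate (hypothetical helper name):

/* Return 1 if the driver may proceed without extended SLI4 params. */
static int sli4_params_fallback_ok(int is_if_type0_be2,
				   int extents_and_rpi_hdrs)
{
	if (extents_and_rpi_hdrs && is_if_type0_be2)
		return 0;	/* unsupported combination: fail */
	if (!is_if_type0_be2)
		return 0;	/* only BE2 may run degraded: fail */
	return 1;		/* BE2 without the bad combo: continue */
}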
@@ -6406,8 +6432,11 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
6406 return error; 6432 return error;
6407 } 6433 }
6408 6434
6409 /* workqueue for deferred irq use */ 6435 /* The lpfc_wq workqueue for deferred irq use is only used for SLI4 */
6410 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); 6436 if (phba->sli_rev == LPFC_SLI_REV4)
6437 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6438 else
6439 phba->wq = NULL;
6411 6440
6412 return 0; 6441 return 0;
6413} 6442}
@@ -6430,7 +6459,8 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
6430 } 6459 }
6431 6460
6432 /* Stop kernel worker thread */ 6461 /* Stop kernel worker thread */
6433 kthread_stop(phba->worker_thread); 6462 if (phba->worker_thread)
6463 kthread_stop(phba->worker_thread);
6434} 6464}
6435 6465
6436/** 6466/**
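Because lpfc_wq is now created only for SLI4 and phase-2 setup can be unwound before the worker thread exists, teardown has to tolerate NULL resources. A small userspace model of that conditional-create/guarded-destroy pairing (a pthread stands in for the kernel thread):

#include <pthread.h>
#include <stdlib.h>

struct hba {
	int sli_rev;		/* 4 => SLI4, the only rev with a wq */
	pthread_t *worker;	/* NULL when setup never started it */
};

static void unset_phase2(struct hba *h)
{
	if (h->worker) {	/* guard mirrors the kthread_stop fix */
		pthread_join(*h->worker, NULL);
		free(h->worker);
		h->worker = NULL;
	}
}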
@@ -6895,12 +6925,6 @@ lpfc_create_shost(struct lpfc_hba *phba)
6895 atomic_set(&phba->fc4ScsiOutputRequests, 0); 6925 atomic_set(&phba->fc4ScsiOutputRequests, 0);
6896 atomic_set(&phba->fc4ScsiControlRequests, 0); 6926 atomic_set(&phba->fc4ScsiControlRequests, 0);
6897 atomic_set(&phba->fc4ScsiIoCmpls, 0); 6927 atomic_set(&phba->fc4ScsiIoCmpls, 0);
6898 atomic_set(&phba->fc4NvmeInputRequests, 0);
6899 atomic_set(&phba->fc4NvmeOutputRequests, 0);
6900 atomic_set(&phba->fc4NvmeControlRequests, 0);
6901 atomic_set(&phba->fc4NvmeIoCmpls, 0);
6902 atomic_set(&phba->fc4NvmeLsRequests, 0);
6903 atomic_set(&phba->fc4NvmeLsCmpls, 0);
6904 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6928 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
6905 if (!vport) 6929 if (!vport)
6906 return -ENODEV; 6930 return -ENODEV;
@@ -7781,6 +7805,40 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
7781 phba->sli4_hba.max_cfg_param.max_wq, 7805 phba->sli4_hba.max_cfg_param.max_wq,
7782 phba->sli4_hba.max_cfg_param.max_rq); 7806 phba->sli4_hba.max_cfg_param.max_rq);
7783 7807
7808 /*
7809 * Calculate NVME queue resources based on how
7810 * many WQ/CQs are available.
7811 */
7812 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7813 length = phba->sli4_hba.max_cfg_param.max_wq;
7814 if (phba->sli4_hba.max_cfg_param.max_cq <
7815 phba->sli4_hba.max_cfg_param.max_wq)
7816 length = phba->sli4_hba.max_cfg_param.max_cq;
7817
7818 /*
7819 * What's left after this can go toward NVME.
7820 * The minus 6 accounts for ELS, NVME LS, MBOX,
7821 * fof plus a couple extra. When configured for
7822 * NVMET, FCP io channel WQs are not created.
7823 */
7824 length -= 6;
7825 if (!phba->nvmet_support)
7826 length -= phba->cfg_fcp_io_channel;
7827
7828 if (phba->cfg_nvme_io_channel > length) {
7829 lpfc_printf_log(
7830 phba, KERN_ERR, LOG_SLI,
7831 "2005 Reducing NVME IO channel to %d: "
7832 "WQ %d CQ %d NVMEIO %d FCPIO %d\n",
7833 length,
7834 phba->sli4_hba.max_cfg_param.max_wq,
7835 phba->sli4_hba.max_cfg_param.max_cq,
7836 phba->cfg_nvme_io_channel,
7837 phba->cfg_fcp_io_channel);
7838
7839 phba->cfg_nvme_io_channel = length;
7840 }
7841 }
7784 } 7842 }
7785 7843
7786 if (rc) 7844 if (rc)
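The new block budgets queues rather than assuming them: usable queues are min(max_wq, max_cq), six are reserved (ELS, NVME LS, MBOX, fof and spares), FCP channels are subtracted unless running as an NVMET target, and the NVME channel count is clamped to whatever remains. The arithmetic, as a standalone helper:

/* Clamp the configured NVME channels to the remaining WQ/CQ budget. */
static int clamp_nvme_channels(int max_wq, int max_cq, int fcp_ch,
			       int nvmet_support, int nvme_ch)
{
	int length = max_wq < max_cq ? max_wq : max_cq;

	length -= 6;			/* ELS, NVME LS, MBOX, fof, spares */
	if (!nvmet_support)
		length -= fcp_ch;	/* FCP WQs exist only as initiator */

	return nvme_ch > length ? length : nvme_ch;
}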
@@ -10533,6 +10591,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10533 struct lpfc_pc_sli4_params *sli4_params; 10591 struct lpfc_pc_sli4_params *sli4_params;
10534 uint32_t mbox_tmo; 10592 uint32_t mbox_tmo;
10535 int length; 10593 int length;
10594 bool exp_wqcq_pages = true;
10536 struct lpfc_sli4_parameters *mbx_sli4_parameters; 10595 struct lpfc_sli4_parameters *mbx_sli4_parameters;
10537 10596
10538 /* 10597 /*
@@ -10659,8 +10718,15 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
10659 phba->nvme_support, phba->nvme_embed_pbde, 10718 phba->nvme_support, phba->nvme_embed_pbde,
10660 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); 10719 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
10661 10720
10721 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
10722 LPFC_SLI_INTF_IF_TYPE_2) &&
10723 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
10724 LPFC_SLI_INTF_FAMILY_LNCR_A0))
10725 exp_wqcq_pages = false;
10726
10662 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) && 10727 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
10663 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) && 10728 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
10729 exp_wqcq_pages &&
10664 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT)) 10730 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
10665 phba->enab_exp_wqcq_pages = 1; 10731 phba->enab_exp_wqcq_pages = 1;
10666 else 10732 else
@@ -11322,7 +11388,11 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
11322 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, 11388 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
11323 const struct firmware *fw) 11389 const struct firmware *fw)
11324{ 11390{
11325 if (offset == ADD_STATUS_FW_NOT_SUPPORTED) 11391 if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
11392 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
11393 magic_number != MAGIC_NUMER_G6) ||
11394 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
11395 magic_number != MAGIC_NUMER_G7))
11326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11327 "3030 This firmware version is not supported on " 11397 "3030 This firmware version is not supported on "
11328 "this HBA model. Device:%x Magic:%x Type:%x " 11398 "this HBA model. Device:%x Magic:%x Type:%x "
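The widened condition rejects a firmware image whose magic number does not match the ASIC generation, reusing the MAGIC_NUMER_G6/G7 constants defined in lpfc_hw4.h above. The check, reduced to a predicate (the DEV_* values are illustrative):

#include <stdint.h>

#define MAGIC_NUMER_G6 0xFEAA0003u	/* spelling as in lpfc_hw4.h */
#define MAGIC_NUMER_G7 0xFEAA0005u

enum { DEV_G6, DEV_G7, DEV_OTHER };

static int fw_image_supported(int device, uint32_t magic)
{
	if (device == DEV_G6 && magic != MAGIC_NUMER_G6)
		return 0;	/* G6 HBA, wrong image generation */
	if (device == DEV_G7 && magic != MAGIC_NUMER_G7)
		return 0;	/* G7 HBA, wrong image generation */
	return 1;		/* other models keep the old behavior */
}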
@@ -11719,6 +11789,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
11719 lpfc_nvme_free(phba); 11789 lpfc_nvme_free(phba);
11720 lpfc_free_iocb_list(phba); 11790 lpfc_free_iocb_list(phba);
11721 11791
11792 lpfc_unset_driver_resource_phase2(phba);
11722 lpfc_sli4_driver_resource_unset(phba); 11793 lpfc_sli4_driver_resource_unset(phba);
11723 11794
11724 /* Unmap adapter Control and Doorbell registers */ 11795 /* Unmap adapter Control and Doorbell registers */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 022060636ae1..1a803975bcbc 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1936,31 +1936,14 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1936 goto out; 1936 goto out;
1937 } 1937 }
1938 1938
1939 /* When the rport rejected the FCP PRLI as unsupported. 1939 /* Adjust the nlp_type accordingly if the PRLI failed */
1940 * This should only happen in Pt2Pt so an NVME PRLI 1940 if (npr)
1941 * should be outstanding still.
1942 */
1943 if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) {
1944 ndlp->nlp_fc4_type &= ~NLP_FC4_FCP; 1941 ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
1945 goto out_err; 1942 if (nvpr)
1946 } 1943 ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;
1947
1948 /* The LS Req had some error. Don't let this be a
1949 * target.
1950 */
1951 if ((ndlp->fc4_prli_sent == 1) &&
1952 (ndlp->nlp_state == NLP_STE_PRLI_ISSUE) &&
1953 (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR)))
1954 /* The FCP PRLI completed successfully but
1955 * the NVME PRLI failed. Since they are sent in
1956 * succession, allow the FCP to complete.
1957 */
1958 goto out_err;
1959 1944
1960 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1945 /* We can't set the DSM state till BOTH PRLIs complete */
1961 ndlp->nlp_type |= NLP_FCP_INITIATOR; 1946 goto out_err;
1962 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1963 return ndlp->nlp_state;
1964 } 1947 }
1965 1948
1966 if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) && 1949 if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
@@ -1999,6 +1982,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1999 if (bf_get_be32(prli_disc, nvpr)) 1982 if (bf_get_be32(prli_disc, nvpr))
2000 ndlp->nlp_type |= NLP_NVME_DISCOVERY; 1983 ndlp->nlp_type |= NLP_NVME_DISCOVERY;
2001 1984
1985 /* This node is an NVME target. Adjust the command
1986 * queue depth on this node to not exceed the available
1987 * xris.
1988 */
1989 ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;
1990
2002 /* 1991 /*
2003 * If prli_fba is set, the Target supports FirstBurst. 1992 * If prli_fba is set, the Target supports FirstBurst.
2004 * If prli_fb_sz is 0, the FirstBurst size is unlimited, 1993 * If prli_fb_sz is 0, the FirstBurst size is unlimited,
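The collapsed error handling treats both PRLIs uniformly: whichever one failed simply has its FC4 bit cleared, and no node state is set until both completions have been seen. The bit manipulation, isolated (flag values are stand-ins for NLP_FC4_*):

#define FC4_FCP  0x1u
#define FC4_NVME 0x2u

/* Clear only the FC4 type whose PRLI failed; defer state changes. */
static unsigned int prli_failed(unsigned int fc4_type,
				int fcp_prli_failed, int nvme_prli_failed)
{
	if (fcp_prli_failed)
		fc4_type &= ~FC4_FCP;
	if (nvme_prli_failed)
		fc4_type &= ~FC4_NVME;
	return fc4_type;
}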
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 378dca40ca20..76a5a99605aa 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
@@ -334,7 +334,14 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
334 "6146 remoteport delete of remoteport %p\n", 334 "6146 remoteport delete of remoteport %p\n",
335 remoteport); 335 remoteport);
336 spin_lock_irq(&vport->phba->hbalock); 336 spin_lock_irq(&vport->phba->hbalock);
337 ndlp->nrport = NULL; 337
338 /* The register rebind might have occurred before the delete
339 * downcall. Guard against this race.
340 */
341 if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
342 ndlp->nrport = NULL;
343 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
344 }
338 spin_unlock_irq(&vport->phba->hbalock); 345 spin_unlock_irq(&vport->phba->hbalock);
339 346
340 /* Remove original register reference. The host transport 347 /* Remove original register reference. The host transport
@@ -357,15 +364,19 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
357 struct lpfc_dmabuf *buf_ptr; 364 struct lpfc_dmabuf *buf_ptr;
358 struct lpfc_nodelist *ndlp; 365 struct lpfc_nodelist *ndlp;
359 366
360 atomic_inc(&vport->phba->fc4NvmeLsCmpls);
361
362 pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2; 367 pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
363 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; 368 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
364 if (status) { 369
370 if (vport->localport) {
365 lport = (struct lpfc_nvme_lport *)vport->localport->private; 371 lport = (struct lpfc_nvme_lport *)vport->localport->private;
366 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 372 if (lport) {
367 atomic_inc(&lport->cmpl_ls_xb); 373 atomic_inc(&lport->fc4NvmeLsCmpls);
368 atomic_inc(&lport->cmpl_ls_err); 374 if (status) {
375 if (bf_get(lpfc_wcqe_c_xb, wcqe))
376 atomic_inc(&lport->cmpl_ls_xb);
377 atomic_inc(&lport->cmpl_ls_err);
378 }
379 }
369 } 380 }
370 381
371 ndlp = (struct lpfc_nodelist *)cmdwqe->context1; 382 ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
@@ -570,6 +581,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
570 581
571 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 582 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
572 rport = (struct lpfc_nvme_rport *)pnvme_rport->private; 583 rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
584 if (unlikely(!lport) || unlikely(!rport))
585 return -EINVAL;
586
573 vport = lport->vport; 587 vport = lport->vport;
574 588
575 if (vport->load_flag & FC_UNLOADING) 589 if (vport->load_flag & FC_UNLOADING)
@@ -639,7 +653,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
639 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, 653 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
640 &pnvme_lsreq->rspdma); 654 &pnvme_lsreq->rspdma);
641 655
642 atomic_inc(&vport->phba->fc4NvmeLsRequests); 656 atomic_inc(&lport->fc4NvmeLsRequests);
643 657
644 /* Hardcode the wait to 30 seconds. Connections are failing otherwise. 658 /* Hardcode the wait to 30 seconds. Connections are failing otherwise.
645 * This code allows it all to work. 659 * This code allows it all to work.
@@ -690,6 +704,8 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
690 struct lpfc_iocbq *wqe, *next_wqe; 704 struct lpfc_iocbq *wqe, *next_wqe;
691 705
692 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 706 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
707 if (unlikely(!lport))
708 return;
693 vport = lport->vport; 709 vport = lport->vport;
694 phba = vport->phba; 710 phba = vport->phba;
695 711
@@ -949,28 +965,48 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
949 struct lpfc_nodelist *ndlp; 965 struct lpfc_nodelist *ndlp;
950 struct lpfc_nvme_fcpreq_priv *freqpriv; 966 struct lpfc_nvme_fcpreq_priv *freqpriv;
951 struct lpfc_nvme_lport *lport; 967 struct lpfc_nvme_lport *lport;
968 struct lpfc_nvme_ctrl_stat *cstat;
952 unsigned long flags; 969 unsigned long flags;
953 uint32_t code, status; 970 uint32_t code, status, idx;
954 uint16_t cid, sqhd, data; 971 uint16_t cid, sqhd, data;
955 uint32_t *ptr; 972 uint32_t *ptr;
956 973
957 /* Sanity check on return of outstanding command */ 974 /* Sanity check on return of outstanding command */
958 if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) { 975 if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
976 if (!lpfc_ncmd) {
977 lpfc_printf_vlog(vport, KERN_ERR,
978 LOG_NODE | LOG_NVME_IOERR,
979 "6071 Null lpfc_ncmd pointer. No "
980 "release, skip completion\n");
981 return;
982 }
983
959 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, 984 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
960 "6071 Completion pointers bad on wqe %p.\n", 985 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
961 wcqe); 986 "nvmeCmd %p nrport %p\n",
987 lpfc_ncmd, lpfc_ncmd->nvmeCmd,
988 lpfc_ncmd->nrport);
989
990 /* Release the lpfc_ncmd regardless of the missing elements. */
991 lpfc_release_nvme_buf(phba, lpfc_ncmd);
962 return; 992 return;
963 } 993 }
964 atomic_inc(&phba->fc4NvmeIoCmpls);
965
966 nCmd = lpfc_ncmd->nvmeCmd; 994 nCmd = lpfc_ncmd->nvmeCmd;
967 rport = lpfc_ncmd->nrport; 995 rport = lpfc_ncmd->nrport;
968 status = bf_get(lpfc_wcqe_c_status, wcqe); 996 status = bf_get(lpfc_wcqe_c_status, wcqe);
969 if (status) { 997
998 if (vport->localport) {
970 lport = (struct lpfc_nvme_lport *)vport->localport->private; 999 lport = (struct lpfc_nvme_lport *)vport->localport->private;
971 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 1000 if (lport) {
972 atomic_inc(&lport->cmpl_fcp_xb); 1001 idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
973 atomic_inc(&lport->cmpl_fcp_err); 1002 cstat = &lport->cstat[idx];
1003 atomic_inc(&cstat->fc4NvmeIoCmpls);
1004 if (status) {
1005 if (bf_get(lpfc_wcqe_c_xb, wcqe))
1006 atomic_inc(&lport->cmpl_fcp_xb);
1007 atomic_inc(&lport->cmpl_fcp_err);
1008 }
1009 }
974 } 1010 }
975 1011
976 lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", 1012 lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
@@ -1163,7 +1199,8 @@ out_err:
1163static int 1199static int
1164lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, 1200lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1165 struct lpfc_nvme_buf *lpfc_ncmd, 1201 struct lpfc_nvme_buf *lpfc_ncmd,
1166 struct lpfc_nodelist *pnode) 1202 struct lpfc_nodelist *pnode,
1203 struct lpfc_nvme_ctrl_stat *cstat)
1167{ 1204{
1168 struct lpfc_hba *phba = vport->phba; 1205 struct lpfc_hba *phba = vport->phba;
1169 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; 1206 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
@@ -1201,7 +1238,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1201 } else { 1238 } else {
1202 wqe->fcp_iwrite.initial_xfer_len = 0; 1239 wqe->fcp_iwrite.initial_xfer_len = 0;
1203 } 1240 }
1204 atomic_inc(&phba->fc4NvmeOutputRequests); 1241 atomic_inc(&cstat->fc4NvmeOutputRequests);
1205 } else { 1242 } else {
1206 /* From the iread template, initialize words 7 - 11 */ 1243 /* From the iread template, initialize words 7 - 11 */
1207 memcpy(&wqe->words[7], 1244 memcpy(&wqe->words[7],
@@ -1214,13 +1251,13 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1214 /* Word 5 */ 1251 /* Word 5 */
1215 wqe->fcp_iread.rsrvd5 = 0; 1252 wqe->fcp_iread.rsrvd5 = 0;
1216 1253
1217 atomic_inc(&phba->fc4NvmeInputRequests); 1254 atomic_inc(&cstat->fc4NvmeInputRequests);
1218 } 1255 }
1219 } else { 1256 } else {
1220 /* From the icmnd template, initialize words 4 - 11 */ 1257 /* From the icmnd template, initialize words 4 - 11 */
1221 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 1258 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
1222 sizeof(uint32_t) * 8); 1259 sizeof(uint32_t) * 8);
1223 atomic_inc(&phba->fc4NvmeControlRequests); 1260 atomic_inc(&cstat->fc4NvmeControlRequests);
1224 } 1261 }
1225 /* 1262 /*
1226 * Finish initializing those WQE fields that are independent 1263 * Finish initializing those WQE fields that are independent
@@ -1400,7 +1437,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1400{ 1437{
1401 int ret = 0; 1438 int ret = 0;
1402 int expedite = 0; 1439 int expedite = 0;
1440 int idx;
1403 struct lpfc_nvme_lport *lport; 1441 struct lpfc_nvme_lport *lport;
1442 struct lpfc_nvme_ctrl_stat *cstat;
1404 struct lpfc_vport *vport; 1443 struct lpfc_vport *vport;
1405 struct lpfc_hba *phba; 1444 struct lpfc_hba *phba;
1406 struct lpfc_nodelist *ndlp; 1445 struct lpfc_nodelist *ndlp;
@@ -1425,9 +1464,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1425 vport = lport->vport; 1464 vport = lport->vport;
1426 1465
1427 if (unlikely(!hw_queue_handle)) { 1466 if (unlikely(!hw_queue_handle)) {
1428 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1467 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1429 "6129 Fail Abort, NULL hw_queue_handle\n"); 1468 "6117 Fail IO, NULL hw_queue_handle\n");
1430 ret = -EINVAL; 1469 atomic_inc(&lport->xmt_fcp_err);
1470 ret = -EBUSY;
1431 goto out_fail; 1471 goto out_fail;
1432 } 1472 }
1433 1473
@@ -1439,12 +1479,18 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1439 } 1479 }
1440 1480
1441 if (vport->load_flag & FC_UNLOADING) { 1481 if (vport->load_flag & FC_UNLOADING) {
1482 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1483 "6124 Fail IO, Driver unload\n");
1484 atomic_inc(&lport->xmt_fcp_err);
1442 ret = -ENODEV; 1485 ret = -ENODEV;
1443 goto out_fail; 1486 goto out_fail;
1444 } 1487 }
1445 1488
1446 freqpriv = pnvme_fcreq->private; 1489 freqpriv = pnvme_fcreq->private;
1447 if (unlikely(!freqpriv)) { 1490 if (unlikely(!freqpriv)) {
1491 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1492 "6158 Fail IO, NULL request data\n");
1493 atomic_inc(&lport->xmt_fcp_err);
1448 ret = -EINVAL; 1494 ret = -EINVAL;
1449 goto out_fail; 1495 goto out_fail;
1450 } 1496 }
@@ -1462,32 +1508,26 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1462 */ 1508 */
1463 ndlp = rport->ndlp; 1509 ndlp = rport->ndlp;
1464 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1510 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1465 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, 1511 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1466 "6053 rport %p, ndlp %p, DID x%06x " 1512 "6053 Fail IO, ndlp not ready: rport %p "
1467 "ndlp not ready.\n", 1513 "ndlp %p, DID x%06x\n",
1468 rport, ndlp, pnvme_rport->port_id); 1514 rport, ndlp, pnvme_rport->port_id);
1469 1515 atomic_inc(&lport->xmt_fcp_err);
1470 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); 1516 ret = -EBUSY;
1471 if (!ndlp) { 1517 goto out_fail;
1472 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1473 "6066 Missing node for DID %x\n",
1474 pnvme_rport->port_id);
1475 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1476 ret = -ENODEV;
1477 goto out_fail;
1478 }
1479 } 1518 }
1480 1519
1481 /* The remote node has to be a mapped target or it's an error. */ 1520 /* The remote node has to be a mapped target or it's an error. */
1482 if ((ndlp->nlp_type & NLP_NVME_TARGET) && 1521 if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1483 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 1522 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1484 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, 1523 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1485 "6036 rport %p, DID x%06x not ready for " 1524 "6036 Fail IO, DID x%06x not ready for "
1486 "IO. State x%x, Type x%x\n", 1525 "IO. State x%x, Type x%x Flg x%x\n",
1487 rport, pnvme_rport->port_id, 1526 pnvme_rport->port_id,
1488 ndlp->nlp_state, ndlp->nlp_type); 1527 ndlp->nlp_state, ndlp->nlp_type,
1528 ndlp->upcall_flags);
1489 atomic_inc(&lport->xmt_fcp_bad_ndlp); 1529 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1490 ret = -ENODEV; 1530 ret = -EBUSY;
1491 goto out_fail; 1531 goto out_fail;
1492 1532
1493 } 1533 }
@@ -1508,6 +1548,12 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1508 */ 1548 */
1509 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && 1549 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1510 !expedite) { 1550 !expedite) {
1551 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1552 "6174 Fail IO, ndlp qdepth exceeded: "
1553 "idx %d DID %x pend %d qdepth %d\n",
1554 lpfc_queue_info->index, ndlp->nlp_DID,
1555 atomic_read(&ndlp->cmd_pending),
1556 ndlp->cmd_qdepth);
1511 atomic_inc(&lport->xmt_fcp_qdepth); 1557 atomic_inc(&lport->xmt_fcp_qdepth);
1512 ret = -EBUSY; 1558 ret = -EBUSY;
1513 goto out_fail; 1559 goto out_fail;
@@ -1517,8 +1563,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1517 if (lpfc_ncmd == NULL) { 1563 if (lpfc_ncmd == NULL) {
1518 atomic_inc(&lport->xmt_fcp_noxri); 1564 atomic_inc(&lport->xmt_fcp_noxri);
1519 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1565 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1520 "6065 driver's buffer pool is empty, " 1566 "6065 Fail IO, driver buffer pool is empty: "
1521 "IO failed\n"); 1567 "idx %d DID %x\n",
1568 lpfc_queue_info->index, ndlp->nlp_DID);
1522 ret = -EBUSY; 1569 ret = -EBUSY;
1523 goto out_fail; 1570 goto out_fail;
1524 } 1571 }
@@ -1543,15 +1590,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1543 lpfc_ncmd->ndlp = ndlp; 1590 lpfc_ncmd->ndlp = ndlp;
1544 lpfc_ncmd->start_time = jiffies; 1591 lpfc_ncmd->start_time = jiffies;
1545 1592
1546 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
1547 ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1548 if (ret) {
1549 ret = -ENOMEM;
1550 goto out_free_nvme_buf;
1551 }
1552
1553 atomic_inc(&ndlp->cmd_pending);
1554
1555 /* 1593 /*
1556 * Issue the IO on the WQ indicated by index in the hw_queue_handle. 1594 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
1557 * This identifier was created in our hardware queue create callback 1595 * This identifier was created in our hardware queue create callback
@@ -1560,7 +1598,23 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1560 * index to use and that they have affinitized a CPU to this hardware 1598 * index to use and that they have affinitized a CPU to this hardware
1561 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. 1599 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1562 */ 1600 */
1563 lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index; 1601 idx = lpfc_queue_info->index;
1602 lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1603 cstat = &lport->cstat[idx];
1604
1605 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1606 ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1607 if (ret) {
1608 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1609 "6175 Fail IO, Prep DMA: "
1610 "idx %d DID %x\n",
1611 lpfc_queue_info->index, ndlp->nlp_DID);
1612 atomic_inc(&lport->xmt_fcp_err);
1613 ret = -ENOMEM;
1614 goto out_free_nvme_buf;
1615 }
1616
1617 atomic_inc(&ndlp->cmd_pending);
1564 1618
1565 lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", 1619 lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1566 lpfc_ncmd->cur_iocbq.sli4_xritag, 1620 lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -1571,7 +1625,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1571 atomic_inc(&lport->xmt_fcp_wqerr); 1625 atomic_inc(&lport->xmt_fcp_wqerr);
1572 atomic_dec(&ndlp->cmd_pending); 1626 atomic_dec(&ndlp->cmd_pending);
1573 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1627 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1574 "6113 FCP could not issue WQE err %x " 1628 "6113 Fail IO, Could not issue WQE err %x "
1575 "sid: x%x did: x%x oxid: x%x\n", 1629 "sid: x%x did: x%x oxid: x%x\n",
1576 ret, vport->fc_myDID, ndlp->nlp_DID, 1630 ret, vport->fc_myDID, ndlp->nlp_DID,
1577 lpfc_ncmd->cur_iocbq.sli4_xritag); 1631 lpfc_ncmd->cur_iocbq.sli4_xritag);
@@ -1605,11 +1659,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1605 out_free_nvme_buf: 1659 out_free_nvme_buf:
1606 if (lpfc_ncmd->nvmeCmd->sg_cnt) { 1660 if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1607 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) 1661 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1608 atomic_dec(&phba->fc4NvmeOutputRequests); 1662 atomic_dec(&cstat->fc4NvmeOutputRequests);
1609 else 1663 else
1610 atomic_dec(&phba->fc4NvmeInputRequests); 1664 atomic_dec(&cstat->fc4NvmeInputRequests);
1611 } else 1665 } else
1612 atomic_dec(&phba->fc4NvmeControlRequests); 1666 atomic_dec(&cstat->fc4NvmeControlRequests);
1613 lpfc_release_nvme_buf(phba, lpfc_ncmd); 1667 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1614 out_fail: 1668 out_fail:
1615 return ret; 1669 return ret;
@@ -2390,7 +2444,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2390 struct nvme_fc_port_info nfcp_info; 2444 struct nvme_fc_port_info nfcp_info;
2391 struct nvme_fc_local_port *localport; 2445 struct nvme_fc_local_port *localport;
2392 struct lpfc_nvme_lport *lport; 2446 struct lpfc_nvme_lport *lport;
2393 int len; 2447 struct lpfc_nvme_ctrl_stat *cstat;
2448 int len, i;
2394 2449
2395 /* Initialize this localport instance. The vport wwn usage ensures 2450 /* Initialize this localport instance. The vport wwn usage ensures
2396 * that NPIV is accounted for. 2451 * that NPIV is accounted for.
@@ -2414,6 +2469,11 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2414 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 2469 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2415 lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; 2470 lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
2416 2471
2472 cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
2473 phba->cfg_nvme_io_channel), GFP_KERNEL);
2474 if (!cstat)
2475 return -ENOMEM;
2476
2417 /* localport is allocated from the stack, but the registration 2477 /* localport is allocated from the stack, but the registration
2418 * call allocates heap memory as well as the private area. 2478 * call allocates heap memory as well as the private area.
2419 */ 2479 */
@@ -2436,11 +2496,13 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2436 lport = (struct lpfc_nvme_lport *)localport->private; 2496 lport = (struct lpfc_nvme_lport *)localport->private;
2437 vport->localport = localport; 2497 vport->localport = localport;
2438 lport->vport = vport; 2498 lport->vport = vport;
2499 lport->cstat = cstat;
2439 vport->nvmei_support = 1; 2500 vport->nvmei_support = 1;
2440 2501
2441 atomic_set(&lport->xmt_fcp_noxri, 0); 2502 atomic_set(&lport->xmt_fcp_noxri, 0);
2442 atomic_set(&lport->xmt_fcp_bad_ndlp, 0); 2503 atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2443 atomic_set(&lport->xmt_fcp_qdepth, 0); 2504 atomic_set(&lport->xmt_fcp_qdepth, 0);
2505 atomic_set(&lport->xmt_fcp_err, 0);
2444 atomic_set(&lport->xmt_fcp_wqerr, 0); 2506 atomic_set(&lport->xmt_fcp_wqerr, 0);
2445 atomic_set(&lport->xmt_fcp_abort, 0); 2507 atomic_set(&lport->xmt_fcp_abort, 0);
2446 atomic_set(&lport->xmt_ls_abort, 0); 2508 atomic_set(&lport->xmt_ls_abort, 0);
@@ -2449,6 +2511,16 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2449 atomic_set(&lport->cmpl_fcp_err, 0); 2511 atomic_set(&lport->cmpl_fcp_err, 0);
2450 atomic_set(&lport->cmpl_ls_xb, 0); 2512 atomic_set(&lport->cmpl_ls_xb, 0);
2451 atomic_set(&lport->cmpl_ls_err, 0); 2513 atomic_set(&lport->cmpl_ls_err, 0);
2514 atomic_set(&lport->fc4NvmeLsRequests, 0);
2515 atomic_set(&lport->fc4NvmeLsCmpls, 0);
2516
2517 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
2518 cstat = &lport->cstat[i];
2519 atomic_set(&cstat->fc4NvmeInputRequests, 0);
2520 atomic_set(&cstat->fc4NvmeOutputRequests, 0);
2521 atomic_set(&cstat->fc4NvmeControlRequests, 0);
2522 atomic_set(&cstat->fc4NvmeIoCmpls, 0);
2523 }
2452 2524
2453 /* Don't post more new bufs if repost already recovered 2525 /* Don't post more new bufs if repost already recovered
2454 * the nvme sgls. 2526 * the nvme sgls.
@@ -2458,6 +2530,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2458 phba->sli4_hba.nvme_xri_max); 2530 phba->sli4_hba.nvme_xri_max);
2459 vport->phba->total_nvme_bufs += len; 2531 vport->phba->total_nvme_bufs += len;
2460 } 2532 }
2533 } else {
2534 kfree(cstat);
2461 } 2535 }
2462 2536
2463 return ret; 2537 return ret;
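The hunks above replace the global fc4Nvme* counters on the HBA with a per-I/O-channel lpfc_nvme_ctrl_stat array, so each work queue updates its own counters instead of every CPU contending on one shared cache line. A minimal sketch of the allocate-per-channel pattern, with hypothetical names (io_chan_stat, alloc_chan_stats) standing in for the driver's types; kcalloc() is used here for brevity where the patch pairs kmalloc() with explicit atomic_set() calls:

        struct io_chan_stat {
                atomic_t in_reqs;       /* reads submitted on this channel */
                atomic_t out_reqs;      /* writes submitted on this channel */
                atomic_t ctrl_reqs;     /* non-data commands */
                atomic_t cmpls;         /* completions seen */
        };

        static struct io_chan_stat *alloc_chan_stats(int nr_channels)
        {
                /* kcalloc() returns zeroed memory, so counters start at 0 */
                return kcalloc(nr_channels, sizeof(struct io_chan_stat),
                               GFP_KERNEL);
        }

As in the patch, the array must be freed on every failure path of the registration and once more at localport teardown; otherwise each failed rebind of the localport leaks one allocation per channel.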
@@ -2520,6 +2594,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2520#if (IS_ENABLED(CONFIG_NVME_FC)) 2594#if (IS_ENABLED(CONFIG_NVME_FC))
2521 struct nvme_fc_local_port *localport; 2595 struct nvme_fc_local_port *localport;
2522 struct lpfc_nvme_lport *lport; 2596 struct lpfc_nvme_lport *lport;
2597 struct lpfc_nvme_ctrl_stat *cstat;
2523 int ret; 2598 int ret;
2524 2599
2525 if (vport->nvmei_support == 0) 2600 if (vport->nvmei_support == 0)
@@ -2528,6 +2603,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2528 localport = vport->localport; 2603 localport = vport->localport;
2529 vport->localport = NULL; 2604 vport->localport = NULL;
2530 lport = (struct lpfc_nvme_lport *)localport->private; 2605 lport = (struct lpfc_nvme_lport *)localport->private;
2606 cstat = lport->cstat;
2531 2607
2532 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2608 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2533 "6011 Destroying NVME localport %p\n", 2609 "6011 Destroying NVME localport %p\n",
@@ -2543,6 +2619,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2543 * indefinitely or succeeds 2619 * indefinitely or succeeds
2544 */ 2620 */
2545 lpfc_nvme_lport_unreg_wait(vport, lport); 2621 lpfc_nvme_lport_unreg_wait(vport, lport);
2622 kfree(cstat);
2546 2623
2547 /* Regardless of the unregister upcall response, clear 2624 /* Regardless of the unregister upcall response, clear
2548 * nvmei_support. All rports are unregistered and the 2625 * nvmei_support. All rports are unregistered and the
@@ -2607,6 +2684,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2607 struct nvme_fc_local_port *localport; 2684 struct nvme_fc_local_port *localport;
2608 struct lpfc_nvme_lport *lport; 2685 struct lpfc_nvme_lport *lport;
2609 struct lpfc_nvme_rport *rport; 2686 struct lpfc_nvme_rport *rport;
2687 struct lpfc_nvme_rport *oldrport;
2610 struct nvme_fc_remote_port *remote_port; 2688 struct nvme_fc_remote_port *remote_port;
2611 struct nvme_fc_port_info rpinfo; 2689 struct nvme_fc_port_info rpinfo;
2612 struct lpfc_nodelist *prev_ndlp; 2690 struct lpfc_nodelist *prev_ndlp;
@@ -2639,7 +2717,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2639 2717
2640 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 2718 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2641 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 2719 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2642 if (!ndlp->nrport) 2720
2721 oldrport = lpfc_ndlp_get_nrport(ndlp);
2722 if (!oldrport)
2643 lpfc_nlp_get(ndlp); 2723 lpfc_nlp_get(ndlp);
2644 2724
2645 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); 2725 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
@@ -2648,9 +2728,15 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2648 * a resume of the existing rport. Else this is a 2728 * a resume of the existing rport. Else this is a
2649 * new rport. 2729 * new rport.
2650 */ 2730 */
2731 /* Guard against an unregister/reregister
2732 * race that leaves the WAIT flag set.
2733 */
2734 spin_lock_irq(&vport->phba->hbalock);
2735 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2736 spin_unlock_irq(&vport->phba->hbalock);
2651 rport = remote_port->private; 2737 rport = remote_port->private;
2652 if (ndlp->nrport) { 2738 if (oldrport) {
2653 if (ndlp->nrport == remote_port->private) { 2739 if (oldrport == remote_port->private) {
2654 /* Same remoteport. Just reuse. */ 2740 /* Same remoteport. Just reuse. */
2655 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 2741 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2656 LOG_NVME_DISC, 2742 LOG_NVME_DISC,
@@ -2674,11 +2760,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2674 */ 2760 */
2675 spin_lock_irq(&vport->phba->hbalock); 2761 spin_lock_irq(&vport->phba->hbalock);
2676 ndlp->nrport = NULL; 2762 ndlp->nrport = NULL;
2763 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2677 spin_unlock_irq(&vport->phba->hbalock); 2764 spin_unlock_irq(&vport->phba->hbalock);
2678 rport->ndlp = NULL; 2765 rport->ndlp = NULL;
2679 rport->remoteport = NULL; 2766 rport->remoteport = NULL;
2680 if (prev_ndlp) 2767
2681 lpfc_nlp_put(ndlp); 2768 /* The reference is only removed if the previous NDLP is
2769 * no longer active. It might be just a swap, and removing
2770 * the reference would cause a premature cleanup.
2771 */
2772 if (prev_ndlp && prev_ndlp != ndlp) {
2773 if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
2774 (!prev_ndlp->nrport))
2775 lpfc_nlp_put(prev_ndlp);
2776 }
2682 } 2777 }
2683 2778
2684 /* Clean bind the rport to the ndlp. */ 2779 /* Clean bind the rport to the ndlp. */
@@ -2746,7 +2841,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2746 if (!lport) 2841 if (!lport)
2747 goto input_err; 2842 goto input_err;
2748 2843
2749 rport = ndlp->nrport; 2844 rport = lpfc_ndlp_get_nrport(ndlp);
2750 if (!rport) 2845 if (!rport)
2751 goto input_err; 2846 goto input_err;
2752 2847
@@ -2767,6 +2862,15 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2767 * The transport will update it. 2862 * The transport will update it.
2768 */ 2863 */
2769 ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG; 2864 ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
2865
2866 /* Don't let the host nvme transport keep sending keep-alives
2867 * on this remoteport. Vport is unloading, no recovery. The
2868 * return value is ignored. The upcall is a courtesy to the
2869 * transport.
2870 */
2871 if (vport->load_flag & FC_UNLOADING)
2872 (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2873
2770 ret = nvme_fc_unregister_remoteport(remoteport); 2874 ret = nvme_fc_unregister_remoteport(remoteport);
2771 if (ret != 0) { 2875 if (ret != 0) {
2772 lpfc_nlp_put(ndlp); 2876 lpfc_nlp_put(ndlp);
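The FC_UNLOADING branch added above shuts off transport keep-alives before the unregister call. The ordering matters: with a zero dev_loss window the transport gives up on the remoteport immediately instead of scheduling reconnect work against a port that is going away. Reduced to a sketch (both calls are the real nvme-fc transport API; the surrounding names are illustrative):

        /* vport is being torn down: no recovery is wanted */
        if (unloading)
                (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
        ret = nvme_fc_unregister_remoteport(remoteport);
        if (ret)
                /* advisory only: teardown continues regardless */
                pr_info("remoteport unregister returned %d\n", ret);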
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 9216653e0441..04bd463dd043 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
@@ -30,21 +30,36 @@
30#define LPFC_NVME_FB_SHIFT 9 30#define LPFC_NVME_FB_SHIFT 9
31#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */ 31#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
32 32
33#define lpfc_ndlp_get_nrport(ndlp) \
34 ((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \
35 ? NULL : ndlp->nrport)
36
33struct lpfc_nvme_qhandle { 37struct lpfc_nvme_qhandle {
34 uint32_t index; /* WQ index to use */ 38 uint32_t index; /* WQ index to use */
35 uint32_t qidx; /* queue index passed to create */ 39 uint32_t qidx; /* queue index passed to create */
36 uint32_t cpu_id; /* current cpu id at time of create */ 40 uint32_t cpu_id; /* current cpu id at time of create */
37}; 41};
38 42
43struct lpfc_nvme_ctrl_stat {
44 atomic_t fc4NvmeInputRequests;
45 atomic_t fc4NvmeOutputRequests;
46 atomic_t fc4NvmeControlRequests;
47 atomic_t fc4NvmeIoCmpls;
48};
49
39/* Declare nvme-based local and remote port definitions. */ 50/* Declare nvme-based local and remote port definitions. */
40struct lpfc_nvme_lport { 51struct lpfc_nvme_lport {
41 struct lpfc_vport *vport; 52 struct lpfc_vport *vport;
42 struct completion lport_unreg_done; 53 struct completion lport_unreg_done;
43 /* Add stats counters here */ 54 /* Add stats counters here */
55 struct lpfc_nvme_ctrl_stat *cstat;
56 atomic_t fc4NvmeLsRequests;
57 atomic_t fc4NvmeLsCmpls;
44 atomic_t xmt_fcp_noxri; 58 atomic_t xmt_fcp_noxri;
45 atomic_t xmt_fcp_bad_ndlp; 59 atomic_t xmt_fcp_bad_ndlp;
46 atomic_t xmt_fcp_qdepth; 60 atomic_t xmt_fcp_qdepth;
47 atomic_t xmt_fcp_wqerr; 61 atomic_t xmt_fcp_wqerr;
62 atomic_t xmt_fcp_err;
48 atomic_t xmt_fcp_abort; 63 atomic_t xmt_fcp_abort;
49 atomic_t xmt_ls_abort; 64 atomic_t xmt_ls_abort;
50 atomic_t xmt_ls_err; 65 atomic_t xmt_ls_err;
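The lpfc_ndlp_get_nrport() macro added above is central to this series' rport-lifetime fixes: any reader that goes through it sees NULL while an unregister is pending, instead of a half-torn-down rport. The same check written as a static inline, which would type-check its argument and avoid double evaluation; a sketch only, the driver itself keeps the macro form:

        static inline struct lpfc_nvme_rport *
        ndlp_get_nrport(struct lpfc_nodelist *ndlp)
        {
                /* treat an rport that is mid-unregister as already gone */
                if (!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG))
                        return NULL;
                return ndlp->nrport;
        }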
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index c1bcef3f103c..81f520abfd64 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -22,8 +22,10 @@
22 ********************************************************************/ 22 ********************************************************************/
23 23
24#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */ 24#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
25#define LPFC_NVMET_RQE_DEF_COUNT 512 25#define LPFC_NVMET_RQE_MIN_POST 128
26#define LPFC_NVMET_SUCCESS_LEN 12 26#define LPFC_NVMET_RQE_DEF_POST 512
27#define LPFC_NVMET_RQE_DEF_COUNT 2048
28#define LPFC_NVMET_SUCCESS_LEN 12
27 29
28#define LPFC_NVMET_MRQ_OFF 0xffff 30#define LPFC_NVMET_MRQ_OFF 0xffff
29#define LPFC_NVMET_MRQ_AUTO 0 31#define LPFC_NVMET_MRQ_AUTO 0
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 050f04418f5f..a94fb9f8bb44 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
@@ -1021,7 +1021,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1021 if (lpfc_test_rrq_active(phba, ndlp, 1021 if (lpfc_test_rrq_active(phba, ndlp,
1022 lpfc_cmd->cur_iocbq.sli4_lxritag)) 1022 lpfc_cmd->cur_iocbq.sli4_lxritag))
1023 continue; 1023 continue;
1024 list_del(&lpfc_cmd->list); 1024 list_del_init(&lpfc_cmd->list);
1025 found = 1; 1025 found = 1;
1026 break; 1026 break;
1027 } 1027 }
@@ -1036,7 +1036,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1036 if (lpfc_test_rrq_active( 1036 if (lpfc_test_rrq_active(
1037 phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) 1037 phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
1038 continue; 1038 continue;
1039 list_del(&lpfc_cmd->list); 1039 list_del_init(&lpfc_cmd->list);
1040 found = 1; 1040 found = 1;
1041 break; 1041 break;
1042 } 1042 }
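Both conversions above swap list_del() for list_del_init(). The distinction: list_del() poisons the entry's next/prev pointers, so a later list_empty() test or repeated removal on the same entry is undefined behaviour, while list_del_init() re-points the entry at itself and keeps such checks safe. In miniature:

        #include <linux/list.h>

        struct buf {
                struct list_head list;
        };

        static void take_buf(struct buf *b)
        {
                list_del_init(&b->list);        /* entry now empty, not poisoned */
                WARN_ON(!list_empty(&b->list)); /* safe to ask after removal */
        }

This is the right choice when a buffer's list head may be examined again after it leaves the free list.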
@@ -3983,9 +3983,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3983 } 3983 }
3984#endif 3984#endif
3985 3985
3986 if (pnode && NLP_CHK_NODE_ACT(pnode))
3987 atomic_dec(&pnode->cmd_pending);
3988
3989 if (lpfc_cmd->status) { 3986 if (lpfc_cmd->status) {
3990 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 3987 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3991 (lpfc_cmd->result & IOERR_DRVR_MASK)) 3988 (lpfc_cmd->result & IOERR_DRVR_MASK))
@@ -4125,6 +4122,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4125 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4122 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4126 spin_lock_irqsave(shost->host_lock, flags); 4123 spin_lock_irqsave(shost->host_lock, flags);
4127 if (pnode && NLP_CHK_NODE_ACT(pnode)) { 4124 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4125 atomic_dec(&pnode->cmd_pending);
4128 if (pnode->cmd_qdepth > 4126 if (pnode->cmd_qdepth >
4129 atomic_read(&pnode->cmd_pending) && 4127 atomic_read(&pnode->cmd_pending) &&
4130 (atomic_read(&pnode->cmd_pending) > 4128 (atomic_read(&pnode->cmd_pending) >
@@ -4138,16 +4136,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4138 } 4136 }
4139 spin_unlock_irqrestore(shost->host_lock, flags); 4137 spin_unlock_irqrestore(shost->host_lock, flags);
4140 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { 4138 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4141 if ((pnode->cmd_qdepth != vport->cfg_tgt_queue_depth) && 4139 atomic_dec(&pnode->cmd_pending);
4142 time_after(jiffies, pnode->last_change_time +
4143 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
4144 spin_lock_irqsave(shost->host_lock, flags);
4145 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
4146 pnode->last_change_time = jiffies;
4147 spin_unlock_irqrestore(shost->host_lock, flags);
4148 }
4149 } 4140 }
4150
4151 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4141 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4152 4142
4153 spin_lock_irqsave(&phba->hbalock, flags); 4143 spin_lock_irqsave(&phba->hbalock, flags);
@@ -4591,6 +4581,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4591 ndlp->nlp_portname.u.wwn[7]); 4581 ndlp->nlp_portname.u.wwn[7]);
4592 goto out_tgt_busy; 4582 goto out_tgt_busy;
4593 } 4583 }
4584 atomic_inc(&ndlp->cmd_pending);
4585
4594 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); 4586 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
4595 if (lpfc_cmd == NULL) { 4587 if (lpfc_cmd == NULL) {
4596 lpfc_rampdown_queue_depth(phba); 4588 lpfc_rampdown_queue_depth(phba);
@@ -4643,11 +4635,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4643 4635
4644 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 4636 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4645 4637
4646 atomic_inc(&ndlp->cmd_pending);
4647 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, 4638 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4648 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 4639 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4649 if (err) { 4640 if (err) {
4650 atomic_dec(&ndlp->cmd_pending);
4651 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4641 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4652 "3376 FCP could not issue IOCB err %x" 4642 "3376 FCP could not issue IOCB err %x"
4653 "FCP cmd x%x <%d/%llu> " 4643 "FCP cmd x%x <%d/%llu> "
@@ -4691,6 +4681,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4691 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4681 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4692 lpfc_release_scsi_buf(phba, lpfc_cmd); 4682 lpfc_release_scsi_buf(phba, lpfc_cmd);
4693 out_host_busy: 4683 out_host_busy:
4684 atomic_dec(&ndlp->cmd_pending);
4694 return SCSI_MLQUEUE_HOST_BUSY; 4685 return SCSI_MLQUEUE_HOST_BUSY;
4695 4686
4696 out_tgt_busy: 4687 out_tgt_busy:
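Taken together, the queuecommand and completion hunks move the cmd_pending accounting so the counter is raised once on entry, covers the buffer-allocation window as well, and is lowered exactly once on every exit, including the new out_host_busy decrement. The invariant, as a self-contained sketch (all names hypothetical):

        struct tgt {
                atomic_t cmd_pending;
        };

        static int prep_resources(void);        /* stand-in for buffer setup */
        static int issue_to_hw(void);           /* stand-in for ring submit */

        static int submit_one(struct tgt *t)
        {
                atomic_inc(&t->cmd_pending);    /* count before any allocation */
                if (!prep_resources())
                        goto busy;
                if (issue_to_hw())
                        goto busy;
                return 0;                       /* completion path decrements */
        busy:
                atomic_dec(&t->cmd_pending);    /* failed exits undo the count */
                return -EBUSY;
        }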
@@ -4725,7 +4716,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4725 int ret = SUCCESS, status = 0; 4716 int ret = SUCCESS, status = 0;
4726 struct lpfc_sli_ring *pring_s4; 4717 struct lpfc_sli_ring *pring_s4;
4727 int ret_val; 4718 int ret_val;
4728 unsigned long flags, iflags; 4719 unsigned long flags;
4729 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 4720 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4730 4721
4731 status = fc_block_scsi_eh(cmnd); 4722 status = fc_block_scsi_eh(cmnd);
@@ -4825,16 +4816,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4825 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 4816 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4826 abtsiocb->vport = vport; 4817 abtsiocb->vport = vport;
4827 if (phba->sli_rev == LPFC_SLI_REV4) { 4818 if (phba->sli_rev == LPFC_SLI_REV4) {
4828 pring_s4 = lpfc_sli4_calc_ring(phba, iocb); 4819 pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
4829 if (pring_s4 == NULL) { 4820 if (pring_s4 == NULL) {
4830 ret = FAILED; 4821 ret = FAILED;
4831 goto out_unlock; 4822 goto out_unlock;
4832 } 4823 }
4833 /* Note: both hbalock and ring_lock must be set here */ 4824 /* Note: both hbalock and ring_lock must be set here */
4834 spin_lock_irqsave(&pring_s4->ring_lock, iflags); 4825 spin_lock(&pring_s4->ring_lock);
4835 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 4826 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
4836 abtsiocb, 0); 4827 abtsiocb, 0);
4837 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); 4828 spin_unlock(&pring_s4->ring_lock);
4838 } else { 4829 } else {
4839 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, 4830 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4840 abtsiocb, 0); 4831 abtsiocb, 0);
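The abort path above drops its private irqsave of ring_lock because it already sits inside spin_lock_irqsave(&phba->hbalock, flags), which is also why the separate iflags variable disappears: once the outer lock has disabled local interrupts, the nested lock needs only the plain spin_lock()/spin_unlock() pair. The shape of the pattern, with hypothetical locks:

        static DEFINE_SPINLOCK(outer_lock);
        static DEFINE_SPINLOCK(inner_lock);

        static void post_abort(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&outer_lock, flags);  /* irqs now off */
                spin_lock(&inner_lock);                 /* nested: plain lock */
                /* ... queue the abort WQE ... */
                spin_unlock(&inner_lock);
                spin_unlock_irqrestore(&outer_lock, flags);
        }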
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 8e38e0204c47..c38e4da71f5f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index cb17e2b2be81..4b70d53acb72 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
@@ -96,6 +96,34 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
96 return &iocbq->iocb; 96 return &iocbq->iocb;
97} 97}
98 98
99#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
100/**
101 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
102 * @srcp: Source memory pointer.
103 * @destp: Destination memory pointer.
104 * @cnt: Number of bytes to copy.
105 * Must be a multiple of sizeof(uint64_t).
106 *
107 * This function is used for copying data between driver memory
108 * and the SLI WQ. It is built only on 64-bit little-endian hosts,
109 * where native byte order already matches SLI byte order, so the
110 * copy runs in 64-bit chunks with no per-word endian conversion.
111 * It can be called with or without a lock held.
112 **/
113void
114lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
115{
116 uint64_t *src = srcp;
117 uint64_t *dest = destp;
118 int i;
119
120 for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
121 *dest++ = *src++;
122}
123#else
124#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
125#endif
126
99/** 127/**
100 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue 128 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
101 * @q: The Work Queue to operate on. 129 * @q: The Work Queue to operate on.
@@ -137,7 +165,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
137 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); 165 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
138 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 166 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
139 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 167 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
140 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 168 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
141 if (q->dpp_enable && q->phba->cfg_enable_dpp) { 169 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
142 /* write to DPP aperture taking advantage of Combined Writes */ 170 /* write to DPP aperture taking advantage of Combined Writes */
143 tmp = (uint8_t *)temp_wqe; 171 tmp = (uint8_t *)temp_wqe;
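lpfc_sli4_pcimem_bcopy() exists because on a 64-bit little-endian host the CPU byte order already matches the SLI4 queue entries, so the word-by-word lpfc_sli_pcimem_bcopy() was doing per-word endian fixups that are pure overhead there; the new routine copies in 64-bit chunks and the #else keeps the swapping path everywhere else. What the generic path has to do, sketched under the assumption that the fallback converts each 32-bit word:

        static void pcimem_bcopy_le(const void *srcp, void *destp, u32 bytes)
        {
                const __le32 *src = srcp;
                u32 *dest = destp;
                u32 i;

                for (i = 0; i < bytes; i += sizeof(u32))
                        *dest++ = le32_to_cpu(*src++);  /* swaps on BE hosts */
        }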
@@ -240,7 +268,7 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
240 /* If the host has not yet processed the next entry then we are done */ 268 /* If the host has not yet processed the next entry then we are done */
241 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 269 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
242 return -ENOMEM; 270 return -ENOMEM;
243 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size); 271 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
244 /* Save off the mailbox pointer for completion */ 272 /* Save off the mailbox pointer for completion */
245 q->phba->mbox = (MAILBOX_t *)temp_mqe; 273 q->phba->mbox = (MAILBOX_t *)temp_mqe;
246 274
@@ -663,8 +691,8 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
663 /* If the host has not yet processed the next entry then we are done */ 691 /* If the host has not yet processed the next entry then we are done */
664 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index) 692 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
665 return -EBUSY; 693 return -EBUSY;
666 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 694 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
667 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 695 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
668 696
669 /* Update the host index to point to the next slot */ 697 /* Update the host index to point to the next slot */
670 hq->host_index = ((hq_put_index + 1) % hq->entry_count); 698 hq->host_index = ((hq_put_index + 1) % hq->entry_count);
@@ -7199,7 +7227,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7199 lpfc_post_rq_buffer( 7227 lpfc_post_rq_buffer(
7200 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 7228 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7201 phba->sli4_hba.nvmet_mrq_data[i], 7229 phba->sli4_hba.nvmet_mrq_data[i],
7202 LPFC_NVMET_RQE_DEF_COUNT, i); 7230 phba->cfg_nvmet_mrq_post, i);
7203 } 7231 }
7204 } 7232 }
7205 7233
@@ -8185,8 +8213,8 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8185 */ 8213 */
8186 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 8214 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8187 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 8215 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8188 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 8216 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8189 sizeof(struct lpfc_mqe)); 8217 sizeof(struct lpfc_mqe));
8190 8218
8191 /* Post the high mailbox dma address to the port and wait for ready. */ 8219 /* Post the high mailbox dma address to the port and wait for ready. */
8192 dma_address = &phba->sli4_hba.bmbx.dma_address; 8220 dma_address = &phba->sli4_hba.bmbx.dma_address;
@@ -8210,11 +8238,11 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8210 * If so, update the mailbox status so that the upper layers 8238 * If so, update the mailbox status so that the upper layers
8211 * can complete the request normally. 8239 * can complete the request normally.
8212 */ 8240 */
8213 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 8241 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8214 sizeof(struct lpfc_mqe)); 8242 sizeof(struct lpfc_mqe));
8215 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 8243 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8216 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 8244 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8217 sizeof(struct lpfc_mcqe)); 8245 sizeof(struct lpfc_mcqe));
8218 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 8246 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8219 /* 8247 /*
8220 * When the CQE status indicates a failure and the mailbox status 8248 * When the CQE status indicates a failure and the mailbox status
@@ -11300,11 +11328,11 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11300 unsigned long iflags; 11328 unsigned long iflags;
11301 struct lpfc_sli_ring *pring_s4; 11329 struct lpfc_sli_ring *pring_s4;
11302 11330
11303 spin_lock_irq(&phba->hbalock); 11331 spin_lock_irqsave(&phba->hbalock, iflags);
11304 11332
11305 /* all I/Os are in process of being flushed */ 11333 /* all I/Os are in process of being flushed */
11306 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11334 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11307 spin_unlock_irq(&phba->hbalock); 11335 spin_unlock_irqrestore(&phba->hbalock, iflags);
11308 return 0; 11336 return 0;
11309 } 11337 }
11310 sum = 0; 11338 sum = 0;
@@ -11366,14 +11394,14 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11366 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11394 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11367 11395
11368 if (phba->sli_rev == LPFC_SLI_REV4) { 11396 if (phba->sli_rev == LPFC_SLI_REV4) {
11369 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11397 pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
11370 if (pring_s4 == NULL) 11398 if (!pring_s4)
11371 continue; 11399 continue;
11372 /* Note: both hbalock and ring_lock must be set here */ 11400 /* Note: both hbalock and ring_lock must be set here */
11373 spin_lock_irqsave(&pring_s4->ring_lock, iflags); 11401 spin_lock(&pring_s4->ring_lock);
11374 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11402 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11375 abtsiocbq, 0); 11403 abtsiocbq, 0);
11376 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); 11404 spin_unlock(&pring_s4->ring_lock);
11377 } else { 11405 } else {
11378 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11406 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11379 abtsiocbq, 0); 11407 abtsiocbq, 0);
@@ -11385,7 +11413,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11385 else 11413 else
11386 sum++; 11414 sum++;
11387 } 11415 }
11388 spin_unlock_irq(&phba->hbalock); 11416 spin_unlock_irqrestore(&phba->hbalock, iflags);
11389 return sum; 11417 return sum;
11390} 11418}
11391 11419
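The irq-save conversion above is the standard fix for a function that may be entered with interrupts already disabled: spin_unlock_irq() unconditionally re-enables interrupts on the way out, while the save/restore pair puts back whatever state the caller had. The pattern in isolation:

        static int walk_and_abort(spinlock_t *lock)
        {
                unsigned long flags;
                int sum = 0;

                spin_lock_irqsave(lock, flags);         /* records IRQ state */
                /* ... scan outstanding I/Os, issue aborts, count them ... */
                spin_unlock_irqrestore(lock, flags);    /* restores that state */
                return sum;
        }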
@@ -12830,7 +12858,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12830 12858
12831 /* Move mbox data to caller's mailbox region, do endian swapping */ 12859 /* Move mbox data to caller's mailbox region, do endian swapping */
12832 if (pmb->mbox_cmpl && mbox) 12860 if (pmb->mbox_cmpl && mbox)
12833 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 12861 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
12834 12862
12835 /* 12863 /*
12836 * For mcqe errors, conditionally move a modified error code to 12864 * For mcqe errors, conditionally move a modified error code to
@@ -12913,7 +12941,7 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12913 bool workposted; 12941 bool workposted;
12914 12942
12915 /* Copy the mailbox MCQE and convert endian order as needed */ 12943 /* Copy the mailbox MCQE and convert endian order as needed */
12916 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 12944 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12917 12945
12918 /* Invoke the proper event handling routine */ 12946 /* Invoke the proper event handling routine */
12919 if (!bf_get(lpfc_trailer_async, &mcqe)) 12947 if (!bf_get(lpfc_trailer_async, &mcqe))
@@ -12944,6 +12972,17 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12944 int txcmplq_cnt = 0; 12972 int txcmplq_cnt = 0;
12945 int fcp_txcmplq_cnt = 0; 12973 int fcp_txcmplq_cnt = 0;
12946 12974
12975 /* Check for response status */
12976 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
12977 /* Log the error status */
12978 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12979 "0357 ELS CQE error: status=x%x: "
12980 "CQE: %08x %08x %08x %08x\n",
12981 bf_get(lpfc_wcqe_c_status, wcqe),
12982 wcqe->word0, wcqe->total_data_placed,
12983 wcqe->parameter, wcqe->word3);
12984 }
12985
12947 /* Get an irspiocbq for later ELS response processing use */ 12986 /* Get an irspiocbq for later ELS response processing use */
12948 irspiocbq = lpfc_sli_get_iocbq(phba); 12987 irspiocbq = lpfc_sli_get_iocbq(phba);
12949 if (!irspiocbq) { 12988 if (!irspiocbq) {
@@ -13173,7 +13212,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13173 bool workposted = false; 13212 bool workposted = false;
13174 13213
13175 /* Copy the work queue CQE and convert endian order if needed */ 13214 /* Copy the work queue CQE and convert endian order if needed */
13176 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 13215 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13177 13216
13178 /* Check and process for different type of WCQE and dispatch */ 13217 /* Check and process for different type of WCQE and dispatch */
13179 switch (bf_get(lpfc_cqe_code, &cqevt)) { 13218 switch (bf_get(lpfc_cqe_code, &cqevt)) {
@@ -13364,14 +13403,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13364 phba->lpfc_rampdown_queue_depth(phba); 13403 phba->lpfc_rampdown_queue_depth(phba);
13365 13404
13366 /* Log the error status */ 13405 /* Log the error status */
13367 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13406 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13368 "0373 FCP complete error: status=x%x, " 13407 "0373 FCP CQE error: status=x%x: "
13369 "hw_status=x%x, total_data_specified=%d, " 13408 "CQE: %08x %08x %08x %08x\n",
13370 "parameter=x%x, word3=x%x\n",
13371 bf_get(lpfc_wcqe_c_status, wcqe), 13409 bf_get(lpfc_wcqe_c_status, wcqe),
13372 bf_get(lpfc_wcqe_c_hw_status, wcqe), 13410 wcqe->word0, wcqe->total_data_placed,
13373 wcqe->total_data_placed, wcqe->parameter, 13411 wcqe->parameter, wcqe->word3);
13374 wcqe->word3);
13375 } 13412 }
13376 13413
13377 /* Look up the FCP command IOCB and create pseudo response IOCB */ 13414 /* Look up the FCP command IOCB and create pseudo response IOCB */
@@ -13581,7 +13618,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13581 bool workposted = false; 13618 bool workposted = false;
13582 13619
13583 /* Copy the work queue CQE and convert endian order if needed */ 13620 /* Copy the work queue CQE and convert endian order if needed */
13584 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 13621 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13585 13622
13586 /* Check and process for different type of WCQE and dispatch */ 13623 /* Check and process for different type of WCQE and dispatch */
13587 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 13624 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
@@ -19032,9 +19069,22 @@ lpfc_drain_txq(struct lpfc_hba *phba)
19032 struct lpfc_sglq *sglq; 19069 struct lpfc_sglq *sglq;
19033 union lpfc_wqe128 wqe; 19070 union lpfc_wqe128 wqe;
19034 uint32_t txq_cnt = 0; 19071 uint32_t txq_cnt = 0;
19072 struct lpfc_queue *wq;
19035 19073
19036 pring = lpfc_phba_elsring(phba); 19074 if (phba->link_flag & LS_MDS_LOOPBACK) {
19037 if (unlikely(!pring)) 19075 /* MDS WQEs are posted only to the first WQ */
19076 wq = phba->sli4_hba.fcp_wq[0];
19077 if (unlikely(!wq))
19078 return 0;
19079 pring = wq->pring;
19080 } else {
19081 wq = phba->sli4_hba.els_wq;
19082 if (unlikely(!wq))
19083 return 0;
19084 pring = lpfc_phba_elsring(phba);
19085 }
19086
19087 if (unlikely(!pring) || list_empty(&pring->txq))
19038 return 0; 19088 return 0;
19039 19089
19040 spin_lock_irqsave(&pring->ring_lock, iflags); 19090 spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -19075,7 +19125,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
19075 fail_msg = "to convert bpl to sgl"; 19125 fail_msg = "to convert bpl to sgl";
19076 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 19126 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19077 fail_msg = "to convert iocb to wqe"; 19127 fail_msg = "to convert iocb to wqe";
19078 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 19128 else if (lpfc_sli4_wq_put(wq, &wqe))
19079 fail_msg = " - Wq is full"; 19129 fail_msg = " - Wq is full";
19080 else 19130 else
19081 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 19131 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
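The reworked lpfc_drain_txq() entry mirrors the submit side: in MDS loopback mode WQEs go to the first FCP work queue, so the drain must target that same queue and its pring rather than always assuming the ELS WQ, and the added NULL checks cover teardown windows where a queue no longer exists. The selection, condensed from the hunk above using the driver's own fields:

        if (phba->link_flag & LS_MDS_LOOPBACK) {
                wq = phba->sli4_hba.fcp_wq[0];  /* MDS frames post here */
                pring = wq ? wq->pring : NULL;
        } else {
                wq = phba->sli4_hba.els_wq;
                pring = lpfc_phba_elsring(phba);
        }
        if (unlikely(!wq || !pring) || list_empty(&pring->txq))
                return 0;                       /* nothing to drain */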
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e8b089abbfb3..18c23afcf46b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -2,7 +2,7 @@
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. * 7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com * 8 * www.broadcom.com *
@@ -20,7 +20,7 @@
20 * included with this package. * 20 * included with this package. *
21 *******************************************************************/ 21 *******************************************************************/
22 22
23#define LPFC_DRIVER_VERSION "12.0.0.1" 23#define LPFC_DRIVER_VERSION "12.0.0.4"
24#define LPFC_DRIVER_NAME "lpfc" 24#define LPFC_DRIVER_NAME "lpfc"
25 25
26/* Used for SLI 2/3 */ 26/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 91f5e2c68dbc..3b3767e240d8 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4166,6 +4166,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4166 int irq, i, j; 4166 int irq, i, j;
4167 int error = -ENODEV; 4167 int error = -ENODEV;
4168 4168
4169 if (hba_count >= MAX_CONTROLLERS)
4170 goto out;
4171
4169 if (pci_enable_device(pdev)) 4172 if (pci_enable_device(pdev))
4170 goto out; 4173 goto out;
4171 pci_set_master(pdev); 4174 pci_set_master(pdev);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 27fab8235ea5..75dc25f78336 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
35/* 35/*
36 * MegaRAID SAS Driver meta data 36 * MegaRAID SAS Driver meta data
37 */ 37 */
38#define MEGASAS_VERSION "07.704.04.00-rc1" 38#define MEGASAS_VERSION "07.705.02.00-rc1"
39#define MEGASAS_RELDATE "December 7, 2017" 39#define MEGASAS_RELDATE "April 4, 2018"
40 40
41/* 41/*
42 * Device IDs 42 * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index ce656c466ca9..c5d0c4bd71d2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -92,7 +92,7 @@ MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
92 92
93int smp_affinity_enable = 1; 93int smp_affinity_enable = 1;
94module_param(smp_affinity_enable, int, S_IRUGO); 94module_param(smp_affinity_enable, int, S_IRUGO);
95MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)"); 95MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
96 96
97int rdpq_enable = 1; 97int rdpq_enable = 1;
98module_param(rdpq_enable, int, S_IRUGO); 98module_param(rdpq_enable, int, S_IRUGO);
@@ -2224,9 +2224,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2224 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2224 sizeof(struct MR_LD_VF_AFFILIATION_111));
2225 else { 2225 else {
2226 new_affiliation_111 = 2226 new_affiliation_111 =
2227 pci_alloc_consistent(instance->pdev, 2227 pci_zalloc_consistent(instance->pdev,
2228 sizeof(struct MR_LD_VF_AFFILIATION_111), 2228 sizeof(struct MR_LD_VF_AFFILIATION_111),
2229 &new_affiliation_111_h); 2229 &new_affiliation_111_h);
2230 if (!new_affiliation_111) { 2230 if (!new_affiliation_111) {
2231 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2231 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2232 "memory for new affiliation for scsi%d\n", 2232 "memory for new affiliation for scsi%d\n",
@@ -2234,8 +2234,6 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2234 megasas_return_cmd(instance, cmd); 2234 megasas_return_cmd(instance, cmd);
2235 return -ENOMEM; 2235 return -ENOMEM;
2236 } 2236 }
2237 memset(new_affiliation_111, 0,
2238 sizeof(struct MR_LD_VF_AFFILIATION_111));
2239 } 2237 }
2240 2238
2241 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2239 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -2333,10 +2331,10 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2333 sizeof(struct MR_LD_VF_AFFILIATION)); 2331 sizeof(struct MR_LD_VF_AFFILIATION));
2334 else { 2332 else {
2335 new_affiliation = 2333 new_affiliation =
2336 pci_alloc_consistent(instance->pdev, 2334 pci_zalloc_consistent(instance->pdev,
2337 (MAX_LOGICAL_DRIVES + 1) * 2335 (MAX_LOGICAL_DRIVES + 1) *
2338 sizeof(struct MR_LD_VF_AFFILIATION), 2336 sizeof(struct MR_LD_VF_AFFILIATION),
2339 &new_affiliation_h); 2337 &new_affiliation_h);
2340 if (!new_affiliation) { 2338 if (!new_affiliation) {
2341 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2339 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2342 "memory for new affiliation for scsi%d\n", 2340 "memory for new affiliation for scsi%d\n",
@@ -2344,8 +2342,6 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2344 megasas_return_cmd(instance, cmd); 2342 megasas_return_cmd(instance, cmd);
2345 return -ENOMEM; 2343 return -ENOMEM;
2346 } 2344 }
2347 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2348 sizeof(struct MR_LD_VF_AFFILIATION));
2349 } 2345 }
2350 2346
2351 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 2347 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -5636,16 +5632,15 @@ megasas_get_seq_num(struct megasas_instance *instance,
5636 } 5632 }
5637 5633
5638 dcmd = &cmd->frame->dcmd; 5634 dcmd = &cmd->frame->dcmd;
5639 el_info = pci_alloc_consistent(instance->pdev, 5635 el_info = pci_zalloc_consistent(instance->pdev,
5640 sizeof(struct megasas_evt_log_info), 5636 sizeof(struct megasas_evt_log_info),
5641 &el_info_h); 5637 &el_info_h);
5642 5638
5643 if (!el_info) { 5639 if (!el_info) {
5644 megasas_return_cmd(instance, cmd); 5640 megasas_return_cmd(instance, cmd);
5645 return -ENOMEM; 5641 return -ENOMEM;
5646 } 5642 }
5647 5643
5648 memset(el_info, 0, sizeof(*el_info));
5649 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 5644 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5650 5645
5651 dcmd->cmd = MFI_CMD_DCMD; 5646 dcmd->cmd = MFI_CMD_DCMD;
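All three conversions in this file follow one recipe: pci_zalloc_consistent() returns DMA-coherent memory that is already zeroed, collapsing the old pci_alloc_consistent() + memset() pair into a single call. A sketch with an assumed size parameter:

        #include <linux/pci.h>

        static void *alloc_dma_info(struct pci_dev *pdev, size_t sz,
                                    dma_addr_t *dma)
        {
                /* arrives zeroed: no follow-up memset() required */
                return pci_zalloc_consistent(pdev, sz, dma);
        }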
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f4d988dd1e9d..98a7a090b75e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -684,15 +684,14 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
684 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * 684 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
685 MAX_MSIX_QUEUES_FUSION; 685 MAX_MSIX_QUEUES_FUSION;
686 686
687 fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size, 687 fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
688 &fusion->rdpq_phys); 688 &fusion->rdpq_phys);
689 if (!fusion->rdpq_virt) { 689 if (!fusion->rdpq_virt) {
690 dev_err(&instance->pdev->dev, 690 dev_err(&instance->pdev->dev,
691 "Failed from %s %d\n", __func__, __LINE__); 691 "Failed from %s %d\n", __func__, __LINE__);
692 return -ENOMEM; 692 return -ENOMEM;
693 } 693 }
694 694
695 memset(fusion->rdpq_virt, 0, array_size);
696 msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 695 msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
697 696
698 fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq", 697 fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
@@ -2981,6 +2980,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
2981 pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); 2980 pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
2982 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); 2981 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
2983 } else { 2982 } else {
2983 if (os_timeout_value)
2984 os_timeout_value++;
2985
2984 /* system pd Fast Path */ 2986 /* system pd Fast Path */
2985 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2987 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2986 timeout_limit = (scmd->device->type == TYPE_DISK) ? 2988 timeout_limit = (scmd->device->type == TYPE_DISK) ?
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index b015c30d2c32..1e45268a78fc 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -9,7 +9,7 @@
9 * scatter/gather formats. 9 * scatter/gather formats.
10 * Creation Date: June 21, 2006 10 * Creation Date: June 21, 2006
11 * 11 *
12 * mpi2.h Version: 02.00.48 12 * mpi2.h Version: 02.00.50
13 * 13 *
14 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 14 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
15 * prefix are for use only on MPI v2.5 products, and must not be used 15 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -114,6 +114,8 @@
114 * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT. 114 * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT.
115 * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT. 115 * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT.
116 * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT. 116 * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
117 * 06-13-17 02.00.49 Bumped MPI2_HEADER_VERSION_UNIT.
118 * 09-29-17 02.00.50 Bumped MPI2_HEADER_VERSION_UNIT.
117 * -------------------------------------------------------------------------- 119 * --------------------------------------------------------------------------
118 */ 120 */
119 121
@@ -152,8 +154,9 @@
152 MPI26_VERSION_MINOR) 154 MPI26_VERSION_MINOR)
153#define MPI2_VERSION_02_06 (0x0206) 155#define MPI2_VERSION_02_06 (0x0206)
154 156
155/*Unit and Dev versioning for this MPI header set */ 157
156#define MPI2_HEADER_VERSION_UNIT (0x30) 158/* Unit and Dev versioning for this MPI header set */
159#define MPI2_HEADER_VERSION_UNIT (0x32)
157#define MPI2_HEADER_VERSION_DEV (0x00) 160#define MPI2_HEADER_VERSION_DEV (0x00)
158#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 161#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
159#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 162#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 0ad88deb3176..5122920a961a 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -7,7 +7,7 @@
7 * Title: MPI Configuration messages and pages 7 * Title: MPI Configuration messages and pages
8 * Creation Date: November 10, 2006 8 * Creation Date: November 10, 2006
9 * 9 *
10 * mpi2_cnfg.h Version: 02.00.40 10 * mpi2_cnfg.h Version: 02.00.42
11 * 11 *
12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
13 * prefix are for use only on MPI v2.5 products, and must not be used 13 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -219,6 +219,18 @@
219 * Added ChassisSlot field to SAS Enclosure Page 0. 219 * Added ChassisSlot field to SAS Enclosure Page 0.
220 * Added ChassisSlot Valid bit (bit 5) to the Flags field 220 * Added ChassisSlot Valid bit (bit 5) to the Flags field
221 * in SAS Enclosure Page 0. 221 * in SAS Enclosure Page 0.
222 * 06-13-17 02.00.41 Added MPI26_MFGPAGE_DEVID_SAS3816 and
223 * MPI26_MFGPAGE_DEVID_SAS3916 defines.
224 * Removed MPI26_MFGPAGE_DEVID_SAS4008 define.
225 * Added MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN define.
226 * Renamed PI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS to
227 * PI26_PCIEIOUNIT1_LINKFLAGS_SRIS_EN.
228 * Renamed MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS to
229 * MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SEPARATE_REFCLK.
230 * 09-29-17 02.00.42 Added ControllerResetTO field to PCIe Device Page 2.
231 * Added NOIOB field to PCIe Device Page 2.
232 * Added MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN to
233 * the Capabilities field of PCIe Device Page 2.
222 * -------------------------------------------------------------------------- 234 * --------------------------------------------------------------------------
223 */ 235 */
224 236
@@ -556,7 +568,8 @@ typedef struct _MPI2_CONFIG_REPLY {
556#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1) 568#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1)
557#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2) 569#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2)
558 570
559#define MPI26_MFGPAGE_DEVID_SAS4008 (0x00A1) 571#define MPI26_MFGPAGE_DEVID_SAS3816 (0x00A1)
572#define MPI26_MFGPAGE_DEVID_SAS3916 (0x00A0)
560 573
561 574
562/*Manufacturing Page 0 */ 575/*Manufacturing Page 0 */
@@ -3864,20 +3877,25 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 {
3864typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 { 3877typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
3865 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ 3878 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3866 U16 DevHandle; /*0x08 */ 3879 U16 DevHandle; /*0x08 */
3867 U16 Reserved1; /*0x0A */ 3880 U8 ControllerResetTO; /* 0x0A */
3868 U32 MaximumDataTransferSize;/*0x0C */ 3881 U8 Reserved1; /* 0x0B */
3882 U32 MaximumDataTransferSize; /*0x0C */
3869 U32 Capabilities; /*0x10 */ 3883 U32 Capabilities; /*0x10 */
3870 U32 Reserved2; /*0x14 */ 3884 U16 NOIOB; /* 0x14 */
3885 U16 Reserved2; /* 0x16 */
3871} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2, 3886} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
3872 Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t; 3887 Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
3873 3888
3874#define MPI26_PCIEDEVICE2_PAGEVERSION (0x00) 3889#define MPI26_PCIEDEVICE2_PAGEVERSION (0x01)
3875 3890
3876/*defines for PCIe Device Page 2 Capabilities field */ 3891/*defines for PCIe Device Page 2 Capabilities field */
3892#define MPI26_PCIEDEV2_CAP_DATA_BLK_ALIGN_AND_GRAN (0x00000008)
3877#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004) 3893#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004)
3878#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002) 3894#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002)
3879#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001) 3895#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001)
3880 3896
3897/* Defines for the NOIOB field */
3898#define MPI26_PCIEDEV2_NOIOB_UNSUPPORTED (0x0000)
3881 3899
3882/**************************************************************************** 3900/****************************************************************************
3883* PCIe Link Config Pages (MPI v2.6 and later) 3901* PCIe Link Config Pages (MPI v2.6 and later)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 948a3ba682d7..6213ce6791ac 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -75,7 +75,7 @@
75 75
76typedef struct _MPI2_SCSI_IO_CDB_EEDP32 { 76typedef struct _MPI2_SCSI_IO_CDB_EEDP32 {
77 U8 CDB[20]; /*0x00 */ 77 U8 CDB[20]; /*0x00 */
78 U32 PrimaryReferenceTag; /*0x14 */ 78 __be32 PrimaryReferenceTag; /*0x14 */
79 U16 PrimaryApplicationTag; /*0x18 */ 79 U16 PrimaryApplicationTag; /*0x18 */
80 U16 PrimaryApplicationTagMask; /*0x1A */ 80 U16 PrimaryApplicationTagMask; /*0x1A */
81 U32 TransferLength; /*0x1C */ 81 U32 TransferLength; /*0x1C */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index cc2aff7aa67b..1faec3a93e69 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -7,7 +7,7 @@
7 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 7 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
8 * Creation Date: October 11, 2006 8 * Creation Date: October 11, 2006
9 * 9 *
10 * mpi2_ioc.h Version: 02.00.32 10 * mpi2_ioc.h Version: 02.00.34
11 * 11 *
12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
13 * prefix are for use only on MPI v2.5 products, and must not be used 13 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -167,6 +167,10 @@
167 * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP. 167 * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
168 * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related 168 * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
169 * defines for the ReasonCode field. 169 * defines for the ReasonCode field.
170 * 06-13-17 02.00.33 Added MPI2_FW_DOWNLOAD_ITYPE_CPLD.
171 * 09-29-17 02.00.34 Added MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED
172 * to the ReasonCode field in PCIe Device Status Change
173 * Event Data.
170 * -------------------------------------------------------------------------- 174 * --------------------------------------------------------------------------
171 */ 175 */
172 176
@@ -1182,6 +1186,7 @@ typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE {
1182#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) 1186#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
1183#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) 1187#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
1184#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10) 1188#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10)
1189#define MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x11)
1185 1190
1186 1191
1187/*PCIe Enumeration Event data (MPI v2.6 and later) */ 1192/*PCIe Enumeration Event data (MPI v2.6 and later) */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 61f93a134956..bf04fa90f433 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
87 87
88static int smp_affinity_enable = 1; 88static int smp_affinity_enable = 1;
89module_param(smp_affinity_enable, int, S_IRUGO); 89module_param(smp_affinity_enable, int, S_IRUGO);
90MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)"); 90MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
91 91
92static int max_msix_vectors = -1; 92static int max_msix_vectors = -1;
93module_param(max_msix_vectors, int, 0); 93module_param(max_msix_vectors, int, 0);
@@ -297,12 +297,15 @@ static void *
297_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc, 297_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
298 dma_addr_t chain_buffer_dma) 298 dma_addr_t chain_buffer_dma)
299{ 299{
300 u16 index; 300 u16 index, j;
301 301 struct chain_tracker *ct;
302 for (index = 0; index < ioc->chain_depth; index++) { 302
303 if (ioc->chain_lookup[index].chain_buffer_dma == 303 for (index = 0; index < ioc->scsiio_depth; index++) {
304 chain_buffer_dma) 304 for (j = 0; j < ioc->chains_needed_per_io; j++) {
305 return ioc->chain_lookup[index].chain_buffer; 305 ct = &ioc->chain_lookup[index].chains_per_smid[j];
306 if (ct && ct->chain_buffer_dma == chain_buffer_dma)
307 return ct->chain_buffer;
308 }
306 } 309 }
307 pr_info(MPT3SAS_FMT 310 pr_info(MPT3SAS_FMT
308 "Provided chain_buffer_dma address is not in the lookup list\n", 311 "Provided chain_buffer_dma address is not in the lookup list\n",
@@ -394,13 +397,14 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
394 buff_ptr_phys = buffer_iomem_phys; 397 buff_ptr_phys = buffer_iomem_phys;
395 WARN_ON(buff_ptr_phys > U32_MAX); 398 WARN_ON(buff_ptr_phys > U32_MAX);
396 399
397 if (sgel->FlagsLength & 400 if (le32_to_cpu(sgel->FlagsLength) &
398 (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT)) 401 (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
399 is_write = 1; 402 is_write = 1;
400 403
401 for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) { 404 for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
402 405
403 sgl_flags = (sgel->FlagsLength >> MPI2_SGE_FLAGS_SHIFT); 406 sgl_flags =
407 (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
404 408
405 switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) { 409 switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
406 case MPI2_SGE_FLAGS_CHAIN_ELEMENT: 410 case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
@@ -411,7 +415,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
411 */ 415 */
412 sgel_next = 416 sgel_next =
413 _base_get_chain_buffer_dma_to_chain_buffer(ioc, 417 _base_get_chain_buffer_dma_to_chain_buffer(ioc,
414 sgel->Address); 418 le32_to_cpu(sgel->Address));
415 if (sgel_next == NULL) 419 if (sgel_next == NULL)
416 return; 420 return;
417 /* 421 /*
@@ -426,7 +430,8 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
426 dst_addr_phys = _base_get_chain_phys(ioc, 430 dst_addr_phys = _base_get_chain_phys(ioc,
427 smid, sge_chain_count); 431 smid, sge_chain_count);
428 WARN_ON(dst_addr_phys > U32_MAX); 432 WARN_ON(dst_addr_phys > U32_MAX);
429 sgel->Address = (u32)dst_addr_phys; 433 sgel->Address =
434 cpu_to_le32(lower_32_bits(dst_addr_phys));
430 sgel = sgel_next; 435 sgel = sgel_next;
431 sge_chain_count++; 436 sge_chain_count++;
432 break; 437 break;
@@ -435,22 +440,28 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
435 if (is_scsiio_req) { 440 if (is_scsiio_req) {
436 _base_clone_to_sys_mem(buff_ptr, 441 _base_clone_to_sys_mem(buff_ptr,
437 sg_virt(sg_scmd), 442 sg_virt(sg_scmd),
438 (sgel->FlagsLength & 0x00ffffff)); 443 (le32_to_cpu(sgel->FlagsLength) &
444 0x00ffffff));
439 /* 445 /*
 440 * FIXME: this relies on a zero 446
441 * PCI mem_offset. 447 * PCI mem_offset.
442 */ 448 */
443 sgel->Address = (u32)buff_ptr_phys; 449 sgel->Address =
450 cpu_to_le32((u32)buff_ptr_phys);
444 } else { 451 } else {
445 _base_clone_to_sys_mem(buff_ptr, 452 _base_clone_to_sys_mem(buff_ptr,
446 ioc->config_vaddr, 453 ioc->config_vaddr,
447 (sgel->FlagsLength & 0x00ffffff)); 454 (le32_to_cpu(sgel->FlagsLength) &
448 sgel->Address = (u32)buff_ptr_phys; 455 0x00ffffff));
456 sgel->Address =
457 cpu_to_le32((u32)buff_ptr_phys);
449 } 458 }
450 } 459 }
451 buff_ptr += (sgel->FlagsLength & 0x00ffffff); 460 buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
452 buff_ptr_phys += (sgel->FlagsLength & 0x00ffffff); 461 0x00ffffff);
453 if ((sgel->FlagsLength & 462 buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
463 0x00ffffff);
464 if ((le32_to_cpu(sgel->FlagsLength) &
454 (MPI2_SGE_FLAGS_END_OF_BUFFER 465 (MPI2_SGE_FLAGS_END_OF_BUFFER
455 << MPI2_SGE_FLAGS_SHIFT))) 466 << MPI2_SGE_FLAGS_SHIFT)))
456 goto eob_clone_chain; 467 goto eob_clone_chain;
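
Every access to the SGE's FlagsLength and Address words now goes through le32_to_cpu()/cpu_to_le32(), because MPI descriptors are defined little-endian and the old raw reads were only correct on little-endian hosts. A standalone model of what le32_to_cpu() guarantees, a portable little-endian load that is independent of host byte order; the constants and sample word below are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define SGE_FLAGS_SHIFT  24
#define SGE_LENGTH_MASK  0x00ffffffu

/* Portable little-endian load: the effect of le32_to_cpu() on a raw
 * descriptor word, whatever the host byte order is. */
static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
        /* A fabricated FlagsLength word: flags 0x90, length 0x200. */
        const uint8_t raw[4] = { 0x00, 0x02, 0x00, 0x90 };
        uint32_t fl = get_le32(raw);

        printf("flags=0x%02x length=%u\n",
               (unsigned int)(fl >> SGE_FLAGS_SHIFT),
               (unsigned int)(fl & SGE_LENGTH_MASK));   /* flags=0x90 length=512 */
        return 0;
}
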
@@ -1019,6 +1030,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1019 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: 1030 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
1020 desc = "Cable Event"; 1031 desc = "Cable Event";
1021 break; 1032 break;
1033 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
1034 desc = "SAS Device Discovery Error";
1035 break;
1022 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: 1036 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
1023 desc = "PCIE Device Status Change"; 1037 desc = "PCIE Device Status Change";
1024 break; 1038 break;
@@ -1433,7 +1447,7 @@ _base_interrupt(int irq, void *bus_id)
1433 cpu_to_le32(reply); 1447 cpu_to_le32(reply);
1434 if (ioc->is_mcpu_endpoint) 1448 if (ioc->is_mcpu_endpoint)
1435 _base_clone_reply_to_sys_mem(ioc, 1449 _base_clone_reply_to_sys_mem(ioc,
1436 cpu_to_le32(reply), 1450 reply,
1437 ioc->reply_free_host_index); 1451 ioc->reply_free_host_index);
1438 writel(ioc->reply_free_host_index, 1452 writel(ioc->reply_free_host_index,
1439 &ioc->chip->ReplyFreeHostIndex); 1453 &ioc->chip->ReplyFreeHostIndex);
@@ -1671,7 +1685,8 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1671 * @ioc: per adapter object 1685 * @ioc: per adapter object
1672 * @scmd: SCSI commands of the IO request 1686 * @scmd: SCSI commands of the IO request
1673 * 1687 *
 1674 * Returns chain tracker(from ioc->free_chain_list) 1688 * Returns the chain tracker from the chain_lookup table, keyed by
 1689 * the smid and that smid's current chain_offset.
1675 */ 1690 */
1676static struct chain_tracker * 1691static struct chain_tracker *
1677_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, 1692_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
@@ -1679,20 +1694,15 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
1679{ 1694{
1680 struct chain_tracker *chain_req; 1695 struct chain_tracker *chain_req;
1681 struct scsiio_tracker *st = scsi_cmd_priv(scmd); 1696 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
1682 unsigned long flags; 1697 u16 smid = st->smid;
1698 u8 chain_offset =
1699 atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
1683 1700
1684 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1701 if (chain_offset == ioc->chains_needed_per_io)
1685 if (list_empty(&ioc->free_chain_list)) {
1686 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1687 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1688 "chain buffers not available\n", ioc->name));
1689 return NULL; 1702 return NULL;
1690 } 1703
1691 chain_req = list_entry(ioc->free_chain_list.next, 1704 chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
1692 struct chain_tracker, tracker_list); 1705 atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
1693 list_del_init(&chain_req->tracker_list);
1694 list_add_tail(&chain_req->tracker_list, &st->chain_list);
1695 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1696 return chain_req; 1706 return chain_req;
1697} 1707}
1698 1708
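
The free-list-plus-spinlock allocator is gone: each I/O slot now owns a fixed array of trackers, and allocation is just an atomic bump of the slot's chain_offset, reset to zero when the command completes (mpt3sas_base_clear_st() in the next hunk). This is safe without a lock because only one context builds the SGL for a given smid at a time. A userspace model with C11 atomics, all names hypothetical:

#include <stdatomic.h>
#include <stddef.h>

struct chain_tracker { void *chain_buffer; };

struct chain_slot {
        struct chain_tracker *chains;  /* chains_needed_per_io entries */
        atomic_uint offset;            /* index of the next unused tracker */
};

/* Hand out the next preallocated tracker for this I/O slot, or NULL
 * once the per-I/O budget is spent. Lock-free: one owner per slot. */
static struct chain_tracker *get_chain(struct chain_slot *slot,
                                       unsigned int chains_per_io)
{
        unsigned int off = atomic_load(&slot->offset);

        if (off == chains_per_io)
                return NULL;
        atomic_fetch_add(&slot->offset, 1);
        return &slot->chains[off];
}

/* On command completion, the whole budget comes back at once. */
static void put_chains(struct chain_slot *slot)
{
        atomic_store(&slot->offset, 0);
}
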
@@ -3044,7 +3054,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3044 3054
3045 for (i = 0; i < ioc->combined_reply_index_count; i++) { 3055 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3046 ioc->replyPostRegisterIndex[i] = (resource_size_t *) 3056 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3047 ((u8 *)&ioc->chip->Doorbell + 3057 ((u8 __force *)&ioc->chip->Doorbell +
3048 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + 3058 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3049 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); 3059 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3050 } 3060 }
@@ -3273,13 +3283,7 @@ void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3273 return; 3283 return;
3274 st->cb_idx = 0xFF; 3284 st->cb_idx = 0xFF;
3275 st->direct_io = 0; 3285 st->direct_io = 0;
3276 if (!list_empty(&st->chain_list)) { 3286 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3277 unsigned long flags;
3278
3279 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3280 list_splice_init(&st->chain_list, &ioc->free_chain_list);
3281 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3282 }
3283} 3287}
3284 3288
3285/** 3289/**
@@ -3339,7 +3343,7 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3339 spinlock_t *writeq_lock) 3343 spinlock_t *writeq_lock)
3340{ 3344{
3341 unsigned long flags; 3345 unsigned long flags;
3342 __u64 data_out = cpu_to_le64(b); 3346 __u64 data_out = b;
3343 3347
3344 spin_lock_irqsave(writeq_lock, flags); 3348 spin_lock_irqsave(writeq_lock, flags);
3345 writel((u32)(data_out), addr); 3349 writel((u32)(data_out), addr);
@@ -3362,7 +3366,7 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3362static inline void 3366static inline void
3363_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) 3367_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3364{ 3368{
3365 writeq(cpu_to_le64(b), addr); 3369 writeq(b, addr);
3366} 3370}
3367#else 3371#else
3368static inline void 3372static inline void
@@ -3389,7 +3393,7 @@ _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3389 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); 3393 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3390 3394
3391 _clone_sg_entries(ioc, (void *) mfp, smid); 3395 _clone_sg_entries(ioc, (void *) mfp, smid);
3392 mpi_req_iomem = (void *)ioc->chip + 3396 mpi_req_iomem = (void __force *)ioc->chip +
3393 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); 3397 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3394 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, 3398 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3395 ioc->request_sz); 3399 ioc->request_sz);
@@ -3473,7 +3477,8 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3473 3477
3474 request_hdr = (MPI2RequestHeader_t *)mfp; 3478 request_hdr = (MPI2RequestHeader_t *)mfp;
3475 /* TBD 256 is offset within sys register. */ 3479 /* TBD 256 is offset within sys register. */
3476 mpi_req_iomem = (void *)ioc->chip + MPI_FRAME_START_OFFSET 3480 mpi_req_iomem = (void __force *)ioc->chip
3481 + MPI_FRAME_START_OFFSET
3477 + (smid * ioc->request_sz); 3482 + (smid * ioc->request_sz);
3478 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, 3483 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3479 ioc->request_sz); 3484 ioc->request_sz);
@@ -3542,7 +3547,7 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3542 3547
3543 _clone_sg_entries(ioc, (void *) mfp, smid); 3548 _clone_sg_entries(ioc, (void *) mfp, smid);
3544 /* TBD 256 is offset within sys register */ 3549 /* TBD 256 is offset within sys register */
3545 mpi_req_iomem = (void *)ioc->chip + 3550 mpi_req_iomem = (void __force *)ioc->chip +
3546 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); 3551 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3547 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp, 3552 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3548 ioc->request_sz); 3553 ioc->request_sz);
@@ -3823,6 +3828,105 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
3823} 3828}
3824 3829
3825/** 3830/**
3831 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
3832 * version from FW Image Header.
3833 * @ioc: per adapter object
3834 *
3835 * Returns 0 for success, non-zero for failure.
3836 */
 3837static int
3838_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
3839{
3840 Mpi2FWImageHeader_t *FWImgHdr;
3841 Mpi25FWUploadRequest_t *mpi_request;
3842 Mpi2FWUploadReply_t mpi_reply;
3843 int r = 0;
3844 void *fwpkg_data = NULL;
3845 dma_addr_t fwpkg_data_dma;
3846 u16 smid, ioc_status;
3847 size_t data_length;
3848
3849 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3850 __func__));
3851
3852 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
3853 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3854 ioc->name, __func__);
3855 return -EAGAIN;
3856 }
3857
3858 data_length = sizeof(Mpi2FWImageHeader_t);
3859 fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
3860 &fwpkg_data_dma);
3861 if (!fwpkg_data) {
3862 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
3863 ioc->name, __FILE__, __LINE__, __func__);
3864 return -ENOMEM;
3865 }
3866
3867 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3868 if (!smid) {
3869 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3870 ioc->name, __func__);
3871 r = -EAGAIN;
3872 goto out;
3873 }
3874
3875 ioc->base_cmds.status = MPT3_CMD_PENDING;
3876 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3877 ioc->base_cmds.smid = smid;
3878 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
3879 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
3880 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
3881 mpi_request->ImageSize = cpu_to_le32(data_length);
3882 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
3883 data_length);
3884 init_completion(&ioc->base_cmds.done);
3885 mpt3sas_base_put_smid_default(ioc, smid);
3886 /* Wait for 15 seconds */
3887 wait_for_completion_timeout(&ioc->base_cmds.done,
3888 FW_IMG_HDR_READ_TIMEOUT*HZ);
3889 pr_info(MPT3SAS_FMT "%s: complete\n",
3890 ioc->name, __func__);
3891 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3892 pr_err(MPT3SAS_FMT "%s: timeout\n",
3893 ioc->name, __func__);
3894 _debug_dump_mf(mpi_request,
3895 sizeof(Mpi25FWUploadRequest_t)/4);
3896 r = -ETIME;
3897 } else {
3898 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
3899 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
3900 memcpy(&mpi_reply, ioc->base_cmds.reply,
3901 sizeof(Mpi2FWUploadReply_t));
3902 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3903 MPI2_IOCSTATUS_MASK;
3904 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3905 FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
3906 if (FWImgHdr->PackageVersion.Word) {
3907 pr_info(MPT3SAS_FMT "FW Package Version"
3908 "(%02d.%02d.%02d.%02d)\n",
3909 ioc->name,
3910 FWImgHdr->PackageVersion.Struct.Major,
3911 FWImgHdr->PackageVersion.Struct.Minor,
3912 FWImgHdr->PackageVersion.Struct.Unit,
3913 FWImgHdr->PackageVersion.Struct.Dev);
3914 }
3915 } else {
3916 _debug_dump_mf(&mpi_reply,
3917 sizeof(Mpi2FWUploadReply_t)/4);
3918 }
3919 }
3920 }
3921 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3922out:
3923 if (fwpkg_data)
3924 pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
3925 fwpkg_data_dma);
3926 return r;
3927}
3928
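
The new _base_display_fwpkg_version() follows the driver's usual internal-command shape: claim the single base_cmds slot, allocate DMA-able space, build the request, and block on a completion with a timeout. Outside the kernel the same shape can be modelled with a condition variable; this sketch only mirrors the wait half, and all names in it are made up:

#include <errno.h>
#include <pthread.h>
#include <time.h>

/* A minimal "completion", modelling init_completion() plus
 * wait_for_completion_timeout(). */
struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

/* Returns 0 on timeout, nonzero if completed, like the kernel API. */
static int wait_for_completion_timeout_s(struct completion *c, int secs)
{
        struct timespec ts;
        int rc = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += secs;
        pthread_mutex_lock(&c->lock);
        while (!c->done && rc != ETIMEDOUT)
                rc = pthread_cond_timedwait(&c->cond, &c->lock, &ts);
        pthread_mutex_unlock(&c->lock);
        return rc == ETIMEDOUT ? 0 : 1;
}
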
3929/**
 3826 * _base_display_ioc_capabilities - Display IOC's capabilities. 3930 * _base_display_ioc_capabilities - Display IOC's capabilities.
3827 * @ioc: per adapter object 3931 * @ioc: per adapter object
3828 * 3932 *
@@ -4038,6 +4142,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4038 Mpi2ConfigReply_t mpi_reply; 4142 Mpi2ConfigReply_t mpi_reply;
4039 u32 iounit_pg1_flags; 4143 u32 iounit_pg1_flags;
4040 4144
4145 ioc->nvme_abort_timeout = 30;
4041 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0); 4146 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4042 if (ioc->ir_firmware) 4147 if (ioc->ir_firmware)
4043 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply, 4148 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
@@ -4056,6 +4161,18 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4056 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, 4161 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4057 &ioc->manu_pg11); 4162 &ioc->manu_pg11);
4058 } 4163 }
4164 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4165 ioc->tm_custom_handling = 1;
4166 else {
4167 ioc->tm_custom_handling = 0;
4168 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4169 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4170 else if (ioc->manu_pg11.NVMeAbortTO >
4171 NVME_TASK_ABORT_MAX_TIMEOUT)
4172 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4173 else
4174 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4175 }
4059 4176
4060 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); 4177 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4061 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); 4178 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
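
The Manufacturing Page 11 handling above boils down to a clamp: when the controller does not request custom task management, the firmware-supplied NVMeAbortTO is bounded to the [6, 60] second window, and 30 seconds remains the default otherwise. The same logic in isolation:

#include <stdio.h>

#define NVME_TASK_ABORT_MIN_TIMEOUT 6
#define NVME_TASK_ABORT_MAX_TIMEOUT 60

/* Clamp the firmware-supplied abort timeout into the supported window. */
static unsigned int clamp_abort_timeout(unsigned int fw_value)
{
        if (fw_value < NVME_TASK_ABORT_MIN_TIMEOUT)
                return NVME_TASK_ABORT_MIN_TIMEOUT;
        if (fw_value > NVME_TASK_ABORT_MAX_TIMEOUT)
                return NVME_TASK_ABORT_MAX_TIMEOUT;
        return fw_value;
}

int main(void)
{
        printf("%u %u %u\n", clamp_abort_timeout(2),
               clamp_abort_timeout(30), clamp_abort_timeout(90));
        /* prints: 6 30 60 */
        return 0;
}
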
@@ -4085,6 +4202,27 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4085} 4202}
4086 4203
4087/** 4204/**
4205 * mpt3sas_free_enclosure_list - release memory
4206 * @ioc: per adapter object
4207 *
 4208 * Free memory allocated during enclosure add.
4209 *
4210 * Return nothing.
4211 */
4212void
4213mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4214{
4215 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4216
4217 /* Free enclosure list */
4218 list_for_each_entry_safe(enclosure_dev,
4219 enclosure_dev_next, &ioc->enclosure_list, list) {
4220 list_del(&enclosure_dev->list);
4221 kfree(enclosure_dev);
4222 }
4223}
4224
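
mpt3sas_free_enclosure_list() iterates with list_for_each_entry_safe() because every pass frees the node it stands on; the _safe variant caches the successor before the free can invalidate it. The same idea on a plain singly linked list:

#include <stdlib.h>

struct enclosure_node {
        struct enclosure_node *next;
        /* the pg0 payload would live here */
};

/* Free the whole list: read 'next' before free(), which is exactly
 * why the driver uses list_for_each_entry_safe() and not the plain
 * iterator. */
static void free_enclosure_list(struct enclosure_node *head)
{
        struct enclosure_node *n, *next;

        for (n = head; n != NULL; n = next) {
                next = n->next;  /* must be saved before free(n) */
                free(n);
        }
}
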
4225/**
4088 * _base_release_memory_pools - release memory 4226 * _base_release_memory_pools - release memory
4089 * @ioc: per adapter object 4227 * @ioc: per adapter object
4090 * 4228 *
@@ -4096,6 +4234,8 @@ static void
4096_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) 4234_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4097{ 4235{
4098 int i = 0; 4236 int i = 0;
4237 int j = 0;
4238 struct chain_tracker *ct;
4099 struct reply_post_struct *rps; 4239 struct reply_post_struct *rps;
4100 4240
4101 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4241 dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
@@ -4153,7 +4293,14 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4153 } 4293 }
4154 } while (ioc->rdpq_array_enable && 4294 } while (ioc->rdpq_array_enable &&
4155 (++i < ioc->reply_queue_count)); 4295 (++i < ioc->reply_queue_count));
4156 4296 if (ioc->reply_post_free_array &&
4297 ioc->rdpq_array_enable) {
4298 dma_pool_free(ioc->reply_post_free_array_dma_pool,
4299 ioc->reply_post_free_array,
4300 ioc->reply_post_free_array_dma);
4301 ioc->reply_post_free_array = NULL;
4302 }
4303 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4157 dma_pool_destroy(ioc->reply_post_free_dma_pool); 4304 dma_pool_destroy(ioc->reply_post_free_dma_pool);
4158 kfree(ioc->reply_post); 4305 kfree(ioc->reply_post);
4159 } 4306 }
@@ -4179,19 +4326,49 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4179 kfree(ioc->hpr_lookup); 4326 kfree(ioc->hpr_lookup);
4180 kfree(ioc->internal_lookup); 4327 kfree(ioc->internal_lookup);
4181 if (ioc->chain_lookup) { 4328 if (ioc->chain_lookup) {
4182 for (i = 0; i < ioc->chain_depth; i++) { 4329 for (i = 0; i < ioc->scsiio_depth; i++) {
4183 if (ioc->chain_lookup[i].chain_buffer) 4330 for (j = ioc->chains_per_prp_buffer;
4184 dma_pool_free(ioc->chain_dma_pool, 4331 j < ioc->chains_needed_per_io; j++) {
4185 ioc->chain_lookup[i].chain_buffer, 4332 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4186 ioc->chain_lookup[i].chain_buffer_dma); 4333 if (ct && ct->chain_buffer)
4334 dma_pool_free(ioc->chain_dma_pool,
4335 ct->chain_buffer,
4336 ct->chain_buffer_dma);
4337 }
4338 kfree(ioc->chain_lookup[i].chains_per_smid);
4187 } 4339 }
4188 dma_pool_destroy(ioc->chain_dma_pool); 4340 dma_pool_destroy(ioc->chain_dma_pool);
4189 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages); 4341 kfree(ioc->chain_lookup);
4190 ioc->chain_lookup = NULL; 4342 ioc->chain_lookup = NULL;
4191 } 4343 }
4192} 4344}
4193 4345
4194/** 4346/**
 4347 * is_MSB_are_same - checks whether all reply queues in a set
 4348 * have the same upper 32 bits in their base memory address.
 4349 * @reply_pool_start_address: Base address of a reply queue set
 4350 * @pool_sz: Size of a single Reply Descriptor Post Queue pool
 4351 *
 4352 * Returns 1 if the reply queues in a set share the same upper
 4353 * 32 bits of their base memory address,
 4354 * else 0
4355 */
4356
4357static int
4358is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
4359{
4360 long reply_pool_end_address;
4361
4362 reply_pool_end_address = reply_pool_start_address + pool_sz;
4363
4364 if (upper_32_bits(reply_pool_start_address) ==
4365 upper_32_bits(reply_pool_end_address))
4366 return 1;
4367 else
4368 return 0;
4369}
4370
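
is_MSB_are_same() is a one-line property check: a buffer crosses a 4 GB boundary exactly when its first and last addresses disagree in their upper 32 bits. A self-contained demonstration with fabricated addresses:

#include <stdint.h>
#include <stdio.h>

/* Mirrors is_MSB_are_same(): do start and end share bits 63..32? */
static int same_upper_32_bits(uint64_t start, uint32_t size)
{
        return (start >> 32) == ((start + size) >> 32);
}

int main(void)
{
        /* 0xFFFFF000 + 0x2000 steps over the 4 GB line (upper words
         * 0x0 vs 0x1), so the check reports a crossing. */
        printf("%d\n", same_upper_32_bits(0x00000000FFFFF000ull, 0x2000)); /* 0 */
        printf("%d\n", same_upper_32_bits(0x0000000100000000ull, 0x2000)); /* 1 */
        return 0;
}
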
4371/**
4195 * _base_allocate_memory_pools - allocate start of day memory pools 4372 * _base_allocate_memory_pools - allocate start of day memory pools
4196 * @ioc: per adapter object 4373 * @ioc: per adapter object
4197 * 4374 *
@@ -4203,12 +4380,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4203 struct mpt3sas_facts *facts; 4380 struct mpt3sas_facts *facts;
4204 u16 max_sge_elements; 4381 u16 max_sge_elements;
4205 u16 chains_needed_per_io; 4382 u16 chains_needed_per_io;
4206 u32 sz, total_sz, reply_post_free_sz; 4383 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
4207 u32 retry_sz; 4384 u32 retry_sz;
4208 u16 max_request_credit, nvme_blocks_needed; 4385 u16 max_request_credit, nvme_blocks_needed;
4209 unsigned short sg_tablesize; 4386 unsigned short sg_tablesize;
4210 u16 sge_size; 4387 u16 sge_size;
4211 int i; 4388 int i, j;
4389 struct chain_tracker *ct;
4212 4390
4213 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4391 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4214 __func__)); 4392 __func__));
@@ -4489,37 +4667,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4489 ioc->name, ioc->request, ioc->scsiio_depth)); 4667 ioc->name, ioc->request, ioc->scsiio_depth));
4490 4668
4491 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); 4669 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
4492 sz = ioc->chain_depth * sizeof(struct chain_tracker); 4670 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
4493 ioc->chain_pages = get_order(sz); 4671 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
4494 ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
4495 GFP_KERNEL, ioc->chain_pages);
4496 if (!ioc->chain_lookup) { 4672 if (!ioc->chain_lookup) {
 4497 pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n", 4673 pr_err(MPT3SAS_FMT "chain_lookup: kzalloc "
 4498 ioc->name); 4674 "failed\n", ioc->name);
4499 goto out; 4675 goto out;
4500 } 4676 }
4501 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, 4677
4502 ioc->chain_segment_sz, 16, 0); 4678 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
4503 if (!ioc->chain_dma_pool) { 4679 for (i = 0; i < ioc->scsiio_depth; i++) {
4504 pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n", 4680 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
4505 ioc->name); 4681 if (!ioc->chain_lookup[i].chains_per_smid) {
4506 goto out; 4682 pr_err(MPT3SAS_FMT "chain_lookup: "
4507 } 4683 " kzalloc failed\n", ioc->name);
4508 for (i = 0; i < ioc->chain_depth; i++) { 4684 goto out;
4509 ioc->chain_lookup[i].chain_buffer = dma_pool_alloc(
4510 ioc->chain_dma_pool , GFP_KERNEL,
4511 &ioc->chain_lookup[i].chain_buffer_dma);
4512 if (!ioc->chain_lookup[i].chain_buffer) {
4513 ioc->chain_depth = i;
4514 goto chain_done;
4515 } 4685 }
4516 total_sz += ioc->chain_segment_sz;
4517 } 4686 }
4518 chain_done:
4519 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4520 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
4521 ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
4522 ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
4523 4687
4524 /* initialize hi-priority queue smid's */ 4688 /* initialize hi-priority queue smid's */
4525 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, 4689 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
@@ -4561,6 +4725,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4561 * be required for NVMe PRP's, only each set of NVMe blocks will be 4725 * be required for NVMe PRP's, only each set of NVMe blocks will be
4562 * contiguous, so a new set is allocated for each possible I/O. 4726 * contiguous, so a new set is allocated for each possible I/O.
4563 */ 4727 */
4728 ioc->chains_per_prp_buffer = 0;
4564 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { 4729 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4565 nvme_blocks_needed = 4730 nvme_blocks_needed =
4566 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1; 4731 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
@@ -4583,6 +4748,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4583 ioc->name); 4748 ioc->name);
4584 goto out; 4749 goto out;
4585 } 4750 }
4751
4752 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
4753 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
4754 ioc->chains_needed_per_io);
4755
4586 for (i = 0; i < ioc->scsiio_depth; i++) { 4756 for (i = 0; i < ioc->scsiio_depth; i++) {
4587 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc( 4757 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
4588 ioc->pcie_sgl_dma_pool, GFP_KERNEL, 4758 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
@@ -4593,13 +4763,55 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4593 ioc->name); 4763 ioc->name);
4594 goto out; 4764 goto out;
4595 } 4765 }
4766 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
4767 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4768 ct->chain_buffer =
4769 ioc->pcie_sg_lookup[i].pcie_sgl +
4770 (j * ioc->chain_segment_sz);
4771 ct->chain_buffer_dma =
4772 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
4773 (j * ioc->chain_segment_sz);
4774 }
4596 } 4775 }
4597 4776
4598 dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), " 4777 dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
4599 "element_size(%d), pool_size(%d kB)\n", ioc->name, 4778 "element_size(%d), pool_size(%d kB)\n", ioc->name,
4600 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); 4779 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
 4780 dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains that "
4781 "fit in a PRP page(%d)\n", ioc->name,
4782 ioc->chains_per_prp_buffer));
4601 total_sz += sz * ioc->scsiio_depth; 4783 total_sz += sz * ioc->scsiio_depth;
4602 } 4784 }
4785
4786 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
4787 ioc->chain_segment_sz, 16, 0);
4788 if (!ioc->chain_dma_pool) {
4789 pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
4790 ioc->name);
4791 goto out;
4792 }
4793 for (i = 0; i < ioc->scsiio_depth; i++) {
4794 for (j = ioc->chains_per_prp_buffer;
4795 j < ioc->chains_needed_per_io; j++) {
4796 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4797 ct->chain_buffer = dma_pool_alloc(
4798 ioc->chain_dma_pool, GFP_KERNEL,
4799 &ct->chain_buffer_dma);
4800 if (!ct->chain_buffer) {
4801 pr_err(MPT3SAS_FMT "chain_lookup: "
4802 " pci_pool_alloc failed\n", ioc->name);
4803 _base_release_memory_pools(ioc);
4804 goto out;
4805 }
4806 }
4807 total_sz += ioc->chain_segment_sz;
4808 }
4809
4810 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4811 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
4812 ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
4813 ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
4814
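
The allocation rework above carves the first chains_per_prp_buffer chain segments out of each slot's NVMe PRP page, with the CPU and DMA addresses advancing in lockstep, and only the remainder comes from the chain dma_pool. How many fit is simply the buffer size divided by chain_segment_sz, capped at chains_needed_per_io. The carve-out arithmetic in isolation, with hypothetical names:

#include <stddef.h>
#include <stdint.h>

struct chain_tracker { void *chain_buffer; uint64_t chain_buffer_dma; };

/* Slice 'count' chain segments of 'seg_sz' bytes out of one big
 * DMA-coherent buffer; virtual and bus addresses move together. */
static void carve_chains(struct chain_tracker *ct, unsigned int count,
                         void *base, uint64_t base_dma, size_t seg_sz)
{
        unsigned int j;

        for (j = 0; j < count; j++) {
                ct[j].chain_buffer = (uint8_t *)base + j * seg_sz;
                ct[j].chain_buffer_dma = base_dma + j * seg_sz;
        }
}
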
4603 /* sense buffers, 4 byte align */ 4815 /* sense buffers, 4 byte align */
4604 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; 4816 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
4605 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4817 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
@@ -4616,6 +4828,37 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4616 ioc->name); 4828 ioc->name);
4617 goto out; 4829 goto out;
4618 } 4830 }
 4831 /* The sense buffer must reside within a single 4 GB region;
 4832 * the check below verifies this.
 4833 * On failure, a new pci pool is created with an updated
 4834 * alignment, and the old allocation and pool are destroyed.
 4835 * The alignment is chosen so that the next allocation, if it
 4836 * succeeds, always meets the 4 GB region requirement.
 4837 * The real requirement is not alignment as such: the start and
 4838 * end DMA addresses must share the same upper 32 bits.
 4839 */
4840 if (!is_MSB_are_same((long)ioc->sense, sz)) {
 4841 /* Release sense pool and reallocate */
4842 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4843 dma_pool_destroy(ioc->sense_dma_pool);
4844 ioc->sense = NULL;
4845
4846 ioc->sense_dma_pool =
4847 dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4848 roundup_pow_of_two(sz), 0);
4849 if (!ioc->sense_dma_pool) {
 4850 pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
4851 ioc->name);
4852 goto out;
4853 }
4854 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4855 &ioc->sense_dma);
4856 if (!ioc->sense) {
 4857 pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
4858 ioc->name);
4859 goto out;
4860 }
4861 }
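
The retry path asks dma_pool_create() for an alignment of roundup_pow_of_two(sz). That works because 4 GB boundaries are multiples of 2^32: a block whose size does not exceed its power-of-two alignment lies entirely inside one alignment granule, so (for sz <= 4 GB) its first and last bytes must share their upper 32 address bits. A quick self-check against a worst-case aligned base address, all values fabricated:

#include <inttypes.h>
#include <stdio.h>

static uint64_t roundup_pow_of_two64(uint64_t x)
{
        uint64_t p = 1;

        while (p < x)
                p <<= 1;
        return p;
}

int main(void)
{
        uint64_t sz = 600 * 96;                      /* e.g. depth * sense size */
        uint64_t align = roundup_pow_of_two64(sz);   /* 65536 */
        /* The aligned slot closest to the 4 GB line from below. */
        uint64_t base = 0x100000000ull - align;

        printf("align=%" PRIu64 " same_msb=%d\n", align,
               (int)((base >> 32) == ((base + sz) >> 32)));  /* same_msb=1 */
        return 0;
}
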
4619 dinitprintk(ioc, pr_info(MPT3SAS_FMT 4862 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4620 "sense pool(0x%p): depth(%d), element_size(%d), pool_size" 4863 "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
4621 "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth, 4864 "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
@@ -4675,6 +4918,28 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4675 ioc->name, (unsigned long long)ioc->reply_free_dma)); 4918 ioc->name, (unsigned long long)ioc->reply_free_dma));
4676 total_sz += sz; 4919 total_sz += sz;
4677 4920
4921 if (ioc->rdpq_array_enable) {
4922 reply_post_free_array_sz = ioc->reply_queue_count *
4923 sizeof(Mpi2IOCInitRDPQArrayEntry);
4924 ioc->reply_post_free_array_dma_pool =
4925 dma_pool_create("reply_post_free_array pool",
4926 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
4927 if (!ioc->reply_post_free_array_dma_pool) {
4928 dinitprintk(ioc,
4929 pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
4930 "dma_pool_create failed\n", ioc->name));
4931 goto out;
4932 }
4933 ioc->reply_post_free_array =
4934 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
4935 GFP_KERNEL, &ioc->reply_post_free_array_dma);
4936 if (!ioc->reply_post_free_array) {
4937 dinitprintk(ioc,
4938 pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
4939 "dma_pool_alloc failed\n", ioc->name));
4940 goto out;
4941 }
4942 }
4678 ioc->config_page_sz = 512; 4943 ioc->config_page_sz = 512;
4679 ioc->config_page = pci_alloc_consistent(ioc->pdev, 4944 ioc->config_page = pci_alloc_consistent(ioc->pdev,
4680 ioc->config_page_sz, &ioc->config_page_dma); 4945 ioc->config_page_sz, &ioc->config_page_dma);
@@ -5002,7 +5267,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5002 5267
5003 /* send message 32-bits at a time */ 5268 /* send message 32-bits at a time */
5004 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { 5269 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5005 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); 5270 writel((u32)(request[i]), &ioc->chip->Doorbell);
5006 if ((_base_wait_for_doorbell_ack(ioc, 5))) 5271 if ((_base_wait_for_doorbell_ack(ioc, 5)))
5007 failed = 1; 5272 failed = 1;
5008 } 5273 }
@@ -5023,7 +5288,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5023 } 5288 }
5024 5289
5025 /* read the first two 16-bits, it gives the total length of the reply */ 5290 /* read the first two 16-bits, it gives the total length of the reply */
5026 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) 5291 reply[0] = (u16)(readl(&ioc->chip->Doorbell)
5027 & MPI2_DOORBELL_DATA_MASK); 5292 & MPI2_DOORBELL_DATA_MASK);
5028 writel(0, &ioc->chip->HostInterruptStatus); 5293 writel(0, &ioc->chip->HostInterruptStatus);
5029 if ((_base_wait_for_doorbell_int(ioc, 5))) { 5294 if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5032,7 +5297,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5032 ioc->name, __LINE__); 5297 ioc->name, __LINE__);
5033 return -EFAULT; 5298 return -EFAULT;
5034 } 5299 }
5035 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) 5300 reply[1] = (u16)(readl(&ioc->chip->Doorbell)
5036 & MPI2_DOORBELL_DATA_MASK); 5301 & MPI2_DOORBELL_DATA_MASK);
5037 writel(0, &ioc->chip->HostInterruptStatus); 5302 writel(0, &ioc->chip->HostInterruptStatus);
5038 5303
@@ -5046,7 +5311,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5046 if (i >= reply_bytes/2) /* overflow case */ 5311 if (i >= reply_bytes/2) /* overflow case */
5047 readl(&ioc->chip->Doorbell); 5312 readl(&ioc->chip->Doorbell);
5048 else 5313 else
5049 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) 5314 reply[i] = (u16)(readl(&ioc->chip->Doorbell)
5050 & MPI2_DOORBELL_DATA_MASK); 5315 & MPI2_DOORBELL_DATA_MASK);
5051 writel(0, &ioc->chip->HostInterruptStatus); 5316 writel(0, &ioc->chip->HostInterruptStatus);
5052 } 5317 }
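
The doorbell hunks above remove cpu_to_le32()/le16_to_cpu() around writel()/readl(). Those MMIO accessors already perform the little-endian conversion internally, so wrapping them again double-swaps the data on big-endian machines; a plain (u32)/(u16) cast is all that is needed. A userspace illustration of the guarantee and the bug:

#include <stdint.h>
#include <stdio.h>

/* What readl() promises: a little-endian load converted to host
 * order, regardless of host byte order. */
static uint32_t model_readl(const uint8_t *mmio)
{
        return (uint32_t)mmio[0] | (uint32_t)mmio[1] << 8 |
               (uint32_t)mmio[2] << 16 | (uint32_t)mmio[3] << 24;
}

int main(void)
{
        const uint8_t doorbell[4] = { 0x34, 0x12, 0x00, 0x00 };
        uint32_t v = model_readl(doorbell);

        /* Correct: v is already in host order, masking is enough. */
        printf("0x%04x\n", (unsigned int)(v & 0xffff));    /* 0x1234 */

        /* The old code effectively swapped again; on a big-endian
         * host that would have produced 0x3412 here (it was only a
         * no-op on little-endian builds). */
        return 0;
}
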
@@ -5481,8 +5746,6 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
5481 ktime_t current_time; 5746 ktime_t current_time;
5482 u16 ioc_status; 5747 u16 ioc_status;
5483 u32 reply_post_free_array_sz = 0; 5748 u32 reply_post_free_array_sz = 0;
5484 Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
5485 dma_addr_t reply_post_free_array_dma;
5486 5749
5487 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 5750 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5488 __func__)); 5751 __func__));
@@ -5516,23 +5779,14 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
5516 if (ioc->rdpq_array_enable) { 5779 if (ioc->rdpq_array_enable) {
5517 reply_post_free_array_sz = ioc->reply_queue_count * 5780 reply_post_free_array_sz = ioc->reply_queue_count *
5518 sizeof(Mpi2IOCInitRDPQArrayEntry); 5781 sizeof(Mpi2IOCInitRDPQArrayEntry);
5519 reply_post_free_array = pci_alloc_consistent(ioc->pdev, 5782 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
5520 reply_post_free_array_sz, &reply_post_free_array_dma);
5521 if (!reply_post_free_array) {
5522 pr_err(MPT3SAS_FMT
5523 "reply_post_free_array: pci_alloc_consistent failed\n",
5524 ioc->name);
5525 r = -ENOMEM;
5526 goto out;
5527 }
5528 memset(reply_post_free_array, 0, reply_post_free_array_sz);
5529 for (i = 0; i < ioc->reply_queue_count; i++) 5783 for (i = 0; i < ioc->reply_queue_count; i++)
5530 reply_post_free_array[i].RDPQBaseAddress = 5784 ioc->reply_post_free_array[i].RDPQBaseAddress =
5531 cpu_to_le64( 5785 cpu_to_le64(
5532 (u64)ioc->reply_post[i].reply_post_free_dma); 5786 (u64)ioc->reply_post[i].reply_post_free_dma);
5533 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; 5787 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
5534 mpi_request.ReplyDescriptorPostQueueAddress = 5788 mpi_request.ReplyDescriptorPostQueueAddress =
5535 cpu_to_le64((u64)reply_post_free_array_dma); 5789 cpu_to_le64((u64)ioc->reply_post_free_array_dma);
5536 } else { 5790 } else {
5537 mpi_request.ReplyDescriptorPostQueueAddress = 5791 mpi_request.ReplyDescriptorPostQueueAddress =
5538 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); 5792 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
@@ -5562,7 +5816,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
5562 if (r != 0) { 5816 if (r != 0) {
5563 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 5817 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
5564 ioc->name, __func__, r); 5818 ioc->name, __func__, r);
5565 goto out; 5819 return r;
5566 } 5820 }
5567 5821
5568 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 5822 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
@@ -5572,11 +5826,6 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
5572 r = -EIO; 5826 r = -EIO;
5573 } 5827 }
5574 5828
5575out:
5576 if (reply_post_free_array)
5577 pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
5578 reply_post_free_array,
5579 reply_post_free_array_dma);
5580 return r; 5829 return r;
5581} 5830}
5582 5831
@@ -6157,12 +6406,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6157 &ioc->internal_free_list); 6406 &ioc->internal_free_list);
6158 } 6407 }
6159 6408
6160 /* chain pool */
6161 INIT_LIST_HEAD(&ioc->free_chain_list);
6162 for (i = 0; i < ioc->chain_depth; i++)
6163 list_add_tail(&ioc->chain_lookup[i].tracker_list,
6164 &ioc->free_chain_list);
6165
6166 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 6409 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
6167 6410
6168 /* initialize Reply Free Queue */ 6411 /* initialize Reply Free Queue */
@@ -6172,7 +6415,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6172 ioc->reply_free[i] = cpu_to_le32(reply_address); 6415 ioc->reply_free[i] = cpu_to_le32(reply_address);
6173 if (ioc->is_mcpu_endpoint) 6416 if (ioc->is_mcpu_endpoint)
6174 _base_clone_reply_to_sys_mem(ioc, 6417 _base_clone_reply_to_sys_mem(ioc,
6175 (__le32)reply_address, i); 6418 reply_address, i);
6176 } 6419 }
6177 6420
6178 /* initialize reply queues */ 6421 /* initialize reply queues */
@@ -6230,12 +6473,18 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6230 skip_init_reply_post_host_index: 6473 skip_init_reply_post_host_index:
6231 6474
6232 _base_unmask_interrupts(ioc); 6475 _base_unmask_interrupts(ioc);
6476
6477 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6478 r = _base_display_fwpkg_version(ioc);
6479 if (r)
6480 return r;
6481 }
6482
6483 _base_static_config_pages(ioc);
6233 r = _base_event_notification(ioc); 6484 r = _base_event_notification(ioc);
6234 if (r) 6485 if (r)
6235 return r; 6486 return r;
6236 6487
6237 _base_static_config_pages(ioc);
6238
6239 if (ioc->is_driver_loading) { 6488 if (ioc->is_driver_loading) {
6240 6489
6241 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier 6490 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
@@ -6492,6 +6741,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6492 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); 6741 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
6493 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); 6742 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
6494 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); 6743 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
6744 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
6495 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) { 6745 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
6496 if (ioc->is_gen35_ioc) { 6746 if (ioc->is_gen35_ioc) {
6497 _base_unmask_events(ioc, 6747 _base_unmask_events(ioc,
@@ -6558,6 +6808,7 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
6558 mpt3sas_base_stop_watchdog(ioc); 6808 mpt3sas_base_stop_watchdog(ioc);
6559 mpt3sas_base_free_resources(ioc); 6809 mpt3sas_base_free_resources(ioc);
6560 _base_release_memory_pools(ioc); 6810 _base_release_memory_pools(ioc);
6811 mpt3sas_free_enclosure_list(ioc);
6561 pci_set_drvdata(ioc->pdev, NULL); 6812 pci_set_drvdata(ioc->pdev, NULL);
6562 kfree(ioc->cpu_msix_table); 6813 kfree(ioc->cpu_msix_table);
6563 if (ioc->is_warpdrive) 6814 if (ioc->is_warpdrive)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index ae36d8fb2f2b..f02974c0be4a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -74,8 +74,8 @@
74#define MPT3SAS_DRIVER_NAME "mpt3sas" 74#define MPT3SAS_DRIVER_NAME "mpt3sas"
75#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 75#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
76#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 76#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
77#define MPT3SAS_DRIVER_VERSION "17.100.00.00" 77#define MPT3SAS_DRIVER_VERSION "25.100.00.00"
78#define MPT3SAS_MAJOR_VERSION 17 78#define MPT3SAS_MAJOR_VERSION 25
79#define MPT3SAS_MINOR_VERSION 100 79#define MPT3SAS_MINOR_VERSION 100
80#define MPT3SAS_BUILD_VERSION 0 80#define MPT3SAS_BUILD_VERSION 0
81#define MPT3SAS_RELEASE_VERSION 00 81#define MPT3SAS_RELEASE_VERSION 00
@@ -138,6 +138,7 @@
138#define MAX_CHAIN_ELEMT_SZ 16 138#define MAX_CHAIN_ELEMT_SZ 16
139#define DEFAULT_NUM_FWCHAIN_ELEMTS 8 139#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
140 140
141#define FW_IMG_HDR_READ_TIMEOUT 15
141/* 142/*
142 * NVMe defines 143 * NVMe defines
143 */ 144 */
@@ -145,8 +146,12 @@
145#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */ 146#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
146#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */ 147#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
147#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */ 148#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
149#define NVME_TASK_ABORT_MIN_TIMEOUT 6
150#define NVME_TASK_ABORT_MAX_TIMEOUT 60
151#define NVME_TASK_MNGT_CUSTOM_MASK (0x0010)
148#define NVME_PRP_PAGE_SIZE 4096 /* Page size */ 152#define NVME_PRP_PAGE_SIZE 4096 /* Page size */
149 153
154
150/* 155/*
151 * reset phases 156 * reset phases
152 */ 157 */
@@ -362,7 +367,15 @@ struct Mpi2ManufacturingPage11_t {
362 u8 EEDPTagMode; /* 09h */ 367 u8 EEDPTagMode; /* 09h */
363 u8 Reserved3; /* 0Ah */ 368 u8 Reserved3; /* 0Ah */
364 u8 Reserved4; /* 0Bh */ 369 u8 Reserved4; /* 0Bh */
365 __le32 Reserved5[23]; /* 0Ch-60h*/ 370 __le32 Reserved5[8]; /* 0Ch-2Ch */
371 u16 AddlFlags2; /* 2Ch */
372 u8 AddlFlags3; /* 2Eh */
373 u8 Reserved6; /* 2Fh */
374 __le32 Reserved7[7]; /* 30h - 4Bh */
375 u8 NVMeAbortTO; /* 4Ch */
376 u8 Reserved8; /* 4Dh */
377 u16 Reserved9; /* 4Eh */
378 __le32 Reserved10[4]; /* 50h - 60h */
366}; 379};
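
The reworked page 11 layout splits what used to be Reserved5[23] into named fields at fixed byte offsets (AddlFlags2 at 2Ch, NVMeAbortTO at 4Ch). Hand-splitting reserved arrays like this is easy to get wrong by a few bytes, and a compile-time offsetof() check catches drift. A sketch against a simplified stand-in struct; the first 12 bytes of the real page are collapsed into one array here:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in mirroring the field layout documented above. */
struct manu_pg11 {
        uint8_t header[12];       /* 00h-0Bh, real fields collapsed */
        uint32_t reserved5[8];    /* 0Ch-2Bh */
        uint16_t addl_flags2;     /* 2Ch */
        uint8_t addl_flags3;      /* 2Eh */
        uint8_t reserved6;        /* 2Fh */
        uint32_t reserved7[7];    /* 30h-4Bh */
        uint8_t nvme_abort_to;    /* 4Ch */
        uint8_t reserved8;        /* 4Dh */
        uint16_t reserved9;       /* 4Eh */
        uint32_t reserved10[4];   /* 50h-5Fh */
};

/* Fail the build if the hand-counted offsets ever drift. */
static_assert(offsetof(struct manu_pg11, addl_flags2) == 0x2C, "pg11 layout");
static_assert(offsetof(struct manu_pg11, nvme_abort_to) == 0x4C, "pg11 layout");

int main(void)
{
        return 0;
}
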
367 380
368/** 381/**
@@ -572,6 +585,7 @@ struct _pcie_device {
572 u8 enclosure_level; 585 u8 enclosure_level;
573 u8 connector_name[4]; 586 u8 connector_name[4];
574 u8 *serial_number; 587 u8 *serial_number;
588 u8 reset_timeout;
575 struct kref refcount; 589 struct kref refcount;
576}; 590};
577/** 591/**
@@ -741,6 +755,17 @@ struct _sas_node {
741 struct list_head sas_port_list; 755 struct list_head sas_port_list;
742}; 756};
743 757
758
759/**
760 * struct _enclosure_node - enclosure information
761 * @list: list of enclosures
762 * @pg0: enclosure pg0;
763 */
764struct _enclosure_node {
765 struct list_head list;
766 Mpi2SasEnclosurePage0_t pg0;
767};
768
744/** 769/**
745 * enum reset_type - reset state 770 * enum reset_type - reset state
746 * @FORCE_BIG_HAMMER: issue diagnostic reset 771 * @FORCE_BIG_HAMMER: issue diagnostic reset
@@ -770,7 +795,11 @@ struct pcie_sg_list {
770struct chain_tracker { 795struct chain_tracker {
771 void *chain_buffer; 796 void *chain_buffer;
772 dma_addr_t chain_buffer_dma; 797 dma_addr_t chain_buffer_dma;
773 struct list_head tracker_list; 798};
799
800struct chain_lookup {
801 struct chain_tracker *chains_per_smid;
802 atomic_t chain_offset;
774}; 803};
775 804
776/** 805/**
@@ -829,8 +858,8 @@ struct _sc_list {
829 */ 858 */
830struct _event_ack_list { 859struct _event_ack_list {
831 struct list_head list; 860 struct list_head list;
832 u16 Event; 861 U16 Event;
833 u32 EventContext; 862 U32 EventContext;
834}; 863};
835 864
836/** 865/**
@@ -1009,6 +1038,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
1009 * @iounit_pg8: static iounit page 8 1038 * @iounit_pg8: static iounit page 8
1010 * @sas_hba: sas host object 1039 * @sas_hba: sas host object
1011 * @sas_expander_list: expander object list 1040 * @sas_expander_list: expander object list
1041 * @enclosure_list: enclosure object list
1012 * @sas_node_lock: 1042 * @sas_node_lock:
1013 * @sas_device_list: sas device object list 1043 * @sas_device_list: sas device object list
1014 * @sas_device_init_list: sas device object list (used only at init time) 1044 * @sas_device_init_list: sas device object list (used only at init time)
@@ -1194,6 +1224,10 @@ struct MPT3SAS_ADAPTER {
1194 void *event_log; 1224 void *event_log;
1195 u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; 1225 u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
1196 1226
1227 u8 tm_custom_handling;
1228 u8 nvme_abort_timeout;
1229
1230
1197 /* static config pages */ 1231 /* static config pages */
1198 struct mpt3sas_facts facts; 1232 struct mpt3sas_facts facts;
1199 struct mpt3sas_port_facts *pfacts; 1233 struct mpt3sas_port_facts *pfacts;
@@ -1214,6 +1248,7 @@ struct MPT3SAS_ADAPTER {
1214 /* sas hba, expander, and device list */ 1248 /* sas hba, expander, and device list */
1215 struct _sas_node sas_hba; 1249 struct _sas_node sas_hba;
1216 struct list_head sas_expander_list; 1250 struct list_head sas_expander_list;
1251 struct list_head enclosure_list;
1217 spinlock_t sas_node_lock; 1252 spinlock_t sas_node_lock;
1218 struct list_head sas_device_list; 1253 struct list_head sas_device_list;
1219 struct list_head sas_device_init_list; 1254 struct list_head sas_device_init_list;
@@ -1261,7 +1296,7 @@ struct MPT3SAS_ADAPTER {
1261 u32 page_size; 1296 u32 page_size;
1262 1297
1263 /* chain */ 1298 /* chain */
1264 struct chain_tracker *chain_lookup; 1299 struct chain_lookup *chain_lookup;
1265 struct list_head free_chain_list; 1300 struct list_head free_chain_list;
1266 struct dma_pool *chain_dma_pool; 1301 struct dma_pool *chain_dma_pool;
1267 ulong chain_pages; 1302 ulong chain_pages;
@@ -1315,6 +1350,9 @@ struct MPT3SAS_ADAPTER {
1315 u8 rdpq_array_enable; 1350 u8 rdpq_array_enable;
1316 u8 rdpq_array_enable_assigned; 1351 u8 rdpq_array_enable_assigned;
1317 struct dma_pool *reply_post_free_dma_pool; 1352 struct dma_pool *reply_post_free_dma_pool;
1353 struct dma_pool *reply_post_free_array_dma_pool;
1354 Mpi2IOCInitRDPQArrayEntry *reply_post_free_array;
1355 dma_addr_t reply_post_free_array_dma;
1318 u8 reply_queue_count; 1356 u8 reply_queue_count;
1319 struct list_head reply_queue_list; 1357 struct list_head reply_queue_list;
1320 1358
@@ -1384,6 +1422,7 @@ int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
1384void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc); 1422void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
1385int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc); 1423int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
1386void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc); 1424void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
1425void mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc);
1387int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, 1426int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
1388 enum reset_type type); 1427 enum reset_type type);
1389 1428
@@ -1451,10 +1490,11 @@ u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
1451 u32 reply); 1490 u32 reply);
1452void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); 1491void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
1453 1492
1454int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, 1493int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
1455 u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout); 1494 u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
1456int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, 1495int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
1457 u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout); 1496 u64 lun, u8 type, u16 smid_task, u16 msix_task,
1497 u8 timeout, u8 tr_method);
1458 1498
1459void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); 1499void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
1460void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); 1500void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d3cb387ba9f4..3269ef43f07e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -297,7 +297,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
297 nvme_error_reply = 297 nvme_error_reply =
298 (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply; 298 (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
299 sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE, 299 sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
300 le32_to_cpu(nvme_error_reply->ErrorResponseCount)); 300 le16_to_cpu(nvme_error_reply->ErrorResponseCount));
301 sense_data = mpt3sas_base_get_sense_buffer(ioc, smid); 301 sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
302 memcpy(ioc->ctl_cmds.sense, sense_data, sz); 302 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
303 } 303 }
@@ -644,9 +644,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
644 MPI2RequestHeader_t *mpi_request = NULL, *request; 644 MPI2RequestHeader_t *mpi_request = NULL, *request;
645 MPI2DefaultReply_t *mpi_reply; 645 MPI2DefaultReply_t *mpi_reply;
646 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL; 646 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
647 struct _pcie_device *pcie_device = NULL;
647 u32 ioc_state; 648 u32 ioc_state;
648 u16 smid; 649 u16 smid;
649 unsigned long timeout; 650 u8 timeout;
650 u8 issue_reset; 651 u8 issue_reset;
651 u32 sz, sz_arg; 652 u32 sz, sz_arg;
652 void *psge; 653 void *psge;
@@ -659,6 +660,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
659 long ret; 660 long ret;
660 u16 wait_state_count; 661 u16 wait_state_count;
661 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; 662 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
663 u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
662 664
663 issue_reset = 0; 665 issue_reset = 0;
664 666
@@ -803,12 +805,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
803 * Build the PRPs and set direction bits. 805 * Build the PRPs and set direction bits.
804 * Send the request. 806 * Send the request.
805 */ 807 */
806 nvme_encap_request->ErrorResponseBaseAddress = ioc->sense_dma & 808 nvme_encap_request->ErrorResponseBaseAddress =
807 0xFFFFFFFF00000000; 809 cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
808 nvme_encap_request->ErrorResponseBaseAddress |= 810 nvme_encap_request->ErrorResponseBaseAddress |=
809 (U64)mpt3sas_base_get_sense_buffer_dma(ioc, smid); 811 cpu_to_le64(le32_to_cpu(
812 mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
810 nvme_encap_request->ErrorResponseAllocationLength = 813 nvme_encap_request->ErrorResponseAllocationLength =
811 NVME_ERROR_RESPONSE_SIZE; 814 cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
812 memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE); 815 memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
813 ioc->build_nvme_prp(ioc, smid, nvme_encap_request, 816 ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
814 data_out_dma, data_out_sz, data_in_dma, data_in_sz); 817 data_out_dma, data_out_sz, data_in_dma, data_in_sz);
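
The ErrorResponseBaseAddress fix above is an endianness repair: the field is a little-endian 64-bit value, so both the pool-base upper half and the per-command lower half must be converted with cpu_to_le64() before being OR-ed together, and the 16-bit allocation length likewise needs cpu_to_le16(). A portable model of composing and storing such a field, with fabricated addresses:

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 64-bit store: the combined effect of
 * cpu_to_le64() plus writing the field into a shared frame. */
static void put_le64(uint8_t *p, uint64_t v)
{
        int i;

        for (i = 0; i < 8; i++)
                p[i] = (uint8_t)(v >> (8 * i));
}

int main(void)
{
        uint8_t field[8];
        uint64_t pool_base = 0x0000001234000000ull;  /* fabricated */
        uint64_t buf_lo = 0x00005678ull;             /* fabricated */

        /* Upper 32 bits from the sense pool base, lower 32 bits from
         * the per-command buffer, stored as one little-endian word. */
        put_le64(field, (pool_base & 0xFFFFFFFF00000000ull) | buf_lo);
        printf("%02x %02x %02x %02x %02x %02x %02x %02x\n",
               field[0], field[1], field[2], field[3],
               field[4], field[5], field[6], field[7]);
        return 0;
}
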
@@ -1073,14 +1076,26 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
1073 ioc->name, 1076 ioc->name,
1074 le16_to_cpu(mpi_request->FunctionDependent1)); 1077 le16_to_cpu(mpi_request->FunctionDependent1));
1075 mpt3sas_halt_firmware(ioc); 1078 mpt3sas_halt_firmware(ioc);
1076 mpt3sas_scsih_issue_locked_tm(ioc, 1079 pcie_device = mpt3sas_get_pdev_by_handle(ioc,
1077 le16_to_cpu(mpi_request->FunctionDependent1), 0, 1080 le16_to_cpu(mpi_request->FunctionDependent1));
1078 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30); 1081 if (pcie_device && (!ioc->tm_custom_handling))
1082 mpt3sas_scsih_issue_locked_tm(ioc,
1083 le16_to_cpu(mpi_request->FunctionDependent1),
1084 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1085 0, pcie_device->reset_timeout,
1086 tr_method);
1087 else
1088 mpt3sas_scsih_issue_locked_tm(ioc,
1089 le16_to_cpu(mpi_request->FunctionDependent1),
1090 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1091 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
1079 } else 1092 } else
1080 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 1093 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1081 } 1094 }
1082 1095
1083 out: 1096 out:
1097 if (pcie_device)
1098 pcie_device_put(pcie_device);
1084 1099
1085 /* free memory associated with sg buffers */ 1100 /* free memory associated with sg buffers */
1086 if (data_in) 1101 if (data_in)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index a44046cff0f3..18b46faef6f1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -184,7 +184,7 @@ struct mpt3_ioctl_iocinfo {
184 184
185 185
186/* number of event log entries */ 186/* number of event log entries */
187#define MPT3SAS_CTL_EVENT_LOG_SIZE (50) 187#define MPT3SAS_CTL_EVENT_LOG_SIZE (200)
188 188
189/** 189/**
190 * struct mpt3_ioctl_eventquery - query event count and type 190 * struct mpt3_ioctl_eventquery - query event count and type
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8cd3782fab49..b8d131a455d0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -157,8 +157,8 @@ MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157 157
158 158
159/* raid transport support */ 159/* raid transport support */
160struct raid_template *mpt3sas_raid_template; 160static struct raid_template *mpt3sas_raid_template;
161struct raid_template *mpt2sas_raid_template; 161static struct raid_template *mpt2sas_raid_template;
162 162
163 163
164/** 164/**
@@ -1088,7 +1088,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1088 pcie_device->slot); 1088 pcie_device->slot);
1089 if (pcie_device->connector_name[0] != '\0') 1089 if (pcie_device->connector_name[0] != '\0')
1090 pr_info(MPT3SAS_FMT 1090 pr_info(MPT3SAS_FMT
1091 "removing enclosure level(0x%04x), connector name( %s)\n", 1091 "removing enclosure level(0x%04x), connector name( %s)\n",
1092 ioc->name, pcie_device->enclosure_level, 1092 ioc->name, pcie_device->enclosure_level,
1093 pcie_device->connector_name); 1093 pcie_device->connector_name);
1094 1094
@@ -1362,6 +1362,30 @@ mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1362} 1362}
1363 1363
1364/** 1364/**
 1365 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1366 * @ioc: per adapter object
1367 * @handle: enclosure handle (assigned by firmware)
1368 * Context: Calling function should acquire ioc->sas_device_lock
1369 *
1370 * This searches for enclosure device based on handle, then returns the
1371 * enclosure object.
1372 */
1373static struct _enclosure_node *
1374mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1375{
1376 struct _enclosure_node *enclosure_dev, *r;
1377
1378 r = NULL;
1379 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1380 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1381 continue;
1382 r = enclosure_dev;
1383 goto out;
1384 }
1385out:
1386 return r;
1387}
1388/**
1365 * mpt3sas_scsih_expander_find_by_sas_address - expander device search 1389 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1366 * @ioc: per adapter object 1390 * @ioc: per adapter object
1367 * @sas_address: sas address 1391 * @sas_address: sas address
@@ -2608,6 +2632,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2608 * @smid_task: smid assigned to the task 2632 * @smid_task: smid assigned to the task
2609 * @msix_task: MSIX table index supplied by the OS 2633 * @msix_task: MSIX table index supplied by the OS
2610 * @timeout: timeout in seconds 2634 * @timeout: timeout in seconds
2635 * @tr_method: Target Reset Method
2611 * Context: user 2636 * Context: user
2612 * 2637 *
2613 * A generic API for sending task management requests to firmware. 2638 * A generic API for sending task management requests to firmware.
@@ -2618,8 +2643,8 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2618 * Return SUCCESS or FAILED. 2643 * Return SUCCESS or FAILED.
2619 */ 2644 */
2620int 2645int
2621mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, 2646mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2622 u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout) 2647 u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
2623{ 2648{
2624 Mpi2SCSITaskManagementRequest_t *mpi_request; 2649 Mpi2SCSITaskManagementRequest_t *mpi_request;
2625 Mpi2SCSITaskManagementReply_t *mpi_reply; 2650 Mpi2SCSITaskManagementReply_t *mpi_reply;
@@ -2665,8 +2690,8 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2665 } 2690 }
2666 2691
2667 dtmprintk(ioc, pr_info(MPT3SAS_FMT 2692 dtmprintk(ioc, pr_info(MPT3SAS_FMT
2668 "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n", 2693 "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2669 ioc->name, handle, type, smid_task)); 2694 ioc->name, handle, type, smid_task, timeout, tr_method));
2670 ioc->tm_cmds.status = MPT3_CMD_PENDING; 2695 ioc->tm_cmds.status = MPT3_CMD_PENDING;
2671 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2696 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2672 ioc->tm_cmds.smid = smid; 2697 ioc->tm_cmds.smid = smid;
@@ -2675,6 +2700,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2675 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 2700 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2676 mpi_request->DevHandle = cpu_to_le16(handle); 2701 mpi_request->DevHandle = cpu_to_le16(handle);
2677 mpi_request->TaskType = type; 2702 mpi_request->TaskType = type;
2703 mpi_request->MsgFlags = tr_method;
2678 mpi_request->TaskMID = cpu_to_le16(smid_task); 2704 mpi_request->TaskMID = cpu_to_le16(smid_task);
2679 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); 2705 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2680 mpt3sas_scsih_set_tm_flag(ioc, handle); 2706 mpt3sas_scsih_set_tm_flag(ioc, handle);
@@ -2721,13 +2747,14 @@ out:
2721} 2747}
2722 2748
2723int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, 2749int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2724 u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout) 2750 u64 lun, u8 type, u16 smid_task, u16 msix_task,
2751 u8 timeout, u8 tr_method)
2725{ 2752{
2726 int ret; 2753 int ret;
2727 2754
2728 mutex_lock(&ioc->tm_cmds.mutex); 2755 mutex_lock(&ioc->tm_cmds.mutex);
2729 ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task, 2756 ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2730 msix_task, timeout); 2757 msix_task, timeout, tr_method);
2731 mutex_unlock(&ioc->tm_cmds.mutex); 2758 mutex_unlock(&ioc->tm_cmds.mutex);
2732 2759
2733 return ret; 2760 return ret;
@@ -2830,6 +2857,8 @@ scsih_abort(struct scsi_cmnd *scmd)
2830 u16 handle; 2857 u16 handle;
2831 int r; 2858 int r;
2832 2859
2860 u8 timeout = 30;
2861 struct _pcie_device *pcie_device = NULL;
2833 sdev_printk(KERN_INFO, scmd->device, 2862 sdev_printk(KERN_INFO, scmd->device,
2834 "attempting task abort! scmd(%p)\n", scmd); 2863 "attempting task abort! scmd(%p)\n", scmd);
2835 _scsih_tm_display_info(ioc, scmd); 2864 _scsih_tm_display_info(ioc, scmd);
@@ -2864,15 +2893,20 @@ scsih_abort(struct scsi_cmnd *scmd)
2864 mpt3sas_halt_firmware(ioc); 2893 mpt3sas_halt_firmware(ioc);
2865 2894
2866 handle = sas_device_priv_data->sas_target->handle; 2895 handle = sas_device_priv_data->sas_target->handle;
2896 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2897 if (pcie_device && (!ioc->tm_custom_handling))
2898 timeout = ioc->nvme_abort_timeout;
2867 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun, 2899 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2868 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 2900 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2869 st->smid, st->msix_io, 30); 2901 st->smid, st->msix_io, timeout, 0);
2870 /* Command must be cleared after abort */ 2902 /* Command must be cleared after abort */
2871 if (r == SUCCESS && st->cb_idx != 0xFF) 2903 if (r == SUCCESS && st->cb_idx != 0xFF)
2872 r = FAILED; 2904 r = FAILED;
2873 out: 2905 out:
2874 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", 2906 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
2875 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2907 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2908 if (pcie_device)
2909 pcie_device_put(pcie_device);
2876 return r; 2910 return r;
2877} 2911}
2878 2912
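
scsih_abort() now distinguishes NVMe targets: it looks the handle up in the PCIe device table and, when the IOC does not do its own TM handling, substitutes ioc->nvme_abort_timeout for the fixed 30 seconds. The lookup returns a referenced object, so the function has to drop that reference on every exit path, which is what the pcie_device_put() before the return adds. The get/put discipline in miniature, with a plain counter standing in for the kernel's kref:

    #include <stdio.h>
    #include <stdlib.h>

    struct pcie_device {
        int refcount;
        unsigned char reset_timeout;
    };

    /* Model of mpt3sas_get_pdev_by_handle(): a hit returns the object
     * with an extra reference that the caller now owns. */
    static struct pcie_device *get_pdev_by_handle(struct pcie_device *dev)
    {
        if (dev)
            dev->refcount++;
        return dev;
    }

    static void pcie_device_put(struct pcie_device *dev)
    {
        if (dev && --dev->refcount == 0)
            free(dev);
    }

    static void abort_task(struct pcie_device *entry)
    {
        unsigned char timeout = 30;   /* default, seconds */
        struct pcie_device *pcie_device = get_pdev_by_handle(entry);

        if (pcie_device)
            timeout = pcie_device->reset_timeout;  /* NVMe-specific value */

        printf("aborting with timeout=%us\n", (unsigned)timeout);

        if (pcie_device)          /* every exit path drops the lookup ref */
            pcie_device_put(pcie_device);
    }

    int main(void)
    {
        struct pcie_device *d = calloc(1, sizeof(*d));

        if (!d)
            return 1;
        d->refcount = 1;          /* the device table's own reference */
        d->reset_timeout = 10;
        abort_task(d);
        pcie_device_put(d);       /* drop the table reference: frees it */
        return 0;
    }
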
@@ -2888,7 +2922,10 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
2888 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2922 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2889 struct MPT3SAS_DEVICE *sas_device_priv_data; 2923 struct MPT3SAS_DEVICE *sas_device_priv_data;
2890 struct _sas_device *sas_device = NULL; 2924 struct _sas_device *sas_device = NULL;
2925 struct _pcie_device *pcie_device = NULL;
2891 u16 handle; 2926 u16 handle;
2927 u8 tr_method = 0;
2928 u8 tr_timeout = 30;
2892 int r; 2929 int r;
2893 2930
2894 struct scsi_target *starget = scmd->device->sdev_target; 2931 struct scsi_target *starget = scmd->device->sdev_target;
@@ -2926,8 +2963,16 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
2926 goto out; 2963 goto out;
2927 } 2964 }
2928 2965
2966 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2967
2968 if (pcie_device && (!ioc->tm_custom_handling)) {
2969 tr_timeout = pcie_device->reset_timeout;
2970 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
2971 } else
2972 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2929 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun, 2973 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2930 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30); 2974 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
2975 tr_timeout, tr_method);
2931 /* Check for busy commands after reset */ 2976 /* Check for busy commands after reset */
2932 if (r == SUCCESS && atomic_read(&scmd->device->device_busy)) 2977 if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
2933 r = FAILED; 2978 r = FAILED;
@@ -2937,6 +2982,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
2937 2982
2938 if (sas_device) 2983 if (sas_device)
2939 sas_device_put(sas_device); 2984 sas_device_put(sas_device);
2985 if (pcie_device)
2986 pcie_device_put(pcie_device);
2940 2987
2941 return r; 2988 return r;
2942} 2989}
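
This device-reset hunk (and the identical target-reset hunk that follows) chooses both a timeout and a reset method per device: a managed NVMe device gets the protocol-level PCIe reset together with its own reset_timeout, read from PCIe Device Page 2 in the _scsih_pcie_add_device hunk further down, while everything else keeps a link reset with the 30-second default. A compilable sketch of that selection, with placeholder flag values; the real constants come from the MPI 2.x headers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder values for MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET and
     * MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE. */
    #define MSGFLAGS_LINK_RESET          0x00
    #define MSGFLAGS_PROTO_LVL_RST_PCIE  0x10

    static void pick_reset_method(bool is_nvme, bool tm_custom_handling,
                                  uint8_t dev_reset_timeout,
                                  uint8_t *timeout, uint8_t *tr_method)
    {
        if (is_nvme && !tm_custom_handling) {
            *timeout = dev_reset_timeout;  /* from PCIe Device Page 2 */
            *tr_method = MSGFLAGS_PROTO_LVL_RST_PCIE;
        } else {
            *timeout = 30;
            *tr_method = MSGFLAGS_LINK_RESET;
        }
    }

    int main(void)
    {
        uint8_t t, m;

        pick_reset_method(true, false, 10, &t, &m);
        printf("timeout=%us tr_method=0x%02x\n", (unsigned)t, (unsigned)m);
        return 0;
    }
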
@@ -2953,7 +3000,10 @@ scsih_target_reset(struct scsi_cmnd *scmd)
2953 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 3000 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2954 struct MPT3SAS_DEVICE *sas_device_priv_data; 3001 struct MPT3SAS_DEVICE *sas_device_priv_data;
2955 struct _sas_device *sas_device = NULL; 3002 struct _sas_device *sas_device = NULL;
3003 struct _pcie_device *pcie_device = NULL;
2956 u16 handle; 3004 u16 handle;
3005 u8 tr_method = 0;
3006 u8 tr_timeout = 30;
2957 int r; 3007 int r;
2958 struct scsi_target *starget = scmd->device->sdev_target; 3008 struct scsi_target *starget = scmd->device->sdev_target;
2959 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; 3009 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
@@ -2990,8 +3040,16 @@ scsih_target_reset(struct scsi_cmnd *scmd)
2990 goto out; 3040 goto out;
2991 } 3041 }
2992 3042
3043 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3044
3045 if (pcie_device && (!ioc->tm_custom_handling)) {
3046 tr_timeout = pcie_device->reset_timeout;
3047 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3048 } else
3049 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2993 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0, 3050 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
2994 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30); 3051 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3052 tr_timeout, tr_method);
2995 /* Check for busy commands after reset */ 3053 /* Check for busy commands after reset */
2996 if (r == SUCCESS && atomic_read(&starget->target_busy)) 3054 if (r == SUCCESS && atomic_read(&starget->target_busy))
2997 r = FAILED; 3055 r = FAILED;
@@ -3001,7 +3059,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
3001 3059
3002 if (sas_device) 3060 if (sas_device)
3003 sas_device_put(sas_device); 3061 sas_device_put(sas_device);
3004 3062 if (pcie_device)
3063 pcie_device_put(pcie_device);
3005 return r; 3064 return r;
3006} 3065}
3007 3066
@@ -3535,6 +3594,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3535 unsigned long flags; 3594 unsigned long flags;
3536 struct _tr_list *delayed_tr; 3595 struct _tr_list *delayed_tr;
3537 u32 ioc_state; 3596 u32 ioc_state;
3597 u8 tr_method = 0;
3538 3598
3539 if (ioc->pci_error_recovery) { 3599 if (ioc->pci_error_recovery) {
3540 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3600 dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3577,6 +3637,11 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3577 sas_address = pcie_device->wwid; 3637 sas_address = pcie_device->wwid;
3578 } 3638 }
3579 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 3639 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3640 if (pcie_device && (!ioc->tm_custom_handling))
3641 tr_method =
3642 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3643 else
3644 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3580 } 3645 }
3581 if (sas_target_priv_data) { 3646 if (sas_target_priv_data) {
3582 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3647 dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3640,6 +3705,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3640 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 3705 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3641 mpi_request->DevHandle = cpu_to_le16(handle); 3706 mpi_request->DevHandle = cpu_to_le16(handle);
3642 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 3707 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3708 mpi_request->MsgFlags = tr_method;
3643 set_bit(handle, ioc->device_remove_in_progress); 3709 set_bit(handle, ioc->device_remove_in_progress);
3644 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); 3710 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
3645 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); 3711 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
@@ -3680,11 +3746,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3680 u32 ioc_state; 3746 u32 ioc_state;
3681 struct _sc_list *delayed_sc; 3747 struct _sc_list *delayed_sc;
3682 3748
3683 if (ioc->remove_host) { 3749 if (ioc->pci_error_recovery) {
3684 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3685 "%s: host has been removed\n", __func__, ioc->name));
3686 return 1;
3687 } else if (ioc->pci_error_recovery) {
3688 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3750 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3689 "%s: host in pci error recovery\n", __func__, 3751 "%s: host in pci error recovery\n", __func__,
3690 ioc->name)); 3752 ioc->name));
@@ -3725,7 +3787,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3725 if (!delayed_sc) 3787 if (!delayed_sc)
3726 return _scsih_check_for_pending_tm(ioc, smid); 3788 return _scsih_check_for_pending_tm(ioc, smid);
3727 INIT_LIST_HEAD(&delayed_sc->list); 3789 INIT_LIST_HEAD(&delayed_sc->list);
3728 delayed_sc->handle = mpi_request_tm->DevHandle; 3790 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3729 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list); 3791 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3730 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3792 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3731 "DELAYED:sc:handle(0x%04x), (open)\n", 3793 "DELAYED:sc:handle(0x%04x), (open)\n",
@@ -3806,8 +3868,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3806 u16 smid; 3868 u16 smid;
3807 struct _tr_list *delayed_tr; 3869 struct _tr_list *delayed_tr;
3808 3870
3809 if (ioc->shost_recovery || ioc->remove_host || 3871 if (ioc->pci_error_recovery) {
3810 ioc->pci_error_recovery) {
3811 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3872 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3812 "%s: host reset in progress!\n", 3873 "%s: host reset in progress!\n",
3813 __func__, ioc->name)); 3874 __func__, ioc->name));
@@ -3860,8 +3921,7 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3860 Mpi2SCSITaskManagementReply_t *mpi_reply = 3921 Mpi2SCSITaskManagementReply_t *mpi_reply =
3861 mpt3sas_base_get_reply_virt_addr(ioc, reply); 3922 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3862 3923
3863 if (ioc->shost_recovery || ioc->remove_host || 3924 if (ioc->shost_recovery || ioc->pci_error_recovery) {
3864 ioc->pci_error_recovery) {
3865 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3925 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3866 "%s: host reset in progress!\n", 3926 "%s: host reset in progress!\n",
3867 __func__, ioc->name)); 3927 __func__, ioc->name));
@@ -3903,8 +3963,8 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3903 * Context - processed in interrupt context. 3963 * Context - processed in interrupt context.
3904 */ 3964 */
3905static void 3965static void
3906_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event, 3966_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
3907 u32 event_context) 3967 U32 event_context)
3908{ 3968{
3909 Mpi2EventAckRequest_t *ack_request; 3969 Mpi2EventAckRequest_t *ack_request;
3910 int i = smid - ioc->internal_smid; 3970 int i = smid - ioc->internal_smid;
@@ -3979,13 +4039,13 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
3979 4039
3980 dewtprintk(ioc, pr_info(MPT3SAS_FMT 4040 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3981 "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 4041 "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3982 ioc->name, le16_to_cpu(handle), smid, 4042 ioc->name, handle, smid,
3983 ioc->tm_sas_control_cb_idx)); 4043 ioc->tm_sas_control_cb_idx));
3984 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4044 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3985 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); 4045 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3986 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 4046 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3987 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 4047 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3988 mpi_request->DevHandle = handle; 4048 mpi_request->DevHandle = cpu_to_le16(handle);
3989 mpt3sas_base_put_smid_default(ioc, smid); 4049 mpt3sas_base_put_smid_default(ioc, smid);
3990} 4050}
3991 4051
@@ -5618,10 +5678,10 @@ static int
5618_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) 5678_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5619{ 5679{
5620 struct _sas_node *sas_expander; 5680 struct _sas_node *sas_expander;
5681 struct _enclosure_node *enclosure_dev;
5621 Mpi2ConfigReply_t mpi_reply; 5682 Mpi2ConfigReply_t mpi_reply;
5622 Mpi2ExpanderPage0_t expander_pg0; 5683 Mpi2ExpanderPage0_t expander_pg0;
5623 Mpi2ExpanderPage1_t expander_pg1; 5684 Mpi2ExpanderPage1_t expander_pg1;
5624 Mpi2SasEnclosurePage0_t enclosure_pg0;
5625 u32 ioc_status; 5685 u32 ioc_status;
5626 u16 parent_handle; 5686 u16 parent_handle;
5627 u64 sas_address, sas_address_parent = 0; 5687 u64 sas_address, sas_address_parent = 0;
@@ -5743,11 +5803,12 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5743 } 5803 }
5744 5804
5745 if (sas_expander->enclosure_handle) { 5805 if (sas_expander->enclosure_handle) {
5746 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 5806 enclosure_dev =
5747 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 5807 mpt3sas_scsih_enclosure_find_by_handle(ioc,
5748 sas_expander->enclosure_handle))) 5808 sas_expander->enclosure_handle);
5809 if (enclosure_dev)
5749 sas_expander->enclosure_logical_id = 5810 sas_expander->enclosure_logical_id =
5750 le64_to_cpu(enclosure_pg0.EnclosureLogicalID); 5811 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5751 } 5812 }
5752 5813
5753 _scsih_expander_node_add(ioc, sas_expander); 5814 _scsih_expander_node_add(ioc, sas_expander);
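
Instead of reading SAS Enclosure Page 0 from the firmware on every expander add, the driver now consults the enclosure list it caches at discovery time (built by _scsih_create_enclosure_list_after_reset further down). The lookup is a plain walk keyed by enclosure handle; a sketch with an illustrative singly linked list, where the driver itself uses list_for_each_entry on ioc->enclosure_list:

    #include <stddef.h>
    #include <stdint.h>

    struct enclosure_node {
        uint16_t handle;
        uint64_t logical_id;
        struct enclosure_node *next;
    };

    /* Model of mpt3sas_scsih_enclosure_find_by_handle(): walk the cached
     * list instead of asking the firmware for Enclosure Page 0 again. */
    static struct enclosure_node *
    enclosure_find_by_handle(struct enclosure_node *head, uint16_t handle)
    {
        struct enclosure_node *n;

        for (n = head; n; n = n->next)
            if (n->handle == handle)
                return n;
        return NULL;
    }

    int main(void)
    {
        struct enclosure_node b = { 0x0002, 0x5000ccab00000000ULL, NULL };
        struct enclosure_node a = { 0x0001, 0x5000ccaa00000000ULL, &b };

        return enclosure_find_by_handle(&a, 0x0002) ? 0 : 1;
    }
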
@@ -5891,52 +5952,6 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5891} 5952}
5892 5953
5893/** 5954/**
5894 * _scsih_get_enclosure_logicalid_chassis_slot - get device's
5895 * EnclosureLogicalID and ChassisSlot information.
5896 * @ioc: per adapter object
5897 * @sas_device_pg0: SAS device page0
5898 * @sas_device: per sas device object
5899 *
5900 * Returns nothing.
5901 */
5902static void
5903_scsih_get_enclosure_logicalid_chassis_slot(struct MPT3SAS_ADAPTER *ioc,
5904 Mpi2SasDevicePage0_t *sas_device_pg0, struct _sas_device *sas_device)
5905{
5906 Mpi2ConfigReply_t mpi_reply;
5907 Mpi2SasEnclosurePage0_t enclosure_pg0;
5908
5909 if (!sas_device_pg0 || !sas_device)
5910 return;
5911
5912 sas_device->enclosure_handle =
5913 le16_to_cpu(sas_device_pg0->EnclosureHandle);
5914 sas_device->is_chassis_slot_valid = 0;
5915
5916 if (!le16_to_cpu(sas_device_pg0->EnclosureHandle))
5917 return;
5918
5919 if (mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5920 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5921 le16_to_cpu(sas_device_pg0->EnclosureHandle))) {
5922 pr_err(MPT3SAS_FMT
5923 "Enclosure Pg0 read failed for handle(0x%04x)\n",
5924 ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
5925 return;
5926 }
5927
5928 sas_device->enclosure_logical_id =
5929 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5930
5931 if (le16_to_cpu(enclosure_pg0.Flags) &
5932 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
5933 sas_device->is_chassis_slot_valid = 1;
5934 sas_device->chassis_slot = enclosure_pg0.ChassisSlot;
5935 }
5936}
5937
5938
5939/**
5940 * _scsih_check_device - checking device responsiveness 5955 * _scsih_check_device - checking device responsiveness
5941 * @ioc: per adapter object 5956 * @ioc: per adapter object
5942 * @parent_sas_address: sas address of parent expander or sas host 5957 * @parent_sas_address: sas address of parent expander or sas host
@@ -5953,6 +5968,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5953 Mpi2ConfigReply_t mpi_reply; 5968 Mpi2ConfigReply_t mpi_reply;
5954 Mpi2SasDevicePage0_t sas_device_pg0; 5969 Mpi2SasDevicePage0_t sas_device_pg0;
5955 struct _sas_device *sas_device; 5970 struct _sas_device *sas_device;
5971 struct _enclosure_node *enclosure_dev = NULL;
5956 u32 ioc_status; 5972 u32 ioc_status;
5957 unsigned long flags; 5973 unsigned long flags;
5958 u64 sas_address; 5974 u64 sas_address;
@@ -6007,8 +6023,21 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
6007 sas_device->connector_name[0] = '\0'; 6023 sas_device->connector_name[0] = '\0';
6008 } 6024 }
6009 6025
6010 _scsih_get_enclosure_logicalid_chassis_slot(ioc, 6026 sas_device->enclosure_handle =
6011 &sas_device_pg0, sas_device); 6027 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6028 sas_device->is_chassis_slot_valid = 0;
6029 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
6030 sas_device->enclosure_handle);
6031 if (enclosure_dev) {
6032 sas_device->enclosure_logical_id =
6033 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6034 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6035 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6036 sas_device->is_chassis_slot_valid = 1;
6037 sas_device->chassis_slot =
6038 enclosure_dev->pg0.ChassisSlot;
6039 }
6040 }
6012 } 6041 }
6013 6042
6014 /* check if device is present */ 6043 /* check if device is present */
@@ -6055,12 +6084,11 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6055{ 6084{
6056 Mpi2ConfigReply_t mpi_reply; 6085 Mpi2ConfigReply_t mpi_reply;
6057 Mpi2SasDevicePage0_t sas_device_pg0; 6086 Mpi2SasDevicePage0_t sas_device_pg0;
6058 Mpi2SasEnclosurePage0_t enclosure_pg0;
6059 struct _sas_device *sas_device; 6087 struct _sas_device *sas_device;
6088 struct _enclosure_node *enclosure_dev = NULL;
6060 u32 ioc_status; 6089 u32 ioc_status;
6061 u64 sas_address; 6090 u64 sas_address;
6062 u32 device_info; 6091 u32 device_info;
6063 int encl_pg0_rc = -1;
6064 6092
6065 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 6093 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6066 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 6094 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -6106,12 +6134,12 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6106 } 6134 }
6107 6135
6108 if (sas_device_pg0.EnclosureHandle) { 6136 if (sas_device_pg0.EnclosureHandle) {
6109 encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 6137 enclosure_dev =
6110 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 6138 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6111 sas_device_pg0.EnclosureHandle); 6139 le16_to_cpu(sas_device_pg0.EnclosureHandle));
6112 if (encl_pg0_rc) 6140 if (enclosure_dev == NULL)
6113 pr_info(MPT3SAS_FMT 6141 pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
6114 "Enclosure Pg0 read failed for handle(0x%04x)\n", 6142 "doesn't match with enclosure device!\n",
6115 ioc->name, sas_device_pg0.EnclosureHandle); 6143 ioc->name, sas_device_pg0.EnclosureHandle);
6116 } 6144 }
6117 6145
@@ -6152,18 +6180,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6152 sas_device->enclosure_level = 0; 6180 sas_device->enclosure_level = 0;
6153 sas_device->connector_name[0] = '\0'; 6181 sas_device->connector_name[0] = '\0';
6154 } 6182 }
6155 6183 /* get enclosure_logical_id & chassis_slot*/
6156 /* get enclosure_logical_id & chassis_slot */
6157 sas_device->is_chassis_slot_valid = 0; 6184 sas_device->is_chassis_slot_valid = 0;
6158 if (encl_pg0_rc == 0) { 6185 if (enclosure_dev) {
6159 sas_device->enclosure_logical_id = 6186 sas_device->enclosure_logical_id =
6160 le64_to_cpu(enclosure_pg0.EnclosureLogicalID); 6187 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6161 6188 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6162 if (le16_to_cpu(enclosure_pg0.Flags) &
6163 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 6189 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6164 sas_device->is_chassis_slot_valid = 1; 6190 sas_device->is_chassis_slot_valid = 1;
6165 sas_device->chassis_slot = 6191 sas_device->chassis_slot =
6166 enclosure_pg0.ChassisSlot; 6192 enclosure_dev->pg0.ChassisSlot;
6167 } 6193 }
6168 } 6194 }
6169 6195
@@ -6845,8 +6871,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6845 Mpi26PCIeDevicePage0_t pcie_device_pg0; 6871 Mpi26PCIeDevicePage0_t pcie_device_pg0;
6846 Mpi26PCIeDevicePage2_t pcie_device_pg2; 6872 Mpi26PCIeDevicePage2_t pcie_device_pg2;
6847 Mpi2ConfigReply_t mpi_reply; 6873 Mpi2ConfigReply_t mpi_reply;
6848 Mpi2SasEnclosurePage0_t enclosure_pg0;
6849 struct _pcie_device *pcie_device; 6874 struct _pcie_device *pcie_device;
6875 struct _enclosure_node *enclosure_dev;
6850 u32 pcie_device_type; 6876 u32 pcie_device_type;
6851 u32 ioc_status; 6877 u32 ioc_status;
6852 u64 wwid; 6878 u64 wwid;
@@ -6917,7 +6943,7 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6917 if (pcie_device->enclosure_handle != 0) 6943 if (pcie_device->enclosure_handle != 0)
6918 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot); 6944 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
6919 6945
6920 if (le16_to_cpu(pcie_device_pg0.Flags) & 6946 if (le32_to_cpu(pcie_device_pg0.Flags) &
6921 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 6947 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6922 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; 6948 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
6923 memcpy(&pcie_device->connector_name[0], 6949 memcpy(&pcie_device->connector_name[0],
@@ -6928,13 +6954,14 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6928 } 6954 }
6929 6955
6930 /* get enclosure_logical_id */ 6956 /* get enclosure_logical_id */
6931 if (pcie_device->enclosure_handle && 6957 if (pcie_device->enclosure_handle) {
6932 !(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 6958 enclosure_dev =
6933 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 6959 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6934 pcie_device->enclosure_handle))) 6960 pcie_device->enclosure_handle);
6935 pcie_device->enclosure_logical_id = 6961 if (enclosure_dev)
6936 le64_to_cpu(enclosure_pg0.EnclosureLogicalID); 6962 pcie_device->enclosure_logical_id =
6937 6963 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6964 }
6938 /* TODO -- Add device name once FW supports it */ 6965 /* TODO -- Add device name once FW supports it */
6939 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, 6966 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
6940 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) { 6967 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
@@ -6953,6 +6980,11 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6953 } 6980 }
6954 pcie_device->nvme_mdts = 6981 pcie_device->nvme_mdts =
6955 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); 6982 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6983 if (pcie_device_pg2.ControllerResetTO)
6984 pcie_device->reset_timeout =
6985 pcie_device_pg2.ControllerResetTO;
6986 else
6987 pcie_device->reset_timeout = 30;
6956 6988
6957 if (ioc->wait_for_discovery_to_complete) 6989 if (ioc->wait_for_discovery_to_complete)
6958 _scsih_pcie_device_init_add(ioc, pcie_device); 6990 _scsih_pcie_device_init_add(ioc, pcie_device);
@@ -7205,6 +7237,9 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7205 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION: 7237 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7206 reason_str = "internal async notification"; 7238 reason_str = "internal async notification";
7207 break; 7239 break;
7240 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7241 reason_str = "pcie hot reset failed";
7242 break;
7208 default: 7243 default:
7209 reason_str = "unknown reason"; 7244 reason_str = "unknown reason";
7210 break; 7245 break;
@@ -7320,10 +7355,60 @@ static void
7320_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, 7355_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7321 struct fw_event_work *fw_event) 7356 struct fw_event_work *fw_event)
7322{ 7357{
7358 Mpi2ConfigReply_t mpi_reply;
7359 struct _enclosure_node *enclosure_dev = NULL;
7360 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7361 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7362 int rc;
7363 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7364
7323 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 7365 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7324 _scsih_sas_enclosure_dev_status_change_event_debug(ioc, 7366 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7325 (Mpi2EventDataSasEnclDevStatusChange_t *) 7367 (Mpi2EventDataSasEnclDevStatusChange_t *)
7326 fw_event->event_data); 7368 fw_event->event_data);
7369 if (ioc->shost_recovery)
7370 return;
7371
7372 if (enclosure_handle)
7373 enclosure_dev =
7374 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7375 enclosure_handle);
7376 switch (event_data->ReasonCode) {
7377 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7378 if (!enclosure_dev) {
7379 enclosure_dev =
7380 kzalloc(sizeof(struct _enclosure_node),
7381 GFP_KERNEL);
7382 if (!enclosure_dev) {
7383 pr_info(MPT3SAS_FMT
7384 "failure at %s:%d/%s()!\n", ioc->name,
7385 __FILE__, __LINE__, __func__);
7386 return;
7387 }
7388 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7389 &enclosure_dev->pg0,
7390 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7391 enclosure_handle);
7392
7393 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7394 MPI2_IOCSTATUS_MASK)) {
7395 kfree(enclosure_dev);
7396 return;
7397 }
7398
7399 list_add_tail(&enclosure_dev->list,
7400 &ioc->enclosure_list);
7401 }
7402 break;
7403 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7404 if (enclosure_dev) {
7405 list_del(&enclosure_dev->list);
7406 kfree(enclosure_dev);
7407 }
7408 break;
7409 default:
7410 break;
7411 }
7327} 7412}
7328 7413
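
The enclosure status-change handler above is what keeps that cache coherent: on MPI2_EVENT_SAS_ENCL_RC_ADDED it allocates a node, fills it from Enclosure Page 0 and appends it; on RC_NOT_RESPONDING it unlinks and frees the stale entry. A userspace model of the two operations, with a simple singly linked list standing in for the kernel's list_head:

    #include <stdint.h>
    #include <stdlib.h>

    struct enclosure_node {
        uint16_t handle;
        struct enclosure_node *next;
    };

    /* RC_ADDED: insert once; duplicates are avoided by a prior lookup. */
    static void cache_add(struct enclosure_node **head, uint16_t handle)
    {
        struct enclosure_node *n = calloc(1, sizeof(*n));

        if (!n)
            return;             /* allocation failure: leave cache as-is */
        n->handle = handle;
        n->next = *head;
        *head = n;
    }

    /* RC_NOT_RESPONDING: unlink and free the stale entry. */
    static void cache_remove(struct enclosure_node **head, uint16_t handle)
    {
        struct enclosure_node **p;

        for (p = head; *p; p = &(*p)->next)
            if ((*p)->handle == handle) {
                struct enclosure_node *dead = *p;

                *p = dead->next;
                free(dead);
                return;
            }
    }

    int main(void)
    {
        struct enclosure_node *cache = NULL;

        cache_add(&cache, 0x0001);    /* RC_ADDED */
        cache_remove(&cache, 0x0001); /* RC_NOT_RESPONDING */
        return cache != NULL;
    }
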
7329/** 7414/**
@@ -7409,7 +7494,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7409 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 7494 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7410 r = mpt3sas_scsih_issue_tm(ioc, handle, lun, 7495 r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7411 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid, 7496 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7412 st->msix_io, 30); 7497 st->msix_io, 30, 0);
7413 if (r == FAILED) { 7498 if (r == FAILED) {
7414 sdev_printk(KERN_WARNING, sdev, 7499 sdev_printk(KERN_WARNING, sdev,
7415 "mpt3sas_scsih_issue_tm: FAILED when sending " 7500 "mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -7450,7 +7535,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7450 7535
7451 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun, 7536 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7452 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid, 7537 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7453 st->msix_io, 30); 7538 st->msix_io, 30, 0);
7454 if (r == FAILED || st->cb_idx != 0xFF) { 7539 if (r == FAILED || st->cb_idx != 0xFF) {
7455 sdev_printk(KERN_WARNING, sdev, 7540 sdev_printk(KERN_WARNING, sdev,
7456 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " 7541 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -7527,6 +7612,44 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7527} 7612}
7528 7613
7529/** 7614/**
7615 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7616 * events
7617 * @ioc: per adapter object
7618 * @fw_event: The fw_event_work object
7619 * Context: user.
7620 *
7621 * Return nothing.
7622 */
7623static void
7624_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7625 struct fw_event_work *fw_event)
7626{
7627 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7628 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7629
7630 switch (event_data->ReasonCode) {
7631 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7632 pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
7633 "(handle:0x%04x, sas_address:0x%016llx,"
7634 "physical_port:0x%02x) has failed",
7635 ioc->name, le16_to_cpu(event_data->DevHandle),
7636 (unsigned long long)le64_to_cpu(event_data->SASAddress),
7637 event_data->PhysicalPort);
7638 break;
7639 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7640 pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
7641 "(handle:0x%04x, sas_address:0x%016llx,"
7642 "physical_port:0x%02x) has timed out",
7643 ioc->name, le16_to_cpu(event_data->DevHandle),
7644 (unsigned long long)le64_to_cpu(event_data->SASAddress),
7645 event_data->PhysicalPort);
7646 break;
7647 default:
7648 break;
7649 }
7650}
7651
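
The new discovery-error handler only decodes the event's ReasonCode into a warning that names the failing expander. The shape is a plain code-to-string switch; a sketch with illustrative codes, since the real MPI25_EVENT_SAS_DISC_ERR_* values are defined in the MPI headers:

    #include <stdio.h>

    /* Codes below are illustrative placeholders, not the MPI values. */
    static const char *disc_err_reason(unsigned int reason_code)
    {
        switch (reason_code) {
        case 0x1: return "SMP command to expander failed";
        case 0x2: return "SMP command to expander timed out";
        default:  return "unknown discovery error";
        }
    }

    int main(void)
    {
        puts(disc_err_reason(0x1));
        return 0;
    }
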
7652/**
7530 * _scsih_pcie_enumeration_event - handle enumeration events 7653 * _scsih_pcie_enumeration_event - handle enumeration events
7531 * @ioc: per adapter object 7654 * @ioc: per adapter object
7532 * @fw_event: The fw_event_work object 7655 * @fw_event: The fw_event_work object
@@ -8360,12 +8483,23 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
8360 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 8483 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8361 struct scsi_target *starget; 8484 struct scsi_target *starget;
8362 struct _sas_device *sas_device = NULL; 8485 struct _sas_device *sas_device = NULL;
8486 struct _enclosure_node *enclosure_dev = NULL;
8363 unsigned long flags; 8487 unsigned long flags;
8364 8488
8489 if (sas_device_pg0->EnclosureHandle) {
8490 enclosure_dev =
8491 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8492 le16_to_cpu(sas_device_pg0->EnclosureHandle));
8493 if (enclosure_dev == NULL)
8494 pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
8495 "doesn't match with enclosure device!\n",
8496 ioc->name, sas_device_pg0->EnclosureHandle);
8497 }
8365 spin_lock_irqsave(&ioc->sas_device_lock, flags); 8498 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8366 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 8499 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8367 if ((sas_device->sas_address == sas_device_pg0->SASAddress) && 8500 if ((sas_device->sas_address == le64_to_cpu(
8368 (sas_device->slot == sas_device_pg0->Slot)) { 8501 sas_device_pg0->SASAddress)) && (sas_device->slot ==
8502 le16_to_cpu(sas_device_pg0->Slot))) {
8369 sas_device->responding = 1; 8503 sas_device->responding = 1;
8370 starget = sas_device->starget; 8504 starget = sas_device->starget;
8371 if (starget && starget->hostdata) { 8505 if (starget && starget->hostdata) {
@@ -8377,7 +8511,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
8377 if (starget) { 8511 if (starget) {
8378 starget_printk(KERN_INFO, starget, 8512 starget_printk(KERN_INFO, starget,
8379 "handle(0x%04x), sas_addr(0x%016llx)\n", 8513 "handle(0x%04x), sas_addr(0x%016llx)\n",
8380 sas_device_pg0->DevHandle, 8514 le16_to_cpu(sas_device_pg0->DevHandle),
8381 (unsigned long long) 8515 (unsigned long long)
8382 sas_device->sas_address); 8516 sas_device->sas_address);
8383 8517
@@ -8389,7 +8523,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
8389 sas_device->enclosure_logical_id, 8523 sas_device->enclosure_logical_id,
8390 sas_device->slot); 8524 sas_device->slot);
8391 } 8525 }
8392 if (sas_device_pg0->Flags & 8526 if (le16_to_cpu(sas_device_pg0->Flags) &
8393 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 8527 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8394 sas_device->enclosure_level = 8528 sas_device->enclosure_level =
8395 sas_device_pg0->EnclosureLevel; 8529 sas_device_pg0->EnclosureLevel;
@@ -8400,17 +8534,30 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
8400 sas_device->connector_name[0] = '\0'; 8534 sas_device->connector_name[0] = '\0';
8401 } 8535 }
8402 8536
8403 _scsih_get_enclosure_logicalid_chassis_slot(ioc, 8537 sas_device->enclosure_handle =
8404 sas_device_pg0, sas_device); 8538 le16_to_cpu(sas_device_pg0->EnclosureHandle);
8539 sas_device->is_chassis_slot_valid = 0;
8540 if (enclosure_dev) {
8541 sas_device->enclosure_logical_id = le64_to_cpu(
8542 enclosure_dev->pg0.EnclosureLogicalID);
8543 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8544 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8545 sas_device->is_chassis_slot_valid = 1;
8546 sas_device->chassis_slot =
8547 enclosure_dev->pg0.ChassisSlot;
8548 }
8549 }
8405 8550
8406 if (sas_device->handle == sas_device_pg0->DevHandle) 8551 if (sas_device->handle == le16_to_cpu(
8552 sas_device_pg0->DevHandle))
8407 goto out; 8553 goto out;
8408 pr_info("\thandle changed from(0x%04x)!!!\n", 8554 pr_info("\thandle changed from(0x%04x)!!!\n",
8409 sas_device->handle); 8555 sas_device->handle);
8410 sas_device->handle = sas_device_pg0->DevHandle; 8556 sas_device->handle = le16_to_cpu(
8557 sas_device_pg0->DevHandle);
8411 if (sas_target_priv_data) 8558 if (sas_target_priv_data)
8412 sas_target_priv_data->handle = 8559 sas_target_priv_data->handle =
8413 sas_device_pg0->DevHandle; 8560 le16_to_cpu(sas_device_pg0->DevHandle);
8414 goto out; 8561 goto out;
8415 } 8562 }
8416 } 8563 }
@@ -8419,6 +8566,52 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
8419} 8566}
8420 8567
8421/** 8568/**
8569 * _scsih_create_enclosure_list_after_reset - Free Existing list,
8570 * And create enclosure list by scanning all Enclosure Page(0)s
8571 * @ioc: per adapter object
8572 *
8573 * Return nothing.
8574 */
8575static void
8576_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8577{
8578 struct _enclosure_node *enclosure_dev;
8579 Mpi2ConfigReply_t mpi_reply;
8580 u16 enclosure_handle;
8581 int rc;
8582
8583 /* Free existing enclosure list */
8584 mpt3sas_free_enclosure_list(ioc);
8585
8586 /* Re constructing enclosure list after reset*/
8587 enclosure_handle = 0xFFFF;
8588 do {
8589 enclosure_dev =
8590 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8591 if (!enclosure_dev) {
8592 pr_err(MPT3SAS_FMT
8593 "failure at %s:%d/%s()!\n", ioc->name,
8594 __FILE__, __LINE__, __func__);
8595 return;
8596 }
8597 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8598 &enclosure_dev->pg0,
8599 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8600 enclosure_handle);
8601
8602 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8603 MPI2_IOCSTATUS_MASK)) {
8604 kfree(enclosure_dev);
8605 return;
8606 }
8607 list_add_tail(&enclosure_dev->list,
8608 &ioc->enclosure_list);
8609 enclosure_handle =
8610 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8611 } while (1);
8612}
8613
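
_scsih_create_enclosure_list_after_reset() rebuilds the cache with a cursor-style enumeration: MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE with 0xFFFF returns the first enclosure, each reply's EnclosureHandle seeds the next request, and a non-success IOCStatus ends the loop. A runnable model of that pagination, where fw_fetch() is a made-up stand-in for the config-page call:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for mpt3sas_config_get_enclosure_pg0() in GET_NEXT_HANDLE
     * mode: 0xFFFF asks for the first entry, each reply's handle is the
     * cursor for the next request. */
    static int fw_fetch(uint16_t cursor, uint16_t *handle_out)
    {
        static const uint16_t handles[] = { 0x0009, 0x000a, 0x000b };
        static unsigned int i;

        if (cursor == 0xFFFF)
            i = 0;
        if (i >= sizeof(handles) / sizeof(handles[0]))
            return -1;          /* models IOCStatus != SUCCESS */
        *handle_out = handles[i++];
        return 0;
    }

    int main(void)
    {
        uint16_t cursor = 0xFFFF, handle;

        while (fw_fetch(cursor, &handle) == 0) {
            printf("cached enclosure handle 0x%04x\n", (unsigned)handle);
            cursor = handle;
        }
        return 0;
    }
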
8614/**
8422 * _scsih_search_responding_sas_devices - 8615 * _scsih_search_responding_sas_devices -
8423 * @ioc: per adapter object 8616 * @ioc: per adapter object
8424 * 8617 *
@@ -8449,15 +8642,10 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8449 MPI2_IOCSTATUS_MASK; 8642 MPI2_IOCSTATUS_MASK;
8450 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 8643 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8451 break; 8644 break;
8452 handle = sas_device_pg0.DevHandle = 8645 handle = le16_to_cpu(sas_device_pg0.DevHandle);
8453 le16_to_cpu(sas_device_pg0.DevHandle);
8454 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 8646 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8455 if (!(_scsih_is_end_device(device_info))) 8647 if (!(_scsih_is_end_device(device_info)))
8456 continue; 8648 continue;
8457 sas_device_pg0.SASAddress =
8458 le64_to_cpu(sas_device_pg0.SASAddress);
8459 sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
8460 sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
8461 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0); 8649 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8462 } 8650 }
8463 8651
@@ -8487,8 +8675,9 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8487 8675
8488 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 8676 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8489 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { 8677 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8490 if ((pcie_device->wwid == pcie_device_pg0->WWID) && 8678 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8491 (pcie_device->slot == pcie_device_pg0->Slot)) { 8679 && (pcie_device->slot == le16_to_cpu(
8680 pcie_device_pg0->Slot))) {
8492 pcie_device->responding = 1; 8681 pcie_device->responding = 1;
8493 starget = pcie_device->starget; 8682 starget = pcie_device->starget;
8494 if (starget && starget->hostdata) { 8683 if (starget && starget->hostdata) {
@@ -8523,14 +8712,16 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8523 pcie_device->connector_name[0] = '\0'; 8712 pcie_device->connector_name[0] = '\0';
8524 } 8713 }
8525 8714
8526 if (pcie_device->handle == pcie_device_pg0->DevHandle) 8715 if (pcie_device->handle == le16_to_cpu(
8716 pcie_device_pg0->DevHandle))
8527 goto out; 8717 goto out;
8528 pr_info("\thandle changed from(0x%04x)!!!\n", 8718 pr_info("\thandle changed from(0x%04x)!!!\n",
8529 pcie_device->handle); 8719 pcie_device->handle);
8530 pcie_device->handle = pcie_device_pg0->DevHandle; 8720 pcie_device->handle = le16_to_cpu(
8721 pcie_device_pg0->DevHandle);
8531 if (sas_target_priv_data) 8722 if (sas_target_priv_data)
8532 sas_target_priv_data->handle = 8723 sas_target_priv_data->handle =
8533 pcie_device_pg0->DevHandle; 8724 le16_to_cpu(pcie_device_pg0->DevHandle);
8534 goto out; 8725 goto out;
8535 } 8726 }
8536 } 8727 }
@@ -8579,10 +8770,6 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8579 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 8770 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8580 if (!(_scsih_is_nvme_device(device_info))) 8771 if (!(_scsih_is_nvme_device(device_info)))
8581 continue; 8772 continue;
8582 pcie_device_pg0.WWID = le64_to_cpu(pcie_device_pg0.WWID),
8583 pcie_device_pg0.Slot = le16_to_cpu(pcie_device_pg0.Slot);
8584 pcie_device_pg0.Flags = le32_to_cpu(pcie_device_pg0.Flags);
8585 pcie_device_pg0.DevHandle = handle;
8586 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); 8773 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8587 } 8774 }
8588out: 8775out:
@@ -8736,22 +8923,16 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8736{ 8923{
8737 struct _sas_node *sas_expander = NULL; 8924 struct _sas_node *sas_expander = NULL;
8738 unsigned long flags; 8925 unsigned long flags;
8739 int i, encl_pg0_rc = -1; 8926 int i;
8740 Mpi2ConfigReply_t mpi_reply; 8927 struct _enclosure_node *enclosure_dev = NULL;
8741 Mpi2SasEnclosurePage0_t enclosure_pg0;
8742 u16 handle = le16_to_cpu(expander_pg0->DevHandle); 8928 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8929 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
8743 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress); 8930 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8744 8931
8745 if (le16_to_cpu(expander_pg0->EnclosureHandle)) { 8932 if (enclosure_handle)
8746 encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 8933 enclosure_dev =
8747 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 8934 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8748 le16_to_cpu(expander_pg0->EnclosureHandle)); 8935 enclosure_handle);
8749 if (encl_pg0_rc)
8750 pr_info(MPT3SAS_FMT
8751 "Enclosure Pg0 read failed for handle(0x%04x)\n",
8752 ioc->name,
8753 le16_to_cpu(expander_pg0->EnclosureHandle));
8754 }
8755 8936
8756 spin_lock_irqsave(&ioc->sas_node_lock, flags); 8937 spin_lock_irqsave(&ioc->sas_node_lock, flags);
8757 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 8938 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
@@ -8759,12 +8940,12 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8759 continue; 8940 continue;
8760 sas_expander->responding = 1; 8941 sas_expander->responding = 1;
8761 8942
8762 if (!encl_pg0_rc) 8943 if (enclosure_dev) {
8763 sas_expander->enclosure_logical_id = 8944 sas_expander->enclosure_logical_id =
8764 le64_to_cpu(enclosure_pg0.EnclosureLogicalID); 8945 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8765 8946 sas_expander->enclosure_handle =
8766 sas_expander->enclosure_handle = 8947 le16_to_cpu(expander_pg0->EnclosureHandle);
8767 le16_to_cpu(expander_pg0->EnclosureHandle); 8948 }
8768 8949
8769 if (sas_expander->handle == handle) 8950 if (sas_expander->handle == handle)
8770 goto out; 8951 goto out;
@@ -9286,6 +9467,7 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
9286 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && 9467 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9287 !ioc->sas_hba.num_phys)) { 9468 !ioc->sas_hba.num_phys)) {
9288 _scsih_prep_device_scan(ioc); 9469 _scsih_prep_device_scan(ioc);
9470 _scsih_create_enclosure_list_after_reset(ioc);
9289 _scsih_search_responding_sas_devices(ioc); 9471 _scsih_search_responding_sas_devices(ioc);
9290 _scsih_search_responding_pcie_devices(ioc); 9472 _scsih_search_responding_pcie_devices(ioc);
9291 _scsih_search_responding_raid_devices(ioc); 9473 _scsih_search_responding_raid_devices(ioc);
@@ -9356,6 +9538,9 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9356 case MPI2_EVENT_SAS_DISCOVERY: 9538 case MPI2_EVENT_SAS_DISCOVERY:
9357 _scsih_sas_discovery_event(ioc, fw_event); 9539 _scsih_sas_discovery_event(ioc, fw_event);
9358 break; 9540 break;
9541 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9542 _scsih_sas_device_discovery_error_event(ioc, fw_event);
9543 break;
9359 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 9544 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9360 _scsih_sas_broadcast_primitive_event(ioc, fw_event); 9545 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
9361 break; 9546 break;
@@ -9433,8 +9618,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9433 u16 sz; 9618 u16 sz;
9434 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; 9619 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9435 9620
9436 /* events turned off due to host reset or driver unloading */ 9621 /* events turned off due to host reset */
9437 if (ioc->remove_host || ioc->pci_error_recovery) 9622 if (ioc->pci_error_recovery)
9438 return 1; 9623 return 1;
9439 9624
9440 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 9625 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
@@ -9540,6 +9725,7 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9540 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 9725 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9541 case MPI2_EVENT_IR_OPERATION_STATUS: 9726 case MPI2_EVENT_IR_OPERATION_STATUS:
9542 case MPI2_EVENT_SAS_DISCOVERY: 9727 case MPI2_EVENT_SAS_DISCOVERY:
9728 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9543 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 9729 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9544 case MPI2_EVENT_IR_PHYSICAL_DISK: 9730 case MPI2_EVENT_IR_PHYSICAL_DISK:
9545 case MPI2_EVENT_PCIE_ENUMERATION: 9731 case MPI2_EVENT_PCIE_ENUMERATION:
@@ -10513,6 +10699,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10513 INIT_LIST_HEAD(&ioc->sas_device_list); 10699 INIT_LIST_HEAD(&ioc->sas_device_list);
10514 INIT_LIST_HEAD(&ioc->sas_device_init_list); 10700 INIT_LIST_HEAD(&ioc->sas_device_init_list);
10515 INIT_LIST_HEAD(&ioc->sas_expander_list); 10701 INIT_LIST_HEAD(&ioc->sas_expander_list);
10702 INIT_LIST_HEAD(&ioc->enclosure_list);
10516 INIT_LIST_HEAD(&ioc->pcie_device_list); 10703 INIT_LIST_HEAD(&ioc->pcie_device_list);
10517 INIT_LIST_HEAD(&ioc->pcie_device_init_list); 10704 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10518 INIT_LIST_HEAD(&ioc->fw_event_list); 10705 INIT_LIST_HEAD(&ioc->fw_event_list);
@@ -11100,10 +11287,10 @@ _mpt3sas_exit(void)
11100 pr_info("mpt3sas version %s unloading\n", 11287 pr_info("mpt3sas version %s unloading\n",
11101 MPT3SAS_DRIVER_VERSION); 11288 MPT3SAS_DRIVER_VERSION);
11102 11289
11103 pci_unregister_driver(&mpt3sas_driver);
11104
11105 mpt3sas_ctl_exit(hbas_to_enumerate); 11290 mpt3sas_ctl_exit(hbas_to_enumerate);
11106 11291
11292 pci_unregister_driver(&mpt3sas_driver);
11293
11107 scsih_exit(); 11294 scsih_exit();
11108} 11295}
11109 11296
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index 6bfcee4757e0..45aa94915cbf 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -177,7 +177,8 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
177 if (mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 177 if (mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
178 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, 178 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
179 vol_pg0->PhysDisk[count].PhysDiskNum) || 179 vol_pg0->PhysDisk[count].PhysDiskNum) ||
180 pd_pg0.DevHandle == MPT3SAS_INVALID_DEVICE_HANDLE) { 180 le16_to_cpu(pd_pg0.DevHandle) ==
181 MPT3SAS_INVALID_DEVICE_HANDLE) {
181 pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is " 182 pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
182 "disabled for the drive with handle(0x%04x) member" 183 "disabled for the drive with handle(0x%04x) member"
183 "handle retrieval failed for member number=%d\n", 184 "handle retrieval failed for member number=%d\n",
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index afd27165cd93..b3cd9a6b1d30 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2693,22 +2693,4 @@ static struct pci_driver mvumi_pci_driver = {
2693#endif 2693#endif
2694}; 2694};
2695 2695
2696/** 2696module_pci_driver(mvumi_pci_driver);
2697 * mvumi_init - Driver load entry point
2698 */
2699static int __init mvumi_init(void)
2700{
2701 return pci_register_driver(&mvumi_pci_driver);
2702}
2703
2704/**
2705 * mvumi_exit - Driver unload entry point
2706 */
2707static void __exit mvumi_exit(void)
2708{
2709
2710 pci_unregister_driver(&mvumi_pci_driver);
2711}
2712
2713module_init(mvumi_init);
2714module_exit(mvumi_exit);
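
The mvumi change is pure boilerplate removal: module_pci_driver() from <linux/pci.h> generates exactly the module_init()/module_exit() pair that registered and unregistered the pci_driver by hand. A minimal usage sketch (builds only against kernel headers; the driver name and ops are elided placeholders):

    #include <linux/module.h>
    #include <linux/pci.h>

    static struct pci_driver example_pci_driver = {
        .name = "example",
        /* .id_table, .probe, .remove as in the real driver */
    };

    /* Expands to module_init()/module_exit() wrappers around
     * pci_register_driver()/pci_unregister_driver(), which is what the
     * removed mvumi_init()/mvumi_exit() did by hand. */
    module_pci_driver(example_pci_driver);

    MODULE_LICENSE("GPL");
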
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 5a33e1ad9881..67b14576fff2 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1840,14 +1840,14 @@ int osd_req_decode_sense_full(struct osd_request *or,
1840 case osd_sense_response_integrity_check: 1840 case osd_sense_response_integrity_check:
1841 { 1841 {
1842 struct osd_sense_response_integrity_check_descriptor 1842 struct osd_sense_response_integrity_check_descriptor
1843 *osricd = cur_descriptor; 1843 *d = cur_descriptor;
1844 const unsigned len = 1844 /* 2nibbles+space+ASCII */
1845 sizeof(osricd->integrity_check_value); 1845 char dump[sizeof(d->integrity_check_value) * 4 + 2];
1846 char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */ 1846
1847 1847 hex_dump_to_buffer(d->integrity_check_value,
1848 hex_dump_to_buffer(osricd->integrity_check_value, len, 1848 sizeof(d->integrity_check_value),
1849 32, 1, key_dump, sizeof(key_dump), true); 1849 32, 1, dump, sizeof(dump), true);
1850 OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump); 1850 OSD_SENSE_PRINT2("response_integrity [%s]\n", dump);
1851 } 1851 }
1852 case osd_sense_attribute_identification: 1852 case osd_sense_attribute_identification:
1853 { 1853 {
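
The osd_initiator hunk removes a variable-length array: the old key_dump[len*4 + 2] was sized from a runtime value, while the new buffer is sized at compile time with sizeof on the struct member (four output characters per input byte: two hex nibbles plus spacing and the ASCII column, per the retained comment). A userspace illustration of the fixed-size pattern; the 32-byte field and the formatting loop are stand-ins for the OSD descriptor and the kernel's hex_dump_to_buffer():

    #include <stdio.h>
    #include <string.h>

    struct descriptor {
        unsigned char integrity_check_value[32];  /* illustrative size */
    };

    int main(void)
    {
        struct descriptor d;
        /* Sized at compile time from the member itself, not a VLA. */
        char dump[sizeof(d.integrity_check_value) * 4 + 2];
        size_t i;

        memset(d.integrity_check_value, 0xab,
               sizeof(d.integrity_check_value));
        for (i = 0; i < sizeof(d.integrity_check_value); i++)
            snprintf(dump + i * 3, 4, "%02x ",
                     d.integrity_check_value[i]);
        printf("[%s]\n", dump);
        return 0;
    }
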
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index db88a8e7ee0e..4dd6cad330e8 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3607,7 +3607,7 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3607 break; 3607 break;
3608 default: 3608 default:
3609 PM8001_MSG_DBG(pm8001_ha, 3609 PM8001_MSG_DBG(pm8001_ha,
3610 pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_UNSORPORTED\n")); 3610 pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n"));
3611 break; 3611 break;
3612 } 3612 }
3613 complete(pm8001_dev->dcompletion); 3613 complete(pm8001_dev->dcompletion);
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index a980ef756a67..5bd10b534c99 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -1,5 +1,5 @@
1/* QLogic FCoE Offload Driver 1/* QLogic FCoE Offload Driver
2 * Copyright (c) 2016-2017 Cavium Inc. 2 * Copyright (c) 2016-2018 Cavium Inc.
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index b5c236efd465..42fde55ac735 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -1,5 +1,5 @@
1/* QLogic FCoE Offload Driver 1/* QLogic FCoE Offload Driver
2 * Copyright (c) 2016-2017 Cavium Inc. 2 * Copyright (c) 2016-2018 Cavium Inc.
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
index 5d5095e3d96d..29a55257224f 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -1,5 +1,5 @@
1/* QLogic FCoE Offload Driver 1/* QLogic FCoE Offload Driver
2 * Copyright (c) 2016-2017 Cavium Inc. 2 * Copyright (c) 2016-2018 Cavium Inc.
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
index 8fbe6e4d0b4f..bf102204fe56 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -1,5 +1,5 @@
1/* QLogic FCoE Offload Driver 1/* QLogic FCoE Offload Driver
2 * Copyright (c) 2016-2017 Cavium Inc. 2 * Copyright (c) 2016-2018 Cavium Inc.
3 * 3 *
4 * This software is available under the terms of the GNU General Public License 4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index cabb6af60fb8..2c78d8fb9122 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic FCoE Offload Driver 2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016-2017 Cavium Inc. 3 * Copyright (c) 2016-2018 Cavium Inc.
4 * 4 *
5 * This software is available under the terms of the GNU General Public License 5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * (GPL) Version 2, available from the file COPYING in the main directory of
@@ -180,6 +180,7 @@ struct qedf_rport {
180 spinlock_t rport_lock; 180 spinlock_t rport_lock;
181#define QEDF_RPORT_SESSION_READY 1 181#define QEDF_RPORT_SESSION_READY 1
182#define QEDF_RPORT_UPLOADING_CONNECTION 2 182#define QEDF_RPORT_UPLOADING_CONNECTION 2
183#define QEDF_RPORT_IN_RESET 3
183 unsigned long flags; 184 unsigned long flags;
184 unsigned long retry_delay_timestamp; 185 unsigned long retry_delay_timestamp;
185 struct fc_rport *rport; 186 struct fc_rport *rport;
@@ -300,6 +301,7 @@ struct qedf_ctx {
300#define QEDF_FALLBACK_VLAN 1002 301#define QEDF_FALLBACK_VLAN 1002
301#define QEDF_DEFAULT_PRIO 3 302#define QEDF_DEFAULT_PRIO 3
302 int vlan_id; 303 int vlan_id;
304 u8 prio;
303 struct qed_dev *cdev; 305 struct qed_dev *cdev;
304 struct qed_dev_fcoe_info dev_info; 306 struct qed_dev_fcoe_info dev_info;
305 struct qed_int_info int_info; 307 struct qed_int_info int_info;
@@ -365,6 +367,7 @@ struct qedf_ctx {
365#define QEDF_IO_WORK_MIN 64 367#define QEDF_IO_WORK_MIN 64
366 mempool_t *io_mempool; 368 mempool_t *io_mempool;
367 struct workqueue_struct *dpc_wq; 369 struct workqueue_struct *dpc_wq;
370 struct delayed_work grcdump_work;
368 371
369 u32 slow_sge_ios; 372 u32 slow_sge_ios;
370 u32 fast_sge_ios; 373 u32 fast_sge_ios;
@@ -504,6 +507,7 @@ extern int qedf_send_flogi(struct qedf_ctx *qedf);
504extern void qedf_get_protocol_tlv_data(void *dev, void *data); 507extern void qedf_get_protocol_tlv_data(void *dev, void *data);
505extern void qedf_fp_io_handler(struct work_struct *work); 508extern void qedf_fp_io_handler(struct work_struct *work);
506extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data); 509extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
510extern void qedf_wq_grcdump(struct work_struct *work);
507 511
508#define FCOE_WORD_TO_BYTE 4 512#define FCOE_WORD_TO_BYTE 4
509#define QEDF_MAX_TASK_NUM 0xFFFF 513#define QEDF_MAX_TASK_NUM 0xFFFF
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index fa6727685627..0487b7237104 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic FCoE Offload Driver 2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016-2017 Cavium Inc. 3 * Copyright (c) 2016-2018 Cavium Inc.
4 * 4 *
5 * This software is available under the terms of the GNU General Public License 5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c
index bd1cef25a900..f2397ee9ba69 100644
--- a/drivers/scsi/qedf/qedf_dbg.c
+++ b/drivers/scsi/qedf/qedf_dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic FCoE Offload Driver 2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016 Cavium Inc. 3 * Copyright (c) 2016-2018 Cavium Inc.
4 * 4 *
5 * This software is available under the terms of the GNU General Public License 5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * (GPL) Version 2, available from the file COPYING in the main directory of
@@ -147,7 +147,7 @@ qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common,
147 if (!*buf) 147 if (!*buf)
148 return -EINVAL; 148 return -EINVAL;
149 149
150 return common->dbg_grc(cdev, *buf, grcsize); 150 return common->dbg_all_data(cdev, *buf);
151} 151}
152 152
153void 153void
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 77c27e888969..dd0109653aa3 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic FCoE Offload Driver 2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016-2017 Cavium Inc. 3 * Copyright (c) 2016-2018 Cavium Inc.
4 * 4 *
5 * This software is available under the terms of the GNU General Public License 5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index 5789ce185923..c29c162a494f 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic FCoE Offload Driver 2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016-2017 QLogic Corporation 3 * Copyright (c) 2016-2018 QLogic Corporation
4 * 4 *
5 * This software is available under the terms of the GNU General Public License 5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index aa22b11436ba..04f0c4d2e256 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -1,6 +1,6 @@
 /*
  * QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
  *
  * This software is available under the terms of the GNU General Public License
  * (GPL) Version 2, available from the file COPYING in the main directory of
@@ -14,8 +14,8 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
     void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
     struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
 {
-    struct qedf_ctx *qedf = fcport->qedf;
-    struct fc_lport *lport = qedf->lport;
+    struct qedf_ctx *qedf;
+    struct fc_lport *lport;
     struct qedf_ioreq *els_req;
     struct qedf_mp_req *mp_req;
     struct fc_frame_header *fc_hdr;
@@ -29,6 +29,15 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
     unsigned long flags;
     u16 sqe_idx;
 
+    if (!fcport) {
+        QEDF_ERR(NULL, "fcport is NULL");
+        rc = -EINVAL;
+        goto els_err;
+    }
+
+    qedf = fcport->qedf;
+    lport = qedf->lport;
+
     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
 
     rc = fc_remote_port_chkready(fcport->rport);
@@ -201,6 +210,14 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
     kref_put(&orig_io_req->refcount, qedf_release_cmd);
 
 out_free:
+    /*
+     * Release a reference to the rrq request if we timed out as the
+     * rrq completion handler is called directly from the timeout handler
+     * and not from els_compl where the reference would have normally been
+     * released.
+     */
+    if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
+        kref_put(&rrq_req->refcount, qedf_release_cmd);
     kfree(cb_arg);
 }
 
@@ -322,6 +339,17 @@ void qedf_restart_rport(struct qedf_rport *fcport)
     if (!fcport)
         return;
 
+    if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
+        !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
+        test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+        QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
+            fcport);
+        return;
+    }
+
+    /* Set that we are now in reset */
+    set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
+
     rdata = fcport->rdata;
     if (rdata) {
         lport = fcport->qedf->lport;
@@ -334,6 +362,7 @@ void qedf_restart_rport(struct qedf_rport *fcport)
         if (rdata)
             fc_rport_login(rdata);
     }
+    clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
 }
 
 static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 16d1a21cdff9..3fd3af799b3d 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -1,6 +1,6 @@
 /*
  * QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
  *
  * This software is available under the terms of the GNU General Public License
  * (GPL) Version 2, available from the file COPYING in the main directory of
@@ -137,7 +137,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 
     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: "
         "dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub,
-        ntohs(vlan_tci));
+        vlan_tci);
     if (qedf_dump_frames)
         print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
             skb->data, skb->len, false);
@@ -184,6 +184,7 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
             "Dropping CVL since FCF has not been selected "
             "yet.");
+        kfree_skb(skb);
         return;
     }
 
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index 503c1ae3ccd0..f6f634e48d69 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -1,6 +1,6 @@
 /*
  * QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
  *
  * This software is available under the terms of the GNU General Public License
  * (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 3fe579d0f1a8..6bbc38b1b465 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1,6 +1,6 @@
 /*
  * QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
  *
  * This software is available under the terms of the GNU General Public License
  * (GPL) Version 2, available from the file COPYING in the main directory of
@@ -23,12 +23,31 @@ static void qedf_cmd_timeout(struct work_struct *work)
 
     struct qedf_ioreq *io_req =
         container_of(work, struct qedf_ioreq, timeout_work.work);
-    struct qedf_ctx *qedf = io_req->fcport->qedf;
-    struct qedf_rport *fcport = io_req->fcport;
+    struct qedf_ctx *qedf;
+    struct qedf_rport *fcport;
     u8 op = 0;
 
+    if (io_req == NULL) {
+        QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
+        return;
+    }
+
+    fcport = io_req->fcport;
+    if (io_req->fcport == NULL) {
+        QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
+        return;
+    }
+
+    qedf = fcport->qedf;
+
     switch (io_req->cmd_type) {
     case QEDF_ABTS:
+        if (qedf == NULL) {
+            QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
+                io_req->xid);
+            return;
+        }
+
         QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
             io_req->xid);
         /* Cleanup timed out ABTS */
@@ -931,6 +950,15 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
         return 0;
     }
 
+    if (!qedf->pdev->msix_enabled) {
+        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+            "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
+            sc_cmd);
+        sc_cmd->result = DID_NO_CONNECT << 16;
+        sc_cmd->scsi_done(sc_cmd);
+        return 0;
+    }
+
     rval = fc_remote_port_chkready(rport);
     if (rval) {
         sc_cmd->result = rval;
@@ -1420,6 +1448,12 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
     if (!fcport)
         return;
 
+    /* Check that fcport is still offloaded */
+    if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+        QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
+        return;
+    }
+
     qedf = fcport->qedf;
     cmd_mgr = qedf->cmd_mgr;
 
@@ -1436,8 +1470,8 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
             rc = kref_get_unless_zero(&io_req->refcount);
             if (!rc) {
                 QEDF_ERR(&(qedf->dbg_ctx),
-                    "Could not get kref for io_req=0x%p.\n",
-                    io_req);
+                    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
+                    io_req, io_req->xid);
                 continue;
             }
             qedf_flush_els_req(qedf, io_req);
@@ -1448,6 +1482,31 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
             goto free_cmd;
         }
 
+        if (io_req->cmd_type == QEDF_ABTS) {
+            rc = kref_get_unless_zero(&io_req->refcount);
+            if (!rc) {
+                QEDF_ERR(&(qedf->dbg_ctx),
+                    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
+                    io_req, io_req->xid);
+                continue;
+            }
+            QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+                "Flushing abort xid=0x%x.\n", io_req->xid);
+
+            clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+
+            if (io_req->sc_cmd) {
+                if (io_req->return_scsi_cmd_on_abts)
+                    qedf_scsi_done(qedf, io_req, DID_ERROR);
+            }
+
+            /* Notify eh_abort handler that ABTS is complete */
+            complete(&io_req->abts_done);
+            kref_put(&io_req->refcount, qedf_release_cmd);
+
+            goto free_cmd;
+        }
+
         if (!io_req->sc_cmd)
             continue;
         if (lun > 0) {
@@ -1463,7 +1522,7 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
         rc = kref_get_unless_zero(&io_req->refcount);
         if (!rc) {
             QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
-                "io_req=0x%p\n", io_req);
+                "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
             continue;
         }
         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
@@ -1525,6 +1584,21 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
         goto abts_err;
     }
 
+    if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+        QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
+        rc = 1;
+        goto out;
+    }
+
+    if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+        test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
+        test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
+        QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
+            "cleanup or abort processing or already "
+            "completed.\n", io_req->xid);
+        rc = 1;
+        goto out;
+    }
 
     kref_get(&io_req->refcount);
 
@@ -1564,6 +1638,7 @@ abts_err:
      * task at the firmware.
      */
     qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
+out:
     return rc;
 }
 
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d3f73d8d7738..90394cef0f41 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1,6 +1,6 @@
 /*
  * QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
  *
  * This software is available under the terms of the GNU General Public License
  * (GPL) Version 2, available from the file COPYING in the main directory of
@@ -44,20 +44,20 @@ module_param_named(debug, qedf_debug, uint, S_IRUGO);
 MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
     " mask");
 
-static uint qedf_fipvlan_retries = 30;
+static uint qedf_fipvlan_retries = 60;
 module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
 MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
-    "before giving up (default 30)");
+    "before giving up (default 60)");
 
 static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
 module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
 MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
     "(default 1002).");
 
-static uint qedf_default_prio = QEDF_DEFAULT_PRIO;
+static int qedf_default_prio = -1;
 module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
-MODULE_PARM_DESC(default_prio, " Default 802.1q priority for FIP and FCoE"
-    " traffic (default 3).");
+MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
+    " traffic (value between 0 and 7, default 3).");
 
 uint qedf_dump_frames;
 module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
@@ -89,6 +89,11 @@ module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry "
     "delay handling (default off).");
 
+static bool qedf_dcbx_no_wait;
+module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
+    "sending FIP VLAN requests on link up (Default: off).");
+
 static uint qedf_dp_module;
 module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
 MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed "
@@ -109,9 +114,9 @@ static struct kmem_cache *qedf_io_work_cache;
 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
 {
     qedf->vlan_id = vlan_id;
-    qedf->vlan_id |= qedf_default_prio << VLAN_PRIO_SHIFT;
+    qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT;
     QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
-        "prio=%d.\n", vlan_id, qedf_default_prio);
+        "prio=%d.\n", vlan_id, qedf->prio);
 }
 
 /* Returns true if we have a valid vlan, false otherwise */
@@ -480,6 +485,11 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
     struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
 
     if (link->link_up) {
+        if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
+            QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
+                "Ignoring link up event as link is already up.\n");
+            return;
+        }
         QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
             link->speed / 1000);
 
@@ -489,7 +499,8 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
         atomic_set(&qedf->link_state, QEDF_LINK_UP);
         qedf_update_link_speed(qedf, link);
 
-        if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
+        if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
+            qedf_dcbx_no_wait) {
             QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                 "DCBx done.\n");
             if (atomic_read(&qedf->link_down_tmo_valid) > 0)
@@ -515,7 +526,7 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
                 "Starting link down tmo.\n");
             atomic_set(&qedf->link_down_tmo_valid, 1);
         }
-        qedf->vlan_id = 0;
+        qedf->vlan_id = 0;
         qedf_update_link_speed(qedf, link);
         queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
             qedf_link_down_tmo * HZ);
@@ -526,6 +537,7 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
 static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
 {
     struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+    u8 tmp_prio;
 
     QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
         "prio=%d.\n", get->operational.valid, get->operational.enabled,
@@ -541,7 +553,26 @@ static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
 
     atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
 
-    if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
+    /*
+     * Set the 8021q priority in the following manner:
+     *
+     * 1. If a modparam is set use that
+     * 2. If the value is not between 0..7 use the default
+     * 3. Use the priority we get from the DCBX app tag
+     */
+    tmp_prio = get->operational.app_prio.fcoe;
+    if (qedf_default_prio > -1)
+        qedf->prio = qedf_default_prio;
+    else if (tmp_prio < 0 || tmp_prio > 7) {
+        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+            "FIP/FCoE prio %d out of range, setting to %d.\n",
+            tmp_prio, QEDF_DEFAULT_PRIO);
+        qedf->prio = QEDF_DEFAULT_PRIO;
+    } else
+        qedf->prio = tmp_prio;
+
+    if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
+        !qedf_dcbx_no_wait) {
         if (atomic_read(&qedf->link_down_tmo_valid) > 0)
             queue_delayed_work(qedf->link_update_wq,
                 &qedf->link_recovery, 0);
@@ -614,16 +645,6 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
         goto out;
     }
 
-    if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
-        test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
-        test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
-        QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
-            "cleanup or abort processing or already "
-            "completed.\n", io_req->xid);
-        rc = SUCCESS;
-        goto out;
-    }
-
     QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
         "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
 
@@ -705,7 +726,6 @@ static void qedf_ctx_soft_reset(struct fc_lport *lport)
 
     /* For host reset, essentially do a soft link up/down */
     atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
-    atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
     queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
         0);
     qedf_wait_for_upload(qedf);
@@ -720,6 +740,22 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
 {
     struct fc_lport *lport;
     struct qedf_ctx *qedf;
+    struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+    struct fc_rport_libfc_priv *rp = rport->dd_data;
+    struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
+    int rval;
+
+    rval = fc_remote_port_chkready(rport);
+
+    if (rval) {
+        QEDF_ERR(NULL, "device_reset rport not ready\n");
+        return FAILED;
+    }
+
+    if (fcport == NULL) {
+        QEDF_ERR(NULL, "device_reset: rport is NULL\n");
+        return FAILED;
+    }
 
     lport = shost_priv(sc_cmd->device->host);
     qedf = lport_priv(lport);
@@ -1109,7 +1145,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
     conn_info.vlan_tag = qedf->vlan_id <<
         FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
     conn_info.vlan_tag |=
-        qedf_default_prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
+        qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
     conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
         FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
 
@@ -1649,6 +1685,15 @@ static int qedf_vport_destroy(struct fc_vport *vport)
     struct Scsi_Host *shost = vport_to_shost(vport);
     struct fc_lport *n_port = shost_priv(shost);
     struct fc_lport *vn_port = vport->dd_data;
+    struct qedf_ctx *qedf = lport_priv(vn_port);
+
+    if (!qedf) {
+        QEDF_ERR(NULL, "qedf is NULL.\n");
+        goto out;
+    }
+
+    /* Set unloading bit on vport qedf_ctx to prevent more I/O */
+    set_bit(QEDF_UNLOADING, &qedf->flags);
 
     mutex_lock(&n_port->lp_mutex);
     list_del(&vn_port->list);
@@ -1675,6 +1720,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
     if (vn_port->host)
         scsi_host_put(vn_port->host);
 
+out:
     return 0;
 }
 
@@ -2109,7 +2155,8 @@ static int qedf_setup_int(struct qedf_ctx *qedf)
         QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
     qedf->int_info.used_cnt = 1;
 
-    return 0;
+    QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n");
+    return -EINVAL;
 }
 
 /* Main function for libfc frame reception */
@@ -2195,6 +2242,7 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
     if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
         QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
             "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
+        kfree_skb(skb);
         return;
     }
 
@@ -2983,8 +3031,17 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
     qedf->link_update_wq = create_workqueue(host_buf);
     INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
     INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
-
+    INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
     qedf->fipvlan_retries = qedf_fipvlan_retries;
+    /* Set a default prio in case DCBX doesn't converge */
+    if (qedf_default_prio > -1) {
+        /*
+         * This is the case where we pass a modparam in so we want to
+         * honor it even if dcbx doesn't converge.
+         */
+        qedf->prio = qedf_default_prio;
+    } else
+        qedf->prio = QEDF_DEFAULT_PRIO;
 
     /*
      * Common probe. Takes care of basic hardware init and pci_*
@@ -3214,7 +3271,8 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
      * unload process.
      */
     if (mode != QEDF_MODE_RECOVERY) {
-        qedf->grcdump_size = qed_ops->common->dbg_grc_size(qedf->cdev);
+        qedf->grcdump_size =
+            qed_ops->common->dbg_all_data_size(qedf->cdev);
         if (qedf->grcdump_size) {
             rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
                 qedf->grcdump_size);
@@ -3398,6 +3456,15 @@ static void qedf_remove(struct pci_dev *pdev)
     __qedf_remove(pdev, QEDF_MODE_NORMAL);
 }
 
+void qedf_wq_grcdump(struct work_struct *work)
+{
+    struct qedf_ctx *qedf =
+        container_of(work, struct qedf_ctx, grcdump_work.work);
+
+    QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
+    qedf_capture_grc_dump(qedf);
+}
+
 /*
  * Protocol TLV handler
  */
@@ -3508,6 +3575,17 @@ static int __init qedf_init(void)
     if (qedf_debug == QEDF_LOG_DEFAULT)
         qedf_debug = QEDF_DEFAULT_LOG_MASK;
 
+    /*
+     * Check that default prio for FIP/FCoE traffic is between 0..7 if a
+     * value has been set
+     */
+    if (qedf_default_prio > -1)
+        if (qedf_default_prio > 7) {
+            qedf_default_prio = QEDF_DEFAULT_PRIO;
+            QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n",
+                QEDF_DEFAULT_PRIO);
+        }
+
     /* Print driver banner */
     QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
         QEDF_VERSION);
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index c2478056356a..9455faacd5de 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -1,15 +1,15 @@
 /*
  * QLogic FCoE Offload Driver
- * Copyright (c) 2016-2017 Cavium Inc.
+ * Copyright (c) 2016-2018 Cavium Inc.
  *
  * This software is available under the terms of the GNU General Public License
  * (GPL) Version 2, available from the file COPYING in the main directory of
  * this source tree.
  */
 
-#define QEDF_VERSION "8.33.0.20"
+#define QEDF_VERSION "8.33.16.20"
 #define QEDF_DRIVER_MAJOR_VER 8
 #define QEDF_DRIVER_MINOR_VER 33
-#define QEDF_DRIVER_REV_VER 0
+#define QEDF_DRIVER_REV_VER 16
 #define QEDF_DRIVER_ENG_VER 20
 
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index eb2ec1fb07cb..9442e18aef6f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2279,8 +2279,6 @@ enum discovery_state {
     DSC_LOGIN_PEND,
     DSC_LOGIN_FAILED,
     DSC_GPDB,
-    DSC_GFPN_ID,
-    DSC_GPSC,
     DSC_UPD_FCPORT,
     DSC_LOGIN_COMPLETE,
     DSC_ADISC,
@@ -2346,6 +2344,7 @@ typedef struct fc_port {
     unsigned int login_succ:1;
     unsigned int query:1;
     unsigned int id_changed:1;
+    unsigned int rscn_rcvd:1;
 
     struct work_struct nvme_del_work;
     struct completion nvme_del_done;
@@ -3226,6 +3225,7 @@ enum qla_work_type {
     QLA_EVT_GNNID,
    QLA_EVT_GFPNID,
     QLA_EVT_SP_RETRY,
+    QLA_EVT_IIDMA,
 };
 
 
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3c4c84ed0f0f..f68eb6096559 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -116,7 +116,8 @@ extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
     fc_port_t *, uint16_t *);
-
+int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
+void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
 /*
  * Global Data in qla_os.c source file.
  */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 9e914f9c3ffb..4bc2b66b299f 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3175,7 +3175,6 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 done_free_sp:
     sp->free(sp);
-    fcport->flags &= ~FCF_ASYNC_SENT;
 done:
     fcport->flags &= ~FCF_ASYNC_ACTIVE;
     return rval;
@@ -3239,7 +3238,7 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
         return;
     }
 
-    qla24xx_post_upd_fcport_work(vha, ea->fcport);
+    qla_post_iidma_work(vha, fcport);
 }
 
 static void qla24xx_async_gpsc_sp_done(void *s, int res)
@@ -3257,8 +3256,6 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
         "Async done-%s res %x, WWPN %8phC \n",
         sp->name, res, fcport->port_name);
 
-    fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
     if (res == (DID_ERROR << 16)) {
         /* entry status error */
         goto done;
@@ -3327,7 +3324,6 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
     if (!sp)
         goto done;
 
-    fcport->flags |= FCF_ASYNC_SENT;
     sp->type = SRB_CT_PTHRU_CMD;
     sp->name = "gpsc";
     sp->gen1 = fcport->rscn_gen;
@@ -3862,6 +3858,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
     bool found;
     struct fab_scan_rp *rp;
     unsigned long flags;
+    u8 recheck = 0;
 
     ql_dbg(ql_dbg_disc, vha, 0xffff,
         "%s enter\n", __func__);
@@ -3914,8 +3911,8 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
         list_for_each_entry(fcport, &vha->vp_fcports, list) {
             if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
                 continue;
+            fcport->rscn_rcvd = 0;
             fcport->scan_state = QLA_FCPORT_FOUND;
-            fcport->d_id.b24 = rp->id.b24;
             found = true;
             /*
              * If device was not a fabric device before.
@@ -3923,7 +3920,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
             if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
                 qla2x00_clear_loop_id(fcport);
                 fcport->flags |= FCF_FABRIC_DEVICE;
+            } else if (fcport->d_id.b24 != rp->id.b24) {
+                qlt_schedule_sess_for_deletion(fcport);
             }
+            fcport->d_id.b24 = rp->id.b24;
             break;
         }
 
@@ -3940,10 +3940,13 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
      * Logout all previous fabric dev marked lost, except FCP2 devices.
      */
     list_for_each_entry(fcport, &vha->vp_fcports, list) {
-        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+            fcport->rscn_rcvd = 0;
             continue;
+        }
 
         if (fcport->scan_state != QLA_FCPORT_FOUND) {
+            fcport->rscn_rcvd = 0;
             if ((qla_dual_mode_enabled(vha) ||
                 qla_ini_mode_enabled(vha)) &&
                 atomic_read(&fcport->state) == FCS_ONLINE) {
@@ -3961,15 +3964,31 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                     continue;
                 }
             }
-        } else
-            qla24xx_fcport_handle_login(vha, fcport);
+        } else {
+            if (fcport->rscn_rcvd ||
+                fcport->disc_state != DSC_LOGIN_COMPLETE) {
+                fcport->rscn_rcvd = 0;
+                qla24xx_fcport_handle_login(vha, fcport);
+            }
+        }
     }
 
+    recheck = 1;
 out:
     qla24xx_sp_unmap(vha, sp);
     spin_lock_irqsave(&vha->work_lock, flags);
     vha->scan.scan_flags &= ~SF_SCANNING;
     spin_unlock_irqrestore(&vha->work_lock, flags);
+
+    if (recheck) {
+        list_for_each_entry(fcport, &vha->vp_fcports, list) {
+            if (fcport->rscn_rcvd) {
+                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                break;
+            }
+        }
+    }
 }
 
 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
@@ -4532,7 +4551,6 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 done_free_sp:
     sp->free(sp);
-    fcport->flags &= ~FCF_ASYNC_SENT;
 done:
     return rval;
 }
@@ -4594,7 +4612,6 @@ static void qla2x00_async_gfpnid_sp_done(void *s, int res)
     struct event_arg ea;
     u64 wwn;
 
-    fcport->flags &= ~FCF_ASYNC_SENT;
     wwn = wwn_to_u64(fpn);
     if (wwn)
         memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
@@ -4623,12 +4640,10 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
     if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
         return rval;
 
-    fcport->disc_state = DSC_GFPN_ID;
     sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
     if (!sp)
         goto done;
 
-    fcport->flags |= FCF_ASYNC_SENT;
     sp->type = SRB_CT_PTHRU_CMD;
     sp->name = "gfpnid";
     sp->gen1 = fcport->rscn_gen;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8f55dd44adae..1aa3720ea2ed 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1021,30 +1021,11 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
         vha->fcport_count++;
         ea->fcport->login_succ = 1;
 
-        if (!IS_IIDMA_CAPABLE(vha->hw) ||
-            !vha->hw->flags.gpsc_supported) {
-            ql_dbg(ql_dbg_disc, vha, 0x20d6,
-                "%s %d %8phC post upd_fcport fcp_cnt %d\n",
-                __func__, __LINE__, ea->fcport->port_name,
-                vha->fcport_count);
-
-            qla24xx_post_upd_fcport_work(vha, ea->fcport);
-        } else {
-            if (ea->fcport->id_changed) {
-                ea->fcport->id_changed = 0;
-                ql_dbg(ql_dbg_disc, vha, 0x20d7,
-                    "%s %d %8phC post gfpnid fcp_cnt %d\n",
-                    __func__, __LINE__, ea->fcport->port_name,
-                    vha->fcport_count);
-                qla24xx_post_gfpnid_work(vha, ea->fcport);
-            } else {
-                ql_dbg(ql_dbg_disc, vha, 0x20d7,
-                    "%s %d %8phC post gpsc fcp_cnt %d\n",
-                    __func__, __LINE__, ea->fcport->port_name,
-                    vha->fcport_count);
-                qla24xx_post_gpsc_work(vha, ea->fcport);
-            }
-        }
+        ql_dbg(ql_dbg_disc, vha, 0x20d6,
+            "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+            __func__, __LINE__, ea->fcport->port_name,
+            vha->fcport_count);
+        qla24xx_post_upd_fcport_work(vha, ea->fcport);
     } else if (ea->fcport->login_succ) {
         /*
          * We have an existing session. A late RSCN delivery
@@ -1167,9 +1148,6 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
         fcport->login_gen, fcport->login_retry,
         fcport->loop_id, fcport->scan_state);
 
-    if (fcport->login_retry == 0)
-        return 0;
-
     if (fcport->scan_state != QLA_FCPORT_FOUND)
         return 0;
 
@@ -1194,7 +1172,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
         return 0;
     }
 
-    fcport->login_retry--;
+    if (fcport->login_retry > 0)
+        fcport->login_retry--;
 
     switch (fcport->disc_state) {
     case DSC_DELETED:
@@ -1350,20 +1329,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
     fc_port_t *f, *tf;
     uint32_t id = 0, mask, rid;
     unsigned long flags;
-
-    switch (ea->event) {
-    case FCME_RSCN:
-    case FCME_GIDPN_DONE:
-    case FCME_GPSC_DONE:
-    case FCME_GPNID_DONE:
-    case FCME_GNNID_DONE:
-        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
-            test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
-            return;
-        break;
-    default:
-        break;
-    }
+    fc_port_t *fcport;
 
     switch (ea->event) {
     case FCME_RELOGIN:
@@ -1377,6 +1343,11 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
             return;
         switch (ea->id.b.rsvd_1) {
         case RSCN_PORT_ADDR:
+            fcport = qla2x00_find_fcport_by_nportid
+                (vha, &ea->id, 1);
+            if (fcport)
+                fcport->rscn_rcvd = 1;
+
             spin_lock_irqsave(&vha->work_lock, flags);
             if (vha->scan.scan_flags == 0) {
                 ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -4532,7 +4503,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
     fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
     fcport->deleted = QLA_SESS_DELETED;
     fcport->login_retry = vha->hw->login_retry_count;
-    fcport->login_retry = 5;
     fcport->logout_on_delete = 1;
 
     if (!fcport->ct_desc.ct_sns) {
@@ -5054,6 +5024,24 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
     }
 }
 
+void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+    qla2x00_iidma_fcport(vha, fcport);
+    qla24xx_update_fcport_fcp_prio(vha, fcport);
+}
+
+int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+    struct qla_work_evt *e;
+
+    e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
+    if (!e)
+        return QLA_FUNCTION_FAILED;
+
+    e->u.fcport.fcport = fcport;
+    return qla2x00_post_work(vha, e);
+}
+
 /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
 static void
 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
@@ -5122,13 +5110,14 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 
     if (IS_QLAFX00(vha->hw)) {
         qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-        goto reg_port;
+    } else {
+        fcport->login_retry = 0;
+        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+        fcport->disc_state = DSC_LOGIN_COMPLETE;
+        fcport->deleted = 0;
+        fcport->logout_on_delete = 1;
+        qla2x00_set_fcport_state(fcport, FCS_ONLINE);
     }
-    fcport->login_retry = 0;
-    fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
-    fcport->disc_state = DSC_LOGIN_COMPLETE;
-    fcport->deleted = 0;
-    fcport->logout_on_delete = 1;
 
     qla2x00_set_fcport_state(fcport, FCS_ONLINE);
     qla2x00_iidma_fcport(vha, fcport);
@@ -5140,7 +5129,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 
     qla24xx_update_fcport_fcp_prio(vha, fcport);
 
-reg_port:
     switch (vha->host->active_mode) {
     case MODE_INITIATOR:
         qla2x00_reg_remote_port(vha, fcport);
@@ -5159,6 +5147,23 @@ reg_port:
     default:
         break;
     }
+
+    if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
+        if (fcport->id_changed) {
+            fcport->id_changed = 0;
+            ql_dbg(ql_dbg_disc, vha, 0x20d7,
+                "%s %d %8phC post gfpnid fcp_cnt %d\n",
+                __func__, __LINE__, fcport->port_name,
+                vha->fcport_count);
+            qla24xx_post_gfpnid_work(vha, fcport);
+        } else {
+            ql_dbg(ql_dbg_disc, vha, 0x20d7,
+                "%s %d %8phC post gpsc fcp_cnt %d\n",
+                __func__, __LINE__, fcport->port_name,
+                vha->fcport_count);
+            qla24xx_post_gpsc_work(vha, fcport);
+        }
+    }
 }
 
 /*
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 15eaa6dded04..817c18a8e84d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -5063,6 +5063,10 @@ qla2x00_do_work(struct scsi_qla_host *vha)
             break;
         case QLA_EVT_SP_RETRY:
             qla_sp_retry(vha, e);
+            break;
+        case QLA_EVT_IIDMA:
+            qla_do_iidma_work(vha, e->u.fcport.fcport);
+            break;
         }
         if (e->flags & QLA_EVT_FLAG_FREE)
             kfree(e);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 025dc2d3f3de..b85c833099ff 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -601,24 +601,18 @@ void qla2x00_async_nack_sp_done(void *s, int res)
 
             vha->fcport_count++;
 
-            if (!IS_IIDMA_CAPABLE(vha->hw) ||
-                !vha->hw->flags.gpsc_supported) {
-                ql_dbg(ql_dbg_disc, vha, 0x20f3,
-                    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
-                    __func__, __LINE__,
-                    sp->fcport->port_name,
-                    vha->fcport_count);
-                sp->fcport->disc_state = DSC_UPD_FCPORT;
-                qla24xx_post_upd_fcport_work(vha, sp->fcport);
-            } else {
-                ql_dbg(ql_dbg_disc, vha, 0x20f5,
-                    "%s %d %8phC post gpsc fcp_cnt %d\n",
-                    __func__, __LINE__,
-                    sp->fcport->port_name,
-                    vha->fcport_count);
-
-                qla24xx_post_gpsc_work(vha, sp->fcport);
-            }
+            ql_dbg(ql_dbg_disc, vha, 0x20f3,
+                "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+                __func__, __LINE__,
+                sp->fcport->port_name,
+                vha->fcport_count);
+            sp->fcport->disc_state = DSC_UPD_FCPORT;
+            qla24xx_post_upd_fcport_work(vha, sp->fcport);
+        } else {
+            sp->fcport->login_retry = 0;
+            sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
+            sp->fcport->deleted = 0;
+            sp->fcport->logout_on_delete = 1;
         }
         break;
 
@@ -1930,13 +1924,84 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
     spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
 }
 
+static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
+    uint64_t unpacked_lun)
+{
+    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+    struct qla_qpair_hint *h = NULL;
+
+    if (vha->flags.qpairs_available) {
+        h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
+        if (!h)
+            h = &tgt->qphints[0];
+    } else {
+        h = &tgt->qphints[0];
+    }
+
+    return h;
+}
+
+static void qlt_do_tmr_work(struct work_struct *work)
+{
+    struct qla_tgt_mgmt_cmd *mcmd =
+        container_of(work, struct qla_tgt_mgmt_cmd, work);
+    struct qla_hw_data *ha = mcmd->vha->hw;
+    int rc = EIO;
+    uint32_t tag;
+    unsigned long flags;
+
+    switch (mcmd->tmr_func) {
+    case QLA_TGT_ABTS:
+        tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
+        break;
+    default:
+        tag = 0;
+        break;
+    }
+
+    rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
+        mcmd->tmr_func, tag);
+
+    if (rc != 0) {
+        spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
+        switch (mcmd->tmr_func) {
+        case QLA_TGT_ABTS:
+            qlt_24xx_send_abts_resp(mcmd->qpair,
+                &mcmd->orig_iocb.abts,
+                FCP_TMF_REJECTED, false);
+            break;
+        case QLA_TGT_LUN_RESET:
+        case QLA_TGT_CLEAR_TS:
+        case QLA_TGT_ABORT_TS:
+        case QLA_TGT_CLEAR_ACA:
+        case QLA_TGT_TARGET_RESET:
+            qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
+                qla_sam_status);
+            break;
+
+        case QLA_TGT_ABORT_ALL:
+        case QLA_TGT_NEXUS_LOSS_SESS:
+        case QLA_TGT_NEXUS_LOSS:
+            qlt_send_notify_ack(mcmd->qpair,
+                &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
+            break;
+        }
+        spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
+
+        ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
+            "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+            mcmd->vha->vp_idx, rc);
+        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+    }
+}
+
 /* ha->hardware_lock supposed to be held on entry */
 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
     struct abts_recv_from_24xx *abts, struct fc_port *sess)
 {
     struct qla_hw_data *ha = vha->hw;
     struct qla_tgt_mgmt_cmd *mcmd;
-    int rc;
+    struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
 
     if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
         /* send TASK_ABORT response immediately */
@@ -1961,23 +2026,29 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
     memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
     mcmd->reset_count = ha->base_qpair->chip_reset;
     mcmd->tmr_func = QLA_TGT_ABTS;
-    mcmd->qpair = ha->base_qpair;
+    mcmd->qpair = h->qpair;
     mcmd->vha = vha;
 
     /*
      * LUN is looked up by target-core internally based on the passed
      * abts->exchange_addr_to_abort tag.
      */
-    rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func,
-        abts->exchange_addr_to_abort);
-    if (rc != 0) {
-        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
-            "qla_target(%d): tgt_ops->handle_tmr()"
-            " failed: %d", vha->vp_idx, rc);
-        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
-        return -EFAULT;
+    mcmd->se_cmd.cpuid = h->cpuid;
+
+    if (ha->tgt.tgt_ops->find_cmd_by_tag) {
+        struct qla_tgt_cmd *abort_cmd;
+
+        abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
+            abts->exchange_addr_to_abort);
+        if (abort_cmd && abort_cmd->qpair) {
+            mcmd->qpair = abort_cmd->qpair;
+            mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
+        }
     }
 
+    INIT_WORK(&mcmd->work, qlt_do_tmr_work);
+    queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
+
     return 0;
 }
 
@@ -3556,13 +3627,6 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
     temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
     ctio24->u.status1.ox_id = cpu_to_le16(temp);
 
-    /* Most likely, it isn't needed */
-    ctio24->u.status1.residual = get_unaligned((uint32_t *)
-        &atio->u.isp24.fcp_cmnd.add_cdb[
-        atio->u.isp24.fcp_cmnd.add_cdb_len]);
-    if (ctio24->u.status1.residual != 0)
-        ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
-
     /* Memory Barrier */
     wmb();
     if (qpair->reqq_start_iocbs)
@@ -4057,9 +4121,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
 
     fcp_task_attr = qlt_get_fcp_task_attr(vha,
         atio->u.isp24.fcp_cmnd.task_attr);
-    data_length = be32_to_cpu(get_unaligned((uint32_t *)
-        &atio->u.isp24.fcp_cmnd.add_cdb[
-        atio->u.isp24.fcp_cmnd.add_cdb_len]));
+    data_length = get_datalen_for_atio(atio);
 
     ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
         fcp_task_attr, data_dir, bidi);
@@ -4335,7 +4397,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
     struct qla_hw_data *ha = vha->hw;
     struct qla_tgt_mgmt_cmd *mcmd;
     struct atio_from_isp *a = (struct atio_from_isp *)iocb;
-    int res;
+    struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
 
     mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
     if (!mcmd) {
@@ -4355,24 +4417,36 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
     mcmd->tmr_func = fn;
     mcmd->flags = flags;
     mcmd->reset_count = ha->base_qpair->chip_reset;
-    mcmd->qpair = ha->base_qpair;
+    mcmd->qpair = h->qpair;
     mcmd->vha = vha;
+    mcmd->se_cmd.cpuid = h->cpuid;
+    mcmd->unpacked_lun = lun;
 
     switch (fn) {
     case QLA_TGT_LUN_RESET:
-        abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
-        break;
-    }
+    case QLA_TGT_CLEAR_TS:
+    case QLA_TGT_ABORT_TS:
+        abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
+        /* drop through */
+    case QLA_TGT_CLEAR_ACA:
+        h = qlt_find_qphint(vha, mcmd->unpacked_lun);
+        mcmd->qpair = h->qpair;
+        mcmd->se_cmd.cpuid = h->cpuid;
+        break;
 
-    res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
-    if (res != 0) {
-        ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
-            "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
-            sess->vha->vp_idx, res);
-        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
-        return -EFAULT;
-    }
+    case QLA_TGT_TARGET_RESET:
+    case QLA_TGT_NEXUS_LOSS_SESS:
+    case QLA_TGT_NEXUS_LOSS:
+    case QLA_TGT_ABORT_ALL:
+    default:
+        /* no-op */
+        break;
+    }
 
+    INIT_WORK(&mcmd->work, qlt_do_tmr_work);
+    queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
+        &mcmd->work);
+
     return 0;
 }
 
@@ -4841,7 +4915,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
         switch (sess->disc_state) {
         case DSC_LOGIN_PEND:
         case DSC_GPDB:
-        case DSC_GPSC:
         case DSC_UPD_FCPORT:
         case DSC_LOGIN_COMPLETE:
         case DSC_ADISC:
@@ -5113,8 +5186,6 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
             "qla_target(%d): Immediate notify task %x\n",
             vha->vp_idx, iocb->u.isp2x.task_flags);
-        if (qlt_handle_task_mgmt(vha, iocb) == 0)
-            send_notify_ack = 0;
         break;
 
     case IMM_NTFY_ELS:
@@ -5147,10 +5218,15 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
     struct fc_port *sess = NULL;
     unsigned long flags;
     u16 temp;
+    port_id_t id;
+
+    id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
+    id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
+    id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
+    id.b.rsvd_1 = 0;
 
     spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-    sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
-        atio->u.isp24.fcp_hdr.s_id);
+    sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
     spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
     if (!sess) {
         qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
@@ -5189,6 +5265,12 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
      */
     ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
     ctio24->u.status1.scsi_status = cpu_to_le16(status);
+
+    ctio24->u.status1.residual = get_datalen_for_atio(atio);
+
+    if (ctio24->u.status1.residual != 0)
+        ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
     /* Memory Barrier */
     wmb();
     if (qpair->reqq_start_iocbs)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 728ce74358e7..fecf96f0225c 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -682,7 +682,7 @@ struct qla_tgt_cmd;
  * target module (tcm_qla2xxx).
  */
 struct qla_tgt_func_tmpl {
-
+    struct qla_tgt_cmd *(*find_cmd_by_tag)(struct fc_port *, uint64_t);
     int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
         unsigned char *, uint32_t, int, int, int);
     void (*handle_data)(struct qla_tgt_cmd *);
@@ -966,6 +966,8 @@ struct qla_tgt_mgmt_cmd {
     unsigned int flags;
     uint32_t reset_count;
 #define QLA24XX_MGMT_SEND_NACK 1
+    struct work_struct work;
+    uint64_t unpacked_lun;
     union {
         struct atio_from_isp atio;
         struct imm_ntfy_from_isp imm_ntfy;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 0c55d7057280..1ad7582220c3 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "10.00.00.06-k" 10#define QLA2XXX_VERSION "10.00.00.07-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 10 12#define QLA_DRIVER_MAJOR_VER 10
13#define QLA_DRIVER_MINOR_VER 0 13#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index aadfeaac3898..0c2e82af9c0a 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -48,7 +48,6 @@
48#include "tcm_qla2xxx.h" 48#include "tcm_qla2xxx.h"
49 49
50static struct workqueue_struct *tcm_qla2xxx_free_wq; 50static struct workqueue_struct *tcm_qla2xxx_free_wq;
51static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
52 51
53/* 52/*
54 * Parse WWN. 53 * Parse WWN.
@@ -630,6 +629,32 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
630 transl_tmr_func, GFP_ATOMIC, tag, flags); 629 transl_tmr_func, GFP_ATOMIC, tag, flags);
631} 630}
632 631
632static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess,
633 uint64_t tag)
634{
635 struct qla_tgt_cmd *cmd = NULL;
636 struct se_cmd *secmd;
637 unsigned long flags;
638
639 if (!sess->se_sess)
640 return NULL;
641
642 spin_lock_irqsave(&sess->se_sess->sess_cmd_lock, flags);
643 list_for_each_entry(secmd, &sess->se_sess->sess_cmd_list, se_cmd_list) {
644 /* skip task management functions, including tmr->task_cmd */
645 if (secmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
646 continue;
647
648 if (secmd->tag == tag) {
649 cmd = container_of(secmd, struct qla_tgt_cmd, se_cmd);
650 break;
651 }
652 }
653 spin_unlock_irqrestore(&sess->se_sess->sess_cmd_lock, flags);
654
655 return cmd;
656}
657
633static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) 658static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
634{ 659{
635 struct qla_tgt_cmd *cmd = container_of(se_cmd, 660 struct qla_tgt_cmd *cmd = container_of(se_cmd,
@@ -1608,6 +1633,7 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
1608 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path. 1633 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
1609 */ 1634 */
1610static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { 1635static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
1636 .find_cmd_by_tag = tcm_qla2xxx_find_cmd_by_tag,
1611 .handle_cmd = tcm_qla2xxx_handle_cmd, 1637 .handle_cmd = tcm_qla2xxx_handle_cmd,
1612 .handle_data = tcm_qla2xxx_handle_data, 1638 .handle_data = tcm_qla2xxx_handle_data,
1613 .handle_tmr = tcm_qla2xxx_handle_tmr, 1639 .handle_tmr = tcm_qla2xxx_handle_tmr,
@@ -1976,16 +2002,8 @@ static int tcm_qla2xxx_register_configfs(void)
1976 goto out_fabric_npiv; 2002 goto out_fabric_npiv;
1977 } 2003 }
1978 2004
1979 tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
1980 if (!tcm_qla2xxx_cmd_wq) {
1981 ret = -ENOMEM;
1982 goto out_free_wq;
1983 }
1984
1985 return 0; 2005 return 0;
1986 2006
1987out_free_wq:
1988 destroy_workqueue(tcm_qla2xxx_free_wq);
1989out_fabric_npiv: 2007out_fabric_npiv:
1990 target_unregister_template(&tcm_qla2xxx_npiv_ops); 2008 target_unregister_template(&tcm_qla2xxx_npiv_ops);
1991out_fabric: 2009out_fabric:
@@ -1995,7 +2013,6 @@ out_fabric:
1995 2013
1996static void tcm_qla2xxx_deregister_configfs(void) 2014static void tcm_qla2xxx_deregister_configfs(void)
1997{ 2015{
1998 destroy_workqueue(tcm_qla2xxx_cmd_wq);
1999 destroy_workqueue(tcm_qla2xxx_free_wq); 2016 destroy_workqueue(tcm_qla2xxx_free_wq);
2000 2017
2001 target_unregister_template(&tcm_qla2xxx_ops); 2018 target_unregister_template(&tcm_qla2xxx_ops);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index cec9a14982e6..8578e566ab41 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1385,6 +1385,9 @@ fail_unmap_queues:
1385 qpti->req_cpu, qpti->req_dvma); 1385 qpti->req_cpu, qpti->req_dvma);
1386#undef QSIZE 1386#undef QSIZE
1387 1387
1388fail_free_irq:
1389 free_irq(qpti->irq, qpti);
1390
1388fail_unmap_regs: 1391fail_unmap_regs:
1389 of_iounmap(&op->resource[0], qpti->qregs, 1392 of_iounmap(&op->resource[0], qpti->qregs,
1390 resource_size(&op->resource[0])); 1393 resource_size(&op->resource[0]));
@@ -1392,9 +1395,6 @@ fail_unmap_regs:
1392 of_iounmap(&op->resource[0], qpti->sreg, 1395 of_iounmap(&op->resource[0], qpti->sreg,
1393 sizeof(unsigned char)); 1396 sizeof(unsigned char));
1394 1397
1395fail_free_irq:
1396 free_irq(qpti->irq, qpti);
1397
1398fail_unlink: 1398fail_unlink:
1399 scsi_host_put(host); 1399 scsi_host_put(host);
1400 1400
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index b784002ef0bd..c5a8756384bc 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -4,7 +4,7 @@
4#include <scsi/scsi_dbg.h> 4#include <scsi/scsi_dbg.h>
5#include "scsi_debugfs.h" 5#include "scsi_debugfs.h"
6 6
7#define SCSI_CMD_FLAG_NAME(name) [ilog2(SCMD_##name)] = #name 7#define SCSI_CMD_FLAG_NAME(name) [const_ilog2(SCMD_##name)] = #name
8static const char *const scsi_cmd_flags[] = { 8static const char *const scsi_cmd_flags[] = {
9 SCSI_CMD_FLAG_NAME(TAGGED), 9 SCSI_CMD_FLAG_NAME(TAGGED),
10 SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA), 10 SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA),
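
The switch to const_ilog2() matters because a designated array index must be an integer constant expression; const_ilog2() (from <linux/log2.h>) is guaranteed to be one, while ilog2() is not in every context. A minimal sketch of the pattern, with hypothetical flags:

    #include <linux/log2.h>

    #define FLAG_A (1 << 0)
    #define FLAG_B (1 << 3)
    /* index each name string by its flag's bit position, at compile time */
    #define FLAG_NAME(f) [const_ilog2(f)] = #f

    static const char *const flag_names[] = {
    	FLAG_NAME(FLAG_A),	/* lands at index 0 */
    	FLAG_NAME(FLAG_B),	/* lands at index 3 */
    };
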
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index dd107dc4db0e..c4cbfd07b916 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -161,15 +161,16 @@ static struct {
161 {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */ 161 {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */
162 {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */ 162 {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */
163 {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 163 {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
164 {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2}, 164 {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN |
165 BLIST_REPORTLUN2 | BLIST_RETRY_ITF},
165 {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, 166 {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
166 {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, 167 {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
167 {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, 168 {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
168 {"easyRAID", "F8", NULL, BLIST_NOREPORTLUN}, 169 {"easyRAID", "F8", NULL, BLIST_NOREPORTLUN},
169 {"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 170 {"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
171 {"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1},
170 {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, 172 {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
171 {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36}, 173 {"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */
172 {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36},
173 {"HITACHI", "DF400", "*", BLIST_REPORTLUN2}, 174 {"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
174 {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, 175 {"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
175 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, 176 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
@@ -361,8 +362,22 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
361 scsi_strcpy_devinfo("model", devinfo->model, sizeof(devinfo->model), 362 scsi_strcpy_devinfo("model", devinfo->model, sizeof(devinfo->model),
362 model, compatible); 363 model, compatible);
363 364
364 if (strflags) 365 if (strflags) {
365 flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0); 366 unsigned long long val;
367 int ret = kstrtoull(strflags, 0, &val);
368
369 if (ret != 0) {
370 kfree(devinfo);
371 return ret;
372 }
373 flags = (__force blist_flags_t)val;
374 }
375 if (flags & __BLIST_UNUSED_MASK) {
376 pr_err("scsi_devinfo (%s:%s): unsupported flags 0x%llx",
377 vendor, model, flags & __BLIST_UNUSED_MASK);
378 kfree(devinfo);
379 return -EINVAL;
380 }
366 devinfo->flags = flags; 381 devinfo->flags = flags;
367 devinfo->compatible = compatible; 382 devinfo->compatible = compatible;
368 383
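
Unlike simple_strtoul(), kstrtoull() rejects trailing garbage and overflow, so a malformed dev_flags string now fails the add instead of being silently mis-parsed, and the __BLIST_UNUSED_MASK test additionally refuses flag bits the kernel does not define. A minimal sketch of the strict-parsing idiom; parse_blist_flags is a hypothetical wrapper:

    #include <linux/kernel.h>

    /* hypothetical wrapper: strictly parse "0x...", "0...", or decimal */
    static int parse_blist_flags(const char *s, unsigned long long *out)
    {
    	int ret = kstrtoull(s, 0, out);	/* base 0: auto-detect the radix */

    	if (ret)
    		return ret;	/* -EINVAL on garbage, -ERANGE on overflow */
    	return 0;
    }
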
@@ -615,7 +630,7 @@ static int devinfo_seq_show(struct seq_file *m, void *v)
615 devinfo_table->name) 630 devinfo_table->name)
616 seq_printf(m, "[%s]:\n", devinfo_table->name); 631 seq_printf(m, "[%s]:\n", devinfo_table->name);
617 632
618 seq_printf(m, "'%.8s' '%.16s' 0x%x\n", 633 seq_printf(m, "'%.8s' '%.16s' 0x%llx\n",
619 devinfo->vendor, devinfo->model, devinfo->flags); 634 devinfo->vendor, devinfo->model, devinfo->flags);
620 return 0; 635 return 0;
621} 636}
@@ -734,9 +749,9 @@ MODULE_PARM_DESC(dev_flags,
734 " list entries for vendor and model with an integer value of flags" 749 " list entries for vendor and model with an integer value of flags"
735 " to the scsi device info list"); 750 " to the scsi device info list");
736 751
737module_param_named(default_dev_flags, scsi_default_dev_flags, int, S_IRUGO|S_IWUSR); 752module_param_named(default_dev_flags, scsi_default_dev_flags, ullong, 0644);
738MODULE_PARM_DESC(default_dev_flags, 753MODULE_PARM_DESC(default_dev_flags,
739 "scsi default device flag integer value"); 754 "scsi default device flag uint64_t value");
740 755
741/** 756/**
742 * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list 757 * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 188f30572aa1..5a58cbf3a75d 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -58,7 +58,10 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
58 {"IBM", "3526", "rdac", }, 58 {"IBM", "3526", "rdac", },
59 {"IBM", "3542", "rdac", }, 59 {"IBM", "3542", "rdac", },
60 {"IBM", "3552", "rdac", }, 60 {"IBM", "3552", "rdac", },
61 {"SGI", "TP9", "rdac", }, 61 {"SGI", "TP9300", "rdac", },
62 {"SGI", "TP9400", "rdac", },
63 {"SGI", "TP9500", "rdac", },
64 {"SGI", "TP9700", "rdac", },
62 {"SGI", "IS", "rdac", }, 65 {"SGI", "IS", "rdac", },
63 {"STK", "OPENstorage", "rdac", }, 66 {"STK", "OPENstorage", "rdac", },
64 {"STK", "FLEXLINE 380", "rdac", }, 67 {"STK", "FLEXLINE 380", "rdac", },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9c02ba2e7ef3..8932ae81a15a 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -38,6 +38,7 @@
38#include <scsi/scsi_host.h> 38#include <scsi/scsi_host.h>
39#include <scsi/scsi_ioctl.h> 39#include <scsi/scsi_ioctl.h>
40#include <scsi/scsi_dh.h> 40#include <scsi/scsi_dh.h>
41#include <scsi/scsi_devinfo.h>
41#include <scsi/sg.h> 42#include <scsi/sg.h>
42 43
43#include "scsi_priv.h" 44#include "scsi_priv.h"
@@ -525,6 +526,12 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
525 if (sshdr.asc == 0x10) /* DIF */ 526 if (sshdr.asc == 0x10) /* DIF */
526 return SUCCESS; 527 return SUCCESS;
527 528
529 if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF)
530 return ADD_TO_MLQUEUE;
531 if (sshdr.asc == 0xc1 && sshdr.ascq == 0x01 &&
532 sdev->sdev_bflags & BLIST_RETRY_ASC_C1)
533 return ADD_TO_MLQUEUE;
534
528 return NEEDS_RETRY; 535 return NEEDS_RETRY;
529 case NOT_READY: 536 case NOT_READY:
530 case UNIT_ATTENTION: 537 case UNIT_ATTENTION:
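
Both additions return ADD_TO_MLQUEUE, which requeues the command without consuming a retry, and each fires only for devices whose blacklist entry carries the matching flag: ASC 0x44 (internal target failure) with BLIST_RETRY_ITF, set above for EMC SYMMETRIX, and the vendor-specific ASC/ASCQ 0xc1/0x01 with BLIST_RETRY_ASC_C1, set for FUJITSU ETERNUS_DXM. A distilled sketch of the predicate; blist_wants_requeue is a hypothetical helper:

    #include <linux/types.h>
    #include <scsi/scsi_devinfo.h>

    /* hypothetical helper condensing the two new checks */
    static bool blist_wants_requeue(u8 asc, u8 ascq, blist_flags_t bflags)
    {
    	if (asc == 0x44 && (bflags & BLIST_RETRY_ITF))
    		return true;	/* internal target failure */
    	if (asc == 0xc1 && ascq == 0x01 && (bflags & BLIST_RETRY_ASC_C1))
    		return true;	/* vendor-specific retryable condition */
    	return false;
    }
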
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fb38aeff9dbd..41e9ac9fc138 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -985,6 +985,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
985 case 0x08: /* Long write in progress */ 985 case 0x08: /* Long write in progress */
986 case 0x09: /* self test in progress */ 986 case 0x09: /* self test in progress */
987 case 0x14: /* space allocation in progress */ 987 case 0x14: /* space allocation in progress */
988 case 0x1a: /* start stop unit in progress */
989 case 0x1b: /* sanitize in progress */
990 case 0x1d: /* configuration in progress */
991 case 0x24: /* depopulation in progress */
988 action = ACTION_DELAYED_RETRY; 992 action = ACTION_DELAYED_RETRY;
989 break; 993 break;
990 default: 994 default:
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1e36c9a9ad17..7943b762c12d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -968,7 +968,7 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
968static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL); 968static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
969 969
970#define BLIST_FLAG_NAME(name) \ 970#define BLIST_FLAG_NAME(name) \
971 [ilog2((__force unsigned int)BLIST_##name)] = #name 971 [const_ilog2((__force __u64)BLIST_##name)] = #name
972static const char *const sdev_bflags_name[] = { 972static const char *const sdev_bflags_name[] = {
973#include "scsi_devinfo_tbl.c" 973#include "scsi_devinfo_tbl.c"
974}; 974};
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index e2953b416746..0cd16e80b019 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -213,10 +213,6 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
213 to_sas_host_attrs(shost)->q = q; 213 to_sas_host_attrs(shost)->q = q;
214 } 214 }
215 215
216 /*
217 * by default assume old behaviour and bounce for any highmem page
218 */
219 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
220 blk_queue_flag_set(QUEUE_FLAG_BIDI, q); 216 blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
221 return 0; 217 return 0;
222} 218}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 0d663b5e45bb..392c7d078ae3 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -74,12 +74,12 @@ struct scsi_disk {
74 struct gendisk *disk; 74 struct gendisk *disk;
75 struct opal_dev *opal_dev; 75 struct opal_dev *opal_dev;
76#ifdef CONFIG_BLK_DEV_ZONED 76#ifdef CONFIG_BLK_DEV_ZONED
77 unsigned int nr_zones; 77 u32 nr_zones;
78 unsigned int zone_blocks; 78 u32 zone_blocks;
79 unsigned int zone_shift; 79 u32 zone_shift;
80 unsigned int zones_optimal_open; 80 u32 zones_optimal_open;
81 unsigned int zones_optimal_nonseq; 81 u32 zones_optimal_nonseq;
82 unsigned int zones_max_open; 82 u32 zones_max_open;
83#endif 83#endif
84 atomic_t openers; 84 atomic_t openers;
85 sector_t capacity; /* size in logical blocks */ 85 sector_t capacity; /* size in logical blocks */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 210407cd2341..323e3dc4bc59 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -299,16 +299,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
299 case REQ_OP_WRITE: 299 case REQ_OP_WRITE:
300 case REQ_OP_WRITE_ZEROES: 300 case REQ_OP_WRITE_ZEROES:
301 case REQ_OP_WRITE_SAME: 301 case REQ_OP_WRITE_SAME:
302
303 if (result &&
304 sshdr->sense_key == ILLEGAL_REQUEST &&
305 sshdr->asc == 0x21)
306 /*
307 * INVALID ADDRESS FOR WRITE error: It is unlikely that
308 * retrying write requests that failed with any kind of
309 * alignment error will result in success. So don't.
310 */
311 cmd->allowed = 0;
312 break; 302 break;
313 303
314 case REQ_OP_ZONE_REPORT: 304 case REQ_OP_ZONE_REPORT:
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 6fc58e2c99d3..573763908562 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1191,7 +1191,7 @@ sg_fasync(int fd, struct file *filp, int mode)
1191 return fasync_helper(fd, filp, mode, &sfp->async_qp); 1191 return fasync_helper(fd, filp, mode, &sfp->async_qp);
1192} 1192}
1193 1193
1194static int 1194static vm_fault_t
1195sg_vma_fault(struct vm_fault *vmf) 1195sg_vma_fault(struct vm_fault *vmf)
1196{ 1196{
1197 struct vm_area_struct *vma = vmf->vma; 1197 struct vm_area_struct *vma = vmf->vma;
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index d8a376b7882d..d9b2e46424aa 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -47,10 +47,10 @@ static const char * const snic_req_state_str[] = {
47 [SNIC_IOREQ_NOT_INITED] = "SNIC_IOREQ_NOT_INITED", 47 [SNIC_IOREQ_NOT_INITED] = "SNIC_IOREQ_NOT_INITED",
48 [SNIC_IOREQ_PENDING] = "SNIC_IOREQ_PENDING", 48 [SNIC_IOREQ_PENDING] = "SNIC_IOREQ_PENDING",
49 [SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING", 49 [SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
50 [SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPELTE", 50 [SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
51 [SNIC_IOREQ_LR_PENDING] = "SNIC_IOREQ_LR_PENDING", 51 [SNIC_IOREQ_LR_PENDING] = "SNIC_IOREQ_LR_PENDING",
52 [SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPELTE", 52 [SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
53 [SNIC_IOREQ_COMPLETE] = "SNIC_IOREQ_CMD_COMPELTE", 53 [SNIC_IOREQ_COMPLETE] = "SNIC_IOREQ_CMD_COMPLETE",
54}; 54};
55 55
56/* snic cmd status strings */ 56/* snic cmd status strings */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a427ce9497be..c9e27e752c25 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3878,7 +3878,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
3878{ 3878{
3879 struct st_buffer *tb; 3879 struct st_buffer *tb;
3880 3880
3881 tb = kzalloc(sizeof(struct st_buffer), GFP_ATOMIC); 3881 tb = kzalloc(sizeof(struct st_buffer), GFP_KERNEL);
3882 if (!tb) { 3882 if (!tb) {
3883 printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n"); 3883 printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
3884 return NULL; 3884 return NULL;
@@ -3889,7 +3889,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
3889 tb->buffer_size = 0; 3889 tb->buffer_size = 0;
3890 3890
3891 tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *), 3891 tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *),
3892 GFP_ATOMIC); 3892 GFP_KERNEL);
3893 if (!tb->reserved_pages) { 3893 if (!tb->reserved_pages) {
3894 kfree(tb); 3894 kfree(tb);
3895 return NULL; 3895 return NULL;
@@ -4290,7 +4290,7 @@ static int st_probe(struct device *dev)
4290 goto out_buffer_free; 4290 goto out_buffer_free;
4291 } 4291 }
4292 4292
4293 tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC); 4293 tpnt = kzalloc(sizeof(struct scsi_tape), GFP_KERNEL);
4294 if (tpnt == NULL) { 4294 if (tpnt == NULL) {
4295 sdev_printk(KERN_ERR, SDp, 4295 sdev_printk(KERN_ERR, SDp,
4296 "st: Can't allocate device descriptor.\n"); 4296 "st: Can't allocate device descriptor.\n");
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a2ec0bc9e9fa..33a4a4dad324 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -395,6 +395,12 @@ MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
395 395
396module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO); 396module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
397MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels"); 397MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
398
399static int ring_avail_percent_lowater = 10;
400module_param(ring_avail_percent_lowater, int, S_IRUGO);
401MODULE_PARM_DESC(ring_avail_percent_lowater,
402 "Select a channel only if its available ring space exceeds this percentage");
403
398/* 404/*
399 * Timeout in seconds for all devices managed by this driver. 405 * Timeout in seconds for all devices managed by this driver.
400 */ 406 */
@@ -1241,7 +1247,7 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
1241{ 1247{
1242 u16 slot = 0; 1248 u16 slot = 0;
1243 u16 hash_qnum; 1249 u16 hash_qnum;
1244 struct cpumask alloced_mask; 1250 const struct cpumask *node_mask;
1245 int num_channels, tgt_cpu; 1251 int num_channels, tgt_cpu;
1246 1252
1247 if (stor_device->num_sc == 0) 1253 if (stor_device->num_sc == 0)
@@ -1257,10 +1263,13 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
1257 * III. Mapping is persistent. 1263 * III. Mapping is persistent.
1258 */ 1264 */
1259 1265
1260 cpumask_and(&alloced_mask, &stor_device->alloced_cpus, 1266 node_mask = cpumask_of_node(cpu_to_node(q_num));
1261 cpumask_of_node(cpu_to_node(q_num)));
1262 1267
1263 num_channels = cpumask_weight(&alloced_mask); 1268 num_channels = 0;
1269 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
1270 if (cpumask_test_cpu(tgt_cpu, node_mask))
1271 num_channels++;
1272 }
1264 if (num_channels == 0) 1273 if (num_channels == 0)
1265 return stor_device->device->channel; 1274 return stor_device->device->channel;
1266 1275
@@ -1268,7 +1277,9 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
1268 while (hash_qnum >= num_channels) 1277 while (hash_qnum >= num_channels)
1269 hash_qnum -= num_channels; 1278 hash_qnum -= num_channels;
1270 1279
1271 for_each_cpu(tgt_cpu, &alloced_mask) { 1280 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
1281 if (!cpumask_test_cpu(tgt_cpu, node_mask))
1282 continue;
1272 if (slot == hash_qnum) 1283 if (slot == hash_qnum)
1273 break; 1284 break;
1274 slot++; 1285 slot++;
@@ -1285,9 +1296,9 @@ static int storvsc_do_io(struct hv_device *device,
1285{ 1296{
1286 struct storvsc_device *stor_device; 1297 struct storvsc_device *stor_device;
1287 struct vstor_packet *vstor_packet; 1298 struct vstor_packet *vstor_packet;
1288 struct vmbus_channel *outgoing_channel; 1299 struct vmbus_channel *outgoing_channel, *channel;
1289 int ret = 0; 1300 int ret = 0;
1290 struct cpumask alloced_mask; 1301 const struct cpumask *node_mask;
1291 int tgt_cpu; 1302 int tgt_cpu;
1292 1303
1293 vstor_packet = &request->vstor_packet; 1304 vstor_packet = &request->vstor_packet;
@@ -1301,22 +1312,52 @@ static int storvsc_do_io(struct hv_device *device,
1301 /* 1312 /*
1302 * Select an appropriate channel to send the request out. 1313 * Select an appropriate channel to send the request out.
1303 */ 1314 */
1304
1305 if (stor_device->stor_chns[q_num] != NULL) { 1315 if (stor_device->stor_chns[q_num] != NULL) {
1306 outgoing_channel = stor_device->stor_chns[q_num]; 1316 outgoing_channel = stor_device->stor_chns[q_num];
1307 if (outgoing_channel->target_cpu == smp_processor_id()) { 1317 if (outgoing_channel->target_cpu == q_num) {
1308 /* 1318 /*
1309 * Ideally, we want to pick a different channel if 1319 * Ideally, we want to pick a different channel if
1310 * available on the same NUMA node. 1320 * available on the same NUMA node.
1311 */ 1321 */
1312 cpumask_and(&alloced_mask, &stor_device->alloced_cpus, 1322 node_mask = cpumask_of_node(cpu_to_node(q_num));
1313 cpumask_of_node(cpu_to_node(q_num))); 1323 for_each_cpu_wrap(tgt_cpu,
1314 for_each_cpu_wrap(tgt_cpu, &alloced_mask, 1324 &stor_device->alloced_cpus, q_num + 1) {
1315 outgoing_channel->target_cpu + 1) { 1325 if (!cpumask_test_cpu(tgt_cpu, node_mask))
1316 if (tgt_cpu != outgoing_channel->target_cpu) { 1326 continue;
1317 outgoing_channel = 1327 if (tgt_cpu == q_num)
1318 stor_device->stor_chns[tgt_cpu]; 1328 continue;
1319 break; 1329 channel = stor_device->stor_chns[tgt_cpu];
1330 if (hv_get_avail_to_write_percent(
1331 &channel->outbound)
1332 > ring_avail_percent_lowater) {
1333 outgoing_channel = channel;
1334 goto found_channel;
1335 }
1336 }
1337
1338 /*
1339 * All the other channels on the same NUMA node are
1340 * busy. Try to use the channel on the current CPU.
1341 */
1342 if (hv_get_avail_to_write_percent(
1343 &outgoing_channel->outbound)
1344 > ring_avail_percent_lowater)
1345 goto found_channel;
1346
1347 /*
1348 * If we reach here, all the channels on the current
1349 * NUMA node are busy. Try to find a channel in
1350 * other NUMA nodes.
1351 */
1352 for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
1353 if (cpumask_test_cpu(tgt_cpu, node_mask))
1354 continue;
1355 channel = stor_device->stor_chns[tgt_cpu];
1356 if (hv_get_avail_to_write_percent(
1357 &channel->outbound)
1358 > ring_avail_percent_lowater) {
1359 outgoing_channel = channel;
1360 goto found_channel;
1320 } 1361 }
1321 } 1362 }
1322 } 1363 }
@@ -1324,7 +1365,7 @@ static int storvsc_do_io(struct hv_device *device,
1324 outgoing_channel = get_og_chn(stor_device, q_num); 1365 outgoing_channel = get_og_chn(stor_device, q_num);
1325 } 1366 }
1326 1367
1327 1368found_channel:
1328 vstor_packet->flags |= REQUEST_COMPLETION_FLAG; 1369 vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
1329 1370
1330 vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) - 1371 vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
@@ -1382,9 +1423,6 @@ static int storvsc_device_alloc(struct scsi_device *sdevice)
1382 1423
1383static int storvsc_device_configure(struct scsi_device *sdevice) 1424static int storvsc_device_configure(struct scsi_device *sdevice)
1384{ 1425{
1385
1386 blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
1387
1388 blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ)); 1426 blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
1389 1427
1390 /* Ensure there are no gaps in presented sgls */ 1428 /* Ensure there are no gaps in presented sgls */
@@ -1732,8 +1770,9 @@ static int storvsc_probe(struct hv_device *device,
1732 (num_cpus - 1) / storvsc_vcpus_per_sub_channel; 1770 (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
1733 } 1771 }
1734 1772
1735 scsi_driver.can_queue = (max_outstanding_req_per_channel * 1773 scsi_driver.can_queue = max_outstanding_req_per_channel *
1736 (max_sub_channels + 1)); 1774 (max_sub_channels + 1) *
1775 (100 - ring_avail_percent_lowater) / 100;
1737 1776
1738 host = scsi_host_alloc(&scsi_driver, 1777 host = scsi_host_alloc(&scsi_driver,
1739 sizeof(struct hv_host_device)); 1778 sizeof(struct hv_host_device));
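
The two storvsc changes work together: a channel is only selected while more than ring_avail_percent_lowater percent of its outbound ring is still free, and can_queue is shrunk by the same margin so the mid-layer cannot queue requests the rings could not absorb. A worked example with the default 10% mark; the per-channel depth and channel counts are hypothetical:

    /* hypothetical sizing: 768 requests per channel, 3 subchannels + 1 primary */
    int per_channel = 768;
    int sub_channels = 3;
    int lowater = 10;	/* ring_avail_percent_lowater default */

    /* 768 * (3 + 1) * (100 - 10) / 100 = 2764 outstanding requests */
    int can_queue = per_channel * (sub_channels + 1) * (100 - lowater) / 100;
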
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2b38db2eeafa..221820a7c78b 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1098,7 +1098,7 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
1098 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; 1098 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
1099 } 1099 }
1100 1100
1101 if (host->hw_ver.major >= 0x2) { 1101 if (host->hw_ver.major == 0x2) {
1102 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; 1102 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
1103 1103
1104 if (!ufs_qcom_cap_qunipro(host)) 1104 if (!ufs_qcom_cap_qunipro(host))
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d0a1674915a1..3a811c5f70ba 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -233,8 +233,6 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
233static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba); 233static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
234static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); 234static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
235static irqreturn_t ufshcd_intr(int irq, void *__hba); 235static irqreturn_t ufshcd_intr(int irq, void *__hba);
236static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
237 struct ufs_pa_layer_attr *desired_pwr_mode);
238static int ufshcd_change_power_mode(struct ufs_hba *hba, 236static int ufshcd_change_power_mode(struct ufs_hba *hba,
239 struct ufs_pa_layer_attr *pwr_mode); 237 struct ufs_pa_layer_attr *pwr_mode);
240static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag) 238static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
@@ -266,6 +264,18 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
266 } 264 }
267} 265}
268 266
267static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
268{
269 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
270 scsi_unblock_requests(hba->host);
271}
272
273static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
274{
275 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
276 scsi_block_requests(hba->host);
277}
278
269/* replace non-printable or non-ASCII characters with spaces */ 279/* replace non-printable or non-ASCII characters with spaces */
270static inline void ufshcd_remove_non_printable(char *val) 280static inline void ufshcd_remove_non_printable(char *val)
271{ 281{
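
scsi_block_requests()/scsi_unblock_requests() are not nestable on their own, so the new scsi_block_reqs_cnt counter makes the pair refcounted: only the 0 to 1 transition blocks and only the 1 to 0 transition unblocks, letting the error handler, clock gating and clock scaling each bracket their own critical section. A usage sketch of the helpers added above:

    ufshcd_scsi_block_requests(hba);	/* 0 -> 1: requests blocked */
    ufshcd_scsi_block_requests(hba);	/* 1 -> 2: already blocked, no-op */
    ufshcd_scsi_unblock_requests(hba);	/* 2 -> 1: still blocked */
    ufshcd_scsi_unblock_requests(hba);	/* 1 -> 0: requests flow again */
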
@@ -675,7 +685,24 @@ static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
675 */ 685 */
676static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) 686static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
677{ 687{
678 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); 688 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
689 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
690 else
691 ufshcd_writel(hba, ~(1 << pos),
692 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
693}
694
695/**
696 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
697 * @hba: per adapter instance
698 * @pos: position of the bit to be cleared
699 */
700static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
701{
702 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
703 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
704 else
705 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
679} 706}
680 707
681/** 708/**
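
Per UFSHCI, the UTRLCLR/UTMRLCLR registers clear a slot when software writes 0 to its bit position, so the mask is all ones except the target bit; controllers with UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR expect the opposite, write-1-to-clear. The two mask values for slot 5, as a sketch:

    u32 pos = 5;
    u32 spec_mask  = ~(1U << pos);	/* 0xffffffdf: spec behaviour, write 0 to clear */
    u32 quirk_mask =  (1U << pos);	/* 0x00000020: quirk behaviour, write 1 to clear */
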
@@ -1091,12 +1118,12 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1091 * make sure that there are no outstanding requests when 1118 * make sure that there are no outstanding requests when
1092 * clock scaling is in progress 1119 * clock scaling is in progress
1093 */ 1120 */
1094 scsi_block_requests(hba->host); 1121 ufshcd_scsi_block_requests(hba);
1095 down_write(&hba->clk_scaling_lock); 1122 down_write(&hba->clk_scaling_lock);
1096 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) { 1123 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1097 ret = -EBUSY; 1124 ret = -EBUSY;
1098 up_write(&hba->clk_scaling_lock); 1125 up_write(&hba->clk_scaling_lock);
1099 scsi_unblock_requests(hba->host); 1126 ufshcd_scsi_unblock_requests(hba);
1100 } 1127 }
1101 1128
1102 return ret; 1129 return ret;
@@ -1105,7 +1132,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1105static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba) 1132static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1106{ 1133{
1107 up_write(&hba->clk_scaling_lock); 1134 up_write(&hba->clk_scaling_lock);
1108 scsi_unblock_requests(hba->host); 1135 ufshcd_scsi_unblock_requests(hba);
1109} 1136}
1110 1137
1111/** 1138/**
@@ -1200,16 +1227,13 @@ static int ufshcd_devfreq_target(struct device *dev,
1200 struct ufs_hba *hba = dev_get_drvdata(dev); 1227 struct ufs_hba *hba = dev_get_drvdata(dev);
1201 ktime_t start; 1228 ktime_t start;
1202 bool scale_up, sched_clk_scaling_suspend_work = false; 1229 bool scale_up, sched_clk_scaling_suspend_work = false;
1230 struct list_head *clk_list = &hba->clk_list_head;
1231 struct ufs_clk_info *clki;
1203 unsigned long irq_flags; 1232 unsigned long irq_flags;
1204 1233
1205 if (!ufshcd_is_clkscaling_supported(hba)) 1234 if (!ufshcd_is_clkscaling_supported(hba))
1206 return -EINVAL; 1235 return -EINVAL;
1207 1236
1208 if ((*freq > 0) && (*freq < UINT_MAX)) {
1209 dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
1210 return -EINVAL;
1211 }
1212
1213 spin_lock_irqsave(hba->host->host_lock, irq_flags); 1237 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1214 if (ufshcd_eh_in_progress(hba)) { 1238 if (ufshcd_eh_in_progress(hba)) {
1215 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1239 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
@@ -1219,7 +1243,13 @@ static int ufshcd_devfreq_target(struct device *dev,
1219 if (!hba->clk_scaling.active_reqs) 1243 if (!hba->clk_scaling.active_reqs)
1220 sched_clk_scaling_suspend_work = true; 1244 sched_clk_scaling_suspend_work = true;
1221 1245
1222 scale_up = (*freq == UINT_MAX) ? true : false; 1246 if (list_empty(clk_list)) {
1247 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1248 goto out;
1249 }
1250
1251 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1252 scale_up = (*freq == clki->max_freq) ? true : false;
1223 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { 1253 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1224 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1254 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1225 ret = 0; 1255 ret = 0;
@@ -1287,6 +1317,55 @@ static struct devfreq_dev_profile ufs_devfreq_profile = {
1287 .get_dev_status = ufshcd_devfreq_get_dev_status, 1317 .get_dev_status = ufshcd_devfreq_get_dev_status,
1288}; 1318};
1289 1319
1320static int ufshcd_devfreq_init(struct ufs_hba *hba)
1321{
1322 struct list_head *clk_list = &hba->clk_list_head;
1323 struct ufs_clk_info *clki;
1324 struct devfreq *devfreq;
1325 int ret;
1326
1327 /* Skip devfreq if we don't have any clocks in the list */
1328 if (list_empty(clk_list))
1329 return 0;
1330
1331 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1332 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1333 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1334
1335 devfreq = devfreq_add_device(hba->dev,
1336 &ufs_devfreq_profile,
1337 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1338 NULL);
1339 if (IS_ERR(devfreq)) {
1340 ret = PTR_ERR(devfreq);
1341 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1342
1343 dev_pm_opp_remove(hba->dev, clki->min_freq);
1344 dev_pm_opp_remove(hba->dev, clki->max_freq);
1345 return ret;
1346 }
1347
1348 hba->devfreq = devfreq;
1349
1350 return 0;
1351}
1352
1353static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1354{
1355 struct list_head *clk_list = &hba->clk_list_head;
1356 struct ufs_clk_info *clki;
1357
1358 if (!hba->devfreq)
1359 return;
1360
1361 devfreq_remove_device(hba->devfreq);
1362 hba->devfreq = NULL;
1363
1364 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1365 dev_pm_opp_remove(hba->dev, clki->min_freq);
1366 dev_pm_opp_remove(hba->dev, clki->max_freq);
1367}
1368
1290static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) 1369static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1291{ 1370{
1292 unsigned long flags; 1371 unsigned long flags;
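
ufshcd_devfreq_init() registers exactly two operating points, the first clock's min and max frequencies, so the simple_ondemand governor can only ever request one of those values; that is why ufshcd_devfreq_target() now decides scale_up with an equality test against clki->max_freq instead of the old UINT_MAX sentinel. A minimal sketch, with hypothetical frequencies:

    #include <linux/pm_opp.h>

    static void my_opp_setup(struct device *dev)
    {
    	/* hypothetical min/max for the controlling clock */
    	unsigned long min_freq = 19200000, max_freq = 200000000;

    	dev_pm_opp_add(dev, min_freq, 0);	/* 0: voltage not tracked */
    	dev_pm_opp_add(dev, max_freq, 0);
    }
    /* in the devfreq target callback, *freq is one of the two OPPs, so:
     *	scale_up = (*freq == max_freq);
     */
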
@@ -1425,7 +1504,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
1425 hba->clk_gating.is_suspended = false; 1504 hba->clk_gating.is_suspended = false;
1426 } 1505 }
1427unblock_reqs: 1506unblock_reqs:
1428 scsi_unblock_requests(hba->host); 1507 ufshcd_scsi_unblock_requests(hba);
1429} 1508}
1430 1509
1431/** 1510/**
@@ -1481,11 +1560,12 @@ start:
1481 * work and to enable clocks. 1560 * work and to enable clocks.
1482 */ 1561 */
1483 case CLKS_OFF: 1562 case CLKS_OFF:
1484 scsi_block_requests(hba->host); 1563 ufshcd_scsi_block_requests(hba);
1485 hba->clk_gating.state = REQ_CLKS_ON; 1564 hba->clk_gating.state = REQ_CLKS_ON;
1486 trace_ufshcd_clk_gating(dev_name(hba->dev), 1565 trace_ufshcd_clk_gating(dev_name(hba->dev),
1487 hba->clk_gating.state); 1566 hba->clk_gating.state);
1488 schedule_work(&hba->clk_gating.ungate_work); 1567 queue_work(hba->clk_gating.clk_gating_workq,
1568 &hba->clk_gating.ungate_work);
1489 /* 1569 /*
1490 * fall through to check if we should wait for this 1570 * fall through to check if we should wait for this
1491 * work to be done or not. 1571 * work to be done or not.
@@ -1671,6 +1751,8 @@ out:
1671 1751
1672static void ufshcd_init_clk_gating(struct ufs_hba *hba) 1752static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1673{ 1753{
1754 char wq_name[sizeof("ufs_clk_gating_00")];
1755
1674 if (!ufshcd_is_clkgating_allowed(hba)) 1756 if (!ufshcd_is_clkgating_allowed(hba))
1675 return; 1757 return;
1676 1758
@@ -1678,6 +1760,11 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1678 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); 1760 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1679 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); 1761 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1680 1762
1763 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1764 hba->host->host_no);
1765 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1766 WQ_MEM_RECLAIM);
1767
1681 hba->clk_gating.is_enabled = true; 1768 hba->clk_gating.is_enabled = true;
1682 1769
1683 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; 1770 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
@@ -1705,6 +1792,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1705 device_remove_file(hba->dev, &hba->clk_gating.enable_attr); 1792 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1706 cancel_work_sync(&hba->clk_gating.ungate_work); 1793 cancel_work_sync(&hba->clk_gating.ungate_work);
1707 cancel_delayed_work_sync(&hba->clk_gating.gate_work); 1794 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1795 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1708} 1796}
1709 1797
1710/* Must be called with host lock acquired */ 1798/* Must be called with host lock acquired */
@@ -3383,6 +3471,52 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3383 "dme-link-startup: error code %d\n", ret); 3471 "dme-link-startup: error code %d\n", ret);
3384 return ret; 3472 return ret;
3385} 3473}
3474/**
3475 * ufshcd_dme_reset - UIC command for DME_RESET
3476 * @hba: per adapter instance
3477 *
3478 * DME_RESET command is issued in order to reset the UniPro stack.
3479 * This function now deals with cold reset.
3480 *
3481 * Returns 0 on success, non-zero value on failure
3482 */
3483static int ufshcd_dme_reset(struct ufs_hba *hba)
3484{
3485 struct uic_command uic_cmd = {0};
3486 int ret;
3487
3488 uic_cmd.command = UIC_CMD_DME_RESET;
3489
3490 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3491 if (ret)
3492 dev_err(hba->dev,
3493 "dme-reset: error code %d\n", ret);
3494
3495 return ret;
3496}
3497
3498/**
3499 * ufshcd_dme_enable - UIC command for DME_ENABLE
3500 * @hba: per adapter instance
3501 *
3502 * DME_ENABLE command is issued in order to enable the UniPro stack.
3503 *
3504 * Returns 0 on success, non-zero value on failure
3505 */
3506static int ufshcd_dme_enable(struct ufs_hba *hba)
3507{
3508 struct uic_command uic_cmd = {0};
3509 int ret;
3510
3511 uic_cmd.command = UIC_CMD_DME_ENABLE;
3512
3513 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3514 if (ret)
3515 dev_err(hba->dev,
3516 "dme-enable: error code %d\n", ret);
3517
3518 return ret;
3519}
3386 3520
3387static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) 3521static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3388{ 3522{
@@ -3906,7 +4040,7 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
3906 * @hba: per-adapter instance 4040 * @hba: per-adapter instance
3907 * @desired_pwr_mode: desired power configuration 4041 * @desired_pwr_mode: desired power configuration
3908 */ 4042 */
3909static int ufshcd_config_pwr_mode(struct ufs_hba *hba, 4043int ufshcd_config_pwr_mode(struct ufs_hba *hba,
3910 struct ufs_pa_layer_attr *desired_pwr_mode) 4044 struct ufs_pa_layer_attr *desired_pwr_mode)
3911{ 4045{
3912 struct ufs_pa_layer_attr final_params = { 0 }; 4046 struct ufs_pa_layer_attr final_params = { 0 };
@@ -3924,6 +4058,7 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
3924 4058
3925 return ret; 4059 return ret;
3926} 4060}
4061EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
3927 4062
3928/** 4063/**
3929 * ufshcd_complete_dev_init() - checks device readiness 4064 * ufshcd_complete_dev_init() - checks device readiness
@@ -4041,7 +4176,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4041} 4176}
4042 4177
4043/** 4178/**
4044 * ufshcd_hba_enable - initialize the controller 4179 * ufshcd_hba_execute_hce - initialize the controller
4045 * @hba: per adapter instance 4180 * @hba: per adapter instance
4046 * 4181 *
4047 * The controller resets itself and controller firmware initialization 4182 * The controller resets itself and controller firmware initialization
@@ -4050,7 +4185,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4050 * 4185 *
4051 * Returns 0 on success, non-zero value on failure 4186 * Returns 0 on success, non-zero value on failure
4052 */ 4187 */
4053static int ufshcd_hba_enable(struct ufs_hba *hba) 4188static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4054{ 4189{
4055 int retry; 4190 int retry;
4056 4191
@@ -4105,6 +4240,31 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
4105 return 0; 4240 return 0;
4106} 4241}
4107 4242
4243static int ufshcd_hba_enable(struct ufs_hba *hba)
4244{
4245 int ret;
4246
4247 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4248 ufshcd_set_link_off(hba);
4249 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4250
4251 /* enable UIC related interrupts */
4252 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4253 ret = ufshcd_dme_reset(hba);
4254 if (!ret) {
4255 ret = ufshcd_dme_enable(hba);
4256 if (!ret)
4257 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4258 if (ret)
4259 dev_err(hba->dev,
4260 "Host controller enable failed with non-hce\n");
4261 }
4262 } else {
4263 ret = ufshcd_hba_execute_hce(hba);
4264 }
4265
4266 return ret;
4267}
4108static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) 4268static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4109{ 4269{
4110 int tx_lanes, i, err = 0; 4270 int tx_lanes, i, err = 0;
@@ -4678,7 +4838,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4678 * false interrupt if device completes another request after resetting 4838 * false interrupt if device completes another request after resetting
4679 * aggregation and before reading the DB. 4839 * aggregation and before reading the DB.
4680 */ 4840 */
4681 if (ufshcd_is_intr_aggr_allowed(hba)) 4841 if (ufshcd_is_intr_aggr_allowed(hba) &&
4842 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
4682 ufshcd_reset_intr_aggr(hba); 4843 ufshcd_reset_intr_aggr(hba);
4683 4844
4684 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); 4845 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -4969,6 +5130,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
4969 hba = container_of(work, struct ufs_hba, eeh_work); 5130 hba = container_of(work, struct ufs_hba, eeh_work);
4970 5131
4971 pm_runtime_get_sync(hba->dev); 5132 pm_runtime_get_sync(hba->dev);
5133 scsi_block_requests(hba->host);
4972 err = ufshcd_get_ee_status(hba, &status); 5134 err = ufshcd_get_ee_status(hba, &status);
4973 if (err) { 5135 if (err) {
4974 dev_err(hba->dev, "%s: failed to get exception status %d\n", 5136 dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -4982,6 +5144,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
4982 ufshcd_bkops_exception_event_handler(hba); 5144 ufshcd_bkops_exception_event_handler(hba);
4983 5145
4984out: 5146out:
5147 scsi_unblock_requests(hba->host);
4985 pm_runtime_put_sync(hba->dev); 5148 pm_runtime_put_sync(hba->dev);
4986 return; 5149 return;
4987} 5150}
@@ -5192,7 +5355,7 @@ skip_err_handling:
5192 5355
5193out: 5356out:
5194 spin_unlock_irqrestore(hba->host->host_lock, flags); 5357 spin_unlock_irqrestore(hba->host->host_lock, flags);
5195 scsi_unblock_requests(hba->host); 5358 ufshcd_scsi_unblock_requests(hba);
5196 ufshcd_release(hba); 5359 ufshcd_release(hba);
5197 pm_runtime_put_sync(hba->dev); 5360 pm_runtime_put_sync(hba->dev);
5198} 5361}
@@ -5294,7 +5457,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
5294 /* handle fatal errors only when link is functional */ 5457 /* handle fatal errors only when link is functional */
5295 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { 5458 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5296 /* block commands from scsi mid-layer */ 5459 /* block commands from scsi mid-layer */
5297 scsi_block_requests(hba->host); 5460 ufshcd_scsi_block_requests(hba);
5298 5461
5299 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED; 5462 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5300 5463
@@ -5371,19 +5534,30 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
5371 u32 intr_status, enabled_intr_status; 5534 u32 intr_status, enabled_intr_status;
5372 irqreturn_t retval = IRQ_NONE; 5535 irqreturn_t retval = IRQ_NONE;
5373 struct ufs_hba *hba = __hba; 5536 struct ufs_hba *hba = __hba;
5537 int retries = hba->nutrs;
5374 5538
5375 spin_lock(hba->host->host_lock); 5539 spin_lock(hba->host->host_lock);
5376 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 5540 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5377 enabled_intr_status =
5378 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5379 5541
5380 if (intr_status) 5542 /*
5381 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); 5543 * There could be max of hba->nutrs reqs in flight and in worst case
5544 * if the reqs get finished 1 by 1 after the interrupt status is
5545 * read, make sure we handle them by checking the interrupt status
5546 * again in a loop until we process all of the reqs before returning.
5547 */
5548 do {
5549 enabled_intr_status =
5550 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5551 if (intr_status)
5552 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5553 if (enabled_intr_status) {
5554 ufshcd_sl_intr(hba, enabled_intr_status);
5555 retval = IRQ_HANDLED;
5556 }
5557
5558 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5559 } while (intr_status && --retries);
5382 5560
5383 if (enabled_intr_status) {
5384 ufshcd_sl_intr(hba, enabled_intr_status);
5385 retval = IRQ_HANDLED;
5386 }
5387 spin_unlock(hba->host->host_lock); 5561 spin_unlock(hba->host->host_lock);
5388 return retval; 5562 return retval;
5389} 5563}
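
The rewritten ufshcd_intr() bounds its loop at hba->nutrs iterations: each pass acknowledges the pending status, services it, then re-reads the register, so completions that land between the read and the write-1-to-clear acknowledge are handled in the same invocation instead of being lost. The shape of the loop as a sketch; read_status(), read_enable(), ack() and service() are hypothetical helpers:

    int retries = max_outstanding;		/* hba->nutrs in the driver */
    u32 status = read_status();

    do {
    	u32 enabled = status & read_enable();

    	if (status)
    		ack(status);		/* write-1-to-clear acknowledge */
    	if (enabled)
    		service(enabled);
    	status = read_status();		/* pick up late completions */
    } while (status && --retries);
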
@@ -5398,7 +5572,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5398 goto out; 5572 goto out;
5399 5573
5400 spin_lock_irqsave(hba->host->host_lock, flags); 5574 spin_lock_irqsave(hba->host->host_lock, flags);
5401 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR); 5575 ufshcd_utmrl_clear(hba, tag);
5402 spin_unlock_irqrestore(hba->host->host_lock, flags); 5576 spin_unlock_irqrestore(hba->host->host_lock, flags);
5403 5577
5404 /* poll for max. 1 sec to clear door bell register by h/w */ 5578 /* poll for max. 1 sec to clear door bell register by h/w */
@@ -5958,14 +6132,18 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
5958{ 6132{
5959 int ret; 6133 int ret;
5960 int buff_len = hba->desc_size.pwr_desc; 6134 int buff_len = hba->desc_size.pwr_desc;
5961 u8 desc_buf[hba->desc_size.pwr_desc]; 6135 u8 *desc_buf;
6136
6137 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6138 if (!desc_buf)
6139 return;
5962 6140
5963 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); 6141 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
5964 if (ret) { 6142 if (ret) {
5965 dev_err(hba->dev, 6143 dev_err(hba->dev,
5966 "%s: Failed reading power descriptor.len = %d ret = %d", 6144 "%s: Failed reading power descriptor.len = %d ret = %d",
5967 __func__, buff_len, ret); 6145 __func__, buff_len, ret);
5968 return; 6146 goto out;
5969 } 6147 }
5970 6148
5971 hba->init_prefetch_data.icc_level = 6149 hba->init_prefetch_data.icc_level =
@@ -5983,6 +6161,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
5983 "%s: Failed configuring bActiveICCLevel = %d ret = %d", 6161 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
5984 __func__, hba->init_prefetch_data.icc_level , ret); 6162 __func__, hba->init_prefetch_data.icc_level , ret);
5985 6163
6164out:
6165 kfree(desc_buf);
5986} 6166}
5987 6167
5988/** 6168/**
@@ -6052,9 +6232,17 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
6052 struct ufs_dev_desc *dev_desc) 6232 struct ufs_dev_desc *dev_desc)
6053{ 6233{
6054 int err; 6234 int err;
6235 size_t buff_len;
6055 u8 model_index; 6236 u8 model_index;
6056 u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0}; 6237 u8 *desc_buf;
6057 u8 desc_buf[hba->desc_size.dev_desc]; 6238
6239 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6240 QUERY_DESC_MAX_SIZE + 1);
6241 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6242 if (!desc_buf) {
6243 err = -ENOMEM;
6244 goto out;
6245 }
6058 6246
6059 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); 6247 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6060 if (err) { 6248 if (err) {
@@ -6072,7 +6260,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
6072 6260
6073 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; 6261 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6074 6262
6075 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf, 6263 /* Zero-pad entire buffer for string termination. */
6264 memset(desc_buf, 0, buff_len);
6265
6266 err = ufshcd_read_string_desc(hba, model_index, desc_buf,
6076 QUERY_DESC_MAX_SIZE, true/*ASCII*/); 6267 QUERY_DESC_MAX_SIZE, true/*ASCII*/);
6077 if (err) { 6268 if (err) {
6078 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", 6269 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
@@ -6080,15 +6271,16 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
6080 goto out; 6271 goto out;
6081 } 6272 }
6082 6273
6083 str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; 6274 desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6084 strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), 6275 strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
6085 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], 6276 min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
6086 MAX_MODEL_LEN)); 6277 MAX_MODEL_LEN));
6087 6278
6088 /* Null terminate the model string */ 6279 /* Null terminate the model string */
6089 dev_desc->model[MAX_MODEL_LEN] = '\0'; 6280 dev_desc->model[MAX_MODEL_LEN] = '\0';
6090 6281
6091out: 6282out:
6283 kfree(desc_buf);
6092 return err; 6284 return err;
6093} 6285}
6094 6286
@@ -6439,16 +6631,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
6439 sizeof(struct ufs_pa_layer_attr)); 6631 sizeof(struct ufs_pa_layer_attr));
6440 hba->clk_scaling.saved_pwr_info.is_valid = true; 6632 hba->clk_scaling.saved_pwr_info.is_valid = true;
6441 if (!hba->devfreq) { 6633 if (!hba->devfreq) {
6442 hba->devfreq = devm_devfreq_add_device(hba->dev, 6634 ret = ufshcd_devfreq_init(hba);
6443 &ufs_devfreq_profile, 6635 if (ret)
6444 "simple_ondemand",
6445 NULL);
6446 if (IS_ERR(hba->devfreq)) {
6447 ret = PTR_ERR(hba->devfreq);
6448 dev_err(hba->dev, "Unable to register with devfreq %d\n",
6449 ret);
6450 goto out; 6636 goto out;
6451 }
6452 } 6637 }
6453 hba->clk_scaling.is_allowed = true; 6638 hba->clk_scaling.is_allowed = true;
6454 } 6639 }
@@ -6799,9 +6984,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
6799 if (list_empty(head)) 6984 if (list_empty(head))
6800 goto out; 6985 goto out;
6801 6986
6802 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); 6987 /*
6803 if (ret) 6988 * vendor specific setup_clocks ops may depend on clocks managed by
6804 return ret; 6989 * this standard driver, hence call the vendor specific setup_clocks
6990 * before disabling the clocks managed here.
6991 */
6992 if (!on) {
6993 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
6994 if (ret)
6995 return ret;
6996 }
6805 6997
6806 list_for_each_entry(clki, head, list) { 6998 list_for_each_entry(clki, head, list) {
6807 if (!IS_ERR_OR_NULL(clki->clk)) { 6999 if (!IS_ERR_OR_NULL(clki->clk)) {
@@ -6825,9 +7017,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
6825 } 7017 }
6826 } 7018 }
6827 7019
6828 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); 7020 /*
6829 if (ret) 7021 * vendor specific setup_clocks ops may depend on clocks managed by
6830 return ret; 7022 * this standard driver, hence call the vendor specific setup_clocks
7023 * after enabling the clocks managed here.
7024 */
7025 if (on) {
7026 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7027 if (ret)
7028 return ret;
7029 }
6831 7030
6832out: 7031out:
6833 if (ret) { 7032 if (ret) {
@@ -6992,6 +7191,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
6992 if (hba->devfreq) 7191 if (hba->devfreq)
6993 ufshcd_suspend_clkscaling(hba); 7192 ufshcd_suspend_clkscaling(hba);
6994 destroy_workqueue(hba->clk_scaling.workq); 7193 destroy_workqueue(hba->clk_scaling.workq);
7194 ufshcd_devfreq_remove(hba);
6995 } 7195 }
6996 ufshcd_setup_clocks(hba, false); 7196 ufshcd_setup_clocks(hba, false);
6997 ufshcd_setup_hba_vreg(hba, false); 7197 ufshcd_setup_hba_vreg(hba, false);
@@ -7904,7 +8104,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7904 8104
7905 /* Hold auto suspend until async scan completes */ 8105 /* Hold auto suspend until async scan completes */
7906 pm_runtime_get_sync(dev); 8106 pm_runtime_get_sync(dev);
7907 8107 atomic_set(&hba->scsi_block_reqs_cnt, 0);
7908 /* 8108 /*
7909 * We are assuming that device wasn't put in sleep/power-down 8109 * We are assuming that device wasn't put in sleep/power-down
7910 * state exclusively during the boot stage before kernel. 8110 * state exclusively during the boot stage before kernel.
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 8110dcd04d22..f51758f1e5cc 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -362,6 +362,7 @@ struct ufs_clk_gating {
362 struct device_attribute enable_attr; 362 struct device_attribute enable_attr;
363 bool is_enabled; 363 bool is_enabled;
364 int active_reqs; 364 int active_reqs;
365 struct workqueue_struct *clk_gating_workq;
365}; 366};
366 367
367struct ufs_saved_pwr_info { 368struct ufs_saved_pwr_info {
@@ -499,6 +500,7 @@ struct ufs_stats {
499 * @urgent_bkops_lvl: keeps track of urgent bkops level for device 500 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
500 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for 501 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
501 * device is known or not. 502 * device is known or not.
503 * @scsi_block_reqs_cnt: reference counting for scsi block requests
502 */ 504 */
503struct ufs_hba { 505struct ufs_hba {
504 void __iomem *mmio_base; 506 void __iomem *mmio_base;
@@ -595,6 +597,22 @@ struct ufs_hba {
595 */ 597 */
596 #define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80 598 #define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80
597 599
600 /*
601 * Clear handling for the transfer/task request lists is inverted: write 1, not 0, to clear a slot.
602 */
603 #define UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR 0x100
604
605 /*
606 * This quirk needs to be enabled if the host controller doesn't allow
607 * the interrupt aggregation timer and counter to be reset by s/w.
608 */
609 #define UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR 0x200
610
611 /*
612 * This quirk needs to be enabled if the host controller cannot be
613 * enabled via the HCE register.
614 */
615 #define UFSHCI_QUIRK_BROKEN_HCE 0x400
598 unsigned int quirks; /* Deviations from standard UFSHCI spec. */ 616 unsigned int quirks; /* Deviations from standard UFSHCI spec. */
599 617
600 /* Device deviations from standard UFS device spec. */ 618 /* Device deviations from standard UFS device spec. */
@@ -683,6 +701,7 @@ struct ufs_hba {
683 701
684 struct rw_semaphore clk_scaling_lock; 702 struct rw_semaphore clk_scaling_lock;
685 struct ufs_desc_size desc_size; 703 struct ufs_desc_size desc_size;
704 atomic_t scsi_block_reqs_cnt;
686}; 705};
687 706
688/* Returns true if clocks can be gated. Otherwise false */ 707/* Returns true if clocks can be gated. Otherwise false */
@@ -789,6 +808,8 @@ extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
789 u8 attr_set, u32 mib_val, u8 peer); 808 u8 attr_set, u32 mib_val, u8 peer);
790extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, 809extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
791 u32 *mib_val, u8 peer); 810 u32 *mib_val, u8 peer);
811extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
812 struct ufs_pa_layer_attr *desired_pwr_mode);
792 813
793/* UIC command interfaces for DME primitives */ 814/* UIC command interfaces for DME primitives */
794#define DME_LOCAL 0 815#define DME_LOCAL 0
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 2ba2b7b47f41..974bfb3f30f4 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -978,18 +978,7 @@ static struct pci_driver wd719x_pci_driver = {
978 .remove = wd719x_pci_remove, 978 .remove = wd719x_pci_remove,
979}; 979};
980 980
981static int __init wd719x_init(void) 981module_pci_driver(wd719x_pci_driver);
982{
983 return pci_register_driver(&wd719x_pci_driver);
984}
985
986static void __exit wd719x_exit(void)
987{
988 pci_unregister_driver(&wd719x_pci_driver);
989}
990
991module_init(wd719x_init);
992module_exit(wd719x_exit);
981module_pci_driver(wd719x_pci_driver);
993 982
994 983MODULE_DESCRIPTION("Western Digital WD7193/7197/7296 SCSI driver");
995 984MODULE_AUTHOR("Ondrej Zary, Aaron Dewell, Juergen Gaertner");
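
The conversion above is behavior-preserving: module_pci_driver() emits the same register/unregister pair that was open-coded. From memory, the upstream macro in <linux/pci.h> reduces to the following (shown as a sketch, not quoted from this tree):

#define module_pci_driver(__pci_driver) \
	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
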
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
new file mode 100644
index 000000000000..bb70882e6b56
--- /dev/null
+++ b/drivers/scsi/zorro_esp.c
@@ -0,0 +1,1172 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ESP front-end for Amiga ZORRO SCSI systems.
4 *
5 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
6 *
7 * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
8 * migration to ESP SCSI core
9 *
10 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
11 * Blizzard 1230 DMA and probe function fixes
12 *
13 * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
14 */
15/*
16 * ZORRO bus code from:
17 */
18/*
19 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
20 * Amiga MacroSystemUS WarpEngine SCSI controller.
21 * Amiga Technologies/DKB A4091 SCSI controller.
22 *
23 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
24 * plus modifications of the 53c7xx.c driver to support the Amiga.
25 *
26 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/interrupt.h>
34#include <linux/dma-mapping.h>
35#include <linux/scatterlist.h>
36#include <linux/delay.h>
37#include <linux/zorro.h>
38#include <linux/slab.h>
39
40#include <asm/page.h>
41#include <asm/pgtable.h>
42#include <asm/cacheflush.h>
43#include <asm/amigahw.h>
44#include <asm/amigaints.h>
45
46#include <scsi/scsi_host.h>
47#include <scsi/scsi_transport_spi.h>
48#include <scsi/scsi_device.h>
49#include <scsi/scsi_tcq.h>
50
51#include "esp_scsi.h"
52
53MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
54MODULE_DESCRIPTION("Amiga Zorro NCR5C9x (ESP) driver");
55MODULE_LICENSE("GPL");
56
57/* per-board register layout definitions */
58
59/* Blizzard 1230 DMA interface */
60
61struct blz1230_dma_registers {
62 unsigned char dma_addr; /* DMA address [0x0000] */
63 unsigned char dmapad2[0x7fff];
64 unsigned char dma_latch; /* DMA latch [0x8000] */
65};
66
67/* Blizzard 1230II DMA interface */
68
69struct blz1230II_dma_registers {
70 unsigned char dma_addr; /* DMA address [0x0000] */
71 unsigned char dmapad2[0xf];
72 unsigned char dma_latch; /* DMA latch [0x0010] */
73};
74
75/* Blizzard 2060 DMA interface */
76
77struct blz2060_dma_registers {
78 unsigned char dma_led_ctrl; /* DMA led control [0x000] */
79 unsigned char dmapad1[0x0f];
80 unsigned char dma_addr0; /* DMA address (MSB) [0x010] */
81 unsigned char dmapad2[0x03];
82 unsigned char dma_addr1; /* DMA address [0x014] */
83 unsigned char dmapad3[0x03];
84 unsigned char dma_addr2; /* DMA address [0x018] */
85 unsigned char dmapad4[0x03];
86 unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */
87};
88
89/* DMA control bits */
90#define DMA_WRITE 0x80000000
91
92/* Cyberstorm DMA interface */
93
94struct cyber_dma_registers {
95 unsigned char dma_addr0; /* DMA address (MSB) [0x000] */
96 unsigned char dmapad1[1];
97 unsigned char dma_addr1; /* DMA address [0x002] */
98 unsigned char dmapad2[1];
99 unsigned char dma_addr2; /* DMA address [0x004] */
100 unsigned char dmapad3[1];
101 unsigned char dma_addr3; /* DMA address (LSB) [0x006] */
102 unsigned char dmapad4[0x3fb];
103 unsigned char cond_reg; /* DMA cond (ro) [0x402] */
104#define ctrl_reg cond_reg /* DMA control (wo) [0x402] */
105};
106
107/* DMA control bits */
108#define CYBER_DMA_WRITE 0x40 /* DMA direction. 1 = write */
109#define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */
110
111/* DMA status bits */
112#define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? */
113
114/* The CyberStorm II DMA interface */
115struct cyberII_dma_registers {
116 unsigned char cond_reg; /* DMA cond (ro) [0x000] */
117#define ctrl_reg cond_reg /* DMA control (wo) [0x000] */
118 unsigned char dmapad4[0x3f];
119 unsigned char dma_addr0; /* DMA address (MSB) [0x040] */
120 unsigned char dmapad1[3];
121 unsigned char dma_addr1; /* DMA address [0x044] */
122 unsigned char dmapad2[3];
123 unsigned char dma_addr2; /* DMA address [0x048] */
124 unsigned char dmapad3[3];
125 unsigned char dma_addr3; /* DMA address (LSB) [0x04c] */
126};
127
128/* Fastlane DMA interface */
129
130struct fastlane_dma_registers {
131 unsigned char cond_reg; /* DMA status (ro) [0x0000] */
132#define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */
133 char dmapad1[0x3f];
134 unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */
135};
136
137/*
138 * The controller registers can be found in the Z2 config area at these
139 * offsets:
140 */
141#define FASTLANE_ESP_ADDR 0x1000001
142
143/* DMA status bits */
144#define FASTLANE_DMA_MINT 0x80
145#define FASTLANE_DMA_IACT 0x40
146#define FASTLANE_DMA_CREQ 0x20
147
148/* DMA control bits */
149#define FASTLANE_DMA_FCODE 0xa0
150#define FASTLANE_DMA_MASK 0xf3
151#define FASTLANE_DMA_WRITE 0x08 /* 1 = write */
152#define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */
153#define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? */
154#define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */
155
156/*
157 * private data used for driver
158 */
159struct zorro_esp_priv {
160 struct esp *esp; /* our ESP instance - for Scsi_host* */
161 void __iomem *board_base; /* virtual address (Zorro III board) */
162 int error; /* PIO error flag */
163 int zorro3; /* board is Zorro III */
164 unsigned char ctrl_data; /* shadow copy of ctrl_reg */
165};
166
167/*
168 * On all implementations except for the Oktagon, padding between ESP
169 * registers is three bytes.
170 * On Oktagon, it is one byte - use a different accessor there.
171 *
172 * Oktagon needs PDMA - currently unsupported!
173 */
174
175static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
176{
177 writeb(val, esp->regs + (reg * 4UL));
178}
179
180static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
181{
182 return readb(esp->regs + (reg * 4UL));
183}
184
185static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
186 size_t sz, int dir)
187{
188 return dma_map_single(esp->dev, buf, sz, dir);
189}
190
191static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
192 int num_sg, int dir)
193{
194 return dma_map_sg(esp->dev, sg, num_sg, dir);
195}
196
197static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
198 size_t sz, int dir)
199{
200 dma_unmap_single(esp->dev, addr, sz, dir);
201}
202
203static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
204 int num_sg, int dir)
205{
206 dma_unmap_sg(esp->dev, sg, num_sg, dir);
207}
208
209static int zorro_esp_irq_pending(struct esp *esp)
210{
211 /* check ESP status register; DMA has no status reg. */
212 if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
213 return 1;
214
215 return 0;
216}
217
218static int cyber_esp_irq_pending(struct esp *esp)
219{
220 struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
221 unsigned char dma_status = readb(&dregs->cond_reg);
222
223 /* It's important to check the DMA IRQ bit in the correct way! */
224 return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
225 (dma_status & CYBER_DMA_HNDL_INTR));
226}
227
228static int fastlane_esp_irq_pending(struct esp *esp)
229{
230 struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
231 unsigned char dma_status;
232
233 dma_status = readb(&dregs->cond_reg);
234
235 if (dma_status & FASTLANE_DMA_IACT)
236 return 0; /* not our IRQ */
237
238 /* Return non-zero if ESP requested IRQ */
239 return (
240 (dma_status & FASTLANE_DMA_CREQ) &&
241 (!(dma_status & FASTLANE_DMA_MINT)) &&
242 (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
243}
244
245static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
246 u32 dma_len)
247{
248 return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
249}
250
251static void zorro_esp_reset_dma(struct esp *esp)
252{
253 /* nothing to do here */
254}
255
256static void zorro_esp_dma_drain(struct esp *esp)
257{
258 /* nothing to do here */
259}
260
261static void zorro_esp_dma_invalidate(struct esp *esp)
262{
263 /* nothing to do here */
264}
265
266static void fastlane_esp_dma_invalidate(struct esp *esp)
267{
268 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
269 struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
270 unsigned char *ctrl_data = &zep->ctrl_data;
271
272 *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
273 writeb(0, &dregs->clear_strobe);
274 z_writel(0, zep->board_base);
275}
276
277/*
278 * Programmed IO routines follow.
279 */
280
281static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
282{
283 int i = 500000;
284
285 do {
286 unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
287 & ESP_FF_FBYTES;
288
289 if (fbytes)
290 return fbytes;
291
292 udelay(2);
293 } while (--i);
294
295 pr_err("FIFO is empty (sreg %02x)\n",
296 zorro_esp_read8(esp, ESP_STATUS));
297 return 0;
298}
299
300static inline int zorro_esp_wait_for_intr(struct esp *esp)
301{
302 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
303 int i = 500000;
304
305 do {
306 esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
307 if (esp->sreg & ESP_STAT_INTR)
308 return 0;
309
310 udelay(2);
311 } while (--i);
312
313 pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
314 zep->error = 1;
315 return 1;
316}
317
318/*
319 * PIO macros as used in mac_esp.c.
320 * Note that addr and fifo arguments are local-scope variables declared
321 * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
322 * and addr and fifo are referenced in each use of the macros so there
323 * is no need to pass them as macro parameters.
324 */
325#define ZORRO_ESP_PIO_LOOP(operands, reg1) \
326 asm volatile ( \
327 "1: moveb " operands "\n" \
328 " subqw #1,%1 \n" \
329 " jbne 1b \n" \
330 : "+a" (addr), "+r" (reg1) \
331 : "a" (fifo));
332
333#define ZORRO_ESP_PIO_FILL(operands, reg1) \
334 asm volatile ( \
335 " moveb " operands "\n" \
336 " moveb " operands "\n" \
337 " moveb " operands "\n" \
338 " moveb " operands "\n" \
339 " moveb " operands "\n" \
340 " moveb " operands "\n" \
341 " moveb " operands "\n" \
342 " moveb " operands "\n" \
343 " moveb " operands "\n" \
344 " moveb " operands "\n" \
345 " moveb " operands "\n" \
346 " moveb " operands "\n" \
347 " moveb " operands "\n" \
348 " moveb " operands "\n" \
349 " moveb " operands "\n" \
350 " moveb " operands "\n" \
351 " subqw #8,%1 \n" \
352 " subqw #8,%1 \n" \
353 : "+a" (addr), "+r" (reg1) \
354 : "a" (fifo));
355
356#define ZORRO_ESP_FIFO_SIZE 16
357
358static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
359 u32 dma_count, int write, u8 cmd)
360{
361 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
362 u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
363 u8 phase = esp->sreg & ESP_STAT_PMASK;
364
365 cmd &= ~ESP_CMD_DMA;
366
367 if (write) {
368 u8 *dst = (u8 *)addr;
369 u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
370
371 scsi_esp_cmd(esp, cmd);
372
373 while (1) {
374 if (!zorro_esp_wait_for_fifo(esp))
375 break;
376
377 *dst++ = zorro_esp_read8(esp, ESP_FDATA);
378 --esp_count;
379
380 if (!esp_count)
381 break;
382
383 if (zorro_esp_wait_for_intr(esp))
384 break;
385
386 if ((esp->sreg & ESP_STAT_PMASK) != phase)
387 break;
388
389 esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
390 if (esp->ireg & mask) {
391 zep->error = 1;
392 break;
393 }
394
395 if (phase == ESP_MIP)
396 scsi_esp_cmd(esp, ESP_CMD_MOK);
397
398 scsi_esp_cmd(esp, ESP_CMD_TI);
399 }
400 } else { /* unused, as long as we only handle MIP here */
401 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
402
403 if (esp_count >= ZORRO_ESP_FIFO_SIZE)
404 ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
405 else
406 ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
407
408 scsi_esp_cmd(esp, cmd);
409
410 while (esp_count) {
411 unsigned int n;
412
413 if (zorro_esp_wait_for_intr(esp))
414 break;
415
416 if ((esp->sreg & ESP_STAT_PMASK) != phase)
417 break;
418
419 esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
420 if (esp->ireg & ~ESP_INTR_BSERV) {
421 zep->error = 1;
422 break;
423 }
424
425 n = ZORRO_ESP_FIFO_SIZE -
426 (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
427 if (n > esp_count)
428 n = esp_count;
429
430 if (n == ZORRO_ESP_FIFO_SIZE)
431 ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
432 else {
433 esp_count -= n;
434 ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
435 }
436
437 scsi_esp_cmd(esp, ESP_CMD_TI);
438 }
439 }
440}
441
442/* Blizzard 1230/60 SCSI-IV DMA */
443
444static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
445 u32 esp_count, u32 dma_count, int write, u8 cmd)
446{
447 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
448 struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
449 u8 phase = esp->sreg & ESP_STAT_PMASK;
450
451 zep->error = 0;
452 /*
453 * Use PIO if transferring message bytes to esp->command_block_dma.
454 * PIO requires a virtual address, so substitute esp->command_block
455 * for addr.
456 */
457 if (phase == ESP_MIP && addr == esp->command_block_dma) {
458 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
459 esp_count, dma_count, write, cmd);
460 return;
461 }
462
463 if (write)
464 /* DMA receive */
465 dma_sync_single_for_device(esp->dev, addr, esp_count,
466 DMA_FROM_DEVICE);
467 else
468 /* DMA send */
469 dma_sync_single_for_device(esp->dev, addr, esp_count,
470 DMA_TO_DEVICE);
471
472 addr >>= 1;
473 if (write)
474 addr &= ~(DMA_WRITE);
475 else
476 addr |= DMA_WRITE;
477
478 writeb((addr >> 24) & 0xff, &dregs->dma_latch);
479 writeb((addr >> 24) & 0xff, &dregs->dma_addr);
480 writeb((addr >> 16) & 0xff, &dregs->dma_addr);
481 writeb((addr >> 8) & 0xff, &dregs->dma_addr);
482 writeb(addr & 0xff, &dregs->dma_addr);
483
484 scsi_esp_cmd(esp, ESP_CMD_DMA);
485 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
486 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
487 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
488
489 scsi_esp_cmd(esp, cmd);
490}
491
492/* Blizzard 1230-II DMA */
493
494static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
495 u32 esp_count, u32 dma_count, int write, u8 cmd)
496{
497 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
498 struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
499 u8 phase = esp->sreg & ESP_STAT_PMASK;
500
501 zep->error = 0;
502 /* Use PIO if transferring message bytes to esp->command_block_dma */
503 if (phase == ESP_MIP && addr == esp->command_block_dma) {
504 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
505 esp_count, dma_count, write, cmd);
506 return;
507 }
508
509 if (write)
510 /* DMA receive */
511 dma_sync_single_for_device(esp->dev, addr, esp_count,
512 DMA_FROM_DEVICE);
513 else
514 /* DMA send */
515 dma_sync_single_for_device(esp->dev, addr, esp_count,
516 DMA_TO_DEVICE);
517
518 addr >>= 1;
519 if (write)
520 addr &= ~(DMA_WRITE);
521 else
522 addr |= DMA_WRITE;
523
524 writeb((addr >> 24) & 0xff, &dregs->dma_latch);
525 writeb((addr >> 16) & 0xff, &dregs->dma_addr);
526 writeb((addr >> 8) & 0xff, &dregs->dma_addr);
527 writeb(addr & 0xff, &dregs->dma_addr);
528
529 scsi_esp_cmd(esp, ESP_CMD_DMA);
530 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
531 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
532 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
533
534 scsi_esp_cmd(esp, cmd);
535}
536
537/* Blizzard 2060 DMA */
538
539static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
540 u32 esp_count, u32 dma_count, int write, u8 cmd)
541{
542 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
543 struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
544 u8 phase = esp->sreg & ESP_STAT_PMASK;
545
546 zep->error = 0;
547 /* Use PIO if transferring message bytes to esp->command_block_dma */
548 if (phase == ESP_MIP && addr == esp->command_block_dma) {
549 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
550 esp_count, dma_count, write, cmd);
551 return;
552 }
553
554 if (write)
555 /* DMA receive */
556 dma_sync_single_for_device(esp->dev, addr, esp_count,
557 DMA_FROM_DEVICE);
558 else
559 /* DMA send */
560 dma_sync_single_for_device(esp->dev, addr, esp_count,
561 DMA_TO_DEVICE);
562
563 addr >>= 1;
564 if (write)
565 addr &= ~(DMA_WRITE);
566 else
567 addr |= DMA_WRITE;
568
569 writeb(addr & 0xff, &dregs->dma_addr3);
570 writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
571 writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
572 writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
573
574 scsi_esp_cmd(esp, ESP_CMD_DMA);
575 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
576 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
577 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
578
579 scsi_esp_cmd(esp, cmd);
580}
581
582/* Cyberstorm I DMA */
583
584static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
585 u32 esp_count, u32 dma_count, int write, u8 cmd)
586{
587 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
588 struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
589 u8 phase = esp->sreg & ESP_STAT_PMASK;
590 unsigned char *ctrl_data = &zep->ctrl_data;
591
592 zep->error = 0;
593 /* Use PIO if transferring message bytes to esp->command_block_dma */
594 if (phase == ESP_MIP && addr == esp->command_block_dma) {
595 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
596 esp_count, dma_count, write, cmd);
597 return;
598 }
599
600 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
601 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
602 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
603
604 if (write) {
605 /* DMA receive */
606 dma_sync_single_for_device(esp->dev, addr, esp_count,
607 DMA_FROM_DEVICE);
608 addr &= ~(1);
609 } else {
610 /* DMA send */
611 dma_sync_single_for_device(esp->dev, addr, esp_count,
612 DMA_TO_DEVICE);
613 addr |= 1;
614 }
615
616 writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
617 writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
618 writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
619 writeb(addr & 0xff, &dregs->dma_addr3);
620
621 if (write)
622 *ctrl_data &= ~(CYBER_DMA_WRITE);
623 else
624 *ctrl_data |= CYBER_DMA_WRITE;
625
626 *ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
627
628 writeb(*ctrl_data, &dregs->ctrl_reg);
629
630 scsi_esp_cmd(esp, cmd);
631}
632
633/* Cyberstorm II DMA */
634
635static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
636 u32 esp_count, u32 dma_count, int write, u8 cmd)
637{
638 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
639 struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
640 u8 phase = esp->sreg & ESP_STAT_PMASK;
641
642 zep->error = 0;
643 /* Use PIO if transferring message bytes to esp->command_block_dma */
644 if (phase == ESP_MIP && addr == esp->command_block_dma) {
645 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
646 esp_count, dma_count, write, cmd);
647 return;
648 }
649
650 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
651 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
652 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
653
654 if (write) {
655 /* DMA receive */
656 dma_sync_single_for_device(esp->dev, addr, esp_count,
657 DMA_FROM_DEVICE);
658 addr &= ~(1);
659 } else {
660 /* DMA send */
661 dma_sync_single_for_device(esp->dev, addr, esp_count,
662 DMA_TO_DEVICE);
663 addr |= 1;
664 }
665
666 writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
667 writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
668 writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
669 writeb(addr & 0xff, &dregs->dma_addr3);
670
671 scsi_esp_cmd(esp, cmd);
672}
673
674/* Fastlane DMA */
675
676static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
677 u32 esp_count, u32 dma_count, int write, u8 cmd)
678{
679 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
680 struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
681 u8 phase = esp->sreg & ESP_STAT_PMASK;
682 unsigned char *ctrl_data = &zep->ctrl_data;
683
684 zep->error = 0;
685 /* Use PIO if transferring message bytes to esp->command_block_dma */
686 if (phase == ESP_MIP && addr == esp->command_block_dma) {
687 zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
688 esp_count, dma_count, write, cmd);
689 return;
690 }
691
692 zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
693 zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
694 zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
695
696 if (write) {
697 /* DMA receive */
698 dma_sync_single_for_device(esp->dev, addr, esp_count,
699 DMA_FROM_DEVICE);
700 addr &= ~(1);
701 } else {
702 /* DMA send */
703 dma_sync_single_for_device(esp->dev, addr, esp_count,
704 DMA_TO_DEVICE);
705 addr |= 1;
706 }
707
708 writeb(0, &dregs->clear_strobe);
709 z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));
710
711 if (write) {
712 *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
713 FASTLANE_DMA_ENABLE;
714 } else {
715 *ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
716 FASTLANE_DMA_ENABLE |
717 FASTLANE_DMA_WRITE);
718 }
719
720 writeb(*ctrl_data, &dregs->ctrl_reg);
721
722 scsi_esp_cmd(esp, cmd);
723}
724
725static int zorro_esp_dma_error(struct esp *esp)
726{
727 struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
728
729 /* check for error in case we've been doing PIO */
730 if (zep->error == 1)
731 return 1;
732
733 /* do nothing - there seems to be no way to check for DMA errors */
734 return 0;
735}
736
737/* per-board ESP driver ops */
738
739static const struct esp_driver_ops blz1230_esp_ops = {
740 .esp_write8 = zorro_esp_write8,
741 .esp_read8 = zorro_esp_read8,
742 .map_single = zorro_esp_map_single,
743 .map_sg = zorro_esp_map_sg,
744 .unmap_single = zorro_esp_unmap_single,
745 .unmap_sg = zorro_esp_unmap_sg,
746 .irq_pending = zorro_esp_irq_pending,
747 .dma_length_limit = zorro_esp_dma_length_limit,
748 .reset_dma = zorro_esp_reset_dma,
749 .dma_drain = zorro_esp_dma_drain,
750 .dma_invalidate = zorro_esp_dma_invalidate,
751 .send_dma_cmd = zorro_esp_send_blz1230_dma_cmd,
752 .dma_error = zorro_esp_dma_error,
753};
754
755static const struct esp_driver_ops blz1230II_esp_ops = {
756 .esp_write8 = zorro_esp_write8,
757 .esp_read8 = zorro_esp_read8,
758 .map_single = zorro_esp_map_single,
759 .map_sg = zorro_esp_map_sg,
760 .unmap_single = zorro_esp_unmap_single,
761 .unmap_sg = zorro_esp_unmap_sg,
762 .irq_pending = zorro_esp_irq_pending,
763 .dma_length_limit = zorro_esp_dma_length_limit,
764 .reset_dma = zorro_esp_reset_dma,
765 .dma_drain = zorro_esp_dma_drain,
766 .dma_invalidate = zorro_esp_dma_invalidate,
767 .send_dma_cmd = zorro_esp_send_blz1230II_dma_cmd,
768 .dma_error = zorro_esp_dma_error,
769};
770
771static const struct esp_driver_ops blz2060_esp_ops = {
772 .esp_write8 = zorro_esp_write8,
773 .esp_read8 = zorro_esp_read8,
774 .map_single = zorro_esp_map_single,
775 .map_sg = zorro_esp_map_sg,
776 .unmap_single = zorro_esp_unmap_single,
777 .unmap_sg = zorro_esp_unmap_sg,
778 .irq_pending = zorro_esp_irq_pending,
779 .dma_length_limit = zorro_esp_dma_length_limit,
780 .reset_dma = zorro_esp_reset_dma,
781 .dma_drain = zorro_esp_dma_drain,
782 .dma_invalidate = zorro_esp_dma_invalidate,
783 .send_dma_cmd = zorro_esp_send_blz2060_dma_cmd,
784 .dma_error = zorro_esp_dma_error,
785};
786
787static const struct esp_driver_ops cyber_esp_ops = {
788 .esp_write8 = zorro_esp_write8,
789 .esp_read8 = zorro_esp_read8,
790 .map_single = zorro_esp_map_single,
791 .map_sg = zorro_esp_map_sg,
792 .unmap_single = zorro_esp_unmap_single,
793 .unmap_sg = zorro_esp_unmap_sg,
794 .irq_pending = cyber_esp_irq_pending,
795 .dma_length_limit = zorro_esp_dma_length_limit,
796 .reset_dma = zorro_esp_reset_dma,
797 .dma_drain = zorro_esp_dma_drain,
798 .dma_invalidate = zorro_esp_dma_invalidate,
799 .send_dma_cmd = zorro_esp_send_cyber_dma_cmd,
800 .dma_error = zorro_esp_dma_error,
801};
802
803static const struct esp_driver_ops cyberII_esp_ops = {
804 .esp_write8 = zorro_esp_write8,
805 .esp_read8 = zorro_esp_read8,
806 .map_single = zorro_esp_map_single,
807 .map_sg = zorro_esp_map_sg,
808 .unmap_single = zorro_esp_unmap_single,
809 .unmap_sg = zorro_esp_unmap_sg,
810 .irq_pending = zorro_esp_irq_pending,
811 .dma_length_limit = zorro_esp_dma_length_limit,
812 .reset_dma = zorro_esp_reset_dma,
813 .dma_drain = zorro_esp_dma_drain,
814 .dma_invalidate = zorro_esp_dma_invalidate,
815 .send_dma_cmd = zorro_esp_send_cyberII_dma_cmd,
816 .dma_error = zorro_esp_dma_error,
817};
818
819static const struct esp_driver_ops fastlane_esp_ops = {
820 .esp_write8 = zorro_esp_write8,
821 .esp_read8 = zorro_esp_read8,
822 .map_single = zorro_esp_map_single,
823 .map_sg = zorro_esp_map_sg,
824 .unmap_single = zorro_esp_unmap_single,
825 .unmap_sg = zorro_esp_unmap_sg,
826 .irq_pending = fastlane_esp_irq_pending,
827 .dma_length_limit = zorro_esp_dma_length_limit,
828 .reset_dma = zorro_esp_reset_dma,
829 .dma_drain = zorro_esp_dma_drain,
830 .dma_invalidate = fastlane_esp_dma_invalidate,
831 .send_dma_cmd = zorro_esp_send_fastlane_dma_cmd,
832 .dma_error = zorro_esp_dma_error,
833};
834
835/* Zorro driver config data */
836
837struct zorro_driver_data {
838 const char *name;
839 unsigned long offset;
840 unsigned long dma_offset;
841 int absolute; /* offset is absolute address */
842 int scsi_option;
843 const struct esp_driver_ops *esp_ops;
844};
845
846/* board types */
847
848enum {
849 ZORRO_BLZ1230,
850 ZORRO_BLZ1230II,
851 ZORRO_BLZ2060,
852 ZORRO_CYBER,
853 ZORRO_CYBERII,
854 ZORRO_FASTLANE,
855};
856
857/* per-board config data */
858
859static const struct zorro_driver_data zorro_esp_boards[] = {
860 [ZORRO_BLZ1230] = {
861 .name = "Blizzard 1230",
862 .offset = 0x8000,
863 .dma_offset = 0x10000,
864 .scsi_option = 1,
865 .esp_ops = &blz1230_esp_ops,
866 },
867 [ZORRO_BLZ1230II] = {
868 .name = "Blizzard 1230II",
869 .offset = 0x10000,
870 .dma_offset = 0x10021,
871 .scsi_option = 1,
872 .esp_ops = &blz1230II_esp_ops,
873 },
874 [ZORRO_BLZ2060] = {
875 .name = "Blizzard 2060",
876 .offset = 0x1ff00,
877 .dma_offset = 0x1ffe0,
878 .esp_ops = &blz2060_esp_ops,
879 },
880 [ZORRO_CYBER] = {
881 .name = "CyberStormI",
882 .offset = 0xf400,
883 .dma_offset = 0xf800,
884 .esp_ops = &cyber_esp_ops,
885 },
886 [ZORRO_CYBERII] = {
887 .name = "CyberStormII",
888 .offset = 0x1ff03,
889 .dma_offset = 0x1ff43,
890 .scsi_option = 1,
891 .esp_ops = &cyberII_esp_ops,
892 },
893 [ZORRO_FASTLANE] = {
894 .name = "Fastlane",
895 .offset = 0x1000001,
896 .dma_offset = 0x1000041,
897 .esp_ops = &fastlane_esp_ops,
898 },
899};
900
901static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
902 { /* Blizzard 1230 IV */
903 .id = ZORRO_ID(PHASE5, 0x11, 0),
904 .driver_data = ZORRO_BLZ1230,
905 },
906 { /* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
907 .id = ZORRO_ID(PHASE5, 0x0B, 0),
908 .driver_data = ZORRO_BLZ1230II,
909 },
910 { /* Blizzard 2060 */
911 .id = ZORRO_ID(PHASE5, 0x18, 0),
912 .driver_data = ZORRO_BLZ2060,
913 },
914 { /* Cyberstorm */
915 .id = ZORRO_ID(PHASE5, 0x0C, 0),
916 .driver_data = ZORRO_CYBER,
917 },
918 { /* Cyberstorm II */
919 .id = ZORRO_ID(PHASE5, 0x19, 0),
920 .driver_data = ZORRO_CYBERII,
921 },
922 { 0 }
923};
924MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);
925
926static int zorro_esp_probe(struct zorro_dev *z,
927 const struct zorro_device_id *ent)
928{
929 struct scsi_host_template *tpnt = &scsi_esp_template;
930 struct Scsi_Host *host;
931 struct esp *esp;
932 const struct zorro_driver_data *zdd;
933 struct zorro_esp_priv *zep;
934 unsigned long board, ioaddr, dmaaddr;
935 int err;
936
937 board = zorro_resource_start(z);
938 zdd = &zorro_esp_boards[ent->driver_data];
939
940 pr_info("%s found at address 0x%lx.\n", zdd->name, board);
941
942 zep = kzalloc(sizeof(*zep), GFP_KERNEL);
943 if (!zep) {
944 pr_err("Can't allocate device private data!\n");
945 return -ENOMEM;
946 }
947
948 /* let's figure out whether we have a Zorro II or Zorro III board */
949 if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
950 if (board > 0xffffff)
951 zep->zorro3 = 1;
952 } else {
953 /*
954 * Even though most of these boards identify as Zorro II,
955 * they are in fact CPU expansion slot boards and have full
956 * access to all of memory. Fix up DMA bitmask here.
957 */
958 z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
959 }
960
961 /*
962 * If Zorro III and ID matches Fastlane, our device table entry
963 * contains data for the Blizzard 1230 II board which does share the
964 * same ID. Fix up device table entry here.
965 * TODO: Some Cyberstorm060 boards also share this ID but would need
966 * to use the Cyberstorm I driver data ... we catch this by checking
967 * for presence of ESP chip later, but don't try to fix up yet.
968 */
969 if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
970 pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
971 zdd->name, board);
972 zdd = &zorro_esp_boards[ZORRO_FASTLANE];
973 }
974
975 if (zdd->absolute) {
976 ioaddr = zdd->offset;
977 dmaaddr = zdd->dma_offset;
978 } else {
979 ioaddr = board + zdd->offset;
980 dmaaddr = board + zdd->dma_offset;
981 }
982
983 if (!zorro_request_device(z, zdd->name)) {
984 pr_err("cannot reserve region 0x%lx, abort\n",
985 board);
986 err = -EBUSY;
987 goto fail_free_zep;
988 }
989
990 host = scsi_host_alloc(tpnt, sizeof(struct esp));
991
992 if (!host) {
993 pr_err("No host detected; board configuration problem?\n");
994 err = -ENOMEM;
995 goto fail_release_device;
996 }
997
998 host->base = ioaddr;
999 host->this_id = 7;
1000
1001 esp = shost_priv(host);
1002 esp->host = host;
1003 esp->dev = &z->dev;
1004
1005 esp->scsi_id = host->this_id;
1006 esp->scsi_id_mask = (1 << esp->scsi_id);
1007
1008 esp->cfreq = 40000000;
1009
1010 zep->esp = esp;
1011
1012 dev_set_drvdata(esp->dev, zep);
1013
1014 /* additional setup required for Fastlane */
1015 if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
1016 /* map full address space up to ESP base for DMA */
1017 zep->board_base = ioremap_nocache(board,
1018 FASTLANE_ESP_ADDR-1);
1019 if (!zep->board_base) {
1020 pr_err("Cannot allocate board address space\n");
1021 err = -ENOMEM;
1022 goto fail_free_host;
1023 }
1024 /* initialize DMA control shadow register */
1025 zep->ctrl_data = (FASTLANE_DMA_FCODE |
1026 FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
1027 }
1028
1029 esp->ops = zdd->esp_ops;
1030
1031 if (ioaddr > 0xffffff)
1032 esp->regs = ioremap_nocache(ioaddr, 0x20);
1033 else
1034 /* ZorroII address space remapped nocache by early startup */
1035 esp->regs = ZTWO_VADDR(ioaddr);
1036
1037 if (!esp->regs) {
1038 err = -ENOMEM;
1039 goto fail_unmap_fastlane;
1040 }
1041
1042 /* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
1043 if (zdd->scsi_option) {
1044 zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
1045 if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
1046 err = -ENODEV;
1047 goto fail_unmap_regs;
1048 }
1049 }
1050
1051 if (zep->zorro3) {
1052 /*
1053 * Only Fastlane Z3 for now - add switch for correct struct
1054 * dma_registers size if adding any more
1055 */
1056 esp->dma_regs = ioremap_nocache(dmaaddr,
1057 sizeof(struct fastlane_dma_registers));
1058 } else
1059 /* ZorroII address space remapped nocache by early startup */
1060 esp->dma_regs = ZTWO_VADDR(dmaaddr);
1061
1062 if (!esp->dma_regs) {
1063 err = -ENOMEM;
1064 goto fail_unmap_regs;
1065 }
1066
1067 esp->command_block = dma_alloc_coherent(esp->dev, 16,
1068 &esp->command_block_dma,
1069 GFP_KERNEL);
1070
1071 if (!esp->command_block) {
1072 err = -ENOMEM;
1073 goto fail_unmap_dma_regs;
1074 }
1075
1076 host->irq = IRQ_AMIGA_PORTS;
1077 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
1078 "Amiga Zorro ESP", esp);
1079 if (err < 0) {
1080 err = -ENODEV;
1081 goto fail_free_command_block;
1082 }
1083
1084 /* register the chip */
1085 err = scsi_esp_register(esp, &z->dev);
1086
1087 if (err) {
1088 err = -ENOMEM;
1089 goto fail_free_irq;
1090 }
1091
1092 return 0;
1093
1094fail_free_irq:
1095 free_irq(host->irq, esp);
1096
1097fail_free_command_block:
1098 dma_free_coherent(esp->dev, 16,
1099 esp->command_block,
1100 esp->command_block_dma);
1101
1102fail_unmap_dma_regs:
1103 if (zep->zorro3)
1104 iounmap(esp->dma_regs);
1105
1106fail_unmap_regs:
1107 if (ioaddr > 0xffffff)
1108 iounmap(esp->regs);
1109
1110fail_unmap_fastlane:
1111 if (zep->zorro3)
1112 iounmap(zep->board_base);
1113
1114fail_free_host:
1115 scsi_host_put(host);
1116
1117fail_release_device:
1118 zorro_release_device(z);
1119
1120fail_free_zep:
1121 kfree(zep);
1122
1123 return err;
1124}
1125
1126static void zorro_esp_remove(struct zorro_dev *z)
1127{
1128 struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
1129 struct esp *esp = zep->esp;
1130 struct Scsi_Host *host = esp->host;
1131
1132 scsi_esp_unregister(esp);
1133
1134 free_irq(host->irq, esp);
1135 dma_free_coherent(esp->dev, 16,
1136 esp->command_block,
1137 esp->command_block_dma);
1138
1139 if (zep->zorro3) {
1140 iounmap(zep->board_base);
1141 iounmap(esp->dma_regs);
1142 }
1143
1144 if (host->base > 0xffffff)
1145 iounmap(esp->regs);
1146
1147 scsi_host_put(host);
1148
1149 zorro_release_device(z);
1150
1151 kfree(zep);
1152}
1153
1154static struct zorro_driver zorro_esp_driver = {
1155 .name = KBUILD_MODNAME,
1156 .id_table = zorro_esp_zorro_tbl,
1157 .probe = zorro_esp_probe,
1158 .remove = zorro_esp_remove,
1159};
1160
1161static int __init zorro_esp_scsi_init(void)
1162{
1163 return zorro_register_driver(&zorro_esp_driver);
1164}
1165
1166static void __exit zorro_esp_scsi_exit(void)
1167{
1168 zorro_unregister_driver(&zorro_esp_driver);
1169}
1170
1171module_init(zorro_esp_scsi_init);
1172module_exit(zorro_esp_scsi_exit);
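
One detail of the Blizzard send_dma_cmd paths above deserves a restatement: the board latches the physical address shifted right by one, and reuses the top bit (DMA_WRITE, 0x80000000) as the direction flag, cleared when DMA writes into memory (a SCSI read). Condensed into a helper whose name is invented for illustration:

static u32 blz_encode_dma_addr(u32 phys_addr, int write_to_mem)
{
	u32 v = phys_addr >> 1;		/* board takes addr pre-shifted */

	if (write_to_mem)
		v &= ~DMA_WRITE;	/* clear: DMA into system RAM */
	else
		v |= DMA_WRITE;		/* set: DMA out to the device */
	return v;
}
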
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3f4bf126eed0..5ccef7d597fa 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -155,6 +155,8 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
155 155
156 mutex_unlock(&g_tf_lock); 156 mutex_unlock(&g_tf_lock);
157 157
158 pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
159
158 return read_bytes; 160 return read_bytes;
159} 161}
160 162
@@ -3213,6 +3215,27 @@ void target_setup_backend_cits(struct target_backend *tb)
3213 target_core_setup_dev_stat_cit(tb); 3215 target_core_setup_dev_stat_cit(tb);
3214} 3216}
3215 3217
3218static void target_init_dbroot(void)
3219{
3220 struct file *fp;
3221
3222 snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
3223 fp = filp_open(db_root_stage, O_RDONLY, 0);
3224 if (IS_ERR(fp)) {
3225 pr_err("db_root: cannot open: %s\n", db_root_stage);
3226 return;
3227 }
3228 if (!S_ISDIR(file_inode(fp)->i_mode)) {
3229 filp_close(fp, NULL);
3230 pr_err("db_root: not a valid directory: %s\n", db_root_stage);
3231 return;
3232 }
3233 filp_close(fp, NULL);
3234
3235 strncpy(db_root, db_root_stage, DB_ROOT_LEN);
3236 pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
3237}
3238
3216static int __init target_core_init_configfs(void) 3239static int __init target_core_init_configfs(void)
3217{ 3240{
3218 struct configfs_subsystem *subsys = &target_core_fabrics; 3241 struct configfs_subsystem *subsys = &target_core_fabrics;
@@ -3293,6 +3316,8 @@ static int __init target_core_init_configfs(void)
3293 if (ret < 0) 3316 if (ret < 0)
3294 goto out; 3317 goto out;
3295 3318
3319 target_init_dbroot();
3320
3296 return 0; 3321 return 0;
3297 3322
3298out: 3323out:
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 9b2c0c773022..16751ae55d7b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -250,6 +250,84 @@ static void fd_destroy_device(struct se_device *dev)
250 250 }
251 251}
252 252
253struct target_core_file_cmd {
254 unsigned long len;
255 struct se_cmd *cmd;
256 struct kiocb iocb;
257};
258
259static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
260{
261 struct target_core_file_cmd *cmd;
262
263 cmd = container_of(iocb, struct target_core_file_cmd, iocb);
264
265 if (ret != cmd->len)
266 target_complete_cmd(cmd->cmd, SAM_STAT_CHECK_CONDITION);
267 else
268 target_complete_cmd(cmd->cmd, SAM_STAT_GOOD);
269
270 kfree(cmd);
271}
272
273static sense_reason_t
274fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
275 enum dma_data_direction data_direction)
276{
277 int is_write = !(data_direction == DMA_FROM_DEVICE);
278 struct se_device *dev = cmd->se_dev;
279 struct fd_dev *fd_dev = FD_DEV(dev);
280 struct file *file = fd_dev->fd_file;
281 struct target_core_file_cmd *aio_cmd;
282 struct iov_iter iter = {};
283 struct scatterlist *sg;
284 struct bio_vec *bvec;
285 ssize_t len = 0;
286 int ret = 0, i;
287
288 aio_cmd = kmalloc(sizeof(struct target_core_file_cmd), GFP_KERNEL);
289 if (!aio_cmd)
290 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
291
292 bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
293 if (!bvec) {
294 kfree(aio_cmd);
295 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
296 }
297
298 for_each_sg(sgl, sg, sgl_nents, i) {
299 bvec[i].bv_page = sg_page(sg);
300 bvec[i].bv_len = sg->length;
301 bvec[i].bv_offset = sg->offset;
302
303 len += sg->length;
304 }
305
306 iov_iter_bvec(&iter, ITER_BVEC | is_write, bvec, sgl_nents, len);
307
308 aio_cmd->cmd = cmd;
309 aio_cmd->len = len;
310 aio_cmd->iocb.ki_pos = cmd->t_task_lba * dev->dev_attrib.block_size;
311 aio_cmd->iocb.ki_filp = file;
312 aio_cmd->iocb.ki_complete = cmd_rw_aio_complete;
313 aio_cmd->iocb.ki_flags = IOCB_DIRECT;
314
315 if (is_write && (cmd->se_cmd_flags & SCF_FUA))
316 aio_cmd->iocb.ki_flags |= IOCB_DSYNC;
317
318 if (is_write)
319 ret = call_write_iter(file, &aio_cmd->iocb, &iter);
320 else
321 ret = call_read_iter(file, &aio_cmd->iocb, &iter);
322
323 kfree(bvec);
324
325 if (ret != -EIOCBQUEUED)
326 cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
327
328 return 0;
329}
330
253 331static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
254 332 u32 block_size, struct scatterlist *sgl,
255 333 u32 sgl_nents, u32 data_length, int is_write)
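
The tail of fd_execute_rw_aio() above leans on the standard kiocb convention: ->read_iter()/->write_iter() return -EIOCBQUEUED when the I/O was queued and ki_complete() will be invoked later; any other return value means the call completed (or failed) synchronously and the submitter must run the completion itself. Restated compactly with the names from the hunk:

	ret = is_write ? call_write_iter(file, &aio_cmd->iocb, &iter)
		       : call_read_iter(file, &aio_cmd->iocb, &iter);
	if (ret != -EIOCBQUEUED)	/* completed synchronously */
		cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
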
@@ -527,7 +605,7 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
527 605}
528 606
529 607static sense_reason_t
530fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
608fd_execute_rw_buffered(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
531 609 enum dma_data_direction data_direction)
532 610{
533 611 struct se_device *dev = cmd->se_dev;
@@ -537,16 +615,6 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
537 615 sense_reason_t rc;
538 616 int ret = 0;
539 617 /*
540 * We are currently limited by the number of iovecs (2048) per
541 * single vfs_[writev,readv] call.
542 */
543 if (cmd->data_length > FD_MAX_BYTES) {
544 pr_err("FILEIO: Not able to process I/O of %u bytes due to"
545 "FD_MAX_BYTES: %u iovec count limitation\n",
546 cmd->data_length, FD_MAX_BYTES);
547 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
548 }
549 /*
550 618 * Call vectorized fileio functions to map struct scatterlist
551 619 * physical memory addresses to struct iovec virtual memory.
552 620 */
@@ -620,14 +688,39 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
620 688 return 0;
621 689}
622 690
691static sense_reason_t
692fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
693 enum dma_data_direction data_direction)
694{
695 struct se_device *dev = cmd->se_dev;
696 struct fd_dev *fd_dev = FD_DEV(dev);
697
698 /*
699 * We are currently limited by the number of iovecs (2048) per
700 * single vfs_[writev,readv] call.
701 */
702 if (cmd->data_length > FD_MAX_BYTES) {
703 pr_err("FILEIO: Not able to process I/O of %u bytes due to"
704 "FD_MAX_BYTES: %u iovec count limitation\n",
705 cmd->data_length, FD_MAX_BYTES);
706 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
707 }
708
709 if (fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO)
710 return fd_execute_rw_aio(cmd, sgl, sgl_nents, data_direction);
711 return fd_execute_rw_buffered(cmd, sgl, sgl_nents, data_direction);
712}
713
623 714enum {
624 Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
715 Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io,
716 Opt_fd_async_io, Opt_err
625 717};
626 718
627 719static match_table_t tokens = {
628 720 {Opt_fd_dev_name, "fd_dev_name=%s"},
629 721 {Opt_fd_dev_size, "fd_dev_size=%s"},
630 722 {Opt_fd_buffered_io, "fd_buffered_io=%d"},
723 {Opt_fd_async_io, "fd_async_io=%d"},
631 724 {Opt_err, NULL}
632 725};
633 726
@@ -693,6 +786,21 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
693 786
694 787 fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
695 788 break;
789 case Opt_fd_async_io:
790 ret = match_int(args, &arg);
791 if (ret)
792 goto out;
793 if (arg != 1) {
794 pr_err("bogus fd_async_io=%d value\n", arg);
795 ret = -EINVAL;
796 goto out;
797 }
798
799 pr_debug("FILEIO: Using async I/O"
800 " operations for struct fd_dev\n");
801
802 fd_dev->fbd_flags |= FDBD_HAS_ASYNC_IO;
803 break;
696 804 default:
697 805 break;
698 806 }
@@ -709,10 +817,11 @@ static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
709 817 ssize_t bl = 0;
710 818
711 819 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
712 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
820 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s Async: %d\n",
713 821 fd_dev->fd_dev_name, fd_dev->fd_dev_size,
714 822 (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
715 "Buffered-WCE" : "O_DSYNC");
823 "Buffered-WCE" : "O_DSYNC",
824 !!(fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO));
716 825 return bl;
717 826}
718 827
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 53be5ffd3261..929b1ecd544e 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -22,6 +22,7 @@
22 22#define FBDF_HAS_PATH 0x01
23 23#define FBDF_HAS_SIZE 0x02
24 24#define FDBD_HAS_BUFFERED_IO_WCE 0x04
25#define FDBD_HAS_ASYNC_IO 0x08
25 26#define FDBD_FORMAT_UNIT_SIZE 2048
26 27
27 28struct fd_dev {
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 1d5afc3ae017..dead30b1d32c 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -166,6 +166,7 @@ extern struct se_portal_group xcopy_pt_tpg;
166 166/* target_core_configfs.c */
167 167#define DB_ROOT_LEN 4096
168 168#define DB_ROOT_DEFAULT "/var/target"
169#define DB_ROOT_PREFERRED "/etc/target"
169 170
170 171extern char db_root[];
171 172
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 668934ea74cb..47d76c862014 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -689,8 +689,29 @@ after_mode_sense:
689 689 }
690 690after_mode_select:
691 691
692 if (scsi_status == SAM_STAT_CHECK_CONDITION)
692 if (scsi_status == SAM_STAT_CHECK_CONDITION) {
693 693 transport_copy_sense_to_cmd(cmd, req_sense);
694
695 /*
696 * check for TAPE device reads with
697 * FM/EOM/ILI set, so that we can get data
698 * back despite framework assumption that a
699 * check condition means there is no data
700 */
701 if (sd->type == TYPE_TAPE &&
702 cmd->data_direction == DMA_FROM_DEVICE) {
703 /*
704 * is sense data valid, fixed format,
705 * and have FM, EOM, or ILI set?
706 */
707 if (req_sense[0] == 0xf0 && /* valid, fixed format */
708 req_sense[2] & 0xe0 && /* FM, EOM, or ILI */
709 (req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */
710 pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n");
711 cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
712 }
713 }
714 }
694 715}
695 716
696 717enum {
@@ -1062,7 +1083,8 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
1062 1083
1063 1084 switch (host_byte(result)) {
1064 1085 case DID_OK:
1065 target_complete_cmd(cmd, scsi_status);
1086 target_complete_cmd_with_length(cmd, scsi_status,
1087 cmd->data_length - scsi_req(req)->resid_len);
1066 1088 break;
1067 1089 default:
1068 1090 pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
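
The constants in the tape check above decode a fixed-format SCSI sense buffer: byte 0 is response code 0x70 with the VALID bit (bit 7) set, and byte 2 carries FILEMARK/EOM/ILI in bits 7..5 with the sense key in the low nibble. Pulled into a standalone predicate (the helper name is made up for illustration):

static bool tape_read_hit_fm_eom_ili(const unsigned char *sense)
{
	return sense[0] == 0xf0 &&		/* VALID | fixed format */
	       (sense[2] & 0xe0) != 0 &&	/* FM, EOM or ILI set */
	       (sense[2] & 0x0f) == 0;		/* sense key NO SENSE */
}
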
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4558f2e1fe1b..f0e8f0f4ccb4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -779,7 +779,9 @@ EXPORT_SYMBOL(target_complete_cmd);
779 779
780 780void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
781 781{
782 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
782 if ((scsi_status == SAM_STAT_GOOD ||
783 cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
784 length < cmd->data_length) {
783 785 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
784 786 cmd->residual_count += cmd->data_length - length;
785 787 } else {
@@ -1431,7 +1433,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1431 1433 return 0;
1432 1434}
1433 1435
1434/*
1436/**
1435 1437 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1436 1438 * se_cmd + use pre-allocated SGL memory.
1437 1439 *
@@ -1441,7 +1443,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1441 1443 * @sense: pointer to SCSI sense buffer
1442 1444 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1443 1445 * @data_length: fabric expected data transfer length
1444 * @task_addr: SAM task attribute
1446 * @task_attr: SAM task attribute
1445 1447 * @data_dir: DMA data direction
1446 1448 * @flags: flags for command submission from target_sc_flags_tables
1447 1449 * @sgl: struct scatterlist memory for unidirectional mapping
@@ -1578,7 +1580,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
1578 1580}
1579 1581EXPORT_SYMBOL(target_submit_cmd_map_sgls);
1580 1582
1581/*
1583/**
1582 1584 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1583 1585 *
1584 1586 * @se_cmd: command descriptor to submit
@@ -1587,7 +1589,7 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
1587 1589 * @sense: pointer to SCSI sense buffer
1588 1590 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1589 1591 * @data_length: fabric expected data transfer length
1590 * @task_addr: SAM task attribute
1592 * @task_attr: SAM task attribute
1591 1593 * @data_dir: DMA data direction
1592 1594 * @flags: flags for command submission from target_sc_flags_tables
1593 1595 *
@@ -1654,7 +1656,7 @@ static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
1654 1656 * @se_sess: associated se_sess for endpoint
1655 1657 * @sense: pointer to SCSI sense buffer
1656 1658 * @unpacked_lun: unpacked LUN to reference for struct se_lun
1657 * @fabric_context: fabric context for TMR req
1659 * @fabric_tmr_ptr: fabric context for TMR req
1658 1660 * @tm_type: Type of TM request
1659 1661 * @gfp: gfp type for caller
1660 1662 * @tag: referenced task tag for TMR_ABORT_TASK
@@ -2084,12 +2086,24 @@ static void transport_complete_qf(struct se_cmd *cmd)
2084 2086 goto queue_status;
2085 2087 }
2086 2088
2087 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2089 /*
2090 * Check if we need to send a sense buffer from
2091 * the struct se_cmd in question. We do NOT want
2092 * to take this path if the IO has been marked as
2093 * needing to be treated like a "normal read". This
2094 * is the case if it's a tape read, and either the
2095 * FM, EOM, or ILI bits are set, but there is no
2096 * sense data.
2097 */
2098 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2099 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2088 2100 goto queue_status;
2089 2101
2090 2102 switch (cmd->data_direction) {
2091 2103 case DMA_FROM_DEVICE:
2092 if (cmd->scsi_status)
2104 /* queue status if not treating this as a normal read */
2105 if (cmd->scsi_status &&
2106 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2093 2107 goto queue_status;
2094 2108
2095 2109 trace_target_cmd_complete(cmd);
@@ -2194,9 +2208,15 @@ static void target_complete_ok_work(struct work_struct *work)
2194 2208
2195 2209 /*
2196 2210 * Check if we need to send a sense buffer from
2197 * the struct se_cmd in question.
2211 * the struct se_cmd in question. We do NOT want
2212 * to take this path if the IO has been marked as
2213 * needing to be treated like a "normal read". This
2214 * is the case if it's a tape read, and either the
2215 * FM, EOM, or ILI bits are set, but there is no
2216 * sense data.
2198 2217 */
2199 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2218 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2219 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2200 2220 WARN_ON(!cmd->scsi_status);
2201 2221 ret = transport_send_check_condition_and_sense(
2202 2222 cmd, 0, 1);
@@ -2238,7 +2258,18 @@ static void target_complete_ok_work(struct work_struct *work)
2238 2258queue_rsp:
2239 2259 switch (cmd->data_direction) {
2240 2260 case DMA_FROM_DEVICE:
2241 if (cmd->scsi_status)
2261 /*
2262 * if this is a READ-type IO, but SCSI status
2263 * is set, then skip returning data and just
2264 * return the status -- unless this IO is marked
2265 * as needing to be treated as a normal read,
2266 * in which case we want to go ahead and return
2267 * the data. This happens, for example, for tape
2268 * reads with the FM, EOM, or ILI bits set, with
2269 * no sense data.
2270 */
2271 if (cmd->scsi_status &&
2272 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2242 2273 goto queue_status;
2243 2274
2244 2275 atomic_long_add(cmd->data_length,
@@ -2606,7 +2637,8 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2606 2637}
2607 2638EXPORT_SYMBOL(transport_generic_free_cmd);
2608 2639
2609/* target_get_sess_cmd - Add command to active ->sess_cmd_list
2640/**
2641 * target_get_sess_cmd - Add command to active ->sess_cmd_list
2610 2642 * @se_cmd: command descriptor to add
2611 2643 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
2612 2644 */
@@ -2800,7 +2832,8 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
2800 2832}
2801 2833EXPORT_SYMBOL(target_show_cmd);
2802 2834
2803/* target_sess_cmd_list_set_waiting - Flag all commands in
2835/**
2836 * target_sess_cmd_list_set_waiting - Flag all commands in
2804 2837 * sess_cmd_list to complete cmd_wait_comp. Set
2805 2838 * sess_tearing_down so no more commands are queued.
2806 2839 * @se_sess: session to flag
@@ -2835,7 +2868,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2835 2868}
2836 2869EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2837 2870
2838/* target_wait_for_sess_cmds - Wait for outstanding descriptors
2871/**
2872 * target_wait_for_sess_cmds - Wait for outstanding descriptors
2839 2873 * @se_sess: session to wait for active I/O
2840 2874 */
2841 2875void target_wait_for_sess_cmds(struct se_session *se_sess)
@@ -3332,7 +3366,7 @@ static void target_tmr_work(struct work_struct *work)
3332 3366 tmr->response = TMR_FUNCTION_REJECTED;
3333 3367 break;
3334 3368 default:
3335 pr_err("Uknown TMR function: 0x%02x.\n",
3369 pr_err("Unknown TMR function: 0x%02x.\n",
3336 3370 tmr->function);
3337 3371 tmr->response = TMR_FUNCTION_REJECTED;
3338 3372 break;
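
The SCF_TREAT_READ_AS_NORMAL guard recurs in three completion paths above (transport_complete_qf() and twice in target_complete_ok_work()); each occurrence reduces to the same question. A hypothetical consolidation, given as a reading aid rather than a proposed change:

static bool skip_data_and_queue_status(struct se_cmd *cmd)
{
	/* READs normally short-circuit to status when scsi_status is set,
	 * unless the command was flagged as a benign tape read */
	return cmd->scsi_status &&
	       !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL);
}
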
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4f26bdc3d1dc..94b183efd236 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -42,7 +42,11 @@
42 42
43 43#include <linux/target_core_user.h>
44 44
45/*
45/**
46 * DOC: Userspace I/O
47 * Userspace I/O
48 * -------------
49 *
46 50 * Define a shared-memory interface for LIO to pass SCSI commands and
47 51 * data to userspace for processing. This is to allow backends that
48 52 * are too complex for in-kernel support to be possible.
@@ -53,7 +57,7 @@
53 * See the .h file for how the ring is laid out. Note that while the 57 * See the .h file for how the ring is laid out. Note that while the
54 * command ring is defined, the particulars of the data area are 58 * command ring is defined, the particulars of the data area are
55 * not. Offset values in the command entry point to other locations 59 * not. Offset values in the command entry point to other locations
56 * internal to the mmap()ed area. There is separate space outside the 60 * internal to the mmap-ed area. There is separate space outside the
57 * command ring for data buffers. This leaves maximum flexibility for 61 * command ring for data buffers. This leaves maximum flexibility for
58 * moving buffer allocations, or even page flipping or other 62 * moving buffer allocations, or even page flipping or other
59 * allocation techniques, without altering the command ring layout. 63 * allocation techniques, without altering the command ring layout.
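The DOC block above describes a contract rather than an API: TCMU exposes a mailbox, a command ring, and a data area through a single UIO mapping, and userspace resolves the offsets found in each ring entry against that one mapping. A minimal userspace sketch of that access pattern follows; the device node name and mapping size are assumptions for illustration (a real handler discovers both through the UIO sysfs attributes or a library such as tcmu-runner):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return 1;

	/* One mapping covers mailbox + command ring + data area. */
	size_t len = 1024 * 1024;		/* assumed size */
	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		return 1;

	/* Offsets in each command entry index into this same mapping. */
	printf("ring mapped at %p\n", ring);
	munmap(ring, len);
	close(fd);
	return 0;
}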
@@ -1382,7 +1386,7 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 	return page;
 }
 
-static int tcmu_vma_fault(struct vm_fault *vmf)
+static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
 {
 	struct tcmu_dev *udev = vmf->vma->vm_private_data;
 	struct uio_info *info = &udev->uio_info;
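This hunk is part of the tree-wide vm_fault_t conversion: fault handlers return VM_FAULT_* bit codes rather than -errno values, and the typed return keeps the two from being mixed up. A minimal sketch of the shape such a handler takes, assuming a hypothetical demo_lookup_page() that stands in for however a driver finds its backing page:

#include <linux/mm.h>

struct page *demo_lookup_page(void *priv, pgoff_t pgoff);	/* hypothetical */

static vm_fault_t demo_vma_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = demo_lookup_page(vmf->vma->vm_private_data, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;	/* a fault code, never -errno */

	get_page(page);
	vmf->page = page;
	return 0;
}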
@@ -1586,8 +1590,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
 	return ret;
 }
 
-static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
-			      int reconfig_attr, const void *reconfig_data)
+static int tcmu_netlink_event_init(struct tcmu_dev *udev,
+				   enum tcmu_genl_cmd cmd,
+				   struct sk_buff **buf, void **hdr)
 {
 	struct sk_buff *skb;
 	void *msg_header;
@@ -1613,46 +1618,66 @@ static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
 	if (ret < 0)
 		goto free_skb;
 
-	if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
-		switch (reconfig_attr) {
-		case TCMU_ATTR_DEV_CFG:
-			ret = nla_put_string(skb, reconfig_attr, reconfig_data);
-			break;
-		case TCMU_ATTR_DEV_SIZE:
-			ret = nla_put_u64_64bit(skb, reconfig_attr,
-						*((u64 *)reconfig_data),
-						TCMU_ATTR_PAD);
-			break;
-		case TCMU_ATTR_WRITECACHE:
-			ret = nla_put_u8(skb, reconfig_attr,
-					 *((u8 *)reconfig_data));
-			break;
-		default:
-			BUG();
-		}
+	*buf = skb;
+	*hdr = msg_header;
+	return ret;
 
-		if (ret < 0)
-			goto free_skb;
-	}
+free_skb:
+	nlmsg_free(skb);
+	return ret;
+}
+
+static int tcmu_netlink_event_send(struct tcmu_dev *udev,
+				   enum tcmu_genl_cmd cmd,
+				   struct sk_buff **buf, void **hdr)
+{
+	int ret = 0;
+	struct sk_buff *skb = *buf;
+	void *msg_header = *hdr;
 
 	genlmsg_end(skb, msg_header);
 
 	tcmu_init_genl_cmd_reply(udev, cmd);
 
 	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
 				TCMU_MCGRP_CONFIG, GFP_KERNEL);
 	/* We don't care if no one is listening */
 	if (ret == -ESRCH)
 		ret = 0;
 	if (!ret)
 		ret = tcmu_wait_genl_cmd_reply(udev);
-
-	return ret;
-free_skb:
-	nlmsg_free(skb);
 	return ret;
 }
 
+static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
+{
+	struct sk_buff *skb = NULL;
+	void *msg_header = NULL;
+	int ret = 0;
+
+	ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
+				      &msg_header);
+	if (ret < 0)
+		return ret;
+	return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb,
+				       &msg_header);
+
+}
+
+static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
+{
+	struct sk_buff *skb = NULL;
+	void *msg_header = NULL;
+	int ret = 0;
+
+	ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
+				      &skb, &msg_header);
+	if (ret < 0)
+		return ret;
+	return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
+				       &skb, &msg_header);
+}
+
 static int tcmu_update_uio_info(struct tcmu_dev *udev)
 {
 	struct tcmu_hba *hba = udev->hba->hba_ptr;
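The refactor above replaces a single tcmu_netlink_event() that mixed skb construction, a void-pointer switch on the reconfig attribute, and the multicast send with an init/send pair: each event wrapper now puts its own typed attributes between the two calls, so the BUG() default case disappears. A hypothetical new event would follow the same template (TCMU_CMD_EXAMPLE_EVENT and TCMU_ATTR_EXAMPLE are invented names, not part of this patch; the sketch assumes this file's context):

static int tcmu_send_example_event(struct tcmu_dev *udev, u32 val)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_EXAMPLE_EVENT,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	/* Event-specific, typed attributes go here. */
	ret = nla_put_u32(skb, TCMU_ATTR_EXAMPLE, val);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_EXAMPLE_EVENT,
				       &skb, &msg_header);
}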
@@ -1762,7 +1787,7 @@ static int tcmu_configure_device(struct se_device *dev)
 	 */
 	kref_get(&udev->kref);
 
-	ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
+	ret = tcmu_send_dev_add_event(udev);
 	if (ret)
 		goto err_netlink;
 
@@ -1812,7 +1837,7 @@ static void tcmu_destroy_device(struct se_device *dev)
 	list_del(&udev->node);
 	mutex_unlock(&root_udev_mutex);
 
-	tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
+	tcmu_send_dev_remove_event(udev);
 
 	uio_unregister_device(&udev->uio_info);
 
@@ -2151,6 +2176,27 @@ static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
 	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
 }
 
+static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
+				      const char *reconfig_data)
+{
+	struct sk_buff *skb = NULL;
+	void *msg_header = NULL;
+	int ret = 0;
+
+	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+				      &skb, &msg_header);
+	if (ret < 0)
+		return ret;
+	ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
+	if (ret < 0) {
+		nlmsg_free(skb);
+		return ret;
+	}
+	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+				       &skb, &msg_header);
+}
+
+
 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
 				     size_t count)
 {
@@ -2165,8 +2211,7 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
 
 	/* Check if device has been configured before */
 	if (tcmu_dev_configured(udev)) {
-		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
-					 TCMU_ATTR_DEV_CFG, page);
+		ret = tcmu_send_dev_config_event(udev, page);
 		if (ret) {
 			pr_err("Unable to reconfigure device\n");
 			return ret;
@@ -2193,6 +2238,26 @@ static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
 	return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
 }
 
+static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
+{
+	struct sk_buff *skb = NULL;
+	void *msg_header = NULL;
+	int ret = 0;
+
+	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+				      &skb, &msg_header);
+	if (ret < 0)
+		return ret;
+	ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
+				size, TCMU_ATTR_PAD);
+	if (ret < 0) {
+		nlmsg_free(skb);
+		return ret;
+	}
+	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+				       &skb, &msg_header);
+}
+
 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
 				   size_t count)
 {
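Note the extra TCMU_ATTR_PAD argument in the size event above: netlink attribute headers are only 4-byte aligned, so nla_put_u64_64bit() takes a pad attribute type and inserts a dummy attribute when needed to keep the 64-bit payload 8-byte aligned. Sketched in isolation (the wrapper function is illustrative; the attributes are the real ones used in this file):

static int demo_put_size(struct sk_buff *skb, u64 size)
{
	/* The pad attribute realigns the u64 payload when required. */
	return nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE, size,
				 TCMU_ATTR_PAD);
}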
@@ -2208,8 +2273,7 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
 
 	/* Check if device has been configured before */
 	if (tcmu_dev_configured(udev)) {
-		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
-					 TCMU_ATTR_DEV_SIZE, &val);
+		ret = tcmu_send_dev_size_event(udev, val);
 		if (ret) {
 			pr_err("Unable to reconfigure device\n");
 			return ret;
@@ -2257,6 +2321,25 @@ static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
 	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
 }
 
+static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
+{
+	struct sk_buff *skb = NULL;
+	void *msg_header = NULL;
+	int ret = 0;
+
+	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+				      &skb, &msg_header);
+	if (ret < 0)
+		return ret;
+	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
+	if (ret < 0) {
+		nlmsg_free(skb);
+		return ret;
+	}
+	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+				       &skb, &msg_header);
+}
+
 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
 					      const char *page, size_t count)
 {
@@ -2272,8 +2355,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
 
 	/* Check if device has been configured before */
 	if (tcmu_dev_configured(udev)) {
-		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
-					 TCMU_ATTR_WRITECACHE, &val);
+		ret = tcmu_send_emulate_write_cache(udev, val);
 		if (ret) {
 			pr_err("Unable to reconfigure device\n");
 			return ret;