aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-11-14 19:23:44 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-14 19:23:44 -0500
commit670ffccb2f9183eb6cb32fe92257aea52b3f8a7d (patch)
tree54962412913a69e17cc680c57f3e26f7305d99d2
parent47f521ba18190e4bfbb65ead3977af5756884427 (diff)
parent341b2aa83368e6f23bf0cc3d04604896337ad7cb (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley: "This is mostly updates of the usual suspects: lpfc, qla2xxx, hisi_sas, megaraid_sas, pm80xx, mpt3sas, be2iscsi, hpsa. and a host of minor updates. There's no major behaviour change or additions to the core in all of this, so the potential for regressions should be small (biggest potential being in the scsi error handler changes)" * tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (203 commits) scsi: lpfc: Fix hard lock up NMI in els timeout handling. scsi: mpt3sas: remove a stray KERN_INFO scsi: mpt3sas: cleanup _scsih_pcie_enumeration_event() scsi: aacraid: use timespec64 instead of timeval scsi: scsi_transport_fc: add 64GBIT and 128GBIT port speed definitions scsi: qla2xxx: Suppress a kernel complaint in qla_init_base_qpair() scsi: mpt3sas: fix dma_addr_t casts scsi: be2iscsi: Use kasprintf scsi: storvsc: Avoid excessive host scan on controller change scsi: lpfc: fix kzalloc-simple.cocci warnings scsi: mpt3sas: Update mpt3sas driver version. scsi: mpt3sas: Fix sparse warnings scsi: mpt3sas: Fix nvme drives checking for tlr. scsi: mpt3sas: NVMe drive support for BTDHMAPPING ioctl command and log info scsi: mpt3sas: Add-Task-management-debug-info-for-NVMe-drives. scsi: mpt3sas: scan and add nvme device after controller reset scsi: mpt3sas: Set NVMe device queue depth as 128 scsi: mpt3sas: Handle NVMe PCIe device related events generated from firmware. scsi: mpt3sas: API's to remove nvme drive from sml scsi: mpt3sas: API 's to support NVMe drive addition to SML ...
-rw-r--r--Documentation/driver-api/scsi.rst2
-rw-r--r--Documentation/scsi/scsi-parameters.txt13
-rw-r--r--Documentation/scsi/smartpqi.txt2
-rw-r--r--drivers/scsi/.gitignore1
-rw-r--r--drivers/scsi/Makefile8
-rw-r--r--drivers/scsi/NCR5380.c18
-rw-r--r--drivers/scsi/aacraid/commsup.c26
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c3
-rw-r--r--drivers/scsi/be2iscsi/be.h19
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c55
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h48
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c54
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c114
-rw-r--r--drivers/scsi/be2iscsi/be_main.h51
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c278
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h10
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c7
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c5
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c10
-rw-r--r--drivers/scsi/csiostor/csio_hw.h3
-rw-r--r--drivers/scsi/csiostor/csio_init.c3
-rw-r--r--drivers/scsi/csiostor/csio_mb.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c50
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h1
-rw-r--r--drivers/scsi/cxlflash/main.c24
-rw-r--r--drivers/scsi/cxlflash/main.h3
-rw-r--r--drivers/scsi/cxlflash/sislite.h3
-rw-r--r--drivers/scsi/cxlflash/superpipe.c6
-rw-r--r--drivers/scsi/cxlflash/vlun.c6
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c10
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c6
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c12
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c6
-rw-r--r--drivers/scsi/fcoe/fcoe.c81
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c46
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c253
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c257
-rw-r--r--drivers/scsi/hpsa.c356
-rw-r--r--drivers/scsi/hpsa_cmd.h3
-rw-r--r--drivers/scsi/libfc/fc_lport.c3
-rw-r--r--drivers/scsi/libsas/sas_dump.c10
-rw-r--r--drivers/scsi/libsas/sas_dump.h1
-rw-r--r--drivers/scsi/libsas/sas_event.c26
-rw-r--r--drivers/scsi/libsas/sas_init.c12
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c174
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c148
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c232
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c9
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h64
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c1025
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c20
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c676
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h29
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h43
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h564
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h11
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h282
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_pci.h111
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h14
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c658
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h177
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c100
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c164
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c2217
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c2
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c54
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c11
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c13
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c124
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h10
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c62
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h102
-rw-r--r--drivers/scsi/qedi/qedi_fw.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h29
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c195
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c73
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c132
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c31
-rw-r--r--drivers/scsi/scsi_devinfo.c72
-rw-r--r--drivers/scsi/scsi_dh.c36
-rw-r--r--drivers/scsi/scsi_error.c13
-rw-r--r--drivers/scsi/scsi_lib.c9
-rw-r--r--drivers/scsi/scsi_logging.h8
-rw-r--r--drivers/scsi/scsi_priv.h4
-rw-r--r--drivers/scsi/scsi_scan.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c45
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c2
-rw-r--r--drivers/scsi/sd.c32
-rw-r--r--drivers/scsi/sd_zbc.c169
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c18
-rw-r--r--drivers/scsi/storvsc_drv.c52
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210.c10
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c4
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h7
-rw-r--r--drivers/scsi/ufs/ufshcd.c40
-rw-r--r--drivers/scsi/ufs/ufshcd.h16
-rw-r--r--drivers/scsi/ufs/ufshci.h70
-rw-r--r--include/scsi/libsas.h56
-rw-r--r--include/scsi/scsi_device.h3
-rw-r--r--include/scsi/scsi_devinfo.h79
-rw-r--r--include/scsi/scsi_proto.h45
-rw-r--r--include/scsi/scsi_transport_fc.h2
131 files changed, 8211 insertions, 2405 deletions
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index 5a2aa7a377d9..9ae03171daca 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -28,7 +28,7 @@ SCSI commands can be transported over just about any kind of bus, and
28are the default protocol for storage devices attached to USB, SATA, SAS, 28are the default protocol for storage devices attached to USB, SATA, SAS,
29Fibre Channel, FireWire, and ATAPI devices. SCSI packets are also 29Fibre Channel, FireWire, and ATAPI devices. SCSI packets are also
30commonly exchanged over Infiniband, 30commonly exchanged over Infiniband,
31`I20 <http://i2o.shadowconnect.com/faq.php>`__, TCP/IP 31`I2O <http://i2o.shadowconnect.com/faq.php>`__, TCP/IP
32(`iSCSI <https://en.wikipedia.org/wiki/ISCSI>`__), even `Parallel 32(`iSCSI <https://en.wikipedia.org/wiki/ISCSI>`__), even `Parallel
33ports <http://cyberelk.net/tim/parport/parscsi.html>`__. 33ports <http://cyberelk.net/tim/parport/parscsi.html>`__.
34 34
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index 8477655c0e46..453d4b79c78d 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -50,10 +50,11 @@ parameters may be changed at runtime by the command
50 mac5380= [HW,SCSI] 50 mac5380= [HW,SCSI]
51 See drivers/scsi/mac_scsi.c. 51 See drivers/scsi/mac_scsi.c.
52 52
53 max_luns= [SCSI] Maximum number of LUNs to probe. 53 scsi_mod.max_luns=
54 [SCSI] Maximum number of LUNs to probe.
54 Should be between 1 and 2^32-1. 55 Should be between 1 and 2^32-1.
55 56
56 max_report_luns= 57 scsi_mod.max_report_luns=
57 [SCSI] Maximum number of LUNs received. 58 [SCSI] Maximum number of LUNs received.
58 Should be between 1 and 16384. 59 Should be between 1 and 16384.
59 60
@@ -80,15 +81,17 @@ parameters may be changed at runtime by the command
80 scsi_debug_*= [SCSI] 81 scsi_debug_*= [SCSI]
81 See drivers/scsi/scsi_debug.c. 82 See drivers/scsi/scsi_debug.c.
82 83
83 scsi_default_dev_flags= 84 scsi_mod.default_dev_flags=
84 [SCSI] SCSI default device flags 85 [SCSI] SCSI default device flags
85 Format: <integer> 86 Format: <integer>
86 87
87 scsi_dev_flags= [SCSI] Black/white list entry for vendor and model 88 scsi_mod.dev_flags=
89 [SCSI] Black/white list entry for vendor and model
88 Format: <vendor>:<model>:<flags> 90 Format: <vendor>:<model>:<flags>
89 (flags are integer value) 91 (flags are integer value)
90 92
91 scsi_logging_level= [SCSI] a bit mask of logging levels 93 scsi_mod.scsi_logging_level=
94 [SCSI] a bit mask of logging levels
92 See drivers/scsi/scsi_logging.h for bits. Also 95 See drivers/scsi/scsi_logging.h for bits. Also
93 settable via sysctl at dev.scsi.logging_level 96 settable via sysctl at dev.scsi.logging_level
94 (/proc/sys/dev/scsi/logging_level). 97 (/proc/sys/dev/scsi/logging_level).
diff --git a/Documentation/scsi/smartpqi.txt b/Documentation/scsi/smartpqi.txt
index ab377d9e5d1b..201f80c7c050 100644
--- a/Documentation/scsi/smartpqi.txt
+++ b/Documentation/scsi/smartpqi.txt
@@ -21,7 +21,7 @@ http://www.t10.org/members/w_pqi2.htm
21 21
22Supported devices: 22Supported devices:
23------------------ 23------------------
24<Controller names to be added as they become publically available.> 24<Controller names to be added as they become publicly available.>
25 25
26smartpqi specific entries in /sys 26smartpqi specific entries in /sys
27----------------------------- 27-----------------------------
diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore
index c89ae9a04399..e2956741fbd1 100644
--- a/drivers/scsi/.gitignore
+++ b/drivers/scsi/.gitignore
@@ -1 +1,2 @@
153c700_d.h 153c700_d.h
2scsi_devinfo_tbl.c
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1639bf8b1ab6..fcfd28d2884c 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -192,6 +192,14 @@ clean-files := 53c700_d.h 53c700_u.h
192 192
193$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h 193$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
194 194
195$(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
196
197quiet_cmd_bflags = GEN $@
198 cmd_bflags = sed -n 's/.*BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
199
200$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
201 $(call if_changed,bflags)
202
195# If you want to play with the firmware, uncomment 203# If you want to play with the firmware, uncomment
196# GENERATE_FIRMWARE := 1 204# GENERATE_FIRMWARE := 1
197 205
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 777b0222d021..90ea0f5d9bdb 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -1908,8 +1908,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
1908 switch (extended_msg[2]) { 1908 switch (extended_msg[2]) {
1909 case EXTENDED_SDTR: 1909 case EXTENDED_SDTR:
1910 case EXTENDED_WDTR: 1910 case EXTENDED_WDTR:
1911 case EXTENDED_MODIFY_DATA_POINTER:
1912 case EXTENDED_EXTENDED_IDENTIFY:
1913 tmp = 0; 1911 tmp = 0;
1914 } 1912 }
1915 } else if (len) { 1913 } else if (len) {
@@ -1932,18 +1930,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
1932 * reject it. 1930 * reject it.
1933 */ 1931 */
1934 default: 1932 default:
1935 if (!tmp) { 1933 if (tmp == EXTENDED_MESSAGE)
1936 shost_printk(KERN_ERR, instance, "rejecting message ");
1937 spi_print_msg(extended_msg);
1938 printk("\n");
1939 } else if (tmp != EXTENDED_MESSAGE)
1940 scmd_printk(KERN_INFO, cmd,
1941 "rejecting unknown message %02x\n",
1942 tmp);
1943 else
1944 scmd_printk(KERN_INFO, cmd, 1934 scmd_printk(KERN_INFO, cmd,
1945 "rejecting unknown extended message code %02x, length %d\n", 1935 "rejecting unknown extended message code %02x, length %d\n",
1946 extended_msg[1], extended_msg[0]); 1936 extended_msg[2], extended_msg[1]);
1937 else if (tmp)
1938 scmd_printk(KERN_INFO, cmd,
1939 "rejecting unknown message code %02x\n",
1940 tmp);
1947 1941
1948 msgout = MESSAGE_REJECT; 1942 msgout = MESSAGE_REJECT;
1949 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); 1943 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index dfe8e70f8d99..525a652dab48 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2383,19 +2383,19 @@ fib_free_out:
2383 goto out; 2383 goto out;
2384} 2384}
2385 2385
2386int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now) 2386int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
2387{ 2387{
2388 struct tm cur_tm; 2388 struct tm cur_tm;
2389 char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ"; 2389 char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
2390 u32 datasize = sizeof(wellness_str); 2390 u32 datasize = sizeof(wellness_str);
2391 unsigned long local_time; 2391 time64_t local_time;
2392 int ret = -ENODEV; 2392 int ret = -ENODEV;
2393 2393
2394 if (!dev->sa_firmware) 2394 if (!dev->sa_firmware)
2395 goto out; 2395 goto out;
2396 2396
2397 local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60)); 2397 local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
2398 time_to_tm(local_time, 0, &cur_tm); 2398 time64_to_tm(local_time, 0, &cur_tm);
2399 cur_tm.tm_mon += 1; 2399 cur_tm.tm_mon += 1;
2400 cur_tm.tm_year += 1900; 2400 cur_tm.tm_year += 1900;
2401 wellness_str[8] = bin2bcd(cur_tm.tm_hour); 2401 wellness_str[8] = bin2bcd(cur_tm.tm_hour);
@@ -2412,7 +2412,7 @@ out:
2412 return ret; 2412 return ret;
2413} 2413}
2414 2414
2415int aac_send_hosttime(struct aac_dev *dev, struct timeval *now) 2415int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
2416{ 2416{
2417 int ret = -ENOMEM; 2417 int ret = -ENOMEM;
2418 struct fib *fibptr; 2418 struct fib *fibptr;
@@ -2424,7 +2424,7 @@ int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
2424 2424
2425 aac_fib_init(fibptr); 2425 aac_fib_init(fibptr);
2426 info = (__le32 *)fib_data(fibptr); 2426 info = (__le32 *)fib_data(fibptr);
2427 *info = cpu_to_le32(now->tv_sec); 2427 *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
2428 ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal, 2428 ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
2429 1, 1, NULL, NULL); 2429 1, 1, NULL, NULL);
2430 2430
@@ -2496,7 +2496,7 @@ int aac_command_thread(void *data)
2496 } 2496 }
2497 if (!time_before(next_check_jiffies,next_jiffies) 2497 if (!time_before(next_check_jiffies,next_jiffies)
2498 && ((difference = next_jiffies - jiffies) <= 0)) { 2498 && ((difference = next_jiffies - jiffies) <= 0)) {
2499 struct timeval now; 2499 struct timespec64 now;
2500 int ret; 2500 int ret;
2501 2501
2502 /* Don't even try to talk to adapter if its sick */ 2502 /* Don't even try to talk to adapter if its sick */
@@ -2506,15 +2506,15 @@ int aac_command_thread(void *data)
2506 next_check_jiffies = jiffies 2506 next_check_jiffies = jiffies
2507 + ((long)(unsigned)check_interval) 2507 + ((long)(unsigned)check_interval)
2508 * HZ; 2508 * HZ;
2509 do_gettimeofday(&now); 2509 ktime_get_real_ts64(&now);
2510 2510
2511 /* Synchronize our watches */ 2511 /* Synchronize our watches */
2512 if (((1000000 - (1000000 / HZ)) > now.tv_usec) 2512 if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
2513 && (now.tv_usec > (1000000 / HZ))) 2513 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
2514 difference = (((1000000 - now.tv_usec) * HZ) 2514 difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
2515 + 500000) / 1000000; 2515 + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
2516 else { 2516 else {
2517 if (now.tv_usec > 500000) 2517 if (now.tv_nsec > NSEC_PER_SEC / 2)
2518 ++now.tv_sec; 2518 ++now.tv_sec;
2519 2519
2520 if (dev->sa_firmware) 2520 if (dev->sa_firmware)
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 381846164003..6612ff3b2e83 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2212,7 +2212,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
2212 * by the capabilities of the bus connectivity of and sync settings for 2212 * by the capabilities of the bus connectivity of and sync settings for
2213 * the target. 2213 * the target.
2214 */ 2214 */
2215const struct ahc_syncrate * 2215static const struct ahc_syncrate *
2216ahc_devlimited_syncrate(struct ahc_softc *ahc, 2216ahc_devlimited_syncrate(struct ahc_softc *ahc,
2217 struct ahc_initiator_tinfo *tinfo, 2217 struct ahc_initiator_tinfo *tinfo,
2218 u_int *period, u_int *ppr_options, role_t role) 2218 u_int *period, u_int *ppr_options, role_t role)
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index acd687f4554e..c6be3aeb302b 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1141,7 +1141,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
1141 * or forcing transfer negotiations on the next command to any 1141 * or forcing transfer negotiations on the next command to any
1142 * target. 1142 * target.
1143 */ 1143 */
1144void 1144static void
1145ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc) 1145ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
1146{ 1146{
1147 int i; 1147 int i;
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 7cbc7213b2b2..5402b85b0bdc 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -721,11 +721,8 @@ Out:
721 */ 721 */
722static void asd_chip_reset(struct asd_ha_struct *asd_ha) 722static void asd_chip_reset(struct asd_ha_struct *asd_ha)
723{ 723{
724 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
725
726 ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev)); 724 ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
727 asd_chip_hardrst(asd_ha); 725 asd_chip_hardrst(asd_ha);
728 sas_ha->notify_ha_event(sas_ha, HAE_RESET);
729} 726}
730 727
731/* ---------- Done List Routines ---------- */ 728/* ---------- Done List Routines ---------- */
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 55e3f8b40eb3..e035acf56652 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Broadcom. All Rights Reserved. 2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -81,12 +81,12 @@ static inline void queue_tail_inc(struct be_queue_info *q)
81/*ISCSI */ 81/*ISCSI */
82 82
83struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ 83struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
84 u32 min_eqd; /* in usecs */ 84 unsigned long jiffies;
85 u32 max_eqd; /* in usecs */ 85 u32 eq_prev; /* Used to calculate eqe */
86 u32 prev_eqd; /* in usecs */ 86 u32 prev_eqd;
87 u32 et_eqd; /* configured val when aic is off */ 87#define BEISCSI_EQ_DELAY_MIN 0
88 ulong jiffies; 88#define BEISCSI_EQ_DELAY_DEF 32
89 u64 eq_prev; /* Used to calculate eqe */ 89#define BEISCSI_EQ_DELAY_MAX 128
90}; 90};
91 91
92struct be_eq_obj { 92struct be_eq_obj {
@@ -148,9 +148,8 @@ struct be_ctrl_info {
148/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */ 148/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */
149#define MCC_Q_CMD_TAG_MASK ((MAX_MCC_CMD << 1) - 1) 149#define MCC_Q_CMD_TAG_MASK ((MAX_MCC_CMD << 1) - 1)
150 150
151#define PAGE_SHIFT_4K 12 151#define PAGE_SHIFT_4K 12
152#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) 152#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
153#define mcc_timeout 120000 /* 12s timeout */
154 153
155/* Returns number of pages spanned by the data starting at the given addr */ 154/* Returns number of pages spanned by the data starting at the given addr */
156#define PAGES_4K_SPANNED(_address, size) \ 155#define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index a79a5e72c777..2eb66df3e3d6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Broadcom. All Rights Reserved. 2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -675,8 +675,8 @@ static int be_mbox_notify(struct be_ctrl_info *ctrl)
675 return status; 675 return status;
676} 676}
677 677
678void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, 678void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
679 bool embedded, u8 sge_cnt) 679 bool embedded, u8 sge_cnt)
680{ 680{
681 if (embedded) 681 if (embedded)
682 wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK; 682 wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
@@ -688,7 +688,7 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
688} 688}
689 689
690void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 690void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
691 u8 subsystem, u8 opcode, int cmd_len) 691 u8 subsystem, u8 opcode, u32 cmd_len)
692{ 692{
693 req_hdr->opcode = opcode; 693 req_hdr->opcode = opcode;
694 req_hdr->subsystem = subsystem; 694 req_hdr->subsystem = subsystem;
@@ -947,7 +947,6 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
947 default: 947 default:
948 mutex_unlock(&ctrl->mbox_lock); 948 mutex_unlock(&ctrl->mbox_lock);
949 BUG(); 949 BUG();
950 return -ENXIO;
951 } 950 }
952 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); 951 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
953 if (queue_type != QTYPE_SGL) 952 if (queue_type != QTYPE_SGL)
@@ -1522,6 +1521,52 @@ int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
1522 return ret; 1521 return ret;
1523} 1522}
1524 1523
1524int beiscsi_set_host_data(struct beiscsi_hba *phba)
1525{
1526 struct be_ctrl_info *ctrl = &phba->ctrl;
1527 struct be_cmd_set_host_data *ioctl;
1528 struct be_mcc_wrb *wrb;
1529 int ret = 0;
1530
1531 if (is_chip_be2_be3r(phba))
1532 return ret;
1533
1534 mutex_lock(&ctrl->mbox_lock);
1535 wrb = wrb_from_mbox(&ctrl->mbox_mem);
1536 memset(wrb, 0, sizeof(*wrb));
1537 ioctl = embedded_payload(wrb);
1538
1539 be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
1540 be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
1541 OPCODE_COMMON_SET_HOST_DATA,
1542 EMBED_MBX_MAX_PAYLOAD_SIZE);
1543 ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID;
1544 ioctl->param.req.param_len =
1545 snprintf((char *)ioctl->param.req.param_data,
1546 sizeof(ioctl->param.req.param_data),
1547 "Linux iSCSI v%s", BUILD_STR);
1548 ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len, 4);
1549 if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
1550 ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
1551 ret = be_mbox_notify(ctrl);
1552 if (!ret) {
1553 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1554 "BG_%d : HBA set host driver version\n");
1555 } else {
1556 /**
1557 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
1558 * Older FW versions return this error.
1559 */
1560 if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
1561 ret == MCC_STATUS_INVALID_LENGTH)
1562 __beiscsi_log(phba, KERN_INFO,
1563 "BG_%d : HBA failed to set host driver version\n");
1564 }
1565
1566 mutex_unlock(&ctrl->mbox_lock);
1567 return ret;
1568}
1569
1525int beiscsi_set_uer_feature(struct beiscsi_hba *phba) 1570int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
1526{ 1571{
1527 struct be_ctrl_info *ctrl = &phba->ctrl; 1572 struct be_ctrl_info *ctrl = &phba->ctrl;
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index d9b6773facdb..6f05d1dfa10a 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Broadcom. All Rights Reserved. 2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -230,6 +230,7 @@ struct be_mcc_mailbox {
230#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 230#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
231#define OPCODE_COMMON_FUNCTION_RESET 61 231#define OPCODE_COMMON_FUNCTION_RESET 61
232#define OPCODE_COMMON_GET_PORT_NAME 77 232#define OPCODE_COMMON_GET_PORT_NAME 77
233#define OPCODE_COMMON_SET_HOST_DATA 93
233#define OPCODE_COMMON_SET_FEATURES 191 234#define OPCODE_COMMON_SET_FEATURES 191
234 235
235/** 236/**
@@ -737,6 +738,30 @@ struct be_cmd_hba_name {
737 u8 initiator_alias[BE_INI_ALIAS_LEN]; 738 u8 initiator_alias[BE_INI_ALIAS_LEN];
738} __packed; 739} __packed;
739 740
741/******************** COMMON SET HOST DATA *******************/
742#define BE_CMD_SET_HOST_PARAM_ID 0x2
743#define BE_CMD_MAX_DRV_VERSION 0x30
744struct be_sethost_req {
745 u32 param_id;
746 u32 param_len;
747 u32 param_data[32];
748};
749
750struct be_sethost_resp {
751 u32 rsvd0;
752};
753
754struct be_cmd_set_host_data {
755 union {
756 struct be_cmd_req_hdr req_hdr;
757 struct be_cmd_resp_hdr resp_hdr;
758 } h;
759 union {
760 struct be_sethost_req req;
761 struct be_sethost_resp resp;
762 } param;
763} __packed;
764
740/******************** COMMON SET Features *******************/ 765/******************** COMMON SET Features *******************/
741#define BE_CMD_SET_FEATURE_UER 0x10 766#define BE_CMD_SET_FEATURE_UER 0x10
742#define BE_CMD_UER_SUPP_BIT 0x1 767#define BE_CMD_UER_SUPP_BIT 0x1
@@ -793,8 +818,6 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
793 struct be_queue_info *mccq, 818 struct be_queue_info *mccq,
794 struct be_queue_info *cq); 819 struct be_queue_info *cq);
795 820
796unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
797
798void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag); 821void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
799 822
800int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, 823int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
@@ -847,6 +870,7 @@ int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
847int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba); 870int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
848 871
849int beiscsi_set_uer_feature(struct beiscsi_hba *phba); 872int beiscsi_set_uer_feature(struct beiscsi_hba *phba);
873int beiscsi_set_host_data(struct beiscsi_hba *phba);
850 874
851struct be_default_pdu_context { 875struct be_default_pdu_context {
852 u32 dw[4]; 876 u32 dw[4];
@@ -1274,19 +1298,9 @@ struct be_cmd_get_port_name {
1274 * a read command 1298 * a read command
1275 */ 1299 */
1276#define TGT_CTX_UPDT_CMD 7 /* Target context update */ 1300#define TGT_CTX_UPDT_CMD 7 /* Target context update */
1277#define TGT_STS_CMD 8 /* Target R2T and other BHS
1278 * where only the status number
1279 * need to be updated
1280 */
1281#define TGT_DATAIN_CMD 9 /* Target Data-Ins in response
1282 * to read command
1283 */
1284#define TGT_SOS_PDU 10 /* Target:standalone status
1285 * response
1286 */
1287#define TGT_DM_CMD 11 /* Indicates that the bhs 1301#define TGT_DM_CMD 11 /* Indicates that the bhs
1288 * preparedby 1302 * prepared by driver should not
1289 * driver should not be touched 1303 * be touched.
1290 */ 1304 */
1291 1305
1292/* Returns the number of items in the field array. */ 1306/* Returns the number of items in the field array. */
@@ -1444,9 +1458,9 @@ struct be_cmd_get_port_name {
1444 * the cxn 1458 * the cxn
1445 */ 1459 */
1446 1460
1447void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, 1461void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
1448 bool embedded, u8 sge_cnt); 1462 bool embedded, u8 sge_cnt);
1449 1463
1450void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 1464void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
1451 u8 subsystem, u8 opcode, int cmd_len); 1465 u8 subsystem, u8 opcode, u32 cmd_len);
1452#endif /* !BEISCSI_CMDS_H */ 1466#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 43a80ce5ce6a..a398c54139aa 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Broadcom. All Rights Reserved. 2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -684,41 +684,6 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
684} 684}
685 685
686/** 686/**
687 * beiscsi_get_initname - Read Initiator Name from flash
688 * @buf: buffer bointer
689 * @phba: The device priv structure instance
690 *
691 * returns number of bytes
692 */
693static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
694{
695 int rc;
696 unsigned int tag;
697 struct be_mcc_wrb *wrb;
698 struct be_cmd_hba_name *resp;
699
700 tag = be_cmd_get_initname(phba);
701 if (!tag) {
702 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
703 "BS_%d : Getting Initiator Name Failed\n");
704
705 return -EBUSY;
706 }
707
708 rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
709 if (rc) {
710 beiscsi_log(phba, KERN_ERR,
711 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
712 "BS_%d : Initiator Name MBX Failed\n");
713 return rc;
714 }
715
716 resp = embedded_payload(wrb);
717 rc = sprintf(buf, "%s\n", resp->initiator_name);
718 return rc;
719}
720
721/**
722 * beiscsi_get_port_state - Get the Port State 687 * beiscsi_get_port_state - Get the Port State
723 * @shost : pointer to scsi_host structure 688 * @shost : pointer to scsi_host structure
724 * 689 *
@@ -772,7 +737,6 @@ static void beiscsi_get_port_speed(struct Scsi_Host *shost)
772 * @param: parameter type identifier 737 * @param: parameter type identifier
773 * @buf: buffer pointer 738 * @buf: buffer pointer
774 * 739 *
775 * returns host parameter
776 */ 740 */
777int beiscsi_get_host_param(struct Scsi_Host *shost, 741int beiscsi_get_host_param(struct Scsi_Host *shost,
778 enum iscsi_host_param param, char *buf) 742 enum iscsi_host_param param, char *buf)
@@ -783,7 +747,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
783 if (!beiscsi_hba_is_online(phba)) { 747 if (!beiscsi_hba_is_online(phba)) {
784 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 748 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
785 "BS_%d : HBA in error 0x%lx\n", phba->state); 749 "BS_%d : HBA in error 0x%lx\n", phba->state);
786 return -EBUSY; 750 return 0;
787 } 751 }
788 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 752 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
789 "BS_%d : In beiscsi_get_host_param, param = %d\n", param); 753 "BS_%d : In beiscsi_get_host_param, param = %d\n", param);
@@ -794,15 +758,19 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
794 if (status < 0) { 758 if (status < 0) {
795 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 759 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
796 "BS_%d : beiscsi_get_macaddr Failed\n"); 760 "BS_%d : beiscsi_get_macaddr Failed\n");
797 return status; 761 return 0;
798 } 762 }
799 break; 763 break;
800 case ISCSI_HOST_PARAM_INITIATOR_NAME: 764 case ISCSI_HOST_PARAM_INITIATOR_NAME:
801 status = beiscsi_get_initname(buf, phba); 765 /* try fetching user configured name first */
766 status = beiscsi_get_initiator_name(phba, buf, true);
802 if (status < 0) { 767 if (status < 0) {
803 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 768 status = beiscsi_get_initiator_name(phba, buf, false);
804 "BS_%d : Retreiving Initiator Name Failed\n"); 769 if (status < 0) {
805 return status; 770 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
771 "BS_%d : Retreiving Initiator Name Failed\n");
772 status = 0;
773 }
806 } 774 }
807 break; 775 break;
808 case ISCSI_HOST_PARAM_PORT_STATE: 776 case ISCSI_HOST_PARAM_PORT_STATE:
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index b9d459a21f25..f41dfda97e17 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Broadcom. All Rights Reserved. 2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index d8bd6f2c9c83..be96aa1e5077 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Broadcom. All Rights Reserved. 2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -455,14 +455,12 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
455 return -ENOMEM; 455 return -ENOMEM;
456 phba->ctrl.csr = addr; 456 phba->ctrl.csr = addr;
457 phba->csr_va = addr; 457 phba->csr_va = addr;
458 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
459 458
460 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); 459 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
461 if (addr == NULL) 460 if (addr == NULL)
462 goto pci_map_err; 461 goto pci_map_err;
463 phba->ctrl.db = addr; 462 phba->ctrl.db = addr;
464 phba->db_va = addr; 463 phba->db_va = addr;
465 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
466 464
467 if (phba->generation == BE_GEN2) 465 if (phba->generation == BE_GEN2)
468 pcicfg_reg = 1; 466 pcicfg_reg = 1;
@@ -476,7 +474,6 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
476 goto pci_map_err; 474 goto pci_map_err;
477 phba->ctrl.pcicfg = addr; 475 phba->ctrl.pcicfg = addr;
478 phba->pci_va = addr; 476 phba->pci_va = addr;
479 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
480 return 0; 477 return 0;
481 478
482pci_map_err: 479pci_map_err:
@@ -790,6 +787,24 @@ static irqreturn_t be_isr(int irq, void *dev_id)
790 return IRQ_HANDLED; 787 return IRQ_HANDLED;
791} 788}
792 789
790static void beiscsi_free_irqs(struct beiscsi_hba *phba)
791{
792 struct hwi_context_memory *phwi_context;
793 int i;
794
795 if (!phba->pcidev->msix_enabled) {
796 if (phba->pcidev->irq)
797 free_irq(phba->pcidev->irq, phba);
798 return;
799 }
800
801 phwi_context = phba->phwi_ctrlr->phwi_ctxt;
802 for (i = 0; i <= phba->num_cpus; i++) {
803 free_irq(pci_irq_vector(phba->pcidev, i),
804 &phwi_context->be_eq[i]);
805 kfree(phba->msi_name[i]);
806 }
807}
793 808
794static int beiscsi_init_irqs(struct beiscsi_hba *phba) 809static int beiscsi_init_irqs(struct beiscsi_hba *phba)
795{ 810{
@@ -803,15 +818,14 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
803 818
804 if (pcidev->msix_enabled) { 819 if (pcidev->msix_enabled) {
805 for (i = 0; i < phba->num_cpus; i++) { 820 for (i = 0; i < phba->num_cpus; i++) {
806 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, 821 phba->msi_name[i] = kasprintf(GFP_KERNEL,
807 GFP_KERNEL); 822 "beiscsi_%02x_%02x",
823 phba->shost->host_no, i);
808 if (!phba->msi_name[i]) { 824 if (!phba->msi_name[i]) {
809 ret = -ENOMEM; 825 ret = -ENOMEM;
810 goto free_msix_irqs; 826 goto free_msix_irqs;
811 } 827 }
812 828
813 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
814 phba->shost->host_no, i);
815 ret = request_irq(pci_irq_vector(pcidev, i), 829 ret = request_irq(pci_irq_vector(pcidev, i),
816 be_isr_msix, 0, phba->msi_name[i], 830 be_isr_msix, 0, phba->msi_name[i],
817 &phwi_context->be_eq[i]); 831 &phwi_context->be_eq[i]);
@@ -824,13 +838,12 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
824 goto free_msix_irqs; 838 goto free_msix_irqs;
825 } 839 }
826 } 840 }
827 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL); 841 phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
842 phba->shost->host_no);
828 if (!phba->msi_name[i]) { 843 if (!phba->msi_name[i]) {
829 ret = -ENOMEM; 844 ret = -ENOMEM;
830 goto free_msix_irqs; 845 goto free_msix_irqs;
831 } 846 }
832 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
833 phba->shost->host_no);
834 ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0, 847 ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
835 phba->msi_name[i], &phwi_context->be_eq[i]); 848 phba->msi_name[i], &phwi_context->be_eq[i]);
836 if (ret) { 849 if (ret) {
@@ -924,12 +937,11 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
924 * this can happen if clean_task is called on a task that 937 * this can happen if clean_task is called on a task that
925 * failed in xmit_task or alloc_pdu. 938 * failed in xmit_task or alloc_pdu.
926 */ 939 */
927 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 940 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
928 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d," 941 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
929 "value there=%p\n", phba->io_sgl_free_index, 942 phba->io_sgl_free_index,
930 phba->io_sgl_hndl_base 943 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
931 [phba->io_sgl_free_index]); 944 spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
932 spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
933 return; 945 return;
934 } 946 }
935 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; 947 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -1864,8 +1876,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
1864 1876
1865 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1877 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1866 1878
1867 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 1879 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
1868 32] & CQE_CODE_MASK); 1880 CQE_CODE_MASK);
1869 1881
1870 /* Get the CID */ 1882 /* Get the CID */
1871 if (is_chip_be2_be3r(phba)) { 1883 if (is_chip_be2_be3r(phba)) {
@@ -3024,7 +3036,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3024 3036
3025 mem->dma = paddr; 3037 mem->dma = paddr;
3026 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3038 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
3027 phwi_context->cur_eqd); 3039 BEISCSI_EQ_DELAY_DEF);
3028 if (ret) { 3040 if (ret) {
3029 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3041 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3030 "BM_%d : beiscsi_cmd_eq_create" 3042 "BM_%d : beiscsi_cmd_eq_create"
@@ -3508,13 +3520,14 @@ static int be_mcc_queues_create(struct beiscsi_hba *phba,
3508 goto err; 3520 goto err;
3509 /* Ask BE to create MCC compl queue; */ 3521 /* Ask BE to create MCC compl queue; */
3510 if (phba->pcidev->msix_enabled) { 3522 if (phba->pcidev->msix_enabled) {
3511 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq 3523 if (beiscsi_cmd_cq_create(ctrl, cq,
3512 [phba->num_cpus].q, false, true, 0)) 3524 &phwi_context->be_eq[phba->num_cpus].q,
3513 goto mcc_cq_free; 3525 false, true, 0))
3526 goto mcc_cq_free;
3514 } else { 3527 } else {
3515 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3528 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3516 false, true, 0)) 3529 false, true, 0))
3517 goto mcc_cq_free; 3530 goto mcc_cq_free;
3518 } 3531 }
3519 3532
3520 /* Alloc MCC queue */ 3533 /* Alloc MCC queue */
@@ -3689,9 +3702,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3689 3702
3690 phwi_ctrlr = phba->phwi_ctrlr; 3703 phwi_ctrlr = phba->phwi_ctrlr;
3691 phwi_context = phwi_ctrlr->phwi_ctxt; 3704 phwi_context = phwi_ctrlr->phwi_ctxt;
3692 phwi_context->max_eqd = 128;
3693 phwi_context->min_eqd = 0;
3694 phwi_context->cur_eqd = 32;
3695 /* set port optic state to unknown */ 3705 /* set port optic state to unknown */
3696 phba->optic_state = 0xff; 3706 phba->optic_state = 0xff;
3697 3707
@@ -4792,10 +4802,10 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
4792 sg = scsi_sglist(sc); 4802 sg = scsi_sglist(sc);
4793 if (sc->sc_data_direction == DMA_TO_DEVICE) 4803 if (sc->sc_data_direction == DMA_TO_DEVICE)
4794 writedir = 1; 4804 writedir = 1;
4795 else 4805 else
4796 writedir = 0; 4806 writedir = 0;
4797 4807
4798 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 4808 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
4799} 4809}
4800 4810
4801/** 4811/**
@@ -4917,6 +4927,13 @@ void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
4917 schedule_work(&phba->boot_work); 4927 schedule_work(&phba->boot_work);
4918} 4928}
4919 4929
4930/**
4931 * Boot flag info for iscsi-utilities
4932 * Bit 0 Block valid flag
4933 * Bit 1 Firmware booting selected
4934 */
4935#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
4936
4920static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 4937static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
4921{ 4938{
4922 struct beiscsi_hba *phba = data; 4939 struct beiscsi_hba *phba = data;
@@ -4972,7 +4989,7 @@ static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
4972 auth_data.chap.intr_secret); 4989 auth_data.chap.intr_secret);
4973 break; 4990 break;
4974 case ISCSI_BOOT_TGT_FLAGS: 4991 case ISCSI_BOOT_TGT_FLAGS:
4975 rc = sprintf(str, "2\n"); 4992 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
4976 break; 4993 break;
4977 case ISCSI_BOOT_TGT_NIC_ASSOC: 4994 case ISCSI_BOOT_TGT_NIC_ASSOC:
4978 rc = sprintf(str, "0\n"); 4995 rc = sprintf(str, "0\n");
@@ -5004,7 +5021,7 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
5004 5021
5005 switch (type) { 5022 switch (type) {
5006 case ISCSI_BOOT_ETH_FLAGS: 5023 case ISCSI_BOOT_ETH_FLAGS:
5007 rc = sprintf(str, "2\n"); 5024 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
5008 break; 5025 break;
5009 case ISCSI_BOOT_ETH_INDEX: 5026 case ISCSI_BOOT_ETH_INDEX:
5010 rc = sprintf(str, "0\n"); 5027 rc = sprintf(str, "0\n");
@@ -5209,8 +5226,8 @@ static void beiscsi_eqd_update_work(struct work_struct *work)
5209 5226
5210 if (eqd < 8) 5227 if (eqd < 8)
5211 eqd = 0; 5228 eqd = 0;
5212 eqd = min_t(u32, eqd, phwi_context->max_eqd); 5229 eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX);
5213 eqd = max_t(u32, eqd, phwi_context->min_eqd); 5230 eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN);
5214 5231
5215 aic->jiffies = now; 5232 aic->jiffies = now;
5216 aic->eq_prev = pbe_eq->cq_count; 5233 aic->eq_prev = pbe_eq->cq_count;
@@ -5298,6 +5315,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
5298 be2iscsi_enable_msix(phba); 5315 be2iscsi_enable_msix(phba);
5299 5316
5300 beiscsi_get_params(phba); 5317 beiscsi_get_params(phba);
5318 beiscsi_set_host_data(phba);
5301 /* Re-enable UER. If different TPE occurs then it is recoverable. */ 5319 /* Re-enable UER. If different TPE occurs then it is recoverable. */
5302 beiscsi_set_uer_feature(phba); 5320 beiscsi_set_uer_feature(phba);
5303 5321
@@ -5387,15 +5405,7 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
5387 phwi_ctrlr = phba->phwi_ctrlr; 5405 phwi_ctrlr = phba->phwi_ctrlr;
5388 phwi_context = phwi_ctrlr->phwi_ctxt; 5406 phwi_context = phwi_ctrlr->phwi_ctxt;
5389 hwi_disable_intr(phba); 5407 hwi_disable_intr(phba);
5390 if (phba->pcidev->msix_enabled) { 5408 beiscsi_free_irqs(phba);
5391 for (i = 0; i <= phba->num_cpus; i++) {
5392 free_irq(pci_irq_vector(phba->pcidev, i),
5393 &phwi_context->be_eq[i]);
5394 kfree(phba->msi_name[i]);
5395 }
5396 } else
5397 if (phba->pcidev->irq)
5398 free_irq(phba->pcidev->irq, phba);
5399 pci_free_irq_vectors(phba->pcidev); 5409 pci_free_irq_vectors(phba->pcidev);
5400 5410
5401 for (i = 0; i < phba->num_cpus; i++) { 5411 for (i = 0; i < phba->num_cpus; i++) {
@@ -5586,12 +5596,12 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5586 if (ret) { 5596 if (ret) {
5587 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5597 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5588 "BM_%d : be_ctrl_init failed\n"); 5598 "BM_%d : be_ctrl_init failed\n");
5589 goto hba_free; 5599 goto free_hba;
5590 } 5600 }
5591 5601
5592 ret = beiscsi_init_sliport(phba); 5602 ret = beiscsi_init_sliport(phba);
5593 if (ret) 5603 if (ret)
5594 goto hba_free; 5604 goto free_hba;
5595 5605
5596 spin_lock_init(&phba->io_sgl_lock); 5606 spin_lock_init(&phba->io_sgl_lock);
5597 spin_lock_init(&phba->mgmt_sgl_lock); 5607 spin_lock_init(&phba->mgmt_sgl_lock);
@@ -5604,6 +5614,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5604 } 5614 }
5605 beiscsi_get_port_name(&phba->ctrl, phba); 5615 beiscsi_get_port_name(&phba->ctrl, phba);
5606 beiscsi_get_params(phba); 5616 beiscsi_get_params(phba);
5617 beiscsi_set_host_data(phba);
5607 beiscsi_set_uer_feature(phba); 5618 beiscsi_set_uer_feature(phba);
5608 5619
5609 be2iscsi_enable_msix(phba); 5620 be2iscsi_enable_msix(phba);
@@ -5671,13 +5682,13 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5671 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5682 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5672 "BM_%d : beiscsi_dev_probe-" 5683 "BM_%d : beiscsi_dev_probe-"
5673 "Failed to beiscsi_init_irqs\n"); 5684 "Failed to beiscsi_init_irqs\n");
5674 goto free_blkenbld; 5685 goto disable_iopoll;
5675 } 5686 }
5676 hwi_enable_intr(phba); 5687 hwi_enable_intr(phba);
5677 5688
5678 ret = iscsi_host_add(phba->shost, &phba->pcidev->dev); 5689 ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
5679 if (ret) 5690 if (ret)
5680 goto free_blkenbld; 5691 goto free_irqs;
5681 5692
5682 /* set online bit after port is operational */ 5693 /* set online bit after port is operational */
5683 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5694 set_bit(BEISCSI_HBA_ONLINE, &phba->state);
@@ -5713,12 +5724,15 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5713 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); 5724 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
5714 return 0; 5725 return 0;
5715 5726
5716free_blkenbld: 5727free_irqs:
5717 destroy_workqueue(phba->wq); 5728 hwi_disable_intr(phba);
5729 beiscsi_free_irqs(phba);
5730disable_iopoll:
5718 for (i = 0; i < phba->num_cpus; i++) { 5731 for (i = 0; i < phba->num_cpus; i++) {
5719 pbe_eq = &phwi_context->be_eq[i]; 5732 pbe_eq = &phwi_context->be_eq[i];
5720 irq_poll_disable(&pbe_eq->iopoll); 5733 irq_poll_disable(&pbe_eq->iopoll);
5721 } 5734 }
5735 destroy_workqueue(phba->wq);
5722free_twq: 5736free_twq:
5723 hwi_cleanup_port(phba); 5737 hwi_cleanup_port(phba);
5724 beiscsi_cleanup_port(phba); 5738 beiscsi_cleanup_port(phba);
@@ -5727,9 +5741,9 @@ free_port:
5727 pci_free_consistent(phba->pcidev, 5741 pci_free_consistent(phba->pcidev,
5728 phba->ctrl.mbox_mem_alloced.size, 5742 phba->ctrl.mbox_mem_alloced.size,
5729 phba->ctrl.mbox_mem_alloced.va, 5743 phba->ctrl.mbox_mem_alloced.va,
5730 phba->ctrl.mbox_mem_alloced.dma); 5744 phba->ctrl.mbox_mem_alloced.dma);
5731 beiscsi_unmap_pci_function(phba); 5745 beiscsi_unmap_pci_function(phba);
5732hba_free: 5746free_hba:
5733 pci_disable_msix(phba->pcidev); 5747 pci_disable_msix(phba->pcidev);
5734 pci_dev_put(phba->pcidev); 5748 pci_dev_put(phba->pcidev);
5735 iscsi_host_free(phba->shost); 5749 iscsi_host_free(phba->shost);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 81ce3ffda968..42bb6bdb68bd 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Broadcom. All Rights Reserved. 2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -31,7 +31,7 @@
31#include <scsi/scsi_transport_iscsi.h> 31#include <scsi/scsi_transport_iscsi.h>
32 32
33#define DRV_NAME "be2iscsi" 33#define DRV_NAME "be2iscsi"
34#define BUILD_STR "11.4.0.0" 34#define BUILD_STR "11.4.0.1"
35#define BE_NAME "Emulex OneConnect" \ 35#define BE_NAME "Emulex OneConnect" \
36 "Open-iSCSI Driver version" BUILD_STR 36 "Open-iSCSI Driver version" BUILD_STR
37#define DRV_DESC BE_NAME " " "Driver" 37#define DRV_DESC BE_NAME " " "Driver"
@@ -59,7 +59,7 @@
59#define BE2_DEFPDU_DATA_SZ 8192 59#define BE2_DEFPDU_DATA_SZ 8192
60#define BE2_MAX_NUM_CQ_PROC 512 60#define BE2_MAX_NUM_CQ_PROC 512
61 61
62#define MAX_CPUS 64 62#define MAX_CPUS 64U
63#define BEISCSI_MAX_NUM_CPUS 7 63#define BEISCSI_MAX_NUM_CPUS 7
64 64
65#define BEISCSI_VER_STRLEN 32 65#define BEISCSI_VER_STRLEN 32
@@ -77,9 +77,7 @@
77 77
78#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ 78#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
79#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ 79#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
80#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01
81#define BEISCSI_MAX_FRAGS_INIT 192 80#define BEISCSI_MAX_FRAGS_INIT 192
82#define BE_NUM_MSIX_ENTRIES 1
83 81
84#define BE_SENSE_INFO_SIZE 258 82#define BE_SENSE_INFO_SIZE 258
85#define BE_ISCSI_PDU_HEADER_SIZE 64 83#define BE_ISCSI_PDU_HEADER_SIZE 64
@@ -155,8 +153,6 @@
155#define PAGES_REQUIRED(x) \ 153#define PAGES_REQUIRED(x) \
156 ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE)) 154 ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
157 155
158#define BEISCSI_MSI_NAME 20 /* size of msi_name string */
159
160#define MEM_DESCR_OFFSET 8 156#define MEM_DESCR_OFFSET 8
161#define BEISCSI_DEFQ_HDR 1 157#define BEISCSI_DEFQ_HDR 1
162#define BEISCSI_DEFQ_DATA 0 158#define BEISCSI_DEFQ_DATA 0
@@ -209,13 +205,8 @@ struct mem_array {
209}; 205};
210 206
211struct be_mem_descriptor { 207struct be_mem_descriptor {
212 unsigned int index; /* Index of this memory parameter */
213 unsigned int category; /* type indicates cached/non-cached */
214 unsigned int num_elements; /* number of elements in this
215 * descriptor
216 */
217 unsigned int alignment_mask; /* Alignment mask for this block */
218 unsigned int size_in_bytes; /* Size required by memory block */ 208 unsigned int size_in_bytes; /* Size required by memory block */
209 unsigned int num_elements;
219 struct mem_array *mem_array; 210 struct mem_array *mem_array;
220}; 211};
221 212
@@ -238,32 +229,12 @@ struct hba_parameters {
238 unsigned int num_eq_entries; 229 unsigned int num_eq_entries;
239 unsigned int wrbs_per_cxn; 230 unsigned int wrbs_per_cxn;
240 unsigned int hwi_ws_sz; 231 unsigned int hwi_ws_sz;
241 /**
242 * These are calculated from other params. They're here
243 * for debug purposes
244 */
245 unsigned int num_mcc_pages;
246 unsigned int num_mcc_cq_pages;
247 unsigned int num_cq_pages;
248 unsigned int num_eq_pages;
249
250 unsigned int num_async_pdu_buf_pages;
251 unsigned int num_async_pdu_buf_sgl_pages;
252 unsigned int num_async_pdu_buf_cq_pages;
253
254 unsigned int num_async_pdu_hdr_pages;
255 unsigned int num_async_pdu_hdr_sgl_pages;
256 unsigned int num_async_pdu_hdr_cq_pages;
257
258 unsigned int num_sge;
259}; 232};
260 233
261#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \ 234#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
262 (phwi_ctrlr->wrb_context[cri].ulp_num) 235 (phwi_ctrlr->wrb_context[cri].ulp_num)
263struct hwi_wrb_context { 236struct hwi_wrb_context {
264 spinlock_t wrb_lock; 237 spinlock_t wrb_lock;
265 struct list_head wrb_handle_list;
266 struct list_head wrb_handle_drvr_list;
267 struct wrb_handle **pwrb_handle_base; 238 struct wrb_handle **pwrb_handle_base;
268 struct wrb_handle **pwrb_handle_basestd; 239 struct wrb_handle **pwrb_handle_basestd;
269 struct iscsi_wrb *plast_wrb; 240 struct iscsi_wrb *plast_wrb;
@@ -272,8 +243,6 @@ struct hwi_wrb_context {
272 unsigned short wrb_handles_available; 243 unsigned short wrb_handles_available;
273 unsigned short cid; 244 unsigned short cid;
274 uint8_t ulp_num; /* ULP to which CID binded */ 245 uint8_t ulp_num; /* ULP to which CID binded */
275 uint16_t register_set;
276 uint16_t doorbell_format;
277 uint32_t doorbell_offset; 246 uint32_t doorbell_offset;
278}; 247};
279 248
@@ -310,9 +279,6 @@ struct beiscsi_hba {
310 u8 __iomem *csr_va; /* CSR */ 279 u8 __iomem *csr_va; /* CSR */
311 u8 __iomem *db_va; /* Door Bell */ 280 u8 __iomem *db_va; /* Door Bell */
312 u8 __iomem *pci_va; /* PCI Config */ 281 u8 __iomem *pci_va; /* PCI Config */
313 struct be_bus_address csr_pa; /* CSR */
314 struct be_bus_address db_pa; /* CSR */
315 struct be_bus_address pci_pa; /* CSR */
316 /* PCI representation of our HBA */ 282 /* PCI representation of our HBA */
317 struct pci_dev *pcidev; 283 struct pci_dev *pcidev;
318 unsigned int num_cpus; 284 unsigned int num_cpus;
@@ -324,7 +290,6 @@ struct beiscsi_hba {
324 unsigned short io_sgl_free_index; 290 unsigned short io_sgl_free_index;
325 unsigned short io_sgl_hndl_avbl; 291 unsigned short io_sgl_hndl_avbl;
326 struct sgl_handle **io_sgl_hndl_base; 292 struct sgl_handle **io_sgl_hndl_base;
327 struct sgl_handle **sgl_hndl_array;
328 293
329 unsigned short eh_sgl_alloc_index; 294 unsigned short eh_sgl_alloc_index;
330 unsigned short eh_sgl_free_index; 295 unsigned short eh_sgl_free_index;
@@ -1009,10 +974,6 @@ struct be_ring {
1009}; 974};
1010 975
1011struct hwi_controller { 976struct hwi_controller {
1012 struct list_head io_sgl_list;
1013 struct list_head eh_sgl_list;
1014 struct sgl_handle *psgl_handle_base;
1015
1016 struct hwi_wrb_context *wrb_context; 977 struct hwi_wrb_context *wrb_context;
1017 struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT]; 978 struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
1018 struct be_ring default_pdu_data[BEISCSI_ULP_COUNT]; 979 struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
@@ -1036,10 +997,6 @@ struct wrb_handle {
1036}; 997};
1037 998
1038struct hwi_context_memory { 999struct hwi_context_memory {
1039 /* Adaptive interrupt coalescing (AIC) info */
1040 u16 min_eqd; /* in usecs */
1041 u16 max_eqd; /* in usecs */
1042 u16 cur_eqd; /* in usecs */
1043 struct be_eq_obj be_eq[MAX_CPUS]; 1000 struct be_eq_obj be_eq[MAX_CPUS];
1044 struct be_queue_info be_cq[MAX_CPUS - 1]; 1001 struct be_queue_info be_cq[MAX_CPUS - 1];
1045 1002
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index c73775368d09..66ca967f2850 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Broadcom. All Rights Reserved. 2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -19,43 +19,6 @@
19#include "be_iscsi.h" 19#include "be_iscsi.h"
20#include "be_main.h" 20#include "be_main.h"
21 21
22int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
23 struct be_set_eqd *set_eqd,
24 int num)
25{
26 struct be_ctrl_info *ctrl = &phba->ctrl;
27 struct be_mcc_wrb *wrb;
28 struct be_cmd_req_modify_eq_delay *req;
29 unsigned int tag;
30 int i;
31
32 mutex_lock(&ctrl->mbox_lock);
33 wrb = alloc_mcc_wrb(phba, &tag);
34 if (!wrb) {
35 mutex_unlock(&ctrl->mbox_lock);
36 return 0;
37 }
38
39 req = embedded_payload(wrb);
40 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
43
44 req->num_eq = cpu_to_le32(num);
45 for (i = 0; i < num; i++) {
46 req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
47 req->delay[i].phase = 0;
48 req->delay[i].delay_multiplier =
49 cpu_to_le32(set_eqd[i].delay_multiplier);
50 }
51
52 /* ignore the completion of this mbox command */
53 set_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state);
54 be_mcc_notify(phba, tag);
55 mutex_unlock(&ctrl->mbox_lock);
56 return tag;
57}
58
59unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, 22unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
60 struct beiscsi_hba *phba, 23 struct beiscsi_hba *phba,
61 struct bsg_job *job, 24 struct bsg_job *job,
@@ -156,7 +119,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
156 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 119 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
157 "BG_%d : unknown addr family %d\n", 120 "BG_%d : unknown addr family %d\n",
158 dst_addr->sa_family); 121 dst_addr->sa_family);
159 return -EINVAL; 122 return 0;
160 } 123 }
161 124
162 phwi_ctrlr = phba->phwi_ctrlr; 125 phwi_ctrlr = phba->phwi_ctrlr;
@@ -236,16 +199,19 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
236} 199}
237 200
238/* 201/*
239 * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd 202 * beiscsi_exec_nemb_cmd()- execute non-embedded MBX cmd
240 * @phba: Driver priv structure 203 * @phba: driver priv structure
241 * @nonemb_cmd: Address of the MBX command issued 204 * @nonemb_cmd: DMA address of the MBX command to be issued
242 * @resp_buf: Buffer to copy the MBX cmd response 205 * @cbfn: callback func on MCC completion
243 * @resp_buf_len: respone lenght to be copied 206 * @resp_buf: buffer to copy the MBX cmd response
207 * @resp_buf_len: response length to be copied
244 * 208 *
245 **/ 209 **/
246static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, 210static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
247 struct be_dma_mem *nonemb_cmd, void *resp_buf, 211 struct be_dma_mem *nonemb_cmd,
248 int resp_buf_len) 212 void (*cbfn)(struct beiscsi_hba *,
213 unsigned int),
214 void *resp_buf, u32 resp_buf_len)
249{ 215{
250 struct be_ctrl_info *ctrl = &phba->ctrl; 216 struct be_ctrl_info *ctrl = &phba->ctrl;
251 struct be_mcc_wrb *wrb; 217 struct be_mcc_wrb *wrb;
@@ -267,36 +233,54 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
267 sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma)); 233 sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
268 sge->len = cpu_to_le32(nonemb_cmd->size); 234 sge->len = cpu_to_le32(nonemb_cmd->size);
269 235
236 if (cbfn) {
237 struct be_dma_mem *tag_mem;
238
239 set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
240 ctrl->ptag_state[tag].cbfn = cbfn;
241 tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
242
243 /* store DMA mem to be freed in callback */
244 tag_mem->size = nonemb_cmd->size;
245 tag_mem->va = nonemb_cmd->va;
246 tag_mem->dma = nonemb_cmd->dma;
247 }
270 be_mcc_notify(phba, tag); 248 be_mcc_notify(phba, tag);
271 mutex_unlock(&ctrl->mbox_lock); 249 mutex_unlock(&ctrl->mbox_lock);
272 250
251 /* with cbfn set, its async cmd, don't wait */
252 if (cbfn)
253 return 0;
254
273 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd); 255 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd);
274 256
257 /* copy the response, if any */
275 if (resp_buf) 258 if (resp_buf)
276 memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); 259 memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
260 /**
261 * This is special case of NTWK_GET_IF_INFO where the size of
262 * response is not known. beiscsi_if_get_info checks the return
263 * value to free DMA buffer.
264 */
265 if (rc == -EAGAIN)
266 return rc;
277 267
278 if (rc) { 268 /**
279 /* Check if the MBX Cmd needs to be re-issued */ 269 * If FW is busy that is driver timed out, DMA buffer is saved with
280 if (rc == -EAGAIN) 270 * the tag, only when the cmd completes this buffer is freed.
281 return rc; 271 */
282 272 if (rc == -EBUSY)
283 beiscsi_log(phba, KERN_WARNING, 273 return rc;
284 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
285 "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
286 274
287 if (rc != -EBUSY)
288 goto free_cmd;
289 else
290 return rc;
291 }
292free_cmd: 275free_cmd:
293 pci_free_consistent(ctrl->pdev, nonemb_cmd->size, 276 pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
294 nonemb_cmd->va, nonemb_cmd->dma); 277 nonemb_cmd->va, nonemb_cmd->dma);
295 return rc; 278 return rc;
296} 279}
297 280
298static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd, 281static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
299 int iscsi_cmd, int size) 282 struct be_dma_mem *cmd,
283 u8 subsystem, u8 opcode, u32 size)
300{ 284{
301 cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma); 285 cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
302 if (!cmd->va) { 286 if (!cmd->va) {
@@ -305,13 +289,86 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
305 return -ENOMEM; 289 return -ENOMEM;
306 } 290 }
307 cmd->size = size; 291 cmd->size = size;
308 be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size); 292 be_cmd_hdr_prepare(cmd->va, subsystem, opcode, size);
309 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 293 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
310 "BG_%d : subsystem iSCSI cmd %d size %d\n", 294 "BG_%d : subsystem %u cmd %u size %u\n",
311 iscsi_cmd, size); 295 subsystem, opcode, size);
312 return 0; 296 return 0;
313} 297}
314 298
299static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
300{
301 struct be_dma_mem *tag_mem;
302
303 /* status is ignored */
304 __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
305 tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
306 if (tag_mem->size) {
307 pci_free_consistent(phba->pcidev, tag_mem->size,
308 tag_mem->va, tag_mem->dma);
309 tag_mem->size = 0;
310 }
311}
312
313int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
314 struct be_set_eqd *set_eqd, int num)
315{
316 struct be_cmd_req_modify_eq_delay *req;
317 struct be_dma_mem nonemb_cmd;
318 int i, rc;
319
320 rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_COMMON,
321 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
322 if (rc)
323 return rc;
324
325 req = nonemb_cmd.va;
326 req->num_eq = cpu_to_le32(num);
327 for (i = 0; i < num; i++) {
328 req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
329 req->delay[i].phase = 0;
330 req->delay[i].delay_multiplier =
331 cpu_to_le32(set_eqd[i].delay_multiplier);
332 }
333
334 return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd,
335 __beiscsi_eq_delay_compl, NULL, 0);
336}
337
338/**
339 * beiscsi_get_initiator_name - read initiator name from flash
340 * @phba: device priv structure
341 * @name: buffer pointer
342 * @cfg: fetch user configured
343 *
344 */
345int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg)
346{
347 struct be_dma_mem nonemb_cmd;
348 struct be_cmd_hba_name resp;
349 struct be_cmd_hba_name *req;
350 int rc;
351
352 rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI_INI,
353 OPCODE_ISCSI_INI_CFG_GET_HBA_NAME, sizeof(resp));
354 if (rc)
355 return rc;
356
357 req = nonemb_cmd.va;
358 if (cfg)
359 req->hdr.version = 1;
360 rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
361 &resp, sizeof(resp));
362 if (rc) {
363 beiscsi_log(phba, KERN_ERR,
364 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
365 "BS_%d : Initiator Name MBX Failed\n");
366 return rc;
367 }
368 rc = sprintf(name, "%s\n", resp.initiator_name);
369 return rc;
370}
371
315unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba) 372unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba)
316{ 373{
317 struct be_ctrl_info *ctrl = &phba->ctrl; 374 struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -368,9 +425,9 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
368 struct be_dma_mem nonemb_cmd; 425 struct be_dma_mem nonemb_cmd;
369 int rt_val; 426 int rt_val;
370 427
371 rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 428 rt_val = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
372 OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY, 429 OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
373 sizeof(*req)); 430 sizeof(*req));
374 if (rt_val) 431 if (rt_val)
375 return rt_val; 432 return rt_val;
376 433
@@ -379,7 +436,7 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
379 req->ip_addr.ip_type = ip_type; 436 req->ip_addr.ip_type = ip_type;
380 memcpy(req->ip_addr.addr, gw, 437 memcpy(req->ip_addr.addr, gw,
381 (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN); 438 (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
382 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 439 return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
383} 440}
384 441
385int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw) 442int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
@@ -420,17 +477,17 @@ int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
420 struct be_dma_mem nonemb_cmd; 477 struct be_dma_mem nonemb_cmd;
421 int rc; 478 int rc;
422 479
423 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 480 rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
424 OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY, 481 OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
425 sizeof(*resp)); 482 sizeof(*resp));
426 if (rc) 483 if (rc)
427 return rc; 484 return rc;
428 485
429 req = nonemb_cmd.va; 486 req = nonemb_cmd.va;
430 req->ip_type = ip_type; 487 req->ip_type = ip_type;
431 488
432 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, resp, 489 return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
433 sizeof(*resp)); 490 resp, sizeof(*resp));
434} 491}
435 492
436static int 493static int
@@ -441,9 +498,9 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
441 struct be_dma_mem nonemb_cmd; 498 struct be_dma_mem nonemb_cmd;
442 int rc; 499 int rc;
443 500
444 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 501 rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
445 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR, 502 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
446 sizeof(*req)); 503 sizeof(*req));
447 if (rc) 504 if (rc)
448 return rc; 505 return rc;
449 506
@@ -461,7 +518,7 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
461 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, 518 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
462 if_info->ip_addr.subnet_mask, 519 if_info->ip_addr.subnet_mask,
463 sizeof(if_info->ip_addr.subnet_mask)); 520 sizeof(if_info->ip_addr.subnet_mask));
464 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 521 rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
465 if (rc < 0 || req->ip_params.ip_record.status) { 522 if (rc < 0 || req->ip_params.ip_record.status) {
466 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 523 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
467 "BG_%d : failed to clear IP: rc %d status %d\n", 524 "BG_%d : failed to clear IP: rc %d status %d\n",
@@ -479,9 +536,9 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
479 uint32_t ip_len; 536 uint32_t ip_len;
480 int rc; 537 int rc;
481 538
482 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 539 rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
483 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR, 540 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
484 sizeof(*req)); 541 sizeof(*req));
485 if (rc) 542 if (rc)
486 return rc; 543 return rc;
487 544
@@ -499,7 +556,7 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
499 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, 556 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
500 subnet, ip_len); 557 subnet, ip_len);
501 558
502 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 559 rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
503 /** 560 /**
504 * In some cases, host needs to look into individual record status 561 * In some cases, host needs to look into individual record status
505 * even though FW reported success for that IOCTL. 562 * even though FW reported success for that IOCTL.
@@ -527,7 +584,8 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
527 return rc; 584 return rc;
528 585
529 if (if_info->dhcp_state) { 586 if (if_info->dhcp_state) {
530 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 587 rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd,
588 CMD_SUBSYSTEM_ISCSI,
531 OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR, 589 OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
532 sizeof(*reldhcp)); 590 sizeof(*reldhcp));
533 if (rc) 591 if (rc)
@@ -536,7 +594,7 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
536 reldhcp = nonemb_cmd.va; 594 reldhcp = nonemb_cmd.va;
537 reldhcp->interface_hndl = phba->interface_handle; 595 reldhcp->interface_hndl = phba->interface_handle;
538 reldhcp->ip_type = ip_type; 596 reldhcp->ip_type = ip_type;
539 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 597 rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
540 if (rc < 0) { 598 if (rc < 0) {
541 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 599 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
542 "BG_%d : failed to release existing DHCP: %d\n", 600 "BG_%d : failed to release existing DHCP: %d\n",
@@ -606,7 +664,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
606 } 664 }
607 } 665 }
608 666
609 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 667 rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
610 OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR, 668 OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
611 sizeof(*dhcpreq)); 669 sizeof(*dhcpreq));
612 if (rc) 670 if (rc)
@@ -617,7 +675,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
617 dhcpreq->retry_count = 1; 675 dhcpreq->retry_count = 1;
618 dhcpreq->interface_hndl = phba->interface_handle; 676 dhcpreq->interface_hndl = phba->interface_handle;
619 dhcpreq->ip_type = ip_type; 677 dhcpreq->ip_type = ip_type;
620 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 678 rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
621 679
622exit: 680exit:
623 kfree(if_info); 681 kfree(if_info);
@@ -673,9 +731,10 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
673 return rc; 731 return rc;
674 732
675 do { 733 do {
676 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 734 rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd,
677 OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, 735 CMD_SUBSYSTEM_ISCSI,
678 ioctl_size); 736 OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
737 ioctl_size);
679 if (rc) 738 if (rc)
680 return rc; 739 return rc;
681 740
@@ -698,8 +757,8 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
698 return -ENOMEM; 757 return -ENOMEM;
699 } 758 }
700 759
701 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info, 760 rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, *if_info,
702 ioctl_size); 761 ioctl_size);
703 762
704 /* Check if the error is because of Insufficent_Buffer */ 763 /* Check if the error is because of Insufficent_Buffer */
705 if (rc == -EAGAIN) { 764 if (rc == -EAGAIN) {
@@ -728,41 +787,14 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
728 struct be_dma_mem nonemb_cmd; 787 struct be_dma_mem nonemb_cmd;
729 int rc; 788 int rc;
730 789
731 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 790 rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
732 OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG, 791 OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
733 sizeof(*nic)); 792 sizeof(*nic));
734 if (rc) 793 if (rc)
735 return rc; 794 return rc;
736 795
737 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, nic, sizeof(*nic)); 796 return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
738} 797 nic, sizeof(*nic));
739
740
741
742unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
743{
744 unsigned int tag;
745 struct be_mcc_wrb *wrb;
746 struct be_cmd_hba_name *req;
747 struct be_ctrl_info *ctrl = &phba->ctrl;
748
749 if (mutex_lock_interruptible(&ctrl->mbox_lock))
750 return 0;
751 wrb = alloc_mcc_wrb(phba, &tag);
752 if (!wrb) {
753 mutex_unlock(&ctrl->mbox_lock);
754 return 0;
755 }
756
757 req = embedded_payload(wrb);
758 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
759 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
760 OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
761 sizeof(*req));
762
763 be_mcc_notify(phba, tag);
764 mutex_unlock(&ctrl->mbox_lock);
765 return tag;
766} 798}
767 799
768static void beiscsi_boot_process_compl(struct beiscsi_hba *phba, 800static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 06ddc5ad6874..0b22c99a7a22 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2017 Broadcom. All Rights Reserved. 2 * Copyright 2017 Broadcom. All Rights Reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. 3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -157,7 +157,6 @@ struct be_bsg_vendor_cmd {
157 157
158struct beiscsi_endpoint { 158struct beiscsi_endpoint {
159 struct beiscsi_hba *phba; 159 struct beiscsi_hba *phba;
160 struct beiscsi_sess *sess;
161 struct beiscsi_conn *conn; 160 struct beiscsi_conn *conn;
162 struct iscsi_endpoint *openiscsi_ep; 161 struct iscsi_endpoint *openiscsi_ep;
163 unsigned short ip_type; 162 unsigned short ip_type;
@@ -169,15 +168,12 @@ struct beiscsi_endpoint {
169 u16 cid_vld; 168 u16 cid_vld;
170}; 169};
171 170
172unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
173 struct beiscsi_endpoint *beiscsi_ep,
174 unsigned short cid,
175 unsigned short issue_reset,
176 unsigned short savecfg_flag);
177int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba, 171int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
178 struct invldt_cmd_tbl *inv_tbl, 172 struct invldt_cmd_tbl *inv_tbl,
179 unsigned int nents); 173 unsigned int nents);
180 174
175int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg);
176
181int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type); 177int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
182 178
183int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type, 179int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index b2e8c0dfc79c..72ca2a2e08e2 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3137,16 +3137,9 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
3137 uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; 3137 uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
3138 struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); 3138 struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
3139 struct bfad_s *bfad = im_port->bfad; 3139 struct bfad_s *bfad = im_port->bfad;
3140 struct request_queue *request_q = job->req->q;
3141 void *payload_kbuf; 3140 void *payload_kbuf;
3142 int rc = -EINVAL; 3141 int rc = -EINVAL;
3143 3142
3144 /*
3145 * Set the BSG device request_queue size to 256 to support
3146 * payloads larger than 512*1024K bytes.
3147 */
3148 blk_queue_max_segments(request_q, 256);
3149
3150 /* Allocate a temp buffer to hold the passed in user space command */ 3143 /* Allocate a temp buffer to hold the passed in user space command */
3151 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); 3144 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3152 if (!payload_kbuf) { 3145 if (!payload_kbuf) {
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 8dcd8c70c7ee..05f523971348 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -255,7 +255,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
255 struct bfad_s *bfad = port->bfad; 255 struct bfad_s *bfad = port->bfad;
256 struct bfa_s *bfa = &bfad->bfa; 256 struct bfa_s *bfa = &bfad->bfa;
257 struct bfa_ioc_s *ioc = &bfa->ioc; 257 struct bfa_ioc_s *ioc = &bfa->ioc;
258 int addr, len, rc, i; 258 int addr, rc, i;
259 u32 len;
259 u32 *regbuf; 260 u32 *regbuf;
260 void __iomem *rb, *reg_addr; 261 void __iomem *rb, *reg_addr;
261 unsigned long flags; 262 unsigned long flags;
@@ -266,7 +267,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
266 return PTR_ERR(kern_buf); 267 return PTR_ERR(kern_buf);
267 268
268 rc = sscanf(kern_buf, "%x:%x", &addr, &len); 269 rc = sscanf(kern_buf, "%x:%x", &addr, &len);
269 if (rc < 2) { 270 if (rc < 2 || len > (UINT_MAX >> 2)) {
270 printk(KERN_INFO 271 printk(KERN_INFO
271 "bfad[%d]: %s failed to read user buf\n", 272 "bfad[%d]: %s failed to read user buf\n",
272 bfad->inst_no, __func__); 273 bfad->inst_no, __func__);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 61a93994d5ed..e0640e0f259f 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -332,12 +332,10 @@ static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
332int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, 332int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
333 struct iscsi_task *task) 333 struct iscsi_task *task)
334{ 334{
335 struct bnx2i_cmd *bnx2i_cmd;
336 struct bnx2i_login_request *login_wqe; 335 struct bnx2i_login_request *login_wqe;
337 struct iscsi_login_req *login_hdr; 336 struct iscsi_login_req *login_hdr;
338 u32 dword; 337 u32 dword;
339 338
340 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
341 login_hdr = (struct iscsi_login_req *)task->hdr; 339 login_hdr = (struct iscsi_login_req *)task->hdr;
342 login_wqe = (struct bnx2i_login_request *) 340 login_wqe = (struct bnx2i_login_request *)
343 bnx2i_conn->ep->qp.sq_prod_qe; 341 bnx2i_conn->ep->qp.sq_prod_qe;
@@ -391,12 +389,10 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
391 struct iscsi_tm *tmfabort_hdr; 389 struct iscsi_tm *tmfabort_hdr;
392 struct scsi_cmnd *ref_sc; 390 struct scsi_cmnd *ref_sc;
393 struct iscsi_task *ctask; 391 struct iscsi_task *ctask;
394 struct bnx2i_cmd *bnx2i_cmd;
395 struct bnx2i_tmf_request *tmfabort_wqe; 392 struct bnx2i_tmf_request *tmfabort_wqe;
396 u32 dword; 393 u32 dword;
397 u32 scsi_lun[2]; 394 u32 scsi_lun[2];
398 395
399 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
400 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; 396 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
401 tmfabort_wqe = (struct bnx2i_tmf_request *) 397 tmfabort_wqe = (struct bnx2i_tmf_request *)
402 bnx2i_conn->ep->qp.sq_prod_qe; 398 bnx2i_conn->ep->qp.sq_prod_qe;
@@ -463,12 +459,10 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
463int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn, 459int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
464 struct iscsi_task *mtask) 460 struct iscsi_task *mtask)
465{ 461{
466 struct bnx2i_cmd *bnx2i_cmd;
467 struct bnx2i_text_request *text_wqe; 462 struct bnx2i_text_request *text_wqe;
468 struct iscsi_text *text_hdr; 463 struct iscsi_text *text_hdr;
469 u32 dword; 464 u32 dword;
470 465
471 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
472 text_hdr = (struct iscsi_text *)mtask->hdr; 466 text_hdr = (struct iscsi_text *)mtask->hdr;
473 text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe; 467 text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
474 468
@@ -541,11 +535,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
541 char *datap, int data_len, int unsol) 535 char *datap, int data_len, int unsol)
542{ 536{
543 struct bnx2i_endpoint *ep = bnx2i_conn->ep; 537 struct bnx2i_endpoint *ep = bnx2i_conn->ep;
544 struct bnx2i_cmd *bnx2i_cmd;
545 struct bnx2i_nop_out_request *nopout_wqe; 538 struct bnx2i_nop_out_request *nopout_wqe;
546 struct iscsi_nopout *nopout_hdr; 539 struct iscsi_nopout *nopout_hdr;
547 540
548 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
549 nopout_hdr = (struct iscsi_nopout *)task->hdr; 541 nopout_hdr = (struct iscsi_nopout *)task->hdr;
550 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; 542 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
551 543
@@ -602,11 +594,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
602int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn, 594int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
603 struct iscsi_task *task) 595 struct iscsi_task *task)
604{ 596{
605 struct bnx2i_cmd *bnx2i_cmd;
606 struct bnx2i_logout_request *logout_wqe; 597 struct bnx2i_logout_request *logout_wqe;
607 struct iscsi_logout *logout_hdr; 598 struct iscsi_logout *logout_hdr;
608 599
609 bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
610 logout_hdr = (struct iscsi_logout *)task->hdr; 600 logout_hdr = (struct iscsi_logout *)task->hdr;
611 601
612 logout_wqe = (struct bnx2i_logout_request *) 602 logout_wqe = (struct bnx2i_logout_request *)
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 667046419b19..30f5f523c8cc 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -368,6 +368,9 @@ struct csio_hw_stats {
368#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts 368#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts
369 * enabled? 369 * enabled?
370 */ 370 */
371#define CSIO_HWF_ROOT_NO_RELAXED_ORDERING 0x00000400 /* Is PCIe relaxed
372 * ordering enabled
373 */
371 374
372#define csio_is_hw_intr_enabled(__hw) \ 375#define csio_is_hw_intr_enabled(__hw) \
373 ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED) 376 ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 28a9c7d706cb..cb1711a5d7a3 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -968,6 +968,9 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
968 goto err_pci_exit; 968 goto err_pci_exit;
969 } 969 }
970 970
971 if (!pcie_relaxed_ordering_enabled(pdev))
972 hw->flags |= CSIO_HWF_ROOT_NO_RELAXED_ORDERING;
973
971 pci_set_drvdata(pdev, hw); 974 pci_set_drvdata(pdev, hw);
972 975
973 rv = csio_hw_start(hw); 976 rv = csio_hw_start(hw);
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index abcedfbcecda..931b1d8f9f3e 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -491,6 +491,7 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
491 uint32_t iq_start_stop = (iq_params->iq_start) ? 491 uint32_t iq_start_stop = (iq_params->iq_start) ?
492 FW_IQ_CMD_IQSTART_F : 492 FW_IQ_CMD_IQSTART_F :
493 FW_IQ_CMD_IQSTOP_F; 493 FW_IQ_CMD_IQSTOP_F;
494 int relaxed = !(hw->flags & CSIO_HWF_ROOT_NO_RELAXED_ORDERING);
494 495
495 /* 496 /*
496 * If this IQ write is cascaded with IQ alloc request, do not 497 * If this IQ write is cascaded with IQ alloc request, do not
@@ -537,6 +538,8 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
537 cmdp->iqns_to_fl0congen |= htonl( 538 cmdp->iqns_to_fl0congen |= htonl(
538 FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)| 539 FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)|
539 FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) | 540 FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) |
541 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
542 FW_IQ_CMD_FL0DATARO_V(relaxed) |
540 FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) | 543 FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) |
541 FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen)); 544 FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen));
542 cmdp->fl0dcaen_to_fl0cidxfthresh |= htons( 545 cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 1bef2724eb78..266eddf17a99 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1575,6 +1575,7 @@ static void release_offload_resources(struct cxgbi_sock *csk)
1575 csk, csk->state, csk->flags, csk->tid); 1575 csk, csk->state, csk->flags, csk->tid);
1576 1576
1577 cxgbi_sock_free_cpl_skbs(csk); 1577 cxgbi_sock_free_cpl_skbs(csk);
1578 cxgbi_sock_purge_write_queue(csk);
1578 if (csk->wr_cred != csk->wr_max_cred) { 1579 if (csk->wr_cred != csk->wr_max_cred) {
1579 cxgbi_sock_purge_wr_queue(csk); 1580 cxgbi_sock_purge_wr_queue(csk);
1580 cxgbi_sock_reset_wr_list(csk); 1581 cxgbi_sock_reset_wr_list(csk);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index a61a152136a3..ce1336414e0a 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -688,8 +688,6 @@ rel_neigh:
688 688
689rel_rt: 689rel_rt:
690 ip_rt_put(rt); 690 ip_rt_put(rt);
691 if (csk)
692 cxgbi_sock_closed(csk);
693err_out: 691err_out:
694 return ERR_PTR(err); 692 return ERR_PTR(err);
695} 693}
@@ -1889,16 +1887,13 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
1889 struct iscsi_tcp_task *tcp_task = task->dd_data; 1887 struct iscsi_tcp_task *tcp_task = task->dd_data;
1890 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1888 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1891 struct scsi_cmnd *sc = task->sc; 1889 struct scsi_cmnd *sc = task->sc;
1890 struct cxgbi_sock *csk = cconn->cep->csk;
1891 struct net_device *ndev = cdev->ports[csk->port_id];
1892 int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; 1892 int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
1893 1893
1894 tcp_task->dd_data = tdata; 1894 tcp_task->dd_data = tdata;
1895 task->hdr = NULL; 1895 task->hdr = NULL;
1896 1896
1897 if (tdata->skb) {
1898 kfree_skb(tdata->skb);
1899 tdata->skb = NULL;
1900 }
1901
1902 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 1897 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
1903 (opcode == ISCSI_OP_SCSI_DATA_OUT || 1898 (opcode == ISCSI_OP_SCSI_DATA_OUT ||
1904 (opcode == ISCSI_OP_SCSI_CMD && 1899 (opcode == ISCSI_OP_SCSI_CMD &&
@@ -1910,15 +1905,23 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
1910 1905
1911 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); 1906 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
1912 if (!tdata->skb) { 1907 if (!tdata->skb) {
1913 struct cxgbi_sock *csk = cconn->cep->csk;
1914 struct net_device *ndev = cdev->ports[csk->port_id];
1915 ndev->stats.tx_dropped++; 1908 ndev->stats.tx_dropped++;
1916 return -ENOMEM; 1909 return -ENOMEM;
1917 } 1910 }
1918 1911
1919 skb_get(tdata->skb);
1920 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 1912 skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
1921 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 1913
1914 if (task->sc) {
1915 task->hdr = (struct iscsi_hdr *)tdata->skb->data;
1916 } else {
1917 task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
1918 if (!task->hdr) {
1919 __kfree_skb(tdata->skb);
1920 tdata->skb = NULL;
1921 ndev->stats.tx_dropped++;
1922 return -ENOMEM;
1923 }
1924 }
1922 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ 1925 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
1923 1926
1924 /* data_out uses scsi_cmd's itt */ 1927 /* data_out uses scsi_cmd's itt */
@@ -2062,9 +2065,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2062 unsigned int datalen; 2065 unsigned int datalen;
2063 int err; 2066 int err;
2064 2067
2065 if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) { 2068 if (!skb) {
2066 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2069 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2067 "task 0x%p, skb 0x%p\n", task, skb); 2070 "task 0x%p\n", task);
2068 return 0; 2071 return 0;
2069 } 2072 }
2070 2073
@@ -2076,6 +2079,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2076 return -EPIPE; 2079 return -EPIPE;
2077 } 2080 }
2078 2081
2082 tdata->skb = NULL;
2079 datalen = skb->data_len; 2083 datalen = skb->data_len;
2080 2084
2081 /* write ppod first if using ofldq to write ppod */ 2085 /* write ppod first if using ofldq to write ppod */
@@ -2089,6 +2093,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2089 /* continue. Let fl get the data */ 2093 /* continue. Let fl get the data */
2090 } 2094 }
2091 2095
2096 if (!task->sc)
2097 memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
2098
2092 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); 2099 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
2093 if (err > 0) { 2100 if (err > 0) {
2094 int pdulen = err; 2101 int pdulen = err;
@@ -2104,7 +2111,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2104 pdulen += ISCSI_DIGEST_SIZE; 2111 pdulen += ISCSI_DIGEST_SIZE;
2105 2112
2106 task->conn->txdata_octets += pdulen; 2113 task->conn->txdata_octets += pdulen;
2107 cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
2108 return 0; 2114 return 0;
2109 } 2115 }
2110 2116
@@ -2113,6 +2119,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2113 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", 2119 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
2114 task, skb, skb->len, skb->data_len, err); 2120 task, skb, skb->len, skb->data_len, err);
2115 /* reset skb to send when we are called again */ 2121 /* reset skb to send when we are called again */
2122 tdata->skb = skb;
2116 return err; 2123 return err;
2117 } 2124 }
2118 2125
@@ -2120,8 +2127,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2120 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2127 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
2121 task->itt, skb, skb->len, skb->data_len, err); 2128 task->itt, skb, skb->len, skb->data_len, err);
2122 2129
2123 __kfree_skb(tdata->skb); 2130 __kfree_skb(skb);
2124 tdata->skb = NULL;
2125 2131
2126 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2132 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
2127 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); 2133 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
@@ -2146,9 +2152,14 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
2146 task, tdata->skb, task->hdr_itt); 2152 task, tdata->skb, task->hdr_itt);
2147 2153
2148 tcp_task->dd_data = NULL; 2154 tcp_task->dd_data = NULL;
2155
2156 if (!task->sc)
2157 kfree(task->hdr);
2158 task->hdr = NULL;
2159
2149 /* never reached the xmit task callout */ 2160 /* never reached the xmit task callout */
2150 if (tdata->skb) { 2161 if (tdata->skb) {
2151 kfree_skb(tdata->skb); 2162 __kfree_skb(tdata->skb);
2152 tdata->skb = NULL; 2163 tdata->skb = NULL;
2153 } 2164 }
2154 2165
@@ -2556,7 +2567,10 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
2556 goto err_out; 2567 goto err_out;
2557 } 2568 }
2558 2569
2559 ifindex = hba->ndev->ifindex; 2570 rtnl_lock();
2571 if (!vlan_uses_dev(hba->ndev))
2572 ifindex = hba->ndev->ifindex;
2573 rtnl_unlock();
2560 } 2574 }
2561 2575
2562 if (dst_addr->sa_family == AF_INET) { 2576 if (dst_addr->sa_family == AF_INET) {
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 31a5816c2e8d..dcb190e75343 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -205,7 +205,6 @@ enum cxgbi_skcb_flags {
205 SKCBF_TX_NEED_HDR, /* packet needs a header */ 205 SKCBF_TX_NEED_HDR, /* packet needs a header */
206 SKCBF_TX_MEM_WRITE, /* memory write */ 206 SKCBF_TX_MEM_WRITE, /* memory write */
207 SKCBF_TX_FLAG_COMPL, /* wr completion flag */ 207 SKCBF_TX_FLAG_COMPL, /* wr completion flag */
208 SKCBF_TX_DONE, /* skb tx done */
209 SKCBF_RX_COALESCED, /* received whole pdu */ 208 SKCBF_RX_COALESCED, /* received whole pdu */
210 SKCBF_RX_HDR, /* received pdu header */ 209 SKCBF_RX_HDR, /* received pdu header */
211 SKCBF_RX_DATA, /* received pdu payload */ 210 SKCBF_RX_DATA, /* received pdu payload */
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 76b8b7eed0c0..617802855233 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -1634,7 +1634,10 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1634 ssize_t vpd_size; 1634 ssize_t vpd_size;
1635 char vpd_data[CXLFLASH_VPD_LEN]; 1635 char vpd_data[CXLFLASH_VPD_LEN];
1636 char tmp_buf[WWPN_BUF_LEN] = { 0 }; 1636 char tmp_buf[WWPN_BUF_LEN] = { 0 };
1637 char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; 1637 const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1638 cfg->dev_id->driver_data;
1639 const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1640 const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1638 1641
1639 /* Get the VPD data from the device */ 1642 /* Get the VPD data from the device */
1640 vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); 1643 vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
@@ -1671,17 +1674,24 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1671 * value. Note that we must copy to a temporary buffer 1674 * value. Note that we must copy to a temporary buffer
1672 * because the conversion service requires that the ASCII 1675 * because the conversion service requires that the ASCII
1673 * string be terminated. 1676 * string be terminated.
1677 *
1678 * Allow for WWPN not being found for all devices, setting
1679 * the returned WWPN to zero when not found. Notify with a
1680 * log error for cards that should have had WWPN keywords
1681 * in the VPD - cards requiring WWPN will not have their
1682 * ports programmed and operate in an undefined state.
1674 */ 1683 */
1675 for (k = 0; k < cfg->num_fc_ports; k++) { 1684 for (k = 0; k < cfg->num_fc_ports; k++) {
1676 j = ro_size; 1685 j = ro_size;
1677 i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1686 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1678 1687
1679 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); 1688 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1680 if (unlikely(i < 0)) { 1689 if (i < 0) {
1681 dev_err(dev, "%s: Port %d WWPN not found in VPD\n", 1690 if (wwpn_vpd_required)
1682 __func__, k); 1691 dev_err(dev, "%s: Port %d WWPN not found\n",
1683 rc = -ENODEV; 1692 __func__, k);
1684 goto out; 1693 wwpn[k] = 0ULL;
1694 continue;
1685 } 1695 }
1686 1696
1687 j = pci_vpd_info_field_size(&vpd_data[i]); 1697 j = pci_vpd_info_field_size(&vpd_data[i]);
@@ -3145,7 +3155,7 @@ static struct scsi_host_template driver_template = {
3145 * Device dependent values 3155 * Device dependent values
3146 */ 3156 */
3147static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, 3157static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3148 0ULL }; 3158 CXLFLASH_WWPN_VPD_REQUIRED };
3149static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, 3159static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3150 CXLFLASH_NOTIFY_SHUTDOWN }; 3160 CXLFLASH_NOTIFY_SHUTDOWN };
3151static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, 3161static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 880e348ed5c9..ba0108a7a9c2 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -95,7 +95,8 @@ enum undo_level {
95struct dev_dependent_vals { 95struct dev_dependent_vals {
96 u64 max_sectors; 96 u64 max_sectors;
97 u64 flags; 97 u64 flags;
98#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL 98#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
99#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL
99}; 100};
100 101
101struct asyc_intr_info { 102struct asyc_intr_info {
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 09daa86670fc..bedf1ce2f33c 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -548,7 +548,4 @@ struct sisl_rht_entry_f1 {
548#define TMF_LUN_RESET 0x1U 548#define TMF_LUN_RESET 0x1U
549#define TMF_CLEAR_ACA 0x2U 549#define TMF_CLEAR_ACA 0x2U
550 550
551
552#define SISLITE_MAX_WS_BLOCKS 512
553
554#endif /* _SISLITE_H */ 551#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ed46e8df2e42..170fff5aeff6 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -165,7 +165,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
165 struct llun_info *lli = arg; 165 struct llun_info *lli = arg;
166 u64 ctxid = DECODE_CTXID(rctxid); 166 u64 ctxid = DECODE_CTXID(rctxid);
167 int rc; 167 int rc;
168 pid_t pid = current->tgid, ctxpid = 0; 168 pid_t pid = task_tgid_nr(current), ctxpid = 0;
169 169
170 if (ctx_ctrl & CTX_CTRL_FILE) { 170 if (ctx_ctrl & CTX_CTRL_FILE) {
171 lli = NULL; 171 lli = NULL;
@@ -173,7 +173,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
173 } 173 }
174 174
175 if (ctx_ctrl & CTX_CTRL_CLONE) 175 if (ctx_ctrl & CTX_CTRL_CLONE)
176 pid = current->parent->tgid; 176 pid = task_ppid_nr(current);
177 177
178 if (likely(ctxid < MAX_CONTEXT)) { 178 if (likely(ctxid < MAX_CONTEXT)) {
179 while (true) { 179 while (true) {
@@ -824,7 +824,7 @@ static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
824 ctxi->rht_perms = perms; 824 ctxi->rht_perms = perms;
825 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; 825 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
826 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); 826 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
827 ctxi->pid = current->tgid; /* tgid = pid */ 827 ctxi->pid = task_tgid_nr(current); /* tgid = pid */
828 ctxi->ctx = ctx; 828 ctxi->ctx = ctx;
829 ctxi->cfg = cfg; 829 ctxi->cfg = cfg;
830 ctxi->file = file; 830 ctxi->file = file;
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 703bf1e9a64a..5deef57a7834 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -428,12 +428,14 @@ static int write_same16(struct scsi_device *sdev,
428 u8 *sense_buf = NULL; 428 u8 *sense_buf = NULL;
429 int rc = 0; 429 int rc = 0;
430 int result = 0; 430 int result = 0;
431 int ws_limit = SISLITE_MAX_WS_BLOCKS;
432 u64 offset = lba; 431 u64 offset = lba;
433 int left = nblks; 432 int left = nblks;
434 u32 to = sdev->request_queue->rq_timeout;
435 struct cxlflash_cfg *cfg = shost_priv(sdev->host); 433 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
436 struct device *dev = &cfg->dev->dev; 434 struct device *dev = &cfg->dev->dev;
435 const u32 s = ilog2(sdev->sector_size) - 9;
436 const u32 to = sdev->request_queue->rq_timeout;
437 const u32 ws_limit = blk_queue_get_max_sectors(sdev->request_queue,
438 REQ_OP_WRITE_SAME) >> s;
437 439
438 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); 440 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
439 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL); 441 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 0962fd544401..fd22dc6ab5d9 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1085,11 +1085,11 @@ static void alua_rescan(struct scsi_device *sdev)
1085static int alua_bus_attach(struct scsi_device *sdev) 1085static int alua_bus_attach(struct scsi_device *sdev)
1086{ 1086{
1087 struct alua_dh_data *h; 1087 struct alua_dh_data *h;
1088 int err, ret = -EINVAL; 1088 int err;
1089 1089
1090 h = kzalloc(sizeof(*h) , GFP_KERNEL); 1090 h = kzalloc(sizeof(*h) , GFP_KERNEL);
1091 if (!h) 1091 if (!h)
1092 return -ENOMEM; 1092 return SCSI_DH_NOMEM;
1093 spin_lock_init(&h->pg_lock); 1093 spin_lock_init(&h->pg_lock);
1094 rcu_assign_pointer(h->pg, NULL); 1094 rcu_assign_pointer(h->pg, NULL);
1095 h->init_error = SCSI_DH_OK; 1095 h->init_error = SCSI_DH_OK;
@@ -1098,16 +1098,14 @@ static int alua_bus_attach(struct scsi_device *sdev)
1098 1098
1099 mutex_init(&h->init_mutex); 1099 mutex_init(&h->init_mutex);
1100 err = alua_initialize(sdev, h); 1100 err = alua_initialize(sdev, h);
1101 if (err == SCSI_DH_NOMEM)
1102 ret = -ENOMEM;
1103 if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED) 1101 if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
1104 goto failed; 1102 goto failed;
1105 1103
1106 sdev->handler_data = h; 1104 sdev->handler_data = h;
1107 return 0; 1105 return SCSI_DH_OK;
1108failed: 1106failed:
1109 kfree(h); 1107 kfree(h);
1110 return ret; 1108 return err;
1111} 1109}
1112 1110
1113/* 1111/*
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 8654e940e1a8..6a2792f3a37e 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -490,7 +490,7 @@ static int clariion_bus_attach(struct scsi_device *sdev)
490 490
491 h = kzalloc(sizeof(*h) , GFP_KERNEL); 491 h = kzalloc(sizeof(*h) , GFP_KERNEL);
492 if (!h) 492 if (!h)
493 return -ENOMEM; 493 return SCSI_DH_NOMEM;
494 h->lun_state = CLARIION_LUN_UNINITIALIZED; 494 h->lun_state = CLARIION_LUN_UNINITIALIZED;
495 h->default_sp = CLARIION_UNBOUND_LU; 495 h->default_sp = CLARIION_UNBOUND_LU;
496 h->current_sp = CLARIION_UNBOUND_LU; 496 h->current_sp = CLARIION_UNBOUND_LU;
@@ -510,11 +510,11 @@ static int clariion_bus_attach(struct scsi_device *sdev)
510 h->default_sp + 'A'); 510 h->default_sp + 'A');
511 511
512 sdev->handler_data = h; 512 sdev->handler_data = h;
513 return 0; 513 return SCSI_DH_OK;
514 514
515failed: 515failed:
516 kfree(h); 516 kfree(h);
517 return -EINVAL; 517 return err;
518} 518}
519 519
520static void clariion_bus_detach(struct scsi_device *sdev) 520static void clariion_bus_detach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 62d314e07d11..e65a0ebb4b54 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -218,24 +218,28 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
218 218
219 h = kzalloc(sizeof(*h), GFP_KERNEL); 219 h = kzalloc(sizeof(*h), GFP_KERNEL);
220 if (!h) 220 if (!h)
221 return -ENOMEM; 221 return SCSI_DH_NOMEM;
222 h->path_state = HP_SW_PATH_UNINITIALIZED; 222 h->path_state = HP_SW_PATH_UNINITIALIZED;
223 h->retries = HP_SW_RETRIES; 223 h->retries = HP_SW_RETRIES;
224 h->sdev = sdev; 224 h->sdev = sdev;
225 225
226 ret = hp_sw_tur(sdev, h); 226 ret = hp_sw_tur(sdev, h);
227 if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED) 227 if (ret != SCSI_DH_OK)
228 goto failed; 228 goto failed;
229 if (h->path_state == HP_SW_PATH_UNINITIALIZED) {
230 ret = SCSI_DH_NOSYS;
231 goto failed;
232 }
229 233
230 sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", 234 sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
231 HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? 235 HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
232 "active":"passive"); 236 "active":"passive");
233 237
234 sdev->handler_data = h; 238 sdev->handler_data = h;
235 return 0; 239 return SCSI_DH_OK;
236failed: 240failed:
237 kfree(h); 241 kfree(h);
238 return -EINVAL; 242 return ret;
239} 243}
240 244
241static void hp_sw_bus_detach( struct scsi_device *sdev ) 245static void hp_sw_bus_detach( struct scsi_device *sdev )
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 2ceff585f189..7af31a1247ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -729,7 +729,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
729 729
730 h = kzalloc(sizeof(*h) , GFP_KERNEL); 730 h = kzalloc(sizeof(*h) , GFP_KERNEL);
731 if (!h) 731 if (!h)
732 return -ENOMEM; 732 return SCSI_DH_NOMEM;
733 h->lun = UNINITIALIZED_LUN; 733 h->lun = UNINITIALIZED_LUN;
734 h->state = RDAC_STATE_ACTIVE; 734 h->state = RDAC_STATE_ACTIVE;
735 735
@@ -755,7 +755,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
755 lun_state[(int)h->lun_state]); 755 lun_state[(int)h->lun_state]);
756 756
757 sdev->handler_data = h; 757 sdev->handler_data = h;
758 return 0; 758 return SCSI_DH_OK;
759 759
760clean_ctlr: 760clean_ctlr:
761 spin_lock(&list_lock); 761 spin_lock(&list_lock);
@@ -764,7 +764,7 @@ clean_ctlr:
764 764
765failed: 765failed:
766 kfree(h); 766 kfree(h);
767 return -EINVAL; 767 return err;
768} 768}
769 769
770static void rdac_bus_detach( struct scsi_device *sdev ) 770static void rdac_bus_detach( struct scsi_device *sdev )
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 5cc09dce4d25..f46b312d04bc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -155,7 +155,7 @@ static int fcoe_vport_disable(struct fc_vport *, bool disable);
155static void fcoe_set_vport_symbolic_name(struct fc_vport *); 155static void fcoe_set_vport_symbolic_name(struct fc_vport *);
156static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); 156static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
157static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *); 157static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
158 158static void fcoe_vport_remove(struct fc_lport *);
159 159
160static struct fcoe_sysfs_function_template fcoe_sysfs_templ = { 160static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
161 .set_fcoe_ctlr_mode = fcoe_ctlr_mode, 161 .set_fcoe_ctlr_mode = fcoe_ctlr_mode,
@@ -501,11 +501,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
501 struct net_device *netdev = fcoe->netdev; 501 struct net_device *netdev = fcoe->netdev;
502 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); 502 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
503 503
504 rtnl_lock();
505 if (!fcoe->removed)
506 fcoe_interface_remove(fcoe);
507 rtnl_unlock();
508
509 /* Release the self-reference taken during fcoe_interface_create() */ 504 /* Release the self-reference taken during fcoe_interface_create() */
510 /* tear-down the FCoE controller */ 505 /* tear-down the FCoE controller */
511 fcoe_ctlr_destroy(fip); 506 fcoe_ctlr_destroy(fip);
@@ -1014,6 +1009,8 @@ skip_oem:
1014 * fcoe_if_destroy() - Tear down a SW FCoE instance 1009 * fcoe_if_destroy() - Tear down a SW FCoE instance
1015 * @lport: The local port to be destroyed 1010 * @lport: The local port to be destroyed
1016 * 1011 *
1012 * Locking: Must be called with the RTNL mutex held.
1013 *
1017 */ 1014 */
1018static void fcoe_if_destroy(struct fc_lport *lport) 1015static void fcoe_if_destroy(struct fc_lport *lport)
1019{ 1016{
@@ -1035,14 +1032,12 @@ static void fcoe_if_destroy(struct fc_lport *lport)
1035 /* Free existing transmit skbs */ 1032 /* Free existing transmit skbs */
1036 fcoe_clean_pending_queue(lport); 1033 fcoe_clean_pending_queue(lport);
1037 1034
1038 rtnl_lock();
1039 if (!is_zero_ether_addr(port->data_src_addr)) 1035 if (!is_zero_ether_addr(port->data_src_addr))
1040 dev_uc_del(netdev, port->data_src_addr); 1036 dev_uc_del(netdev, port->data_src_addr);
1041 if (lport->vport) 1037 if (lport->vport)
1042 synchronize_net(); 1038 synchronize_net();
1043 else 1039 else
1044 fcoe_interface_remove(fcoe); 1040 fcoe_interface_remove(fcoe);
1045 rtnl_unlock();
1046 1041
1047 /* Free queued packets for the per-CPU receive threads */ 1042 /* Free queued packets for the per-CPU receive threads */
1048 fcoe_percpu_clean(lport); 1043 fcoe_percpu_clean(lport);
@@ -1903,7 +1898,14 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1903 case NETDEV_UNREGISTER: 1898 case NETDEV_UNREGISTER:
1904 list_del(&fcoe->list); 1899 list_del(&fcoe->list);
1905 port = lport_priv(ctlr->lp); 1900 port = lport_priv(ctlr->lp);
1906 queue_work(fcoe_wq, &port->destroy_work); 1901 fcoe_vport_remove(lport);
1902 mutex_lock(&fcoe_config_mutex);
1903 fcoe_if_destroy(lport);
1904 if (!fcoe->removed)
1905 fcoe_interface_remove(fcoe);
1906 fcoe_interface_cleanup(fcoe);
1907 mutex_unlock(&fcoe_config_mutex);
1908 fcoe_ctlr_device_delete(fcoe_ctlr_to_ctlr_dev(ctlr));
1907 goto out; 1909 goto out;
1908 break; 1910 break;
1909 case NETDEV_FEAT_CHANGE: 1911 case NETDEV_FEAT_CHANGE:
@@ -2108,30 +2110,10 @@ static void fcoe_destroy_work(struct work_struct *work)
2108 struct fcoe_ctlr *ctlr; 2110 struct fcoe_ctlr *ctlr;
2109 struct fcoe_port *port; 2111 struct fcoe_port *port;
2110 struct fcoe_interface *fcoe; 2112 struct fcoe_interface *fcoe;
2111 struct Scsi_Host *shost;
2112 struct fc_host_attrs *fc_host;
2113 unsigned long flags;
2114 struct fc_vport *vport;
2115 struct fc_vport *next_vport;
2116 2113
2117 port = container_of(work, struct fcoe_port, destroy_work); 2114 port = container_of(work, struct fcoe_port, destroy_work);
2118 shost = port->lport->host;
2119 fc_host = shost_to_fc_host(shost);
2120
2121 /* Loop through all the vports and mark them for deletion */
2122 spin_lock_irqsave(shost->host_lock, flags);
2123 list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
2124 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
2125 continue;
2126 } else {
2127 vport->flags |= FC_VPORT_DELETING;
2128 queue_work(fc_host_work_q(shost),
2129 &vport->vport_delete_work);
2130 }
2131 }
2132 spin_unlock_irqrestore(shost->host_lock, flags);
2133 2115
2134 flush_workqueue(fc_host_work_q(shost)); 2116 fcoe_vport_remove(port->lport);
2135 2117
2136 mutex_lock(&fcoe_config_mutex); 2118 mutex_lock(&fcoe_config_mutex);
2137 2119
@@ -2139,7 +2121,11 @@ static void fcoe_destroy_work(struct work_struct *work)
2139 ctlr = fcoe_to_ctlr(fcoe); 2121 ctlr = fcoe_to_ctlr(fcoe);
2140 cdev = fcoe_ctlr_to_ctlr_dev(ctlr); 2122 cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
2141 2123
2124 rtnl_lock();
2142 fcoe_if_destroy(port->lport); 2125 fcoe_if_destroy(port->lport);
2126 if (!fcoe->removed)
2127 fcoe_interface_remove(fcoe);
2128 rtnl_unlock();
2143 fcoe_interface_cleanup(fcoe); 2129 fcoe_interface_cleanup(fcoe);
2144 2130
2145 mutex_unlock(&fcoe_config_mutex); 2131 mutex_unlock(&fcoe_config_mutex);
@@ -2254,6 +2240,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
2254 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2240 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2255 netdev->name); 2241 netdev->name);
2256 rc = -EIO; 2242 rc = -EIO;
2243 if (!fcoe->removed)
2244 fcoe_interface_remove(fcoe);
2257 rtnl_unlock(); 2245 rtnl_unlock();
2258 fcoe_interface_cleanup(fcoe); 2246 fcoe_interface_cleanup(fcoe);
2259 mutex_unlock(&fcoe_config_mutex); 2247 mutex_unlock(&fcoe_config_mutex);
@@ -2738,13 +2726,46 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
2738 mutex_unlock(&n_port->lp_mutex); 2726 mutex_unlock(&n_port->lp_mutex);
2739 2727
2740 mutex_lock(&fcoe_config_mutex); 2728 mutex_lock(&fcoe_config_mutex);
2729 rtnl_lock();
2741 fcoe_if_destroy(vn_port); 2730 fcoe_if_destroy(vn_port);
2731 rtnl_unlock();
2742 mutex_unlock(&fcoe_config_mutex); 2732 mutex_unlock(&fcoe_config_mutex);
2743 2733
2744 return 0; 2734 return 0;
2745} 2735}
2746 2736
2747/** 2737/**
2738 * fcoe_vport_remove() - remove attached vports
2739 * @lport: lport for which the vports should be removed
2740 */
2741static void fcoe_vport_remove(struct fc_lport *lport)
2742{
2743 struct Scsi_Host *shost;
2744 struct fc_host_attrs *fc_host;
2745 unsigned long flags;
2746 struct fc_vport *vport;
2747 struct fc_vport *next_vport;
2748
2749 shost = lport->host;
2750 fc_host = shost_to_fc_host(shost);
2751
2752 /* Loop through all the vports and mark them for deletion */
2753 spin_lock_irqsave(shost->host_lock, flags);
2754 list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
2755 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
2756 continue;
2757 } else {
2758 vport->flags |= FC_VPORT_DELETING;
2759 queue_work(fc_host_work_q(shost),
2760 &vport->vport_delete_work);
2761 }
2762 }
2763 spin_unlock_irqrestore(shost->host_lock, flags);
2764
2765 flush_workqueue(fc_host_work_q(shost));
2766}
2767
2768/**
2748 * fcoe_vport_disable() - change vport state 2769 * fcoe_vport_disable() - change vport state
2749 * @vport: vport to bring online/offline 2770 * @vport: vport to bring online/offline
2750 * @disable: should the vport be disabled? 2771 * @disable: should the vport be disabled?
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 15692ea05ced..83357b0367d8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -29,7 +29,7 @@
29#define HISI_SAS_MAX_PHYS 9 29#define HISI_SAS_MAX_PHYS 9
30#define HISI_SAS_MAX_QUEUES 32 30#define HISI_SAS_MAX_QUEUES 32
31#define HISI_SAS_QUEUE_SLOTS 512 31#define HISI_SAS_QUEUE_SLOTS 512
32#define HISI_SAS_MAX_ITCT_ENTRIES 2048 32#define HISI_SAS_MAX_ITCT_ENTRIES 1024
33#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES 33#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
34#define HISI_SAS_RESET_BIT 0 34#define HISI_SAS_RESET_BIT 0
35#define HISI_SAS_REJECT_CMD_BIT 1 35#define HISI_SAS_REJECT_CMD_BIT 1
@@ -96,6 +96,7 @@ struct hisi_sas_hw_error {
96 int shift; 96 int shift;
97 const char *msg; 97 const char *msg;
98 int reg; 98 int reg;
99 const struct hisi_sas_hw_error *sub;
99}; 100};
100 101
101struct hisi_sas_phy { 102struct hisi_sas_phy {
@@ -197,7 +198,7 @@ struct hisi_sas_hw {
197 int (*slot_complete)(struct hisi_hba *hisi_hba, 198 int (*slot_complete)(struct hisi_hba *hisi_hba,
198 struct hisi_sas_slot *slot); 199 struct hisi_sas_slot *slot);
199 void (*phys_init)(struct hisi_hba *hisi_hba); 200 void (*phys_init)(struct hisi_hba *hisi_hba);
200 void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no); 201 void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
201 void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no); 202 void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
202 void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no); 203 void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
203 void (*get_events)(struct hisi_hba *hisi_hba, int phy_no); 204 void (*get_events)(struct hisi_hba *hisi_hba, int phy_no);
@@ -341,7 +342,11 @@ struct hisi_sas_initial_fis {
341}; 342};
342 343
343struct hisi_sas_breakpoint { 344struct hisi_sas_breakpoint {
344 u8 data[128]; /*io128 byte*/ 345 u8 data[128];
346};
347
348struct hisi_sas_sata_breakpoint {
349 struct hisi_sas_breakpoint tag[32];
345}; 350};
346 351
347struct hisi_sas_sge { 352struct hisi_sas_sge {
@@ -419,4 +424,6 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
419 struct sas_task *task, 424 struct sas_task *task,
420 struct hisi_sas_slot *slot); 425 struct hisi_sas_slot *slot);
421extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); 426extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
427extern void hisi_sas_rst_work_handler(struct work_struct *work);
428extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
422#endif 429#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 37c838be4757..61a85ff8e459 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -185,13 +185,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
185 struct domain_device *device = task->dev; 185 struct domain_device *device = task->dev;
186 struct hisi_sas_device *sas_dev = device->lldd_dev; 186 struct hisi_sas_device *sas_dev = device->lldd_dev;
187 187
188 if (!task->lldd_task)
189 return;
190
191 task->lldd_task = NULL;
192
188 if (!sas_protocol_ata(task->task_proto)) 193 if (!sas_protocol_ata(task->task_proto))
189 if (slot->n_elem) 194 if (slot->n_elem)
190 dma_unmap_sg(dev, task->scatter, slot->n_elem, 195 dma_unmap_sg(dev, task->scatter, slot->n_elem,
191 task->data_dir); 196 task->data_dir);
192 197
193 task->lldd_task = NULL;
194
195 if (sas_dev) 198 if (sas_dev)
196 atomic64_dec(&sas_dev->running_req); 199 atomic64_dec(&sas_dev->running_req);
197 } 200 }
@@ -199,8 +202,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
199 if (slot->buf) 202 if (slot->buf)
200 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); 203 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
201 204
202
203 list_del_init(&slot->entry); 205 list_del_init(&slot->entry);
206 slot->buf = NULL;
204 slot->task = NULL; 207 slot->task = NULL;
205 slot->port = NULL; 208 slot->port = NULL;
206 hisi_sas_slot_index_free(hisi_hba, slot->idx); 209 hisi_sas_slot_index_free(hisi_hba, slot->idx);
@@ -401,7 +404,9 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
401 goto err_out_buf; 404 goto err_out_buf;
402 } 405 }
403 406
407 spin_lock_irqsave(&hisi_hba->lock, flags);
404 list_add_tail(&slot->entry, &sas_dev->list); 408 list_add_tail(&slot->entry, &sas_dev->list);
409 spin_unlock_irqrestore(&hisi_hba->lock, flags);
405 spin_lock_irqsave(&task->task_state_lock, flags); 410 spin_lock_irqsave(&task->task_state_lock, flags);
406 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 411 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
407 spin_unlock_irqrestore(&task->task_state_lock, flags); 412 spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -505,9 +510,10 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
505{ 510{
506 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 511 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
507 struct hisi_sas_device *sas_dev = NULL; 512 struct hisi_sas_device *sas_dev = NULL;
513 unsigned long flags;
508 int i; 514 int i;
509 515
510 spin_lock(&hisi_hba->lock); 516 spin_lock_irqsave(&hisi_hba->lock, flags);
511 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 517 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
512 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 518 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
513 int queue = i % hisi_hba->queue_count; 519 int queue = i % hisi_hba->queue_count;
@@ -524,7 +530,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
524 break; 530 break;
525 } 531 }
526 } 532 }
527 spin_unlock(&hisi_hba->lock); 533 spin_unlock_irqrestore(&hisi_hba->lock, flags);
528 534
529 return sas_dev; 535 return sas_dev;
530} 536}
@@ -761,7 +767,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
761 case PHY_FUNC_LINK_RESET: 767 case PHY_FUNC_LINK_RESET:
762 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 768 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
763 msleep(100); 769 msleep(100);
764 hisi_hba->hw->phy_enable(hisi_hba, phy_no); 770 hisi_hba->hw->phy_start(hisi_hba, phy_no);
765 break; 771 break;
766 772
767 case PHY_FUNC_DISABLE: 773 case PHY_FUNC_DISABLE:
@@ -1045,7 +1051,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
1045 1051
1046static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1052static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1047{ 1053{
1048 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1049 struct device *dev = hisi_hba->dev; 1054 struct device *dev = hisi_hba->dev;
1050 struct Scsi_Host *shost = hisi_hba->shost; 1055 struct Scsi_Host *shost = hisi_hba->shost;
1051 u32 old_state, state; 1056 u32 old_state, state;
@@ -1073,7 +1078,6 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1073 hisi_sas_release_tasks(hisi_hba); 1078 hisi_sas_release_tasks(hisi_hba);
1074 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1079 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1075 1080
1076 sas_ha->notify_ha_event(sas_ha, HAE_RESET);
1077 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1081 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1078 1082
1079 /* Init and wait for PHYs to come up and all libsas event finished. */ 1083 /* Init and wait for PHYs to come up and all libsas event finished. */
@@ -1159,7 +1163,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
1159 1163
1160 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1164 rc = hisi_sas_internal_task_abort(hisi_hba, device,
1161 HISI_SAS_INT_ABT_CMD, tag); 1165 HISI_SAS_INT_ABT_CMD, tag);
1162 if (rc == TMF_RESP_FUNC_FAILED) { 1166 if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
1163 spin_lock_irqsave(&hisi_hba->lock, flags); 1167 spin_lock_irqsave(&hisi_hba->lock, flags);
1164 hisi_sas_do_release_task(hisi_hba, task, slot); 1168 hisi_sas_do_release_task(hisi_hba, task, slot);
1165 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1169 spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1387,8 +1391,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
1387 if (rc) 1391 if (rc)
1388 goto err_out_buf; 1392 goto err_out_buf;
1389 1393
1390 1394 spin_lock_irqsave(&hisi_hba->lock, flags);
1391 list_add_tail(&slot->entry, &sas_dev->list); 1395 list_add_tail(&slot->entry, &sas_dev->list);
1396 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1392 spin_lock_irqsave(&task->task_state_lock, flags); 1397 spin_lock_irqsave(&task->task_state_lock, flags);
1393 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 1398 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1394 spin_unlock_irqrestore(&task->task_state_lock, flags); 1399 spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1469,6 +1474,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1469 if (slot) 1474 if (slot)
1470 slot->task = NULL; 1475 slot->task = NULL;
1471 dev_err(dev, "internal task abort: timeout.\n"); 1476 dev_err(dev, "internal task abort: timeout.\n");
1477 goto exit;
1472 } 1478 }
1473 } 1479 }
1474 1480
@@ -1540,6 +1546,17 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
1540} 1546}
1541EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 1547EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
1542 1548
1549void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
1550{
1551 int i;
1552
1553 for (i = 0; i < hisi_hba->queue_count; i++) {
1554 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1555
1556 tasklet_kill(&cq->tasklet);
1557 }
1558}
1559EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
1543 1560
1544struct scsi_transport_template *hisi_sas_stt; 1561struct scsi_transport_template *hisi_sas_stt;
1545EXPORT_SYMBOL_GPL(hisi_sas_stt); 1562EXPORT_SYMBOL_GPL(hisi_sas_stt);
@@ -1608,7 +1625,7 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
1608 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 1625 s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
1609 memset(hisi_hba->breakpoint, 0, s); 1626 memset(hisi_hba->breakpoint, 0, s);
1610 1627
1611 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; 1628 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1612 memset(hisi_hba->sata_breakpoint, 0, s); 1629 memset(hisi_hba->sata_breakpoint, 0, s);
1613} 1630}
1614EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 1631EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
@@ -1701,7 +1718,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1701 if (!hisi_hba->initial_fis) 1718 if (!hisi_hba->initial_fis)
1702 goto err_out; 1719 goto err_out;
1703 1720
1704 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; 1721 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1705 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s, 1722 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1706 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL); 1723 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1707 if (!hisi_hba->sata_breakpoint) 1724 if (!hisi_hba->sata_breakpoint)
@@ -1766,7 +1783,7 @@ void hisi_sas_free(struct hisi_hba *hisi_hba)
1766 hisi_hba->initial_fis, 1783 hisi_hba->initial_fis,
1767 hisi_hba->initial_fis_dma); 1784 hisi_hba->initial_fis_dma);
1768 1785
1769 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; 1786 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
1770 if (hisi_hba->sata_breakpoint) 1787 if (hisi_hba->sata_breakpoint)
1771 dma_free_coherent(dev, s, 1788 dma_free_coherent(dev, s,
1772 hisi_hba->sata_breakpoint, 1789 hisi_hba->sata_breakpoint,
@@ -1777,13 +1794,14 @@ void hisi_sas_free(struct hisi_hba *hisi_hba)
1777} 1794}
1778EXPORT_SYMBOL_GPL(hisi_sas_free); 1795EXPORT_SYMBOL_GPL(hisi_sas_free);
1779 1796
1780static void hisi_sas_rst_work_handler(struct work_struct *work) 1797void hisi_sas_rst_work_handler(struct work_struct *work)
1781{ 1798{
1782 struct hisi_hba *hisi_hba = 1799 struct hisi_hba *hisi_hba =
1783 container_of(work, struct hisi_hba, rst_work); 1800 container_of(work, struct hisi_hba, rst_work);
1784 1801
1785 hisi_sas_controller_reset(hisi_hba); 1802 hisi_sas_controller_reset(hisi_hba);
1786} 1803}
1804EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
1787 1805
1788int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 1806int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
1789{ 1807{
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 9385554e43a6..dc6eca8d6afd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1857,7 +1857,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
1857 .start_delivery = start_delivery_v1_hw, 1857 .start_delivery = start_delivery_v1_hw,
1858 .slot_complete = slot_complete_v1_hw, 1858 .slot_complete = slot_complete_v1_hw,
1859 .phys_init = phys_init_v1_hw, 1859 .phys_init = phys_init_v1_hw,
1860 .phy_enable = enable_phy_v1_hw, 1860 .phy_start = start_phy_v1_hw,
1861 .phy_disable = disable_phy_v1_hw, 1861 .phy_disable = disable_phy_v1_hw,
1862 .phy_hard_reset = phy_hard_reset_v1_hw, 1862 .phy_hard_reset = phy_hard_reset_v1_hw,
1863 .phy_set_linkrate = phy_set_linkrate_v1_hw, 1863 .phy_set_linkrate = phy_set_linkrate_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b1f097dabd01..d02c2a791981 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -406,80 +406,70 @@ static const struct hisi_sas_hw_error one_bit_ecc_errors[] = {
406 .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF), 406 .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF),
407 .msk = HGC_DQE_ECC_1B_ADDR_MSK, 407 .msk = HGC_DQE_ECC_1B_ADDR_MSK,
408 .shift = HGC_DQE_ECC_1B_ADDR_OFF, 408 .shift = HGC_DQE_ECC_1B_ADDR_OFF,
409 .msg = "hgc_dqe_acc1b_intr found: \ 409 .msg = "hgc_dqe_acc1b_intr found: Ram address is 0x%08X\n",
410 Ram address is 0x%08X\n",
411 .reg = HGC_DQE_ECC_ADDR, 410 .reg = HGC_DQE_ECC_ADDR,
412 }, 411 },
413 { 412 {
414 .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF), 413 .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF),
415 .msk = HGC_IOST_ECC_1B_ADDR_MSK, 414 .msk = HGC_IOST_ECC_1B_ADDR_MSK,
416 .shift = HGC_IOST_ECC_1B_ADDR_OFF, 415 .shift = HGC_IOST_ECC_1B_ADDR_OFF,
417 .msg = "hgc_iost_acc1b_intr found: \ 416 .msg = "hgc_iost_acc1b_intr found: Ram address is 0x%08X\n",
418 Ram address is 0x%08X\n",
419 .reg = HGC_IOST_ECC_ADDR, 417 .reg = HGC_IOST_ECC_ADDR,
420 }, 418 },
421 { 419 {
422 .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF), 420 .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF),
423 .msk = HGC_ITCT_ECC_1B_ADDR_MSK, 421 .msk = HGC_ITCT_ECC_1B_ADDR_MSK,
424 .shift = HGC_ITCT_ECC_1B_ADDR_OFF, 422 .shift = HGC_ITCT_ECC_1B_ADDR_OFF,
425 .msg = "hgc_itct_acc1b_intr found: \ 423 .msg = "hgc_itct_acc1b_intr found: am address is 0x%08X\n",
426 Ram address is 0x%08X\n",
427 .reg = HGC_ITCT_ECC_ADDR, 424 .reg = HGC_ITCT_ECC_ADDR,
428 }, 425 },
429 { 426 {
430 .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF), 427 .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF),
431 .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, 428 .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
432 .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, 429 .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
433 .msg = "hgc_iostl_acc1b_intr found: \ 430 .msg = "hgc_iostl_acc1b_intr found: memory address is 0x%08X\n",
434 memory address is 0x%08X\n",
435 .reg = HGC_LM_DFX_STATUS2, 431 .reg = HGC_LM_DFX_STATUS2,
436 }, 432 },
437 { 433 {
438 .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF), 434 .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF),
439 .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, 435 .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
440 .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, 436 .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
441 .msg = "hgc_itctl_acc1b_intr found: \ 437 .msg = "hgc_itctl_acc1b_intr found: memory address is 0x%08X\n",
442 memory address is 0x%08X\n",
443 .reg = HGC_LM_DFX_STATUS2, 438 .reg = HGC_LM_DFX_STATUS2,
444 }, 439 },
445 { 440 {
446 .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF), 441 .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF),
447 .msk = HGC_CQE_ECC_1B_ADDR_MSK, 442 .msk = HGC_CQE_ECC_1B_ADDR_MSK,
448 .shift = HGC_CQE_ECC_1B_ADDR_OFF, 443 .shift = HGC_CQE_ECC_1B_ADDR_OFF,
449 .msg = "hgc_cqe_acc1b_intr found: \ 444 .msg = "hgc_cqe_acc1b_intr found: Ram address is 0x%08X\n",
450 Ram address is 0x%08X\n",
451 .reg = HGC_CQE_ECC_ADDR, 445 .reg = HGC_CQE_ECC_ADDR,
452 }, 446 },
453 { 447 {
454 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF), 448 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF),
455 .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, 449 .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
456 .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, 450 .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
457 .msg = "rxm_mem0_acc1b_intr found: \ 451 .msg = "rxm_mem0_acc1b_intr found: memory address is 0x%08X\n",
458 memory address is 0x%08X\n",
459 .reg = HGC_RXM_DFX_STATUS14, 452 .reg = HGC_RXM_DFX_STATUS14,
460 }, 453 },
461 { 454 {
462 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF), 455 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF),
463 .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, 456 .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
464 .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, 457 .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
465 .msg = "rxm_mem1_acc1b_intr found: \ 458 .msg = "rxm_mem1_acc1b_intr found: memory address is 0x%08X\n",
466 memory address is 0x%08X\n",
467 .reg = HGC_RXM_DFX_STATUS14, 459 .reg = HGC_RXM_DFX_STATUS14,
468 }, 460 },
469 { 461 {
470 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF), 462 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF),
471 .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, 463 .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
472 .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, 464 .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
473 .msg = "rxm_mem2_acc1b_intr found: \ 465 .msg = "rxm_mem2_acc1b_intr found: memory address is 0x%08X\n",
474 memory address is 0x%08X\n",
475 .reg = HGC_RXM_DFX_STATUS14, 466 .reg = HGC_RXM_DFX_STATUS14,
476 }, 467 },
477 { 468 {
478 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF), 469 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF),
479 .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, 470 .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
480 .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, 471 .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
481 .msg = "rxm_mem3_acc1b_intr found: \ 472 .msg = "rxm_mem3_acc1b_intr found: memory address is 0x%08X\n",
482 memory address is 0x%08X\n",
483 .reg = HGC_RXM_DFX_STATUS15, 473 .reg = HGC_RXM_DFX_STATUS15,
484 }, 474 },
485}; 475};
@@ -489,80 +479,70 @@ static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
489 .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF), 479 .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
490 .msk = HGC_DQE_ECC_MB_ADDR_MSK, 480 .msk = HGC_DQE_ECC_MB_ADDR_MSK,
491 .shift = HGC_DQE_ECC_MB_ADDR_OFF, 481 .shift = HGC_DQE_ECC_MB_ADDR_OFF,
492 .msg = "hgc_dqe_accbad_intr (0x%x) found: \ 482 .msg = "hgc_dqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
493 Ram address is 0x%08X\n",
494 .reg = HGC_DQE_ECC_ADDR, 483 .reg = HGC_DQE_ECC_ADDR,
495 }, 484 },
496 { 485 {
497 .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF), 486 .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
498 .msk = HGC_IOST_ECC_MB_ADDR_MSK, 487 .msk = HGC_IOST_ECC_MB_ADDR_MSK,
499 .shift = HGC_IOST_ECC_MB_ADDR_OFF, 488 .shift = HGC_IOST_ECC_MB_ADDR_OFF,
500 .msg = "hgc_iost_accbad_intr (0x%x) found: \ 489 .msg = "hgc_iost_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
501 Ram address is 0x%08X\n",
502 .reg = HGC_IOST_ECC_ADDR, 490 .reg = HGC_IOST_ECC_ADDR,
503 }, 491 },
504 { 492 {
505 .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF), 493 .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
506 .msk = HGC_ITCT_ECC_MB_ADDR_MSK, 494 .msk = HGC_ITCT_ECC_MB_ADDR_MSK,
507 .shift = HGC_ITCT_ECC_MB_ADDR_OFF, 495 .shift = HGC_ITCT_ECC_MB_ADDR_OFF,
508 .msg = "hgc_itct_accbad_intr (0x%x) found: \ 496 .msg = "hgc_itct_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
509 Ram address is 0x%08X\n",
510 .reg = HGC_ITCT_ECC_ADDR, 497 .reg = HGC_ITCT_ECC_ADDR,
511 }, 498 },
512 { 499 {
513 .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF), 500 .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
514 .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK, 501 .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
515 .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF, 502 .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
516 .msg = "hgc_iostl_accbad_intr (0x%x) found: \ 503 .msg = "hgc_iostl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
517 memory address is 0x%08X\n",
518 .reg = HGC_LM_DFX_STATUS2, 504 .reg = HGC_LM_DFX_STATUS2,
519 }, 505 },
520 { 506 {
521 .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF), 507 .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
522 .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK, 508 .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
523 .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF, 509 .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
524 .msg = "hgc_itctl_accbad_intr (0x%x) found: \ 510 .msg = "hgc_itctl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
525 memory address is 0x%08X\n",
526 .reg = HGC_LM_DFX_STATUS2, 511 .reg = HGC_LM_DFX_STATUS2,
527 }, 512 },
528 { 513 {
529 .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF), 514 .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
530 .msk = HGC_CQE_ECC_MB_ADDR_MSK, 515 .msk = HGC_CQE_ECC_MB_ADDR_MSK,
531 .shift = HGC_CQE_ECC_MB_ADDR_OFF, 516 .shift = HGC_CQE_ECC_MB_ADDR_OFF,
532 .msg = "hgc_cqe_accbad_intr (0x%x) found: \ 517 .msg = "hgc_cqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
533 Ram address is 0x%08X\n",
534 .reg = HGC_CQE_ECC_ADDR, 518 .reg = HGC_CQE_ECC_ADDR,
535 }, 519 },
536 { 520 {
537 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF), 521 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
538 .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK, 522 .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
539 .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF, 523 .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
540 .msg = "rxm_mem0_accbad_intr (0x%x) found: \ 524 .msg = "rxm_mem0_accbad_intr (0x%x) found: memory address is 0x%08X\n",
541 memory address is 0x%08X\n",
542 .reg = HGC_RXM_DFX_STATUS14, 525 .reg = HGC_RXM_DFX_STATUS14,
543 }, 526 },
544 { 527 {
545 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF), 528 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
546 .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK, 529 .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
547 .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF, 530 .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
548 .msg = "rxm_mem1_accbad_intr (0x%x) found: \ 531 .msg = "rxm_mem1_accbad_intr (0x%x) found: memory address is 0x%08X\n",
549 memory address is 0x%08X\n",
550 .reg = HGC_RXM_DFX_STATUS14, 532 .reg = HGC_RXM_DFX_STATUS14,
551 }, 533 },
552 { 534 {
553 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF), 535 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
554 .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK, 536 .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
555 .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF, 537 .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
556 .msg = "rxm_mem2_accbad_intr (0x%x) found: \ 538 .msg = "rxm_mem2_accbad_intr (0x%x) found: memory address is 0x%08X\n",
557 memory address is 0x%08X\n",
558 .reg = HGC_RXM_DFX_STATUS14, 539 .reg = HGC_RXM_DFX_STATUS14,
559 }, 540 },
560 { 541 {
561 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF), 542 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
562 .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK, 543 .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
563 .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF, 544 .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
564 .msg = "rxm_mem3_accbad_intr (0x%x) found: \ 545 .msg = "rxm_mem3_accbad_intr (0x%x) found: memory address is 0x%08X\n",
565 memory address is 0x%08X\n",
566 .reg = HGC_RXM_DFX_STATUS15, 546 .reg = HGC_RXM_DFX_STATUS15,
567 }, 547 },
568}; 548};
@@ -843,8 +823,9 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
843 struct hisi_sas_device *sas_dev = NULL; 823 struct hisi_sas_device *sas_dev = NULL;
844 int i, sata_dev = dev_is_sata(device); 824 int i, sata_dev = dev_is_sata(device);
845 int sata_idx = -1; 825 int sata_idx = -1;
826 unsigned long flags;
846 827
847 spin_lock(&hisi_hba->lock); 828 spin_lock_irqsave(&hisi_hba->lock, flags);
848 829
849 if (sata_dev) 830 if (sata_dev)
850 if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx)) 831 if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
@@ -874,7 +855,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
874 } 855 }
875 856
876out: 857out:
877 spin_unlock(&hisi_hba->lock); 858 spin_unlock_irqrestore(&hisi_hba->lock, flags);
878 859
879 return sas_dev; 860 return sas_dev;
880} 861}
@@ -2376,7 +2357,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
2376 2357
2377 if (unlikely(aborted)) { 2358 if (unlikely(aborted)) {
2378 ts->stat = SAS_ABORTED_TASK; 2359 ts->stat = SAS_ABORTED_TASK;
2360 spin_lock_irqsave(&hisi_hba->lock, flags);
2379 hisi_sas_slot_task_free(hisi_hba, task, slot); 2361 hisi_sas_slot_task_free(hisi_hba, task, slot);
2362 spin_unlock_irqrestore(&hisi_hba->lock, flags);
2380 return -1; 2363 return -1;
2381 } 2364 }
2382 2365
@@ -2951,25 +2934,58 @@ static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
2951 return IRQ_HANDLED; 2934 return IRQ_HANDLED;
2952} 2935}
2953 2936
2954#define AXI_ERR_NR 8 2937static const struct hisi_sas_hw_error axi_error[] = {
2955static const char axi_err_info[AXI_ERR_NR][32] = { 2938 { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
2956 "IOST_AXI_W_ERR", 2939 { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
2957 "IOST_AXI_R_ERR", 2940 { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
2958 "ITCT_AXI_W_ERR", 2941 { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
2959 "ITCT_AXI_R_ERR", 2942 { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
2960 "SATA_AXI_W_ERR", 2943 { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
2961 "SATA_AXI_R_ERR", 2944 { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
2962 "DQE_AXI_R_ERR", 2945 { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
2963 "CQE_AXI_W_ERR" 2946 {},
2947};
2948
2949static const struct hisi_sas_hw_error fifo_error[] = {
2950 { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" },
2951 { .msk = BIT(9), .msg = "CQE_MSG_FIFIO" },
2952 { .msk = BIT(10), .msg = "GETDQE_FIFO" },
2953 { .msk = BIT(11), .msg = "CMDP_FIFO" },
2954 { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
2955 {},
2964}; 2956};
2965 2957
2966#define FIFO_ERR_NR 5 2958static const struct hisi_sas_hw_error fatal_axi_errors[] = {
2967static const char fifo_err_info[FIFO_ERR_NR][32] = { 2959 {
2968 "CQE_WINFO_FIFO", 2960 .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
2969 "CQE_MSG_FIFIO", 2961 .msg = "write pointer and depth",
2970 "GETDQE_FIFO", 2962 },
2971 "CMDP_FIFO", 2963 {
2972 "AWTCTRL_FIFO" 2964 .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
2965 .msg = "iptt no match slot",
2966 },
2967 {
2968 .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
2969 .msg = "read pointer and depth",
2970 },
2971 {
2972 .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
2973 .reg = HGC_AXI_FIFO_ERR_INFO,
2974 .sub = axi_error,
2975 },
2976 {
2977 .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
2978 .reg = HGC_AXI_FIFO_ERR_INFO,
2979 .sub = fifo_error,
2980 },
2981 {
2982 .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
2983 .msg = "LM add/fetch list",
2984 },
2985 {
2986 .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
2987 .msg = "SAS_HGC_ABT fetch LM list",
2988 },
2973}; 2989};
2974 2990
2975static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) 2991static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
@@ -2977,98 +2993,47 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
2977 struct hisi_hba *hisi_hba = p; 2993 struct hisi_hba *hisi_hba = p;
2978 u32 irq_value, irq_msk, err_value; 2994 u32 irq_value, irq_msk, err_value;
2979 struct device *dev = hisi_hba->dev; 2995 struct device *dev = hisi_hba->dev;
2996 const struct hisi_sas_hw_error *axi_error;
2997 int i;
2980 2998
2981 irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); 2999 irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
2982 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe); 3000 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
2983 3001
2984 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 3002 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
2985 if (irq_value) {
2986 if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
2987 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
2988 1 << ENT_INT_SRC3_WP_DEPTH_OFF);
2989 dev_warn(dev, "write pointer and depth error (0x%x) \
2990 found!\n",
2991 irq_value);
2992 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2993 }
2994
2995 if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
2996 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
2997 1 <<
2998 ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
2999 dev_warn(dev, "iptt no match slot error (0x%x) found!\n",
3000 irq_value);
3001 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
3002 }
3003 3003
3004 if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) { 3004 for (i = 0; i < ARRAY_SIZE(fatal_axi_errors); i++) {
3005 dev_warn(dev, "read pointer and depth error (0x%x) \ 3005 axi_error = &fatal_axi_errors[i];
3006 found!\n", 3006 if (!(irq_value & axi_error->irq_msk))
3007 irq_value); 3007 continue;
3008 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
3009 }
3010
3011 if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
3012 int i;
3013
3014 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
3015 1 << ENT_INT_SRC3_AXI_OFF);
3016 err_value = hisi_sas_read32(hisi_hba,
3017 HGC_AXI_FIFO_ERR_INFO);
3018
3019 for (i = 0; i < AXI_ERR_NR; i++) {
3020 if (err_value & BIT(i)) {
3021 dev_warn(dev, "%s (0x%x) found!\n",
3022 axi_err_info[i], irq_value);
3023 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
3024 }
3025 }
3026 }
3027
3028 if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
3029 int i;
3030
3031 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
3032 1 << ENT_INT_SRC3_FIFO_OFF);
3033 err_value = hisi_sas_read32(hisi_hba,
3034 HGC_AXI_FIFO_ERR_INFO);
3035 3008
3036 for (i = 0; i < FIFO_ERR_NR; i++) { 3009 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
3037 if (err_value & BIT(AXI_ERR_NR + i)) { 3010 1 << axi_error->shift);
3038 dev_warn(dev, "%s (0x%x) found!\n", 3011 if (axi_error->sub) {
3039 fifo_err_info[i], irq_value); 3012 const struct hisi_sas_hw_error *sub = axi_error->sub;
3040 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3013
3041 } 3014 err_value = hisi_sas_read32(hisi_hba, axi_error->reg);
3015 for (; sub->msk || sub->msg; sub++) {
3016 if (!(err_value & sub->msk))
3017 continue;
3018 dev_warn(dev, "%s (0x%x) found!\n",
3019 sub->msg, irq_value);
3020 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
3042 } 3021 }
3043 3022 } else {
3044 } 3023 dev_warn(dev, "%s (0x%x) found!\n",
3045 3024 axi_error->msg, irq_value);
3046 if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
3047 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
3048 1 << ENT_INT_SRC3_LM_OFF);
3049 dev_warn(dev, "LM add/fetch list error (0x%x) found!\n",
3050 irq_value);
3051 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
3052 }
3053
3054 if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
3055 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
3056 1 << ENT_INT_SRC3_ABT_OFF);
3057 dev_warn(dev, "SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
3058 irq_value);
3059 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 3025 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
3060 } 3026 }
3027 }
3061 3028
3062 if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { 3029 if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
3063 u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); 3030 u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
3064 u32 dev_id = reg_val & ITCT_DEV_MSK; 3031 u32 dev_id = reg_val & ITCT_DEV_MSK;
3065 struct hisi_sas_device *sas_dev = 3032 struct hisi_sas_device *sas_dev = &hisi_hba->devices[dev_id];
3066 &hisi_hba->devices[dev_id];
3067 3033
3068 hisi_sas_write32(hisi_hba, ITCT_CLR, 0); 3034 hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
3069 dev_dbg(dev, "clear ITCT ok\n"); 3035 dev_dbg(dev, "clear ITCT ok\n");
3070 complete(sas_dev->completion); 3036 complete(sas_dev->completion);
3071 }
3072 } 3037 }
3073 3038
3074 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value); 3039 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value);
@@ -3408,6 +3373,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
3408 3373
3409 interrupt_disable_v2_hw(hisi_hba); 3374 interrupt_disable_v2_hw(hisi_hba);
3410 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); 3375 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
3376 hisi_sas_kill_tasklets(hisi_hba);
3411 3377
3412 hisi_sas_stop_phys(hisi_hba); 3378 hisi_sas_stop_phys(hisi_hba);
3413 3379
@@ -3458,7 +3424,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
3458 .start_delivery = start_delivery_v2_hw, 3424 .start_delivery = start_delivery_v2_hw,
3459 .slot_complete = slot_complete_v2_hw, 3425 .slot_complete = slot_complete_v2_hw,
3460 .phys_init = phys_init_v2_hw, 3426 .phys_init = phys_init_v2_hw,
3461 .phy_enable = enable_phy_v2_hw, 3427 .phy_start = start_phy_v2_hw,
3462 .phy_disable = disable_phy_v2_hw, 3428 .phy_disable = disable_phy_v2_hw,
3463 .phy_hard_reset = phy_hard_reset_v2_hw, 3429 .phy_hard_reset = phy_hard_reset_v2_hw,
3464 .get_events = phy_get_events_v2_hw, 3430 .get_events = phy_get_events_v2_hw,
@@ -3491,16 +3457,11 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
3491{ 3457{
3492 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 3458 struct sas_ha_struct *sha = platform_get_drvdata(pdev);
3493 struct hisi_hba *hisi_hba = sha->lldd_ha; 3459 struct hisi_hba *hisi_hba = sha->lldd_ha;
3494 int i;
3495 3460
3496 if (timer_pending(&hisi_hba->timer)) 3461 if (timer_pending(&hisi_hba->timer))
3497 del_timer(&hisi_hba->timer); 3462 del_timer(&hisi_hba->timer);
3498 3463
3499 for (i = 0; i < hisi_hba->queue_count; i++) { 3464 hisi_sas_kill_tasklets(hisi_hba);
3500 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
3501
3502 tasklet_kill(&cq->tasklet);
3503 }
3504 3465
3505 return hisi_sas_remove(pdev); 3466 return hisi_sas_remove(pdev);
3506} 3467}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 3f2f0baf2a5e..19b1f2ffec17 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -53,6 +53,11 @@
53#define HGC_IOMB_PROC1_STATUS 0x104 53#define HGC_IOMB_PROC1_STATUS 0x104
54#define CFG_1US_TIMER_TRSH 0xcc 54#define CFG_1US_TIMER_TRSH 0xcc
55#define CHNL_INT_STATUS 0x148 55#define CHNL_INT_STATUS 0x148
56#define HGC_AXI_FIFO_ERR_INFO 0x154
57#define AXI_ERR_INFO_OFF 0
58#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
59#define FIFO_ERR_INFO_OFF 8
60#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
56#define INT_COAL_EN 0x19c 61#define INT_COAL_EN 0x19c
57#define OQ_INT_COAL_TIME 0x1a0 62#define OQ_INT_COAL_TIME 0x1a0
58#define OQ_INT_COAL_CNT 0x1a4 63#define OQ_INT_COAL_CNT 0x1a4
@@ -135,6 +140,7 @@
135#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) 140#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
136#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) 141#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
137#define STP_LINK_TIMER (PORT_BASE + 0x120) 142#define STP_LINK_TIMER (PORT_BASE + 0x120)
143#define CON_CFG_DRIVER (PORT_BASE + 0x130)
138#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134) 144#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
139#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138) 145#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
140#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c) 146#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
@@ -154,6 +160,10 @@
154#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF) 160#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
155#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17 161#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
156#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF) 162#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
163#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
164#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
165#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
166#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
157#define CHL_INT2 (PORT_BASE + 0x1bc) 167#define CHL_INT2 (PORT_BASE + 0x1bc)
158#define CHL_INT0_MSK (PORT_BASE + 0x1c0) 168#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
159#define CHL_INT1_MSK (PORT_BASE + 0x1c4) 169#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
@@ -171,8 +181,11 @@
171#define DMA_RX_STATUS (PORT_BASE + 0x2e8) 181#define DMA_RX_STATUS (PORT_BASE + 0x2e8)
172#define DMA_RX_STATUS_BUSY_OFF 0 182#define DMA_RX_STATUS_BUSY_OFF 0
173#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF) 183#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
184#define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
185#define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
186#define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
187#define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
174 188
175#define MAX_ITCT_HW 4096 /* max the hw can support */
176#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */ 189#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
177#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW) 190#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
178#error Max ITCT exceeded 191#error Max ITCT exceeded
@@ -377,6 +390,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
377 /* Global registers init */ 390 /* Global registers init */
378 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 391 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
379 (u32)((1ULL << hisi_hba->queue_count) - 1)); 392 (u32)((1ULL << hisi_hba->queue_count) - 1));
393 hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
380 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); 394 hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
381 hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd); 395 hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
382 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); 396 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
@@ -388,7 +402,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
388 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff); 402 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
389 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe); 403 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
390 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe); 404 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
391 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); 405 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
392 hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0); 406 hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
393 hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0); 407 hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
394 hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0); 408 hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
@@ -407,7 +421,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
407 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); 421 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
408 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff); 422 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
409 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); 423 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
410 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); 424 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
411 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); 425 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
412 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); 426 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
413 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); 427 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -422,6 +436,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
422 0xa03e8); 436 0xa03e8);
423 hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 437 hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER,
424 0x7f7a120); 438 0x7f7a120);
439 hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER,
440 0x2a0a80);
425 } 441 }
426 for (i = 0; i < hisi_hba->queue_count; i++) { 442 for (i = 0; i < hisi_hba->queue_count; i++) {
427 /* Delivery queue */ 443 /* Delivery queue */
@@ -575,35 +591,24 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
575static void free_device_v3_hw(struct hisi_hba *hisi_hba, 591static void free_device_v3_hw(struct hisi_hba *hisi_hba,
576 struct hisi_sas_device *sas_dev) 592 struct hisi_sas_device *sas_dev)
577{ 593{
594 DECLARE_COMPLETION_ONSTACK(completion);
578 u64 dev_id = sas_dev->device_id; 595 u64 dev_id = sas_dev->device_id;
579 struct device *dev = hisi_hba->dev;
580 struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; 596 struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
581 u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 597 u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
582 598
599 sas_dev->completion = &completion;
600
583 /* clear the itct interrupt state */ 601 /* clear the itct interrupt state */
584 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) 602 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
585 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 603 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
586 ENT_INT_SRC3_ITC_INT_MSK); 604 ENT_INT_SRC3_ITC_INT_MSK);
587 605
588 /* clear the itct table*/ 606 /* clear the itct table*/
589 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR); 607 reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
590 reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
591 hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); 608 hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
592 609
593 udelay(10); 610 wait_for_completion(sas_dev->completion);
594 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); 611 memset(itct, 0, sizeof(struct hisi_sas_itct));
595 if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
596 dev_dbg(dev, "got clear ITCT done interrupt\n");
597
598 /* invalid the itct state*/
599 memset(itct, 0, sizeof(struct hisi_sas_itct));
600 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
601 ENT_INT_SRC3_ITC_INT_MSK);
602
603 /* clear the itct */
604 hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
605 dev_dbg(dev, "clear ITCT ok\n");
606 }
607} 612}
608 613
609static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, 614static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
@@ -755,10 +760,12 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
755{ 760{
756 int i, bitmap = 0; 761 int i, bitmap = 0;
757 u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); 762 u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
763 u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
758 764
759 for (i = 0; i < hisi_hba->n_phy; i++) 765 for (i = 0; i < hisi_hba->n_phy; i++)
760 if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) 766 if (phy_state & BIT(i))
761 bitmap |= 1 << i; 767 if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
768 bitmap |= BIT(i);
762 769
763 return bitmap; 770 return bitmap;
764} 771}
@@ -988,20 +995,6 @@ err_out_req:
988 return rc; 995 return rc;
989} 996}
990 997
991static int get_ncq_tag_v3_hw(struct sas_task *task, u32 *tag)
992{
993 struct ata_queued_cmd *qc = task->uldd_task;
994
995 if (qc) {
996 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
997 qc->tf.command == ATA_CMD_FPDMA_READ) {
998 *tag = qc->tag;
999 return 1;
1000 }
1001 }
1002 return 0;
1003}
1004
1005static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, 998static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1006 struct hisi_sas_slot *slot) 999 struct hisi_sas_slot *slot)
1007{ 1000{
@@ -1050,7 +1043,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1050 hdr->dw1 = cpu_to_le32(dw1); 1043 hdr->dw1 = cpu_to_le32(dw1);
1051 1044
1052 /* dw2 */ 1045 /* dw2 */
1053 if (task->ata_task.use_ncq && get_ncq_tag_v3_hw(task, &hdr_tag)) { 1046 if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
1054 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 1047 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
1055 dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; 1048 dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
1056 } 1049 }
@@ -1276,6 +1269,25 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
1276 return res; 1269 return res;
1277} 1270}
1278 1271
1272static const struct hisi_sas_hw_error port_axi_error[] = {
1273 {
1274 .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
1275 .msg = "dma_tx_axi_wr_err",
1276 },
1277 {
1278 .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
1279 .msg = "dma_tx_axi_rd_err",
1280 },
1281 {
1282 .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
1283 .msg = "dma_rx_axi_wr_err",
1284 },
1285 {
1286 .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
1287 .msg = "dma_rx_axi_rd_err",
1288 },
1289};
1290
1279static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) 1291static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1280{ 1292{
1281 struct hisi_hba *hisi_hba = p; 1293 struct hisi_hba *hisi_hba = p;
@@ -1301,10 +1313,19 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1301 1313
1302 if ((irq_msk & (4 << (phy_no * 4))) && 1314 if ((irq_msk & (4 << (phy_no * 4))) &&
1303 irq_value1) { 1315 irq_value1) {
1304 if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK | 1316 int i;
1305 CHL_INT1_DMAC_TX_ECC_ERR_MSK)) 1317
1306 panic("%s: DMAC RX/TX ecc bad error! (0x%x)", 1318 for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
1307 dev_name(dev), irq_value1); 1319 const struct hisi_sas_hw_error *error =
1320 &port_axi_error[i];
1321
1322 if (!(irq_value1 & error->irq_msk))
1323 continue;
1324
1325 dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
1326 error->msg, phy_no, irq_value1);
1327 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1328 }
1308 1329
1309 hisi_sas_phy_write32(hisi_hba, phy_no, 1330 hisi_sas_phy_write32(hisi_hba, phy_no,
1310 CHL_INT1, irq_value1); 1331 CHL_INT1, irq_value1);
@@ -1331,6 +1352,114 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1331 return IRQ_HANDLED; 1352 return IRQ_HANDLED;
1332} 1353}
1333 1354
1355static const struct hisi_sas_hw_error axi_error[] = {
1356 { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
1357 { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
1358 { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
1359 { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
1360 { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
1361 { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
1362 { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
1363 { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
1364 {},
1365};
1366
1367static const struct hisi_sas_hw_error fifo_error[] = {
1368 { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" },
1369 { .msk = BIT(9), .msg = "CQE_MSG_FIFIO" },
1370 { .msk = BIT(10), .msg = "GETDQE_FIFO" },
1371 { .msk = BIT(11), .msg = "CMDP_FIFO" },
1372 { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
1373 {},
1374};
1375
1376static const struct hisi_sas_hw_error fatal_axi_error[] = {
1377 {
1378 .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
1379 .msg = "write pointer and depth",
1380 },
1381 {
1382 .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
1383 .msg = "iptt no match slot",
1384 },
1385 {
1386 .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
1387 .msg = "read pointer and depth",
1388 },
1389 {
1390 .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
1391 .reg = HGC_AXI_FIFO_ERR_INFO,
1392 .sub = axi_error,
1393 },
1394 {
1395 .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
1396 .reg = HGC_AXI_FIFO_ERR_INFO,
1397 .sub = fifo_error,
1398 },
1399 {
1400 .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
1401 .msg = "LM add/fetch list",
1402 },
1403 {
1404 .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
1405 .msg = "SAS_HGC_ABT fetch LM list",
1406 },
1407};
1408
1409static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
1410{
1411 u32 irq_value, irq_msk;
1412 struct hisi_hba *hisi_hba = p;
1413 struct device *dev = hisi_hba->dev;
1414 int i;
1415
1416 irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
1417 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
1418
1419 irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
1420
1421 for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
1422 const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
1423
1424 if (!(irq_value & error->irq_msk))
1425 continue;
1426
1427 if (error->sub) {
1428 const struct hisi_sas_hw_error *sub = error->sub;
1429 u32 err_value = hisi_sas_read32(hisi_hba, error->reg);
1430
1431 for (; sub->msk || sub->msg; sub++) {
1432 if (!(err_value & sub->msk))
1433 continue;
1434
1435 dev_warn(dev, "%s error (0x%x) found!\n",
1436 sub->msg, irq_value);
1437 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1438 }
1439 } else {
1440 dev_warn(dev, "%s error (0x%x) found!\n",
1441 error->msg, irq_value);
1442 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1443 }
1444 }
1445
1446 if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
1447 u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
1448 u32 dev_id = reg_val & ITCT_DEV_MSK;
1449 struct hisi_sas_device *sas_dev =
1450 &hisi_hba->devices[dev_id];
1451
1452 hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
1453 dev_dbg(dev, "clear ITCT ok\n");
1454 complete(sas_dev->completion);
1455 }
1456
1457 hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
1458 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
1459
1460 return IRQ_HANDLED;
1461}
1462
1334static void 1463static void
1335slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, 1464slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
1336 struct hisi_sas_slot *slot) 1465 struct hisi_sas_slot *slot)
@@ -1414,7 +1543,9 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
1414 ts->resp = SAS_TASK_COMPLETE; 1543 ts->resp = SAS_TASK_COMPLETE;
1415 if (unlikely(aborted)) { 1544 if (unlikely(aborted)) {
1416 ts->stat = SAS_ABORTED_TASK; 1545 ts->stat = SAS_ABORTED_TASK;
1546 spin_lock_irqsave(&hisi_hba->lock, flags);
1417 hisi_sas_slot_task_free(hisi_hba, task, slot); 1547 hisi_sas_slot_task_free(hisi_hba, task, slot);
1548 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1418 return -1; 1549 return -1;
1419 } 1550 }
1420 1551
@@ -1629,6 +1760,15 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
1629 goto free_phy_irq; 1760 goto free_phy_irq;
1630 } 1761 }
1631 1762
1763 rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
1764 fatal_axi_int_v3_hw, 0,
1765 DRV_NAME " fatal", hisi_hba);
1766 if (rc) {
1767 dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
1768 rc = -ENOENT;
1769 goto free_chnl_interrupt;
1770 }
1771
1632 /* Init tasklets for cq only */ 1772 /* Init tasklets for cq only */
1633 for (i = 0; i < hisi_hba->queue_count; i++) { 1773 for (i = 0; i < hisi_hba->queue_count; i++) {
1634 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1774 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
@@ -1656,6 +1796,8 @@ free_cq_irqs:
1656 1796
1657 free_irq(pci_irq_vector(pdev, k+16), cq); 1797 free_irq(pci_irq_vector(pdev, k+16), cq);
1658 } 1798 }
1799 free_irq(pci_irq_vector(pdev, 11), hisi_hba);
1800free_chnl_interrupt:
1659 free_irq(pci_irq_vector(pdev, 2), hisi_hba); 1801 free_irq(pci_irq_vector(pdev, 2), hisi_hba);
1660free_phy_irq: 1802free_phy_irq:
1661 free_irq(pci_irq_vector(pdev, 1), hisi_hba); 1803 free_irq(pci_irq_vector(pdev, 1), hisi_hba);
@@ -1749,6 +1891,31 @@ static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
1749 return hisi_sas_read32(hisi_hba, PHY_STATE); 1891 return hisi_sas_read32(hisi_hba, PHY_STATE);
1750} 1892}
1751 1893
1894static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1895{
1896 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1897 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1898 struct sas_phy *sphy = sas_phy->phy;
1899 u32 reg_value;
1900
1901 /* loss dword sync */
1902 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
1903 sphy->loss_of_dword_sync_count += reg_value;
1904
1905 /* phy reset problem */
1906 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
1907 sphy->phy_reset_problem_count += reg_value;
1908
1909 /* invalid dword */
1910 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
1911 sphy->invalid_dword_count += reg_value;
1912
1913 /* disparity err */
1914 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
1915 sphy->running_disparity_error_count += reg_value;
1916
1917}
1918
1752static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) 1919static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
1753{ 1920{
1754 struct device *dev = hisi_hba->dev; 1921 struct device *dev = hisi_hba->dev;
@@ -1757,6 +1924,7 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
1757 1924
1758 interrupt_disable_v3_hw(hisi_hba); 1925 interrupt_disable_v3_hw(hisi_hba);
1759 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); 1926 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
1927 hisi_sas_kill_tasklets(hisi_hba);
1760 1928
1761 hisi_sas_stop_phys(hisi_hba); 1929 hisi_sas_stop_phys(hisi_hba);
1762 1930
@@ -1793,7 +1961,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
1793 .start_delivery = start_delivery_v3_hw, 1961 .start_delivery = start_delivery_v3_hw,
1794 .slot_complete = slot_complete_v3_hw, 1962 .slot_complete = slot_complete_v3_hw,
1795 .phys_init = phys_init_v3_hw, 1963 .phys_init = phys_init_v3_hw,
1796 .phy_enable = enable_phy_v3_hw, 1964 .phy_start = start_phy_v3_hw,
1797 .phy_disable = disable_phy_v3_hw, 1965 .phy_disable = disable_phy_v3_hw,
1798 .phy_hard_reset = phy_hard_reset_v3_hw, 1966 .phy_hard_reset = phy_hard_reset_v3_hw,
1799 .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw, 1967 .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
@@ -1801,6 +1969,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
1801 .dereg_device = dereg_device_v3_hw, 1969 .dereg_device = dereg_device_v3_hw,
1802 .soft_reset = soft_reset_v3_hw, 1970 .soft_reset = soft_reset_v3_hw,
1803 .get_phys_state = get_phys_state_v3_hw, 1971 .get_phys_state = get_phys_state_v3_hw,
1972 .get_events = phy_get_events_v3_hw,
1804}; 1973};
1805 1974
1806static struct Scsi_Host * 1975static struct Scsi_Host *
@@ -1817,6 +1986,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
1817 } 1986 }
1818 hisi_hba = shost_priv(shost); 1987 hisi_hba = shost_priv(shost);
1819 1988
1989 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
1820 hisi_hba->hw = &hisi_sas_v3_hw; 1990 hisi_hba->hw = &hisi_sas_v3_hw;
1821 hisi_hba->pci_dev = pdev; 1991 hisi_hba->pci_dev = pdev;
1822 hisi_hba->dev = dev; 1992 hisi_hba->dev = dev;
@@ -1960,11 +2130,11 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
1960 2130
1961 free_irq(pci_irq_vector(pdev, 1), hisi_hba); 2131 free_irq(pci_irq_vector(pdev, 1), hisi_hba);
1962 free_irq(pci_irq_vector(pdev, 2), hisi_hba); 2132 free_irq(pci_irq_vector(pdev, 2), hisi_hba);
2133 free_irq(pci_irq_vector(pdev, 11), hisi_hba);
1963 for (i = 0; i < hisi_hba->queue_count; i++) { 2134 for (i = 0; i < hisi_hba->queue_count; i++) {
1964 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2135 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1965 2136
1966 free_irq(pci_irq_vector(pdev, i+16), cq); 2137 free_irq(pci_irq_vector(pdev, i+16), cq);
1967 tasklet_kill(&cq->tasklet);
1968 } 2138 }
1969 pci_free_irq_vectors(pdev); 2139 pci_free_irq_vectors(pdev);
1970} 2140}
@@ -1980,6 +2150,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
1980 sas_remove_host(sha->core.shost); 2150 sas_remove_host(sha->core.shost);
1981 2151
1982 hisi_sas_v3_destroy_irqs(pdev, hisi_hba); 2152 hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
2153 hisi_sas_kill_tasklets(hisi_hba);
1983 pci_release_regions(pdev); 2154 pci_release_regions(pdev);
1984 pci_disable_device(pdev); 2155 pci_disable_device(pdev);
1985 hisi_sas_free(hisi_hba); 2156 hisi_sas_free(hisi_hba);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4ed3d26ffdde..287e5eb0723f 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
60 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' 60 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
61 * with an optional trailing '-' followed by a byte value (0-255). 61 * with an optional trailing '-' followed by a byte value (0-255).
62 */ 62 */
63#define HPSA_DRIVER_VERSION "3.4.20-0" 63#define HPSA_DRIVER_VERSION "3.4.20-125"
64#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 64#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
65#define HPSA "hpsa" 65#define HPSA "hpsa"
66 66
@@ -787,7 +787,12 @@ static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
787 } 787 }
788 offload_enabled = hdev->offload_enabled; 788 offload_enabled = hdev->offload_enabled;
789 spin_unlock_irqrestore(&h->lock, flags); 789 spin_unlock_irqrestore(&h->lock, flags);
790 return snprintf(buf, 20, "%d\n", offload_enabled); 790
791 if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
792 return snprintf(buf, 20, "%d\n", offload_enabled);
793 else
794 return snprintf(buf, 40, "%s\n",
795 "Not applicable for a controller");
791} 796}
792 797
793#define MAX_PATHS 8 798#define MAX_PATHS 8
@@ -1270,7 +1275,7 @@ static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1270 dev->model, 1275 dev->model,
1271 label, 1276 label,
1272 dev->offload_config ? '+' : '-', 1277 dev->offload_config ? '+' : '-',
1273 dev->offload_enabled ? '+' : '-', 1278 dev->offload_to_be_enabled ? '+' : '-',
1274 dev->expose_device); 1279 dev->expose_device);
1275} 1280}
1276 1281
@@ -1345,36 +1350,42 @@ lun_assigned:
1345 (*nadded)++; 1350 (*nadded)++;
1346 hpsa_show_dev_msg(KERN_INFO, h, device, 1351 hpsa_show_dev_msg(KERN_INFO, h, device,
1347 device->expose_device ? "added" : "masked"); 1352 device->expose_device ? "added" : "masked");
1348 device->offload_to_be_enabled = device->offload_enabled;
1349 device->offload_enabled = 0;
1350 return 0; 1353 return 0;
1351} 1354}
1352 1355
1353/* Update an entry in h->dev[] array. */ 1356/*
1357 * Called during a scan operation.
1358 *
1359 * Update an entry in h->dev[] array.
1360 */
1354static void hpsa_scsi_update_entry(struct ctlr_info *h, 1361static void hpsa_scsi_update_entry(struct ctlr_info *h,
1355 int entry, struct hpsa_scsi_dev_t *new_entry) 1362 int entry, struct hpsa_scsi_dev_t *new_entry)
1356{ 1363{
1357 int offload_enabled;
1358 /* assumes h->devlock is held */ 1364 /* assumes h->devlock is held */
1359 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); 1365 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1360 1366
1361 /* Raid level changed. */ 1367 /* Raid level changed. */
1362 h->dev[entry]->raid_level = new_entry->raid_level; 1368 h->dev[entry]->raid_level = new_entry->raid_level;
1363 1369
1370 /*
1371 * ioacccel_handle may have changed for a dual domain disk
1372 */
1373 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1374
1364 /* Raid offload parameters changed. Careful about the ordering. */ 1375 /* Raid offload parameters changed. Careful about the ordering. */
1365 if (new_entry->offload_config && new_entry->offload_enabled) { 1376 if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
1366 /* 1377 /*
1367 * if drive is newly offload_enabled, we want to copy the 1378 * if drive is newly offload_enabled, we want to copy the
1368 * raid map data first. If previously offload_enabled and 1379 * raid map data first. If previously offload_enabled and
1369 * offload_config were set, raid map data had better be 1380 * offload_config were set, raid map data had better be
1370 * the same as it was before. if raid map data is changed 1381 * the same as it was before. If raid map data has changed
1371 * then it had better be the case that 1382 * then it had better be the case that
1372 * h->dev[entry]->offload_enabled is currently 0. 1383 * h->dev[entry]->offload_enabled is currently 0.
1373 */ 1384 */
1374 h->dev[entry]->raid_map = new_entry->raid_map; 1385 h->dev[entry]->raid_map = new_entry->raid_map;
1375 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; 1386 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1376 } 1387 }
1377 if (new_entry->hba_ioaccel_enabled) { 1388 if (new_entry->offload_to_be_enabled) {
1378 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; 1389 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1379 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ 1390 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1380 } 1391 }
@@ -1385,17 +1396,18 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h,
1385 1396
1386 /* 1397 /*
1387 * We can turn off ioaccel offload now, but need to delay turning 1398 * We can turn off ioaccel offload now, but need to delay turning
1388 * it on until we can update h->dev[entry]->phys_disk[], but we 1399 * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
1389 * can't do that until all the devices are updated. 1400 * can't do that until all the devices are updated.
1390 */ 1401 */
1391 h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled; 1402 h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1392 if (!new_entry->offload_enabled) 1403
1404 /*
1405 * turn ioaccel off immediately if told to do so.
1406 */
1407 if (!new_entry->offload_to_be_enabled)
1393 h->dev[entry]->offload_enabled = 0; 1408 h->dev[entry]->offload_enabled = 0;
1394 1409
1395 offload_enabled = h->dev[entry]->offload_enabled;
1396 h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
1397 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); 1410 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1398 h->dev[entry]->offload_enabled = offload_enabled;
1399} 1411}
1400 1412
1401/* Replace an entry from h->dev[] array. */ 1413/* Replace an entry from h->dev[] array. */
@@ -1421,9 +1433,8 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1421 h->dev[entry] = new_entry; 1433 h->dev[entry] = new_entry;
1422 added[*nadded] = new_entry; 1434 added[*nadded] = new_entry;
1423 (*nadded)++; 1435 (*nadded)++;
1436
1424 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); 1437 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1425 new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1426 new_entry->offload_enabled = 0;
1427} 1438}
1428 1439
1429/* Remove an entry from h->dev[] array. */ 1440/* Remove an entry from h->dev[] array. */
@@ -1513,11 +1524,22 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1513 return 1; 1524 return 1;
1514 if (dev1->offload_config != dev2->offload_config) 1525 if (dev1->offload_config != dev2->offload_config)
1515 return 1; 1526 return 1;
1516 if (dev1->offload_enabled != dev2->offload_enabled) 1527 if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1517 return 1; 1528 return 1;
1518 if (!is_logical_dev_addr_mode(dev1->scsi3addr)) 1529 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1519 if (dev1->queue_depth != dev2->queue_depth) 1530 if (dev1->queue_depth != dev2->queue_depth)
1520 return 1; 1531 return 1;
1532 /*
1533 * This can happen for dual domain devices. An active
1534 * path change causes the ioaccel handle to change
1535 *
1536 * for example note the handle differences between p0 and p1
1537 * Device WWN ,WWN hash,Handle
1538 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
1539 * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004
1540 */
1541 if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1542 return 1;
1521 return 0; 1543 return 0;
1522} 1544}
1523 1545
@@ -1727,6 +1749,11 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1727 * be 0, but we'll turn it off here just in case 1749 * be 0, but we'll turn it off here just in case
1728 */ 1750 */
1729 if (!logical_drive->phys_disk[i]) { 1751 if (!logical_drive->phys_disk[i]) {
1752 dev_warn(&h->pdev->dev,
1753 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1754 __func__,
1755 h->scsi_host->host_no, logical_drive->bus,
1756 logical_drive->target, logical_drive->lun);
1730 logical_drive->offload_enabled = 0; 1757 logical_drive->offload_enabled = 0;
1731 logical_drive->offload_to_be_enabled = 0; 1758 logical_drive->offload_to_be_enabled = 0;
1732 logical_drive->queue_depth = 8; 1759 logical_drive->queue_depth = 8;
@@ -1738,8 +1765,12 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1738 * way too high for partial stripe writes 1765 * way too high for partial stripe writes
1739 */ 1766 */
1740 logical_drive->queue_depth = qdepth; 1767 logical_drive->queue_depth = qdepth;
1741 else 1768 else {
1742 logical_drive->queue_depth = h->nr_cmds; 1769 if (logical_drive->external)
1770 logical_drive->queue_depth = EXTERNAL_QD;
1771 else
1772 logical_drive->queue_depth = h->nr_cmds;
1773 }
1743} 1774}
1744 1775
1745static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, 1776static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
@@ -1759,13 +1790,24 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1759 /* 1790 /*
1760 * If offload is currently enabled, the RAID map and 1791 * If offload is currently enabled, the RAID map and
1761 * phys_disk[] assignment *better* not be changing 1792 * phys_disk[] assignment *better* not be changing
1762 * and since it isn't changing, we do not need to 1793 * because we would be changing ioaccel phsy_disk[] pointers
1763 * update it. 1794 * on a ioaccel volume processing I/O requests.
1795 *
1796 * If an ioaccel volume status changed, initially because it was
1797 * re-configured and thus underwent a transformation, or
1798 * a drive failed, we would have received a state change
1799 * request and ioaccel should have been turned off. When the
1800 * transformation completes, we get another state change
1801 * request to turn ioaccel back on. In this case, we need
1802 * to update the ioaccel information.
1803 *
1804 * Thus: If it is not currently enabled, but will be after
1805 * the scan completes, make sure the ioaccel pointers
1806 * are up to date.
1764 */ 1807 */
1765 if (dev[i]->offload_enabled)
1766 continue;
1767 1808
1768 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); 1809 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1810 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1769 } 1811 }
1770} 1812}
1771 1813
@@ -1823,11 +1865,13 @@ static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1823 break; 1865 break;
1824 if (++waits > 20) 1866 if (++waits > 20)
1825 break; 1867 break;
1868 msleep(1000);
1869 }
1870
1871 if (waits > 20)
1826 dev_warn(&h->pdev->dev, 1872 dev_warn(&h->pdev->dev,
1827 "%s: removing device with %d outstanding commands!\n", 1873 "%s: removing device with %d outstanding commands!\n",
1828 __func__, cmds); 1874 __func__, cmds);
1829 msleep(1000);
1830 }
1831} 1875}
1832 1876
1833static void hpsa_remove_device(struct ctlr_info *h, 1877static void hpsa_remove_device(struct ctlr_info *h,
@@ -1838,6 +1882,12 @@ static void hpsa_remove_device(struct ctlr_info *h,
1838 if (!h->scsi_host) 1882 if (!h->scsi_host)
1839 return; 1883 return;
1840 1884
1885 /*
1886 * Allow for commands to drain
1887 */
1888 device->removed = 1;
1889 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1890
1841 if (is_logical_device(device)) { /* RAID */ 1891 if (is_logical_device(device)) { /* RAID */
1842 sdev = scsi_device_lookup(h->scsi_host, device->bus, 1892 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1843 device->target, device->lun); 1893 device->target, device->lun);
@@ -1855,9 +1905,6 @@ static void hpsa_remove_device(struct ctlr_info *h,
1855 } 1905 }
1856 } else { /* HBA */ 1906 } else { /* HBA */
1857 1907
1858 device->removed = 1;
1859 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1860
1861 hpsa_remove_sas_device(device); 1908 hpsa_remove_sas_device(device);
1862 } 1909 }
1863} 1910}
@@ -1965,8 +2012,13 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1965 } 2012 }
1966 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); 2013 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1967 2014
1968 /* Now that h->dev[]->phys_disk[] is coherent, we can enable 2015 /*
2016 * Now that h->dev[]->phys_disk[] is coherent, we can enable
1969 * any logical drives that need it enabled. 2017 * any logical drives that need it enabled.
2018 *
2019 * The raid map should be current by now.
2020 *
2021 * We are updating the device list used for I/O requests.
1970 */ 2022 */
1971 for (i = 0; i < h->ndevices; i++) { 2023 for (i = 0; i < h->ndevices; i++) {
1972 if (h->dev[i] == NULL) 2024 if (h->dev[i] == NULL)
@@ -2441,7 +2493,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
2441 2493
2442 /* 2494 /*
2443 * Any RAID offload error results in retry which will use 2495 * Any RAID offload error results in retry which will use
2444 * the normal I/O path so the controller can handle whatever's 2496 * the normal I/O path so the controller can handle whatever is
2445 * wrong. 2497 * wrong.
2446 */ 2498 */
2447 if (is_logical_device(dev) && 2499 if (is_logical_device(dev) &&
@@ -2913,6 +2965,57 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2913 } 2965 }
2914} 2966}
2915 2967
2968static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
2969 u8 page, u8 *buf, size_t bufsize)
2970{
2971 int rc = IO_OK;
2972 struct CommandList *c;
2973 struct ErrorInfo *ei;
2974
2975 c = cmd_alloc(h);
2976 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
2977 page, scsi3addr, TYPE_CMD)) {
2978 rc = -1;
2979 goto out;
2980 }
2981 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2982 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2983 if (rc)
2984 goto out;
2985 ei = c->err_info;
2986 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2987 hpsa_scsi_interpret_error(h, c);
2988 rc = -1;
2989 }
2990out:
2991 cmd_free(h, c);
2992 return rc;
2993}
2994
2995static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
2996 u8 *scsi3addr)
2997{
2998 u8 *buf;
2999 u64 sa = 0;
3000 int rc = 0;
3001
3002 buf = kzalloc(1024, GFP_KERNEL);
3003 if (!buf)
3004 return 0;
3005
3006 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
3007 buf, 1024);
3008
3009 if (rc)
3010 goto out;
3011
3012 sa = get_unaligned_be64(buf+12);
3013
3014out:
3015 kfree(buf);
3016 return sa;
3017}
3018
2916static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, 3019static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2917 u16 page, unsigned char *buf, 3020 u16 page, unsigned char *buf,
2918 unsigned char bufsize) 3021 unsigned char bufsize)
@@ -2929,7 +3032,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2929 goto out; 3032 goto out;
2930 } 3033 }
2931 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3034 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2932 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 3035 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2933 if (rc) 3036 if (rc)
2934 goto out; 3037 goto out;
2935 ei = c->err_info; 3038 ei = c->err_info;
@@ -3213,7 +3316,7 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
3213 return -1; 3316 return -1;
3214 } 3317 }
3215 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3318 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3216 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 3319 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3217 if (rc) 3320 if (rc)
3218 goto out; 3321 goto out;
3219 ei = c->err_info; 3322 ei = c->err_info;
@@ -3256,7 +3359,7 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3256 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 3359 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3257 3360
3258 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3361 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3259 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 3362 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3260 if (rc) 3363 if (rc)
3261 goto out; 3364 goto out;
3262 ei = c->err_info; 3365 ei = c->err_info;
@@ -3284,7 +3387,7 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
3284 goto out; 3387 goto out;
3285 3388
3286 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3389 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3287 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 3390 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3288 if (rc) 3391 if (rc)
3289 goto out; 3392 goto out;
3290 ei = c->err_info; 3393 ei = c->err_info;
@@ -3315,7 +3418,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3315 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; 3418 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3316 3419
3317 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, 3420 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3318 DEFAULT_TIMEOUT); 3421 NO_TIMEOUT);
3319 ei = c->err_info; 3422 ei = c->err_info;
3320 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { 3423 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3321 hpsa_scsi_interpret_error(h, c); 3424 hpsa_scsi_interpret_error(h, c);
@@ -3348,6 +3451,9 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
3348 3451
3349 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); 3452 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3350 3453
3454 encl_dev->sas_address =
3455 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3456
3351 if (encl_dev->target == -1 || encl_dev->lun == -1) { 3457 if (encl_dev->target == -1 || encl_dev->lun == -1) {
3352 rc = IO_OK; 3458 rc = IO_OK;
3353 goto out; 3459 goto out;
@@ -3388,7 +3494,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
3388 c->Request.CDB[5] = 0; 3494 c->Request.CDB[5] = 0;
3389 3495
3390 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, 3496 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3391 DEFAULT_TIMEOUT); 3497 NO_TIMEOUT);
3392 if (rc) 3498 if (rc)
3393 goto out; 3499 goto out;
3394 3500
@@ -3472,6 +3578,30 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3472 dev->sas_address = sa; 3578 dev->sas_address = sa;
3473} 3579}
3474 3580
3581static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3582 struct ReportExtendedLUNdata *physdev)
3583{
3584 u32 nphysicals;
3585 int i;
3586
3587 if (h->discovery_polling)
3588 return;
3589
3590 nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3591
3592 for (i = 0; i < nphysicals; i++) {
3593 if (physdev->LUN[i].device_type ==
3594 BMIC_DEVICE_TYPE_CONTROLLER
3595 && !is_hba_lunid(physdev->LUN[i].lunid)) {
3596 dev_info(&h->pdev->dev,
3597 "External controller present, activate discovery polling and disable rld caching\n");
3598 hpsa_disable_rld_caching(h);
3599 h->discovery_polling = 1;
3600 break;
3601 }
3602 }
3603}
3604
3475/* Get a device id from inquiry page 0x83 */ 3605/* Get a device id from inquiry page 0x83 */
3476static bool hpsa_vpd_page_supported(struct ctlr_info *h, 3606static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3477 unsigned char scsi3addr[], u8 page) 3607 unsigned char scsi3addr[], u8 page)
@@ -3516,6 +3646,13 @@ exit_supported:
3516 return true; 3646 return true;
3517} 3647}
3518 3648
3649/*
3650 * Called during a scan operation.
3651 * Sets ioaccel status on the new device list, not the existing device list
3652 *
3653 * The device list used during I/O will be updated later in
3654 * adjust_hpsa_scsi_table.
3655 */
3519static void hpsa_get_ioaccel_status(struct ctlr_info *h, 3656static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3520 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) 3657 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3521{ 3658{
@@ -3544,12 +3681,12 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3544 this_device->offload_config = 3681 this_device->offload_config =
3545 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); 3682 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3546 if (this_device->offload_config) { 3683 if (this_device->offload_config) {
3547 this_device->offload_enabled = 3684 this_device->offload_to_be_enabled =
3548 !!(ioaccel_status & OFFLOAD_ENABLED_BIT); 3685 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3549 if (hpsa_get_raid_map(h, scsi3addr, this_device)) 3686 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3550 this_device->offload_enabled = 0; 3687 this_device->offload_to_be_enabled = 0;
3551 } 3688 }
3552 this_device->offload_to_be_enabled = this_device->offload_enabled; 3689
3553out: 3690out:
3554 kfree(buf); 3691 kfree(buf);
3555 return; 3692 return;
@@ -3604,7 +3741,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3604 if (extended_response) 3741 if (extended_response)
3605 c->Request.CDB[1] = extended_response; 3742 c->Request.CDB[1] = extended_response;
3606 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 3743 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3607 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 3744 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3608 if (rc) 3745 if (rc)
3609 goto out; 3746 goto out;
3610 ei = c->err_info; 3747 ei = c->err_info;
@@ -3739,7 +3876,7 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3739 3876
3740 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); 3877 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3741 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 3878 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3742 DEFAULT_TIMEOUT); 3879 NO_TIMEOUT);
3743 if (rc) { 3880 if (rc) {
3744 cmd_free(h, c); 3881 cmd_free(h, c);
3745 return HPSA_VPD_LV_STATUS_UNSUPPORTED; 3882 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
@@ -4228,6 +4365,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
4228 */ 4365 */
4229 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; 4366 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4230 4367
4368 hpsa_ext_ctrl_present(h, physdev_list);
4369
4231 /* Allocate the per device structures */ 4370 /* Allocate the per device structures */
4232 for (i = 0; i < ndevs_to_allocate; i++) { 4371 for (i = 0; i < ndevs_to_allocate; i++) {
4233 if (i >= HPSA_MAX_DEVICES) { 4372 if (i >= HPSA_MAX_DEVICES) {
@@ -4258,6 +4397,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
4258 int phys_dev_index = i - (raid_ctlr_position == 0); 4397 int phys_dev_index = i - (raid_ctlr_position == 0);
4259 bool skip_device = false; 4398 bool skip_device = false;
4260 4399
4400 memset(tmpdevice, 0, sizeof(*tmpdevice));
4401
4261 physical_device = i < nphysicals + (raid_ctlr_position == 0); 4402 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4262 4403
4263 /* Figure out where the LUN ID info is coming from */ 4404 /* Figure out where the LUN ID info is coming from */
@@ -4279,7 +4420,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
4279 continue; 4420 continue;
4280 } 4421 }
4281 4422
4282 /* Get device type, vendor, model, device id */ 4423 /* Get device type, vendor, model, device id, raid_map */
4283 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 4424 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4284 &is_OBDR); 4425 &is_OBDR);
4285 if (rc == -ENOMEM) { 4426 if (rc == -ENOMEM) {
@@ -4296,18 +4437,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
4296 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 4437 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4297 this_device = currentsd[ncurrent]; 4438 this_device = currentsd[ncurrent];
4298 4439
4299 /* Turn on discovery_polling if there are ext target devices.
4300 * Event-based change notification is unreliable for those.
4301 */
4302 if (!h->discovery_polling) {
4303 if (tmpdevice->external) {
4304 h->discovery_polling = 1;
4305 dev_info(&h->pdev->dev,
4306 "External target, activate discovery polling.\n");
4307 }
4308 }
4309
4310
4311 *this_device = *tmpdevice; 4440 *this_device = *tmpdevice;
4312 this_device->physical_device = physical_device; 4441 this_device->physical_device = physical_device;
4313 4442
@@ -6496,6 +6625,17 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6496 c->Request.CDB[0] = HPSA_INQUIRY; 6625 c->Request.CDB[0] = HPSA_INQUIRY;
6497 c->Request.CDB[4] = size & 0xFF; 6626 c->Request.CDB[4] = size & 0xFF;
6498 break; 6627 break;
6628 case RECEIVE_DIAGNOSTIC:
6629 c->Request.CDBLen = 6;
6630 c->Request.type_attr_dir =
6631 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6632 c->Request.Timeout = 0;
6633 c->Request.CDB[0] = cmd;
6634 c->Request.CDB[1] = 1;
6635 c->Request.CDB[2] = 1;
6636 c->Request.CDB[3] = (size >> 8) & 0xFF;
6637 c->Request.CDB[4] = size & 0xFF;
6638 break;
6499 case HPSA_REPORT_LOG: 6639 case HPSA_REPORT_LOG:
6500 case HPSA_REPORT_PHYS: 6640 case HPSA_REPORT_PHYS:
6501 /* Talking to controller so It's a physical command 6641 /* Talking to controller so It's a physical command
@@ -8007,6 +8147,10 @@ static void controller_lockup_detected(struct ctlr_info *h)
8007 spin_unlock_irqrestore(&h->lock, flags); 8147 spin_unlock_irqrestore(&h->lock, flags);
8008 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", 8148 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
8009 lockup_detected, h->heartbeat_sample_interval / HZ); 8149 lockup_detected, h->heartbeat_sample_interval / HZ);
8150 if (lockup_detected == 0xffff0000) {
8151 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8152 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8153 }
8010 pci_disable_device(h->pdev); 8154 pci_disable_device(h->pdev);
8011 fail_all_outstanding_cmds(h); 8155 fail_all_outstanding_cmds(h);
8012} 8156}
@@ -8047,9 +8191,79 @@ static int detect_controller_lockup(struct ctlr_info *h)
8047 return false; 8191 return false;
8048} 8192}
8049 8193
8050static void hpsa_ack_ctlr_events(struct ctlr_info *h) 8194/*
8195 * Set ioaccel status for all ioaccel volumes.
8196 *
8197 * Called from monitor controller worker (hpsa_event_monitor_worker)
8198 *
8199 * A Volume (or Volumes that comprise an Array set may be undergoing a
8200 * transformation, so we will be turning off ioaccel for all volumes that
8201 * make up the Array.
8202 */
8203static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8051{ 8204{
8205 int rc;
8052 int i; 8206 int i;
8207 u8 ioaccel_status;
8208 unsigned char *buf;
8209 struct hpsa_scsi_dev_t *device;
8210
8211 if (!h)
8212 return;
8213
8214 buf = kmalloc(64, GFP_KERNEL);
8215 if (!buf)
8216 return;
8217
8218 /*
8219 * Run through current device list used during I/O requests.
8220 */
8221 for (i = 0; i < h->ndevices; i++) {
8222 device = h->dev[i];
8223
8224 if (!device)
8225 continue;
8226 if (!device->scsi3addr)
8227 continue;
8228 if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8229 HPSA_VPD_LV_IOACCEL_STATUS))
8230 continue;
8231
8232 memset(buf, 0, 64);
8233
8234 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8235 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8236 buf, 64);
8237 if (rc != 0)
8238 continue;
8239
8240 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8241 device->offload_config =
8242 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8243 if (device->offload_config)
8244 device->offload_to_be_enabled =
8245 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8246
8247 /*
8248 * Immediately turn off ioaccel for any volume the
8249 * controller tells us to. Some of the reasons could be:
8250 * transformation - change to the LVs of an Array.
8251 * degraded volume - component failure
8252 *
8253 * If ioaccel is to be re-enabled, re-enable later during the
8254 * scan operation so the driver can get a fresh raidmap
8255 * before turning ioaccel back on.
8256 *
8257 */
8258 if (!device->offload_to_be_enabled)
8259 device->offload_enabled = 0;
8260 }
8261
8262 kfree(buf);
8263}
8264
8265static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8266{
8053 char *event_type; 8267 char *event_type;
8054 8268
8055 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) 8269 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
@@ -8067,10 +8281,7 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8067 event_type = "configuration change"; 8281 event_type = "configuration change";
8068 /* Stop sending new RAID offload reqs via the IO accelerator */ 8282 /* Stop sending new RAID offload reqs via the IO accelerator */
8069 scsi_block_requests(h->scsi_host); 8283 scsi_block_requests(h->scsi_host);
8070 for (i = 0; i < h->ndevices; i++) { 8284 hpsa_set_ioaccel_status(h);
8071 h->dev[i]->offload_enabled = 0;
8072 h->dev[i]->offload_to_be_enabled = 0;
8073 }
8074 hpsa_drain_accel_commands(h); 8285 hpsa_drain_accel_commands(h);
8075 /* Set 'accelerator path config change' bit */ 8286 /* Set 'accelerator path config change' bit */
8076 dev_warn(&h->pdev->dev, 8287 dev_warn(&h->pdev->dev,
@@ -8087,10 +8298,6 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8087 writel(h->events, &(h->cfgtable->clear_event_notify)); 8298 writel(h->events, &(h->cfgtable->clear_event_notify));
8088 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); 8299 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8089 hpsa_wait_for_clear_event_notify_ack(h); 8300 hpsa_wait_for_clear_event_notify_ack(h);
8090#if 0
8091 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8092 hpsa_wait_for_mode_change_ack(h);
8093#endif
8094 } 8301 }
8095 return; 8302 return;
8096} 8303}
@@ -8241,7 +8448,6 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8241 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) { 8448 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8242 hpsa_perform_rescan(h); 8449 hpsa_perform_rescan(h);
8243 } else if (h->discovery_polling) { 8450 } else if (h->discovery_polling) {
8244 hpsa_disable_rld_caching(h);
8245 if (hpsa_luns_changed(h)) { 8451 if (hpsa_luns_changed(h)) {
8246 dev_info(&h->pdev->dev, 8452 dev_info(&h->pdev->dev,
8247 "driver discovery polling rescan.\n"); 8453 "driver discovery polling rescan.\n");
@@ -8601,7 +8807,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
8601 goto errout; 8807 goto errout;
8602 8808
8603 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8809 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8604 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 8810 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
8605 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8811 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8606 goto errout; 8812 goto errout;
8607 8813
@@ -8613,7 +8819,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
8613 goto errout; 8819 goto errout;
8614 8820
8615 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8821 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8616 PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); 8822 PCI_DMA_TODEVICE, NO_TIMEOUT);
8617 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8823 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8618 goto errout; 8824 goto errout;
8619 8825
@@ -8623,7 +8829,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
8623 goto errout; 8829 goto errout;
8624 8830
8625 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, 8831 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8626 PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); 8832 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
8627 if ((rc != 0) || (c->err_info->CommandStatus != 0)) 8833 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8628 goto errout; 8834 goto errout;
8629 8835
@@ -8684,6 +8890,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
8684 destroy_workqueue(h->rescan_ctlr_wq); 8890 destroy_workqueue(h->rescan_ctlr_wq);
8685 destroy_workqueue(h->resubmit_wq); 8891 destroy_workqueue(h->resubmit_wq);
8686 8892
8893 hpsa_delete_sas_host(h);
8894
8687 /* 8895 /*
8688 * Call before disabling interrupts. 8896 * Call before disabling interrupts.
8689 * scsi_remove_host can trigger I/O operations especially 8897 * scsi_remove_host can trigger I/O operations especially
@@ -8718,8 +8926,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
8718 h->lockup_detected = NULL; /* init_one 2 */ 8926 h->lockup_detected = NULL; /* init_one 2 */
8719 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ 8927 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
8720 8928
8721 hpsa_delete_sas_host(h);
8722
8723 kfree(h); /* init_one 1 */ 8929 kfree(h); /* init_one 1 */
8724} 8930}
8725 8931
@@ -9207,9 +9413,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9207 struct sas_phy *phy = hpsa_sas_phy->phy; 9413 struct sas_phy *phy = hpsa_sas_phy->phy;
9208 9414
9209 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); 9415 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9210 sas_phy_free(phy);
9211 if (hpsa_sas_phy->added_to_port) 9416 if (hpsa_sas_phy->added_to_port)
9212 list_del(&hpsa_sas_phy->phy_list_entry); 9417 list_del(&hpsa_sas_phy->phy_list_entry);
9418 sas_phy_delete(phy);
9213 kfree(hpsa_sas_phy); 9419 kfree(hpsa_sas_phy);
9214} 9420}
9215 9421
@@ -9367,7 +9573,7 @@ static int hpsa_add_sas_host(struct ctlr_info *h)
9367 struct hpsa_sas_port *hpsa_sas_port; 9573 struct hpsa_sas_port *hpsa_sas_port;
9368 struct hpsa_sas_phy *hpsa_sas_phy; 9574 struct hpsa_sas_phy *hpsa_sas_phy;
9369 9575
9370 parent_dev = &h->scsi_host->shost_gendev; 9576 parent_dev = &h->scsi_host->shost_dev;
9371 9577
9372 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev); 9578 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9373 if (!hpsa_sas_node) 9579 if (!hpsa_sas_node)
@@ -9458,7 +9664,7 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy)
9458static int 9664static int
9459hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) 9665hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9460{ 9666{
9461 *identifier = 0; 9667 *identifier = rphy->identify.sas_address;
9462 return 0; 9668 return 0;
9463} 9669}
9464 9670
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 078afe448115..21a726e2eec6 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -142,6 +142,7 @@
142#define DOORBELL_CTLR_RESET 0x00000004l 142#define DOORBELL_CTLR_RESET 0x00000004l
143#define DOORBELL_CTLR_RESET2 0x00000020l 143#define DOORBELL_CTLR_RESET2 0x00000020l
144#define DOORBELL_CLEAR_EVENTS 0x00000040l 144#define DOORBELL_CLEAR_EVENTS 0x00000040l
145#define DOORBELL_GENERATE_CHKPT 0x00000080l
145 146
146#define CFGTBL_Trans_Simple 0x00000002l 147#define CFGTBL_Trans_Simple 0x00000002l
147#define CFGTBL_Trans_Performant 0x00000004l 148#define CFGTBL_Trans_Performant 0x00000004l
@@ -779,6 +780,8 @@ struct bmic_identify_physical_device {
779 u8 phys_bay_in_box; /* phys drv bay this drive resides */ 780 u8 phys_bay_in_box; /* phys drv bay this drive resides */
780 __le32 rpm; /* Drive rotational speed in rpm */ 781 __le32 rpm; /* Drive rotational speed in rpm */
781 u8 device_type; /* type of drive */ 782 u8 device_type; /* type of drive */
783#define BMIC_DEVICE_TYPE_CONTROLLER 0x07
784
782 u8 sata_version; /* only valid when drive_type is SATA */ 785 u8 sata_version; /* only valid when drive_type is SATA */
783 __le64 big_total_block_count; 786 __le64 big_total_block_count;
784 __le64 ris_starting_lba; 787 __le64 ris_starting_lba;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 2fd0ec651170..5da46052e179 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -2083,7 +2083,6 @@ int fc_lport_bsg_request(struct bsg_job *job)
2083{ 2083{
2084 struct fc_bsg_request *bsg_request = job->request; 2084 struct fc_bsg_request *bsg_request = job->request;
2085 struct fc_bsg_reply *bsg_reply = job->reply; 2085 struct fc_bsg_reply *bsg_reply = job->reply;
2086 struct request *rsp = job->req->next_rq;
2087 struct Scsi_Host *shost = fc_bsg_to_shost(job); 2086 struct Scsi_Host *shost = fc_bsg_to_shost(job);
2088 struct fc_lport *lport = shost_priv(shost); 2087 struct fc_lport *lport = shost_priv(shost);
2089 struct fc_rport *rport; 2088 struct fc_rport *rport;
@@ -2092,8 +2091,6 @@ int fc_lport_bsg_request(struct bsg_job *job)
2092 u32 did, tov; 2091 u32 did, tov;
2093 2092
2094 bsg_reply->reply_payload_rcv_len = 0; 2093 bsg_reply->reply_payload_rcv_len = 0;
2095 if (rsp)
2096 scsi_req(rsp)->resid_len = job->reply_payload.payload_len;
2097 2094
2098 mutex_lock(&lport->lp_mutex); 2095 mutex_lock(&lport->lp_mutex);
2099 2096
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index cd6f99c1ae7e..7e5d262e7a7d 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -24,10 +24,6 @@
24 24
25#include "sas_dump.h" 25#include "sas_dump.h"
26 26
27static const char *sas_hae_str[] = {
28 [0] = "HAE_RESET",
29};
30
31static const char *sas_porte_str[] = { 27static const char *sas_porte_str[] = {
32 [0] = "PORTE_BYTES_DMAED", 28 [0] = "PORTE_BYTES_DMAED",
33 [1] = "PORTE_BROADCAST_RCVD", 29 [1] = "PORTE_BROADCAST_RCVD",
@@ -53,12 +49,6 @@ void sas_dprint_phye(int phyid, enum phy_event pe)
53 SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]); 49 SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
54} 50}
55 51
56void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
57{
58 SAS_DPRINTK("ha %s: %s event\n", dev_name(sas_ha->dev),
59 sas_hae_str[he]);
60}
61
62void sas_dump_port(struct asd_sas_port *port) 52void sas_dump_port(struct asd_sas_port *port)
63{ 53{
64 SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class); 54 SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
index 800e4c69093f..6aaee6b0fcdb 100644
--- a/drivers/scsi/libsas/sas_dump.h
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -26,5 +26,4 @@
26 26
27void sas_dprint_porte(int phyid, enum port_event pe); 27void sas_dprint_porte(int phyid, enum port_event pe);
28void sas_dprint_phye(int phyid, enum phy_event pe); 28void sas_dprint_phye(int phyid, enum phy_event pe);
29void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
30void sas_dump_port(struct asd_sas_port *port); 29void sas_dump_port(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index c0d0d979b76d..0bb9eefc08c8 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -37,7 +37,7 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
37 if (test_bit(SAS_HA_DRAINING, &ha->state)) { 37 if (test_bit(SAS_HA_DRAINING, &ha->state)) {
38 /* add it to the defer list, if not already pending */ 38 /* add it to the defer list, if not already pending */
39 if (list_empty(&sw->drain_node)) 39 if (list_empty(&sw->drain_node))
40 list_add(&sw->drain_node, &ha->defer_q); 40 list_add_tail(&sw->drain_node, &ha->defer_q);
41 } else 41 } else
42 rc = scsi_queue_work(ha->core.shost, &sw->work); 42 rc = scsi_queue_work(ha->core.shost, &sw->work);
43 43
@@ -124,15 +124,7 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
124 mutex_unlock(&ha->disco_mutex); 124 mutex_unlock(&ha->disco_mutex);
125} 125}
126 126
127static int notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) 127static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
128{
129 BUG_ON(event >= HA_NUM_EVENTS);
130
131 return sas_queue_event(event, &sas_ha->pending,
132 &sas_ha->ha_events[event].work, sas_ha);
133}
134
135static int notify_port_event(struct asd_sas_phy *phy, enum port_event event)
136{ 128{
137 struct sas_ha_struct *ha = phy->ha; 129 struct sas_ha_struct *ha = phy->ha;
138 130
@@ -154,19 +146,7 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
154 146
155int sas_init_events(struct sas_ha_struct *sas_ha) 147int sas_init_events(struct sas_ha_struct *sas_ha)
156{ 148{
157 static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = { 149 sas_ha->notify_port_event = sas_notify_port_event;
158 [HAE_RESET] = sas_hae_reset,
159 };
160
161 int i;
162
163 for (i = 0; i < HA_NUM_EVENTS; i++) {
164 INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
165 sas_ha->ha_events[i].ha = sas_ha;
166 }
167
168 sas_ha->notify_ha_event = notify_ha_event;
169 sas_ha->notify_port_event = notify_port_event;
170 sas_ha->notify_phy_event = sas_notify_phy_event; 150 sas_ha->notify_phy_event = sas_notify_phy_event;
171 151
172 return 0; 152 return 0;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 681fcb837354..64fa6f53cb8b 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -107,17 +107,6 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
107 hashed[2] = r & 0xFF; 107 hashed[2] = r & 0xFF;
108} 108}
109 109
110
111/* ---------- HA events ---------- */
112
113void sas_hae_reset(struct work_struct *work)
114{
115 struct sas_ha_event *ev = to_sas_ha_event(work);
116 struct sas_ha_struct *ha = ev->ha;
117
118 clear_bit(HAE_RESET, &ha->pending);
119}
120
121int sas_register_ha(struct sas_ha_struct *sas_ha) 110int sas_register_ha(struct sas_ha_struct *sas_ha)
122{ 111{
123 int error = 0; 112 int error = 0;
@@ -155,7 +144,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
155 INIT_LIST_HEAD(&sas_ha->eh_ata_q); 144 INIT_LIST_HEAD(&sas_ha->eh_ata_q);
156 145
157 return 0; 146 return 0;
158
159Undo_ports: 147Undo_ports:
160 sas_unregister_ports(sas_ha); 148 sas_unregister_ports(sas_ha);
161Undo_phys: 149Undo_phys:
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8eb3f96fe068..231302273257 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -23,6 +23,7 @@
23 23
24#include <scsi/scsi_host.h> 24#include <scsi/scsi_host.h>
25#include <linux/ktime.h> 25#include <linux/ktime.h>
26#include <linux/workqueue.h>
26 27
27#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) 28#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
28#define CONFIG_SCSI_LPFC_DEBUG_FS 29#define CONFIG_SCSI_LPFC_DEBUG_FS
@@ -653,6 +654,8 @@ struct lpfc_hba {
653 /* SLI4 specific HBA data structure */ 654 /* SLI4 specific HBA data structure */
654 struct lpfc_sli4_hba sli4_hba; 655 struct lpfc_sli4_hba sli4_hba;
655 656
657 struct workqueue_struct *wq;
658
656 struct lpfc_sli sli; 659 struct lpfc_sli sli;
657 uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */ 660 uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
658 uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */ 661 uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3e02bc3a7c3f..82f6e219ee34 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3134,7 +3134,8 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3134 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3134 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3135 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); 3135 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3136 3136
3137 return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max); 3137 return snprintf(buf, PAGE_SIZE, "%d\n",
3138 pring ? pring->txq_max : 0);
3138} 3139}
3139 3140
3140static DEVICE_ATTR(txq_hw, S_IRUGO, 3141static DEVICE_ATTR(txq_hw, S_IRUGO,
@@ -3147,7 +3148,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
3147 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; 3148 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3148 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); 3149 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3149 3150
3150 return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max); 3151 return snprintf(buf, PAGE_SIZE, "%d\n",
3152 pring ? pring->txcmplq_max : 0);
3151} 3153}
3152 3154
3153static DEVICE_ATTR(txcmplq_hw, S_IRUGO, 3155static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
@@ -3380,7 +3382,7 @@ LPFC_ATTR_R(nvmet_mrq,
3380 */ 3382 */
3381LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP, 3383LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
3382 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, 3384 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
3383 "Define fc4 type to register with fabric."); 3385 "Enable FC4 Protocol support - FCP / NVME");
3384 3386
3385/* 3387/*
3386 * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME 3388 * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
@@ -3396,7 +3398,7 @@ LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
3396 * percentage will go to NVME. 3398 * percentage will go to NVME.
3397 */ 3399 */
3398LPFC_ATTR_R(xri_split, 50, 10, 90, 3400LPFC_ATTR_R(xri_split, 50, 10, 90,
3399 "Division of XRI resources between SCSI and NVME"); 3401 "Percentage of FCP XRI resources versus NVME");
3400 3402
3401/* 3403/*
3402# lpfc_log_verbose: Only turn this flag on if you are willing to risk being 3404# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index fe9e1c079c20..d89816222b23 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2911,7 +2911,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2911 } 2911 }
2912 } 2912 }
2913 2913
2914 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { 2914 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
2915 ret_val = -ENOMEM; 2915 ret_val = -ENOMEM;
2916 goto err_post_rxbufs_exit; 2916 goto err_post_rxbufs_exit;
2917 } 2917 }
@@ -5421,6 +5421,8 @@ lpfc_bsg_timeout(struct bsg_job *job)
5421 struct lpfc_iocbq *check_iocb, *next_iocb; 5421 struct lpfc_iocbq *check_iocb, *next_iocb;
5422 5422
5423 pring = lpfc_phba_elsring(phba); 5423 pring = lpfc_phba_elsring(phba);
5424 if (unlikely(!pring))
5425 return -EIO;
5424 5426
5425 /* if job's driver data is NULL, the command completed or is in the 5427 /* if job's driver data is NULL, the command completed or is in the
5426 * the process of completing. In this case, return status to request 5428 * the process of completing. In this case, return status to request
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index d50c481ec41c..2bf5ad3b1512 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2227,7 +2227,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
2227 kfree(phba->nvmeio_trc); 2227 kfree(phba->nvmeio_trc);
2228 2228
2229 /* Allocate new trace buffer and initialize */ 2229 /* Allocate new trace buffer and initialize */
2230 phba->nvmeio_trc = kmalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) * 2230 phba->nvmeio_trc = kzalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) *
2231 sz), GFP_KERNEL); 2231 sz), GFP_KERNEL);
2232 if (!phba->nvmeio_trc) { 2232 if (!phba->nvmeio_trc) {
2233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2235,8 +2235,6 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
2235 "nvmeio_trc buffer\n"); 2235 "nvmeio_trc buffer\n");
2236 return -ENOMEM; 2236 return -ENOMEM;
2237 } 2237 }
2238 memset(phba->nvmeio_trc, 0,
2239 (sizeof(struct lpfc_debugfs_nvmeio_trc) * sz));
2240 atomic_set(&phba->nvmeio_trc_cnt, 0); 2238 atomic_set(&phba->nvmeio_trc_cnt, 0);
2241 phba->nvmeio_trc_on = 0; 2239 phba->nvmeio_trc_on = 0;
2242 phba->nvmeio_trc_output_idx = 0; 2240 phba->nvmeio_trc_output_idx = 0;
@@ -5457,7 +5455,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
5457 phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc; 5455 phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc;
5458 5456
5459 /* Allocate trace buffer and initialize */ 5457 /* Allocate trace buffer and initialize */
5460 phba->nvmeio_trc = kmalloc( 5458 phba->nvmeio_trc = kzalloc(
5461 (sizeof(struct lpfc_debugfs_nvmeio_trc) * 5459 (sizeof(struct lpfc_debugfs_nvmeio_trc) *
5462 phba->nvmeio_trc_size), GFP_KERNEL); 5460 phba->nvmeio_trc_size), GFP_KERNEL);
5463 5461
@@ -5467,9 +5465,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
5467 "nvmeio_trc buffer\n"); 5465 "nvmeio_trc buffer\n");
5468 goto nvmeio_off; 5466 goto nvmeio_off;
5469 } 5467 }
5470 memset(phba->nvmeio_trc, 0,
5471 (sizeof(struct lpfc_debugfs_nvmeio_trc) *
5472 phba->nvmeio_trc_size));
5473 phba->nvmeio_trc_on = 1; 5468 phba->nvmeio_trc_on = 1;
5474 phba->nvmeio_trc_output_idx = 0; 5469 phba->nvmeio_trc_output_idx = 0;
5475 phba->nvmeio_trc = NULL; 5470 phba->nvmeio_trc = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 0dd6c21433fe..39d5b146202e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5394,10 +5394,6 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
5394 (len + pcmd), vport, ndlp); 5394 (len + pcmd), vport, ndlp);
5395 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 5395 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
5396 &rdp_context->link_stat); 5396 &rdp_context->link_stat);
5397 /* Check if nport is logged, BZ190632 */
5398 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
5399 goto lpfc_skip_descriptor;
5400
5401 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 5397 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
5402 &rdp_context->link_stat, vport); 5398 &rdp_context->link_stat, vport);
5403 len += lpfc_rdp_res_oed_temp_desc(phba, 5399 len += lpfc_rdp_res_oed_temp_desc(phba,
@@ -5418,7 +5414,6 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
5418 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 5414 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
5419 rdp_context->page_a0, vport); 5415 rdp_context->page_a0, vport);
5420 5416
5421lpfc_skip_descriptor:
5422 rdp_res->length = cpu_to_be32(len - 8); 5417 rdp_res->length = cpu_to_be32(len - 8);
5423 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5418 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5424 5419
@@ -5540,7 +5535,6 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5540 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5535 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5541 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 5536 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
5542 5537
5543
5544 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5538 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5545 "2422 ELS RDP Request " 5539 "2422 ELS RDP Request "
5546 "dec len %d tag x%x port_id %d len %d\n", 5540 "dec len %d tag x%x port_id %d len %d\n",
@@ -5549,12 +5543,6 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5549 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 5543 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
5550 be32_to_cpu(rdp_req->nport_id_desc.length)); 5544 be32_to_cpu(rdp_req->nport_id_desc.length));
5551 5545
5552 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
5553 !phba->cfg_enable_SmartSAN) {
5554 rjt_err = LSRJT_UNABLE_TPC;
5555 rjt_expl = LSEXP_PORT_LOGIN_REQ;
5556 goto error;
5557 }
5558 if (sizeof(struct fc_rdp_nport_desc) != 5546 if (sizeof(struct fc_rdp_nport_desc) !=
5559 be32_to_cpu(rdp_req->rdp_des_length)) 5547 be32_to_cpu(rdp_req->rdp_des_length))
5560 goto rjt_logerr; 5548 goto rjt_logerr;
@@ -7430,6 +7418,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
7430 timeout = (uint32_t)(phba->fc_ratov << 1); 7418 timeout = (uint32_t)(phba->fc_ratov << 1);
7431 7419
7432 pring = lpfc_phba_elsring(phba); 7420 pring = lpfc_phba_elsring(phba);
7421 if (unlikely(!pring))
7422 return;
7433 7423
7434 if ((phba->pport->load_flag & FC_UNLOADING)) 7424 if ((phba->pport->load_flag & FC_UNLOADING))
7435 return; 7425 return;
@@ -9310,6 +9300,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
9310 9300
9311 pring = lpfc_phba_elsring(phba); 9301 pring = lpfc_phba_elsring(phba);
9312 9302
9303 if (unlikely(!pring))
9304 return;
9305
9313 spin_lock_irq(&phba->hbalock); 9306 spin_lock_irq(&phba->hbalock);
9314 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 9307 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
9315 list) { 9308 list) {
@@ -9416,7 +9409,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
9416 rxid, 1); 9409 rxid, 1);
9417 9410
9418 /* Check if TXQ queue needs to be serviced */ 9411 /* Check if TXQ queue needs to be serviced */
9419 if (!(list_empty(&pring->txq))) 9412 if (pring && !list_empty(&pring->txq))
9420 lpfc_worker_wake_up(phba); 9413 lpfc_worker_wake_up(phba);
9421 return; 9414 return;
9422 } 9415 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 8d491084eb5d..2bafde2b7cfe 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -3324,7 +3324,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3324 3324
3325 /* Unblock ELS traffic */ 3325 /* Unblock ELS traffic */
3326 pring = lpfc_phba_elsring(phba); 3326 pring = lpfc_phba_elsring(phba);
3327 pring->flag &= ~LPFC_STOP_IOCB_EVENT; 3327 if (pring)
3328 pring->flag &= ~LPFC_STOP_IOCB_EVENT;
3328 3329
3329 /* Check for error */ 3330 /* Check for error */
3330 if (mb->mbxStatus) { 3331 if (mb->mbxStatus) {
@@ -4981,7 +4982,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4981 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4982 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4982 if ((ndlp->nlp_flag & NLP_DEFER_RM) && 4983 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4983 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && 4984 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
4984 !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { 4985 !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
4986 phba->sli_rev != LPFC_SLI_REV4) {
4985 /* For this case we need to cleanup the default rpi 4987 /* For this case we need to cleanup the default rpi
4986 * allocated by the firmware. 4988 * allocated by the firmware.
4987 */ 4989 */
@@ -5429,6 +5431,8 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5429 5431
5430 psli = &phba->sli; 5432 psli = &phba->sli;
5431 pring = lpfc_phba_elsring(phba); 5433 pring = lpfc_phba_elsring(phba);
5434 if (unlikely(!pring))
5435 return;
5432 5436
5433 /* Error matching iocb on txq or txcmplq 5437 /* Error matching iocb on txq or txcmplq
5434 * First check the txq. 5438 * First check the txq.
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1db0a38683f4..2b145966c73f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3636,7 +3636,7 @@ struct lpfc_mbx_get_port_name {
3636#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 3636#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
3637#define MB_CQE_STATUS_DMA_FAILED 0x5 3637#define MB_CQE_STATUS_DMA_FAILED 0x5
3638 3638
3639#define LPFC_MBX_WR_CONFIG_MAX_BDE 8 3639#define LPFC_MBX_WR_CONFIG_MAX_BDE 1
3640struct lpfc_mbx_wr_object { 3640struct lpfc_mbx_wr_object {
3641 struct mbox_header header; 3641 struct mbox_header header;
3642 union { 3642 union {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6a1e28ba9258..2b7ea7e53e12 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3216,6 +3216,9 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3216 lpfc_destroy_vport_work_array(phba, vports); 3216 lpfc_destroy_vport_work_array(phba, vports);
3217 3217
3218 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3218 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3219
3220 if (phba->wq)
3221 flush_workqueue(phba->wq);
3219} 3222}
3220 3223
3221/** 3224/**
@@ -4173,6 +4176,9 @@ void
4173lpfc_stop_port(struct lpfc_hba *phba) 4176lpfc_stop_port(struct lpfc_hba *phba)
4174{ 4177{
4175 phba->lpfc_stop_port(phba); 4178 phba->lpfc_stop_port(phba);
4179
4180 if (phba->wq)
4181 flush_workqueue(phba->wq);
4176} 4182}
4177 4183
4178/** 4184/**
@@ -6363,6 +6369,9 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
6363 return error; 6369 return error;
6364 } 6370 }
6365 6371
6372 /* workqueue for deferred irq use */
6373 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6374
6366 return 0; 6375 return 0;
6367} 6376}
6368 6377
@@ -6377,6 +6386,12 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
6377static void 6386static void
6378lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 6387lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
6379{ 6388{
6389 if (phba->wq) {
6390 flush_workqueue(phba->wq);
6391 destroy_workqueue(phba->wq);
6392 phba->wq = NULL;
6393 }
6394
6380 /* Stop kernel worker thread */ 6395 /* Stop kernel worker thread */
6381 kthread_stop(phba->worker_thread); 6396 kthread_stop(phba->worker_thread);
6382} 6397}
@@ -11397,6 +11412,13 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
11397 /* Remove FC host and then SCSI host with the physical port */ 11412 /* Remove FC host and then SCSI host with the physical port */
11398 fc_remove_host(shost); 11413 fc_remove_host(shost);
11399 scsi_remove_host(shost); 11414 scsi_remove_host(shost);
11415 /*
11416 * Bring down the SLI Layer. This step disables all interrupts,
11417 * clears the rings, discards all mailbox commands, and resets
11418 * the HBA FCoE function.
11419 */
11420 lpfc_debugfs_terminate(vport);
11421 lpfc_sli4_hba_unset(phba);
11400 11422
11401 /* Perform ndlp cleanup on the physical port. The nvme and nvmet 11423 /* Perform ndlp cleanup on the physical port. The nvme and nvmet
11402 * localports are destroyed after to cleanup all transport memory. 11424 * localports are destroyed after to cleanup all transport memory.
@@ -11405,14 +11427,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
11405 lpfc_nvmet_destroy_targetport(phba); 11427 lpfc_nvmet_destroy_targetport(phba);
11406 lpfc_nvme_destroy_localport(vport); 11428 lpfc_nvme_destroy_localport(vport);
11407 11429
11408 /*
11409 * Bring down the SLI Layer. This step disables all interrupts,
11410 * clears the rings, discards all mailbox commands, and resets
11411 * the HBA FCoE function.
11412 */
11413 lpfc_debugfs_terminate(vport);
11414 lpfc_sli4_hba_unset(phba);
11415 11430
11431 lpfc_stop_hba_timers(phba);
11416 spin_lock_irq(&phba->hbalock); 11432 spin_lock_irq(&phba->hbalock);
11417 list_del_init(&vport->listentry); 11433 list_del_init(&vport->listentry);
11418 spin_unlock_irq(&phba->hbalock); 11434 spin_unlock_irq(&phba->hbalock);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index f3ad7cac355d..b6957d944b9a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -216,7 +216,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
216 pring = lpfc_phba_elsring(phba); 216 pring = lpfc_phba_elsring(phba);
217 217
218 /* In case of error recovery path, we might have a NULL pring here */ 218 /* In case of error recovery path, we might have a NULL pring here */
219 if (!pring) 219 if (unlikely(!pring))
220 return; 220 return;
221 221
222 /* Abort outstanding I/O on NPort <nlp_DID> */ 222 /* Abort outstanding I/O on NPort <nlp_DID> */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 23bdb1ca106e..517ae570e507 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -416,6 +416,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
416 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 416 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
417 vport = lport->vport; 417 vport = lport->vport;
418 418
419 if (vport->load_flag & FC_UNLOADING)
420 return -ENODEV;
421
419 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); 422 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
420 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 423 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
421 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, 424 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -667,15 +670,17 @@ lpfc_nvme_ktime(struct lpfc_hba *phba,
667 struct lpfc_nvme_buf *lpfc_ncmd) 670 struct lpfc_nvme_buf *lpfc_ncmd)
668{ 671{
669 uint64_t seg1, seg2, seg3, seg4; 672 uint64_t seg1, seg2, seg3, seg4;
673 uint64_t segsum;
670 674
671 if (!phba->ktime_on)
672 return;
673 if (!lpfc_ncmd->ts_last_cmd || 675 if (!lpfc_ncmd->ts_last_cmd ||
674 !lpfc_ncmd->ts_cmd_start || 676 !lpfc_ncmd->ts_cmd_start ||
675 !lpfc_ncmd->ts_cmd_wqput || 677 !lpfc_ncmd->ts_cmd_wqput ||
676 !lpfc_ncmd->ts_isr_cmpl || 678 !lpfc_ncmd->ts_isr_cmpl ||
677 !lpfc_ncmd->ts_data_nvme) 679 !lpfc_ncmd->ts_data_nvme)
678 return; 680 return;
681
682 if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
683 return;
679 if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd) 684 if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
680 return; 685 return;
681 if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start) 686 if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
@@ -695,15 +700,23 @@ lpfc_nvme_ktime(struct lpfc_hba *phba,
695 * cmpl is handled off to the NVME Layer. 700 * cmpl is handled off to the NVME Layer.
696 */ 701 */
697 seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd; 702 seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
698 if (seg1 > 5000000) /* 5 ms - for sequential IOs */ 703 if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
699 return; 704 seg1 = 0;
700 705
701 /* Calculate times relative to start of IO */ 706 /* Calculate times relative to start of IO */
702 seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start); 707 seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
703 seg3 = (lpfc_ncmd->ts_isr_cmpl - 708 segsum = seg2;
704 lpfc_ncmd->ts_cmd_start) - seg2; 709 seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
705 seg4 = (lpfc_ncmd->ts_data_nvme - 710 if (segsum > seg3)
706 lpfc_ncmd->ts_cmd_start) - seg2 - seg3; 711 return;
712 seg3 -= segsum;
713 segsum += seg3;
714
715 seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
716 if (segsum > seg4)
717 return;
718 seg4 -= segsum;
719
707 phba->ktime_data_samples++; 720 phba->ktime_data_samples++;
708 phba->ktime_seg1_total += seg1; 721 phba->ktime_seg1_total += seg1;
709 if (seg1 < phba->ktime_seg1_min) 722 if (seg1 < phba->ktime_seg1_min)
@@ -840,7 +853,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
840 } else { 853 } else {
841 lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) & 854 lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
842 LPFC_IOCB_STATUS_MASK); 855 LPFC_IOCB_STATUS_MASK);
843 lpfc_ncmd->result = wcqe->parameter; 856 lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
844 857
845 /* For NVME, the only failure path that results in an 858 /* For NVME, the only failure path that results in an
846 * IO error is when the adapter rejects it. All other 859 * IO error is when the adapter rejects it. All other
@@ -874,9 +887,20 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
874 lpfc_ncmd->status, lpfc_ncmd->result, 887 lpfc_ncmd->status, lpfc_ncmd->result,
875 wcqe->total_data_placed); 888 wcqe->total_data_placed);
876 break; 889 break;
890 case IOSTAT_LOCAL_REJECT:
891 /* Let fall through to set command final state. */
892 if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
893 lpfc_printf_vlog(vport, KERN_INFO,
894 LOG_NVME_IOERR,
895 "6032 Delay Aborted cmd %p "
896 "nvme cmd %p, xri x%x, "
897 "xb %d\n",
898 lpfc_ncmd, nCmd,
899 lpfc_ncmd->cur_iocbq.sli4_xritag,
900 bf_get(lpfc_wcqe_c_xb, wcqe));
877 default: 901 default:
878out_err: 902out_err:
879 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 903 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
880 "6072 NVME Completion Error: xri %x " 904 "6072 NVME Completion Error: xri %x "
881 "status x%x result x%x placed x%x\n", 905 "status x%x result x%x placed x%x\n",
882 lpfc_ncmd->cur_iocbq.sli4_xritag, 906 lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -902,7 +926,7 @@ out_err:
902 * owns the dma address. 926 * owns the dma address.
903 */ 927 */
904#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 928#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
905 if (phba->ktime_on) { 929 if (lpfc_ncmd->ts_cmd_start) {
906 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; 930 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
907 lpfc_ncmd->ts_data_nvme = ktime_get_ns(); 931 lpfc_ncmd->ts_data_nvme = ktime_get_ns();
908 phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme; 932 phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
@@ -920,12 +944,18 @@ out_err:
920#endif 944#endif
921 freqpriv = nCmd->private; 945 freqpriv = nCmd->private;
922 freqpriv->nvme_buf = NULL; 946 freqpriv->nvme_buf = NULL;
923 nCmd->done(nCmd); 947
948 /* NVME targets need completion held off until the abort exchange
949 * completes.
950 */
951 if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
952 nCmd->done(nCmd);
924 953
925 spin_lock_irqsave(&phba->hbalock, flags); 954 spin_lock_irqsave(&phba->hbalock, flags);
926 lpfc_ncmd->nrport = NULL; 955 lpfc_ncmd->nrport = NULL;
927 spin_unlock_irqrestore(&phba->hbalock, flags); 956 spin_unlock_irqrestore(&phba->hbalock, flags);
928 957
958 /* Call release with XB=1 to queue the IO into the abort list. */
929 lpfc_release_nvme_buf(phba, lpfc_ncmd); 959 lpfc_release_nvme_buf(phba, lpfc_ncmd);
930} 960}
931 961
@@ -1119,12 +1149,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
1119 1149
1120 first_data_sgl = sgl; 1150 first_data_sgl = sgl;
1121 lpfc_ncmd->seg_cnt = nCmd->sg_cnt; 1151 lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
1122 if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) { 1152 if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
1123 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1153 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1124 "6058 Too many sg segments from " 1154 "6058 Too many sg segments from "
1125 "NVME Transport. Max %d, " 1155 "NVME Transport. Max %d, "
1126 "nvmeIO sg_cnt %d\n", 1156 "nvmeIO sg_cnt %d\n",
1127 phba->cfg_nvme_seg_cnt, 1157 phba->cfg_nvme_seg_cnt + 1,
1128 lpfc_ncmd->seg_cnt); 1158 lpfc_ncmd->seg_cnt);
1129 lpfc_ncmd->seg_cnt = 0; 1159 lpfc_ncmd->seg_cnt = 0;
1130 return 1; 1160 return 1;
@@ -1225,6 +1255,21 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1225 vport = lport->vport; 1255 vport = lport->vport;
1226 phba = vport->phba; 1256 phba = vport->phba;
1227 1257
1258 if (vport->load_flag & FC_UNLOADING) {
1259 ret = -ENODEV;
1260 goto out_fail;
1261 }
1262
1263 /* Validate pointers. */
1264 if (!pnvme_lport || !pnvme_rport || !freqpriv) {
1265 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
1266 "6117 No Send:IO submit ptrs NULL, lport %p, "
1267 "rport %p fcreq_priv %p\n",
1268 pnvme_lport, pnvme_rport, freqpriv);
1269 ret = -ENODEV;
1270 goto out_fail;
1271 }
1272
1228#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1273#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1229 if (phba->ktime_on) 1274 if (phba->ktime_on)
1230 start = ktime_get_ns(); 1275 start = ktime_get_ns();
@@ -1283,9 +1328,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1283 goto out_fail; 1328 goto out_fail;
1284 } 1329 }
1285#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1330#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1286 if (phba->ktime_on) { 1331 if (start) {
1287 lpfc_ncmd->ts_cmd_start = start; 1332 lpfc_ncmd->ts_cmd_start = start;
1288 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd; 1333 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1334 } else {
1335 lpfc_ncmd->ts_cmd_start = 0;
1289 } 1336 }
1290#endif 1337#endif
1291 1338
@@ -1327,7 +1374,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1327 ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); 1374 ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
1328 if (ret) { 1375 if (ret) {
1329 atomic_dec(&ndlp->cmd_pending); 1376 atomic_dec(&ndlp->cmd_pending);
1330 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 1377 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1331 "6113 FCP could not issue WQE err %x " 1378 "6113 FCP could not issue WQE err %x "
1332 "sid: x%x did: x%x oxid: x%x\n", 1379 "sid: x%x did: x%x oxid: x%x\n",
1333 ret, vport->fc_myDID, ndlp->nlp_DID, 1380 ret, vport->fc_myDID, ndlp->nlp_DID,
@@ -1336,7 +1383,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1336 } 1383 }
1337 1384
1338#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1385#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1339 if (phba->ktime_on) 1386 if (lpfc_ncmd->ts_cmd_start)
1340 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); 1387 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1341 1388
1342 if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { 1389 if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
@@ -1387,7 +1434,7 @@ void
1387lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1434lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1388 struct lpfc_wcqe_complete *abts_cmpl) 1435 struct lpfc_wcqe_complete *abts_cmpl)
1389{ 1436{
1390 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1437 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1391 "6145 ABORT_XRI_CN completing on rpi x%x " 1438 "6145 ABORT_XRI_CN completing on rpi x%x "
1392 "original iotag x%x, abort cmd iotag x%x " 1439 "original iotag x%x, abort cmd iotag x%x "
1393 "req_tag x%x, status x%x, hwstatus x%x\n", 1440 "req_tag x%x, status x%x, hwstatus x%x\n",
@@ -1938,14 +1985,13 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
1938 * pci bus space for an I/O. The DMA buffer includes the 1985 * pci bus space for an I/O. The DMA buffer includes the
1939 * number of SGE's necessary to support the sg_tablesize. 1986 * number of SGE's necessary to support the sg_tablesize.
1940 */ 1987 */
1941 lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, 1988 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
1942 GFP_KERNEL, 1989 GFP_KERNEL,
1943 &lpfc_ncmd->dma_handle); 1990 &lpfc_ncmd->dma_handle);
1944 if (!lpfc_ncmd->data) { 1991 if (!lpfc_ncmd->data) {
1945 kfree(lpfc_ncmd); 1992 kfree(lpfc_ncmd);
1946 break; 1993 break;
1947 } 1994 }
1948 memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
1949 1995
1950 lxri = lpfc_sli4_next_xritag(phba); 1996 lxri = lpfc_sli4_next_xritag(phba);
1951 if (lxri == NO_XRI) { 1997 if (lxri == NO_XRI) {
@@ -2042,9 +2088,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2042 spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag); 2088 spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
2043 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 2089 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2044 &phba->lpfc_nvme_buf_list_get, list) { 2090 &phba->lpfc_nvme_buf_list_get, list) {
2045 if (lpfc_test_rrq_active(phba, ndlp,
2046 lpfc_ncmd->cur_iocbq.sli4_lxritag))
2047 continue;
2048 list_del_init(&lpfc_ncmd->list); 2091 list_del_init(&lpfc_ncmd->list);
2049 found = 1; 2092 found = 1;
2050 break; 2093 break;
@@ -2057,9 +2100,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2057 spin_unlock(&phba->nvme_buf_list_put_lock); 2100 spin_unlock(&phba->nvme_buf_list_put_lock);
2058 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 2101 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2059 &phba->lpfc_nvme_buf_list_get, list) { 2102 &phba->lpfc_nvme_buf_list_get, list) {
2060 if (lpfc_test_rrq_active(
2061 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
2062 continue;
2063 list_del_init(&lpfc_ncmd->list); 2103 list_del_init(&lpfc_ncmd->list);
2064 found = 1; 2104 found = 1;
2065 break; 2105 break;
@@ -2096,7 +2136,6 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
2096 2136
2097 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, 2137 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
2098 iflag); 2138 iflag);
2099 lpfc_ncmd->nvmeCmd = NULL;
2100 list_add_tail(&lpfc_ncmd->list, 2139 list_add_tail(&lpfc_ncmd->list,
2101 &phba->sli4_hba.lpfc_abts_nvme_buf_list); 2140 &phba->sli4_hba.lpfc_abts_nvme_buf_list);
2102 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, 2141 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
@@ -2296,6 +2335,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2296 struct lpfc_nvme_rport *rport; 2335 struct lpfc_nvme_rport *rport;
2297 struct nvme_fc_remote_port *remote_port; 2336 struct nvme_fc_remote_port *remote_port;
2298 struct nvme_fc_port_info rpinfo; 2337 struct nvme_fc_port_info rpinfo;
2338 struct lpfc_nodelist *prev_ndlp;
2299 2339
2300 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, 2340 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2301 "6006 Register NVME PORT. DID x%06x nlptype x%x\n", 2341 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
@@ -2332,7 +2372,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2332 * new rport. 2372 * new rport.
2333 */ 2373 */
2334 rport = remote_port->private; 2374 rport = remote_port->private;
2335 if (ndlp->nrport == rport) { 2375 if (ndlp->nrport) {
2336 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 2376 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2337 LOG_NVME_DISC, 2377 LOG_NVME_DISC,
2338 "6014 Rebinding lport to " 2378 "6014 Rebinding lport to "
@@ -2343,24 +2383,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2343 remote_port->port_role, 2383 remote_port->port_role,
2344 ndlp->nlp_type, 2384 ndlp->nlp_type,
2345 ndlp->nlp_DID); 2385 ndlp->nlp_DID);
2346 } else { 2386 prev_ndlp = rport->ndlp;
2347 /* New rport. */ 2387
2348 rport->remoteport = remote_port; 2388 /* Sever the ndlp<->rport connection before dropping
2349 rport->lport = lport; 2389 * the ndlp ref from register.
2350 rport->ndlp = lpfc_nlp_get(ndlp); 2390 */
2351 if (!rport->ndlp) 2391 ndlp->nrport = NULL;
2352 return -1; 2392 rport->ndlp = NULL;
2353 ndlp->nrport = rport; 2393 if (prev_ndlp)
2354 lpfc_printf_vlog(vport, KERN_INFO, 2394 lpfc_nlp_put(ndlp);
2355 LOG_NVME_DISC | LOG_NODE,
2356 "6022 Binding new rport to "
2357 "lport %p Rport WWNN 0x%llx, "
2358 "Rport WWPN 0x%llx DID "
2359 "x%06x Role x%x\n",
2360 lport,
2361 rpinfo.node_name, rpinfo.port_name,
2362 rpinfo.port_id, rpinfo.port_role);
2363 } 2395 }
2396
2397 /* Clean bind the rport to the ndlp. */
2398 rport->remoteport = remote_port;
2399 rport->lport = lport;
2400 rport->ndlp = lpfc_nlp_get(ndlp);
2401 if (!rport->ndlp)
2402 return -1;
2403 ndlp->nrport = rport;
2404 lpfc_printf_vlog(vport, KERN_INFO,
2405 LOG_NVME_DISC | LOG_NODE,
2406 "6022 Binding new rport to "
2407 "lport %p Rport WWNN 0x%llx, "
2408 "Rport WWPN 0x%llx DID "
2409 "x%06x Role x%x\n",
2410 lport,
2411 rpinfo.node_name, rpinfo.port_name,
2412 rpinfo.port_id, rpinfo.port_role);
2364 } else { 2413 } else {
2365 lpfc_printf_vlog(vport, KERN_ERR, 2414 lpfc_printf_vlog(vport, KERN_ERR,
2366 LOG_NVME_DISC | LOG_NODE, 2415 LOG_NVME_DISC | LOG_NODE,
@@ -2454,18 +2503,18 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2454 * @axri: pointer to the fcp xri abort wcqe structure. 2503 * @axri: pointer to the fcp xri abort wcqe structure.
2455 * 2504 *
2456 * This routine is invoked by the worker thread to process a SLI4 fast-path 2505 * This routine is invoked by the worker thread to process a SLI4 fast-path
2457 * FCP aborted xri. 2506 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
2507 * here.
2458 **/ 2508 **/
2459void 2509void
2460lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, 2510lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2461 struct sli4_wcqe_xri_aborted *axri) 2511 struct sli4_wcqe_xri_aborted *axri)
2462{ 2512{
2463 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 2513 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2464 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
2465 struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd; 2514 struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
2515 struct nvmefc_fcp_req *nvme_cmd = NULL;
2466 struct lpfc_nodelist *ndlp; 2516 struct lpfc_nodelist *ndlp;
2467 unsigned long iflag = 0; 2517 unsigned long iflag = 0;
2468 int rrq_empty = 0;
2469 2518
2470 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 2519 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
2471 return; 2520 return;
@@ -2481,25 +2530,24 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2481 spin_unlock( 2530 spin_unlock(
2482 &phba->sli4_hba.abts_nvme_buf_list_lock); 2531 &phba->sli4_hba.abts_nvme_buf_list_lock);
2483 2532
2484 rrq_empty = list_empty(&phba->active_rrq_list);
2485 spin_unlock_irqrestore(&phba->hbalock, iflag); 2533 spin_unlock_irqrestore(&phba->hbalock, iflag);
2486 ndlp = lpfc_ncmd->ndlp; 2534 ndlp = lpfc_ncmd->ndlp;
2487 if (ndlp) { 2535 if (ndlp)
2488 lpfc_set_rrq_active(
2489 phba, ndlp,
2490 lpfc_ncmd->cur_iocbq.sli4_lxritag,
2491 rxid, 1);
2492 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 2536 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2493 }
2494 2537
2495 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2538 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2496 "6311 XRI Aborted xri x%x tag x%x " 2539 "6311 nvme_cmd %p xri x%x tag x%x "
2497 "released\n", 2540 "abort complete and xri released\n",
2498 xri, lpfc_ncmd->cur_iocbq.iotag); 2541 lpfc_ncmd->nvmeCmd, xri,
2499 2542 lpfc_ncmd->cur_iocbq.iotag);
2543
2544 /* Aborted NVME commands are required to not complete
2545 * before the abort exchange command fully completes.
2546 * Once completed, it is available via the put list.
2547 */
2548 nvme_cmd = lpfc_ncmd->nvmeCmd;
2549 nvme_cmd->done(nvme_cmd);
2500 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2550 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2501 if (rrq_empty)
2502 lpfc_worker_wake_up(phba);
2503 return; 2551 return;
2504 } 2552 }
2505 } 2553 }
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 0b7c1a49e203..84cf1b9079f7 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -76,7 +76,7 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
76{ 76{
77 unsigned long iflag; 77 unsigned long iflag;
78 78
79 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 79 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
80 "6313 NVMET Defer ctx release xri x%x flg x%x\n", 80 "6313 NVMET Defer ctx release xri x%x flg x%x\n",
81 ctxp->oxid, ctxp->flag); 81 ctxp->oxid, ctxp->flag);
82 82
@@ -221,9 +221,8 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
221 spin_lock_init(&ctxp->ctxlock); 221 spin_lock_init(&ctxp->ctxlock);
222 222
223#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 223#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
224 if (phba->ktime_on) { 224 if (ctxp->ts_cmd_nvme) {
225 ctxp->ts_cmd_nvme = ktime_get_ns(); 225 ctxp->ts_cmd_nvme = ktime_get_ns();
226 ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
227 ctxp->ts_nvme_data = 0; 226 ctxp->ts_nvme_data = 0;
228 ctxp->ts_data_wqput = 0; 227 ctxp->ts_data_wqput = 0;
229 ctxp->ts_isr_data = 0; 228 ctxp->ts_isr_data = 0;
@@ -289,9 +288,7 @@ lpfc_nvmet_ktime(struct lpfc_hba *phba,
289{ 288{
290 uint64_t seg1, seg2, seg3, seg4, seg5; 289 uint64_t seg1, seg2, seg3, seg4, seg5;
291 uint64_t seg6, seg7, seg8, seg9, seg10; 290 uint64_t seg6, seg7, seg8, seg9, seg10;
292 291 uint64_t segsum;
293 if (!phba->ktime_on)
294 return;
295 292
296 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme || 293 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
297 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput || 294 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
@@ -300,6 +297,8 @@ lpfc_nvmet_ktime(struct lpfc_hba *phba,
300 !ctxp->ts_isr_status || !ctxp->ts_status_nvme) 297 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
301 return; 298 return;
302 299
300 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
301 return;
303 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme) 302 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
304 return; 303 return;
305 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data) 304 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
@@ -344,34 +343,66 @@ lpfc_nvmet_ktime(struct lpfc_hba *phba,
344 * (Segments 1 thru 4) for READDATA_RSP 343 * (Segments 1 thru 4) for READDATA_RSP
345 */ 344 */
346 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd; 345 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
347 seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1; 346 segsum = seg1;
348 seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) - 347
349 seg1 - seg2; 348 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
350 seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) - 349 if (segsum > seg2)
351 seg1 - seg2 - seg3; 350 return;
352 seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) - 351 seg2 -= segsum;
353 seg1 - seg2 - seg3 - seg4; 352 segsum += seg2;
353
354 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
355 if (segsum > seg3)
356 return;
357 seg3 -= segsum;
358 segsum += seg3;
359
360 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
361 if (segsum > seg4)
362 return;
363 seg4 -= segsum;
364 segsum += seg4;
365
366 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
367 if (segsum > seg5)
368 return;
369 seg5 -= segsum;
370 segsum += seg5;
371
354 372
355 /* For auto rsp commands seg6 thru seg10 will be 0 */ 373 /* For auto rsp commands seg6 thru seg10 will be 0 */
356 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) { 374 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
357 seg6 = (ctxp->ts_nvme_status - 375 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
358 ctxp->ts_isr_cmd) - 376 if (segsum > seg6)
359 seg1 - seg2 - seg3 - seg4 - seg5; 377 return;
360 seg7 = (ctxp->ts_status_wqput - 378 seg6 -= segsum;
361 ctxp->ts_isr_cmd) - 379 segsum += seg6;
362 seg1 - seg2 - seg3 - 380
363 seg4 - seg5 - seg6; 381 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
364 seg8 = (ctxp->ts_isr_status - 382 if (segsum > seg7)
365 ctxp->ts_isr_cmd) - 383 return;
366 seg1 - seg2 - seg3 - seg4 - 384 seg7 -= segsum;
367 seg5 - seg6 - seg7; 385 segsum += seg7;
368 seg9 = (ctxp->ts_status_nvme - 386
369 ctxp->ts_isr_cmd) - 387 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
370 seg1 - seg2 - seg3 - seg4 - 388 if (segsum > seg8)
371 seg5 - seg6 - seg7 - seg8; 389 return;
390 seg8 -= segsum;
391 segsum += seg8;
392
393 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
394 if (segsum > seg9)
395 return;
396 seg9 -= segsum;
397 segsum += seg9;
398
399 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
400 return;
372 seg10 = (ctxp->ts_isr_status - 401 seg10 = (ctxp->ts_isr_status -
373 ctxp->ts_isr_cmd); 402 ctxp->ts_isr_cmd);
374 } else { 403 } else {
404 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
405 return;
375 seg6 = 0; 406 seg6 = 0;
376 seg7 = 0; 407 seg7 = 0;
377 seg8 = 0; 408 seg8 = 0;
@@ -463,7 +494,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
463 struct lpfc_nvmet_tgtport *tgtp; 494 struct lpfc_nvmet_tgtport *tgtp;
464 struct nvmefc_tgt_fcp_req *rsp; 495 struct nvmefc_tgt_fcp_req *rsp;
465 struct lpfc_nvmet_rcv_ctx *ctxp; 496 struct lpfc_nvmet_rcv_ctx *ctxp;
466 uint32_t status, result, op, start_clean; 497 uint32_t status, result, op, start_clean, logerr;
467#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 498#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
468 uint32_t id; 499 uint32_t id;
469#endif 500#endif
@@ -491,17 +522,21 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
491 if (tgtp) 522 if (tgtp)
492 atomic_inc(&tgtp->xmt_fcp_rsp_error); 523 atomic_inc(&tgtp->xmt_fcp_rsp_error);
493 524
525 logerr = LOG_NVME_IOERR;
526
494 /* pick up SLI4 exhange busy condition */ 527 /* pick up SLI4 exhange busy condition */
495 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 528 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
496 ctxp->flag |= LPFC_NVMET_XBUSY; 529 ctxp->flag |= LPFC_NVMET_XBUSY;
530 logerr |= LOG_NVME_ABTS;
497 531
498 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
499 "6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
500 ctxp->oxid, status, result);
501 } else { 532 } else {
502 ctxp->flag &= ~LPFC_NVMET_XBUSY; 533 ctxp->flag &= ~LPFC_NVMET_XBUSY;
503 } 534 }
504 535
536 lpfc_printf_log(phba, KERN_INFO, logerr,
537 "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
538 ctxp->oxid, status, result, ctxp->flag);
539
505 } else { 540 } else {
506 rsp->fcp_error = NVME_SC_SUCCESS; 541 rsp->fcp_error = NVME_SC_SUCCESS;
507 if (op == NVMET_FCOP_RSP) 542 if (op == NVMET_FCOP_RSP)
@@ -519,7 +554,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
519 ctxp->entry_cnt++; 554 ctxp->entry_cnt++;
520 555
521#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 556#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
522 if (phba->ktime_on) { 557 if (ctxp->ts_cmd_nvme) {
523 if (rsp->op == NVMET_FCOP_READDATA_RSP) { 558 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
524 ctxp->ts_isr_data = 559 ctxp->ts_isr_data =
525 cmdwqe->isr_timestamp; 560 cmdwqe->isr_timestamp;
@@ -553,7 +588,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
553#endif 588#endif
554 rsp->done(rsp); 589 rsp->done(rsp);
555#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 590#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
556 if (phba->ktime_on) 591 if (ctxp->ts_cmd_nvme)
557 lpfc_nvmet_ktime(phba, ctxp); 592 lpfc_nvmet_ktime(phba, ctxp);
558#endif 593#endif
559 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */ 594 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
@@ -563,7 +598,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
563 memset(((char *)cmdwqe) + start_clean, 0, 598 memset(((char *)cmdwqe) + start_clean, 0,
564 (sizeof(struct lpfc_iocbq) - start_clean)); 599 (sizeof(struct lpfc_iocbq) - start_clean));
565#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 600#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
566 if (phba->ktime_on) { 601 if (ctxp->ts_cmd_nvme) {
567 ctxp->ts_isr_data = cmdwqe->isr_timestamp; 602 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
568 ctxp->ts_data_nvme = ktime_get_ns(); 603 ctxp->ts_data_nvme = ktime_get_ns();
569 } 604 }
@@ -597,6 +632,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
597 struct ulp_bde64 bpl; 632 struct ulp_bde64 bpl;
598 int rc; 633 int rc;
599 634
635 if (phba->pport->load_flag & FC_UNLOADING)
636 return -ENODEV;
637
600 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 638 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
601 "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid); 639 "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
602 640
@@ -678,8 +716,13 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
678 struct lpfc_iocbq *nvmewqeq; 716 struct lpfc_iocbq *nvmewqeq;
679 int rc; 717 int rc;
680 718
719 if (phba->pport->load_flag & FC_UNLOADING) {
720 rc = -ENODEV;
721 goto aerr;
722 }
723
681#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 724#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
682 if (phba->ktime_on) { 725 if (ctxp->ts_cmd_nvme) {
683 if (rsp->op == NVMET_FCOP_RSP) 726 if (rsp->op == NVMET_FCOP_RSP)
684 ctxp->ts_nvme_status = ktime_get_ns(); 727 ctxp->ts_nvme_status = ktime_get_ns();
685 else 728 else
@@ -734,7 +777,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
734 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); 777 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
735 if (rc == WQE_SUCCESS) { 778 if (rc == WQE_SUCCESS) {
736#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 779#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
737 if (!phba->ktime_on) 780 if (!ctxp->ts_cmd_nvme)
738 return 0; 781 return 0;
739 if (rsp->op == NVMET_FCOP_RSP) 782 if (rsp->op == NVMET_FCOP_RSP)
740 ctxp->ts_status_wqput = ktime_get_ns(); 783 ctxp->ts_status_wqput = ktime_get_ns();
@@ -777,6 +820,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
777 struct lpfc_hba *phba = ctxp->phba; 820 struct lpfc_hba *phba = ctxp->phba;
778 unsigned long flags; 821 unsigned long flags;
779 822
823 if (phba->pport->load_flag & FC_UNLOADING)
824 return;
825
780 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 826 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
781 "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n", 827 "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
782 ctxp->oxid, ctxp->flag, ctxp->state); 828 ctxp->oxid, ctxp->flag, ctxp->state);
@@ -787,6 +833,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
787 atomic_inc(&lpfc_nvmep->xmt_fcp_abort); 833 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
788 834
789 spin_lock_irqsave(&ctxp->ctxlock, flags); 835 spin_lock_irqsave(&ctxp->ctxlock, flags);
836 ctxp->state = LPFC_NVMET_STE_ABORT;
790 837
791 /* Since iaab/iaar are NOT set, we need to check 838 /* Since iaab/iaar are NOT set, we need to check
792 * if the firmware is in process of aborting IO 839 * if the firmware is in process of aborting IO
@@ -1125,9 +1172,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1125 } 1172 }
1126 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 1173 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1127 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; 1174 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
1128 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 1175 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1129 NVMET_FCTGTFEAT_CMD_IN_ISR |
1130 NVMET_FCTGTFEAT_OPDONE_IN_ISR;
1131 1176
1132#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1177#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1133 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, 1178 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
@@ -1138,9 +1183,14 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1138#endif 1183#endif
1139 if (error) { 1184 if (error) {
1140 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1185 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1141 "6025 Cannot register NVME targetport " 1186 "6025 Cannot register NVME targetport x%x: "
1142 "x%x\n", error); 1187 "portnm %llx nodenm %llx segs %d qs %d\n",
1188 error,
1189 pinfo.port_name, pinfo.node_name,
1190 lpfc_tgttemplate.max_sgl_segments,
1191 lpfc_tgttemplate.max_hw_queues);
1143 phba->targetport = NULL; 1192 phba->targetport = NULL;
1193 phba->nvmet_support = 0;
1144 1194
1145 lpfc_nvmet_cleanup_io_context(phba); 1195 lpfc_nvmet_cleanup_io_context(phba);
1146 1196
@@ -1152,9 +1202,11 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1152 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1202 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1153 "6026 Registered NVME " 1203 "6026 Registered NVME "
1154 "targetport: %p, private %p " 1204 "targetport: %p, private %p "
1155 "portnm %llx nodenm %llx\n", 1205 "portnm %llx nodenm %llx segs %d qs %d\n",
1156 phba->targetport, tgtp, 1206 phba->targetport, tgtp,
1157 pinfo.port_name, pinfo.node_name); 1207 pinfo.port_name, pinfo.node_name,
1208 lpfc_tgttemplate.max_sgl_segments,
1209 lpfc_tgttemplate.max_hw_queues);
1158 1210
1159 atomic_set(&tgtp->rcv_ls_req_in, 0); 1211 atomic_set(&tgtp->rcv_ls_req_in, 0);
1160 atomic_set(&tgtp->rcv_ls_req_out, 0); 1212 atomic_set(&tgtp->rcv_ls_req_out, 0);
@@ -1457,6 +1509,7 @@ static struct lpfc_nvmet_ctxbuf *
1457lpfc_nvmet_replenish_context(struct lpfc_hba *phba, 1509lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1458 struct lpfc_nvmet_ctx_info *current_infop) 1510 struct lpfc_nvmet_ctx_info *current_infop)
1459{ 1511{
1512#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1460 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; 1513 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1461 struct lpfc_nvmet_ctx_info *get_infop; 1514 struct lpfc_nvmet_ctx_info *get_infop;
1462 int i; 1515 int i;
@@ -1504,6 +1557,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1504 get_infop = get_infop->nvmet_ctx_next_cpu; 1557 get_infop = get_infop->nvmet_ctx_next_cpu;
1505 } 1558 }
1506 1559
1560#endif
1507 /* Nothing found, all contexts for the MRQ are in-flight */ 1561 /* Nothing found, all contexts for the MRQ are in-flight */
1508 return NULL; 1562 return NULL;
1509} 1563}
@@ -1631,7 +1685,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1631 spin_lock_init(&ctxp->ctxlock); 1685 spin_lock_init(&ctxp->ctxlock);
1632 1686
1633#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1687#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1634 if (phba->ktime_on) { 1688 if (isr_timestamp) {
1635 ctxp->ts_isr_cmd = isr_timestamp; 1689 ctxp->ts_isr_cmd = isr_timestamp;
1636 ctxp->ts_cmd_nvme = ktime_get_ns(); 1690 ctxp->ts_cmd_nvme = ktime_get_ns();
1637 ctxp->ts_nvme_data = 0; 1691 ctxp->ts_nvme_data = 0;
@@ -1642,6 +1696,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1642 ctxp->ts_status_wqput = 0; 1696 ctxp->ts_status_wqput = 0;
1643 ctxp->ts_isr_status = 0; 1697 ctxp->ts_isr_status = 0;
1644 ctxp->ts_status_nvme = 0; 1698 ctxp->ts_status_nvme = 0;
1699 } else {
1700 ctxp->ts_cmd_nvme = 0;
1645 } 1701 }
1646#endif 1702#endif
1647 1703
@@ -2320,7 +2376,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2320 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 2376 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2321 atomic_inc(&tgtp->xmt_abort_rsp); 2377 atomic_inc(&tgtp->xmt_abort_rsp);
2322 2378
2323 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2379 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2324 "6165 ABORT cmpl: xri x%x flg x%x (%d) " 2380 "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2325 "WCQE: %08x %08x %08x %08x\n", 2381 "WCQE: %08x %08x %08x %08x\n",
2326 ctxp->oxid, ctxp->flag, released, 2382 ctxp->oxid, ctxp->flag, released,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4edb81073409..aecd2399005d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -80,8 +80,8 @@ static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
80 struct lpfc_cqe *); 80 struct lpfc_cqe *);
81static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, 81static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
82 int); 82 int);
83static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, 83static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
84 struct lpfc_eqe *eqe, uint32_t qidx); 84 struct lpfc_eqe *eqe, uint32_t qidx);
85static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); 85static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
86static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); 86static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
87static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, 87static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
@@ -2732,7 +2732,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2732 * 2732 *
2733 * This function looks up the iocb_lookup table to get the command iocb 2733 * This function looks up the iocb_lookup table to get the command iocb
2734 * corresponding to the given response iocb using the iotag of the 2734 * corresponding to the given response iocb using the iotag of the
2735 * response iocb. This function is called with the hbalock held. 2735 * response iocb. This function is called with the hbalock held
2736 * for sli3 devices or the ring_lock for sli4 devices.
2736 * This function returns the command iocb object if it finds the command 2737 * This function returns the command iocb object if it finds the command
2737 * iocb else returns NULL. 2738 * iocb else returns NULL.
2738 **/ 2739 **/
@@ -2828,9 +2829,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2828 unsigned long iflag; 2829 unsigned long iflag;
2829 2830
2830 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2831 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2831 spin_lock_irqsave(&phba->hbalock, iflag); 2832 if (phba->sli_rev == LPFC_SLI_REV4)
2833 spin_lock_irqsave(&pring->ring_lock, iflag);
2834 else
2835 spin_lock_irqsave(&phba->hbalock, iflag);
2832 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2836 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2833 spin_unlock_irqrestore(&phba->hbalock, iflag); 2837 if (phba->sli_rev == LPFC_SLI_REV4)
2838 spin_unlock_irqrestore(&pring->ring_lock, iflag);
2839 else
2840 spin_unlock_irqrestore(&phba->hbalock, iflag);
2834 2841
2835 if (cmdiocbp) { 2842 if (cmdiocbp) {
2836 if (cmdiocbp->iocb_cmpl) { 2843 if (cmdiocbp->iocb_cmpl) {
@@ -9396,10 +9403,13 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9396 * for abort iocb hba_wqidx should already 9403 * for abort iocb hba_wqidx should already
9397 * be setup based on what work queue we used. 9404 * be setup based on what work queue we used.
9398 */ 9405 */
9399 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) 9406 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9400 piocb->hba_wqidx = 9407 piocb->hba_wqidx =
9401 lpfc_sli4_scmd_to_wqidx_distr(phba, 9408 lpfc_sli4_scmd_to_wqidx_distr(phba,
9402 piocb->context1); 9409 piocb->context1);
9410 piocb->hba_wqidx = piocb->hba_wqidx %
9411 phba->cfg_fcp_io_channel;
9412 }
9403 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; 9413 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
9404 } else { 9414 } else {
9405 if (unlikely(!phba->sli4_hba.oas_wq)) 9415 if (unlikely(!phba->sli4_hba.oas_wq))
@@ -10632,6 +10642,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10632 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 10642 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10633 return 0; 10643 return 0;
10634 10644
10645 if (!pring) {
10646 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10647 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10648 else
10649 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10650 goto abort_iotag_exit;
10651 }
10652
10635 /* 10653 /*
10636 * If we're unloading, don't abort iocb on the ELS ring, but change 10654 * If we're unloading, don't abort iocb on the ELS ring, but change
10637 * the callback so that nothing happens when it finishes. 10655 * the callback so that nothing happens when it finishes.
@@ -12500,6 +12518,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12500 unsigned long iflags; 12518 unsigned long iflags;
12501 12519
12502 pring = lpfc_phba_elsring(phba); 12520 pring = lpfc_phba_elsring(phba);
12521 if (unlikely(!pring))
12522 return NULL;
12503 12523
12504 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 12524 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
12505 spin_lock_irqsave(&pring->ring_lock, iflags); 12525 spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -12507,19 +12527,21 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12507 /* Look up the ELS command IOCB and create pseudo response IOCB */ 12527 /* Look up the ELS command IOCB and create pseudo response IOCB */
12508 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 12528 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12509 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12529 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12510 /* Put the iocb back on the txcmplq */
12511 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
12512 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12513
12514 if (unlikely(!cmdiocbq)) { 12530 if (unlikely(!cmdiocbq)) {
12531 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12515 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12532 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12516 "0386 ELS complete with no corresponding " 12533 "0386 ELS complete with no corresponding "
12517 "cmdiocb: iotag (%d)\n", 12534 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
12518 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12535 wcqe->word0, wcqe->total_data_placed,
12536 wcqe->parameter, wcqe->word3);
12519 lpfc_sli_release_iocbq(phba, irspiocbq); 12537 lpfc_sli_release_iocbq(phba, irspiocbq);
12520 return NULL; 12538 return NULL;
12521 } 12539 }
12522 12540
12541 /* Put the iocb back on the txcmplq */
12542 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
12543 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12544
12523 /* Fake the irspiocbq and copy necessary response information */ 12545 /* Fake the irspiocbq and copy necessary response information */
12524 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 12546 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
12525 12547
@@ -13010,14 +13032,11 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13010 * completion queue, and then return. 13032 * completion queue, and then return.
13011 * 13033 *
13012 **/ 13034 **/
13013static int 13035static void
13014lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13036lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13015 struct lpfc_queue *speq) 13037 struct lpfc_queue *speq)
13016{ 13038{
13017 struct lpfc_queue *cq = NULL, *childq; 13039 struct lpfc_queue *cq = NULL, *childq;
13018 struct lpfc_cqe *cqe;
13019 bool workposted = false;
13020 int ecount = 0;
13021 uint16_t cqid; 13040 uint16_t cqid;
13022 13041
13023 /* Get the reference to the corresponding CQ */ 13042 /* Get the reference to the corresponding CQ */
@@ -13034,48 +13053,84 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13034 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13053 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13035 "0365 Slow-path CQ identifier " 13054 "0365 Slow-path CQ identifier "
13036 "(%d) does not exist\n", cqid); 13055 "(%d) does not exist\n", cqid);
13037 return 0; 13056 return;
13038 } 13057 }
13039 13058
13040 /* Save EQ associated with this CQ */ 13059 /* Save EQ associated with this CQ */
13041 cq->assoc_qp = speq; 13060 cq->assoc_qp = speq;
13042 13061
13062 if (!queue_work(phba->wq, &cq->spwork))
13063 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13064 "0390 Cannot schedule soft IRQ "
13065 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13066 cqid, cq->queue_id, smp_processor_id());
13067}
13068
13069/**
13070 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
13071 * @phba: Pointer to HBA context object.
13072 *
13073 * This routine process a event queue entry from the slow-path event queue.
13074 * It will check the MajorCode and MinorCode to determine this is for a
13075 * completion event on a completion queue, if not, an error shall be logged
13076 * and just return. Otherwise, it will get to the corresponding completion
13077 * queue and process all the entries on that completion queue, rearm the
13078 * completion queue, and then return.
13079 *
13080 **/
13081static void
13082lpfc_sli4_sp_process_cq(struct work_struct *work)
13083{
13084 struct lpfc_queue *cq =
13085 container_of(work, struct lpfc_queue, spwork);
13086 struct lpfc_hba *phba = cq->phba;
13087 struct lpfc_cqe *cqe;
13088 bool workposted = false;
13089 int ccount = 0;
13090
13043 /* Process all the entries to the CQ */ 13091 /* Process all the entries to the CQ */
13044 switch (cq->type) { 13092 switch (cq->type) {
13045 case LPFC_MCQ: 13093 case LPFC_MCQ:
13046 while ((cqe = lpfc_sli4_cq_get(cq))) { 13094 while ((cqe = lpfc_sli4_cq_get(cq))) {
13047 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 13095 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
13048 if (!(++ecount % cq->entry_repost)) 13096 if (!(++ccount % cq->entry_repost))
13049 break; 13097 break;
13050 cq->CQ_mbox++; 13098 cq->CQ_mbox++;
13051 } 13099 }
13052 break; 13100 break;
13053 case LPFC_WCQ: 13101 case LPFC_WCQ:
13054 while ((cqe = lpfc_sli4_cq_get(cq))) { 13102 while ((cqe = lpfc_sli4_cq_get(cq))) {
13055 if ((cq->subtype == LPFC_FCP) || 13103 if (cq->subtype == LPFC_FCP ||
13056 (cq->subtype == LPFC_NVME)) 13104 cq->subtype == LPFC_NVME) {
13105#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13106 if (phba->ktime_on)
13107 cq->isr_timestamp = ktime_get_ns();
13108 else
13109 cq->isr_timestamp = 0;
13110#endif
13057 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, 13111 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
13058 cqe); 13112 cqe);
13059 else 13113 } else {
13060 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 13114 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13061 cqe); 13115 cqe);
13062 if (!(++ecount % cq->entry_repost)) 13116 }
13117 if (!(++ccount % cq->entry_repost))
13063 break; 13118 break;
13064 } 13119 }
13065 13120
13066 /* Track the max number of CQEs processed in 1 EQ */ 13121 /* Track the max number of CQEs processed in 1 EQ */
13067 if (ecount > cq->CQ_max_cqe) 13122 if (ccount > cq->CQ_max_cqe)
13068 cq->CQ_max_cqe = ecount; 13123 cq->CQ_max_cqe = ccount;
13069 break; 13124 break;
13070 default: 13125 default:
13071 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13126 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13072 "0370 Invalid completion queue type (%d)\n", 13127 "0370 Invalid completion queue type (%d)\n",
13073 cq->type); 13128 cq->type);
13074 return 0; 13129 return;
13075 } 13130 }
13076 13131
13077 /* Catch the no cq entry condition, log an error */ 13132 /* Catch the no cq entry condition, log an error */
13078 if (unlikely(ecount == 0)) 13133 if (unlikely(ccount == 0))
13079 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13134 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13080 "0371 No entry from the CQ: identifier " 13135 "0371 No entry from the CQ: identifier "
13081 "(x%x), type (%d)\n", cq->queue_id, cq->type); 13136 "(x%x), type (%d)\n", cq->queue_id, cq->type);
@@ -13086,8 +13141,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13086 /* wake up worker thread if there are works to be done */ 13141 /* wake up worker thread if there are works to be done */
13087 if (workposted) 13142 if (workposted)
13088 lpfc_worker_wake_up(phba); 13143 lpfc_worker_wake_up(phba);
13089
13090 return ecount;
13091} 13144}
13092 13145
13093/** 13146/**
@@ -13143,11 +13196,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13143 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13196 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13144 return; 13197 return;
13145 } 13198 }
13146 13199#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13147 if (cq->assoc_qp) 13200 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13148 cmdiocbq->isr_timestamp = 13201#endif
13149 cq->assoc_qp->isr_timestamp;
13150
13151 if (cmdiocbq->iocb_cmpl == NULL) { 13202 if (cmdiocbq->iocb_cmpl == NULL) {
13152 if (cmdiocbq->wqe_cmpl) { 13203 if (cmdiocbq->wqe_cmpl) {
13153 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13204 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
@@ -13292,7 +13343,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13292 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 13343 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13293 lpfc_nvmet_unsol_fcp_event( 13344 lpfc_nvmet_unsol_fcp_event(
13294 phba, idx, dma_buf, 13345 phba, idx, dma_buf,
13295 cq->assoc_qp->isr_timestamp); 13346 cq->isr_timestamp);
13296 return false; 13347 return false;
13297 } 13348 }
13298drop: 13349drop:
@@ -13395,15 +13446,12 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13395 * queue and process all the entries on the completion queue, rearm the 13446 * queue and process all the entries on the completion queue, rearm the
13396 * completion queue, and then return. 13447 * completion queue, and then return.
13397 **/ 13448 **/
13398static int 13449static void
13399lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13450lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13400 uint32_t qidx) 13451 uint32_t qidx)
13401{ 13452{
13402 struct lpfc_queue *cq = NULL; 13453 struct lpfc_queue *cq = NULL;
13403 struct lpfc_cqe *cqe;
13404 bool workposted = false;
13405 uint16_t cqid, id; 13454 uint16_t cqid, id;
13406 int ecount = 0;
13407 13455
13408 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 13456 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13409 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13457 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -13411,7 +13459,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13411 "event: majorcode=x%x, minorcode=x%x\n", 13459 "event: majorcode=x%x, minorcode=x%x\n",
13412 bf_get_le32(lpfc_eqe_major_code, eqe), 13460 bf_get_le32(lpfc_eqe_major_code, eqe),
13413 bf_get_le32(lpfc_eqe_minor_code, eqe)); 13461 bf_get_le32(lpfc_eqe_minor_code, eqe));
13414 return 0; 13462 return;
13415 } 13463 }
13416 13464
13417 /* Get the reference to the corresponding CQ */ 13465 /* Get the reference to the corresponding CQ */
@@ -13448,9 +13496,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13448 13496
13449 /* Otherwise this is a Slow path event */ 13497 /* Otherwise this is a Slow path event */
13450 if (cq == NULL) { 13498 if (cq == NULL) {
13451 ecount = lpfc_sli4_sp_handle_eqe(phba, eqe, 13499 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
13452 phba->sli4_hba.hba_eq[qidx]); 13500 return;
13453 return ecount;
13454 } 13501 }
13455 13502
13456process_cq: 13503process_cq:
@@ -13459,26 +13506,61 @@ process_cq:
13459 "0368 Miss-matched fast-path completion " 13506 "0368 Miss-matched fast-path completion "
13460 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 13507 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
13461 cqid, cq->queue_id); 13508 cqid, cq->queue_id);
13462 return 0; 13509 return;
13463 } 13510 }
13464 13511
13465 /* Save EQ associated with this CQ */ 13512 /* Save EQ associated with this CQ */
13466 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx]; 13513 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13467 13514
13515 if (!queue_work(phba->wq, &cq->irqwork))
13516 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13517 "0363 Cannot schedule soft IRQ "
13518 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13519 cqid, cq->queue_id, smp_processor_id());
13520}
13521
13522/**
13523 * lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
13524 * @phba: Pointer to HBA context object.
13525 * @eqe: Pointer to fast-path event queue entry.
13526 *
13527 * This routine process a event queue entry from the fast-path event queue.
13528 * It will check the MajorCode and MinorCode to determine this is for a
13529 * completion event on a completion queue, if not, an error shall be logged
13530 * and just return. Otherwise, it will get to the corresponding completion
13531 * queue and process all the entries on the completion queue, rearm the
13532 * completion queue, and then return.
13533 **/
13534static void
13535lpfc_sli4_hba_process_cq(struct work_struct *work)
13536{
13537 struct lpfc_queue *cq =
13538 container_of(work, struct lpfc_queue, irqwork);
13539 struct lpfc_hba *phba = cq->phba;
13540 struct lpfc_cqe *cqe;
13541 bool workposted = false;
13542 int ccount = 0;
13543
13468 /* Process all the entries to the CQ */ 13544 /* Process all the entries to the CQ */
13469 while ((cqe = lpfc_sli4_cq_get(cq))) { 13545 while ((cqe = lpfc_sli4_cq_get(cq))) {
13546#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13547 if (phba->ktime_on)
13548 cq->isr_timestamp = ktime_get_ns();
13549 else
13550 cq->isr_timestamp = 0;
13551#endif
13470 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); 13552 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13471 if (!(++ecount % cq->entry_repost)) 13553 if (!(++ccount % cq->entry_repost))
13472 break; 13554 break;
13473 } 13555 }
13474 13556
13475 /* Track the max number of CQEs processed in 1 EQ */ 13557 /* Track the max number of CQEs processed in 1 EQ */
13476 if (ecount > cq->CQ_max_cqe) 13558 if (ccount > cq->CQ_max_cqe)
13477 cq->CQ_max_cqe = ecount; 13559 cq->CQ_max_cqe = ccount;
13478 cq->assoc_qp->EQ_cqe_cnt += ecount; 13560 cq->assoc_qp->EQ_cqe_cnt += ccount;
13479 13561
13480 /* Catch the no cq entry condition */ 13562 /* Catch the no cq entry condition */
13481 if (unlikely(ecount == 0)) 13563 if (unlikely(ccount == 0))
13482 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13564 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13483 "0369 No entry from fast-path completion " 13565 "0369 No entry from fast-path completion "
13484 "queue fcpcqid=%d\n", cq->queue_id); 13566 "queue fcpcqid=%d\n", cq->queue_id);
@@ -13489,8 +13571,6 @@ process_cq:
13489 /* wake up worker thread if there are works to be done */ 13571 /* wake up worker thread if there are works to be done */
13490 if (workposted) 13572 if (workposted)
13491 lpfc_worker_wake_up(phba); 13573 lpfc_worker_wake_up(phba);
13492
13493 return ecount;
13494} 13574}
13495 13575
13496static void 13576static void
@@ -13524,10 +13604,7 @@ static void
13524lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 13604lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13525{ 13605{
13526 struct lpfc_queue *cq; 13606 struct lpfc_queue *cq;
13527 struct lpfc_cqe *cqe;
13528 bool workposted = false;
13529 uint16_t cqid; 13607 uint16_t cqid;
13530 int ecount = 0;
13531 13608
13532 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 13609 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13533 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13610 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -13562,30 +13639,12 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13562 /* Save EQ associated with this CQ */ 13639 /* Save EQ associated with this CQ */
13563 cq->assoc_qp = phba->sli4_hba.fof_eq; 13640 cq->assoc_qp = phba->sli4_hba.fof_eq;
13564 13641
13565 /* Process all the entries to the OAS CQ */ 13642 /* CQ work will be processed on CPU affinitized to this IRQ */
13566 while ((cqe = lpfc_sli4_cq_get(cq))) { 13643 if (!queue_work(phba->wq, &cq->irqwork))
13567 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13568 if (!(++ecount % cq->entry_repost))
13569 break;
13570 }
13571
13572 /* Track the max number of CQEs processed in 1 EQ */
13573 if (ecount > cq->CQ_max_cqe)
13574 cq->CQ_max_cqe = ecount;
13575 cq->assoc_qp->EQ_cqe_cnt += ecount;
13576
13577 /* Catch the no cq entry condition */
13578 if (unlikely(ecount == 0))
13579 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13644 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13580 "9153 No entry from fast-path completion " 13645 "0367 Cannot schedule soft IRQ "
13581 "queue fcpcqid=%d\n", cq->queue_id); 13646 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13582 13647 cqid, cq->queue_id, smp_processor_id());
13583 /* In any case, flash and re-arm the CQ */
13584 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13585
13586 /* wake up worker thread if there are works to be done */
13587 if (workposted)
13588 lpfc_worker_wake_up(phba);
13589} 13648}
13590 13649
13591/** 13650/**
@@ -13711,7 +13770,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13711 struct lpfc_eqe *eqe; 13770 struct lpfc_eqe *eqe;
13712 unsigned long iflag; 13771 unsigned long iflag;
13713 int ecount = 0; 13772 int ecount = 0;
13714 int ccount = 0;
13715 int hba_eqidx; 13773 int hba_eqidx;
13716 13774
13717 /* Get the driver's phba structure from the dev_id */ 13775 /* Get the driver's phba structure from the dev_id */
@@ -13729,11 +13787,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13729 if (unlikely(!fpeq)) 13787 if (unlikely(!fpeq))
13730 return IRQ_NONE; 13788 return IRQ_NONE;
13731 13789
13732#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13733 if (phba->ktime_on)
13734 fpeq->isr_timestamp = ktime_get_ns();
13735#endif
13736
13737 if (lpfc_fcp_look_ahead) { 13790 if (lpfc_fcp_look_ahead) {
13738 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) 13791 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
13739 lpfc_sli4_eq_clr_intr(fpeq); 13792 lpfc_sli4_eq_clr_intr(fpeq);
@@ -13760,12 +13813,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13760 * Process all the event on FCP fast-path EQ 13813 * Process all the event on FCP fast-path EQ
13761 */ 13814 */
13762 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 13815 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
13763 if (eqe == NULL) 13816 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
13764 break; 13817 if (!(++ecount % fpeq->entry_repost))
13765
13766 ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
13767 if (!(++ecount % fpeq->entry_repost) ||
13768 ccount > LPFC_MAX_ISR_CQE)
13769 break; 13818 break;
13770 fpeq->EQ_processed++; 13819 fpeq->EQ_processed++;
13771 } 13820 }
@@ -13948,6 +13997,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13948 queue->entry_size = entry_size; 13997 queue->entry_size = entry_size;
13949 queue->entry_count = entry_count; 13998 queue->entry_count = entry_count;
13950 queue->phba = phba; 13999 queue->phba = phba;
14000 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14001 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
13951 14002
13952 /* entry_repost will be set during q creation */ 14003 /* entry_repost will be set during q creation */
13953 14004
@@ -17137,7 +17188,8 @@ exit:
17137 if (pcmd && pcmd->virt) 17188 if (pcmd && pcmd->virt)
17138 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17189 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17139 kfree(pcmd); 17190 kfree(pcmd);
17140 lpfc_sli_release_iocbq(phba, iocbq); 17191 if (iocbq)
17192 lpfc_sli_release_iocbq(phba, iocbq);
17141 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17193 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17142} 17194}
17143 17195
@@ -18691,6 +18743,8 @@ lpfc_drain_txq(struct lpfc_hba *phba)
18691 uint32_t txq_cnt = 0; 18743 uint32_t txq_cnt = 0;
18692 18744
18693 pring = lpfc_phba_elsring(phba); 18745 pring = lpfc_phba_elsring(phba);
18746 if (unlikely(!pring))
18747 return 0;
18694 18748
18695 spin_lock_irqsave(&pring->ring_lock, iflags); 18749 spin_lock_irqsave(&pring->ring_lock, iflags);
18696 list_for_each_entry(piocbq, &pring->txq, list) { 18750 list_for_each_entry(piocbq, &pring->txq, list) {
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 60200385fe00..13b8f4d4da34 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -158,7 +158,6 @@ struct lpfc_queue {
158#define LPFC_MQ_REPOST 8 158#define LPFC_MQ_REPOST 8
159#define LPFC_CQ_REPOST 64 159#define LPFC_CQ_REPOST 64
160#define LPFC_RQ_REPOST 64 160#define LPFC_RQ_REPOST 64
161#define LPFC_MAX_ISR_CQE 64
162#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */ 161#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
163 uint32_t queue_id; /* Queue ID assigned by the hardware */ 162 uint32_t queue_id; /* Queue ID assigned by the hardware */
164 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ 163 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
@@ -202,6 +201,9 @@ struct lpfc_queue {
202#define RQ_buf_posted q_cnt_3 201#define RQ_buf_posted q_cnt_3
203#define RQ_rcv_buf q_cnt_4 202#define RQ_rcv_buf q_cnt_4
204 203
204 struct work_struct irqwork;
205 struct work_struct spwork;
206
205 uint64_t isr_timestamp; 207 uint64_t isr_timestamp;
206 struct lpfc_queue *assoc_qp; 208 struct lpfc_queue *assoc_qp;
207 union sli4_qe qe[1]; /* array to index entries (must be last) */ 209 union sli4_qe qe[1]; /* array to index entries (must be last) */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6aa192b3e4bf..e0181371af09 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
20 * included with this package. * 20 * included with this package. *
21 *******************************************************************/ 21 *******************************************************************/
22 22
23#define LPFC_DRIVER_VERSION "11.4.0.3" 23#define LPFC_DRIVER_VERSION "11.4.0.4"
24#define LPFC_DRIVER_NAME "lpfc" 24#define LPFC_DRIVER_NAME "lpfc"
25 25
26/* Used for SLI 2/3 */ 26/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index c714482bf4c5..c9d33b1268cb 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -313,6 +313,15 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
313 goto error_out; 313 goto error_out;
314 } 314 }
315 315
316 /* NPIV is not supported if HBA has NVME enabled */
317 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
318 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
319 "3189 Create VPORT failed: "
320 "NPIV is not supported on NVME\n");
321 rc = VPORT_INVAL;
322 goto error_out;
323 }
324
316 vpi = lpfc_alloc_vpi(phba); 325 vpi = lpfc_alloc_vpi(phba);
317 if (vpi == 0) { 326 if (vpi == 0) {
318 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 327 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a6722c93a295..f5a36ccb8606 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
35/* 35/*
36 * MegaRAID SAS Driver meta data 36 * MegaRAID SAS Driver meta data
37 */ 37 */
38#define MEGASAS_VERSION "07.702.06.00-rc1" 38#define MEGASAS_VERSION "07.703.05.00-rc1"
39#define MEGASAS_RELDATE "June 21, 2017" 39#define MEGASAS_RELDATE "October 5, 2017"
40 40
41/* 41/*
42 * Device IDs 42 * Device IDs
@@ -57,6 +57,7 @@
57#define PCI_DEVICE_ID_LSI_CUTLASS_52 0x0052 57#define PCI_DEVICE_ID_LSI_CUTLASS_52 0x0052
58#define PCI_DEVICE_ID_LSI_CUTLASS_53 0x0053 58#define PCI_DEVICE_ID_LSI_CUTLASS_53 0x0053
59#define PCI_DEVICE_ID_LSI_VENTURA 0x0014 59#define PCI_DEVICE_ID_LSI_VENTURA 0x0014
60#define PCI_DEVICE_ID_LSI_CRUSADER 0x0015
60#define PCI_DEVICE_ID_LSI_HARPOON 0x0016 61#define PCI_DEVICE_ID_LSI_HARPOON 0x0016
61#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017 62#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017
62#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B 63#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B
@@ -186,16 +187,19 @@
186/* 187/*
187 * MFI command opcodes 188 * MFI command opcodes
188 */ 189 */
189#define MFI_CMD_INIT 0x00 190enum MFI_CMD_OP {
190#define MFI_CMD_LD_READ 0x01 191 MFI_CMD_INIT = 0x0,
191#define MFI_CMD_LD_WRITE 0x02 192 MFI_CMD_LD_READ = 0x1,
192#define MFI_CMD_LD_SCSI_IO 0x03 193 MFI_CMD_LD_WRITE = 0x2,
193#define MFI_CMD_PD_SCSI_IO 0x04 194 MFI_CMD_LD_SCSI_IO = 0x3,
194#define MFI_CMD_DCMD 0x05 195 MFI_CMD_PD_SCSI_IO = 0x4,
195#define MFI_CMD_ABORT 0x06 196 MFI_CMD_DCMD = 0x5,
196#define MFI_CMD_SMP 0x07 197 MFI_CMD_ABORT = 0x6,
197#define MFI_CMD_STP 0x08 198 MFI_CMD_SMP = 0x7,
198#define MFI_CMD_INVALID 0xff 199 MFI_CMD_STP = 0x8,
200 MFI_CMD_OP_COUNT,
201 MFI_CMD_INVALID = 0xff
202};
199 203
200#define MR_DCMD_CTRL_GET_INFO 0x01010000 204#define MR_DCMD_CTRL_GET_INFO 0x01010000
201#define MR_DCMD_LD_GET_LIST 0x03010000 205#define MR_DCMD_LD_GET_LIST 0x03010000
@@ -1504,6 +1508,15 @@ enum FW_BOOT_CONTEXT {
1504 1508
1505#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000 1509#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
1506 1510
1511#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25)
1512
1513enum MR_ADAPTER_TYPE {
1514 MFI_SERIES = 1,
1515 THUNDERBOLT_SERIES = 2,
1516 INVADER_SERIES = 3,
1517 VENTURA_SERIES = 4,
1518};
1519
1507/* 1520/*
1508* register set for both 1068 and 1078 controllers 1521* register set for both 1068 and 1078 controllers
1509* structure extended for 1078 registers 1522* structure extended for 1078 registers
@@ -1617,7 +1630,8 @@ union megasas_sgl_frame {
1617typedef union _MFI_CAPABILITIES { 1630typedef union _MFI_CAPABILITIES {
1618 struct { 1631 struct {
1619#if defined(__BIG_ENDIAN_BITFIELD) 1632#if defined(__BIG_ENDIAN_BITFIELD)
1620 u32 reserved:19; 1633 u32 reserved:18;
1634 u32 support_64bit_mode:1;
1621 u32 support_pd_map_target_id:1; 1635 u32 support_pd_map_target_id:1;
1622 u32 support_qd_throttling:1; 1636 u32 support_qd_throttling:1;
1623 u32 support_fp_rlbypass:1; 1637 u32 support_fp_rlbypass:1;
@@ -1645,7 +1659,8 @@ typedef union _MFI_CAPABILITIES {
1645 u32 support_fp_rlbypass:1; 1659 u32 support_fp_rlbypass:1;
1646 u32 support_qd_throttling:1; 1660 u32 support_qd_throttling:1;
1647 u32 support_pd_map_target_id:1; 1661 u32 support_pd_map_target_id:1;
1648 u32 reserved:19; 1662 u32 support_64bit_mode:1;
1663 u32 reserved:18;
1649#endif 1664#endif
1650 } mfi_capabilities; 1665 } mfi_capabilities;
1651 __le32 reg; 1666 __le32 reg;
@@ -2114,6 +2129,19 @@ struct megasas_instance {
2114 2129
2115 u32 *crash_dump_buf; 2130 u32 *crash_dump_buf;
2116 dma_addr_t crash_dump_h; 2131 dma_addr_t crash_dump_h;
2132
2133 struct MR_PD_LIST *pd_list_buf;
2134 dma_addr_t pd_list_buf_h;
2135
2136 struct megasas_ctrl_info *ctrl_info_buf;
2137 dma_addr_t ctrl_info_buf_h;
2138
2139 struct MR_LD_LIST *ld_list_buf;
2140 dma_addr_t ld_list_buf_h;
2141
2142 struct MR_LD_TARGETID_LIST *ld_targetid_list_buf;
2143 dma_addr_t ld_targetid_list_buf_h;
2144
2117 void *crash_buf[MAX_CRASH_DUMP_SIZE]; 2145 void *crash_buf[MAX_CRASH_DUMP_SIZE];
2118 unsigned int fw_crash_buffer_size; 2146 unsigned int fw_crash_buffer_size;
2119 unsigned int fw_crash_state; 2147 unsigned int fw_crash_state;
@@ -2210,8 +2238,6 @@ struct megasas_instance {
2210 2238
2211 /* Ptr to hba specific information */ 2239 /* Ptr to hba specific information */
2212 void *ctrl_context; 2240 void *ctrl_context;
2213 u32 ctrl_context_pages;
2214 struct megasas_ctrl_info *ctrl_info;
2215 unsigned int msix_vectors; 2241 unsigned int msix_vectors;
2216 struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES]; 2242 struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
2217 u64 map_id; 2243 u64 map_id;
@@ -2236,12 +2262,13 @@ struct megasas_instance {
2236 bool dev_handle; 2262 bool dev_handle;
2237 bool fw_sync_cache_support; 2263 bool fw_sync_cache_support;
2238 u32 mfi_frame_size; 2264 u32 mfi_frame_size;
2239 bool is_ventura;
2240 bool msix_combined; 2265 bool msix_combined;
2241 u16 max_raid_mapsize; 2266 u16 max_raid_mapsize;
2242 /* preffered count to send as LDIO irrspective of FP capable.*/ 2267 /* preffered count to send as LDIO irrspective of FP capable.*/
2243 u8 r1_ldio_hint_default; 2268 u8 r1_ldio_hint_default;
2244 u32 nvme_page_size; 2269 u32 nvme_page_size;
2270 u8 adapter_type;
2271 bool consistent_mask_64bit;
2245}; 2272};
2246struct MR_LD_VF_MAP { 2273struct MR_LD_VF_MAP {
2247 u32 size; 2274 u32 size;
@@ -2488,4 +2515,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
2488u32 mega_mod64(u64 dividend, u32 divisor); 2515u32 mega_mod64(u64 dividend, u32 divisor);
2489int megasas_alloc_fusion_context(struct megasas_instance *instance); 2516int megasas_alloc_fusion_context(struct megasas_instance *instance);
2490void megasas_free_fusion_context(struct megasas_instance *instance); 2517void megasas_free_fusion_context(struct megasas_instance *instance);
2518void megasas_set_dma_settings(struct megasas_instance *instance,
2519 struct megasas_dcmd_frame *dcmd,
2520 dma_addr_t dma_addr, u32 dma_len);
2491#endif /*LSI_MEGARAID_SAS_H */ 2521#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a36e18156e49..cc54bdb5c712 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -161,6 +161,7 @@ static struct pci_device_id megasas_pci_table[] = {
161 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, 161 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
162 /* VENTURA */ 162 /* VENTURA */
163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, 163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, 165 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
165 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, 166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, 167 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
@@ -205,6 +206,43 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
205void megasas_fusion_ocr_wq(struct work_struct *work); 206void megasas_fusion_ocr_wq(struct work_struct *work);
206static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 207static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
207 int initial); 208 int initial);
209static int
210megasas_set_dma_mask(struct megasas_instance *instance);
211static int
212megasas_alloc_ctrl_mem(struct megasas_instance *instance);
213static inline void
214megasas_free_ctrl_mem(struct megasas_instance *instance);
215static inline int
216megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
217static inline void
218megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
219static inline void
220megasas_init_ctrl_params(struct megasas_instance *instance);
221
222/**
223 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
224 * @instance: Adapter soft state
225 * @dcmd: DCMD frame inside MFI command
226 * @dma_addr: DMA address of buffer to be passed to FW
227 * @dma_len: Length of DMA buffer to be passed to FW
228 * @return: void
229 */
230void megasas_set_dma_settings(struct megasas_instance *instance,
231 struct megasas_dcmd_frame *dcmd,
232 dma_addr_t dma_addr, u32 dma_len)
233{
234 if (instance->consistent_mask_64bit) {
235 dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
236 dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
237 dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
238
239 } else {
240 dcmd->sgl.sge32[0].phys_addr =
241 cpu_to_le32(lower_32_bits(dma_addr));
242 dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
243 dcmd->flags = cpu_to_le16(dcmd->flags);
244 }
245}
208 246
209void 247void
210megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 248megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -2023,7 +2061,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
2023 msleep(1000); 2061 msleep(1000);
2024 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 2062 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2025 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2063 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2026 (instance->ctrl_context)) { 2064 (instance->adapter_type != MFI_SERIES)) {
2027 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 2065 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2028 /* Flush */ 2066 /* Flush */
2029 readl(&instance->reg_set->doorbell); 2067 readl(&instance->reg_set->doorbell);
@@ -2485,13 +2523,15 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2485 dcmd->pad_0 = 0; 2523 dcmd->pad_0 = 0;
2486 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2524 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2487 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); 2525 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2488 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h); 2526
2489 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); 2527 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2528 sizeof(struct MR_CTRL_HB_HOST_MEM));
2490 2529
2491 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", 2530 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2492 instance->host->host_no); 2531 instance->host->host_no);
2493 2532
2494 if (instance->ctrl_context && !instance->mask_interrupts) 2533 if ((instance->adapter_type != MFI_SERIES) &&
2534 !instance->mask_interrupts)
2495 retval = megasas_issue_blocked_cmd(instance, cmd, 2535 retval = megasas_issue_blocked_cmd(instance, cmd,
2496 MEGASAS_ROUTINE_WAIT_TIME_VF); 2536 MEGASAS_ROUTINE_WAIT_TIME_VF);
2497 else 2537 else
@@ -2787,7 +2827,9 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2787 /* 2827 /*
2788 * First wait for all commands to complete 2828 * First wait for all commands to complete
2789 */ 2829 */
2790 if (instance->ctrl_context) { 2830 if (instance->adapter_type == MFI_SERIES) {
2831 ret = megasas_generic_reset(scmd);
2832 } else {
2791 struct megasas_cmd_fusion *cmd; 2833 struct megasas_cmd_fusion *cmd;
2792 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; 2834 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2793 if (cmd) 2835 if (cmd)
@@ -2795,8 +2837,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2795 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); 2837 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
2796 ret = megasas_reset_fusion(scmd->device->host, 2838 ret = megasas_reset_fusion(scmd->device->host,
2797 SCSIIO_TIMEOUT_OCR); 2839 SCSIIO_TIMEOUT_OCR);
2798 } else 2840 }
2799 ret = megasas_generic_reset(scmd);
2800 2841
2801 return ret; 2842 return ret;
2802} 2843}
@@ -2813,7 +2854,7 @@ static int megasas_task_abort(struct scsi_cmnd *scmd)
2813 2854
2814 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2855 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2815 2856
2816 if (instance->ctrl_context) 2857 if (instance->adapter_type != MFI_SERIES)
2817 ret = megasas_task_abort_fusion(scmd); 2858 ret = megasas_task_abort_fusion(scmd);
2818 else { 2859 else {
2819 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); 2860 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
@@ -2835,7 +2876,7 @@ static int megasas_reset_target(struct scsi_cmnd *scmd)
2835 2876
2836 instance = (struct megasas_instance *)scmd->device->host->hostdata; 2877 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2837 2878
2838 if (instance->ctrl_context) 2879 if (instance->adapter_type != MFI_SERIES)
2839 ret = megasas_reset_target_fusion(scmd); 2880 ret = megasas_reset_target_fusion(scmd);
2840 else { 2881 else {
2841 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); 2882 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
@@ -3280,6 +3321,9 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3280 3321
3281 case MFI_CMD_SMP: 3322 case MFI_CMD_SMP:
3282 case MFI_CMD_STP: 3323 case MFI_CMD_STP:
3324 megasas_complete_int_cmd(instance, cmd);
3325 break;
3326
3283 case MFI_CMD_DCMD: 3327 case MFI_CMD_DCMD:
3284 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 3328 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3285 /* Check for LD map update */ 3329 /* Check for LD map update */
@@ -3366,6 +3410,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3366 default: 3410 default:
3367 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n", 3411 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3368 hdr->cmd); 3412 hdr->cmd);
3413 megasas_complete_int_cmd(instance, cmd);
3369 break; 3414 break;
3370 } 3415 }
3371} 3416}
@@ -3712,7 +3757,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3712 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3757 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3713 (instance->pdev->device == 3758 (instance->pdev->device ==
3714 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3759 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3715 (instance->ctrl_context)) 3760 (instance->adapter_type != MFI_SERIES))
3716 writel( 3761 writel(
3717 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 3762 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3718 &instance->reg_set->doorbell); 3763 &instance->reg_set->doorbell);
@@ -3730,7 +3775,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3730 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3775 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3731 (instance->pdev->device == 3776 (instance->pdev->device ==
3732 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3777 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3733 (instance->ctrl_context)) 3778 (instance->adapter_type != MFI_SERIES))
3734 writel(MFI_INIT_HOTPLUG, 3779 writel(MFI_INIT_HOTPLUG,
3735 &instance->reg_set->doorbell); 3780 &instance->reg_set->doorbell);
3736 else 3781 else
@@ -3750,11 +3795,11 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3750 PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 3795 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3751 (instance->pdev->device == 3796 (instance->pdev->device ==
3752 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 3797 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3753 (instance->ctrl_context)) { 3798 (instance->adapter_type != MFI_SERIES)) {
3754 writel(MFI_RESET_FLAGS, 3799 writel(MFI_RESET_FLAGS,
3755 &instance->reg_set->doorbell); 3800 &instance->reg_set->doorbell);
3756 3801
3757 if (instance->ctrl_context) { 3802 if (instance->adapter_type != MFI_SERIES) {
3758 for (i = 0; i < (10 * 1000); i += 20) { 3803 for (i = 0; i < (10 * 1000); i += 20) {
3759 if (readl( 3804 if (readl(
3760 &instance-> 3805 &instance->
@@ -3921,7 +3966,8 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
3921 * max_sge_sz = 12 byte (sizeof megasas_sge64) 3966 * max_sge_sz = 12 byte (sizeof megasas_sge64)
3922 * Total 192 byte (3 MFI frame of 64 byte) 3967 * Total 192 byte (3 MFI frame of 64 byte)
3923 */ 3968 */
3924 frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1); 3969 frame_count = (instance->adapter_type == MFI_SERIES) ?
3970 (15 + 1) : (3 + 1);
3925 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; 3971 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
3926 /* 3972 /*
3927 * Use DMA pool facility provided by PCI layer 3973 * Use DMA pool facility provided by PCI layer
@@ -3976,7 +4022,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
3976 memset(cmd->frame, 0, instance->mfi_frame_size); 4022 memset(cmd->frame, 0, instance->mfi_frame_size);
3977 cmd->frame->io.context = cpu_to_le32(cmd->index); 4023 cmd->frame->io.context = cpu_to_le32(cmd->index);
3978 cmd->frame->io.pad_0 = 0; 4024 cmd->frame->io.pad_0 = 0;
3979 if (!instance->ctrl_context && reset_devices) 4025 if ((instance->adapter_type == MFI_SERIES) && reset_devices)
3980 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 4026 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
3981 } 4027 }
3982 4028
@@ -4030,9 +4076,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
4030 int j; 4076 int j;
4031 u16 max_cmd; 4077 u16 max_cmd;
4032 struct megasas_cmd *cmd; 4078 struct megasas_cmd *cmd;
4033 struct fusion_context *fusion;
4034 4079
4035 fusion = instance->ctrl_context;
4036 max_cmd = instance->max_mfi_cmds; 4080 max_cmd = instance->max_mfi_cmds;
4037 4081
4038 /* 4082 /*
@@ -4096,7 +4140,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
4096inline int 4140inline int
4097dcmd_timeout_ocr_possible(struct megasas_instance *instance) { 4141dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4098 4142
4099 if (!instance->ctrl_context) 4143 if (instance->adapter_type == MFI_SERIES)
4100 return KILL_ADAPTER; 4144 return KILL_ADAPTER;
4101 else if (instance->unload || 4145 else if (instance->unload ||
4102 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) 4146 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
@@ -4132,15 +4176,17 @@ megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4132 dcmd->cmd = MFI_CMD_DCMD; 4176 dcmd->cmd = MFI_CMD_DCMD;
4133 dcmd->cmd_status = 0xFF; 4177 dcmd->cmd_status = 0xFF;
4134 dcmd->sge_count = 1; 4178 dcmd->sge_count = 1;
4135 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4179 dcmd->flags = MFI_FRAME_DIR_READ;
4136 dcmd->timeout = 0; 4180 dcmd->timeout = 0;
4137 dcmd->pad_0 = 0; 4181 dcmd->pad_0 = 0;
4138 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO)); 4182 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4139 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO); 4183 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4140 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
4141 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
4142 4184
4143 if (instance->ctrl_context && !instance->mask_interrupts) 4185 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4186 sizeof(struct MR_PD_INFO));
4187
4188 if ((instance->adapter_type != MFI_SERIES) &&
4189 !instance->mask_interrupts)
4144 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4190 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4145 else 4191 else
4146 ret = megasas_issue_polled(instance, cmd); 4192 ret = megasas_issue_polled(instance, cmd);
@@ -4203,6 +4249,9 @@ megasas_get_pd_list(struct megasas_instance *instance)
4203 return ret; 4249 return ret;
4204 } 4250 }
4205 4251
4252 ci = instance->pd_list_buf;
4253 ci_h = instance->pd_list_buf_h;
4254
4206 cmd = megasas_get_cmd(instance); 4255 cmd = megasas_get_cmd(instance);
4207 4256
4208 if (!cmd) { 4257 if (!cmd) {
@@ -4212,15 +4261,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
4212 4261
4213 dcmd = &cmd->frame->dcmd; 4262 dcmd = &cmd->frame->dcmd;
4214 4263
4215 ci = pci_alloc_consistent(instance->pdev,
4216 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
4217
4218 if (!ci) {
4219 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
4220 megasas_return_cmd(instance, cmd);
4221 return -ENOMEM;
4222 }
4223
4224 memset(ci, 0, sizeof(*ci)); 4264 memset(ci, 0, sizeof(*ci));
4225 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4265 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4226 4266
@@ -4229,15 +4269,17 @@ megasas_get_pd_list(struct megasas_instance *instance)
4229 dcmd->cmd = MFI_CMD_DCMD; 4269 dcmd->cmd = MFI_CMD_DCMD;
4230 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4270 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4231 dcmd->sge_count = 1; 4271 dcmd->sge_count = 1;
4232 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4272 dcmd->flags = MFI_FRAME_DIR_READ;
4233 dcmd->timeout = 0; 4273 dcmd->timeout = 0;
4234 dcmd->pad_0 = 0; 4274 dcmd->pad_0 = 0;
4235 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 4275 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4236 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 4276 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4237 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4238 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4239 4277
4240 if (instance->ctrl_context && !instance->mask_interrupts) 4278 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4279 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4280
4281 if ((instance->adapter_type != MFI_SERIES) &&
4282 !instance->mask_interrupts)
4241 ret = megasas_issue_blocked_cmd(instance, cmd, 4283 ret = megasas_issue_blocked_cmd(instance, cmd,
4242 MFI_IO_TIMEOUT_SECS); 4284 MFI_IO_TIMEOUT_SECS);
4243 else 4285 else
@@ -4248,7 +4290,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
4248 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " 4290 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4249 "failed/not supported by firmware\n"); 4291 "failed/not supported by firmware\n");
4250 4292
4251 if (instance->ctrl_context) 4293 if (instance->adapter_type != MFI_SERIES)
4252 megaraid_sas_kill_hba(instance); 4294 megaraid_sas_kill_hba(instance);
4253 else 4295 else
4254 instance->pd_list_not_supported = 1; 4296 instance->pd_list_not_supported = 1;
@@ -4305,10 +4347,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
4305 4347
4306 } 4348 }
4307 4349
4308 pci_free_consistent(instance->pdev,
4309 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
4310 ci, ci_h);
4311
4312 if (ret != DCMD_TIMEOUT) 4350 if (ret != DCMD_TIMEOUT)
4313 megasas_return_cmd(instance, cmd); 4351 megasas_return_cmd(instance, cmd);
4314 4352
@@ -4334,6 +4372,9 @@ megasas_get_ld_list(struct megasas_instance *instance)
4334 dma_addr_t ci_h = 0; 4372 dma_addr_t ci_h = 0;
4335 u32 ld_count; 4373 u32 ld_count;
4336 4374
4375 ci = instance->ld_list_buf;
4376 ci_h = instance->ld_list_buf_h;
4377
4337 cmd = megasas_get_cmd(instance); 4378 cmd = megasas_get_cmd(instance);
4338 4379
4339 if (!cmd) { 4380 if (!cmd) {
@@ -4343,16 +4384,6 @@ megasas_get_ld_list(struct megasas_instance *instance)
4343 4384
4344 dcmd = &cmd->frame->dcmd; 4385 dcmd = &cmd->frame->dcmd;
4345 4386
4346 ci = pci_alloc_consistent(instance->pdev,
4347 sizeof(struct MR_LD_LIST),
4348 &ci_h);
4349
4350 if (!ci) {
4351 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
4352 megasas_return_cmd(instance, cmd);
4353 return -ENOMEM;
4354 }
4355
4356 memset(ci, 0, sizeof(*ci)); 4387 memset(ci, 0, sizeof(*ci));
4357 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4388 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4358 4389
@@ -4361,15 +4392,17 @@ megasas_get_ld_list(struct megasas_instance *instance)
4361 dcmd->cmd = MFI_CMD_DCMD; 4392 dcmd->cmd = MFI_CMD_DCMD;
4362 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4393 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4363 dcmd->sge_count = 1; 4394 dcmd->sge_count = 1;
4364 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4395 dcmd->flags = MFI_FRAME_DIR_READ;
4365 dcmd->timeout = 0; 4396 dcmd->timeout = 0;
4366 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 4397 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4367 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 4398 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4368 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4369 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
4370 dcmd->pad_0 = 0; 4399 dcmd->pad_0 = 0;
4371 4400
4372 if (instance->ctrl_context && !instance->mask_interrupts) 4401 megasas_set_dma_settings(instance, dcmd, ci_h,
4402 sizeof(struct MR_LD_LIST));
4403
4404 if ((instance->adapter_type != MFI_SERIES) &&
4405 !instance->mask_interrupts)
4373 ret = megasas_issue_blocked_cmd(instance, cmd, 4406 ret = megasas_issue_blocked_cmd(instance, cmd,
4374 MFI_IO_TIMEOUT_SECS); 4407 MFI_IO_TIMEOUT_SECS);
4375 else 4408 else
@@ -4423,8 +4456,6 @@ megasas_get_ld_list(struct megasas_instance *instance)
4423 break; 4456 break;
4424 } 4457 }
4425 4458
4426 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
4427
4428 if (ret != DCMD_TIMEOUT) 4459 if (ret != DCMD_TIMEOUT)
4429 megasas_return_cmd(instance, cmd); 4460 megasas_return_cmd(instance, cmd);
4430 4461
@@ -4450,6 +4481,9 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4450 dma_addr_t ci_h = 0; 4481 dma_addr_t ci_h = 0;
4451 u32 tgtid_count; 4482 u32 tgtid_count;
4452 4483
4484 ci = instance->ld_targetid_list_buf;
4485 ci_h = instance->ld_targetid_list_buf_h;
4486
4453 cmd = megasas_get_cmd(instance); 4487 cmd = megasas_get_cmd(instance);
4454 4488
4455 if (!cmd) { 4489 if (!cmd) {
@@ -4460,16 +4494,6 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4460 4494
4461 dcmd = &cmd->frame->dcmd; 4495 dcmd = &cmd->frame->dcmd;
4462 4496
4463 ci = pci_alloc_consistent(instance->pdev,
4464 sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
4465
4466 if (!ci) {
4467 dev_warn(&instance->pdev->dev,
4468 "Failed to alloc mem for ld_list_query\n");
4469 megasas_return_cmd(instance, cmd);
4470 return -ENOMEM;
4471 }
4472
4473 memset(ci, 0, sizeof(*ci)); 4497 memset(ci, 0, sizeof(*ci));
4474 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4498 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4475 4499
@@ -4480,15 +4504,17 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4480 dcmd->cmd = MFI_CMD_DCMD; 4504 dcmd->cmd = MFI_CMD_DCMD;
4481 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4505 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4482 dcmd->sge_count = 1; 4506 dcmd->sge_count = 1;
4483 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4507 dcmd->flags = MFI_FRAME_DIR_READ;
4484 dcmd->timeout = 0; 4508 dcmd->timeout = 0;
4485 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 4509 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4486 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 4510 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4487 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4488 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4489 dcmd->pad_0 = 0; 4511 dcmd->pad_0 = 0;
4490 4512
4491 if (instance->ctrl_context && !instance->mask_interrupts) 4513 megasas_set_dma_settings(instance, dcmd, ci_h,
4514 sizeof(struct MR_LD_TARGETID_LIST));
4515
4516 if ((instance->adapter_type != MFI_SERIES) &&
4517 !instance->mask_interrupts)
4492 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4518 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4493 else 4519 else
4494 ret = megasas_issue_polled(instance, cmd); 4520 ret = megasas_issue_polled(instance, cmd);
@@ -4539,9 +4565,6 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4539 break; 4565 break;
4540 } 4566 }
4541 4567
4542 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
4543 ci, ci_h);
4544
4545 if (ret != DCMD_TIMEOUT) 4568 if (ret != DCMD_TIMEOUT)
4546 megasas_return_cmd(instance, cmd); 4569 megasas_return_cmd(instance, cmd);
4547 4570
@@ -4563,9 +4586,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4563 return; 4586 return;
4564 4587
4565 instance->supportmax256vd = 4588 instance->supportmax256vd =
4566 instance->ctrl_info->adapterOperations3.supportMaxExtLDs; 4589 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
4567 /* Below is additional check to address future FW enhancement */ 4590 /* Below is additional check to address future FW enhancement */
4568 if (instance->ctrl_info->max_lds > 64) 4591 if (instance->ctrl_info_buf->max_lds > 64)
4569 instance->supportmax256vd = 1; 4592 instance->supportmax256vd = 1;
4570 4593
4571 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 4594 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
@@ -4623,10 +4646,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4623 struct megasas_cmd *cmd; 4646 struct megasas_cmd *cmd;
4624 struct megasas_dcmd_frame *dcmd; 4647 struct megasas_dcmd_frame *dcmd;
4625 struct megasas_ctrl_info *ci; 4648 struct megasas_ctrl_info *ci;
4626 struct megasas_ctrl_info *ctrl_info;
4627 dma_addr_t ci_h = 0; 4649 dma_addr_t ci_h = 0;
4628 4650
4629 ctrl_info = instance->ctrl_info; 4651 ci = instance->ctrl_info_buf;
4652 ci_h = instance->ctrl_info_buf_h;
4630 4653
4631 cmd = megasas_get_cmd(instance); 4654 cmd = megasas_get_cmd(instance);
4632 4655
@@ -4637,45 +4660,37 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4637 4660
4638 dcmd = &cmd->frame->dcmd; 4661 dcmd = &cmd->frame->dcmd;
4639 4662
4640 ci = pci_alloc_consistent(instance->pdev,
4641 sizeof(struct megasas_ctrl_info), &ci_h);
4642
4643 if (!ci) {
4644 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
4645 megasas_return_cmd(instance, cmd);
4646 return -ENOMEM;
4647 }
4648
4649 memset(ci, 0, sizeof(*ci)); 4663 memset(ci, 0, sizeof(*ci));
4650 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4664 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4651 4665
4652 dcmd->cmd = MFI_CMD_DCMD; 4666 dcmd->cmd = MFI_CMD_DCMD;
4653 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4667 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4654 dcmd->sge_count = 1; 4668 dcmd->sge_count = 1;
4655 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4669 dcmd->flags = MFI_FRAME_DIR_READ;
4656 dcmd->timeout = 0; 4670 dcmd->timeout = 0;
4657 dcmd->pad_0 = 0; 4671 dcmd->pad_0 = 0;
4658 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4672 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4659 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 4673 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4660 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4661 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4662 dcmd->mbox.b[0] = 1; 4674 dcmd->mbox.b[0] = 1;
4663 4675
4664 if (instance->ctrl_context && !instance->mask_interrupts) 4676 megasas_set_dma_settings(instance, dcmd, ci_h,
4677 sizeof(struct megasas_ctrl_info));
4678
4679 if ((instance->adapter_type != MFI_SERIES) &&
4680 !instance->mask_interrupts)
4665 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4681 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4666 else 4682 else
4667 ret = megasas_issue_polled(instance, cmd); 4683 ret = megasas_issue_polled(instance, cmd);
4668 4684
4669 switch (ret) { 4685 switch (ret) {
4670 case DCMD_SUCCESS: 4686 case DCMD_SUCCESS:
4671 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4672 /* Save required controller information in 4687 /* Save required controller information in
4673 * CPU endianness format. 4688 * CPU endianness format.
4674 */ 4689 */
4675 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); 4690 le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
4676 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); 4691 le32_to_cpus((u32 *)&ci->adapterOperations2);
4677 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); 4692 le32_to_cpus((u32 *)&ci->adapterOperations3);
4678 le16_to_cpus((u16 *)&ctrl_info->adapter_operations4); 4693 le16_to_cpus((u16 *)&ci->adapter_operations4);
4679 4694
4680 /* Update the latest Ext VD info. 4695 /* Update the latest Ext VD info.
4681 * From Init path, store current firmware details. 4696 * From Init path, store current firmware details.
@@ -4684,21 +4699,21 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4684 */ 4699 */
4685 megasas_update_ext_vd_details(instance); 4700 megasas_update_ext_vd_details(instance);
4686 instance->use_seqnum_jbod_fp = 4701 instance->use_seqnum_jbod_fp =
4687 ctrl_info->adapterOperations3.useSeqNumJbodFP; 4702 ci->adapterOperations3.useSeqNumJbodFP;
4688 instance->support_morethan256jbod = 4703 instance->support_morethan256jbod =
4689 ctrl_info->adapter_operations4.support_pd_map_target_id; 4704 ci->adapter_operations4.support_pd_map_target_id;
4690 4705
4691 /*Check whether controller is iMR or MR */ 4706 /*Check whether controller is iMR or MR */
4692 instance->is_imr = (ctrl_info->memory_size ? 0 : 1); 4707 instance->is_imr = (ci->memory_size ? 0 : 1);
4693 dev_info(&instance->pdev->dev, 4708 dev_info(&instance->pdev->dev,
4694 "controller type\t: %s(%dMB)\n", 4709 "controller type\t: %s(%dMB)\n",
4695 instance->is_imr ? "iMR" : "MR", 4710 instance->is_imr ? "iMR" : "MR",
4696 le16_to_cpu(ctrl_info->memory_size)); 4711 le16_to_cpu(ci->memory_size));
4697 4712
4698 instance->disableOnlineCtrlReset = 4713 instance->disableOnlineCtrlReset =
4699 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 4714 ci->properties.OnOffProperties.disableOnlineCtrlReset;
4700 instance->secure_jbod_support = 4715 instance->secure_jbod_support =
4701 ctrl_info->adapterOperations3.supportSecurityonJBOD; 4716 ci->adapterOperations3.supportSecurityonJBOD;
4702 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n", 4717 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4703 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled"); 4718 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4704 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n", 4719 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
@@ -4726,9 +4741,6 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
4726 4741
4727 } 4742 }
4728 4743
4729 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4730 ci, ci_h);
4731
4732 megasas_return_cmd(instance, cmd); 4744 megasas_return_cmd(instance, cmd);
4733 4745
4734 4746
@@ -4772,15 +4784,17 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
4772 dcmd->cmd = MFI_CMD_DCMD; 4784 dcmd->cmd = MFI_CMD_DCMD;
4773 dcmd->cmd_status = MFI_STAT_INVALID_STATUS; 4785 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4774 dcmd->sge_count = 1; 4786 dcmd->sge_count = 1;
4775 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 4787 dcmd->flags = MFI_FRAME_DIR_NONE;
4776 dcmd->timeout = 0; 4788 dcmd->timeout = 0;
4777 dcmd->pad_0 = 0; 4789 dcmd->pad_0 = 0;
4778 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE); 4790 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4779 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS); 4791 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4780 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
4781 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4782 4792
4783 if (instance->ctrl_context && !instance->mask_interrupts) 4793 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
4794 CRASH_DMA_BUF_SIZE);
4795
4796 if ((instance->adapter_type != MFI_SERIES) &&
4797 !instance->mask_interrupts)
4784 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); 4798 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4785 else 4799 else
4786 ret = megasas_issue_polled(instance, cmd); 4800 ret = megasas_issue_polled(instance, cmd);
@@ -5088,7 +5102,7 @@ megasas_setup_jbod_map(struct megasas_instance *instance)
5088 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); 5102 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5089 5103
5090 if (reset_devices || !fusion || 5104 if (reset_devices || !fusion ||
5091 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) { 5105 !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
5092 dev_info(&instance->pdev->dev, 5106 dev_info(&instance->pdev->dev,
5093 "Jbod map is not supported %s %d\n", 5107 "Jbod map is not supported %s %d\n",
5094 __func__, __LINE__); 5108 __func__, __LINE__);
@@ -5167,7 +5181,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5167 5181
5168 reg_set = instance->reg_set; 5182 reg_set = instance->reg_set;
5169 5183
5170 if (fusion) 5184 if (instance->adapter_type != MFI_SERIES)
5171 instance->instancet = &megasas_instance_template_fusion; 5185 instance->instancet = &megasas_instance_template_fusion;
5172 else { 5186 else {
5173 switch (instance->pdev->device) { 5187 switch (instance->pdev->device) {
@@ -5208,7 +5222,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
5208 goto fail_ready_state; 5222 goto fail_ready_state;
5209 } 5223 }
5210 5224
5211 if (instance->is_ventura) { 5225 megasas_init_ctrl_params(instance);
5226
5227 if (megasas_set_dma_mask(instance))
5228 goto fail_ready_state;
5229
5230 if (megasas_alloc_ctrl_mem(instance))
5231 goto fail_alloc_dma_buf;
5232
5233 if (megasas_alloc_ctrl_dma_buffers(instance))
5234 goto fail_alloc_dma_buf;
5235
5236 fusion = instance->ctrl_context;
5237
5238 if (instance->adapter_type == VENTURA_SERIES) {
5212 scratch_pad_3 = 5239 scratch_pad_3 =
5213 readl(&instance->reg_set->outbound_scratch_pad_3); 5240 readl(&instance->reg_set->outbound_scratch_pad_3);
5214 instance->max_raid_mapsize = ((scratch_pad_3 >> 5241 instance->max_raid_mapsize = ((scratch_pad_3 >>
@@ -5226,7 +5253,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
5226 (&instance->reg_set->outbound_scratch_pad_2); 5253 (&instance->reg_set->outbound_scratch_pad_2);
5227 /* Check max MSI-X vectors */ 5254 /* Check max MSI-X vectors */
5228 if (fusion) { 5255 if (fusion) {
5229 if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/ 5256 if (instance->adapter_type == THUNDERBOLT_SERIES) {
5257 /* Thunderbolt Series*/
5230 instance->msix_vectors = (scratch_pad_2 5258 instance->msix_vectors = (scratch_pad_2
5231 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 5259 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5232 fw_msix_count = instance->msix_vectors; 5260 fw_msix_count = instance->msix_vectors;
@@ -5301,11 +5329,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
5301 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, 5329 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5302 (unsigned long)instance); 5330 (unsigned long)instance);
5303 5331
5304 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
5305 GFP_KERNEL);
5306 if (instance->ctrl_info == NULL)
5307 goto fail_init_adapter;
5308
5309 /* 5332 /*
5310 * Below are default value for legacy Firmware. 5333 * Below are default value for legacy Firmware.
5311 * non-fusion based controllers 5334 * non-fusion based controllers
@@ -5316,7 +5339,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5316 if (instance->instancet->init_adapter(instance)) 5339 if (instance->instancet->init_adapter(instance))
5317 goto fail_init_adapter; 5340 goto fail_init_adapter;
5318 5341
5319 if (instance->is_ventura) { 5342 if (instance->adapter_type == VENTURA_SERIES) {
5320 scratch_pad_4 = 5343 scratch_pad_4 =
5321 readl(&instance->reg_set->outbound_scratch_pad_4); 5344 readl(&instance->reg_set->outbound_scratch_pad_4);
5322 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= 5345 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
@@ -5352,7 +5375,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5352 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 5375 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5353 5376
5354 /* stream detection initialization */ 5377 /* stream detection initialization */
5355 if (instance->is_ventura && fusion) { 5378 if (instance->adapter_type == VENTURA_SERIES) {
5356 fusion->stream_detect_by_ld = 5379 fusion->stream_detect_by_ld =
5357 kzalloc(sizeof(struct LD_STREAM_DETECT *) 5380 kzalloc(sizeof(struct LD_STREAM_DETECT *)
5358 * MAX_LOGICAL_DRIVES_EXT, 5381 * MAX_LOGICAL_DRIVES_EXT,
@@ -5394,7 +5417,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5394 * to calculate max_sectors_1. So the number ended up as zero always. 5417 * to calculate max_sectors_1. So the number ended up as zero always.
5395 */ 5418 */
5396 tmp_sectors = 0; 5419 tmp_sectors = 0;
5397 ctrl_info = instance->ctrl_info; 5420 ctrl_info = instance->ctrl_info_buf;
5398 5421
5399 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 5422 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5400 le16_to_cpu(ctrl_info->max_strips_per_io); 5423 le16_to_cpu(ctrl_info->max_strips_per_io);
@@ -5505,9 +5528,10 @@ fail_setup_irqs:
5505 if (instance->msix_vectors) 5528 if (instance->msix_vectors)
5506 pci_free_irq_vectors(instance->pdev); 5529 pci_free_irq_vectors(instance->pdev);
5507 instance->msix_vectors = 0; 5530 instance->msix_vectors = 0;
5531fail_alloc_dma_buf:
5532 megasas_free_ctrl_dma_buffers(instance);
5533 megasas_free_ctrl_mem(instance);
5508fail_ready_state: 5534fail_ready_state:
5509 kfree(instance->ctrl_info);
5510 instance->ctrl_info = NULL;
5511 iounmap(instance->reg_set); 5535 iounmap(instance->reg_set);
5512 5536
5513fail_ioremap: 5537fail_ioremap:
@@ -5580,13 +5604,14 @@ megasas_get_seq_num(struct megasas_instance *instance,
5580 dcmd->cmd = MFI_CMD_DCMD; 5604 dcmd->cmd = MFI_CMD_DCMD;
5581 dcmd->cmd_status = 0x0; 5605 dcmd->cmd_status = 0x0;
5582 dcmd->sge_count = 1; 5606 dcmd->sge_count = 1;
5583 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 5607 dcmd->flags = MFI_FRAME_DIR_READ;
5584 dcmd->timeout = 0; 5608 dcmd->timeout = 0;
5585 dcmd->pad_0 = 0; 5609 dcmd->pad_0 = 0;
5586 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5610 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5587 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 5611 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5588 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); 5612
5589 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 5613 megasas_set_dma_settings(instance, dcmd, el_info_h,
5614 sizeof(struct megasas_evt_log_info));
5590 5615
5591 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) == 5616 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
5592 DCMD_SUCCESS) { 5617 DCMD_SUCCESS) {
@@ -5711,7 +5736,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5711 dcmd->cmd = MFI_CMD_DCMD; 5736 dcmd->cmd = MFI_CMD_DCMD;
5712 dcmd->cmd_status = 0x0; 5737 dcmd->cmd_status = 0x0;
5713 dcmd->sge_count = 1; 5738 dcmd->sge_count = 1;
5714 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 5739 dcmd->flags = MFI_FRAME_DIR_READ;
5715 dcmd->timeout = 0; 5740 dcmd->timeout = 0;
5716 dcmd->pad_0 = 0; 5741 dcmd->pad_0 = 0;
5717 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5742 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
@@ -5719,8 +5744,9 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5719 dcmd->mbox.w[0] = cpu_to_le32(seq_num); 5744 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5720 instance->last_seq_num = seq_num; 5745 instance->last_seq_num = seq_num;
5721 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 5746 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5722 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); 5747
5723 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); 5748 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
5749 sizeof(struct megasas_evt_detail));
5724 5750
5725 if (instance->aen_cmd != NULL) { 5751 if (instance->aen_cmd != NULL) {
5726 megasas_return_cmd(instance, cmd); 5752 megasas_return_cmd(instance, cmd);
@@ -5787,18 +5813,18 @@ megasas_get_target_prop(struct megasas_instance *instance,
5787 dcmd->cmd = MFI_CMD_DCMD; 5813 dcmd->cmd = MFI_CMD_DCMD;
5788 dcmd->cmd_status = 0xFF; 5814 dcmd->cmd_status = 0xFF;
5789 dcmd->sge_count = 1; 5815 dcmd->sge_count = 1;
5790 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 5816 dcmd->flags = MFI_FRAME_DIR_READ;
5791 dcmd->timeout = 0; 5817 dcmd->timeout = 0;
5792 dcmd->pad_0 = 0; 5818 dcmd->pad_0 = 0;
5793 dcmd->data_xfer_len = 5819 dcmd->data_xfer_len =
5794 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); 5820 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5795 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); 5821 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5796 dcmd->sgl.sge32[0].phys_addr =
5797 cpu_to_le32(instance->tgt_prop_h);
5798 dcmd->sgl.sge32[0].length =
5799 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5800 5822
5801 if (instance->ctrl_context && !instance->mask_interrupts) 5823 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
5824 sizeof(struct MR_TARGET_PROPERTIES));
5825
5826 if ((instance->adapter_type != MFI_SERIES) &&
5827 !instance->mask_interrupts)
5802 ret = megasas_issue_blocked_cmd(instance, 5828 ret = megasas_issue_blocked_cmd(instance,
5803 cmd, MFI_IO_TIMEOUT_SECS); 5829 cmd, MFI_IO_TIMEOUT_SECS);
5804 else 5830 else
@@ -5923,234 +5949,408 @@ static int megasas_io_attach(struct megasas_instance *instance)
5923 return 0; 5949 return 0;
5924} 5950}
5925 5951
5952/**
5953 * megasas_set_dma_mask - Set DMA mask for supported controllers
5954 *
5955 * @instance: Adapter soft state
5956 * Description:
5957 *
5958 * For Ventura, driver/FW will operate in 64bit DMA addresses.
5959 *
5960 * For invader-
5961 * By default, driver/FW will operate in 32bit DMA addresses
5962 * for consistent DMA mapping but if 32 bit consistent
5963 * DMA mask fails, driver will try with 64 bit consistent
5964 * mask provided FW is true 64bit DMA capable
5965 *
5966 * For older controllers(Thunderbolt and MFI based adapters)-
5967 * driver/FW will operate in 32 bit consistent DMA addresses.
5968 */
5926static int 5969static int
5927megasas_set_dma_mask(struct pci_dev *pdev) 5970megasas_set_dma_mask(struct megasas_instance *instance)
5928{ 5971{
5929 /* 5972 u64 consistent_mask;
5930 * All our controllers are capable of performing 64-bit DMA 5973 struct pci_dev *pdev;
5931 */ 5974 u32 scratch_pad_2;
5975
5976 pdev = instance->pdev;
5977 consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
5978 DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
5979
5932 if (IS_DMA64) { 5980 if (IS_DMA64) {
5933 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 5981 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
5982 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
5983 goto fail_set_dma_mask;
5984
5985 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
5986 (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
5987 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
5988 /*
5989 * If 32 bit DMA mask fails, then try for 64 bit mask
5990 * for FW capable of handling 64 bit DMA.
5991 */
5992 scratch_pad_2 = readl
5993 (&instance->reg_set->outbound_scratch_pad_2);
5934 5994
5935 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 5995 if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
5996 goto fail_set_dma_mask;
5997 else if (dma_set_mask_and_coherent(&pdev->dev,
5998 DMA_BIT_MASK(64)))
5936 goto fail_set_dma_mask; 5999 goto fail_set_dma_mask;
5937 } 6000 }
5938 } else { 6001 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
5939 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 6002 goto fail_set_dma_mask;
5940 goto fail_set_dma_mask; 6003
5941 } 6004 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
5942 /* 6005 instance->consistent_mask_64bit = false;
5943 * Ensure that all data structures are allocated in 32-bit 6006 else
5944 * memory. 6007 instance->consistent_mask_64bit = true;
5945 */ 6008
5946 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 6009 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
5947 /* Try 32bit DMA mask and 32 bit Consistent dma mask */ 6010 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
5948 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) 6011 (instance->consistent_mask_64bit ? "64" : "32"));
5949 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
5950 dev_info(&pdev->dev, "set 32bit DMA mask"
5951 "and 32 bit consistent mask\n");
5952 else
5953 goto fail_set_dma_mask;
5954 }
5955 6012
5956 return 0; 6013 return 0;
5957 6014
5958fail_set_dma_mask: 6015fail_set_dma_mask:
5959 return 1; 6016 dev_err(&pdev->dev, "Failed to set DMA mask\n");
6017 return -1;
6018
5960} 6019}
5961 6020
5962/** 6021/*
5963 * megasas_probe_one - PCI hotplug entry point 6022 * megasas_set_adapter_type - Set adapter type.
5964 * @pdev: PCI device structure 6023 * Supported controllers can be divided in
5965 * @id: PCI ids of supported hotplugged adapter 6024 * 4 categories- enum MR_ADAPTER_TYPE {
6025 * MFI_SERIES = 1,
6026 * THUNDERBOLT_SERIES = 2,
6027 * INVADER_SERIES = 3,
6028 * VENTURA_SERIES = 4,
6029 * };
6030 * @instance: Adapter soft state
6031 * return: void
5966 */ 6032 */
5967static int megasas_probe_one(struct pci_dev *pdev, 6033static inline void megasas_set_adapter_type(struct megasas_instance *instance)
5968 const struct pci_device_id *id)
5969{ 6034{
5970 int rval, pos; 6035 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
5971 struct Scsi_Host *host; 6036 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
5972 struct megasas_instance *instance; 6037 instance->adapter_type = MFI_SERIES;
5973 u16 control = 0; 6038 } else {
5974 struct fusion_context *fusion = NULL; 6039 switch (instance->pdev->device) {
5975 6040 case PCI_DEVICE_ID_LSI_VENTURA:
5976 /* Reset MSI-X in the kdump kernel */ 6041 case PCI_DEVICE_ID_LSI_CRUSADER:
5977 if (reset_devices) { 6042 case PCI_DEVICE_ID_LSI_HARPOON:
5978 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); 6043 case PCI_DEVICE_ID_LSI_TOMCAT:
5979 if (pos) { 6044 case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
5980 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, 6045 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
5981 &control); 6046 instance->adapter_type = VENTURA_SERIES;
5982 if (control & PCI_MSIX_FLAGS_ENABLE) { 6047 break;
5983 dev_info(&pdev->dev, "resetting MSI-X\n"); 6048 case PCI_DEVICE_ID_LSI_FUSION:
5984 pci_write_config_word(pdev, 6049 case PCI_DEVICE_ID_LSI_PLASMA:
5985 pos + PCI_MSIX_FLAGS, 6050 instance->adapter_type = THUNDERBOLT_SERIES;
5986 control & 6051 break;
5987 ~PCI_MSIX_FLAGS_ENABLE); 6052 case PCI_DEVICE_ID_LSI_INVADER:
5988 } 6053 case PCI_DEVICE_ID_LSI_INTRUDER:
6054 case PCI_DEVICE_ID_LSI_INTRUDER_24:
6055 case PCI_DEVICE_ID_LSI_CUTLASS_52:
6056 case PCI_DEVICE_ID_LSI_CUTLASS_53:
6057 case PCI_DEVICE_ID_LSI_FURY:
6058 instance->adapter_type = INVADER_SERIES;
6059 break;
6060 default: /* For all other supported controllers */
6061 instance->adapter_type = MFI_SERIES;
6062 break;
5989 } 6063 }
5990 } 6064 }
6065}
5991 6066
5992 /* 6067static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
5993 * PCI prepping: enable device set bus mastering and dma mask 6068{
5994 */ 6069 instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
5995 rval = pci_enable_device_mem(pdev); 6070 &instance->producer_h);
6071 instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6072 &instance->consumer_h);
5996 6073
5997 if (rval) { 6074 if (!instance->producer || !instance->consumer) {
5998 return rval; 6075 dev_err(&instance->pdev->dev,
6076 "Failed to allocate memory for producer, consumer\n");
6077 return -1;
5999 } 6078 }
6000 6079
6001 pci_set_master(pdev); 6080 *instance->producer = 0;
6081 *instance->consumer = 0;
6082 return 0;
6083}
6002 6084
6003 if (megasas_set_dma_mask(pdev)) 6085/**
6004 goto fail_set_dma_mask; 6086 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
6087 * structures which are not common across MFI
6088 * adapters and fusion adapters.
6089 * For MFI based adapters, allocate producer and
6090 * consumer buffers. For fusion adapters, allocate
6091 * memory for fusion context.
6092 * @instance: Adapter soft state
6093 * return: 0 for SUCCESS
6094 */
6095static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6096{
6097 switch (instance->adapter_type) {
6098 case MFI_SERIES:
6099 if (megasas_alloc_mfi_ctrl_mem(instance))
6100 return -ENOMEM;
6101 break;
6102 case VENTURA_SERIES:
6103 case THUNDERBOLT_SERIES:
6104 case INVADER_SERIES:
6105 if (megasas_alloc_fusion_context(instance))
6106 return -ENOMEM;
6107 break;
6108 }
6005 6109
6006 host = scsi_host_alloc(&megasas_template, 6110 return 0;
6007 sizeof(struct megasas_instance)); 6111}
6008 6112
6009 if (!host) { 6113/*
6010 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n"); 6114 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
6011 goto fail_alloc_instance; 6115 * producer, consumer buffers for MFI adapters
6116 *
6117 * @instance - Adapter soft instance
6118 *
6119 */
6120static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6121{
6122 if (instance->adapter_type == MFI_SERIES) {
6123 if (instance->producer)
6124 pci_free_consistent(instance->pdev, sizeof(u32),
6125 instance->producer,
6126 instance->producer_h);
6127 if (instance->consumer)
6128 pci_free_consistent(instance->pdev, sizeof(u32),
6129 instance->consumer,
6130 instance->consumer_h);
6131 } else {
6132 megasas_free_fusion_context(instance);
6012 } 6133 }
6134}
6013 6135
6014 instance = (struct megasas_instance *)host->hostdata; 6136/**
6015 memset(instance, 0, sizeof(*instance)); 6137 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
6016 atomic_set(&instance->fw_reset_no_pci_access, 0); 6138 * driver load time
6017 instance->pdev = pdev; 6139 *
6140 * @instance- Adapter soft instance
6141 * @return- O for SUCCESS
6142 */
6143static inline
6144int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6145{
6146 struct pci_dev *pdev = instance->pdev;
6147 struct fusion_context *fusion = instance->ctrl_context;
6018 6148
6019 switch (instance->pdev->device) { 6149 instance->evt_detail =
6020 case PCI_DEVICE_ID_LSI_VENTURA: 6150 pci_alloc_consistent(pdev,
6021 case PCI_DEVICE_ID_LSI_HARPOON: 6151 sizeof(struct megasas_evt_detail),
6022 case PCI_DEVICE_ID_LSI_TOMCAT: 6152 &instance->evt_detail_h);
6023 case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6024 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6025 instance->is_ventura = true;
6026 case PCI_DEVICE_ID_LSI_FUSION:
6027 case PCI_DEVICE_ID_LSI_PLASMA:
6028 case PCI_DEVICE_ID_LSI_INVADER:
6029 case PCI_DEVICE_ID_LSI_FURY:
6030 case PCI_DEVICE_ID_LSI_INTRUDER:
6031 case PCI_DEVICE_ID_LSI_INTRUDER_24:
6032 case PCI_DEVICE_ID_LSI_CUTLASS_52:
6033 case PCI_DEVICE_ID_LSI_CUTLASS_53:
6034 {
6035 if (megasas_alloc_fusion_context(instance)) {
6036 megasas_free_fusion_context(instance);
6037 goto fail_alloc_dma_buf;
6038 }
6039 fusion = instance->ctrl_context;
6040 6153
6041 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 6154 if (!instance->evt_detail) {
6042 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) 6155 dev_err(&instance->pdev->dev,
6043 fusion->adapter_type = THUNDERBOLT_SERIES; 6156 "Failed to allocate event detail buffer\n");
6044 else if (instance->is_ventura) 6157 return -ENOMEM;
6045 fusion->adapter_type = VENTURA_SERIES; 6158 }
6046 else 6159
6047 fusion->adapter_type = INVADER_SERIES; 6160 if (fusion) {
6048 } 6161 fusion->ioc_init_request =
6049 break; 6162 dma_alloc_coherent(&pdev->dev,
6050 default: /* For all other supported controllers */ 6163 sizeof(struct MPI2_IOC_INIT_REQUEST),
6051 6164 &fusion->ioc_init_request_phys,
6052 instance->producer = 6165 GFP_KERNEL);
6053 pci_alloc_consistent(pdev, sizeof(u32), 6166
6054 &instance->producer_h); 6167 if (!fusion->ioc_init_request) {
6055 instance->consumer = 6168 dev_err(&pdev->dev,
6056 pci_alloc_consistent(pdev, sizeof(u32), 6169 "Failed to allocate PD list buffer\n");
6057 &instance->consumer_h); 6170 return -ENOMEM;
6058
6059 if (!instance->producer || !instance->consumer) {
6060 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
6061 "memory for producer, consumer\n");
6062 goto fail_alloc_dma_buf;
6063 } 6171 }
6172 }
6064 6173
6065 *instance->producer = 0; 6174 instance->pd_list_buf =
6066 *instance->consumer = 0; 6175 pci_alloc_consistent(pdev,
6067 break; 6176 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6177 &instance->pd_list_buf_h);
6178
6179 if (!instance->pd_list_buf) {
6180 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
6181 return -ENOMEM;
6068 } 6182 }
6069 6183
6070 /* Crash dump feature related initialisation*/ 6184 instance->ctrl_info_buf =
6071 instance->drv_buf_index = 0; 6185 pci_alloc_consistent(pdev,
6072 instance->drv_buf_alloc = 0; 6186 sizeof(struct megasas_ctrl_info),
6073 instance->crash_dump_fw_support = 0; 6187 &instance->ctrl_info_buf_h);
6074 instance->crash_dump_app_support = 0;
6075 instance->fw_crash_state = UNAVAILABLE;
6076 spin_lock_init(&instance->crashdump_lock);
6077 instance->crash_dump_buf = NULL;
6078 6188
6079 megasas_poll_wait_aen = 0; 6189 if (!instance->ctrl_info_buf) {
6080 instance->flag_ieee = 0; 6190 dev_err(&pdev->dev,
6081 instance->ev = NULL; 6191 "Failed to allocate controller info buffer\n");
6082 instance->issuepend_done = 1; 6192 return -ENOMEM;
6083 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); 6193 }
6084 instance->is_imr = 0;
6085 6194
6086 instance->evt_detail = pci_alloc_consistent(pdev, 6195 instance->ld_list_buf =
6087 sizeof(struct 6196 pci_alloc_consistent(pdev,
6088 megasas_evt_detail), 6197 sizeof(struct MR_LD_LIST),
6089 &instance->evt_detail_h); 6198 &instance->ld_list_buf_h);
6090 6199
6091 if (!instance->evt_detail) { 6200 if (!instance->ld_list_buf) {
6092 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for " 6201 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
6093 "event detail structure\n"); 6202 return -ENOMEM;
6094 goto fail_alloc_dma_buf; 6203 }
6204
6205 instance->ld_targetid_list_buf =
6206 pci_alloc_consistent(pdev,
6207 sizeof(struct MR_LD_TARGETID_LIST),
6208 &instance->ld_targetid_list_buf_h);
6209
6210 if (!instance->ld_targetid_list_buf) {
6211 dev_err(&pdev->dev,
6212 "Failed to allocate LD targetid list buffer\n");
6213 return -ENOMEM;
6095 } 6214 }
6096 6215
6097 if (!reset_devices) { 6216 if (!reset_devices) {
6098 instance->system_info_buf = pci_zalloc_consistent(pdev, 6217 instance->system_info_buf =
6099 sizeof(struct MR_DRV_SYSTEM_INFO), 6218 pci_alloc_consistent(pdev,
6100 &instance->system_info_h); 6219 sizeof(struct MR_DRV_SYSTEM_INFO),
6101 if (!instance->system_info_buf) 6220 &instance->system_info_h);
6102 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); 6221 instance->pd_info =
6222 pci_alloc_consistent(pdev,
6223 sizeof(struct MR_PD_INFO),
6224 &instance->pd_info_h);
6225 instance->tgt_prop =
6226 pci_alloc_consistent(pdev,
6227 sizeof(struct MR_TARGET_PROPERTIES),
6228 &instance->tgt_prop_h);
6229 instance->crash_dump_buf =
6230 pci_alloc_consistent(pdev,
6231 CRASH_DMA_BUF_SIZE,
6232 &instance->crash_dump_h);
6103 6233
6104 instance->pd_info = pci_alloc_consistent(pdev, 6234 if (!instance->system_info_buf)
6105 sizeof(struct MR_PD_INFO), &instance->pd_info_h); 6235 dev_err(&instance->pdev->dev,
6236 "Failed to allocate system info buffer\n");
6106 6237
6107 if (!instance->pd_info) 6238 if (!instance->pd_info)
6108 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); 6239 dev_err(&instance->pdev->dev,
6109 6240 "Failed to allocate pd_info buffer\n");
6110 instance->tgt_prop = pci_alloc_consistent(pdev,
6111 sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
6112 6241
6113 if (!instance->tgt_prop) 6242 if (!instance->tgt_prop)
6114 dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n"); 6243 dev_err(&instance->pdev->dev,
6244 "Failed to allocate tgt_prop buffer\n");
6115 6245
6116 instance->crash_dump_buf = pci_alloc_consistent(pdev,
6117 CRASH_DMA_BUF_SIZE,
6118 &instance->crash_dump_h);
6119 if (!instance->crash_dump_buf) 6246 if (!instance->crash_dump_buf)
6120 dev_err(&pdev->dev, "Can't allocate Firmware " 6247 dev_err(&instance->pdev->dev,
6121 "crash dump DMA buffer\n"); 6248 "Failed to allocate crash dump buffer\n");
6122 } 6249 }
6123 6250
6251 return 0;
6252}
6253
6254/*
6255 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
6256 * during driver load time
6257 *
6258 * @instance- Adapter soft instance
6259 *
6260 */
6261static inline
6262void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
6263{
6264 struct pci_dev *pdev = instance->pdev;
6265 struct fusion_context *fusion = instance->ctrl_context;
6266
6267 if (instance->evt_detail)
6268 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6269 instance->evt_detail,
6270 instance->evt_detail_h);
6271
6272 if (fusion && fusion->ioc_init_request)
6273 dma_free_coherent(&pdev->dev,
6274 sizeof(struct MPI2_IOC_INIT_REQUEST),
6275 fusion->ioc_init_request,
6276 fusion->ioc_init_request_phys);
6277
6278 if (instance->pd_list_buf)
6279 pci_free_consistent(pdev,
6280 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6281 instance->pd_list_buf,
6282 instance->pd_list_buf_h);
6283
6284 if (instance->ld_list_buf)
6285 pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
6286 instance->ld_list_buf,
6287 instance->ld_list_buf_h);
6288
6289 if (instance->ld_targetid_list_buf)
6290 pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
6291 instance->ld_targetid_list_buf,
6292 instance->ld_targetid_list_buf_h);
6293
6294 if (instance->ctrl_info_buf)
6295 pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
6296 instance->ctrl_info_buf,
6297 instance->ctrl_info_buf_h);
6298
6299 if (instance->system_info_buf)
6300 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6301 instance->system_info_buf,
6302 instance->system_info_h);
6303
6304 if (instance->pd_info)
6305 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6306 instance->pd_info, instance->pd_info_h);
6307
6308 if (instance->tgt_prop)
6309 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6310 instance->tgt_prop, instance->tgt_prop_h);
6311
6312 if (instance->crash_dump_buf)
6313 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6314 instance->crash_dump_buf,
6315 instance->crash_dump_h);
6316}
6317
6318/*
6319 * megasas_init_ctrl_params - Initialize controller's instance
6320 * parameters before FW init
6321 * @instance - Adapter soft instance
6322 * @return - void
6323 */
6324static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
6325{
6326 instance->fw_crash_state = UNAVAILABLE;
6327
6328 megasas_poll_wait_aen = 0;
6329 instance->issuepend_done = 1;
6330 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6331
6124 /* 6332 /*
6125 * Initialize locks and queues 6333 * Initialize locks and queues
6126 */ 6334 */
6127 INIT_LIST_HEAD(&instance->cmd_pool); 6335 INIT_LIST_HEAD(&instance->cmd_pool);
6128 INIT_LIST_HEAD(&instance->internal_reset_pending_q); 6336 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6129 6337
6130 atomic_set(&instance->fw_outstanding,0); 6338 atomic_set(&instance->fw_outstanding, 0);
6131 6339
6132 init_waitqueue_head(&instance->int_cmd_wait_q); 6340 init_waitqueue_head(&instance->int_cmd_wait_q);
6133 init_waitqueue_head(&instance->abort_cmd_wait_q); 6341 init_waitqueue_head(&instance->abort_cmd_wait_q);
6134 6342
6343 spin_lock_init(&instance->crashdump_lock);
6135 spin_lock_init(&instance->mfi_pool_lock); 6344 spin_lock_init(&instance->mfi_pool_lock);
6136 spin_lock_init(&instance->hba_lock); 6345 spin_lock_init(&instance->hba_lock);
6137 spin_lock_init(&instance->stream_lock); 6346 spin_lock_init(&instance->stream_lock);
6138 spin_lock_init(&instance->completion_lock); 6347 spin_lock_init(&instance->completion_lock);
6139 6348
6140 mutex_init(&instance->reset_mutex);
6141 mutex_init(&instance->hba_mutex); 6349 mutex_init(&instance->hba_mutex);
6142 6350 mutex_init(&instance->reset_mutex);
6143 /*
6144 * Initialize PCI related and misc parameters
6145 */
6146 instance->host = host;
6147 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6148 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6149 instance->ctrl_info = NULL;
6150
6151 6351
6152 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 6352 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6153 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) 6353 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6154 instance->flag_ieee = 1; 6354 instance->flag_ieee = 1;
6155 6355
6156 megasas_dbg_lvl = 0; 6356 megasas_dbg_lvl = 0;
@@ -6160,11 +6360,75 @@ static int megasas_probe_one(struct pci_dev *pdev,
6160 instance->disableOnlineCtrlReset = 1; 6360 instance->disableOnlineCtrlReset = 1;
6161 instance->UnevenSpanSupport = 0; 6361 instance->UnevenSpanSupport = 0;
6162 6362
6163 if (instance->ctrl_context) { 6363 if (instance->adapter_type != MFI_SERIES) {
6164 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 6364 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6165 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq); 6365 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
6166 } else 6366 } else {
6167 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 6367 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6368 }
6369}
6370
6371/**
6372 * megasas_probe_one - PCI hotplug entry point
6373 * @pdev: PCI device structure
6374 * @id: PCI ids of supported hotplugged adapter
6375 */
6376static int megasas_probe_one(struct pci_dev *pdev,
6377 const struct pci_device_id *id)
6378{
6379 int rval, pos;
6380 struct Scsi_Host *host;
6381 struct megasas_instance *instance;
6382 u16 control = 0;
6383
6384 /* Reset MSI-X in the kdump kernel */
6385 if (reset_devices) {
6386 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
6387 if (pos) {
6388 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
6389 &control);
6390 if (control & PCI_MSIX_FLAGS_ENABLE) {
6391 dev_info(&pdev->dev, "resetting MSI-X\n");
6392 pci_write_config_word(pdev,
6393 pos + PCI_MSIX_FLAGS,
6394 control &
6395 ~PCI_MSIX_FLAGS_ENABLE);
6396 }
6397 }
6398 }
6399
6400 /*
6401 * PCI prepping: enable device set bus mastering and dma mask
6402 */
6403 rval = pci_enable_device_mem(pdev);
6404
6405 if (rval) {
6406 return rval;
6407 }
6408
6409 pci_set_master(pdev);
6410
6411 host = scsi_host_alloc(&megasas_template,
6412 sizeof(struct megasas_instance));
6413
6414 if (!host) {
6415 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
6416 goto fail_alloc_instance;
6417 }
6418
6419 instance = (struct megasas_instance *)host->hostdata;
6420 memset(instance, 0, sizeof(*instance));
6421 atomic_set(&instance->fw_reset_no_pci_access, 0);
6422
6423 /*
6424 * Initialize PCI related and misc parameters
6425 */
6426 instance->pdev = pdev;
6427 instance->host = host;
6428 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6429 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6430
6431 megasas_set_adapter_type(instance);
6168 6432
6169 /* 6433 /*
6170 * Initialize MFI Firmware 6434 * Initialize MFI Firmware
@@ -6240,37 +6504,16 @@ fail_io_attach:
6240 instance->instancet->disable_intr(instance); 6504 instance->instancet->disable_intr(instance);
6241 megasas_destroy_irqs(instance); 6505 megasas_destroy_irqs(instance);
6242 6506
6243 if (instance->ctrl_context) 6507 if (instance->adapter_type != MFI_SERIES)
6244 megasas_release_fusion(instance); 6508 megasas_release_fusion(instance);
6245 else 6509 else
6246 megasas_release_mfi(instance); 6510 megasas_release_mfi(instance);
6247 if (instance->msix_vectors) 6511 if (instance->msix_vectors)
6248 pci_free_irq_vectors(instance->pdev); 6512 pci_free_irq_vectors(instance->pdev);
6249fail_init_mfi: 6513fail_init_mfi:
6250fail_alloc_dma_buf:
6251 if (instance->evt_detail)
6252 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6253 instance->evt_detail,
6254 instance->evt_detail_h);
6255
6256 if (instance->pd_info)
6257 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6258 instance->pd_info,
6259 instance->pd_info_h);
6260 if (instance->tgt_prop)
6261 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6262 instance->tgt_prop,
6263 instance->tgt_prop_h);
6264 if (instance->producer)
6265 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6266 instance->producer_h);
6267 if (instance->consumer)
6268 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6269 instance->consumer_h);
6270 scsi_host_put(host); 6514 scsi_host_put(host);
6271 6515
6272fail_alloc_instance: 6516fail_alloc_instance:
6273fail_set_dma_mask:
6274 pci_disable_device(pdev); 6517 pci_disable_device(pdev);
6275 6518
6276 return -ENODEV; 6519 return -ENODEV;
@@ -6447,7 +6690,13 @@ megasas_resume(struct pci_dev *pdev)
6447 6690
6448 pci_set_master(pdev); 6691 pci_set_master(pdev);
6449 6692
6450 if (megasas_set_dma_mask(pdev)) 6693 /*
6694 * We expect the FW state to be READY
6695 */
6696 if (megasas_transition_to_ready(instance, 0))
6697 goto fail_ready_state;
6698
6699 if (megasas_set_dma_mask(instance))
6451 goto fail_set_dma_mask; 6700 goto fail_set_dma_mask;
6452 6701
6453 /* 6702 /*
@@ -6456,12 +6705,6 @@ megasas_resume(struct pci_dev *pdev)
6456 6705
6457 atomic_set(&instance->fw_outstanding, 0); 6706 atomic_set(&instance->fw_outstanding, 0);
6458 6707
6459 /*
6460 * We expect the FW state to be READY
6461 */
6462 if (megasas_transition_to_ready(instance, 0))
6463 goto fail_ready_state;
6464
6465 /* Now re-enable MSI-X */ 6708 /* Now re-enable MSI-X */
6466 if (instance->msix_vectors) { 6709 if (instance->msix_vectors) {
6467 irq_flags = PCI_IRQ_MSIX; 6710 irq_flags = PCI_IRQ_MSIX;
@@ -6474,7 +6717,7 @@ megasas_resume(struct pci_dev *pdev)
6474 if (rval < 0) 6717 if (rval < 0)
6475 goto fail_reenable_msix; 6718 goto fail_reenable_msix;
6476 6719
6477 if (instance->ctrl_context) { 6720 if (instance->adapter_type != MFI_SERIES) {
6478 megasas_reset_reply_desc(instance); 6721 megasas_reset_reply_desc(instance);
6479 if (megasas_ioc_init_fusion(instance)) { 6722 if (megasas_ioc_init_fusion(instance)) {
6480 megasas_free_cmds(instance); 6723 megasas_free_cmds(instance);
@@ -6521,30 +6764,13 @@ megasas_resume(struct pci_dev *pdev)
6521 return 0; 6764 return 0;
6522 6765
6523fail_init_mfi: 6766fail_init_mfi:
6524 if (instance->evt_detail) 6767 megasas_free_ctrl_dma_buffers(instance);
6525 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 6768 megasas_free_ctrl_mem(instance);
6526 instance->evt_detail,
6527 instance->evt_detail_h);
6528
6529 if (instance->pd_info)
6530 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6531 instance->pd_info,
6532 instance->pd_info_h);
6533 if (instance->tgt_prop)
6534 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6535 instance->tgt_prop,
6536 instance->tgt_prop_h);
6537 if (instance->producer)
6538 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6539 instance->producer_h);
6540 if (instance->consumer)
6541 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6542 instance->consumer_h);
6543 scsi_host_put(host); 6769 scsi_host_put(host);
6544 6770
6771fail_reenable_msix:
6545fail_set_dma_mask: 6772fail_set_dma_mask:
6546fail_ready_state: 6773fail_ready_state:
6547fail_reenable_msix:
6548 6774
6549 pci_disable_device(pdev); 6775 pci_disable_device(pdev);
6550 6776
@@ -6647,7 +6873,7 @@ skip_firing_dcmds:
6647 if (instance->msix_vectors) 6873 if (instance->msix_vectors)
6648 pci_free_irq_vectors(instance->pdev); 6874 pci_free_irq_vectors(instance->pdev);
6649 6875
6650 if (instance->is_ventura) { 6876 if (instance->adapter_type == VENTURA_SERIES) {
6651 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) 6877 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6652 kfree(fusion->stream_detect_by_ld[i]); 6878 kfree(fusion->stream_detect_by_ld[i]);
6653 kfree(fusion->stream_detect_by_ld); 6879 kfree(fusion->stream_detect_by_ld);
@@ -6655,7 +6881,7 @@ skip_firing_dcmds:
6655 } 6881 }
6656 6882
6657 6883
6658 if (instance->ctrl_context) { 6884 if (instance->adapter_type != MFI_SERIES) {
6659 megasas_release_fusion(instance); 6885 megasas_release_fusion(instance);
6660 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 6886 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
6661 (sizeof(struct MR_PD_CFG_SEQ) * 6887 (sizeof(struct MR_PD_CFG_SEQ) *
@@ -6680,30 +6906,10 @@ skip_firing_dcmds:
6680 fusion->pd_seq_sync[i], 6906 fusion->pd_seq_sync[i],
6681 fusion->pd_seq_phys[i]); 6907 fusion->pd_seq_phys[i]);
6682 } 6908 }
6683 megasas_free_fusion_context(instance);
6684 } else { 6909 } else {
6685 megasas_release_mfi(instance); 6910 megasas_release_mfi(instance);
6686 pci_free_consistent(pdev, sizeof(u32),
6687 instance->producer,
6688 instance->producer_h);
6689 pci_free_consistent(pdev, sizeof(u32),
6690 instance->consumer,
6691 instance->consumer_h);
6692 } 6911 }
6693 6912
6694 kfree(instance->ctrl_info);
6695
6696 if (instance->evt_detail)
6697 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6698 instance->evt_detail, instance->evt_detail_h);
6699 if (instance->pd_info)
6700 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6701 instance->pd_info,
6702 instance->pd_info_h);
6703 if (instance->tgt_prop)
6704 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6705 instance->tgt_prop,
6706 instance->tgt_prop_h);
6707 if (instance->vf_affiliation) 6913 if (instance->vf_affiliation)
6708 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * 6914 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6709 sizeof(struct MR_LD_VF_AFFILIATION), 6915 sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6721,13 +6927,9 @@ skip_firing_dcmds:
6721 instance->hb_host_mem, 6927 instance->hb_host_mem,
6722 instance->hb_host_mem_h); 6928 instance->hb_host_mem_h);
6723 6929
6724 if (instance->crash_dump_buf) 6930 megasas_free_ctrl_dma_buffers(instance);
6725 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6726 instance->crash_dump_buf, instance->crash_dump_h);
6727 6931
6728 if (instance->system_info_buf) 6932 megasas_free_ctrl_mem(instance);
6729 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6730 instance->system_info_buf, instance->system_info_h);
6731 6933
6732 scsi_host_put(host); 6934 scsi_host_put(host);
6733 6935
@@ -6866,7 +7068,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6866 struct megasas_iocpacket __user * user_ioc, 7068 struct megasas_iocpacket __user * user_ioc,
6867 struct megasas_iocpacket *ioc) 7069 struct megasas_iocpacket *ioc)
6868{ 7070{
6869 struct megasas_sge32 *kern_sge32; 7071 struct megasas_sge64 *kern_sge64 = NULL;
7072 struct megasas_sge32 *kern_sge32 = NULL;
6870 struct megasas_cmd *cmd; 7073 struct megasas_cmd *cmd;
6871 void *kbuff_arr[MAX_IOCTL_SGE]; 7074 void *kbuff_arr[MAX_IOCTL_SGE];
6872 dma_addr_t buf_handle = 0; 7075 dma_addr_t buf_handle = 0;
@@ -6874,7 +7077,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6874 void *sense = NULL; 7077 void *sense = NULL;
6875 dma_addr_t sense_handle; 7078 dma_addr_t sense_handle;
6876 unsigned long *sense_ptr; 7079 unsigned long *sense_ptr;
6877 u32 opcode; 7080 u32 opcode = 0;
6878 7081
6879 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 7082 memset(kbuff_arr, 0, sizeof(kbuff_arr));
6880 7083
@@ -6884,6 +7087,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6884 return -EINVAL; 7087 return -EINVAL;
6885 } 7088 }
6886 7089
7090 if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
7091 dev_err(&instance->pdev->dev,
7092 "Received invalid ioctl command 0x%x\n",
7093 ioc->frame.hdr.cmd);
7094 return -ENOTSUPP;
7095 }
7096
6887 cmd = megasas_get_cmd(instance); 7097 cmd = megasas_get_cmd(instance);
6888 if (!cmd) { 7098 if (!cmd) {
6889 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n"); 7099 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
@@ -6899,10 +7109,18 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6899 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 7109 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
6900 cmd->frame->hdr.context = cpu_to_le32(cmd->index); 7110 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
6901 cmd->frame->hdr.pad_0 = 0; 7111 cmd->frame->hdr.pad_0 = 0;
6902 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | 7112
6903 MFI_FRAME_SGL64 | 7113 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
7114
7115 if (instance->consistent_mask_64bit)
7116 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
7117 MFI_FRAME_SENSE64));
7118 else
7119 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
6904 MFI_FRAME_SENSE64)); 7120 MFI_FRAME_SENSE64));
6905 opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 7121
7122 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
7123 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
6906 7124
6907 if (opcode == MR_DCMD_CTRL_SHUTDOWN) { 7125 if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
6908 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { 7126 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
@@ -6925,8 +7143,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6925 * kernel buffers in SGLs. The location of SGL is embedded in the 7143 * kernel buffers in SGLs. The location of SGL is embedded in the
6926 * struct iocpacket itself. 7144 * struct iocpacket itself.
6927 */ 7145 */
6928 kern_sge32 = (struct megasas_sge32 *) 7146 if (instance->consistent_mask_64bit)
6929 ((unsigned long)cmd->frame + ioc->sgl_off); 7147 kern_sge64 = (struct megasas_sge64 *)
7148 ((unsigned long)cmd->frame + ioc->sgl_off);
7149 else
7150 kern_sge32 = (struct megasas_sge32 *)
7151 ((unsigned long)cmd->frame + ioc->sgl_off);
6930 7152
6931 /* 7153 /*
6932 * For each user buffer, create a mirror buffer and copy in 7154 * For each user buffer, create a mirror buffer and copy in
@@ -6949,8 +7171,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6949 * We don't change the dma_coherent_mask, so 7171 * We don't change the dma_coherent_mask, so
6950 * pci_alloc_consistent only returns 32bit addresses 7172 * pci_alloc_consistent only returns 32bit addresses
6951 */ 7173 */
6952 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 7174 if (instance->consistent_mask_64bit) {
6953 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 7175 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
7176 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7177 } else {
7178 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
7179 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7180 }
6954 7181
6955 /* 7182 /*
6956 * We created a kernel buffer corresponding to the 7183 * We created a kernel buffer corresponding to the
@@ -6973,7 +7200,10 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6973 7200
6974 sense_ptr = 7201 sense_ptr =
6975 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 7202 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
6976 *sense_ptr = cpu_to_le32(sense_handle); 7203 if (instance->consistent_mask_64bit)
7204 *sense_ptr = cpu_to_le64(sense_handle);
7205 else
7206 *sense_ptr = cpu_to_le32(sense_handle);
6977 } 7207 }
6978 7208
6979 /* 7209 /*
@@ -6984,8 +7214,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6984 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { 7214 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
6985 cmd->sync_cmd = 0; 7215 cmd->sync_cmd = 0;
6986 dev_err(&instance->pdev->dev, 7216 dev_err(&instance->pdev->dev,
6987 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n", 7217 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
6988 __func__, __LINE__, opcode, cmd->cmd_status_drv); 7218 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
7219 cmd->cmd_status_drv);
6989 return -EBUSY; 7220 return -EBUSY;
6990 } 7221 }
6991 7222
@@ -7045,10 +7276,16 @@ out:
7045 7276
7046 for (i = 0; i < ioc->sge_count; i++) { 7277 for (i = 0; i < ioc->sge_count; i++) {
7047 if (kbuff_arr[i]) { 7278 if (kbuff_arr[i]) {
7048 dma_free_coherent(&instance->pdev->dev, 7279 if (instance->consistent_mask_64bit)
7049 le32_to_cpu(kern_sge32[i].length), 7280 dma_free_coherent(&instance->pdev->dev,
7050 kbuff_arr[i], 7281 le32_to_cpu(kern_sge64[i].length),
7051 le32_to_cpu(kern_sge32[i].phys_addr)); 7282 kbuff_arr[i],
7283 le64_to_cpu(kern_sge64[i].phys_addr));
7284 else
7285 dma_free_coherent(&instance->pdev->dev,
7286 le32_to_cpu(kern_sge32[i].length),
7287 kbuff_arr[i],
7288 le32_to_cpu(kern_sge32[i].phys_addr));
7052 kbuff_arr[i] = NULL; 7289 kbuff_arr[i] = NULL;
7053 } 7290 }
7054 } 7291 }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index ecc699a65bac..bfad9bfc313f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -737,7 +737,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
737 *pDevHandle = MR_PdDevHandleGet(pd, map); 737 *pDevHandle = MR_PdDevHandleGet(pd, map);
738 *pPdInterface = MR_PdInterfaceTypeGet(pd, map); 738 *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
739 /* get second pd also for raid 1/10 fast path writes*/ 739 /* get second pd also for raid 1/10 fast path writes*/
740 if (instance->is_ventura && 740 if ((instance->adapter_type == VENTURA_SERIES) &&
741 (raid->level == 1) && 741 (raid->level == 1) &&
742 !io_info->isRead) { 742 !io_info->isRead) {
743 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); 743 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -747,8 +747,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
747 } 747 }
748 } else { 748 } else {
749 if ((raid->level >= 5) && 749 if ((raid->level >= 5) &&
750 ((fusion->adapter_type == THUNDERBOLT_SERIES) || 750 ((instance->adapter_type == THUNDERBOLT_SERIES) ||
751 ((fusion->adapter_type == INVADER_SERIES) && 751 ((instance->adapter_type == INVADER_SERIES) &&
752 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) 752 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
753 pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; 753 pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
754 else if (raid->level == 1) { 754 else if (raid->level == 1) {
@@ -762,7 +762,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
762 } 762 }
763 763
764 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 764 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
765 if (instance->is_ventura) { 765 if (instance->adapter_type == VENTURA_SERIES) {
766 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = 766 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
767 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; 767 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
768 io_info->span_arm = 768 io_info->span_arm =
@@ -853,7 +853,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
853 *pDevHandle = MR_PdDevHandleGet(pd, map); 853 *pDevHandle = MR_PdDevHandleGet(pd, map);
854 *pPdInterface = MR_PdInterfaceTypeGet(pd, map); 854 *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
855 /* get second pd also for raid 1/10 fast path writes*/ 855 /* get second pd also for raid 1/10 fast path writes*/
856 if (instance->is_ventura && 856 if ((instance->adapter_type == VENTURA_SERIES) &&
857 (raid->level == 1) && 857 (raid->level == 1) &&
858 !io_info->isRead) { 858 !io_info->isRead) {
859 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); 859 r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -863,8 +863,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
863 } 863 }
864 } else { 864 } else {
865 if ((raid->level >= 5) && 865 if ((raid->level >= 5) &&
866 ((fusion->adapter_type == THUNDERBOLT_SERIES) || 866 ((instance->adapter_type == THUNDERBOLT_SERIES) ||
867 ((fusion->adapter_type == INVADER_SERIES) && 867 ((instance->adapter_type == INVADER_SERIES) &&
868 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) 868 (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
869 pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; 869 pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
870 else if (raid->level == 1) { 870 else if (raid->level == 1) {
@@ -880,7 +880,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
880 } 880 }
881 881
882 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 882 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
883 if (instance->is_ventura) { 883 if (instance->adapter_type == VENTURA_SERIES) {
884 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = 884 ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
885 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; 885 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
886 io_info->span_arm = 886 io_info->span_arm =
@@ -1088,10 +1088,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
1088 cpu_to_le16(raid->fpIoTimeoutForLd ? 1088 cpu_to_le16(raid->fpIoTimeoutForLd ?
1089 raid->fpIoTimeoutForLd : 1089 raid->fpIoTimeoutForLd :
1090 map->raidMap.fpPdIoTimeoutSec); 1090 map->raidMap.fpPdIoTimeoutSec);
1091 if (fusion->adapter_type == INVADER_SERIES) 1091 if (instance->adapter_type == INVADER_SERIES)
1092 pRAID_Context->reg_lock_flags = (isRead) ? 1092 pRAID_Context->reg_lock_flags = (isRead) ?
1093 raid->regTypeReqOnRead : raid->regTypeReqOnWrite; 1093 raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
1094 else if (!instance->is_ventura) 1094 else if (instance->adapter_type == THUNDERBOLT_SERIES)
1095 pRAID_Context->reg_lock_flags = (isRead) ? 1095 pRAID_Context->reg_lock_flags = (isRead) ?
1096 REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; 1096 REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
1097 pRAID_Context->virtual_disk_tgt_id = raid->targetId; 1097 pRAID_Context->virtual_disk_tgt_id = raid->targetId;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 3c399e7b3fe1..65dc4fea6352 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -93,8 +93,37 @@ extern unsigned int resetwaittime;
93extern unsigned int dual_qdepth_disable; 93extern unsigned int dual_qdepth_disable;
94static void megasas_free_rdpq_fusion(struct megasas_instance *instance); 94static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
95static void megasas_free_reply_fusion(struct megasas_instance *instance); 95static void megasas_free_reply_fusion(struct megasas_instance *instance);
96static inline
97void megasas_configure_queue_sizes(struct megasas_instance *instance);
96 98
99/**
100 * megasas_check_same_4gb_region - check if allocation
101 * crosses same 4GB boundary or not
102 * @instance - adapter's soft instance
103 * start_addr - start address of DMA allocation
104 * size - size of allocation in bytes
105 * return - true : allocation does not cross same
106 * 4GB boundary
107 * false: allocation crosses same
108 * 4GB boundary
109 */
110static inline bool megasas_check_same_4gb_region
111 (struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
112{
113 dma_addr_t end_addr;
114
115 end_addr = start_addr + size;
97 116
117 if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
118 dev_err(&instance->pdev->dev,
119 "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
120 (unsigned long long)start_addr,
121 (unsigned long long)end_addr);
122 return false;
123 }
124
125 return true;
126}
98 127
99/** 128/**
100 * megasas_enable_intr_fusion - Enables interrupts 129 * megasas_enable_intr_fusion - Enables interrupts
@@ -197,7 +226,7 @@ static void
197megasas_fire_cmd_fusion(struct megasas_instance *instance, 226megasas_fire_cmd_fusion(struct megasas_instance *instance,
198 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) 227 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
199{ 228{
200 if (instance->is_ventura) 229 if (instance->adapter_type == VENTURA_SERIES)
201 writel(le32_to_cpu(req_desc->u.low), 230 writel(le32_to_cpu(req_desc->u.low),
202 &instance->reg_set->inbound_single_queue_port); 231 &instance->reg_set->inbound_single_queue_port);
203 else { 232 else {
@@ -240,7 +269,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
240 reg_set = instance->reg_set; 269 reg_set = instance->reg_set;
241 270
242 /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */ 271 /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
243 if (!instance->is_ventura) 272 if (instance->adapter_type < VENTURA_SERIES)
244 cur_max_fw_cmds = 273 cur_max_fw_cmds =
245 readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF; 274 readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
246 275
@@ -251,8 +280,8 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
251 (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS; 280 (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
252 281
253 dev_info(&instance->pdev->dev, 282 dev_info(&instance->pdev->dev,
254 "Current firmware maximum commands: %d\t LDIO threshold: %d\n", 283 "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
255 cur_max_fw_cmds, ldio_threshold); 284 cur_max_fw_cmds, ldio_threshold);
256 285
257 if (fw_boot_context == OCR_CONTEXT) { 286 if (fw_boot_context == OCR_CONTEXT) {
258 cur_max_fw_cmds = cur_max_fw_cmds - 1; 287 cur_max_fw_cmds = cur_max_fw_cmds - 1;
@@ -267,10 +296,6 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
267 instance->max_fw_cmds = cur_max_fw_cmds; 296 instance->max_fw_cmds = cur_max_fw_cmds;
268 instance->ldio_threshold = ldio_threshold; 297 instance->ldio_threshold = ldio_threshold;
269 298
270 if (!instance->is_rdpq)
271 instance->max_fw_cmds =
272 min_t(u16, instance->max_fw_cmds, 1024);
273
274 if (reset_devices) 299 if (reset_devices)
275 instance->max_fw_cmds = min(instance->max_fw_cmds, 300 instance->max_fw_cmds = min(instance->max_fw_cmds,
276 (u16)MEGASAS_KDUMP_QUEUE_DEPTH); 301 (u16)MEGASAS_KDUMP_QUEUE_DEPTH);
@@ -280,19 +305,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
280 * does not exceed max cmds that the FW can support 305 * does not exceed max cmds that the FW can support
281 */ 306 */
282 instance->max_fw_cmds = instance->max_fw_cmds-1; 307 instance->max_fw_cmds = instance->max_fw_cmds-1;
283
284 instance->max_scsi_cmds = instance->max_fw_cmds -
285 (MEGASAS_FUSION_INTERNAL_CMDS +
286 MEGASAS_FUSION_IOCTL_CMDS);
287 instance->cur_can_queue = instance->max_scsi_cmds;
288 instance->host->can_queue = instance->cur_can_queue;
289 } 308 }
290
291 if (instance->is_ventura)
292 instance->max_mpt_cmds =
293 instance->max_fw_cmds * RAID_1_PEER_CMDS;
294 else
295 instance->max_mpt_cmds = instance->max_fw_cmds;
296} 309}
297/** 310/**
298 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool 311 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
@@ -305,17 +318,23 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
305 struct fusion_context *fusion = instance->ctrl_context; 318 struct fusion_context *fusion = instance->ctrl_context;
306 struct megasas_cmd_fusion *cmd; 319 struct megasas_cmd_fusion *cmd;
307 320
308 /* SG, Sense */ 321 if (fusion->sense)
309 for (i = 0; i < instance->max_mpt_cmds; i++) { 322 dma_pool_free(fusion->sense_dma_pool, fusion->sense,
310 cmd = fusion->cmd_list[i]; 323 fusion->sense_phys_addr);
311 if (cmd) { 324
312 if (cmd->sg_frame) 325 /* SG */
313 dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame, 326 if (fusion->cmd_list) {
314 cmd->sg_frame_phys_addr); 327 for (i = 0; i < instance->max_mpt_cmds; i++) {
315 if (cmd->sense) 328 cmd = fusion->cmd_list[i];
316 dma_pool_free(fusion->sense_dma_pool, cmd->sense, 329 if (cmd) {
317 cmd->sense_phys_addr); 330 if (cmd->sg_frame)
331 dma_pool_free(fusion->sg_dma_pool,
332 cmd->sg_frame,
333 cmd->sg_frame_phys_addr);
334 }
335 kfree(cmd);
318 } 336 }
337 kfree(fusion->cmd_list);
319 } 338 }
320 339
321 if (fusion->sg_dma_pool) { 340 if (fusion->sg_dma_pool) {
@@ -347,13 +366,6 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
347 dma_pool_destroy(fusion->io_request_frames_pool); 366 dma_pool_destroy(fusion->io_request_frames_pool);
348 fusion->io_request_frames_pool = NULL; 367 fusion->io_request_frames_pool = NULL;
349 } 368 }
350
351
352 /* cmd_list */
353 for (i = 0; i < instance->max_mpt_cmds; i++)
354 kfree(fusion->cmd_list[i]);
355
356 kfree(fusion->cmd_list);
357} 369}
358 370
359/** 371/**
@@ -367,10 +379,12 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
367 u16 max_cmd; 379 u16 max_cmd;
368 struct fusion_context *fusion; 380 struct fusion_context *fusion;
369 struct megasas_cmd_fusion *cmd; 381 struct megasas_cmd_fusion *cmd;
382 int sense_sz;
383 u32 offset;
370 384
371 fusion = instance->ctrl_context; 385 fusion = instance->ctrl_context;
372 max_cmd = instance->max_fw_cmds; 386 max_cmd = instance->max_fw_cmds;
373 387 sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;
374 388
375 fusion->sg_dma_pool = 389 fusion->sg_dma_pool =
376 dma_pool_create("mr_sg", &instance->pdev->dev, 390 dma_pool_create("mr_sg", &instance->pdev->dev,
@@ -379,7 +393,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
379 /* SCSI_SENSE_BUFFERSIZE = 96 bytes */ 393 /* SCSI_SENSE_BUFFERSIZE = 96 bytes */
380 fusion->sense_dma_pool = 394 fusion->sense_dma_pool =
381 dma_pool_create("mr_sense", &instance->pdev->dev, 395 dma_pool_create("mr_sense", &instance->pdev->dev,
382 SCSI_SENSE_BUFFERSIZE, 64, 0); 396 sense_sz, 64, 0);
383 397
384 if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) { 398 if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
385 dev_err(&instance->pdev->dev, 399 dev_err(&instance->pdev->dev,
@@ -387,6 +401,51 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
387 return -ENOMEM; 401 return -ENOMEM;
388 } 402 }
389 403
404 fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
405 GFP_KERNEL, &fusion->sense_phys_addr);
406 if (!fusion->sense) {
407 dev_err(&instance->pdev->dev,
408 "failed from %s %d\n", __func__, __LINE__);
409 return -ENOMEM;
410 }
411
412 /* sense buffer, request frame and reply desc pool requires to be in
413 * same 4 gb region. Below function will check this.
414 * In case of failure, new pci pool will be created with updated
415 * alignment.
416 * Older allocation and pool will be destroyed.
417 * Alignment will be used such a way that next allocation if success,
418 * will always meet same 4gb region requirement.
419 * Actual requirement is not alignment, but we need start and end of
420 * DMA address must have same upper 32 bit address.
421 */
422
423 if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
424 sense_sz)) {
425 dma_pool_free(fusion->sense_dma_pool, fusion->sense,
426 fusion->sense_phys_addr);
427 fusion->sense = NULL;
428 dma_pool_destroy(fusion->sense_dma_pool);
429
430 fusion->sense_dma_pool =
431 dma_pool_create("mr_sense_align", &instance->pdev->dev,
432 sense_sz, roundup_pow_of_two(sense_sz),
433 0);
434 if (!fusion->sense_dma_pool) {
435 dev_err(&instance->pdev->dev,
436 "Failed from %s %d\n", __func__, __LINE__);
437 return -ENOMEM;
438 }
439 fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
440 GFP_KERNEL,
441 &fusion->sense_phys_addr);
442 if (!fusion->sense) {
443 dev_err(&instance->pdev->dev,
444 "failed from %s %d\n", __func__, __LINE__);
445 return -ENOMEM;
446 }
447 }
448
390 /* 449 /*
391 * Allocate and attach a frame to each of the commands in cmd_list 450 * Allocate and attach a frame to each of the commands in cmd_list
392 */ 451 */
@@ -395,9 +454,11 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
395 cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool, 454 cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
396 GFP_KERNEL, &cmd->sg_frame_phys_addr); 455 GFP_KERNEL, &cmd->sg_frame_phys_addr);
397 456
398 cmd->sense = dma_pool_alloc(fusion->sense_dma_pool, 457 offset = SCSI_SENSE_BUFFERSIZE * i;
399 GFP_KERNEL, &cmd->sense_phys_addr); 458 cmd->sense = (u8 *)fusion->sense + offset;
400 if (!cmd->sg_frame || !cmd->sense) { 459 cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
460
461 if (!cmd->sg_frame) {
401 dev_err(&instance->pdev->dev, 462 dev_err(&instance->pdev->dev,
402 "Failed from %s %d\n", __func__, __LINE__); 463 "Failed from %s %d\n", __func__, __LINE__);
403 return -ENOMEM; 464 return -ENOMEM;
@@ -407,13 +468,10 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
407 /* create sense buffer for the raid 1/10 fp */ 468 /* create sense buffer for the raid 1/10 fp */
408 for (i = max_cmd; i < instance->max_mpt_cmds; i++) { 469 for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
409 cmd = fusion->cmd_list[i]; 470 cmd = fusion->cmd_list[i];
410 cmd->sense = dma_pool_alloc(fusion->sense_dma_pool, 471 offset = SCSI_SENSE_BUFFERSIZE * i;
411 GFP_KERNEL, &cmd->sense_phys_addr); 472 cmd->sense = (u8 *)fusion->sense + offset;
412 if (!cmd->sense) { 473 cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
413 dev_err(&instance->pdev->dev, 474
414 "Failed from %s %d\n", __func__, __LINE__);
415 return -ENOMEM;
416 }
417 } 475 }
418 476
419 return 0; 477 return 0;
@@ -465,16 +523,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
465 523
466 fusion = instance->ctrl_context; 524 fusion = instance->ctrl_context;
467 525
468 fusion->req_frames_desc = 526retry_alloc:
469 dma_alloc_coherent(&instance->pdev->dev,
470 fusion->request_alloc_sz,
471 &fusion->req_frames_desc_phys, GFP_KERNEL);
472 if (!fusion->req_frames_desc) {
473 dev_err(&instance->pdev->dev,
474 "Failed from %s %d\n", __func__, __LINE__);
475 return -ENOMEM;
476 }
477
478 fusion->io_request_frames_pool = 527 fusion->io_request_frames_pool =
479 dma_pool_create("mr_ioreq", &instance->pdev->dev, 528 dma_pool_create("mr_ioreq", &instance->pdev->dev,
480 fusion->io_frames_alloc_sz, 16, 0); 529 fusion->io_frames_alloc_sz, 16, 0);
@@ -489,10 +538,62 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
489 dma_pool_alloc(fusion->io_request_frames_pool, 538 dma_pool_alloc(fusion->io_request_frames_pool,
490 GFP_KERNEL, &fusion->io_request_frames_phys); 539 GFP_KERNEL, &fusion->io_request_frames_phys);
491 if (!fusion->io_request_frames) { 540 if (!fusion->io_request_frames) {
541 if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
542 instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
543 dma_pool_destroy(fusion->io_request_frames_pool);
544 megasas_configure_queue_sizes(instance);
545 goto retry_alloc;
546 } else {
547 dev_err(&instance->pdev->dev,
548 "Failed from %s %d\n", __func__, __LINE__);
549 return -ENOMEM;
550 }
551 }
552
553 if (!megasas_check_same_4gb_region(instance,
554 fusion->io_request_frames_phys,
555 fusion->io_frames_alloc_sz)) {
556 dma_pool_free(fusion->io_request_frames_pool,
557 fusion->io_request_frames,
558 fusion->io_request_frames_phys);
559 fusion->io_request_frames = NULL;
560 dma_pool_destroy(fusion->io_request_frames_pool);
561
562 fusion->io_request_frames_pool =
563 dma_pool_create("mr_ioreq_align",
564 &instance->pdev->dev,
565 fusion->io_frames_alloc_sz,
566 roundup_pow_of_two(fusion->io_frames_alloc_sz),
567 0);
568
569 if (!fusion->io_request_frames_pool) {
570 dev_err(&instance->pdev->dev,
571 "Failed from %s %d\n", __func__, __LINE__);
572 return -ENOMEM;
573 }
574
575 fusion->io_request_frames =
576 dma_pool_alloc(fusion->io_request_frames_pool,
577 GFP_KERNEL,
578 &fusion->io_request_frames_phys);
579
580 if (!fusion->io_request_frames) {
581 dev_err(&instance->pdev->dev,
582 "Failed from %s %d\n", __func__, __LINE__);
583 return -ENOMEM;
584 }
585 }
586
587 fusion->req_frames_desc =
588 dma_alloc_coherent(&instance->pdev->dev,
589 fusion->request_alloc_sz,
590 &fusion->req_frames_desc_phys, GFP_KERNEL);
591 if (!fusion->req_frames_desc) {
492 dev_err(&instance->pdev->dev, 592 dev_err(&instance->pdev->dev,
493 "Failed from %s %d\n", __func__, __LINE__); 593 "Failed from %s %d\n", __func__, __LINE__);
494 return -ENOMEM; 594 return -ENOMEM;
495 } 595 }
596
496 return 0; 597 return 0;
497} 598}
498 599
@@ -523,6 +624,41 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
523 "Failed from %s %d\n", __func__, __LINE__); 624 "Failed from %s %d\n", __func__, __LINE__);
524 return -ENOMEM; 625 return -ENOMEM;
525 } 626 }
627
628 if (!megasas_check_same_4gb_region(instance,
629 fusion->reply_frames_desc_phys[0],
630 (fusion->reply_alloc_sz * count))) {
631 dma_pool_free(fusion->reply_frames_desc_pool,
632 fusion->reply_frames_desc[0],
633 fusion->reply_frames_desc_phys[0]);
634 fusion->reply_frames_desc[0] = NULL;
635 dma_pool_destroy(fusion->reply_frames_desc_pool);
636
637 fusion->reply_frames_desc_pool =
638 dma_pool_create("mr_reply_align",
639 &instance->pdev->dev,
640 fusion->reply_alloc_sz * count,
641 roundup_pow_of_two(fusion->reply_alloc_sz * count),
642 0);
643
644 if (!fusion->reply_frames_desc_pool) {
645 dev_err(&instance->pdev->dev,
646 "Failed from %s %d\n", __func__, __LINE__);
647 return -ENOMEM;
648 }
649
650 fusion->reply_frames_desc[0] =
651 dma_pool_alloc(fusion->reply_frames_desc_pool,
652 GFP_KERNEL,
653 &fusion->reply_frames_desc_phys[0]);
654
655 if (!fusion->reply_frames_desc[0]) {
656 dev_err(&instance->pdev->dev,
657 "Failed from %s %d\n", __func__, __LINE__);
658 return -ENOMEM;
659 }
660 }
661
526 reply_desc = fusion->reply_frames_desc[0]; 662 reply_desc = fusion->reply_frames_desc[0];
527 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++) 663 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
528 reply_desc->Words = cpu_to_le64(ULLONG_MAX); 664 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
@@ -541,52 +677,124 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
541int 677int
542megasas_alloc_rdpq_fusion(struct megasas_instance *instance) 678megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
543{ 679{
544 int i, j, count; 680 int i, j, k, msix_count;
545 struct fusion_context *fusion; 681 struct fusion_context *fusion;
546 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; 682 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
683 union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
684 dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
685 u8 dma_alloc_count, abs_index;
686 u32 chunk_size, array_size, offset;
547 687
548 fusion = instance->ctrl_context; 688 fusion = instance->ctrl_context;
689 chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
690 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
691 MAX_MSIX_QUEUES_FUSION;
549 692
550 fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, 693 fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size,
551 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION, 694 &fusion->rdpq_phys);
552 &fusion->rdpq_phys);
553 if (!fusion->rdpq_virt) { 695 if (!fusion->rdpq_virt) {
554 dev_err(&instance->pdev->dev, 696 dev_err(&instance->pdev->dev,
555 "Failed from %s %d\n", __func__, __LINE__); 697 "Failed from %s %d\n", __func__, __LINE__);
556 return -ENOMEM; 698 return -ENOMEM;
557 } 699 }
558 700
559 memset(fusion->rdpq_virt, 0, 701 memset(fusion->rdpq_virt, 0, array_size);
560 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION); 702 msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
561 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 703
562 fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq", 704 fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
563 &instance->pdev->dev, 705 &instance->pdev->dev,
564 fusion->reply_alloc_sz, 706 chunk_size, 16, 0);
565 16, 0); 707 fusion->reply_frames_desc_pool_align =
566 708 dma_pool_create("mr_rdpq_align",
567 if (!fusion->reply_frames_desc_pool) { 709 &instance->pdev->dev,
710 chunk_size,
711 roundup_pow_of_two(chunk_size),
712 0);
713
714 if (!fusion->reply_frames_desc_pool ||
715 !fusion->reply_frames_desc_pool_align) {
568 dev_err(&instance->pdev->dev, 716 dev_err(&instance->pdev->dev,
569 "Failed from %s %d\n", __func__, __LINE__); 717 "Failed from %s %d\n", __func__, __LINE__);
570 return -ENOMEM; 718 return -ENOMEM;
571 } 719 }
572 720
573 for (i = 0; i < count; i++) { 721/*
574 fusion->reply_frames_desc[i] = 722 * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and
575 dma_pool_alloc(fusion->reply_frames_desc_pool, 723 * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should be
576 GFP_KERNEL, &fusion->reply_frames_desc_phys[i]); 724 * within 4GB boundary and also reply queues in a set must have same
577 if (!fusion->reply_frames_desc[i]) { 725 * upper 32-bits in their memory address. so here driver is allocating the
726 * DMA'able memory for reply queues according. Driver uses limitation of
727 * VENTURA_SERIES to manage INVADER_SERIES as well.
728 */
729 dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
730
731 for (i = 0; i < dma_alloc_count; i++) {
732 rdpq_chunk_virt[i] =
733 dma_pool_alloc(fusion->reply_frames_desc_pool,
734 GFP_KERNEL, &rdpq_chunk_phys[i]);
735 if (!rdpq_chunk_virt[i]) {
578 dev_err(&instance->pdev->dev, 736 dev_err(&instance->pdev->dev,
579 "Failed from %s %d\n", __func__, __LINE__); 737 "Failed from %s %d\n", __func__, __LINE__);
580 return -ENOMEM; 738 return -ENOMEM;
581 } 739 }
740 /* reply desc pool requires to be in same 4 gb region.
741 * Below function will check this.
742 * In case of failure, new pci pool will be created with updated
743 * alignment.
744 * For RDPQ buffers, driver always allocate two separate pci pool.
745 * Alignment will be used such a way that next allocation if
746 * success, will always meet same 4gb region requirement.
747 * rdpq_tracker keep track of each buffer's physical,
748 * virtual address and pci pool descriptor. It will help driver
749 * while freeing the resources.
750 *
751 */
752 if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
753 chunk_size)) {
754 dma_pool_free(fusion->reply_frames_desc_pool,
755 rdpq_chunk_virt[i],
756 rdpq_chunk_phys[i]);
582 757
583 fusion->rdpq_virt[i].RDPQBaseAddress = 758 rdpq_chunk_virt[i] =
584 cpu_to_le64(fusion->reply_frames_desc_phys[i]); 759 dma_pool_alloc(fusion->reply_frames_desc_pool_align,
760 GFP_KERNEL, &rdpq_chunk_phys[i]);
761 if (!rdpq_chunk_virt[i]) {
762 dev_err(&instance->pdev->dev,
763 "Failed from %s %d\n",
764 __func__, __LINE__);
765 return -ENOMEM;
766 }
767 fusion->rdpq_tracker[i].dma_pool_ptr =
768 fusion->reply_frames_desc_pool_align;
769 } else {
770 fusion->rdpq_tracker[i].dma_pool_ptr =
771 fusion->reply_frames_desc_pool;
772 }
585 773
586 reply_desc = fusion->reply_frames_desc[i]; 774 fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
587 for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++) 775 fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
588 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
589 } 776 }
777
778 for (k = 0; k < dma_alloc_count; k++) {
779 for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
780 abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;
781
782 if (abs_index == msix_count)
783 break;
784 offset = fusion->reply_alloc_sz * i;
785 fusion->rdpq_virt[abs_index].RDPQBaseAddress =
786 cpu_to_le64(rdpq_chunk_phys[k] + offset);
787 fusion->reply_frames_desc_phys[abs_index] =
788 rdpq_chunk_phys[k] + offset;
789 fusion->reply_frames_desc[abs_index] =
790 (union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);
791
792 reply_desc = fusion->reply_frames_desc[abs_index];
793 for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
794 reply_desc->Words = ULLONG_MAX;
795 }
796 }
797
590 return 0; 798 return 0;
591} 799}
592 800
@@ -598,15 +806,18 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
598 806
599 fusion = instance->ctrl_context; 807 fusion = instance->ctrl_context;
600 808
601 for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) { 809 for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
602 if (fusion->reply_frames_desc[i]) 810 if (fusion->rdpq_tracker[i].pool_entry_virt)
603 dma_pool_free(fusion->reply_frames_desc_pool, 811 dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
604 fusion->reply_frames_desc[i], 812 fusion->rdpq_tracker[i].pool_entry_virt,
605 fusion->reply_frames_desc_phys[i]); 813 fusion->rdpq_tracker[i].pool_entry_phys);
814
606 } 815 }
607 816
608 if (fusion->reply_frames_desc_pool) 817 if (fusion->reply_frames_desc_pool)
609 dma_pool_destroy(fusion->reply_frames_desc_pool); 818 dma_pool_destroy(fusion->reply_frames_desc_pool);
819 if (fusion->reply_frames_desc_pool_align)
820 dma_pool_destroy(fusion->reply_frames_desc_pool_align);
610 821
611 if (fusion->rdpq_virt) 822 if (fusion->rdpq_virt)
612 pci_free_consistent(instance->pdev, 823 pci_free_consistent(instance->pdev,
@@ -661,9 +872,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
661 872
662 fusion = instance->ctrl_context; 873 fusion = instance->ctrl_context;
663 874
664 if (megasas_alloc_cmdlist_fusion(instance))
665 goto fail_exit;
666
667 if (megasas_alloc_request_fusion(instance)) 875 if (megasas_alloc_request_fusion(instance))
668 goto fail_exit; 876 goto fail_exit;
669 877
@@ -674,6 +882,11 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
674 if (megasas_alloc_reply_fusion(instance)) 882 if (megasas_alloc_reply_fusion(instance))
675 goto fail_exit; 883 goto fail_exit;
676 884
885 if (megasas_alloc_cmdlist_fusion(instance))
886 goto fail_exit;
887
888 dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
889 instance->max_fw_cmds);
677 890
678 /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */ 891 /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
679 io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 892 io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -770,22 +983,34 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
770 MFI_CAPABILITIES *drv_ops; 983 MFI_CAPABILITIES *drv_ops;
771 u32 scratch_pad_2; 984 u32 scratch_pad_2;
772 unsigned long flags; 985 unsigned long flags;
986 struct timeval tv;
987 bool cur_fw_64bit_dma_capable;
773 988
774 fusion = instance->ctrl_context; 989 fusion = instance->ctrl_context;
775 990
776 cmd = megasas_get_cmd(instance); 991 ioc_init_handle = fusion->ioc_init_request_phys;
992 IOCInitMessage = fusion->ioc_init_request;
777 993
778 if (!cmd) { 994 cmd = fusion->ioc_init_cmd;
779 dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
780 ret = 1;
781 goto fail_get_cmd;
782 }
783 995
784 scratch_pad_2 = readl 996 scratch_pad_2 = readl
785 (&instance->reg_set->outbound_scratch_pad_2); 997 (&instance->reg_set->outbound_scratch_pad_2);
786 998
787 cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0; 999 cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
788 1000
1001 if (instance->adapter_type == INVADER_SERIES) {
1002 cur_fw_64bit_dma_capable =
1003 (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
1004
1005 if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
1006 dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
1007 "DMA mask, but upcoming FW does not support 64bit DMA mask\n");
1008 megaraid_sas_kill_hba(instance);
1009 ret = 1;
1010 goto fail_fw_init;
1011 }
1012 }
1013
789 if (instance->is_rdpq && !cur_rdpq_mode) { 1014 if (instance->is_rdpq && !cur_rdpq_mode) {
790 dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*" 1015 dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
791 " from RDPQ mode to non RDPQ mode\n"); 1016 " from RDPQ mode to non RDPQ mode\n");
@@ -798,18 +1023,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
798 dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n", 1023 dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
799 instance->fw_sync_cache_support ? "Yes" : "No"); 1024 instance->fw_sync_cache_support ? "Yes" : "No");
800 1025
801 IOCInitMessage =
802 dma_alloc_coherent(&instance->pdev->dev,
803 sizeof(struct MPI2_IOC_INIT_REQUEST),
804 &ioc_init_handle, GFP_KERNEL);
805
806 if (!IOCInitMessage) {
807 dev_err(&instance->pdev->dev, "Could not allocate memory for "
808 "IOCInitMessage\n");
809 ret = 1;
810 goto fail_fw_init;
811 }
812
813 memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST)); 1026 memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
814 1027
815 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; 1028 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
@@ -825,8 +1038,15 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
825 IOCInitMessage->MsgFlags = instance->is_rdpq ? 1038 IOCInitMessage->MsgFlags = instance->is_rdpq ?
826 MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0; 1039 MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
827 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); 1040 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
1041 IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
828 IOCInitMessage->HostMSIxVectors = instance->msix_vectors; 1042 IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
829 IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; 1043 IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
1044
1045 do_gettimeofday(&tv);
1046 /* Convert to milliseconds as per FW requirement */
1047 IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
1048 (tv.tv_usec / 1000));
1049
830 init_frame = (struct megasas_init_frame *)cmd->frame; 1050 init_frame = (struct megasas_init_frame *)cmd->frame;
831 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 1051 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
832 1052
@@ -842,7 +1062,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
842 drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations); 1062 drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
843 1063
844 /* driver support Extended MSIX */ 1064 /* driver support Extended MSIX */
845 if (fusion->adapter_type >= INVADER_SERIES) 1065 if (instance->adapter_type >= INVADER_SERIES)
846 drv_ops->mfi_capabilities.support_additional_msix = 1; 1066 drv_ops->mfi_capabilities.support_additional_msix = 1;
847 /* driver supports HA / Remote LUN over Fast Path interface */ 1067 /* driver supports HA / Remote LUN over Fast Path interface */
848 drv_ops->mfi_capabilities.support_fp_remote_lun = 1; 1068 drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
@@ -860,6 +1080,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
860 1080
861 drv_ops->mfi_capabilities.support_qd_throttling = 1; 1081 drv_ops->mfi_capabilities.support_qd_throttling = 1;
862 drv_ops->mfi_capabilities.support_pd_map_target_id = 1; 1082 drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
1083
1084 if (instance->consistent_mask_64bit)
1085 drv_ops->mfi_capabilities.support_64bit_mode = 1;
1086
863 /* Convert capability to LE32 */ 1087 /* Convert capability to LE32 */
864 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 1088 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
865 1089
@@ -869,8 +1093,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
869 strlen(sys_info) > 64 ? 64 : strlen(sys_info)); 1093 strlen(sys_info) > 64 ? 64 : strlen(sys_info));
870 instance->system_info_buf->systemIdLength = 1094 instance->system_info_buf->systemIdLength =
871 strlen(sys_info) > 64 ? 64 : strlen(sys_info); 1095 strlen(sys_info) > 64 ? 64 : strlen(sys_info);
872 init_frame->system_info_lo = instance->system_info_h; 1096 init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
873 init_frame->system_info_hi = 0; 1097 init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
874 } 1098 }
875 1099
876 init_frame->queue_info_new_phys_addr_hi = 1100 init_frame->queue_info_new_phys_addr_hi =
@@ -917,12 +1141,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
917 ret = 0; 1141 ret = 0;
918 1142
919fail_fw_init: 1143fail_fw_init:
920 megasas_return_cmd(instance, cmd);
921 if (IOCInitMessage)
922 dma_free_coherent(&instance->pdev->dev,
923 sizeof(struct MPI2_IOC_INIT_REQUEST),
924 IOCInitMessage, ioc_init_handle);
925fail_get_cmd:
926 dev_err(&instance->pdev->dev, 1144 dev_err(&instance->pdev->dev,
927 "Init cmd return status %s for SCSI host %d\n", 1145 "Init cmd return status %s for SCSI host %d\n",
928 ret ? "FAILED" : "SUCCESS", instance->host->host_no); 1146 ret ? "FAILED" : "SUCCESS", instance->host->host_no);
@@ -967,6 +1185,15 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
967 1185
968 memset(pd_sync, 0, pd_seq_map_sz); 1186 memset(pd_sync, 0, pd_seq_map_sz);
969 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 1187 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1188
1189 if (pend) {
1190 dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
1191 dcmd->flags = MFI_FRAME_DIR_WRITE;
1192 instance->jbod_seq_cmd = cmd;
1193 } else {
1194 dcmd->flags = MFI_FRAME_DIR_READ;
1195 }
1196
970 dcmd->cmd = MFI_CMD_DCMD; 1197 dcmd->cmd = MFI_CMD_DCMD;
971 dcmd->cmd_status = 0xFF; 1198 dcmd->cmd_status = 0xFF;
972 dcmd->sge_count = 1; 1199 dcmd->sge_count = 1;
@@ -974,21 +1201,16 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
974 dcmd->pad_0 = 0; 1201 dcmd->pad_0 = 0;
975 dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz); 1202 dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
976 dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO); 1203 dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
977 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h); 1204
978 dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz); 1205 megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);
979 1206
980 if (pend) { 1207 if (pend) {
981 dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
982 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
983 instance->jbod_seq_cmd = cmd;
984 instance->instancet->issue_dcmd(instance, cmd); 1208 instance->instancet->issue_dcmd(instance, cmd);
985 return 0; 1209 return 0;
986 } 1210 }
987 1211
988 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
989
990 /* Below code is only for non pended DCMD */ 1212 /* Below code is only for non pended DCMD */
991 if (instance->ctrl_context && !instance->mask_interrupts) 1213 if (!instance->mask_interrupts)
992 ret = megasas_issue_blocked_cmd(instance, cmd, 1214 ret = megasas_issue_blocked_cmd(instance, cmd,
993 MFI_IO_TIMEOUT_SECS); 1215 MFI_IO_TIMEOUT_SECS);
994 else 1216 else
@@ -1001,7 +1223,7 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
1001 ret = -EINVAL; 1223 ret = -EINVAL;
1002 } 1224 }
1003 1225
1004 if (ret == DCMD_TIMEOUT && instance->ctrl_context) 1226 if (ret == DCMD_TIMEOUT)
1005 megaraid_sas_kill_hba(instance); 1227 megaraid_sas_kill_hba(instance);
1006 1228
1007 if (ret == DCMD_SUCCESS) 1229 if (ret == DCMD_SUCCESS)
@@ -1069,21 +1291,21 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
1069 dcmd->cmd = MFI_CMD_DCMD; 1291 dcmd->cmd = MFI_CMD_DCMD;
1070 dcmd->cmd_status = 0xFF; 1292 dcmd->cmd_status = 0xFF;
1071 dcmd->sge_count = 1; 1293 dcmd->sge_count = 1;
1072 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 1294 dcmd->flags = MFI_FRAME_DIR_READ;
1073 dcmd->timeout = 0; 1295 dcmd->timeout = 0;
1074 dcmd->pad_0 = 0; 1296 dcmd->pad_0 = 0;
1075 dcmd->data_xfer_len = cpu_to_le32(size_map_info); 1297 dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1076 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); 1298 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1077 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
1078 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
1079 1299
1080 if (instance->ctrl_context && !instance->mask_interrupts) 1300 megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
1301
1302 if (!instance->mask_interrupts)
1081 ret = megasas_issue_blocked_cmd(instance, cmd, 1303 ret = megasas_issue_blocked_cmd(instance, cmd,
1082 MFI_IO_TIMEOUT_SECS); 1304 MFI_IO_TIMEOUT_SECS);
1083 else 1305 else
1084 ret = megasas_issue_polled(instance, cmd); 1306 ret = megasas_issue_polled(instance, cmd);
1085 1307
1086 if (ret == DCMD_TIMEOUT && instance->ctrl_context) 1308 if (ret == DCMD_TIMEOUT)
1087 megaraid_sas_kill_hba(instance); 1309 megaraid_sas_kill_hba(instance);
1088 1310
1089 megasas_return_cmd(instance, cmd); 1311 megasas_return_cmd(instance, cmd);
@@ -1173,15 +1395,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
1173 dcmd->cmd = MFI_CMD_DCMD; 1395 dcmd->cmd = MFI_CMD_DCMD;
1174 dcmd->cmd_status = 0xFF; 1396 dcmd->cmd_status = 0xFF;
1175 dcmd->sge_count = 1; 1397 dcmd->sge_count = 1;
1176 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE); 1398 dcmd->flags = MFI_FRAME_DIR_WRITE;
1177 dcmd->timeout = 0; 1399 dcmd->timeout = 0;
1178 dcmd->pad_0 = 0; 1400 dcmd->pad_0 = 0;
1179 dcmd->data_xfer_len = cpu_to_le32(size_map_info); 1401 dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1180 dcmd->mbox.b[0] = num_lds; 1402 dcmd->mbox.b[0] = num_lds;
1181 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; 1403 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
1182 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); 1404 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1183 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 1405
1184 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); 1406 megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
1185 1407
1186 instance->map_update_cmd = cmd; 1408 instance->map_update_cmd = cmd;
1187 1409
@@ -1337,6 +1559,94 @@ ld_drv_map_alloc_fail:
1337} 1559}
1338 1560
1339/** 1561/**
1562 * megasas_configure_queue_sizes - Calculate size of request desc queue,
1563 * reply desc queue,
1564 * IO request frame queue, set can_queue.
1565 * @instance: Adapter soft state
1566 * @return: void
1567 */
1568static inline
1569void megasas_configure_queue_sizes(struct megasas_instance *instance)
1570{
1571 struct fusion_context *fusion;
1572 u16 max_cmd;
1573
1574 fusion = instance->ctrl_context;
1575 max_cmd = instance->max_fw_cmds;
1576
1577 if (instance->adapter_type == VENTURA_SERIES)
1578 instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
1579 else
1580 instance->max_mpt_cmds = instance->max_fw_cmds;
1581
1582 instance->max_scsi_cmds = instance->max_fw_cmds -
1583 (MEGASAS_FUSION_INTERNAL_CMDS +
1584 MEGASAS_FUSION_IOCTL_CMDS);
1585 instance->cur_can_queue = instance->max_scsi_cmds;
1586 instance->host->can_queue = instance->cur_can_queue;
1587
1588 fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;
1589
1590 fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
1591 instance->max_mpt_cmds;
1592 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
1593 (fusion->reply_q_depth);
1594 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1595 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1596 * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
1597}
1598
1599static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
1600{
1601 struct fusion_context *fusion;
1602 struct megasas_cmd *cmd;
1603
1604 fusion = instance->ctrl_context;
1605
1606 cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
1607
1608 if (!cmd) {
1609 dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
1610 __func__, __LINE__);
1611 return -ENOMEM;
1612 }
1613
1614 cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
1615 IOC_INIT_FRAME_SIZE,
1616 &cmd->frame_phys_addr, GFP_KERNEL);
1617
1618 if (!cmd->frame) {
1619 dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
1620 __func__, __LINE__);
1621 kfree(cmd);
1622 return -ENOMEM;
1623 }
1624
1625 fusion->ioc_init_cmd = cmd;
1626 return 0;
1627}
1628
1629/**
1630 * megasas_free_ioc_init_cmd - Free IOC INIT command frame
1631 * @instance: Adapter soft state
1632 */
1633static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
1634{
1635 struct fusion_context *fusion;
1636
1637 fusion = instance->ctrl_context;
1638
1639 if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
1640 dma_free_coherent(&instance->pdev->dev,
1641 IOC_INIT_FRAME_SIZE,
1642 fusion->ioc_init_cmd->frame,
1643 fusion->ioc_init_cmd->frame_phys_addr);
1644
1645 if (fusion->ioc_init_cmd)
1646 kfree(fusion->ioc_init_cmd);
1647}
1648
1649/**
1340 * megasas_init_adapter_fusion - Initializes the FW 1650 * megasas_init_adapter_fusion - Initializes the FW
1341 * @instance: Adapter soft state 1651 * @instance: Adapter soft state
1342 * 1652 *
@@ -1347,7 +1657,6 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1347{ 1657{
1348 struct megasas_register_set __iomem *reg_set; 1658 struct megasas_register_set __iomem *reg_set;
1349 struct fusion_context *fusion; 1659 struct fusion_context *fusion;
1350 u16 max_cmd;
1351 u32 scratch_pad_2; 1660 u32 scratch_pad_2;
1352 int i = 0, count; 1661 int i = 0, count;
1353 1662
@@ -1363,17 +1672,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1363 instance->max_mfi_cmds = 1672 instance->max_mfi_cmds =
1364 MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS; 1673 MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
1365 1674
1366 max_cmd = instance->max_fw_cmds; 1675 megasas_configure_queue_sizes(instance);
1367
1368 fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
1369
1370 fusion->request_alloc_sz =
1371 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
1372 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
1373 *(fusion->reply_q_depth);
1374 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1375 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1376 * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
1377 1676
1378 scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2); 1677 scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
1379 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, 1678 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
@@ -1431,6 +1730,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1431 MEGASAS_FUSION_IOCTL_CMDS); 1730 MEGASAS_FUSION_IOCTL_CMDS);
1432 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); 1731 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1433 1732
1733 if (megasas_alloc_ioc_init_frame(instance))
1734 return 1;
1735
1434 /* 1736 /*
1435 * Allocate memory for descriptors 1737 * Allocate memory for descriptors
1436 * Create a pool of commands 1738 * Create a pool of commands
@@ -1468,6 +1770,7 @@ fail_ioc_init:
1468fail_alloc_cmds: 1770fail_alloc_cmds:
1469 megasas_free_cmds(instance); 1771 megasas_free_cmds(instance);
1470fail_alloc_mfi_cmds: 1772fail_alloc_mfi_cmds:
1773 megasas_free_ioc_init_cmd(instance);
1471 return 1; 1774 return 1;
1472} 1775}
1473 1776
@@ -1800,7 +2103,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1800 2103
1801 fusion = instance->ctrl_context; 2104 fusion = instance->ctrl_context;
1802 2105
1803 if (fusion->adapter_type >= INVADER_SERIES) { 2106 if (instance->adapter_type >= INVADER_SERIES) {
1804 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; 2107 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
1805 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 2108 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1806 sgl_ptr_end->Flags = 0; 2109 sgl_ptr_end->Flags = 0;
@@ -1810,7 +2113,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1810 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); 2113 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
1811 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); 2114 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
1812 sgl_ptr->Flags = 0; 2115 sgl_ptr->Flags = 0;
1813 if (fusion->adapter_type >= INVADER_SERIES) 2116 if (instance->adapter_type >= INVADER_SERIES)
1814 if (i == sge_count - 1) 2117 if (i == sge_count - 1)
1815 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; 2118 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1816 sgl_ptr++; 2119 sgl_ptr++;
@@ -1820,7 +2123,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1820 (sge_count > fusion->max_sge_in_main_msg)) { 2123 (sge_count > fusion->max_sge_in_main_msg)) {
1821 2124
1822 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; 2125 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1823 if (fusion->adapter_type >= INVADER_SERIES) { 2126 if (instance->adapter_type >= INVADER_SERIES) {
1824 if ((le16_to_cpu(cmd->io_request->IoFlags) & 2127 if ((le16_to_cpu(cmd->io_request->IoFlags) &
1825 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != 2128 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1826 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 2129 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
@@ -1836,7 +2139,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1836 sg_chain = sgl_ptr; 2139 sg_chain = sgl_ptr;
1837 /* Prepare chain element */ 2140 /* Prepare chain element */
1838 sg_chain->NextChainOffset = 0; 2141 sg_chain->NextChainOffset = 0;
1839 if (fusion->adapter_type >= INVADER_SERIES) 2142 if (instance->adapter_type >= INVADER_SERIES)
1840 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; 2143 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1841 else 2144 else
1842 sg_chain->Flags = 2145 sg_chain->Flags =
@@ -2360,7 +2663,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2360 2663
2361 praid_context = &io_request->RaidContext; 2664 praid_context = &io_request->RaidContext;
2362 2665
2363 if (instance->is_ventura) { 2666 if (instance->adapter_type == VENTURA_SERIES) {
2364 spin_lock_irqsave(&instance->stream_lock, spinlock_flags); 2667 spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
2365 megasas_stream_detect(instance, cmd, &io_info); 2668 megasas_stream_detect(instance, cmd, &io_info);
2366 spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags); 2669 spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
@@ -2413,7 +2716,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2413 cmd->request_desc->SCSIIO.RequestFlags = 2716 cmd->request_desc->SCSIIO.RequestFlags =
2414 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO 2717 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
2415 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2718 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2416 if (fusion->adapter_type == INVADER_SERIES) { 2719 if (instance->adapter_type == INVADER_SERIES) {
2417 if (io_request->RaidContext.raid_context.reg_lock_flags == 2720 if (io_request->RaidContext.raid_context.reg_lock_flags ==
2418 REGION_TYPE_UNUSED) 2721 REGION_TYPE_UNUSED)
2419 cmd->request_desc->SCSIIO.RequestFlags = 2722 cmd->request_desc->SCSIIO.RequestFlags =
@@ -2426,7 +2729,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2426 io_request->RaidContext.raid_context.reg_lock_flags |= 2729 io_request->RaidContext.raid_context.reg_lock_flags |=
2427 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 2730 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
2428 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2731 MR_RL_FLAGS_SEQ_NUM_ENABLE);
2429 } else if (instance->is_ventura) { 2732 } else if (instance->adapter_type == VENTURA_SERIES) {
2430 io_request->RaidContext.raid_context_g35.nseg_type |= 2733 io_request->RaidContext.raid_context_g35.nseg_type |=
2431 (1 << RAID_CONTEXT_NSEG_SHIFT); 2734 (1 << RAID_CONTEXT_NSEG_SHIFT);
2432 io_request->RaidContext.raid_context_g35.nseg_type |= 2735 io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2445,7 +2748,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2445 &io_info, local_map_ptr); 2748 &io_info, local_map_ptr);
2446 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; 2749 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
2447 cmd->pd_r1_lb = io_info.pd_after_lb; 2750 cmd->pd_r1_lb = io_info.pd_after_lb;
2448 if (instance->is_ventura) 2751 if (instance->adapter_type == VENTURA_SERIES)
2449 io_request->RaidContext.raid_context_g35.span_arm 2752 io_request->RaidContext.raid_context_g35.span_arm
2450 = io_info.span_arm; 2753 = io_info.span_arm;
2451 else 2754 else
@@ -2455,7 +2758,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2455 } else 2758 } else
2456 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 2759 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
2457 2760
2458 if (instance->is_ventura) 2761 if (instance->adapter_type == VENTURA_SERIES)
2459 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; 2762 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
2460 else 2763 else
2461 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 2764 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
@@ -2478,7 +2781,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2478 cmd->request_desc->SCSIIO.RequestFlags = 2781 cmd->request_desc->SCSIIO.RequestFlags =
2479 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 2782 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
2480 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2783 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2481 if (fusion->adapter_type == INVADER_SERIES) { 2784 if (instance->adapter_type == INVADER_SERIES) {
2482 if (io_info.do_fp_rlbypass || 2785 if (io_info.do_fp_rlbypass ||
2483 (io_request->RaidContext.raid_context.reg_lock_flags 2786 (io_request->RaidContext.raid_context.reg_lock_flags
2484 == REGION_TYPE_UNUSED)) 2787 == REGION_TYPE_UNUSED))
@@ -2491,7 +2794,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
2491 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 2794 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
2492 MR_RL_FLAGS_SEQ_NUM_ENABLE); 2795 MR_RL_FLAGS_SEQ_NUM_ENABLE);
2493 io_request->RaidContext.raid_context.nseg = 0x1; 2796 io_request->RaidContext.raid_context.nseg = 0x1;
2494 } else if (instance->is_ventura) { 2797 } else if (instance->adapter_type == VENTURA_SERIES) {
2495 io_request->RaidContext.raid_context_g35.routing_flags |= 2798 io_request->RaidContext.raid_context_g35.routing_flags |=
2496 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2799 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2497 io_request->RaidContext.raid_context_g35.nseg_type |= 2800 io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2566,7 +2869,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
2566 2869
2567 /* set RAID context values */ 2870 /* set RAID context values */
2568 pRAID_Context->config_seq_num = raid->seqNum; 2871 pRAID_Context->config_seq_num = raid->seqNum;
2569 if (!instance->is_ventura) 2872 if (instance->adapter_type != VENTURA_SERIES)
2570 pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; 2873 pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
2571 pRAID_Context->timeout_value = 2874 pRAID_Context->timeout_value =
2572 cpu_to_le16(raid->fpIoTimeoutForLd); 2875 cpu_to_le16(raid->fpIoTimeoutForLd);
@@ -2651,7 +2954,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
2651 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); 2954 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
2652 pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum; 2955 pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
2653 io_request->DevHandle = pd_sync->seq[pd_index].devHandle; 2956 io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
2654 if (instance->is_ventura) { 2957 if (instance->adapter_type == VENTURA_SERIES) {
2655 io_request->RaidContext.raid_context_g35.routing_flags |= 2958 io_request->RaidContext.raid_context_g35.routing_flags |=
2656 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); 2959 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2657 io_request->RaidContext.raid_context_g35.nseg_type |= 2960 io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2699,7 +3002,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
2699 pRAID_Context->timeout_value = 3002 pRAID_Context->timeout_value =
2700 cpu_to_le16((os_timeout_value > timeout_limit) ? 3003 cpu_to_le16((os_timeout_value > timeout_limit) ?
2701 timeout_limit : os_timeout_value); 3004 timeout_limit : os_timeout_value);
2702 if (fusion->adapter_type >= INVADER_SERIES) 3005 if (instance->adapter_type >= INVADER_SERIES)
2703 io_request->IoFlags |= 3006 io_request->IoFlags |=
2704 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 3007 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2705 3008
@@ -2782,7 +3085,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
2782 return 1; 3085 return 1;
2783 } 3086 }
2784 3087
2785 if (instance->is_ventura) { 3088 if (instance->adapter_type == VENTURA_SERIES) {
2786 set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count); 3089 set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
2787 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags); 3090 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
2788 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type); 3091 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
@@ -2805,7 +3108,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
2805 io_request->SGLOffset0 = 3108 io_request->SGLOffset0 =
2806 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; 3109 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
2807 3110
2808 io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr); 3111 io_request->SenseBufferLowAddress =
3112 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
2809 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 3113 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
2810 3114
2811 cmd->scmd = scp; 3115 cmd->scmd = scp;
@@ -2846,7 +3150,7 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
2846 (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION))); 3150 (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
2847 /*sense buffer is different for r1 command*/ 3151 /*sense buffer is different for r1 command*/
2848 r1_cmd->io_request->SenseBufferLowAddress = 3152 r1_cmd->io_request->SenseBufferLowAddress =
2849 cpu_to_le32(r1_cmd->sense_phys_addr); 3153 cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
2850 r1_cmd->scmd = cmd->scmd; 3154 r1_cmd->scmd = cmd->scmd;
2851 req_desc2 = megasas_get_request_descriptor(instance, 3155 req_desc2 = megasas_get_request_descriptor(instance,
2852 (r1_cmd->index - 1)); 3156 (r1_cmd->index - 1));
@@ -3312,7 +3616,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
3312 3616
3313 io_req = cmd->io_request; 3617 io_req = cmd->io_request;
3314 3618
3315 if (fusion->adapter_type >= INVADER_SERIES) { 3619 if (instance->adapter_type >= INVADER_SERIES) {
3316 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = 3620 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
3317 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; 3621 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
3318 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 3622 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
@@ -3386,6 +3690,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
3386void 3690void
3387megasas_release_fusion(struct megasas_instance *instance) 3691megasas_release_fusion(struct megasas_instance *instance)
3388{ 3692{
3693 megasas_free_ioc_init_cmd(instance);
3389 megasas_free_cmds(instance); 3694 megasas_free_cmds(instance);
3390 megasas_free_cmds_fusion(instance); 3695 megasas_free_cmds_fusion(instance);
3391 3696
@@ -4244,7 +4549,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
4244 for (i = 0 ; i < instance->max_scsi_cmds; i++) { 4549 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
4245 cmd_fusion = fusion->cmd_list[i]; 4550 cmd_fusion = fusion->cmd_list[i];
4246 /*check for extra commands issued by driver*/ 4551 /*check for extra commands issued by driver*/
4247 if (instance->is_ventura) { 4552 if (instance->adapter_type == VENTURA_SERIES) {
4248 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds]; 4553 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
4249 megasas_return_cmd_fusion(instance, r1_cmd); 4554 megasas_return_cmd_fusion(instance, r1_cmd);
4250 } 4555 }
@@ -4345,7 +4650,7 @@ transition_to_ready:
4345 megasas_set_dynamic_target_properties(sdev); 4650 megasas_set_dynamic_target_properties(sdev);
4346 4651
4347 /* reset stream detection array */ 4652 /* reset stream detection array */
4348 if (instance->is_ventura) { 4653 if (instance->adapter_type == VENTURA_SERIES) {
4349 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { 4654 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
4350 memset(fusion->stream_detect_by_ld[j], 4655 memset(fusion->stream_detect_by_ld[j],
4351 0, sizeof(struct LD_STREAM_DETECT)); 4656 0, sizeof(struct LD_STREAM_DETECT));
@@ -4493,20 +4798,31 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
4493{ 4798{
4494 struct fusion_context *fusion; 4799 struct fusion_context *fusion;
4495 4800
4496 instance->ctrl_context_pages = get_order(sizeof(struct fusion_context)); 4801 instance->ctrl_context = kzalloc(sizeof(struct fusion_context),
4497 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 4802 GFP_KERNEL);
4498 instance->ctrl_context_pages);
4499 if (!instance->ctrl_context) { 4803 if (!instance->ctrl_context) {
4500 /* fall back to using vmalloc for fusion_context */ 4804 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
4501 instance->ctrl_context = vzalloc(sizeof(struct fusion_context)); 4805 __func__, __LINE__);
4502 if (!instance->ctrl_context) { 4806 return -ENOMEM;
4503 dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
4504 return -ENOMEM;
4505 }
4506 } 4807 }
4507 4808
4508 fusion = instance->ctrl_context; 4809 fusion = instance->ctrl_context;
4509 4810
4811 fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
4812 sizeof(LD_SPAN_INFO));
4813 fusion->log_to_span =
4814 (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4815 fusion->log_to_span_pages);
4816 if (!fusion->log_to_span) {
4817 fusion->log_to_span = vzalloc(MAX_LOGICAL_DRIVES_EXT *
4818 sizeof(LD_SPAN_INFO));
4819 if (!fusion->log_to_span) {
4820 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
4821 __func__, __LINE__);
4822 return -ENOMEM;
4823 }
4824 }
4825
4510 fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT * 4826 fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
4511 sizeof(struct LD_LOAD_BALANCE_INFO)); 4827 sizeof(struct LD_LOAD_BALANCE_INFO));
4512 fusion->load_balance_info = 4828 fusion->load_balance_info =
@@ -4537,11 +4853,15 @@ megasas_free_fusion_context(struct megasas_instance *instance)
4537 fusion->load_balance_info_pages); 4853 fusion->load_balance_info_pages);
4538 } 4854 }
4539 4855
4540 if (is_vmalloc_addr(fusion)) 4856 if (fusion->log_to_span) {
4541 vfree(fusion); 4857 if (is_vmalloc_addr(fusion->log_to_span))
4542 else 4858 vfree(fusion->log_to_span);
4543 free_pages((ulong)fusion, 4859 else
4544 instance->ctrl_context_pages); 4860 free_pages((ulong)fusion->log_to_span,
4861 fusion->log_to_span_pages);
4862 }
4863
4864 kfree(fusion);
4545 } 4865 }
4546} 4866}
4547 4867
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index d78d76112501..1814d79cb98d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -51,6 +51,8 @@
51#define HOST_DIAG_RESET_ADAPTER 0x4 51#define HOST_DIAG_RESET_ADAPTER 0x4
52#define MEGASAS_FUSION_MAX_RESET_TRIES 3 52#define MEGASAS_FUSION_MAX_RESET_TRIES 3
53#define MAX_MSIX_QUEUES_FUSION 128 53#define MAX_MSIX_QUEUES_FUSION 128
54#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
55#define RDPQ_MAX_CHUNK_COUNT (MAX_MSIX_QUEUES_FUSION / RDPQ_MAX_INDEX_IN_ONE_CHUNK)
54 56
55/* Invader defines */ 57/* Invader defines */
56#define MPI2_TYPE_CUDA 0x2 58#define MPI2_TYPE_CUDA 0x2
@@ -103,12 +105,8 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
103#define THRESHOLD_REPLY_COUNT 50 105#define THRESHOLD_REPLY_COUNT 50
104#define RAID_1_PEER_CMDS 2 106#define RAID_1_PEER_CMDS 2
105#define JBOD_MAPS_COUNT 2 107#define JBOD_MAPS_COUNT 2
106 108#define MEGASAS_REDUCE_QD_COUNT 64
107enum MR_FUSION_ADAPTER_TYPE { 109#define IOC_INIT_FRAME_SIZE 4096
108 THUNDERBOLT_SERIES = 0,
109 INVADER_SERIES = 1,
110 VENTURA_SERIES = 2,
111};
112 110
113/* 111/*
114 * Raid Context structure which describes MegaRAID specific IO Parameters 112 * Raid Context structure which describes MegaRAID specific IO Parameters
@@ -1270,6 +1268,12 @@ struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
1270 u32 Reserved2; 1268 u32 Reserved2;
1271}; 1269};
1272 1270
1271struct rdpq_alloc_detail {
1272 struct dma_pool *dma_pool_ptr;
1273 dma_addr_t pool_entry_phys;
1274 union MPI2_REPLY_DESCRIPTORS_UNION *pool_entry_virt;
1275};
1276
1273struct fusion_context { 1277struct fusion_context {
1274 struct megasas_cmd_fusion **cmd_list; 1278 struct megasas_cmd_fusion **cmd_list;
1275 dma_addr_t req_frames_desc_phys; 1279 dma_addr_t req_frames_desc_phys;
@@ -1282,9 +1286,14 @@ struct fusion_context {
1282 struct dma_pool *sg_dma_pool; 1286 struct dma_pool *sg_dma_pool;
1283 struct dma_pool *sense_dma_pool; 1287 struct dma_pool *sense_dma_pool;
1284 1288
1289 u8 *sense;
1290 dma_addr_t sense_phys_addr;
1291
1285 dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION]; 1292 dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
1286 union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION]; 1293 union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
1294 struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
1287 struct dma_pool *reply_frames_desc_pool; 1295 struct dma_pool *reply_frames_desc_pool;
1296 struct dma_pool *reply_frames_desc_pool_align;
1288 1297
1289 u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION]; 1298 u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
1290 1299
@@ -1318,9 +1327,13 @@ struct fusion_context {
1318 u8 fast_path_io; 1327 u8 fast_path_io;
1319 struct LD_LOAD_BALANCE_INFO *load_balance_info; 1328 struct LD_LOAD_BALANCE_INFO *load_balance_info;
1320 u32 load_balance_info_pages; 1329 u32 load_balance_info_pages;
1321 LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT]; 1330 LD_SPAN_INFO *log_to_span;
1322 u8 adapter_type; 1331 u32 log_to_span_pages;
1323 struct LD_STREAM_DETECT **stream_detect_by_ld; 1332 struct LD_STREAM_DETECT **stream_detect_by_ld;
1333 dma_addr_t ioc_init_request_phys;
1334 struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
1335 struct megasas_cmd *ioc_init_cmd;
1336
1324}; 1337};
1325 1338
1326union desc_value { 1339union desc_value {
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 2608011cc7f1..b015c30d2c32 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -9,7 +9,7 @@
9 * scatter/gather formats. 9 * scatter/gather formats.
10 * Creation Date: June 21, 2006 10 * Creation Date: June 21, 2006
11 * 11 *
12 * mpi2.h Version: 02.00.42 12 * mpi2.h Version: 02.00.48
13 * 13 *
14 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 14 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
15 * prefix are for use only on MPI v2.5 products, and must not be used 15 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -104,6 +104,16 @@
104 * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT. 104 * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT.
105 * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT 105 * 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT
106 * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT 106 * 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT
107 * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines
108 * to be unique within first 32 characters.
109 * Removed AHCI support.
110 * Removed SOP support.
111 * Bumped MPI2_HEADER_VERSION_UNIT.
112 * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT.
113 * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT.
114 * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT.
115 * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT.
116 * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
107 * -------------------------------------------------------------------------- 117 * --------------------------------------------------------------------------
108 */ 118 */
109 119
@@ -143,7 +153,7 @@
143#define MPI2_VERSION_02_06 (0x0206) 153#define MPI2_VERSION_02_06 (0x0206)
144 154
145/*Unit and Dev versioning for this MPI header set */ 155/*Unit and Dev versioning for this MPI header set */
146#define MPI2_HEADER_VERSION_UNIT (0x2A) 156#define MPI2_HEADER_VERSION_UNIT (0x30)
147#define MPI2_HEADER_VERSION_DEV (0x00) 157#define MPI2_HEADER_VERSION_DEV (0x00)
148#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 158#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
149#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 159#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -250,6 +260,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
250#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000) 260#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
251#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800) 261#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
252 262
263/* Defines for V7A/V7R HostDiagnostic Register */
264#define MPI26_DIAG_BOOT_DEVICE_SEL_64FLASH (0x00000000)
265#define MPI26_DIAG_BOOT_DEVICE_SEL_64HCDW (0x00000800)
266#define MPI26_DIAG_BOOT_DEVICE_SEL_32FLASH (0x00001000)
267#define MPI26_DIAG_BOOT_DEVICE_SEL_32HCDW (0x00001800)
268
253#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400) 269#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
254#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200) 270#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
255#define MPI2_DIAG_HCB_MODE (0x00000100) 271#define MPI2_DIAG_HCB_MODE (0x00000100)
@@ -368,6 +384,7 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
368#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) 384#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
369#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A) 385#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
370#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) 386#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
387#define MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED (0x10)
371 388
372#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) 389#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
373 390
@@ -426,6 +443,13 @@ typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
426 Mpi25FastPathSCSIIORequestDescriptor_t, 443 Mpi25FastPathSCSIIORequestDescriptor_t,
427 *pMpi25FastPathSCSIIORequestDescriptor_t; 444 *pMpi25FastPathSCSIIORequestDescriptor_t;
428 445
446/*PCIe Encapsulated Request Descriptor */
447typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
448 MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR,
449 *PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR,
450 Mpi26PCIeEncapsulatedRequestDescriptor_t,
451 *pMpi26PCIeEncapsulatedRequestDescriptor_t;
452
429/*union of Request Descriptors */ 453/*union of Request Descriptors */
430typedef union _MPI2_REQUEST_DESCRIPTOR_UNION { 454typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
431 MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; 455 MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
@@ -434,6 +458,7 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
434 MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; 458 MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
435 MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; 459 MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
436 MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; 460 MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
461 MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated;
437 U64 Words; 462 U64 Words;
438} MPI2_REQUEST_DESCRIPTOR_UNION, 463} MPI2_REQUEST_DESCRIPTOR_UNION,
439 *PTR_MPI2_REQUEST_DESCRIPTOR_UNION, 464 *PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
@@ -451,6 +476,7 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
451 * Atomic SCSI Target Request Descriptor 476 * Atomic SCSI Target Request Descriptor
452 * Atomic RAID Accelerator Request Descriptor 477 * Atomic RAID Accelerator Request Descriptor
453 * Atomic Fast Path SCSI IO Request Descriptor 478 * Atomic Fast Path SCSI IO Request Descriptor
479 * Atomic PCIe Encapsulated Request Descriptor
454 */ 480 */
455 481
456/*Atomic Request Descriptor */ 482/*Atomic Request Descriptor */
@@ -488,6 +514,7 @@ typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR {
488#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03) 514#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
489#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05) 515#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
490#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) 516#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
517#define MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS (0x08)
491#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) 518#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
492 519
493/*values for marking a reply descriptor as unused */ 520/*values for marking a reply descriptor as unused */
@@ -566,6 +593,13 @@ typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
566 Mpi25FastPathSCSIIOSuccessReplyDescriptor_t, 593 Mpi25FastPathSCSIIOSuccessReplyDescriptor_t,
567 *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t; 594 *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t;
568 595
596/*PCIe Encapsulated Success Reply Descriptor */
597typedef MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
598 MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR,
599 *PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR,
600 Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t,
601 *pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t;
602
569/*union of Reply Descriptors */ 603/*union of Reply Descriptors */
570typedef union _MPI2_REPLY_DESCRIPTORS_UNION { 604typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
571 MPI2_DEFAULT_REPLY_DESCRIPTOR Default; 605 MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
@@ -575,6 +609,8 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
575 MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; 609 MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
576 MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; 610 MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
577 MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; 611 MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
612 MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR
613 PCIeEncapsulatedSuccess;
578 U64 Words; 614 U64 Words;
579} MPI2_REPLY_DESCRIPTORS_UNION, 615} MPI2_REPLY_DESCRIPTORS_UNION,
580 *PTR_MPI2_REPLY_DESCRIPTORS_UNION, 616 *PTR_MPI2_REPLY_DESCRIPTORS_UNION,
@@ -617,6 +653,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
617#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) 653#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
618#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) 654#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
619#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) 655#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
656#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33)
620#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) 657#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
621#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) 658#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
622 659
@@ -1163,6 +1200,8 @@ typedef union _MPI25_SGE_IO_UNION {
1163 1200
1164#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) 1201#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
1165#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) 1202#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
1203#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
1204#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
1166 1205
1167/*Data Location Address Space */ 1206/*Data Location Address Space */
1168 1207
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 036c9cf61032..ee117106d0f7 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -7,7 +7,7 @@
7 * Title: MPI Configuration messages and pages 7 * Title: MPI Configuration messages and pages
8 * Creation Date: November 10, 2006 8 * Creation Date: November 10, 2006
9 * 9 *
10 * mpi2_cnfg.h Version: 02.00.35 10 * mpi2_cnfg.h Version: 02.00.40
11 * 11 *
12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
13 * prefix are for use only on MPI v2.5 products, and must not be used 13 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -190,6 +190,35 @@
190 * MPI2_CONFIG_PAGE_BIOS_1. 190 * MPI2_CONFIG_PAGE_BIOS_1.
191 * 08-25-15 02.00.34 Bumped Header Version. 191 * 08-25-15 02.00.34 Bumped Header Version.
192 * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4. 192 * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4.
193 * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines.
194 * Added Link field to PCIe Link Pages
195 * Added EnclosureLevel and ConnectorName to PCIe
196 * Device Page 0.
197 * Added define for PCIE IoUnit page 1 max rate shift.
198 * Added comment for reserved ExtPageTypes.
199 * Added SAS 4 22.5 gbs speed support.
200 * Added PCIe 4 16.0 GT/sec speec support.
201 * Removed AHCI support.
202 * Removed SOP support.
203 * Added NegotiatedLinkRate and NegotiatedPortWidth to
204 * PCIe device page 0.
205 * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines
206 * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types.
207 * Changed declaration of ConnectorName in PCIe DevicePage0
208 * to match SAS DevicePage 0.
209 * Added SATADeviceWaitTime to IO Unit Page 11.
210 * Added MPI26_MFGPAGE_DEVID_SAS4008
211 * Added x16 PCIe width to IO Unit Page 7
212 * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1
213 * phy data.
214 * Added InitStatus to PCIe IO Unit Page 1 header.
215 * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines.
216 * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
217 * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
218 * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN.
219 * Added ChassisSlot field to SAS Enclosure Page 0.
220 * Added ChassisSlot Valid bit (bit 5) to the Flags field
221 * in SAS Enclosure Page 0.
193 * -------------------------------------------------------------------------- 222 * --------------------------------------------------------------------------
194 */ 223 */
195 224
@@ -273,6 +302,10 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
273#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) 302#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
274#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) 303#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
275#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) 304#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
305#define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT (0x1B)
306#define MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH (0x1C)
307#define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE (0x1D)
308#define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK (0x1E)
276 309
277 310
278/***************************************************************************** 311/*****************************************************************************
@@ -340,6 +373,12 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
340 373
341#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) 374#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
342 375
376/*Enclosure PageAddress format */
377#define MPI26_ENCLOS_PGAD_FORM_MASK (0xF0000000)
378#define MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
379#define MPI26_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
380
381#define MPI26_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
343 382
344/*RAID Configuration PageAddress format */ 383/*RAID Configuration PageAddress format */
345#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000) 384#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
@@ -366,6 +405,33 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
366#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) 405#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
367 406
368 407
408/*PCIe Switch PageAddress format */
409#define MPI26_PCIE_SWITCH_PGAD_FORM_MASK (0xF0000000)
410#define MPI26_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
411#define MPI26_PCIE_SWITCH_PGAD_FORM_HNDL_PORTNUM (0x10000000)
412#define MPI26_PCIE_SWITCH_EXPAND_PGAD_FORM_HNDL (0x20000000)
413
414#define MPI26_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000FFFF)
415#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00FF0000)
416#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16)
417
418
419/*PCIe Device PageAddress format */
420#define MPI26_PCIE_DEVICE_PGAD_FORM_MASK (0xF0000000)
421#define MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
422#define MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE (0x20000000)
423
424#define MPI26_PCIE_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
425
426/*PCIe Link PageAddress format */
427#define MPI26_PCIE_LINK_PGAD_FORM_MASK (0xF0000000)
428#define MPI26_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000)
429#define MPI26_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000)
430
431#define MPI26_PCIE_DEVICE_PGAD_LINKNUM_MASK (0x000000FF)
432
433
434
369/**************************************************************************** 435/****************************************************************************
370* Configuration messages 436* Configuration messages
371****************************************************************************/ 437****************************************************************************/
@@ -485,6 +551,12 @@ typedef struct _MPI2_CONFIG_REPLY {
485#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD) 551#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD)
486#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE) 552#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE)
487#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF) 553#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF)
554#define MPI26_MFGPAGE_DEVID_SAS3716 (0x00D0)
555#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1)
556#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2)
557
558#define MPI26_MFGPAGE_DEVID_SAS4008 (0x00A1)
559
488 560
489/*Manufacturing Page 0 */ 561/*Manufacturing Page 0 */
490 562
@@ -727,6 +799,12 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
727#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B) 799#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
728#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C) 800#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
729#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D) 801#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
802#define MPI2_MANPAGE7_PINOUT_SFF_8088_A (0x0E)
803#define MPI2_MANPAGE7_PINOUT_SFF_8643_16i (0x0F)
804#define MPI2_MANPAGE7_PINOUT_SFF_8654_4i (0x10)
805#define MPI2_MANPAGE7_PINOUT_SFF_8654_8i (0x11)
806#define MPI2_MANPAGE7_PINOUT_SFF_8611_4i (0x12)
807#define MPI2_MANPAGE7_PINOUT_SFF_8611_8i (0x13)
730 808
731/*defines for the Location field */ 809/*defines for the Location field */
732#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01) 810#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
@@ -737,6 +815,9 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
737#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20) 815#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
738#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80) 816#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
739 817
818/*defines for the Slot field */
819#define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF)
820
740/* 821/*
741 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to 822 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
742 *one and check the value returned for NumPhys at runtime. 823 *one and check the value returned for NumPhys at runtime.
@@ -1000,11 +1081,13 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
1000#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) 1081#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
1001#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) 1082#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
1002#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08) 1083#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
1084#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X16 (0x10)
1003 1085
1004/*defines for IO Unit Page 7 PCIeSpeed field */ 1086/*defines for IO Unit Page 7 PCIeSpeed field */
1005#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) 1087#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
1006#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) 1088#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
1007#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) 1089#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
1090#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03)
1008 1091
1009/*defines for IO Unit Page 7 ProcessorState field */ 1092/*defines for IO Unit Page 7 ProcessorState field */
1010#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) 1093#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
@@ -1971,6 +2054,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
1971#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) 2054#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
1972#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) 2055#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
1973#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B) 2056#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B)
2057#define MPI26_SAS_NEG_LINK_RATE_22_5 (0x0C)
1974 2058
1975 2059
1976/*values for AttachedPhyInfo fields */ 2060/*values for AttachedPhyInfo fields */
@@ -2038,12 +2122,14 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
2038#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) 2122#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
2039#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) 2123#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
2040#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0) 2124#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0)
2125#define MPI26_SAS_PRATE_MAX_RATE_22_5 (0xC0)
2041#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) 2126#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
2042#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) 2127#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
2043#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) 2128#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
2044#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09) 2129#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
2045#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A) 2130#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
2046#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B) 2131#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B)
2132#define MPI26_SAS_PRATE_MIN_RATE_22_5 (0x0C)
2047 2133
2048 2134
2049/*values for SAS HwLinkRate fields */ 2135/*values for SAS HwLinkRate fields */
@@ -2052,11 +2138,13 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
2052#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) 2138#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
2053#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) 2139#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
2054#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0) 2140#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
2141#define MPI26_SAS_HWRATE_MAX_RATE_22_5 (0xC0)
2055#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) 2142#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
2056#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) 2143#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
2057#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) 2144#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
2058#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A) 2145#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
2059#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B) 2146#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B)
2147#define MPI26_SAS_HWRATE_MIN_RATE_22_5 (0x0C)
2060 2148
2061 2149
2062 2150
@@ -2241,11 +2329,13 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
2241#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) 2329#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
2242#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0) 2330#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
2243#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0) 2331#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
2332#define MPI26_SASIOUNIT1_MAX_RATE_22_5 (0xC0)
2244#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F) 2333#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
2245#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08) 2334#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
2246#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09) 2335#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
2247#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A) 2336#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
2248#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B) 2337#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
2338#define MPI26_SASIOUNIT1_MIN_RATE_22_5 (0x0C)
2249 2339
2250/*see mpi2_sas.h for values for 2340/*see mpi2_sas.h for values for
2251 *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */ 2341 *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
@@ -3159,37 +3249,29 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 {
3159/*SAS Enclosure Page 0 */ 3249/*SAS Enclosure Page 0 */
3160 3250
3161typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 { 3251typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
3162 MPI2_CONFIG_EXTENDED_PAGE_HEADER 3252 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3163 Header; /*0x00 */ 3253 U32 Reserved1; /*0x08 */
3164 U32 3254 U64 EnclosureLogicalID; /*0x0C */
3165 Reserved1; /*0x08 */ 3255 U16 Flags; /*0x14 */
3166 U64 3256 U16 EnclosureHandle; /*0x16 */
3167 EnclosureLogicalID; /*0x0C */ 3257 U16 NumSlots; /*0x18 */
3168 U16 3258 U16 StartSlot; /*0x1A */
3169 Flags; /*0x14 */ 3259 U8 ChassisSlot; /*0x1C */
3170 U16 3260 U8 EnclosureLeve; /*0x1D */
3171 EnclosureHandle; /*0x16 */ 3261 U16 SEPDevHandle; /*0x1E */
3172 U16 3262 U32 Reserved3; /*0x20 */
3173 NumSlots; /*0x18 */ 3263 U32 Reserved4; /*0x24 */
3174 U16
3175 StartSlot; /*0x1A */
3176 U8
3177 Reserved2; /*0x1C */
3178 U8
3179 EnclosureLevel; /*0x1D */
3180 U16
3181 SEPDevHandle; /*0x1E */
3182 U32
3183 Reserved3; /*0x20 */
3184 U32
3185 Reserved4; /*0x24 */
3186} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, 3264} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
3187 *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, 3265 *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
3188 Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t; 3266 Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t,
3267 MPI26_CONFIG_PAGE_ENCLOSURE_0,
3268 *PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0,
3269 Mpi26EnclosurePage0_t, *pMpi26EnclosurePage0_t;
3189 3270
3190#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) 3271#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
3191 3272
3192/*values for SAS Enclosure Page 0 Flags field */ 3273/*values for SAS Enclosure Page 0 Flags field */
3274#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
3193#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) 3275#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
3194#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) 3276#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
3195#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) 3277#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
@@ -3199,6 +3281,18 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
3199#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) 3281#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
3200#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) 3282#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
3201 3283
3284#define MPI26_ENCLOSURE0_PAGEVERSION (0x04)
3285
3286/*Values for Enclosure Page 0 Flags field */
3287#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
3288#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
3289#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F)
3290#define MPI26_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
3291#define MPI26_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
3292#define MPI26_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
3293#define MPI26_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
3294#define MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
3295#define MPI26_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
3202 3296
3203/**************************************************************************** 3297/****************************************************************************
3204* Log Config Page 3298* Log Config Page
@@ -3498,4 +3592,422 @@ typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
3498 3592
3499/*PageVersion should be provided by product-specific code */ 3593/*PageVersion should be provided by product-specific code */
3500 3594
3595
3596
3597/****************************************************************************
3598* values for fields used by several types of PCIe Config Pages
3599****************************************************************************/
3600
3601/*values for NegotiatedLinkRates fields */
3602#define MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
3603/*link rates used for Negotiated Physical Link Rate */
3604#define MPI26_PCIE_NEG_LINK_RATE_UNKNOWN (0x00)
3605#define MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01)
3606#define MPI26_PCIE_NEG_LINK_RATE_2_5 (0x02)
3607#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03)
3608#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04)
3609#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05)
3610
3611
3612/****************************************************************************
3613* PCIe IO Unit Config Pages (MPI v2.6 and later)
3614****************************************************************************/
3615
3616/*PCIe IO Unit Page 0 */
3617
3618typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA {
3619 U8 Link; /*0x00 */
3620 U8 LinkFlags; /*0x01 */
3621 U8 PhyFlags; /*0x02 */
3622 U8 NegotiatedLinkRate; /*0x03 */
3623 U32 ControllerPhyDeviceInfo;/*0x04 */
3624 U16 AttachedDevHandle; /*0x08 */
3625 U16 ControllerDevHandle; /*0x0A */
3626 U32 EnumerationStatus; /*0x0C */
3627 U32 Reserved1; /*0x10 */
3628} MPI26_PCIE_IO_UNIT0_PHY_DATA,
3629 *PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA,
3630 Mpi26PCIeIOUnit0PhyData_t, *pMpi26PCIeIOUnit0PhyData_t;
3631
3632/*
3633 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
3634 *one and check the value returned for NumPhys at runtime.
3635 */
3636#ifndef MPI26_PCIE_IOUNIT0_PHY_MAX
3637#define MPI26_PCIE_IOUNIT0_PHY_MAX (1)
3638#endif
3639
3640typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 {
3641 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3642 U32 Reserved1; /*0x08 */
3643 U8 NumPhys; /*0x0C */
3644 U8 InitStatus; /*0x0D */
3645 U16 Reserved3; /*0x0E */
3646 MPI26_PCIE_IO_UNIT0_PHY_DATA
3647 PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /*0x10 */
3648} MPI26_CONFIG_PAGE_PIOUNIT_0,
3649 *PTR_MPI26_CONFIG_PAGE_PIOUNIT_0,
3650 Mpi26PCIeIOUnitPage0_t, *pMpi26PCIeIOUnitPage0_t;
3651
3652#define MPI26_PCIEIOUNITPAGE0_PAGEVERSION (0x00)
3653
3654/*values for PCIe IO Unit Page 0 LinkFlags */
3655#define MPI26_PCIEIOUNIT0_LINKFLAGS_ENUMERATION_IN_PROGRESS (0x08)
3656
3657/*values for PCIe IO Unit Page 0 PhyFlags */
3658#define MPI26_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
3659
3660/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
3661
3662/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
3663 *values
3664 */
3665
3666/*values for PCIe IO Unit Page 0 EnumerationStatus */
3667#define MPI26_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000)
3668#define MPI26_PCIEIOUNIT0_ES_MAX_DEVICES_EXCEEDED (0x20000000)
3669
3670
3671/*PCIe IO Unit Page 1 */
3672
3673typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA {
3674 U8 Link; /*0x00 */
3675 U8 LinkFlags; /*0x01 */
3676 U8 PhyFlags; /*0x02 */
3677 U8 MaxMinLinkRate; /*0x03 */
3678 U32 ControllerPhyDeviceInfo; /*0x04 */
3679 U32 Reserved1; /*0x08 */
3680} MPI26_PCIE_IO_UNIT1_PHY_DATA,
3681 *PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA,
3682 Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t;
3683
3684/*values for LinkFlags */
3685#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS (0x00)
3686#define MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS (0x01)
3687
3688/*
3689 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
3690 *one and check the value returned for NumPhys at runtime.
3691 */
3692#ifndef MPI26_PCIE_IOUNIT1_PHY_MAX
3693#define MPI26_PCIE_IOUNIT1_PHY_MAX (1)
3694#endif
3695
3696typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
3697 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3698 U16 ControlFlags; /*0x08 */
3699 U16 Reserved; /*0x0A */
3700 U16 AdditionalControlFlags; /*0x0C */
3701 U16 NVMeMaxQueueDepth; /*0x0E */
3702 U8 NumPhys; /*0x10 */
3703 U8 Reserved1; /*0x11 */
3704 U16 Reserved2; /*0x12 */
3705 MPI26_PCIE_IO_UNIT1_PHY_DATA
3706 PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */
3707} MPI26_CONFIG_PAGE_PIOUNIT_1,
3708 *PTR_MPI26_CONFIG_PAGE_PIOUNIT_1,
3709 Mpi26PCIeIOUnitPage1_t, *pMpi26PCIeIOUnitPage1_t;
3710
3711#define MPI26_PCIEIOUNITPAGE1_PAGEVERSION (0x00)
3712
3713/*values for PCIe IO Unit Page 1 PhyFlags */
3714#define MPI26_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
3715#define MPI26_PCIEIOUNIT1_PHYFLAGS_ENDPOINT_ONLY (0x01)
3716
3717/*values for PCIe IO Unit Page 1 MaxMinLinkRate */
3718#define MPI26_PCIEIOUNIT1_MAX_RATE_MASK (0xF0)
3719#define MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT (4)
3720#define MPI26_PCIEIOUNIT1_MAX_RATE_2_5 (0x20)
3721#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30)
3722#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
3723#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
3724
3725/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
3726 *values
3727 */
3728
3729
3730/****************************************************************************
3731* PCIe Switch Config Pages (MPI v2.6 and later)
3732****************************************************************************/
3733
3734/*PCIe Switch Page 0 */
3735
3736typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 {
3737 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3738 U8 PhysicalPort; /*0x08 */
3739 U8 Reserved1; /*0x09 */
3740 U16 Reserved2; /*0x0A */
3741 U16 DevHandle; /*0x0C */
3742 U16 ParentDevHandle; /*0x0E */
3743 U8 NumPorts; /*0x10 */
3744 U8 PCIeLevel; /*0x11 */
3745 U16 Reserved3; /*0x12 */
3746 U32 Reserved4; /*0x14 */
3747 U32 Reserved5; /*0x18 */
3748 U32 Reserved6; /*0x1C */
3749} MPI26_CONFIG_PAGE_PSWITCH_0, *PTR_MPI26_CONFIG_PAGE_PSWITCH_0,
3750 Mpi26PCIeSwitchPage0_t, *pMpi26PCIeSwitchPage0_t;
3751
3752#define MPI26_PCIESWITCH0_PAGEVERSION (0x00)
3753
3754
3755/*PCIe Switch Page 1 */
3756
3757typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 {
3758 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3759 U8 PhysicalPort; /*0x08 */
3760 U8 Reserved1; /*0x09 */
3761 U16 Reserved2; /*0x0A */
3762 U8 NumPorts; /*0x0C */
3763 U8 PortNum; /*0x0D */
3764 U16 AttachedDevHandle; /*0x0E */
3765 U16 SwitchDevHandle; /*0x10 */
3766 U8 NegotiatedPortWidth; /*0x12 */
3767 U8 NegotiatedLinkRate; /*0x13 */
3768 U32 Reserved4; /*0x14 */
3769 U32 Reserved5; /*0x18 */
3770} MPI26_CONFIG_PAGE_PSWITCH_1, *PTR_MPI26_CONFIG_PAGE_PSWITCH_1,
3771 Mpi26PCIeSwitchPage1_t, *pMpi26PCIeSwitchPage1_t;
3772
3773#define MPI26_PCIESWITCH1_PAGEVERSION (0x00)
3774
3775/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
3776
3777
3778/****************************************************************************
3779* PCIe Device Config Pages (MPI v2.6 and later)
3780****************************************************************************/
3781
3782/*PCIe Device Page 0 */
3783
3784typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 {
3785 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3786 U16 Slot; /*0x08 */
3787 U16 EnclosureHandle; /*0x0A */
3788 U64 WWID; /*0x0C */
3789 U16 ParentDevHandle; /*0x14 */
3790 U8 PortNum; /*0x16 */
3791 U8 AccessStatus; /*0x17 */
3792 U16 DevHandle; /*0x18 */
3793 U8 PhysicalPort; /*0x1A */
3794 U8 Reserved1; /*0x1B */
3795 U32 DeviceInfo; /*0x1C */
3796 U32 Flags; /*0x20 */
3797 U8 SupportedLinkRates; /*0x24 */
3798 U8 MaxPortWidth; /*0x25 */
3799 U8 NegotiatedPortWidth; /*0x26 */
3800 U8 NegotiatedLinkRate; /*0x27 */
3801 U8 EnclosureLevel; /*0x28 */
3802 U8 Reserved2; /*0x29 */
3803 U16 Reserved3; /*0x2A */
3804 U8 ConnectorName[4]; /*0x2C */
3805 U32 Reserved4; /*0x30 */
3806 U32 Reserved5; /*0x34 */
3807} MPI26_CONFIG_PAGE_PCIEDEV_0, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_0,
3808 Mpi26PCIeDevicePage0_t, *pMpi26PCIeDevicePage0_t;
3809
3810#define MPI26_PCIEDEVICE0_PAGEVERSION (0x01)
3811
3812/*values for PCIe Device Page 0 AccessStatus field */
3813#define MPI26_PCIEDEV0_ASTATUS_NO_ERRORS (0x00)
3814#define MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION (0x04)
3815#define MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED (0x02)
3816#define MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED (0x07)
3817#define MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED (0x08)
3818#define MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE (0x09)
3819#define MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED (0x0A)
3820#define MPI26_PCIEDEV0_ASTATUS_UNKNOWN (0x10)
3821
3822#define MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT (0x30)
3823#define MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x31)
3824#define MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED (0x32)
3825#define MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED (0x33)
3826#define MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED (0x34)
3827#define MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED (0x35)
3828#define MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x36)
3829#define MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT (0x37)
3830#define MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS (0x38)
3831
3832#define MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX (0x3F)
3833
3834/*see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo
3835 *field
3836 */
3837
3838/*values for PCIe Device Page 0 Flags field */
3839#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
3840#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x4000)
3841#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x2000)
3842#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x0400)
3843#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x0200)
3844#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
3845#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x0080)
3846#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x0040)
3847#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x0020)
3848#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x0010)
3849#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x0002)
3850#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x0001)
3851
3852/* values for PCIe Device Page 0 SupportedLinkRates field */
3853#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08)
3854#define MPI26_PCIEDEV0_LINK_RATE_8_0_SUPPORTED (0x04)
3855#define MPI26_PCIEDEV0_LINK_RATE_5_0_SUPPORTED (0x02)
3856#define MPI26_PCIEDEV0_LINK_RATE_2_5_SUPPORTED (0x01)
3857
3858/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
3859
3860
3861/*PCIe Device Page 2 */
3862
3863typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
3864 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3865 U16 DevHandle; /*0x08 */
3866 U16 Reserved1; /*0x0A */
3867 U32 MaximumDataTransferSize;/*0x0C */
3868 U32 Capabilities; /*0x10 */
3869 U32 Reserved2; /*0x14 */
3870} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
3871 Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
3872
3873#define MPI26_PCIEDEVICE2_PAGEVERSION (0x00)
3874
3875/*defines for PCIe Device Page 2 Capabilities field */
3876#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004)
3877#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002)
3878#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001)
3879
3880
3881/****************************************************************************
3882* PCIe Link Config Pages (MPI v2.6 and later)
3883****************************************************************************/
3884
3885/*PCIe Link Page 1 */
3886
3887typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 {
3888 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3889 U8 Link; /*0x08 */
3890 U8 Reserved1; /*0x09 */
3891 U16 Reserved2; /*0x0A */
3892 U32 CorrectableErrorCount; /*0x0C */
3893 U16 NonFatalErrorCount; /*0x10 */
3894 U16 Reserved3; /*0x12 */
3895 U16 FatalErrorCount; /*0x14 */
3896 U16 Reserved4; /*0x16 */
3897} MPI26_CONFIG_PAGE_PCIELINK_1, *PTR_MPI26_CONFIG_PAGE_PCIELINK_1,
3898 Mpi26PcieLinkPage1_t, *pMpi26PcieLinkPage1_t;
3899
3900#define MPI26_PCIELINK1_PAGEVERSION (0x00)
3901
3902/*PCIe Link Page 2 */
3903
3904typedef struct _MPI26_PCIELINK2_LINK_EVENT {
3905 U8 LinkEventCode; /*0x00 */
3906 U8 Reserved1; /*0x01 */
3907 U16 Reserved2; /*0x02 */
3908 U32 LinkEventInfo; /*0x04 */
3909} MPI26_PCIELINK2_LINK_EVENT, *PTR_MPI26_PCIELINK2_LINK_EVENT,
3910 Mpi26PcieLink2LinkEvent_t, *pMpi26PcieLink2LinkEvent_t;
3911
3912/*use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */
3913
3914
3915/*
3916 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
3917 *one and check the value returned for NumLinkEvents at runtime.
3918 */
3919#ifndef MPI26_PCIELINK2_LINK_EVENT_MAX
3920#define MPI26_PCIELINK2_LINK_EVENT_MAX (1)
3921#endif
3922
3923typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 {
3924 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3925 U8 Link; /*0x08 */
3926 U8 Reserved1; /*0x09 */
3927 U16 Reserved2; /*0x0A */
3928 U8 NumLinkEvents; /*0x0C */
3929 U8 Reserved3; /*0x0D */
3930 U16 Reserved4; /*0x0E */
3931 MPI26_PCIELINK2_LINK_EVENT
3932 LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /*0x10 */
3933} MPI26_CONFIG_PAGE_PCIELINK_2, *PTR_MPI26_CONFIG_PAGE_PCIELINK_2,
3934 Mpi26PcieLinkPage2_t, *pMpi26PcieLinkPage2_t;
3935
3936#define MPI26_PCIELINK2_PAGEVERSION (0x00)
3937
3938/*PCIe Link Page 3 */
3939
3940typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG {
3941 U8 LinkEventCode; /*0x00 */
3942 U8 Reserved1; /*0x01 */
3943 U16 Reserved2; /*0x02 */
3944 U8 CounterType; /*0x04 */
3945 U8 ThresholdWindow; /*0x05 */
3946 U8 TimeUnits; /*0x06 */
3947 U8 Reserved3; /*0x07 */
3948 U32 EventThreshold; /*0x08 */
3949 U16 ThresholdFlags; /*0x0C */
3950 U16 Reserved4; /*0x0E */
3951} MPI26_PCIELINK3_LINK_EVENT_CONFIG, *PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG,
3952 Mpi26PcieLink3LinkEventConfig_t, *pMpi26PcieLink3LinkEventConfig_t;
3953
3954/*values for LinkEventCode field */
3955#define MPI26_PCIELINK3_EVTCODE_NO_EVENT (0x00)
3956#define MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED (0x01)
3957#define MPI26_PCIELINK3_EVTCODE_NON_FATAL_ERROR_RECEIVED (0x02)
3958#define MPI26_PCIELINK3_EVTCODE_FATAL_ERROR_RECEIVED (0x03)
3959#define MPI26_PCIELINK3_EVTCODE_DATA_LINK_ERROR_DETECTED (0x04)
3960#define MPI26_PCIELINK3_EVTCODE_TRANSACTION_LAYER_ERROR_DETECTED (0x05)
3961#define MPI26_PCIELINK3_EVTCODE_TLP_ECRC_ERROR_DETECTED (0x06)
3962#define MPI26_PCIELINK3_EVTCODE_POISONED_TLP (0x07)
3963#define MPI26_PCIELINK3_EVTCODE_RECEIVED_NAK_DLLP (0x08)
3964#define MPI26_PCIELINK3_EVTCODE_SENT_NAK_DLLP (0x09)
3965#define MPI26_PCIELINK3_EVTCODE_LTSSM_RECOVERY_STATE (0x0A)
3966#define MPI26_PCIELINK3_EVTCODE_LTSSM_RXL0S_STATE (0x0B)
3967#define MPI26_PCIELINK3_EVTCODE_LTSSM_TXL0S_STATE (0x0C)
3968#define MPI26_PCIELINK3_EVTCODE_LTSSM_L1_STATE (0x0D)
3969#define MPI26_PCIELINK3_EVTCODE_LTSSM_DISABLED_STATE (0x0E)
3970#define MPI26_PCIELINK3_EVTCODE_LTSSM_HOT_RESET_STATE (0x0F)
3971#define MPI26_PCIELINK3_EVTCODE_SYSTEM_ERROR (0x10)
3972#define MPI26_PCIELINK3_EVTCODE_DECODE_ERROR (0x11)
3973#define MPI26_PCIELINK3_EVTCODE_DISPARITY_ERROR (0x12)
3974
3975/*values for the CounterType field */
3976#define MPI26_PCIELINK3_COUNTER_TYPE_WRAPPING (0x00)
3977#define MPI26_PCIELINK3_COUNTER_TYPE_SATURATING (0x01)
3978#define MPI26_PCIELINK3_COUNTER_TYPE_PEAK_VALUE (0x02)
3979
3980/*values for the TimeUnits field */
3981#define MPI26_PCIELINK3_TM_UNITS_10_MICROSECONDS (0x00)
3982#define MPI26_PCIELINK3_TM_UNITS_100_MICROSECONDS (0x01)
3983#define MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND (0x02)
3984#define MPI26_PCIELINK3_TM_UNITS_10_MILLISECONDS (0x03)
3985
3986/*values for the ThresholdFlags field */
3987#define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001)
3988
3989/*
3990 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
3991 *one and check the value returned for NumLinkEvents at runtime.
3992 */
3993#ifndef MPI26_PCIELINK3_LINK_EVENT_MAX
3994#define MPI26_PCIELINK3_LINK_EVENT_MAX (1)
3995#endif
3996
3997typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 {
3998 MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
3999 U8 Link; /*0x08 */
4000 U8 Reserved1; /*0x09 */
4001 U16 Reserved2; /*0x0A */
4002 U8 NumLinkEvents; /*0x0C */
4003 U8 Reserved3; /*0x0D */
4004 U16 Reserved4; /*0x0E */
4005 MPI26_PCIELINK3_LINK_EVENT_CONFIG
4006 LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /*0x10 */
4007} MPI26_CONFIG_PAGE_PCIELINK_3, *PTR_MPI26_CONFIG_PAGE_PCIELINK_3,
4008 Mpi26PcieLinkPage3_t, *pMpi26PcieLinkPage3_t;
4009
4010#define MPI26_PCIELINK3_PAGEVERSION (0x00)
4011
4012
3501#endif 4013#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 38b2c879bf0f..948a3ba682d7 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -7,7 +7,7 @@
7 * Title: MPI SCSI initiator mode messages and structures 7 * Title: MPI SCSI initiator mode messages and structures
8 * Creation Date: June 23, 2006 8 * Creation Date: June 23, 2006
9 * 9 *
10 * mpi2_init.h Version: 02.00.20 10 * mpi2_init.h Version: 02.00.21
11 * 11 *
12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
13 * prefix are for use only on MPI v2.5 products, and must not be used 13 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -55,6 +55,8 @@
55 * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset. 55 * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset.
56 * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message. 56 * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message.
57 * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message. 57 * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message.
58 * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to
59 * be unique within first 32 characters.
58 * -------------------------------------------------------------------------- 60 * --------------------------------------------------------------------------
59 */ 61 */
60 62
@@ -374,6 +376,11 @@ typedef struct _MPI2_SCSI_IO_REPLY {
374} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, 376} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
375 Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; 377 Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
376 378
379/*SCSI IO Reply MsgFlags bits */
380#define MPI26_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01)
381#define MPI26_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x02)
382#define MPI26_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x04)
383
377/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */ 384/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
378 385
379#define MPI2_SCSI_STATUS_GOOD (0x00) 386#define MPI2_SCSI_STATUS_GOOD (0x00)
@@ -447,11 +454,13 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST {
447/*MsgFlags bits */ 454/*MsgFlags bits */
448 455
449#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18) 456#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
457#define MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE (0x00)
450#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) 458#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
451#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08) 459#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
452#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10) 460#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
453 461
454#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01) 462#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
463#define MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE (0x18)
455 464
456/*SCSI Task Management Reply Message */ 465/*SCSI Task Management Reply Message */
457typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY { 466typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 673cf05f94dc..cc2aff7aa67b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -7,7 +7,7 @@
7 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 7 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
8 * Creation Date: October 11, 2006 8 * Creation Date: October 11, 2006
9 * 9 *
10 * mpi2_ioc.h Version: 02.00.27 10 * mpi2_ioc.h Version: 02.00.32
11 * 11 *
12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 12 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
13 * prefix are for use only on MPI v2.5 products, and must not be used 13 * prefix are for use only on MPI v2.5 products, and must not be used
@@ -141,7 +141,32 @@
141 * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and 141 * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
142 * MPI26_FW_HEADER_PID_FAMILY_3516_SAS. 142 * MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
143 * Added MPI26_CTRL_OP_SHUTDOWN. 143 * Added MPI26_CTRL_OP_SHUTDOWN.
144 * 08-25-15 02.00.27 Added IC ARCH Class based signature defines 144 * 08-25-15 02.00.27 Added IC ARCH Class based signature defines.
145 * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event.
146 * Added ConigurationFlags field to IOCInit message to
147 * support NVMe SGL format control.
148 * Added PCIe SRIOV support.
149 * 02-17-16 02.00.28 Added SAS 4 22.5 gbs speed support.
150 * Added PCIe 4 16.0 GT/sec speec support.
151 * Removed AHCI support.
152 * Removed SOP support.
153 * 07-01-16 02.00.29 Added Archclass for 4008 product.
154 * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED
155 * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload
156 * Request Message.
157 * Added new defines for the ImageType field of FWUpload
158 * Request Message.
159 * Added new values for the RegionType field in the Layout
160 * Data sections of the FLASH Layout Extended Image Data.
161 * Added new defines for the ReasonCode field of
162 * Active Cable Exception Event.
163 * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and
164 * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE.
165 * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and
166 * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR.
167 * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
168 * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
169 * defines for the ReasonCode field.
145 * -------------------------------------------------------------------------- 170 * --------------------------------------------------------------------------
146 */ 171 */
147 172
@@ -213,6 +238,9 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
213#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) 238#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
214#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) 239#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
215 240
241/*ConfigurationFlags */
242#define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001)
243
216/*minimum depth for a Reply Descriptor Post Queue */ 244/*minimum depth for a Reply Descriptor Post Queue */
217#define MPI2_RDPQ_DEPTH_MIN (16) 245#define MPI2_RDPQ_DEPTH_MIN (16)
218 246
@@ -300,6 +328,10 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
300 U16 MinDevHandle; /*0x3C */ 328 U16 MinDevHandle; /*0x3C */
301 U8 CurrentHostPageSize; /* 0x3E */ 329 U8 CurrentHostPageSize; /* 0x3E */
302 U8 Reserved4; /* 0x3F */ 330 U8 Reserved4; /* 0x3F */
331 U8 SGEModifierMask; /*0x40 */
332 U8 SGEModifierValue; /*0x41 */
333 U8 SGEModifierShift; /*0x42 */
334 U8 Reserved5; /*0x43 */
303} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY, 335} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
304 Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t; 336 Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
305 337
@@ -316,6 +348,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
316#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) 348#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
317 349
318/*IOCExceptions */ 350/*IOCExceptions */
351#define MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0400)
319#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200) 352#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200)
320#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) 353#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
321 354
@@ -336,6 +369,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
336/*ProductID field uses MPI2_FW_HEADER_PID_ */ 369/*ProductID field uses MPI2_FW_HEADER_PID_ */
337 370
338/*IOCCapabilities */ 371/*IOCCapabilities */
372#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
339#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) 373#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
340#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) 374#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
341#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000) 375#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
@@ -354,6 +388,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
354#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) 388#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
355 389
356/*ProtocolFlags */ 390/*ProtocolFlags */
391#define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008)
357#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) 392#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
358#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) 393#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
359 394
@@ -403,6 +438,8 @@ typedef struct _MPI2_PORT_FACTS_REPLY {
403#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20) 438#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
404#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30) 439#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
405#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31) 440#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
441#define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40)
442
406 443
407/**************************************************************************** 444/****************************************************************************
408* PortEnable message 445* PortEnable message
@@ -509,6 +546,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
509#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) 546#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
510#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) 547#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
511#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) 548#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
549#define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D)
512#define MPI2_EVENT_IR_VOLUME (0x001E) 550#define MPI2_EVENT_IR_VOLUME (0x001E)
513#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F) 551#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
514#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) 552#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
@@ -521,7 +559,12 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
521#define MPI2_EVENT_TEMP_THRESHOLD (0x0027) 559#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
522#define MPI2_EVENT_HOST_MESSAGE (0x0028) 560#define MPI2_EVENT_HOST_MESSAGE (0x0028)
523#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) 561#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029)
562#define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030)
563#define MPI2_EVENT_PCIE_ENUMERATION (0x0031)
564#define MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032)
565#define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033)
524#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034) 566#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034)
567#define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035)
525#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) 568#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
526#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) 569#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
527 570
@@ -618,11 +661,20 @@ typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT {
618 U8 ReasonCode; /* 0x04 */ 661 U8 ReasonCode; /* 0x04 */
619 U8 ReceptacleID; /* 0x05 */ 662 U8 ReceptacleID; /* 0x05 */
620 U16 Reserved1; /* 0x06 */ 663 U16 Reserved1; /* 0x06 */
621} MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, 664} MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
665 *PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
666 Mpi25EventDataActiveCableExcept_t,
667 *pMpi25EventDataActiveCableExcept_t,
668 MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
622 *PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT, 669 *PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
623 Mpi26EventDataActiveCableExcept_t, 670 Mpi26EventDataActiveCableExcept_t,
624 *pMpi26EventDataActiveCableExcept_t; 671 *pMpi26EventDataActiveCableExcept_t;
625 672
673/*MPI2.5 defines for the ReasonCode field */
674#define MPI25_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
675#define MPI25_EVENT_ACTIVE_CABLE_PRESENT (0x01)
676#define MPI25_EVENT_ACTIVE_CABLE_DEGRADED (0x02)
677
626/* defines for ReasonCode field */ 678/* defines for ReasonCode field */
627#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00) 679#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
628#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01) 680#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01)
@@ -958,6 +1010,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST {
958#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) 1010#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
959#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) 1011#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
960#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B) 1012#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B)
1013#define MPI26_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0C)
961 1014
962/*values for the PhyStatus field */ 1015/*values for the PhyStatus field */
963#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) 1016#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
@@ -983,12 +1036,37 @@ typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE {
983} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, 1036} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
984 *PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, 1037 *PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
985 Mpi2EventDataSasEnclDevStatusChange_t, 1038 Mpi2EventDataSasEnclDevStatusChange_t,
986 *pMpi2EventDataSasEnclDevStatusChange_t; 1039 *pMpi2EventDataSasEnclDevStatusChange_t,
1040 MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE,
1041 *PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE,
1042 Mpi26EventDataEnclDevStatusChange_t,
1043 *pMpi26EventDataEnclDevStatusChange_t;
987 1044
988/*SAS Enclosure Device Status Change event ReasonCode values */ 1045/*SAS Enclosure Device Status Change event ReasonCode values */
989#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01) 1046#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
990#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) 1047#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
991 1048
1049/*Enclosure Device Status Change event ReasonCode values */
1050#define MPI26_EVENT_ENCL_RC_ADDED (0x01)
1051#define MPI26_EVENT_ENCL_RC_NOT_RESPONDING (0x02)
1052
1053
1054typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR {
1055 U16 DevHandle; /*0x00 */
1056 U8 ReasonCode; /*0x02 */
1057 U8 PhysicalPort; /*0x03 */
1058 U32 Reserved1[2]; /*0x04 */
1059 U64 SASAddress; /*0x0C */
1060 U32 Reserved2[2]; /*0x14 */
1061} MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR,
1062 *PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR,
1063 Mpi25EventDataSasDeviceDiscoveryError_t,
1064 *pMpi25EventDataSasDeviceDiscoveryError_t;
1065
1066/*SAS Device Discovery Error Event data ReasonCode values */
1067#define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01)
1068#define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02)
1069
992/*SAS PHY Counter Event data */ 1070/*SAS PHY Counter Event data */
993 1071
994typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER { 1072typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
@@ -1074,6 +1152,174 @@ typedef struct _MPI2_EVENT_DATA_HBD_PHY {
1074/*values for the DescriptorType field */ 1152/*values for the DescriptorType field */
1075#define MPI2_EVENT_HBD_DT_SAS (0x01) 1153#define MPI2_EVENT_HBD_DT_SAS (0x01)
1076 1154
1155
1156/*PCIe Device Status Change Event data (MPI v2.6 and later) */
1157
1158typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE {
1159 U16 TaskTag; /*0x00 */
1160 U8 ReasonCode; /*0x02 */
1161 U8 PhysicalPort; /*0x03 */
1162 U8 ASC; /*0x04 */
1163 U8 ASCQ; /*0x05 */
1164 U16 DevHandle; /*0x06 */
1165 U32 Reserved2; /*0x08 */
1166 U64 WWID; /*0x0C */
1167 U8 LUN[8]; /*0x14 */
1168} MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE,
1169 *PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE,
1170 Mpi26EventDataPCIeDeviceStatusChange_t,
1171 *pMpi26EventDataPCIeDeviceStatusChange_t;
1172
1173/*PCIe Device Status Change Event data ReasonCode values */
1174#define MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA (0x05)
1175#define MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED (0x07)
1176#define MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
1177#define MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
1178#define MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
1179#define MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
1180#define MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
1181#define MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
1182#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
1183#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
1184#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10)
1185
1186
1187/*PCIe Enumeration Event data (MPI v2.6 and later) */
1188
1189typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION {
1190 U8 Flags; /*0x00 */
1191 U8 ReasonCode; /*0x01 */
1192 U8 PhysicalPort; /*0x02 */
1193 U8 Reserved1; /*0x03 */
1194 U32 EnumerationStatus; /*0x04 */
1195} MPI26_EVENT_DATA_PCIE_ENUMERATION,
1196 *PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION,
1197 Mpi26EventDataPCIeEnumeration_t,
1198 *pMpi26EventDataPCIeEnumeration_t;
1199
1200/*PCIe Enumeration Event data Flags values */
1201#define MPI26_EVENT_PCIE_ENUM_DEVICE_CHANGE (0x02)
1202#define MPI26_EVENT_PCIE_ENUM_IN_PROGRESS (0x01)
1203
1204/*PCIe Enumeration Event data ReasonCode values */
1205#define MPI26_EVENT_PCIE_ENUM_RC_STARTED (0x01)
1206#define MPI26_EVENT_PCIE_ENUM_RC_COMPLETED (0x02)
1207
1208/*PCIe Enumeration Event data EnumerationStatus values */
1209#define MPI26_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
1210#define MPI26_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
1211#define MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
1212
1213
1214/*PCIe Topology Change List Event data (MPI v2.6 and later) */
1215
1216/*
1217 *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
1218 *one and check NumEntries at runtime.
1219 */
1220#ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT
1221#define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1)
1222#endif
1223
1224typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY {
1225 U16 AttachedDevHandle; /*0x00 */
1226 U8 PortStatus; /*0x02 */
1227 U8 Reserved1; /*0x03 */
1228 U8 CurrentPortInfo; /*0x04 */
1229 U8 Reserved2; /*0x05 */
1230 U8 PreviousPortInfo; /*0x06 */
1231 U8 Reserved3; /*0x07 */
1232} MPI26_EVENT_PCIE_TOPO_PORT_ENTRY,
1233 *PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY,
1234 Mpi26EventPCIeTopoPortEntry_t,
1235 *pMpi26EventPCIeTopoPortEntry_t;
1236
1237/*PCIe Topology Change List Event data PortStatus values */
1238#define MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED (0x01)
1239#define MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02)
1240#define MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03)
1241#define MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04)
1242#define MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
1243
1244/*PCIe Topology Change List Event data defines for CurrentPortInfo and
1245 *PreviousPortInfo
1246 */
1247#define MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK (0xF0)
1248#define MPI26_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
1249#define MPI26_EVENT_PCIE_TOPO_PI_1_LANE (0x10)
1250#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20)
1251#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30)
1252#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40)
1253
1254#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F)
1255#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
1256#define MPI26_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
1257#define MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
1258#define MPI26_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03)
1259#define MPI26_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04)
1260#define MPI26_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05)
1261
1262typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST {
1263 U16 EnclosureHandle; /*0x00 */
1264 U16 SwitchDevHandle; /*0x02 */
1265 U8 NumPorts; /*0x04 */
1266 U8 Reserved1; /*0x05 */
1267 U16 Reserved2; /*0x06 */
1268 U8 NumEntries; /*0x08 */
1269 U8 StartPortNum; /*0x09 */
1270 U8 SwitchStatus; /*0x0A */
1271 U8 PhysicalPort; /*0x0B */
1272 MPI26_EVENT_PCIE_TOPO_PORT_ENTRY
1273 PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /*0x0C */
1274} MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
1275 *PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
1276 Mpi26EventDataPCIeTopologyChangeList_t,
1277 *pMpi26EventDataPCIeTopologyChangeList_t;
1278
1279/*PCIe Topology Change List Event data SwitchStatus values */
1280#define MPI26_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
1281#define MPI26_EVENT_PCIE_TOPO_SS_ADDED (0x01)
1282#define MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
1283#define MPI26_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
1284#define MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
1285
1286/*PCIe Link Counter Event data (MPI v2.6 and later) */
1287
1288typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER {
1289 U64 TimeStamp; /*0x00 */
1290 U32 Reserved1; /*0x08 */
1291 U8 LinkEventCode; /*0x0C */
1292 U8 LinkNum; /*0x0D */
1293 U16 Reserved2; /*0x0E */
1294 U32 LinkEventInfo; /*0x10 */
1295 U8 CounterType; /*0x14 */
1296 U8 ThresholdWindow; /*0x15 */
1297 U8 TimeUnits; /*0x16 */
1298 U8 Reserved3; /*0x17 */
1299 U32 EventThreshold; /*0x18 */
1300 U16 ThresholdFlags; /*0x1C */
1301 U16 Reserved4; /*0x1E */
1302} MPI26_EVENT_DATA_PCIE_LINK_COUNTER,
1303 *PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER,
1304 Mpi26EventDataPcieLinkCounter_t, *pMpi26EventDataPcieLinkCounter_t;
1305
1306
1307/*use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode
1308 *field
1309 */
1310
1311/*use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType
1312 *field
1313 */
1314
1315/*use MPI26_PCIELINK3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits
1316 *field
1317 */
1318
1319/*use MPI26_PCIELINK3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags
1320 *field
1321 */
1322
1077/**************************************************************************** 1323/****************************************************************************
1078* EventAck message 1324* EventAck message
1079****************************************************************************/ 1325****************************************************************************/
@@ -1191,6 +1437,14 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
1191#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) 1437#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
1192#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) 1438#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
1193#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) 1439#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
1440#define MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP (0x0D)
1441#define MPI2_FW_DOWNLOAD_ITYPE_SBR (0x0E)
1442#define MPI2_FW_DOWNLOAD_ITYPE_SBR_BACKUP (0x0F)
1443#define MPI2_FW_DOWNLOAD_ITYPE_HIIM (0x10)
1444#define MPI2_FW_DOWNLOAD_ITYPE_HIIA (0x11)
1445#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12)
1446#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13)
1447#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14)
1194#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) 1448#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
1195 1449
1196/*MPI v2.0 FWDownload TransactionContext Element */ 1450/*MPI v2.0 FWDownload TransactionContext Element */
@@ -1277,6 +1531,14 @@ typedef struct _MPI2_FW_UPLOAD_REQUEST {
1277#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A) 1531#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
1278#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) 1532#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
1279#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D) 1533#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D)
1534#define MPI2_FW_UPLOAD_ITYPE_SBR (0x0E)
1535#define MPI2_FW_UPLOAD_ITYPE_SBR_BACKUP (0x0F)
1536#define MPI2_FW_UPLOAD_ITYPE_HIIM (0x10)
1537#define MPI2_FW_UPLOAD_ITYPE_HIIA (0x11)
1538#define MPI2_FW_UPLOAD_ITYPE_CTLR (0x12)
1539#define MPI2_FW_UPLOAD_ITYPE_IMR_FIRMWARE (0x13)
1540#define MPI2_FW_UPLOAD_ITYPE_MR_NVDATA (0x14)
1541
1280 1542
1281/*MPI v2.0 FWUpload TransactionContext Element */ 1543/*MPI v2.0 FWUpload TransactionContext Element */
1282typedef struct _MPI2_FW_UPLOAD_TCSGE { 1544typedef struct _MPI2_FW_UPLOAD_TCSGE {
@@ -1395,10 +1657,13 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
1395#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00) 1657#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
1396#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01) 1658#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
1397/* legacy (0x5AEAA55A) */ 1659/* legacy (0x5AEAA55A) */
1660#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
1398#define MPI26_FW_HEADER_SIGNATURE0 \ 1661#define MPI26_FW_HEADER_SIGNATURE0 \
1399 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0) 1662 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
1400#define MPI26_FW_HEADER_SIGNATURE0_3516 \ 1663#define MPI26_FW_HEADER_SIGNATURE0_3516 \
1401 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1) 1664 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
1665#define MPI26_FW_HEADER_SIGNATURE0_4008 \
1666 (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
1402 1667
1403/*Signature1 field */ 1668/*Signature1 field */
1404#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) 1669#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
@@ -1542,6 +1807,13 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
1542#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A) 1807#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
1543#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK) 1808#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
1544#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D) 1809#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
1810#define MPI2_FLASH_REGION_SBR (0x0E)
1811#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F)
1812#define MPI2_FLASH_REGION_HIIM (0x10)
1813#define MPI2_FLASH_REGION_HIIA (0x11)
1814#define MPI2_FLASH_REGION_CTLR (0x12)
1815#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13)
1816#define MPI2_FLASH_REGION_MR_NVDATA (0x14)
1545 1817
1546/*ImageRevision */ 1818/*ImageRevision */
1547#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) 1819#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
@@ -1826,6 +2098,8 @@ typedef struct _MPI26_IOUNIT_CONTROL_REQUEST {
1826#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17) 2098#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17)
1827#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18) 2099#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18)
1828#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19) 2100#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19)
2101#define MPI26_CTRL_OP_ENABLE_NVME_SGL_FORMAT (0x1A)
2102#define MPI26_CTRL_OP_DISABLE_NVME_SGL_FORMAT (0x1B)
1829#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80) 2103#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80)
1830 2104
1831/* values for the PrimFlags field */ 2105/* values for the PrimFlags field */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
new file mode 100644
index 000000000000..f0281f943ec9
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
@@ -0,0 +1,111 @@
1/*
2 * Copyright 2012-2015 Avago Technologies. All rights reserved.
3 *
4 *
5 * Name: mpi2_pci.h
6 * Title: MPI PCIe Attached Devices structures and definitions.
7 * Creation Date: October 9, 2012
8 *
9 * mpi2_pci.h Version: 02.00.02
10 *
11 * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
12 * prefix are for use only on MPI v2.5 products, and must not be used
13 * with MPI v2.0 products. Unless otherwise noted, names beginning with
14 * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
15 *
16 * Version History
17 * ---------------
18 *
19 * Date Version Description
20 * -------- -------- ------------------------------------------------------
21 * 03-16-15 02.00.00 Initial version.
22 * 02-17-16 02.00.01 Removed AHCI support.
23 * Removed SOP support.
24 * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
25 * NVME Encapsulated Request.
26 * --------------------------------------------------------------------------
27 */
28
29#ifndef MPI2_PCI_H
30#define MPI2_PCI_H
31
32
33/*
34 *Values for the PCIe DeviceInfo field used in PCIe Device Status Change Event
35 *data and PCIe Configuration pages.
36 */
37#define MPI26_PCIE_DEVINFO_DIRECT_ATTACH (0x00000010)
38
39#define MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE (0x0000000F)
40#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000)
41#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001)
42#define MPI26_PCIE_DEVINFO_NVME (0x00000003)
43
44
45/****************************************************************************
46* NVMe Encapsulated message
47****************************************************************************/
48
49/*NVME Encapsulated Request Message */
50typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST {
51 U16 DevHandle; /*0x00 */
52 U8 ChainOffset; /*0x02 */
53 U8 Function; /*0x03 */
54 U16 EncapsulatedCommandLength; /*0x04 */
55 U8 Reserved1; /*0x06 */
56 U8 MsgFlags; /*0x07 */
57 U8 VP_ID; /*0x08 */
58 U8 VF_ID; /*0x09 */
59 U16 Reserved2; /*0x0A */
60 U32 Reserved3; /*0x0C */
61 U64 ErrorResponseBaseAddress; /*0x10 */
62 U16 ErrorResponseAllocationLength; /*0x18 */
63 U16 Flags; /*0x1A */
64 U32 DataLength; /*0x1C */
65 U8 NVMe_Command[4]; /*0x20 */
66
67} MPI26_NVME_ENCAPSULATED_REQUEST, *PTR_MPI26_NVME_ENCAPSULATED_REQUEST,
68 Mpi26NVMeEncapsulatedRequest_t, *pMpi26NVMeEncapsulatedRequest_t;
69
70/*defines for the Flags field */
71#define MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP (0x0020)
72/*Submission Queue Type*/
73#define MPI26_NVME_FLAGS_SUBMISSIONQ_MASK (0x0010)
74#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
75#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010)
76/*Error Response Address Space */
77#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR (0x000C)
78#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR (0x0000)
79#define MPI26_NVME_FLAGS_IOCPLB_RSP_ADDR (0x0008)
80#define MPI26_NVME_FLAGS_IOCPLBNTA_RSP_ADDR (0x000C)
81/*Data Direction*/
82#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003)
83#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000)
84#define MPI26_NVME_FLAGS_WRITE (0x0001)
85#define MPI26_NVME_FLAGS_READ (0x0002)
86#define MPI26_NVME_FLAGS_BIDIRECTIONAL (0x0003)
87
88
89/*NVMe Encapuslated Reply Message */
90typedef struct _MPI26_NVME_ENCAPSULATED_ERROR_REPLY {
91 U16 DevHandle; /*0x00 */
92 U8 MsgLength; /*0x02 */
93 U8 Function; /*0x03 */
94 U16 EncapsulatedCommandLength; /*0x04 */
95 U8 Reserved1; /*0x06 */
96 U8 MsgFlags; /*0x07 */
97 U8 VP_ID; /*0x08 */
98 U8 VF_ID; /*0x09 */
99 U16 Reserved2; /*0x0A */
100 U16 Reserved3; /*0x0C */
101 U16 IOCStatus; /*0x0E */
102 U32 IOCLogInfo; /*0x10 */
103 U16 ErrorResponseCount; /*0x14 */
104 U16 Reserved4; /*0x16 */
105} MPI26_NVME_ENCAPSULATED_ERROR_REPLY,
106 *PTR_MPI26_NVME_ENCAPSULATED_ERROR_REPLY,
107 Mpi26NVMeEncapsulatedErrorReply_t,
108 *pMpi26NVMeEncapsulatedErrorReply_t;
109
110
111#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 593765a4ddb8..629296ee9236 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -7,7 +7,7 @@
7 * Title: MPI diagnostic tool structures and definitions 7 * Title: MPI diagnostic tool structures and definitions
8 * Creation Date: March 26, 2007 8 * Creation Date: March 26, 2007
9 * 9 *
10 * mpi2_tool.h Version: 02.00.13 10 * mpi2_tool.h Version: 02.00.14
11 * 11 *
12 * Version History 12 * Version History
13 * --------------- 13 * ---------------
@@ -36,6 +36,8 @@
36 * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. 36 * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
37 * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. 37 * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
38 * 11-18-14 02.00.13 Updated copyright information. 38 * 11-18-14 02.00.13 Updated copyright information.
39 * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean
40 * Tool Request Message.
39 * -------------------------------------------------------------------------- 41 * --------------------------------------------------------------------------
40 */ 42 */
41 43
@@ -106,6 +108,16 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST {
106#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000) 108#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000)
107#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) 109#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
108#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) 110#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
111#define MPI2_TOOLBOX_CLEAN_SBR (0x00800000)
112#define MPI2_TOOLBOX_CLEAN_SBR_BACKUP (0x00400000)
113#define MPI2_TOOLBOX_CLEAN_HIIM (0x00200000)
114#define MPI2_TOOLBOX_CLEAN_HIIA (0x00100000)
115#define MPI2_TOOLBOX_CLEAN_CTLR (0x00080000)
116#define MPI2_TOOLBOX_CLEAN_IMR_FIRMWARE (0x00040000)
117#define MPI2_TOOLBOX_CLEAN_MR_NVDATA (0x00020000)
118#define MPI2_TOOLBOX_CLEAN_RESERVED_5_16 (0x0001FFE0)
119#define MPI2_TOOLBOX_CLEAN_ALL_BUT_MPB (0x00000010)
120#define MPI2_TOOLBOX_CLEAN_ENTIRE_FLASH (0x00000008)
109#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) 121#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
110#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002) 122#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
111#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001) 123#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 87999905bca3..a29534c1824e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -59,6 +59,7 @@
59#include <linux/time.h> 59#include <linux/time.h>
60#include <linux/ktime.h> 60#include <linux/ktime.h>
61#include <linux/kthread.h> 61#include <linux/kthread.h>
62#include <asm/page.h> /* To get host page size per arch */
62#include <linux/aer.h> 63#include <linux/aer.h>
63 64
64 65
@@ -556,6 +557,11 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
556 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size; 557 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
557 func_str = "smp_passthru"; 558 func_str = "smp_passthru";
558 break; 559 break;
560 case MPI2_FUNCTION_NVME_ENCAPSULATED:
561 frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
562 ioc->sge_size;
563 func_str = "nvme_encapsulated";
564 break;
559 default: 565 default:
560 frame_sz = 32; 566 frame_sz = 32;
561 func_str = "unknown"; 567 func_str = "unknown";
@@ -655,7 +661,27 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
655 desc = "Temperature Threshold"; 661 desc = "Temperature Threshold";
656 break; 662 break;
657 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION: 663 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
658 desc = "Active cable exception"; 664 desc = "Cable Event";
665 break;
666 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
667 desc = "PCIE Device Status Change";
668 break;
669 case MPI2_EVENT_PCIE_ENUMERATION:
670 {
671 Mpi26EventDataPCIeEnumeration_t *event_data =
672 (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
673 pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
674 (event_data->ReasonCode ==
675 MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
676 "start" : "stop");
677 if (event_data->EnumerationStatus)
678 pr_info("enumeration_status(0x%08x)",
679 le32_to_cpu(event_data->EnumerationStatus));
680 pr_info("\n");
681 return;
682 }
683 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
684 desc = "PCIE Topology Change List";
659 break; 685 break;
660 } 686 }
661 687
@@ -984,7 +1010,9 @@ _base_interrupt(int irq, void *bus_id)
984 if (request_desript_type == 1010 if (request_desript_type ==
985 MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS || 1011 MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
986 request_desript_type == 1012 request_desript_type ==
987 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { 1013 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1014 request_desript_type ==
1015 MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
988 cb_idx = _base_get_cb_idx(ioc, smid); 1016 cb_idx = _base_get_cb_idx(ioc, smid);
989 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && 1017 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
990 (likely(mpt_callbacks[cb_idx] != NULL))) { 1018 (likely(mpt_callbacks[cb_idx] != NULL))) {
@@ -1347,6 +1375,433 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1347/* IEEE format sgls */ 1375/* IEEE format sgls */
1348 1376
1349/** 1377/**
1378 * _base_build_nvme_prp - This function is called for NVMe end devices to build
1379 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
1380 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
1381 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
1382 * used to describe a larger data buffer. If the data buffer is too large to
 1383 * describe using the two PRP entries inside the NVMe message, then PRP1
1384 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
1385 * list located elsewhere in memory to describe the remaining data memory
1386 * segments. The PRP list will be contiguous.
1387
1388 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
1389 * consists of a list of PRP entries to describe a number of noncontigous
1390 * physical memory segments as a single memory buffer, just as a SGL does. Note
1391 * however, that this function is only used by the IOCTL call, so the memory
1392 * given will be guaranteed to be contiguous. There is no need to translate
1393 * non-contiguous SGL into a PRP in this case. All PRPs will describe
1394 * contiguous space that is one page size each.
1395 *
1396 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
1397 * a PRP list pointer or a PRP element, depending upon the command. PRP2
1398 * contains the second PRP element if the memory being described fits within 2
1399 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
1400 *
1401 * A PRP list pointer contains the address of a PRP list, structured as a linear
1402 * array of PRP entries. Each PRP entry in this list describes a segment of
1403 * physical memory.
1404 *
1405 * Each 64-bit PRP entry comprises an address and an offset field. The address
1406 * always points at the beginning of a 4KB physical memory page, and the offset
1407 * describes where within that 4KB page the memory segment begins. Only the
 1408 * first element in a PRP list may contain a non-zero offset, implying that all
1409 * memory segments following the first begin at the start of a 4KB page.
1410 *
1411 * Each PRP element normally describes 4KB of physical memory, with exceptions
1412 * for the first and last elements in the list. If the memory being described
1413 * by the list begins at a non-zero offset within the first 4KB page, then the
1414 * first PRP element will contain a non-zero offset indicating where the region
1415 * begins within the 4KB page. The last memory segment may end before the end
1416 * of the 4KB segment, depending upon the overall size of the memory being
1417 * described by the PRP list.
1418 *
1419 * Since PRP entries lack any indication of size, the overall data buffer length
1420 * is used to determine where the end of the data memory buffer is located, and
1421 * how many PRP entries are required to describe it.
1422 *
1423 * @ioc: per adapter object
 1424 * @smid: system request message index for getting associated SGL
1425 * @nvme_encap_request: the NVMe request msg frame pointer
1426 * @data_out_dma: physical address for WRITES
1427 * @data_out_sz: data xfer size for WRITES
1428 * @data_in_dma: physical address for READS
1429 * @data_in_sz: data xfer size for READS
1430 *
1431 * Returns nothing.
1432 */
1433static void
1434_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
1435 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
1436 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1437 size_t data_in_sz)
1438{
1439 int prp_size = NVME_PRP_SIZE;
1440 __le64 *prp_entry, *prp1_entry, *prp2_entry;
1441 __le64 *prp_page;
1442 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
1443 u32 offset, entry_len;
1444 u32 page_mask_result, page_mask;
1445 size_t length;
1446
1447 /*
1448 * Not all commands require a data transfer. If no data, just return
1449 * without constructing any PRP.
1450 */
1451 if (!data_in_sz && !data_out_sz)
1452 return;
1453 /*
1454 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
1455 * PRP1 is located at a 24 byte offset from the start of the NVMe
1456 * command. Then set the current PRP entry pointer to PRP1.
1457 */
1458 prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
1459 NVME_CMD_PRP1_OFFSET);
1460 prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
1461 NVME_CMD_PRP2_OFFSET);
1462 prp_entry = prp1_entry;
1463 /*
1464 * For the PRP entries, use the specially allocated buffer of
1465 * contiguous memory.
1466 */
1467 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
1468 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
1469
1470 /*
1471 * Check if we are within 1 entry of a page boundary we don't
1472 * want our first entry to be a PRP List entry.
1473 */
1474 page_mask = ioc->page_size - 1;
1475 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
1476 if (!page_mask_result) {
1477 /* Bump up to next page boundary. */
1478 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
1479 prp_page_dma = prp_page_dma + prp_size;
1480 }
1481
1482 /*
1483 * Set PRP physical pointer, which initially points to the current PRP
1484 * DMA memory page.
1485 */
1486 prp_entry_dma = prp_page_dma;
1487
1488 /* Get physical address and length of the data buffer. */
1489 if (data_in_sz) {
1490 dma_addr = data_in_dma;
1491 length = data_in_sz;
1492 } else {
1493 dma_addr = data_out_dma;
1494 length = data_out_sz;
1495 }
1496
1497 /* Loop while the length is not zero. */
1498 while (length) {
1499 /*
1500 * Check if we need to put a list pointer here if we are at
1501 * page boundary - prp_size (8 bytes).
1502 */
1503 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
1504 if (!page_mask_result) {
1505 /*
1506 * This is the last entry in a PRP List, so we need to
1507 * put a PRP list pointer here. What this does is:
1508 * - bump the current memory pointer to the next
1509 * address, which will be the next full page.
1510 * - set the PRP Entry to point to that page. This
1511 * is now the PRP List pointer.
1512 * - bump the PRP Entry pointer the start of the
1513 * next page. Since all of this PRP memory is
1514 * contiguous, no need to get a new page - it's
1515 * just the next address.
1516 */
1517 prp_entry_dma++;
1518 *prp_entry = cpu_to_le64(prp_entry_dma);
1519 prp_entry++;
1520 }
1521
1522 /* Need to handle if entry will be part of a page. */
1523 offset = dma_addr & page_mask;
1524 entry_len = ioc->page_size - offset;
1525
1526 if (prp_entry == prp1_entry) {
1527 /*
1528 * Must fill in the first PRP pointer (PRP1) before
1529 * moving on.
1530 */
1531 *prp1_entry = cpu_to_le64(dma_addr);
1532
1533 /*
1534 * Now point to the second PRP entry within the
1535 * command (PRP2).
1536 */
1537 prp_entry = prp2_entry;
1538 } else if (prp_entry == prp2_entry) {
1539 /*
1540 * Should the PRP2 entry be a PRP List pointer or just
1541 * a regular PRP pointer? If there is more than one
1542 * more page of data, must use a PRP List pointer.
1543 */
1544 if (length > ioc->page_size) {
1545 /*
1546 * PRP2 will contain a PRP List pointer because
1547 * more PRP's are needed with this command. The
1548 * list will start at the beginning of the
1549 * contiguous buffer.
1550 */
1551 *prp2_entry = cpu_to_le64(prp_entry_dma);
1552
1553 /*
1554 * The next PRP Entry will be the start of the
1555 * first PRP List.
1556 */
1557 prp_entry = prp_page;
1558 } else {
1559 /*
1560 * After this, the PRP Entries are complete.
1561 * This command uses 2 PRP's and no PRP list.
1562 */
1563 *prp2_entry = cpu_to_le64(dma_addr);
1564 }
1565 } else {
1566 /*
1567 * Put entry in list and bump the addresses.
1568 *
1569 * After PRP1 and PRP2 are filled in, this will fill in
1570 * all remaining PRP entries in a PRP List, one per
1571 * each time through the loop.
1572 */
1573 *prp_entry = cpu_to_le64(dma_addr);
1574 prp_entry++;
1575 prp_entry_dma++;
1576 }
1577
1578 /*
1579 * Bump the phys address of the command's data buffer by the
1580 * entry_len.
1581 */
1582 dma_addr += entry_len;
1583
1584 /* Decrement length accounting for last partial page. */
1585 if (entry_len > length)
1586 length = 0;
1587 else
1588 length -= entry_len;
1589 }
1590}
1591
1592/**
1593 * base_make_prp_nvme -
1594 * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
1595 *
1596 * @ioc: per adapter object
1597 * @scmd: SCSI command from the mid-layer
1598 * @mpi_request: mpi request
1599 * @smid: msg Index
1600 * @sge_count: scatter gather element count.
1601 *
 1602 * Returns: nothing (the function is void; the PRP SGL is built in place —
 1603 * base_is_prp_possible() decides beforehand whether PRPs or IEEE SGLs are used)
1604 */
1605static void
1606base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
1607 struct scsi_cmnd *scmd,
1608 Mpi25SCSIIORequest_t *mpi_request,
1609 u16 smid, int sge_count)
1610{
1611 int sge_len, num_prp_in_chain = 0;
1612 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
1613 __le64 *curr_buff;
1614 dma_addr_t msg_dma, sge_addr, offset;
1615 u32 page_mask, page_mask_result;
1616 struct scatterlist *sg_scmd;
1617 u32 first_prp_len;
1618 int data_len = scsi_bufflen(scmd);
1619 u32 nvme_pg_size;
1620
1621 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
1622 /*
1623 * Nvme has a very convoluted prp format. One prp is required
1624 * for each page or partial page. Driver need to split up OS sg_list
1625 * entries if it is longer than one page or cross a page
1626 * boundary. Driver also have to insert a PRP list pointer entry as
1627 * the last entry in each physical page of the PRP list.
1628 *
1629 * NOTE: The first PRP "entry" is actually placed in the first
1630 * SGL entry in the main message as IEEE 64 format. The 2nd
1631 * entry in the main message is the chain element, and the rest
1632 * of the PRP entries are built in the contiguous pcie buffer.
1633 */
1634 page_mask = nvme_pg_size - 1;
1635
1636 /*
1637 * Native SGL is needed.
1638 * Put a chain element in main message frame that points to the first
1639 * chain buffer.
1640 *
1641 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
1642 * a native SGL.
1643 */
1644
1645 /* Set main message chain element pointer */
1646 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
1647 /*
1648 * For NVMe the chain element needs to be the 2nd SG entry in the main
1649 * message.
1650 */
1651 main_chain_element = (Mpi25IeeeSgeChain64_t *)
1652 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
1653
1654 /*
1655 * For the PRP entries, use the specially allocated buffer of
1656 * contiguous memory. Normal chain buffers can't be used
1657 * because each chain buffer would need to be the size of an OS
1658 * page (4k).
1659 */
1660 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
1661 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
1662
1663 main_chain_element->Address = cpu_to_le64(msg_dma);
1664 main_chain_element->NextChainOffset = 0;
1665 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1666 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
1667 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
1668
1669 /* Build first prp, sge need not to be page aligned*/
1670 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
1671 sg_scmd = scsi_sglist(scmd);
1672 sge_addr = sg_dma_address(sg_scmd);
1673 sge_len = sg_dma_len(sg_scmd);
1674
1675 offset = sge_addr & page_mask;
1676 first_prp_len = nvme_pg_size - offset;
1677
1678 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
1679 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
1680
1681 data_len -= first_prp_len;
1682
1683 if (sge_len > first_prp_len) {
1684 sge_addr += first_prp_len;
1685 sge_len -= first_prp_len;
1686 } else if (data_len && (sge_len == first_prp_len)) {
1687 sg_scmd = sg_next(sg_scmd);
1688 sge_addr = sg_dma_address(sg_scmd);
1689 sge_len = sg_dma_len(sg_scmd);
1690 }
1691
1692 for (;;) {
1693 offset = sge_addr & page_mask;
1694
1695 /* Put PRP pointer due to page boundary*/
1696 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
1697 if (unlikely(!page_mask_result)) {
1698 scmd_printk(KERN_NOTICE,
1699 scmd, "page boundary curr_buff: 0x%p\n",
1700 curr_buff);
1701 msg_dma += 8;
1702 *curr_buff = cpu_to_le64(msg_dma);
1703 curr_buff++;
1704 num_prp_in_chain++;
1705 }
1706
1707 *curr_buff = cpu_to_le64(sge_addr);
1708 curr_buff++;
1709 msg_dma += 8;
1710 num_prp_in_chain++;
1711
1712 sge_addr += nvme_pg_size;
1713 sge_len -= nvme_pg_size;
1714 data_len -= nvme_pg_size;
1715
1716 if (data_len <= 0)
1717 break;
1718
1719 if (sge_len > 0)
1720 continue;
1721
1722 sg_scmd = sg_next(sg_scmd);
1723 sge_addr = sg_dma_address(sg_scmd);
1724 sge_len = sg_dma_len(sg_scmd);
1725 }
1726
1727 main_chain_element->Length =
1728 cpu_to_le32(num_prp_in_chain * sizeof(u64));
1729 return;
1730}
1731
1732static bool
1733base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
1734 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
1735{
1736 u32 data_length = 0;
1737 struct scatterlist *sg_scmd;
1738 bool build_prp = true;
1739
1740 data_length = scsi_bufflen(scmd);
1741 sg_scmd = scsi_sglist(scmd);
1742
 1743 /* If data length is <= 16K and number of SGE entries are <= 2,
 1744 * we build an IEEE SGL
1745 */
1746 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
1747 build_prp = false;
1748
1749 return build_prp;
1750}
1751
1752/**
1753 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
1754 * determine if the driver needs to build a native SGL. If so, that native
1755 * SGL is built in the special contiguous buffers allocated especially for
1756 * PCIe SGL creation. If the driver will not build a native SGL, return
1757 * TRUE and a normal IEEE SGL will be built. Currently this routine
1758 * supports NVMe.
1759 * @ioc: per adapter object
1760 * @mpi_request: mf request pointer
1761 * @smid: system request message index
1762 * @scmd: scsi command
1763 * @pcie_device: points to the PCIe device's info
1764 *
1765 * Returns 0 if native SGL was built, 1 if no SGL was built
1766 */
1767static int
1768_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
1769 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
1770 struct _pcie_device *pcie_device)
1771{
1772 struct scatterlist *sg_scmd;
1773 int sges_left;
1774
1775 /* Get the SG list pointer and info. */
1776 sg_scmd = scsi_sglist(scmd);
1777 sges_left = scsi_dma_map(scmd);
1778 if (sges_left < 0) {
1779 sdev_printk(KERN_ERR, scmd->device,
1780 "scsi_dma_map failed: request for %d bytes!\n",
1781 scsi_bufflen(scmd));
1782 return 1;
1783 }
1784
1785 /* Check if we need to build a native SG list. */
1786 if (base_is_prp_possible(ioc, pcie_device,
1787 scmd, sges_left) == 0) {
1788 /* We built a native SG list, just return. */
1789 goto out;
1790 }
1791
1792 /*
1793 * Build native NVMe PRP.
1794 */
1795 base_make_prp_nvme(ioc, scmd, mpi_request,
1796 smid, sges_left);
1797
1798 return 0;
1799out:
1800 scsi_dma_unmap(scmd);
1801 return 1;
1802}
1803
1804/**
1350 * _base_add_sg_single_ieee - add sg element for IEEE format 1805 * _base_add_sg_single_ieee - add sg element for IEEE format
1351 * @paddr: virtual address for SGE 1806 * @paddr: virtual address for SGE
1352 * @flags: SGE flags 1807 * @flags: SGE flags
@@ -1391,9 +1846,11 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1391 1846
1392/** 1847/**
1393 * _base_build_sg_scmd - main sg creation routine 1848 * _base_build_sg_scmd - main sg creation routine
1849 * pcie_device is unused here!
1394 * @ioc: per adapter object 1850 * @ioc: per adapter object
1395 * @scmd: scsi command 1851 * @scmd: scsi command
1396 * @smid: system request message index 1852 * @smid: system request message index
1853 * @unused: unused pcie_device pointer
1397 * Context: none. 1854 * Context: none.
1398 * 1855 *
1399 * The main routine that builds scatter gather table from a given 1856 * The main routine that builds scatter gather table from a given
@@ -1403,7 +1860,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1403 */ 1860 */
1404static int 1861static int
1405_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, 1862_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1406 struct scsi_cmnd *scmd, u16 smid) 1863 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
1407{ 1864{
1408 Mpi2SCSIIORequest_t *mpi_request; 1865 Mpi2SCSIIORequest_t *mpi_request;
1409 dma_addr_t chain_dma; 1866 dma_addr_t chain_dma;
@@ -1537,6 +1994,8 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1537 * @ioc: per adapter object 1994 * @ioc: per adapter object
1538 * @scmd: scsi command 1995 * @scmd: scsi command
1539 * @smid: system request message index 1996 * @smid: system request message index
1997 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
1998 * constructed on need.
1540 * Context: none. 1999 * Context: none.
1541 * 2000 *
1542 * The main routine that builds scatter gather table from a given 2001 * The main routine that builds scatter gather table from a given
@@ -1546,9 +2005,9 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
1546 */ 2005 */
1547static int 2006static int
1548_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, 2007_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1549 struct scsi_cmnd *scmd, u16 smid) 2008 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
1550{ 2009{
1551 Mpi2SCSIIORequest_t *mpi_request; 2010 Mpi25SCSIIORequest_t *mpi_request;
1552 dma_addr_t chain_dma; 2011 dma_addr_t chain_dma;
1553 struct scatterlist *sg_scmd; 2012 struct scatterlist *sg_scmd;
1554 void *sg_local, *chain; 2013 void *sg_local, *chain;
@@ -1571,6 +2030,13 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1571 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2030 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1572 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 2031 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1573 2032
2033 /* Check if we need to build a native SG list. */
2034 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2035 smid, scmd, pcie_device) == 0)) {
2036 /* We built a native SG list, just return. */
2037 return 0;
2038 }
2039
1574 sg_scmd = scsi_sglist(scmd); 2040 sg_scmd = scsi_sglist(scmd);
1575 sges_left = scsi_dma_map(scmd); 2041 sges_left = scsi_dma_map(scmd);
1576 if (sges_left < 0) { 2042 if (sges_left < 0) {
@@ -1582,12 +2048,12 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1582 2048
1583 sg_local = &mpi_request->SGL; 2049 sg_local = &mpi_request->SGL;
1584 sges_in_segment = (ioc->request_sz - 2050 sges_in_segment = (ioc->request_sz -
1585 offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee; 2051 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
1586 if (sges_left <= sges_in_segment) 2052 if (sges_left <= sges_in_segment)
1587 goto fill_in_last_segment; 2053 goto fill_in_last_segment;
1588 2054
1589 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + 2055 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
1590 (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee); 2056 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
1591 2057
1592 /* fill in main message segment when there is a chain following */ 2058 /* fill in main message segment when there is a chain following */
1593 while (sges_in_segment > 1) { 2059 while (sges_in_segment > 1) {
@@ -1990,7 +2456,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1990 ioc->cpu_count, max_msix_vectors); 2456 ioc->cpu_count, max_msix_vectors);
1991 2457
1992 if (!ioc->rdpq_array_enable && max_msix_vectors == -1) 2458 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1993 local_max_msix_vectors = 8; 2459 local_max_msix_vectors = (reset_devices) ? 1 : 8;
1994 else 2460 else
1995 local_max_msix_vectors = max_msix_vectors; 2461 local_max_msix_vectors = max_msix_vectors;
1996 2462
@@ -2267,6 +2733,32 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2267} 2733}
2268 2734
2269/** 2735/**
2736 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
2737 * @ioc: per adapter object
2738 * @smid: system request message index
2739 *
2740 * Returns virt pointer to a PCIe SGL.
2741 */
2742void *
2743mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2744{
2745 return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
2746}
2747
2748/**
2749 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
2750 * @ioc: per adapter object
2751 * @smid: system request message index
2752 *
2753 * Returns phys pointer to the address of the PCIe buffer.
2754 */
2755dma_addr_t
2756mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2757{
2758 return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
2759}
2760
2761/**
2270 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address 2762 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
2271 * @ioc: per adapter object 2763 * @ioc: per adapter object
2272 * @phys_addr: lower 32 physical addr of the reply 2764 * @phys_addr: lower 32 physical addr of the reply
@@ -2544,6 +3036,30 @@ _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2544} 3036}
2545 3037
2546/** 3038/**
3039 * _base_put_smid_nvme_encap - send NVMe encapsulated request to
3040 * firmware
3041 * @ioc: per adapter object
3042 * @smid: system request message index
3043 *
3044 * Return nothing.
3045 */
3046static void
3047_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3048{
3049 Mpi2RequestDescriptorUnion_t descriptor;
3050 u64 *request = (u64 *)&descriptor;
3051
3052 descriptor.Default.RequestFlags =
3053 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3054 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
3055 descriptor.Default.SMID = cpu_to_le16(smid);
3056 descriptor.Default.LMID = 0;
3057 descriptor.Default.DescriptorTypeDependent = 0;
3058 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3059 &ioc->scsi_lookup_lock);
3060}
3061
3062/**
2547 * _base_put_smid_default - Default, primarily used for config pages 3063 * _base_put_smid_default - Default, primarily used for config pages
2548 * @ioc: per adapter object 3064 * @ioc: per adapter object
2549 * @smid: system request message index 3065 * @smid: system request message index
@@ -2634,6 +3150,27 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2634} 3150}
2635 3151
2636/** 3152/**
3153 * _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to
3154 * firmware using Atomic Request Descriptor
3155 * @ioc: per adapter object
3156 * @smid: system request message index
3157 *
3158 * Return nothing.
3159 */
3160static void
3161_base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3162{
3163 Mpi26AtomicRequestDescriptor_t descriptor;
3164 u32 *request = (u32 *)&descriptor;
3165
3166 descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3167 descriptor.MSIxIndex = _base_get_msix_index(ioc);
3168 descriptor.SMID = cpu_to_le16(smid);
3169
3170 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
3171}
3172
3173/**
2637 * _base_put_smid_default - Default, primarily used for config pages 3174 * _base_put_smid_default - Default, primarily used for config pages
2638 * use Atomic Request Descriptor 3175 * use Atomic Request Descriptor
2639 * @ioc: per adapter object 3176 * @ioc: per adapter object
@@ -2945,6 +3482,11 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2945 3482
2946 _base_display_OEMs_branding(ioc); 3483 _base_display_OEMs_branding(ioc);
2947 3484
3485 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
3486 pr_info("%sNVMe", i ? "," : "");
3487 i++;
3488 }
3489
2948 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name); 3490 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2949 3491
2950 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { 3492 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@@ -3245,6 +3787,17 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3245 kfree(ioc->reply_post); 3787 kfree(ioc->reply_post);
3246 } 3788 }
3247 3789
3790 if (ioc->pcie_sgl_dma_pool) {
3791 for (i = 0; i < ioc->scsiio_depth; i++) {
3792 if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
3793 pci_pool_free(ioc->pcie_sgl_dma_pool,
3794 ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
3795 ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
3796 }
3797 if (ioc->pcie_sgl_dma_pool)
3798 pci_pool_destroy(ioc->pcie_sgl_dma_pool);
3799 }
3800
3248 if (ioc->config_page) { 3801 if (ioc->config_page) {
3249 dexitprintk(ioc, pr_info(MPT3SAS_FMT 3802 dexitprintk(ioc, pr_info(MPT3SAS_FMT
3250 "config_page(0x%p): free\n", ioc->name, 3803 "config_page(0x%p): free\n", ioc->name,
@@ -3286,7 +3839,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3286 u16 chains_needed_per_io; 3839 u16 chains_needed_per_io;
3287 u32 sz, total_sz, reply_post_free_sz; 3840 u32 sz, total_sz, reply_post_free_sz;
3288 u32 retry_sz; 3841 u32 retry_sz;
3289 u16 max_request_credit; 3842 u16 max_request_credit, nvme_blocks_needed;
3290 unsigned short sg_tablesize; 3843 unsigned short sg_tablesize;
3291 u16 sge_size; 3844 u16 sge_size;
3292 int i; 3845 int i;
@@ -3308,6 +3861,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3308 sg_tablesize = MPT3SAS_SG_DEPTH; 3861 sg_tablesize = MPT3SAS_SG_DEPTH;
3309 } 3862 }
3310 3863
3864 /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
3865 if (reset_devices)
3866 sg_tablesize = min_t(unsigned short, sg_tablesize,
3867 MPT_KDUMP_MIN_PHYS_SEGMENTS);
3868
3311 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS) 3869 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
3312 sg_tablesize = MPT_MIN_PHYS_SEGMENTS; 3870 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
3313 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { 3871 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
@@ -3340,7 +3898,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3340 ioc->internal_depth, facts->RequestCredit); 3898 ioc->internal_depth, facts->RequestCredit);
3341 if (max_request_credit > MAX_HBA_QUEUE_DEPTH) 3899 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
3342 max_request_credit = MAX_HBA_QUEUE_DEPTH; 3900 max_request_credit = MAX_HBA_QUEUE_DEPTH;
3343 } else 3901 } else if (reset_devices)
3902 max_request_credit = min_t(u16, facts->RequestCredit,
3903 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
3904 else
3344 max_request_credit = min_t(u16, facts->RequestCredit, 3905 max_request_credit = min_t(u16, facts->RequestCredit,
3345 MAX_HBA_QUEUE_DEPTH); 3906 MAX_HBA_QUEUE_DEPTH);
3346 3907
@@ -3622,7 +4183,52 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3622 "internal(0x%p): depth(%d), start smid(%d)\n", 4183 "internal(0x%p): depth(%d), start smid(%d)\n",
3623 ioc->name, ioc->internal, 4184 ioc->name, ioc->internal,
3624 ioc->internal_depth, ioc->internal_smid)); 4185 ioc->internal_depth, ioc->internal_smid));
4186 /*
4187 * The number of NVMe page sized blocks needed is:
4188 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
4189 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
4190 * that is placed in the main message frame. 8 is the size of each PRP
4191 * entry or PRP list pointer entry. 8 is subtracted from page_size
4192 * because of the PRP list pointer entry at the end of a page, so this
4193 * is not counted as a PRP entry. The 1 added page is a round up.
4194 *
4195 * To avoid allocation failures due to the amount of memory that could
4196 * be required for NVMe PRP's, only each set of NVMe blocks will be
4197 * contiguous, so a new set is allocated for each possible I/O.
4198 */
4199 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4200 nvme_blocks_needed =
4201 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
4202 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
4203 nvme_blocks_needed++;
4204
4205 sz = nvme_blocks_needed * ioc->page_size;
4206 ioc->pcie_sgl_dma_pool =
4207 pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
4208 if (!ioc->pcie_sgl_dma_pool) {
4209 pr_info(MPT3SAS_FMT
4210 "PCIe SGL pool: pci_pool_create failed\n",
4211 ioc->name);
4212 goto out;
4213 }
4214 for (i = 0; i < ioc->scsiio_depth; i++) {
4215 ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
4216 pci_pool_alloc(ioc->pcie_sgl_dma_pool,
4217 GFP_KERNEL,
4218 &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
4219 if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
4220 pr_info(MPT3SAS_FMT
4221 "PCIe SGL pool: pci_pool_alloc failed\n",
4222 ioc->name);
4223 goto out;
4224 }
4225 }
3625 4226
4227 dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
4228 "element_size(%d), pool_size(%d kB)\n", ioc->name,
4229 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
4230 total_sz += sz * ioc->scsiio_depth;
4231 }
3626 /* sense buffers, 4 byte align */ 4232 /* sense buffers, 4 byte align */
3627 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; 4233 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
3628 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4234 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
@@ -4446,7 +5052,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
4446 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) 5052 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
4447 ioc->ir_firmware = 1; 5053 ioc->ir_firmware = 1;
4448 if ((facts->IOCCapabilities & 5054 if ((facts->IOCCapabilities &
4449 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE)) 5055 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
4450 ioc->rdpq_array_capable = 1; 5056 ioc->rdpq_array_capable = 1;
4451 if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) 5057 if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
4452 ioc->atomic_desc_capable = 1; 5058 ioc->atomic_desc_capable = 1;
@@ -4467,6 +5073,19 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
4467 le16_to_cpu(mpi_reply.HighPriorityCredit); 5073 le16_to_cpu(mpi_reply.HighPriorityCredit);
4468 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; 5074 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
4469 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); 5075 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
5076 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
5077
5078 /*
5079 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
5080 */
5081 ioc->page_size = 1 << facts->CurrentHostPageSize;
5082 if (ioc->page_size == 1) {
5083 pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
5084 "default host page size to 4k\n", ioc->name);
5085 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
5086 }
5087 dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
5088 ioc->name, facts->CurrentHostPageSize));
4470 5089
4471 dinitprintk(ioc, pr_info(MPT3SAS_FMT 5090 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4472 "hba queue depth(%d), max chains per io(%d)\n", 5091 "hba queue depth(%d), max chains per io(%d)\n",
@@ -4506,6 +5125,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
4506 mpi_request.VP_ID = 0; 5125 mpi_request.VP_ID = 0;
4507 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); 5126 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
4508 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 5127 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
5128 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
4509 5129
4510 if (_base_is_controller_msix_enabled(ioc)) 5130 if (_base_is_controller_msix_enabled(ioc))
4511 mpi_request.HostMSIxVectors = ioc->reply_queue_count; 5131 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
@@ -5374,6 +5994,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5374 */ 5994 */
5375 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; 5995 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
5376 ioc->build_sg = &_base_build_sg_ieee; 5996 ioc->build_sg = &_base_build_sg_ieee;
5997 ioc->build_nvme_prp = &_base_build_nvme_prp;
5377 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; 5998 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
5378 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); 5999 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
5379 6000
@@ -5385,11 +6006,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5385 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic; 6006 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
5386 ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic; 6007 ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
5387 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic; 6008 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
6009 ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap_atomic;
5388 } else { 6010 } else {
5389 ioc->put_smid_default = &_base_put_smid_default; 6011 ioc->put_smid_default = &_base_put_smid_default;
5390 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io; 6012 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
5391 ioc->put_smid_fast_path = &_base_put_smid_fast_path; 6013 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
5392 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority; 6014 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
6015 ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap;
5393 } 6016 }
5394 6017
5395 6018
@@ -5517,9 +6140,16 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5517 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); 6140 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
5518 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); 6141 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
5519 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); 6142 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
5520 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) 6143 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
5521 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); 6144 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
5522 6145 if (ioc->is_gen35_ioc) {
6146 _base_unmask_events(ioc,
6147 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
6148 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
6149 _base_unmask_events(ioc,
6150 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
6151 }
6152 }
5523 r = _base_make_ioc_operational(ioc); 6153 r = _base_make_ioc_operational(ioc);
5524 if (r) 6154 if (r)
5525 goto out_free_resources; 6155 goto out_free_resources;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index a77bb7dc12b1..60f42ca3954f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -54,6 +54,7 @@
54#include "mpi/mpi2_raid.h" 54#include "mpi/mpi2_raid.h"
55#include "mpi/mpi2_tool.h" 55#include "mpi/mpi2_tool.h"
56#include "mpi/mpi2_sas.h" 56#include "mpi/mpi2_sas.h"
57#include "mpi/mpi2_pci.h"
57 58
58#include <scsi/scsi.h> 59#include <scsi/scsi.h>
59#include <scsi/scsi_cmnd.h> 60#include <scsi/scsi_cmnd.h>
@@ -73,8 +74,8 @@
73#define MPT3SAS_DRIVER_NAME "mpt3sas" 74#define MPT3SAS_DRIVER_NAME "mpt3sas"
74#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" 75#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
75#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" 76#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
76#define MPT3SAS_DRIVER_VERSION "15.100.00.00" 77#define MPT3SAS_DRIVER_VERSION "17.100.00.00"
77#define MPT3SAS_MAJOR_VERSION 15 78#define MPT3SAS_MAJOR_VERSION 17
78#define MPT3SAS_MINOR_VERSION 100 79#define MPT3SAS_MINOR_VERSION 100
79#define MPT3SAS_BUILD_VERSION 0 80#define MPT3SAS_BUILD_VERSION 0
80#define MPT3SAS_RELEASE_VERSION 00 81#define MPT3SAS_RELEASE_VERSION 00
@@ -92,6 +93,7 @@
92 */ 93 */
93#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE 94#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE
94#define MPT_MIN_PHYS_SEGMENTS 16 95#define MPT_MIN_PHYS_SEGMENTS 16
96#define MPT_KDUMP_MIN_PHYS_SEGMENTS 32
95 97
96#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE 98#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
97#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE 99#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE
@@ -111,9 +113,11 @@
111#define MPT3SAS_SATA_QUEUE_DEPTH 32 113#define MPT3SAS_SATA_QUEUE_DEPTH 32
112#define MPT3SAS_SAS_QUEUE_DEPTH 254 114#define MPT3SAS_SAS_QUEUE_DEPTH 254
113#define MPT3SAS_RAID_QUEUE_DEPTH 128 115#define MPT3SAS_RAID_QUEUE_DEPTH 128
116#define MPT3SAS_KDUMP_SCSI_IO_DEPTH 200
114 117
115#define MPT3SAS_RAID_MAX_SECTORS 8192 118#define MPT3SAS_RAID_MAX_SECTORS 8192
116 119#define MPT3SAS_HOST_PAGE_SIZE_4K 12
120#define MPT3SAS_NVME_QUEUE_DEPTH 128
117#define MPT_NAME_LENGTH 32 /* generic length of strings */ 121#define MPT_NAME_LENGTH 32 /* generic length of strings */
118#define MPT_STRING_LENGTH 64 122#define MPT_STRING_LENGTH 64
119 123
@@ -131,6 +135,15 @@
131#define DEFAULT_NUM_FWCHAIN_ELEMTS 8 135#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
132 136
133/* 137/*
138 * NVMe defines
139 */
140#define NVME_PRP_SIZE 8 /* PRP size */
141#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
142#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
143#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
144#define NVME_PRP_PAGE_SIZE 4096 /* Page size */
145
146/*
134 * reset phases 147 * reset phases
135 */ 148 */
136#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */ 149#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
@@ -159,6 +172,7 @@
159#define MPT_TARGET_FLAGS_VOLUME 0x02 172#define MPT_TARGET_FLAGS_VOLUME 0x02
160#define MPT_TARGET_FLAGS_DELETED 0x04 173#define MPT_TARGET_FLAGS_DELETED 0x04
161#define MPT_TARGET_FASTPATH_IO 0x08 174#define MPT_TARGET_FASTPATH_IO 0x08
175#define MPT_TARGET_FLAGS_PCIE_DEVICE 0x10
162 176
163#define SAS2_PCI_DEVICE_B0_REVISION (0x01) 177#define SAS2_PCI_DEVICE_B0_REVISION (0x01)
164#define SAS3_PCI_DEVICE_C0_REVISION (0x02) 178#define SAS3_PCI_DEVICE_C0_REVISION (0x02)
@@ -357,7 +371,8 @@ struct Mpi2ManufacturingPage11_t {
357 * @flags: MPT_TARGET_FLAGS_XXX flags 371 * @flags: MPT_TARGET_FLAGS_XXX flags
358 * @deleted: target flaged for deletion 372 * @deleted: target flaged for deletion
359 * @tm_busy: target is busy with TM request. 373 * @tm_busy: target is busy with TM request.
360 * @sdev: The sas_device associated with this target 374 * @sas_dev: The sas_device associated with this target
375 * @pcie_dev: The pcie device associated with this target
361 */ 376 */
362struct MPT3SAS_TARGET { 377struct MPT3SAS_TARGET {
363 struct scsi_target *starget; 378 struct scsi_target *starget;
@@ -368,7 +383,8 @@ struct MPT3SAS_TARGET {
368 u32 flags; 383 u32 flags;
369 u8 deleted; 384 u8 deleted;
370 u8 tm_busy; 385 u8 tm_busy;
371 struct _sas_device *sdev; 386 struct _sas_device *sas_dev;
387 struct _pcie_device *pcie_dev;
372}; 388};
373 389
374 390
@@ -467,6 +483,8 @@ struct _internal_cmd {
467 * @pfa_led_on: flag for PFA LED status 483 * @pfa_led_on: flag for PFA LED status
468 * @pend_sas_rphy_add: flag to check if device is in sas_rphy_add() 484 * @pend_sas_rphy_add: flag to check if device is in sas_rphy_add()
469 * addition routine. 485 * addition routine.
486 * @chassis_slot: chassis slot
487 * @is_chassis_slot_valid: chassis slot valid or not
470 */ 488 */
471struct _sas_device { 489struct _sas_device {
472 struct list_head list; 490 struct list_head list;
@@ -489,6 +507,8 @@ struct _sas_device {
489 u8 pfa_led_on; 507 u8 pfa_led_on;
490 u8 pend_sas_rphy_add; 508 u8 pend_sas_rphy_add;
491 u8 enclosure_level; 509 u8 enclosure_level;
510 u8 chassis_slot;
511 u8 is_chassis_slot_valid;
492 u8 connector_name[5]; 512 u8 connector_name[5];
493 struct kref refcount; 513 struct kref refcount;
494}; 514};
@@ -508,6 +528,89 @@ static inline void sas_device_put(struct _sas_device *s)
508 kref_put(&s->refcount, sas_device_free); 528 kref_put(&s->refcount, sas_device_free);
509} 529}
510 530
531/*
532 * struct _pcie_device - attached PCIe device information
533 * @list: pcie device list
534 * @starget: starget object
535 * @wwid: device WWID
536 * @handle: device handle
537 * @device_info: bitfield provides detailed info about the device
538 * @id: target id
539 * @channel: target channel
540 * @slot: slot number
541 * @port_num: port number
542 * @responding: used in _scsih_pcie_device_mark_responding
543 * @fast_path: fast path feature enable bit
544 * @nvme_mdts: MaximumDataTransferSize from PCIe Device Page 2 for
545 * NVMe device only
546 * @enclosure_handle: enclosure handle
547 * @enclosure_logical_id: enclosure logical identifier
548 * @enclosure_level: The level of device's enclosure from the controller
549 * @connector_name: ASCII value of the Connector's name
550 * @serial_number: pointer of serial number string allocated runtime
551 * @refcount: reference count for deletion
552 */
553struct _pcie_device {
554 struct list_head list;
555 struct scsi_target *starget;
556 u64 wwid;
557 u16 handle;
558 u32 device_info;
559 int id;
560 int channel;
561 u16 slot;
562 u8 port_num;
563 u8 responding;
564 u8 fast_path;
565 u32 nvme_mdts;
566 u16 enclosure_handle;
567 u64 enclosure_logical_id;
568 u8 enclosure_level;
569 u8 connector_name[4];
570 u8 *serial_number;
571 struct kref refcount;
572};
573/**
574 * pcie_device_get - Increment the pcie device reference count
575 *
576 * @p: pcie_device object
577 *
578 * When ever this function called it will increment the
579 * reference count of the pcie device for which this function called.
580 *
581 */
582static inline void pcie_device_get(struct _pcie_device *p)
583{
584 kref_get(&p->refcount);
585}
586
587/**
588 * pcie_device_free - Release the pcie device object
589 * @r - kref object
590 *
591 * Free's the pcie device object. It will be called when reference count
592 * reaches to zero.
593 */
594static inline void pcie_device_free(struct kref *r)
595{
596 kfree(container_of(r, struct _pcie_device, refcount));
597}
598
599/**
600 * pcie_device_put - Decrement the pcie device reference count
601 *
602 * @p: pcie_device object
603 *
604 * When ever this function called it will decrement the
605 * reference count of the pcie device for which this function called.
606 *
607 * When refernce count reaches to Zero, this will call pcie_device_free to the
608 * pcie_device object.
609 */
610static inline void pcie_device_put(struct _pcie_device *p)
611{
612 kref_put(&p->refcount, pcie_device_free);
613}
511/** 614/**
512 * struct _raid_device - raid volume link list 615 * struct _raid_device - raid volume link list
513 * @list: sas device list 616 * @list: sas device list
@@ -556,12 +659,13 @@ struct _raid_device {
556 659
557/** 660/**
558 * struct _boot_device - boot device info 661 * struct _boot_device - boot device info
559 * @is_raid: flag to indicate whether this is volume 662 *
560 * @device: holds pointer for either struct _sas_device or 663 * @channel: sas, raid, or pcie channel
561 * struct _raid_device 664 * @device: holds pointer for struct _sas_device, struct _raid_device or
665 * struct _pcie_device
562 */ 666 */
563struct _boot_device { 667struct _boot_device {
564 u8 is_raid; 668 int channel;
565 void *device; 669 void *device;
566}; 670};
567 671
@@ -644,6 +748,16 @@ enum reset_type {
644}; 748};
645 749
646/** 750/**
751 * struct pcie_sg_list - PCIe SGL buffer (contiguous per I/O)
752 * @pcie_sgl: PCIe native SGL for NVMe devices
753 * @pcie_sgl_dma: physical address
754 */
755struct pcie_sg_list {
756 void *pcie_sgl;
757 dma_addr_t pcie_sgl_dma;
758};
759
760/**
647 * struct chain_tracker - firmware chain tracker 761 * struct chain_tracker - firmware chain tracker
648 * @chain_buffer: chain buffer 762 * @chain_buffer: chain buffer
649 * @chain_buffer_dma: physical address 763 * @chain_buffer_dma: physical address
@@ -669,6 +783,7 @@ struct scsiio_tracker {
669 struct scsi_cmnd *scmd; 783 struct scsi_cmnd *scmd;
670 u8 cb_idx; 784 u8 cb_idx;
671 u8 direct_io; 785 u8 direct_io;
786 struct pcie_sg_list pcie_sg_list;
672 struct list_head chain_list; 787 struct list_head chain_list;
673 struct list_head tracker_list; 788 struct list_head tracker_list;
674 u16 msix_io; 789 u16 msix_io;
@@ -742,13 +857,19 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
742 857
743/* SAS3.0 support */ 858/* SAS3.0 support */
744typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc, 859typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc,
745 struct scsi_cmnd *scmd, u16 smid); 860 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device);
746typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge, 861typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
747 dma_addr_t data_out_dma, size_t data_out_sz, 862 dma_addr_t data_out_dma, size_t data_out_sz,
748 dma_addr_t data_in_dma, size_t data_in_sz); 863 dma_addr_t data_in_dma, size_t data_in_sz);
749typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc, 864typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
750 void *paddr); 865 void *paddr);
751 866
867/* SAS3.5 support */
868typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
869 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
870 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
871 size_t data_in_sz);
872
752/* To support atomic and non atomic descriptors*/ 873/* To support atomic and non atomic descriptors*/
753typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid, 874typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
754 u16 funcdep); 875 u16 funcdep);
@@ -791,6 +912,7 @@ struct mpt3sas_facts {
791 u16 MaxDevHandle; 912 u16 MaxDevHandle;
792 u16 MaxPersistentEntries; 913 u16 MaxPersistentEntries;
793 u16 MinDevHandle; 914 u16 MinDevHandle;
915 u8 CurrentHostPageSize;
794}; 916};
795 917
796struct mpt3sas_port_facts { 918struct mpt3sas_port_facts {
@@ -825,6 +947,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
825 * @bars: bitmask of BAR's that must be configured 947 * @bars: bitmask of BAR's that must be configured
826 * @mask_interrupts: ignore interrupt 948 * @mask_interrupts: ignore interrupt
827 * @dma_mask: used to set the consistent dma mask 949 * @dma_mask: used to set the consistent dma mask
950 * @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and
951 * pci resource handling
828 * @fault_reset_work_q_name: fw fault work queue 952 * @fault_reset_work_q_name: fw fault work queue
829 * @fault_reset_work_q: "" 953 * @fault_reset_work_q: ""
830 * @fault_reset_work: "" 954 * @fault_reset_work: ""
@@ -888,9 +1012,13 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
888 * @sas_device_list: sas device object list 1012 * @sas_device_list: sas device object list
889 * @sas_device_init_list: sas device object list (used only at init time) 1013 * @sas_device_init_list: sas device object list (used only at init time)
890 * @sas_device_lock: 1014 * @sas_device_lock:
1015 * @pcie_device_list: pcie device object list
1016 * @pcie_device_init_list: pcie device object list (used only at init time)
1017 * @pcie_device_lock:
891 * @io_missing_delay: time for IO completed by fw when PDR enabled 1018 * @io_missing_delay: time for IO completed by fw when PDR enabled
892 * @device_missing_delay: time for device missing by fw when PDR enabled 1019 * @device_missing_delay: time for device missing by fw when PDR enabled
893 * @sas_id : used for setting volume target IDs 1020 * @sas_id : used for setting volume target IDs
1021 * @pcie_target_id: used for setting pcie target IDs
894 * @blocking_handles: bitmask used to identify which devices need blocking 1022 * @blocking_handles: bitmask used to identify which devices need blocking
895 * @pd_handles : bitmask for PD handles 1023 * @pd_handles : bitmask for PD handles
896 * @pd_handles_sz : size of pd_handle bitmask 1024 * @pd_handles_sz : size of pd_handle bitmask
@@ -1056,6 +1184,9 @@ struct MPT3SAS_ADAPTER {
1056 MPT_BUILD_SG build_sg_mpi; 1184 MPT_BUILD_SG build_sg_mpi;
1057 MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi; 1185 MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
1058 1186
1187 /* function ptr for NVMe PRP elements only */
1188 NVME_BUILD_PRP build_nvme_prp;
1189
1059 /* event log */ 1190 /* event log */
1060 u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; 1191 u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
1061 u32 event_context; 1192 u32 event_context;
@@ -1086,11 +1217,16 @@ struct MPT3SAS_ADAPTER {
1086 struct list_head sas_device_list; 1217 struct list_head sas_device_list;
1087 struct list_head sas_device_init_list; 1218 struct list_head sas_device_init_list;
1088 spinlock_t sas_device_lock; 1219 spinlock_t sas_device_lock;
1220 struct list_head pcie_device_list;
1221 struct list_head pcie_device_init_list;
1222 spinlock_t pcie_device_lock;
1223
1089 struct list_head raid_device_list; 1224 struct list_head raid_device_list;
1090 spinlock_t raid_device_lock; 1225 spinlock_t raid_device_lock;
1091 u8 io_missing_delay; 1226 u8 io_missing_delay;
1092 u16 device_missing_delay; 1227 u16 device_missing_delay;
1093 int sas_id; 1228 int sas_id;
1229 int pcie_target_id;
1094 1230
1095 void *blocking_handles; 1231 void *blocking_handles;
1096 void *pd_handles; 1232 void *pd_handles;
@@ -1119,6 +1255,11 @@ struct MPT3SAS_ADAPTER {
1119 int pending_io_count; 1255 int pending_io_count;
1120 wait_queue_head_t reset_wq; 1256 wait_queue_head_t reset_wq;
1121 1257
1258 /* PCIe SGL */
1259 struct dma_pool *pcie_sgl_dma_pool;
1260 /* Host Page Size */
1261 u32 page_size;
1262
1122 /* chain */ 1263 /* chain */
1123 struct chain_tracker *chain_lookup; 1264 struct chain_tracker *chain_lookup;
1124 struct list_head free_chain_list; 1265 struct list_head free_chain_list;
@@ -1216,6 +1357,7 @@ struct MPT3SAS_ADAPTER {
1216 PUT_SMID_IO_FP_HIP put_smid_fast_path; 1357 PUT_SMID_IO_FP_HIP put_smid_fast_path;
1217 PUT_SMID_IO_FP_HIP put_smid_hi_priority; 1358 PUT_SMID_IO_FP_HIP put_smid_hi_priority;
1218 PUT_SMID_DEFAULT put_smid_default; 1359 PUT_SMID_DEFAULT put_smid_default;
1360 PUT_SMID_DEFAULT put_smid_nvme_encap;
1219 1361
1220}; 1362};
1221 1363
@@ -1252,7 +1394,8 @@ void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
1252void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid); 1394void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
1253__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, 1395__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
1254 u16 smid); 1396 u16 smid);
1255 1397void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid);
1398dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
1256void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc); 1399void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
1257 1400
1258/* hi-priority queue */ 1401/* hi-priority queue */
@@ -1321,6 +1464,10 @@ struct _sas_device *mpt3sas_get_sdev_by_addr(
1321 struct MPT3SAS_ADAPTER *ioc, u64 sas_address); 1464 struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
1322struct _sas_device *__mpt3sas_get_sdev_by_addr( 1465struct _sas_device *__mpt3sas_get_sdev_by_addr(
1323 struct MPT3SAS_ADAPTER *ioc, u64 sas_address); 1466 struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
1467struct _sas_device *mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
1468 u16 handle);
1469struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
1470 u16 handle);
1324 1471
1325void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc); 1472void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
1326struct _raid_device * 1473struct _raid_device *
@@ -1359,6 +1506,12 @@ int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
1359int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, 1506int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
1360 Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, 1507 Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
1361 u32 form, u32 handle); 1508 u32 form, u32 handle);
1509int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
1510 Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
1511 u32 form, u32 handle);
1512int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
1513 Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
1514 u32 form, u32 handle);
1362int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, 1515int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
1363 Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, 1516 Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
1364 u16 sz); 1517 u16 sz);
@@ -1466,7 +1619,7 @@ void
1466mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io); 1619mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
1467void 1620void
1468mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, 1621mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
1469 struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request, 1622 struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
1470 u16 smid); 1623 u16 smid);
1471 1624
1472/* NCQ Prio Handling Check */ 1625/* NCQ Prio Handling Check */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index dd6270125614..1c747cf419d5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -150,6 +150,24 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
150 case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: 150 case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
151 desc = "driver_mapping"; 151 desc = "driver_mapping";
152 break; 152 break;
153 case MPI2_CONFIG_EXTPAGETYPE_SAS_PORT:
154 desc = "sas_port";
155 break;
156 case MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING:
157 desc = "ext_manufacturing";
158 break;
159 case MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT:
160 desc = "pcie_io_unit";
161 break;
162 case MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH:
163 desc = "pcie_switch";
164 break;
165 case MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE:
166 desc = "pcie_device";
167 break;
168 case MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK:
169 desc = "pcie_link";
170 break;
153 } 171 }
154 break; 172 break;
155 } 173 }
@@ -1053,6 +1071,88 @@ mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
1053} 1071}
1054 1072
1055/** 1073/**
1074 * mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0
1075 * @ioc: per adapter object
1076 * @mpi_reply: reply mf payload returned from firmware
1077 * @config_page: contents of the config page
1078 * @form: GET_NEXT_HANDLE or HANDLE
1079 * @handle: device handle
1080 * Context: sleep.
1081 *
1082 * Returns 0 for success, non-zero for failure.
1083 */
1084int
1085mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
1086 Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
1087 u32 form, u32 handle)
1088{
1089 Mpi2ConfigRequest_t mpi_request;
1090 int r;
1091
1092 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1093 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1094 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1095 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1096 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE;
1097 mpi_request.Header.PageVersion = MPI26_PCIEDEVICE0_PAGEVERSION;
1098 mpi_request.Header.PageNumber = 0;
1099 ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
1100 r = _config_request(ioc, &mpi_request, mpi_reply,
1101 MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1102 if (r)
1103 goto out;
1104
1105 mpi_request.PageAddress = cpu_to_le32(form | handle);
1106 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1107 r = _config_request(ioc, &mpi_request, mpi_reply,
1108 MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1109 sizeof(*config_page));
1110out:
1111 return r;
1112}
1113
1114/**
1115 * mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2
1116 * @ioc: per adapter object
1117 * @mpi_reply: reply mf payload returned from firmware
1118 * @config_page: contents of the config page
1119 * @form: GET_NEXT_HANDLE or HANDLE
1120 * @handle: device handle
1121 * Context: sleep.
1122 *
1123 * Returns 0 for success, non-zero for failure.
1124 */
1125int
1126mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
1127 Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
1128 u32 form, u32 handle)
1129{
1130 Mpi2ConfigRequest_t mpi_request;
1131 int r;
1132
1133 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1134 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1135 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1136 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1137 mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE;
1138 mpi_request.Header.PageVersion = MPI26_PCIEDEVICE2_PAGEVERSION;
1139 mpi_request.Header.PageNumber = 2;
1140 ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
1141 r = _config_request(ioc, &mpi_request, mpi_reply,
1142 MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
1143 if (r)
1144 goto out;
1145
1146 mpi_request.PageAddress = cpu_to_le32(form | handle);
1147 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
1148 r = _config_request(ioc, &mpi_request, mpi_reply,
1149 MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
1150 sizeof(*config_page));
1151out:
1152 return r;
1153}
1154
1155/**
1056 * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host 1156 * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host
1057 * @ioc: per adapter object 1157 * @ioc: per adapter object
1058 * @num_phys: pointer returned with the number of phys 1158 * @num_phys: pointer returned with the number of phys
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index bdffb692bded..b4c374b08e5e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -79,32 +79,6 @@ enum block_state {
79}; 79};
80 80
81/** 81/**
82 * _ctl_sas_device_find_by_handle - sas device search
83 * @ioc: per adapter object
84 * @handle: sas device handle (assigned by firmware)
85 * Context: Calling function should acquire ioc->sas_device_lock
86 *
87 * This searches for sas_device based on sas_address, then return sas_device
88 * object.
89 */
90static struct _sas_device *
91_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
92{
93 struct _sas_device *sas_device, *r;
94
95 r = NULL;
96 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
97 if (sas_device->handle != handle)
98 continue;
99 r = sas_device;
100 goto out;
101 }
102
103 out:
104 return r;
105}
106
107/**
108 * _ctl_display_some_debug - debug routine 82 * _ctl_display_some_debug - debug routine
109 * @ioc: per adapter object 83 * @ioc: per adapter object
110 * @smid: system request message index 84 * @smid: system request message index
@@ -229,10 +203,9 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
229 Mpi2SCSIIOReply_t *scsi_reply = 203 Mpi2SCSIIOReply_t *scsi_reply =
230 (Mpi2SCSIIOReply_t *)mpi_reply; 204 (Mpi2SCSIIOReply_t *)mpi_reply;
231 struct _sas_device *sas_device = NULL; 205 struct _sas_device *sas_device = NULL;
232 unsigned long flags; 206 struct _pcie_device *pcie_device = NULL;
233 207
234 spin_lock_irqsave(&ioc->sas_device_lock, flags); 208 sas_device = mpt3sas_get_sdev_by_handle(ioc,
235 sas_device = _ctl_sas_device_find_by_handle(ioc,
236 le16_to_cpu(scsi_reply->DevHandle)); 209 le16_to_cpu(scsi_reply->DevHandle));
237 if (sas_device) { 210 if (sas_device) {
238 pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n", 211 pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
@@ -242,8 +215,25 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
242 "\tenclosure_logical_id(0x%016llx), slot(%d)\n", 215 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
243 ioc->name, (unsigned long long) 216 ioc->name, (unsigned long long)
244 sas_device->enclosure_logical_id, sas_device->slot); 217 sas_device->enclosure_logical_id, sas_device->slot);
218 sas_device_put(sas_device);
219 }
220 if (!sas_device) {
221 pcie_device = mpt3sas_get_pdev_by_handle(ioc,
222 le16_to_cpu(scsi_reply->DevHandle));
223 if (pcie_device) {
224 pr_warn(MPT3SAS_FMT
225 "\tWWID(0x%016llx), port(%d)\n", ioc->name,
226 (unsigned long long)pcie_device->wwid,
227 pcie_device->port_num);
228 if (pcie_device->enclosure_handle != 0)
229 pr_warn(MPT3SAS_FMT
230 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
231 ioc->name, (unsigned long long)
232 pcie_device->enclosure_logical_id,
233 pcie_device->slot);
234 pcie_device_put(pcie_device);
235 }
245 } 236 }
246 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
247 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) 237 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
248 pr_info(MPT3SAS_FMT 238 pr_info(MPT3SAS_FMT
249 "\tscsi_state(0x%02x), scsi_status" 239 "\tscsi_state(0x%02x), scsi_status"
@@ -272,6 +262,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
272{ 262{
273 MPI2DefaultReply_t *mpi_reply; 263 MPI2DefaultReply_t *mpi_reply;
274 Mpi2SCSIIOReply_t *scsiio_reply; 264 Mpi2SCSIIOReply_t *scsiio_reply;
265 Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
275 const void *sense_data; 266 const void *sense_data;
276 u32 sz; 267 u32 sz;
277 268
@@ -298,7 +289,20 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
298 memcpy(ioc->ctl_cmds.sense, sense_data, sz); 289 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
299 } 290 }
300 } 291 }
292 /*
293 * Get Error Response data for NVMe device. The ctl_cmds.sense
294 * buffer is used to store the Error Response data.
295 */
296 if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
297 nvme_error_reply =
298 (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
299 sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
300 le32_to_cpu(nvme_error_reply->ErrorResponseCount));
301 sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
302 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
303 }
301 } 304 }
305
302 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply); 306 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
303 ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING; 307 ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
304 complete(&ioc->ctl_cmds.done); 308 complete(&ioc->ctl_cmds.done);
@@ -640,11 +644,12 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
640{ 644{
641 MPI2RequestHeader_t *mpi_request = NULL, *request; 645 MPI2RequestHeader_t *mpi_request = NULL, *request;
642 MPI2DefaultReply_t *mpi_reply; 646 MPI2DefaultReply_t *mpi_reply;
647 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
643 u32 ioc_state; 648 u32 ioc_state;
644 u16 smid; 649 u16 smid;
645 unsigned long timeout; 650 unsigned long timeout;
646 u8 issue_reset; 651 u8 issue_reset;
647 u32 sz; 652 u32 sz, sz_arg;
648 void *psge; 653 void *psge;
649 void *data_out = NULL; 654 void *data_out = NULL;
650 dma_addr_t data_out_dma = 0; 655 dma_addr_t data_out_dma = 0;
@@ -741,7 +746,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
741 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 746 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
742 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || 747 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
743 mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT || 748 mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
744 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) { 749 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
750 mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
745 751
746 device_handle = le16_to_cpu(mpi_request->FunctionDependent1); 752 device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
747 if (!device_handle || (device_handle > 753 if (!device_handle || (device_handle >
@@ -792,6 +798,38 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
792 798
793 init_completion(&ioc->ctl_cmds.done); 799 init_completion(&ioc->ctl_cmds.done);
794 switch (mpi_request->Function) { 800 switch (mpi_request->Function) {
801 case MPI2_FUNCTION_NVME_ENCAPSULATED:
802 {
803 nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
804 /*
805 * Get the Physical Address of the sense buffer.
806 * Use Error Response buffer address field to hold the sense
807 * buffer address.
808 * Clear the internal sense buffer, which will potentially hold
809 * the Completion Queue Entry on return, or 0 if no Entry.
810 * Build the PRPs and set direction bits.
811 * Send the request.
812 */
813 nvme_encap_request->ErrorResponseBaseAddress = ioc->sense_dma &
814 0xFFFFFFFF00000000;
815 nvme_encap_request->ErrorResponseBaseAddress |=
816 (U64)mpt3sas_base_get_sense_buffer_dma(ioc, smid);
817 nvme_encap_request->ErrorResponseAllocationLength =
818 NVME_ERROR_RESPONSE_SIZE;
819 memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
820 ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
821 data_out_dma, data_out_sz, data_in_dma, data_in_sz);
822 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
823 dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
824 "ioctl failed due to device removal in progress\n",
825 ioc->name, device_handle));
826 mpt3sas_base_free_smid(ioc, smid);
827 ret = -EINVAL;
828 goto out;
829 }
830 ioc->put_smid_nvme_encap(ioc, smid);
831 break;
832 }
795 case MPI2_FUNCTION_SCSI_IO_REQUEST: 833 case MPI2_FUNCTION_SCSI_IO_REQUEST:
796 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 834 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
797 { 835 {
@@ -1007,15 +1045,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
1007 } 1045 }
1008 } 1046 }
1009 1047
1010 /* copy out sense to user */ 1048 /* copy out sense/NVMe Error Response to user */
1011 if (karg.max_sense_bytes && (mpi_request->Function == 1049 if (karg.max_sense_bytes && (mpi_request->Function ==
1012 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == 1050 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
1013 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { 1051 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
1014 sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE); 1052 MPI2_FUNCTION_NVME_ENCAPSULATED)) {
1053 if (karg.sense_data_ptr == NULL) {
1054 pr_info(MPT3SAS_FMT "Response buffer provided"
1055 " by application is NULL; Response data will"
1056 " not be returned.\n", ioc->name);
1057 goto out;
1058 }
1059 sz_arg = (mpi_request->Function ==
1060 MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
1061 SCSI_SENSE_BUFFERSIZE;
1062 sz = min_t(u32, karg.max_sense_bytes, sz_arg);
1015 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, 1063 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
1016 sz)) { 1064 sz)) {
1017 pr_err("failure at %s:%d/%s()!\n", __FILE__, 1065 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1018 __LINE__, __func__); 1066 __LINE__, __func__);
1019 ret = -ENODATA; 1067 ret = -ENODATA;
1020 goto out; 1068 goto out;
1021 } 1069 }
@@ -1065,12 +1113,6 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1065{ 1113{
1066 struct mpt3_ioctl_iocinfo karg; 1114 struct mpt3_ioctl_iocinfo karg;
1067 1115
1068 if (copy_from_user(&karg, arg, sizeof(karg))) {
1069 pr_err("failure at %s:%d/%s()!\n",
1070 __FILE__, __LINE__, __func__);
1071 return -EFAULT;
1072 }
1073
1074 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1116 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1075 __func__)); 1117 __func__));
1076 1118
@@ -1295,6 +1337,42 @@ _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
1295} 1337}
1296 1338
1297/** 1339/**
1340 * _ctl_btdh_search_pcie_device - searching for pcie device
1341 * @ioc: per adapter object
1342 * @btdh: btdh ioctl payload
1343 */
1344static int
1345_ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
1346 struct mpt3_ioctl_btdh_mapping *btdh)
1347{
1348 struct _pcie_device *pcie_device;
1349 unsigned long flags;
1350 int rc = 0;
1351
1352 if (list_empty(&ioc->pcie_device_list))
1353 return rc;
1354
1355 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1356 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1357 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1358 btdh->handle == pcie_device->handle) {
1359 btdh->bus = pcie_device->channel;
1360 btdh->id = pcie_device->id;
1361 rc = 1;
1362 goto out;
1363 } else if (btdh->bus == pcie_device->channel && btdh->id ==
1364 pcie_device->id && btdh->handle == 0xFFFF) {
1365 btdh->handle = pcie_device->handle;
1366 rc = 1;
1367 goto out;
1368 }
1369 }
1370 out:
1371 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1372 return rc;
1373}
1374
1375/**
1298 * _ctl_btdh_search_raid_device - searching for raid device 1376 * _ctl_btdh_search_raid_device - searching for raid device
1299 * @ioc: per adapter object 1377 * @ioc: per adapter object
1300 * @btdh: btdh ioctl payload 1378 * @btdh: btdh ioctl payload
@@ -1352,6 +1430,8 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1352 1430
1353 rc = _ctl_btdh_search_sas_device(ioc, &karg); 1431 rc = _ctl_btdh_search_sas_device(ioc, &karg);
1354 if (!rc) 1432 if (!rc)
1433 rc = _ctl_btdh_search_pcie_device(ioc, &karg);
1434 if (!rc)
1355 _ctl_btdh_search_raid_device(ioc, &karg); 1435 _ctl_btdh_search_raid_device(ioc, &karg);
1356 1436
1357 if (copy_to_user(arg, &karg, sizeof(karg))) { 1437 if (copy_to_user(arg, &karg, sizeof(karg))) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 22998cbd538f..362f406a285e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -60,6 +60,9 @@
60#include "mpt3sas_base.h" 60#include "mpt3sas_base.h"
61 61
62#define RAID_CHANNEL 1 62#define RAID_CHANNEL 1
63
64#define PCIE_CHANNEL 2
65
63/* forward proto's */ 66/* forward proto's */
64static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, 67static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
65 struct _sas_node *sas_expander); 68 struct _sas_node *sas_expander);
@@ -69,7 +72,11 @@ static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
69 struct _sas_device *sas_device); 72 struct _sas_device *sas_device);
70static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, 73static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
71 u8 retry_count, u8 is_pd); 74 u8 retry_count, u8 is_pd);
72 75static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
78static void
79_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
73static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid); 80static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
74 81
75/* global parameters */ 82/* global parameters */
@@ -406,11 +413,6 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
406 413
407 *sas_address = 0; 414 *sas_address = 0;
408 415
409 if (handle <= ioc->sas_hba.num_phys) {
410 *sas_address = ioc->sas_hba.sas_address;
411 return 0;
412 }
413
414 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 416 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
415 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 417 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
416 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, 418 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
@@ -420,7 +422,15 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
420 422
421 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 423 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
422 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 424 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
423 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 425 /* For HBA, vSES doesn't return HBA SAS address. Instead return
426 * vSES's sas address.
427 */
428 if ((handle <= ioc->sas_hba.num_phys) &&
429 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
430 MPI2_SAS_DEVICE_INFO_SEP)))
431 *sas_address = ioc->sas_hba.sas_address;
432 else
433 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
424 return 0; 434 return 0;
425 } 435 }
426 436
@@ -439,21 +449,22 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
439/** 449/**
440 * _scsih_determine_boot_device - determine boot device. 450 * _scsih_determine_boot_device - determine boot device.
441 * @ioc: per adapter object 451 * @ioc: per adapter object
442 * @device: either sas_device or raid_device object 452 * @device: sas_device or pcie_device object
443 * @is_raid: [flag] 1 = raid object, 0 = sas object 453 * @channel: SAS or PCIe channel
444 * 454 *
445 * Determines whether this device should be first reported device to 455 * Determines whether this device should be first reported device to
446 * to scsi-ml or sas transport, this purpose is for persistent boot device. 456 * to scsi-ml or sas transport, this purpose is for persistent boot device.
447 * There are primary, alternate, and current entries in bios page 2. The order 457 * There are primary, alternate, and current entries in bios page 2. The order
448 * priority is primary, alternate, then current. This routine saves 458 * priority is primary, alternate, then current. This routine saves
449 * the corresponding device object and is_raid flag in the ioc object. 459 * the corresponding device object.
450 * The saved data to be used later in _scsih_probe_boot_devices(). 460 * The saved data to be used later in _scsih_probe_boot_devices().
451 */ 461 */
452static void 462static void
453_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, 463_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
454 void *device, u8 is_raid) 464 u32 channel)
455{ 465{
456 struct _sas_device *sas_device; 466 struct _sas_device *sas_device;
467 struct _pcie_device *pcie_device;
457 struct _raid_device *raid_device; 468 struct _raid_device *raid_device;
458 u64 sas_address; 469 u64 sas_address;
459 u64 device_name; 470 u64 device_name;
@@ -468,18 +479,24 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
468 if (!ioc->bios_pg3.BiosVersion) 479 if (!ioc->bios_pg3.BiosVersion)
469 return; 480 return;
470 481
471 if (!is_raid) { 482 if (channel == RAID_CHANNEL) {
472 sas_device = device;
473 sas_address = sas_device->sas_address;
474 device_name = sas_device->device_name;
475 enclosure_logical_id = sas_device->enclosure_logical_id;
476 slot = sas_device->slot;
477 } else {
478 raid_device = device; 483 raid_device = device;
479 sas_address = raid_device->wwid; 484 sas_address = raid_device->wwid;
480 device_name = 0; 485 device_name = 0;
481 enclosure_logical_id = 0; 486 enclosure_logical_id = 0;
482 slot = 0; 487 slot = 0;
488 } else if (channel == PCIE_CHANNEL) {
489 pcie_device = device;
490 sas_address = pcie_device->wwid;
491 device_name = 0;
492 enclosure_logical_id = 0;
493 slot = 0;
494 } else {
495 sas_device = device;
496 sas_address = sas_device->sas_address;
497 device_name = sas_device->device_name;
498 enclosure_logical_id = sas_device->enclosure_logical_id;
499 slot = sas_device->slot;
483 } 500 }
484 501
485 if (!ioc->req_boot_device.device) { 502 if (!ioc->req_boot_device.device) {
@@ -493,7 +510,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
493 ioc->name, __func__, 510 ioc->name, __func__,
494 (unsigned long long)sas_address)); 511 (unsigned long long)sas_address));
495 ioc->req_boot_device.device = device; 512 ioc->req_boot_device.device = device;
496 ioc->req_boot_device.is_raid = is_raid; 513 ioc->req_boot_device.channel = channel;
497 } 514 }
498 } 515 }
499 516
@@ -508,7 +525,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
508 ioc->name, __func__, 525 ioc->name, __func__,
509 (unsigned long long)sas_address)); 526 (unsigned long long)sas_address));
510 ioc->req_alt_boot_device.device = device; 527 ioc->req_alt_boot_device.device = device;
511 ioc->req_alt_boot_device.is_raid = is_raid; 528 ioc->req_alt_boot_device.channel = channel;
512 } 529 }
513 } 530 }
514 531
@@ -523,7 +540,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
523 ioc->name, __func__, 540 ioc->name, __func__,
524 (unsigned long long)sas_address)); 541 (unsigned long long)sas_address));
525 ioc->current_boot_device.device = device; 542 ioc->current_boot_device.device = device;
526 ioc->current_boot_device.is_raid = is_raid; 543 ioc->current_boot_device.channel = channel;
527 } 544 }
528 } 545 }
529} 546}
@@ -536,7 +553,7 @@ __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
536 553
537 assert_spin_locked(&ioc->sas_device_lock); 554 assert_spin_locked(&ioc->sas_device_lock);
538 555
539 ret = tgt_priv->sdev; 556 ret = tgt_priv->sas_dev;
540 if (ret) 557 if (ret)
541 sas_device_get(ret); 558 sas_device_get(ret);
542 559
@@ -557,6 +574,44 @@ mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
557 return ret; 574 return ret;
558} 575}
559 576
577static struct _pcie_device *
578__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
579 struct MPT3SAS_TARGET *tgt_priv)
580{
581 struct _pcie_device *ret;
582
583 assert_spin_locked(&ioc->pcie_device_lock);
584
585 ret = tgt_priv->pcie_dev;
586 if (ret)
587 pcie_device_get(ret);
588
589 return ret;
590}
591
592/**
593 * mpt3sas_get_pdev_from_target - pcie device search
594 * @ioc: per adapter object
595 * @tgt_priv: starget private object
596 *
597 * Context: This function will acquire ioc->pcie_device_lock and will release
598 * before returning the pcie_device object.
599 *
600 * This searches for pcie_device from target, then return pcie_device object.
601 */
602static struct _pcie_device *
603mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
604 struct MPT3SAS_TARGET *tgt_priv)
605{
606 struct _pcie_device *ret;
607 unsigned long flags;
608
609 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
610 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
611 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
612
613 return ret;
614}
560 615
561struct _sas_device * 616struct _sas_device *
562__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, 617__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
@@ -636,7 +691,7 @@ found_device:
636 * This searches for sas_device based on sas_address, then return sas_device 691 * This searches for sas_device based on sas_address, then return sas_device
637 * object. 692 * object.
638 */ 693 */
639static struct _sas_device * 694struct _sas_device *
640mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 695mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
641{ 696{
642 struct _sas_device *sas_device; 697 struct _sas_device *sas_device;
@@ -650,6 +705,69 @@ mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
650} 705}
651 706
652/** 707/**
708 * _scsih_display_enclosure_chassis_info - display device location info
709 * @ioc: per adapter object
710 * @sas_device: per sas device object
711 * @sdev: scsi device struct
712 * @starget: scsi target struct
713 *
714 * Returns nothing.
715 */
716static void
717_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
718 struct _sas_device *sas_device, struct scsi_device *sdev,
719 struct scsi_target *starget)
720{
721 if (sdev) {
722 if (sas_device->enclosure_handle != 0)
723 sdev_printk(KERN_INFO, sdev,
724 "enclosure logical id (0x%016llx), slot(%d) \n",
725 (unsigned long long)
726 sas_device->enclosure_logical_id,
727 sas_device->slot);
728 if (sas_device->connector_name[0] != '\0')
729 sdev_printk(KERN_INFO, sdev,
730 "enclosure level(0x%04x), connector name( %s)\n",
731 sas_device->enclosure_level,
732 sas_device->connector_name);
733 if (sas_device->is_chassis_slot_valid)
734 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
735 sas_device->chassis_slot);
736 } else if (starget) {
737 if (sas_device->enclosure_handle != 0)
738 starget_printk(KERN_INFO, starget,
739 "enclosure logical id(0x%016llx), slot(%d) \n",
740 (unsigned long long)
741 sas_device->enclosure_logical_id,
742 sas_device->slot);
743 if (sas_device->connector_name[0] != '\0')
744 starget_printk(KERN_INFO, starget,
745 "enclosure level(0x%04x), connector name( %s)\n",
746 sas_device->enclosure_level,
747 sas_device->connector_name);
748 if (sas_device->is_chassis_slot_valid)
749 starget_printk(KERN_INFO, starget,
750 "chassis slot(0x%04x)\n",
751 sas_device->chassis_slot);
752 } else {
753 if (sas_device->enclosure_handle != 0)
754 pr_info(MPT3SAS_FMT
755 "enclosure logical id(0x%016llx), slot(%d) \n",
756 ioc->name, (unsigned long long)
757 sas_device->enclosure_logical_id,
758 sas_device->slot);
759 if (sas_device->connector_name[0] != '\0')
760 pr_info(MPT3SAS_FMT
761 "enclosure level(0x%04x), connector name( %s)\n",
762 ioc->name, sas_device->enclosure_level,
763 sas_device->connector_name);
764 if (sas_device->is_chassis_slot_valid)
765 pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
766 ioc->name, sas_device->chassis_slot);
767 }
768}
769
770/**
653 * _scsih_sas_device_remove - remove sas_device from list. 771 * _scsih_sas_device_remove - remove sas_device from list.
654 * @ioc: per adapter object 772 * @ioc: per adapter object
655 * @sas_device: the sas_device object 773 * @sas_device: the sas_device object
@@ -670,17 +788,7 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
670 ioc->name, sas_device->handle, 788 ioc->name, sas_device->handle,
671 (unsigned long long) sas_device->sas_address); 789 (unsigned long long) sas_device->sas_address);
672 790
673 if (sas_device->enclosure_handle != 0) 791 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
674 pr_info(MPT3SAS_FMT
675 "removing enclosure logical id(0x%016llx), slot(%d)\n",
676 ioc->name, (unsigned long long)
677 sas_device->enclosure_logical_id, sas_device->slot);
678
679 if (sas_device->connector_name[0] != '\0')
680 pr_info(MPT3SAS_FMT
681 "removing enclosure level(0x%04x), connector name( %s)\n",
682 ioc->name, sas_device->enclosure_level,
683 sas_device->connector_name);
684 792
685 /* 793 /*
686 * The lock serializes access to the list, but we still need to verify 794 * The lock serializes access to the list, but we still need to verify
@@ -772,17 +880,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
772 ioc->name, __func__, sas_device->handle, 880 ioc->name, __func__, sas_device->handle,
773 (unsigned long long)sas_device->sas_address)); 881 (unsigned long long)sas_device->sas_address));
774 882
775 if (sas_device->enclosure_handle != 0) 883 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
776 dewtprintk(ioc, pr_info(MPT3SAS_FMT 884 NULL, NULL));
777 "%s: enclosure logical id(0x%016llx), slot( %d)\n",
778 ioc->name, __func__, (unsigned long long)
779 sas_device->enclosure_logical_id, sas_device->slot));
780
781 if (sas_device->connector_name[0] != '\0')
782 dewtprintk(ioc, pr_info(MPT3SAS_FMT
783 "%s: enclosure level(0x%04x), connector name( %s)\n",
784 ioc->name, __func__,
785 sas_device->enclosure_level, sas_device->connector_name));
786 885
787 spin_lock_irqsave(&ioc->sas_device_lock, flags); 886 spin_lock_irqsave(&ioc->sas_device_lock, flags);
788 sas_device_get(sas_device); 887 sas_device_get(sas_device);
@@ -832,17 +931,8 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
832 __func__, sas_device->handle, 931 __func__, sas_device->handle,
833 (unsigned long long)sas_device->sas_address)); 932 (unsigned long long)sas_device->sas_address));
834 933
835 if (sas_device->enclosure_handle != 0) 934 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
836 dewtprintk(ioc, pr_info(MPT3SAS_FMT 935 NULL, NULL));
837 "%s: enclosure logical id(0x%016llx), slot( %d)\n",
838 ioc->name, __func__, (unsigned long long)
839 sas_device->enclosure_logical_id, sas_device->slot));
840
841 if (sas_device->connector_name[0] != '\0')
842 dewtprintk(ioc, pr_info(MPT3SAS_FMT
843 "%s: enclosure level(0x%04x), connector name( %s)\n",
844 ioc->name, __func__, sas_device->enclosure_level,
845 sas_device->connector_name));
846 936
847 spin_lock_irqsave(&ioc->sas_device_lock, flags); 937 spin_lock_irqsave(&ioc->sas_device_lock, flags);
848 sas_device_get(sas_device); 938 sas_device_get(sas_device);
@@ -851,6 +941,282 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
851 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 941 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
852} 942}
853 943
944
945static struct _pcie_device *
946__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
947{
948 struct _pcie_device *pcie_device;
949
950 assert_spin_locked(&ioc->pcie_device_lock);
951
952 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
953 if (pcie_device->wwid == wwid)
954 goto found_device;
955
956 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
957 if (pcie_device->wwid == wwid)
958 goto found_device;
959
960 return NULL;
961
962found_device:
963 pcie_device_get(pcie_device);
964 return pcie_device;
965}
966
967
968/**
969 * mpt3sas_get_pdev_by_wwid - pcie device search
970 * @ioc: per adapter object
971 * @wwid: wwid
972 *
973 * Context: This function will acquire ioc->pcie_device_lock and will release
974 * before returning the pcie_device object.
975 *
976 * This searches for pcie_device based on wwid, then return pcie_device object.
977 */
978static struct _pcie_device *
979mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
980{
981 struct _pcie_device *pcie_device;
982 unsigned long flags;
983
984 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
985 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
986 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
987
988 return pcie_device;
989}
990
991
992static struct _pcie_device *
993__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
994 int channel)
995{
996 struct _pcie_device *pcie_device;
997
998 assert_spin_locked(&ioc->pcie_device_lock);
999
1000 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1001 if (pcie_device->id == id && pcie_device->channel == channel)
1002 goto found_device;
1003
1004 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1005 if (pcie_device->id == id && pcie_device->channel == channel)
1006 goto found_device;
1007
1008 return NULL;
1009
1010found_device:
1011 pcie_device_get(pcie_device);
1012 return pcie_device;
1013}
1014
1015static struct _pcie_device *
1016__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1017{
1018 struct _pcie_device *pcie_device;
1019
1020 assert_spin_locked(&ioc->pcie_device_lock);
1021
1022 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1023 if (pcie_device->handle == handle)
1024 goto found_device;
1025
1026 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1027 if (pcie_device->handle == handle)
1028 goto found_device;
1029
1030 return NULL;
1031
1032found_device:
1033 pcie_device_get(pcie_device);
1034 return pcie_device;
1035}
1036
1037
1038/**
1039 * mpt3sas_get_pdev_by_handle - pcie device search
1040 * @ioc: per adapter object
1041 * @handle: Firmware device handle
1042 *
1043 * Context: This function will acquire ioc->pcie_device_lock and will release
1044 * before returning the pcie_device object.
1045 *
1046 * This searches for pcie_device based on handle, then return pcie_device
1047 * object.
1048 */
1049struct _pcie_device *
1050mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1051{
1052 struct _pcie_device *pcie_device;
1053 unsigned long flags;
1054
1055 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1056 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1057 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1058
1059 return pcie_device;
1060}
1061
1062/**
1063 * _scsih_pcie_device_remove - remove pcie_device from list.
1064 * @ioc: per adapter object
1065 * @pcie_device: the pcie_device object
1066 * Context: This function will acquire ioc->pcie_device_lock.
1067 *
1068 * If pcie_device is on the list, remove it and decrement its reference count.
1069 */
1070static void
1071_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1072 struct _pcie_device *pcie_device)
1073{
1074 unsigned long flags;
1075 int was_on_pcie_device_list = 0;
1076
1077 if (!pcie_device)
1078 return;
1079 pr_info(MPT3SAS_FMT
1080 "removing handle(0x%04x), wwid(0x%016llx)\n",
1081 ioc->name, pcie_device->handle,
1082 (unsigned long long) pcie_device->wwid);
1083 if (pcie_device->enclosure_handle != 0)
1084 pr_info(MPT3SAS_FMT
1085 "removing enclosure logical id(0x%016llx), slot(%d)\n",
1086 ioc->name,
1087 (unsigned long long)pcie_device->enclosure_logical_id,
1088 pcie_device->slot);
1089 if (pcie_device->connector_name[0] != '\0')
1090 pr_info(MPT3SAS_FMT
1091 "removing enclosure level(0x%04x), connector name( %s)\n",
1092 ioc->name, pcie_device->enclosure_level,
1093 pcie_device->connector_name);
1094
1095 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1096 if (!list_empty(&pcie_device->list)) {
1097 list_del_init(&pcie_device->list);
1098 was_on_pcie_device_list = 1;
1099 }
1100 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1101 if (was_on_pcie_device_list) {
1102 kfree(pcie_device->serial_number);
1103 pcie_device_put(pcie_device);
1104 }
1105}
1106
1107
1108/**
1109 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1110 * @ioc: per adapter object
1111 * @handle: device handle
1112 *
1113 * Return nothing.
1114 */
1115static void
1116_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1117{
1118 struct _pcie_device *pcie_device;
1119 unsigned long flags;
1120 int was_on_pcie_device_list = 0;
1121
1122 if (ioc->shost_recovery)
1123 return;
1124
1125 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1126 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1127 if (pcie_device) {
1128 if (!list_empty(&pcie_device->list)) {
1129 list_del_init(&pcie_device->list);
1130 was_on_pcie_device_list = 1;
1131 pcie_device_put(pcie_device);
1132 }
1133 }
1134 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1135 if (was_on_pcie_device_list) {
1136 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1137 pcie_device_put(pcie_device);
1138 }
1139}
1140
1141/**
1142 * _scsih_pcie_device_add - add pcie_device object
1143 * @ioc: per adapter object
1144 * @pcie_device: pcie_device object
1145 *
1146 * This is added to the pcie_device_list link list.
1147 */
1148static void
1149_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1150 struct _pcie_device *pcie_device)
1151{
1152 unsigned long flags;
1153
1154 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1155 "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
1156 pcie_device->handle, (unsigned long long)pcie_device->wwid));
1157 if (pcie_device->enclosure_handle != 0)
1158 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1159 "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1160 ioc->name, __func__,
1161 (unsigned long long)pcie_device->enclosure_logical_id,
1162 pcie_device->slot));
1163 if (pcie_device->connector_name[0] != '\0')
1164 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1165 "%s: enclosure level(0x%04x), connector name( %s)\n",
1166 ioc->name, __func__, pcie_device->enclosure_level,
1167 pcie_device->connector_name));
1168
1169 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1170 pcie_device_get(pcie_device);
1171 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1172 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1173
1174 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1175 _scsih_pcie_device_remove(ioc, pcie_device);
1176 } else if (!pcie_device->starget) {
1177 if (!ioc->is_driver_loading) {
1178/*TODO-- Need to find out whether this condition will occur or not*/
1179 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1180 }
1181 } else
1182 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1183}
1184
1185/*
1186 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1187 * @ioc: per adapter object
1188 * @pcie_device: the pcie_device object
1189 * Context: This function will acquire ioc->pcie_device_lock.
1190 *
1191 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1192 */
1193static void
1194_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1195 struct _pcie_device *pcie_device)
1196{
1197 unsigned long flags;
1198
1199 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1200 "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
1201 pcie_device->handle, (unsigned long long)pcie_device->wwid));
1202 if (pcie_device->enclosure_handle != 0)
1203 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1204 "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1205 ioc->name, __func__,
1206 (unsigned long long)pcie_device->enclosure_logical_id,
1207 pcie_device->slot));
1208 if (pcie_device->connector_name[0] != '\0')
1209 dewtprintk(ioc, pr_info(MPT3SAS_FMT
1210 "%s: enclosure level(0x%04x), connector name( %s)\n",
1211 ioc->name, __func__, pcie_device->enclosure_level,
1212 pcie_device->connector_name));
1213
1214 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1215 pcie_device_get(pcie_device);
1216 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1217 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1218 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1219}
854/** 1220/**
855 * _scsih_raid_device_find_by_id - raid device search 1221 * _scsih_raid_device_find_by_id - raid device search
856 * @ioc: per adapter object 1222 * @ioc: per adapter object
@@ -1062,6 +1428,23 @@ _scsih_is_end_device(u32 device_info)
1062} 1428}
1063 1429
1064/** 1430/**
1431 * _scsih_is_nvme_device - determines if device is an nvme device
1432 * @device_info: bitfield providing information about the device.
1433 * Context: none
1434 *
1435 * Returns 1 if nvme device.
1436 */
1437static int
1438_scsih_is_nvme_device(u32 device_info)
1439{
1440 if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1441 == MPI26_PCIE_DEVINFO_NVME)
1442 return 1;
1443 else
1444 return 0;
1445}
1446
1447/**
1065 * _scsih_scsi_lookup_get - returns scmd entry 1448 * _scsih_scsi_lookup_get - returns scmd entry
1066 * @ioc: per adapter object 1449 * @ioc: per adapter object
1067 * @smid: system request message index 1450 * @smid: system request message index
@@ -1278,6 +1661,7 @@ scsih_target_alloc(struct scsi_target *starget)
1278 struct MPT3SAS_TARGET *sas_target_priv_data; 1661 struct MPT3SAS_TARGET *sas_target_priv_data;
1279 struct _sas_device *sas_device; 1662 struct _sas_device *sas_device;
1280 struct _raid_device *raid_device; 1663 struct _raid_device *raid_device;
1664 struct _pcie_device *pcie_device;
1281 unsigned long flags; 1665 unsigned long flags;
1282 struct sas_rphy *rphy; 1666 struct sas_rphy *rphy;
1283 1667
@@ -1307,6 +1691,28 @@ scsih_target_alloc(struct scsi_target *starget)
1307 return 0; 1691 return 0;
1308 } 1692 }
1309 1693
1694 /* PCIe devices */
1695 if (starget->channel == PCIE_CHANNEL) {
1696 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1697 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1698 starget->channel);
1699 if (pcie_device) {
1700 sas_target_priv_data->handle = pcie_device->handle;
1701 sas_target_priv_data->sas_address = pcie_device->wwid;
1702 sas_target_priv_data->pcie_dev = pcie_device;
1703 pcie_device->starget = starget;
1704 pcie_device->id = starget->id;
1705 pcie_device->channel = starget->channel;
1706 sas_target_priv_data->flags |=
1707 MPT_TARGET_FLAGS_PCIE_DEVICE;
1708 if (pcie_device->fast_path)
1709 sas_target_priv_data->flags |=
1710 MPT_TARGET_FASTPATH_IO;
1711 }
1712 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1713 return 0;
1714 }
1715
1310 /* sas/sata devices */ 1716 /* sas/sata devices */
1311 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1717 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1312 rphy = dev_to_rphy(starget->dev.parent); 1718 rphy = dev_to_rphy(starget->dev.parent);
@@ -1316,7 +1722,7 @@ scsih_target_alloc(struct scsi_target *starget)
1316 if (sas_device) { 1722 if (sas_device) {
1317 sas_target_priv_data->handle = sas_device->handle; 1723 sas_target_priv_data->handle = sas_device->handle;
1318 sas_target_priv_data->sas_address = sas_device->sas_address; 1724 sas_target_priv_data->sas_address = sas_device->sas_address;
1319 sas_target_priv_data->sdev = sas_device; 1725 sas_target_priv_data->sas_dev = sas_device;
1320 sas_device->starget = starget; 1726 sas_device->starget = starget;
1321 sas_device->id = starget->id; 1727 sas_device->id = starget->id;
1322 sas_device->channel = starget->channel; 1728 sas_device->channel = starget->channel;
@@ -1324,7 +1730,8 @@ scsih_target_alloc(struct scsi_target *starget)
1324 sas_target_priv_data->flags |= 1730 sas_target_priv_data->flags |=
1325 MPT_TARGET_FLAGS_RAID_COMPONENT; 1731 MPT_TARGET_FLAGS_RAID_COMPONENT;
1326 if (sas_device->fast_path) 1732 if (sas_device->fast_path)
1327 sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO; 1733 sas_target_priv_data->flags |=
1734 MPT_TARGET_FASTPATH_IO;
1328 } 1735 }
1329 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1736 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1330 1737
@@ -1345,7 +1752,9 @@ scsih_target_destroy(struct scsi_target *starget)
1345 struct MPT3SAS_TARGET *sas_target_priv_data; 1752 struct MPT3SAS_TARGET *sas_target_priv_data;
1346 struct _sas_device *sas_device; 1753 struct _sas_device *sas_device;
1347 struct _raid_device *raid_device; 1754 struct _raid_device *raid_device;
1755 struct _pcie_device *pcie_device;
1348 unsigned long flags; 1756 unsigned long flags;
1757 struct sas_rphy *rphy;
1349 1758
1350 sas_target_priv_data = starget->hostdata; 1759 sas_target_priv_data = starget->hostdata;
1351 if (!sas_target_priv_data) 1760 if (!sas_target_priv_data)
@@ -1363,7 +1772,29 @@ scsih_target_destroy(struct scsi_target *starget)
1363 goto out; 1772 goto out;
1364 } 1773 }
1365 1774
1775 if (starget->channel == PCIE_CHANNEL) {
1776 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1777 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1778 sas_target_priv_data);
1779 if (pcie_device && (pcie_device->starget == starget) &&
1780 (pcie_device->id == starget->id) &&
1781 (pcie_device->channel == starget->channel))
1782 pcie_device->starget = NULL;
1783
1784 if (pcie_device) {
1785 /*
1786 * Corresponding get() is in _scsih_target_alloc()
1787 */
1788 sas_target_priv_data->pcie_dev = NULL;
1789 pcie_device_put(pcie_device);
1790 pcie_device_put(pcie_device);
1791 }
1792 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1793 goto out;
1794 }
1795
1366 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1796 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1797 rphy = dev_to_rphy(starget->dev.parent);
1367 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); 1798 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1368 if (sas_device && (sas_device->starget == starget) && 1799 if (sas_device && (sas_device->starget == starget) &&
1369 (sas_device->id == starget->id) && 1800 (sas_device->id == starget->id) &&
@@ -1374,7 +1805,7 @@ scsih_target_destroy(struct scsi_target *starget)
1374 /* 1805 /*
1375 * Corresponding get() is in _scsih_target_alloc() 1806 * Corresponding get() is in _scsih_target_alloc()
1376 */ 1807 */
1377 sas_target_priv_data->sdev = NULL; 1808 sas_target_priv_data->sas_dev = NULL;
1378 sas_device_put(sas_device); 1809 sas_device_put(sas_device);
1379 1810
1380 sas_device_put(sas_device); 1811 sas_device_put(sas_device);
@@ -1403,6 +1834,7 @@ scsih_slave_alloc(struct scsi_device *sdev)
1403 struct scsi_target *starget; 1834 struct scsi_target *starget;
1404 struct _raid_device *raid_device; 1835 struct _raid_device *raid_device;
1405 struct _sas_device *sas_device; 1836 struct _sas_device *sas_device;
1837 struct _pcie_device *pcie_device;
1406 unsigned long flags; 1838 unsigned long flags;
1407 1839
1408 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), 1840 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
@@ -1431,8 +1863,22 @@ scsih_slave_alloc(struct scsi_device *sdev)
1431 raid_device->sdev = sdev; /* raid is single lun */ 1863 raid_device->sdev = sdev; /* raid is single lun */
1432 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1864 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1433 } 1865 }
1866 if (starget->channel == PCIE_CHANNEL) {
1867 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1868 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1869 sas_target_priv_data->sas_address);
1870 if (pcie_device && (pcie_device->starget == NULL)) {
1871 sdev_printk(KERN_INFO, sdev,
1872 "%s : pcie_device->starget set to starget @ %d\n",
1873 __func__, __LINE__);
1874 pcie_device->starget = starget;
1875 }
1876
1877 if (pcie_device)
1878 pcie_device_put(pcie_device);
1879 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1434 1880
1435 if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { 1881 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1436 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1882 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1437 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 1883 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1438 sas_target_priv_data->sas_address); 1884 sas_target_priv_data->sas_address);
@@ -1466,6 +1912,7 @@ scsih_slave_destroy(struct scsi_device *sdev)
1466 struct Scsi_Host *shost; 1912 struct Scsi_Host *shost;
1467 struct MPT3SAS_ADAPTER *ioc; 1913 struct MPT3SAS_ADAPTER *ioc;
1468 struct _sas_device *sas_device; 1914 struct _sas_device *sas_device;
1915 struct _pcie_device *pcie_device;
1469 unsigned long flags; 1916 unsigned long flags;
1470 1917
1471 if (!sdev->hostdata) 1918 if (!sdev->hostdata)
@@ -1478,7 +1925,19 @@ scsih_slave_destroy(struct scsi_device *sdev)
1478 shost = dev_to_shost(&starget->dev); 1925 shost = dev_to_shost(&starget->dev);
1479 ioc = shost_priv(shost); 1926 ioc = shost_priv(shost);
1480 1927
1481 if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { 1928 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1929 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1930 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1931 sas_target_priv_data);
1932 if (pcie_device && !sas_target_priv_data->num_luns)
1933 pcie_device->starget = NULL;
1934
1935 if (pcie_device)
1936 pcie_device_put(pcie_device);
1937
1938 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1939
1940 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1482 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1941 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1483 sas_device = __mpt3sas_get_sdev_from_target(ioc, 1942 sas_device = __mpt3sas_get_sdev_from_target(ioc,
1484 sas_target_priv_data); 1943 sas_target_priv_data);
@@ -1562,6 +2021,14 @@ scsih_is_raid(struct device *dev)
1562 return (sdev->channel == RAID_CHANNEL) ? 1 : 0; 2021 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1563} 2022}
1564 2023
2024static int
2025scsih_is_nvme(struct device *dev)
2026{
2027 struct scsi_device *sdev = to_scsi_device(dev);
2028
2029 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2030}
2031
1565/** 2032/**
1566 * scsih_get_resync - get raid volume resync percent complete 2033 * scsih_get_resync - get raid volume resync percent complete
1567 * @dev the device struct object 2034 * @dev the device struct object
@@ -1837,6 +2304,7 @@ scsih_slave_configure(struct scsi_device *sdev)
1837 struct MPT3SAS_DEVICE *sas_device_priv_data; 2304 struct MPT3SAS_DEVICE *sas_device_priv_data;
1838 struct MPT3SAS_TARGET *sas_target_priv_data; 2305 struct MPT3SAS_TARGET *sas_target_priv_data;
1839 struct _sas_device *sas_device; 2306 struct _sas_device *sas_device;
2307 struct _pcie_device *pcie_device;
1840 struct _raid_device *raid_device; 2308 struct _raid_device *raid_device;
1841 unsigned long flags; 2309 unsigned long flags;
1842 int qdepth; 2310 int qdepth;
@@ -1967,6 +2435,55 @@ scsih_slave_configure(struct scsi_device *sdev)
1967 } 2435 }
1968 } 2436 }
1969 2437
2438 /* PCIe handling */
2439 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2440 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2441 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2442 sas_device_priv_data->sas_target->sas_address);
2443 if (!pcie_device) {
2444 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2445 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
2446 "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
2447 __LINE__, __func__));
2448 return 1;
2449 }
2450
2451 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2452 ds = "NVMe";
2453 sdev_printk(KERN_INFO, sdev,
2454 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2455 ds, handle, (unsigned long long)pcie_device->wwid,
2456 pcie_device->port_num);
2457 if (pcie_device->enclosure_handle != 0)
2458 sdev_printk(KERN_INFO, sdev,
2459 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2460 ds,
2461 (unsigned long long)pcie_device->enclosure_logical_id,
2462 pcie_device->slot);
2463 if (pcie_device->connector_name[0] != '\0')
2464 sdev_printk(KERN_INFO, sdev,
2465 "%s: enclosure level(0x%04x),"
2466 "connector name( %s)\n", ds,
2467 pcie_device->enclosure_level,
2468 pcie_device->connector_name);
2469 pcie_device_put(pcie_device);
2470 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2471 scsih_change_queue_depth(sdev, qdepth);
2472
2473 if (pcie_device->nvme_mdts)
2474 blk_queue_max_hw_sectors(sdev->request_queue,
2475 pcie_device->nvme_mdts/512);
2476 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2477 ** merged and can eliminate holes created during merging
2478 ** operation.
2479 **/
2480 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES,
2481 sdev->request_queue);
2482 blk_queue_virt_boundary(sdev->request_queue,
2483 ioc->page_size - 1);
2484 return 0;
2485 }
2486
1970 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2487 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1971 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 2488 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1972 sas_device_priv_data->sas_target->sas_address); 2489 sas_device_priv_data->sas_target->sas_address);
@@ -2005,16 +2522,8 @@ scsih_slave_configure(struct scsi_device *sdev)
2005 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", 2522 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2006 ds, handle, (unsigned long long)sas_device->sas_address, 2523 ds, handle, (unsigned long long)sas_device->sas_address,
2007 sas_device->phy, (unsigned long long)sas_device->device_name); 2524 sas_device->phy, (unsigned long long)sas_device->device_name);
2008 if (sas_device->enclosure_handle != 0) 2525
2009 sdev_printk(KERN_INFO, sdev, 2526 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2010 "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
2011 ds, (unsigned long long)
2012 sas_device->enclosure_logical_id, sas_device->slot);
2013 if (sas_device->connector_name[0] != '\0')
2014 sdev_printk(KERN_INFO, sdev,
2015 "%s: enclosure level(0x%04x), connector name( %s)\n",
2016 ds, sas_device->enclosure_level,
2017 sas_device->connector_name);
2018 2527
2019 sas_device_put(sas_device); 2528 sas_device_put(sas_device);
2020 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2529 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -2400,6 +2909,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2400 struct scsi_target *starget = scmd->device->sdev_target; 2909 struct scsi_target *starget = scmd->device->sdev_target;
2401 struct MPT3SAS_TARGET *priv_target = starget->hostdata; 2910 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
2402 struct _sas_device *sas_device = NULL; 2911 struct _sas_device *sas_device = NULL;
2912 struct _pcie_device *pcie_device = NULL;
2403 unsigned long flags; 2913 unsigned long flags;
2404 char *device_str = NULL; 2914 char *device_str = NULL;
2405 2915
@@ -2416,6 +2926,31 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2416 "%s handle(0x%04x), %s wwid(0x%016llx)\n", 2926 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
2417 device_str, priv_target->handle, 2927 device_str, priv_target->handle,
2418 device_str, (unsigned long long)priv_target->sas_address); 2928 device_str, (unsigned long long)priv_target->sas_address);
2929
2930 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2931 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2932 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
2933 if (pcie_device) {
2934 starget_printk(KERN_INFO, starget,
2935 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2936 pcie_device->handle,
2937 (unsigned long long)pcie_device->wwid,
2938 pcie_device->port_num);
2939 if (pcie_device->enclosure_handle != 0)
2940 starget_printk(KERN_INFO, starget,
2941 "enclosure logical id(0x%016llx), slot(%d)\n",
2942 (unsigned long long)
2943 pcie_device->enclosure_logical_id,
2944 pcie_device->slot);
2945 if (pcie_device->connector_name[0] != '\0')
2946 starget_printk(KERN_INFO, starget,
2947 "enclosure level(0x%04x), connector name( %s)\n",
2948 pcie_device->enclosure_level,
2949 pcie_device->connector_name);
2950 pcie_device_put(pcie_device);
2951 }
2952 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2953
2419 } else { 2954 } else {
2420 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2955 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2421 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target); 2956 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
@@ -2433,17 +2968,9 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2433 sas_device->handle, 2968 sas_device->handle,
2434 (unsigned long long)sas_device->sas_address, 2969 (unsigned long long)sas_device->sas_address,
2435 sas_device->phy); 2970 sas_device->phy);
2436 if (sas_device->enclosure_handle != 0) 2971
2437 starget_printk(KERN_INFO, starget, 2972 _scsih_display_enclosure_chassis_info(NULL, sas_device,
2438 "enclosure_logical_id(0x%016llx), slot(%d)\n", 2973 NULL, starget);
2439 (unsigned long long)
2440 sas_device->enclosure_logical_id,
2441 sas_device->slot);
2442 if (sas_device->connector_name[0] != '\0')
2443 starget_printk(KERN_INFO, starget,
2444 "enclosure level(0x%04x),connector name(%s)\n",
2445 sas_device->enclosure_level,
2446 sas_device->connector_name);
2447 2974
2448 sas_device_put(sas_device); 2975 sas_device_put(sas_device);
2449 } 2976 }
@@ -3007,8 +3534,6 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3007 struct _sas_device *sas_device; 3534 struct _sas_device *sas_device;
3008 3535
3009 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 3536 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3010 if (!sas_device)
3011 return;
3012 3537
3013 shost_for_each_device(sdev, ioc->shost) { 3538 shost_for_each_device(sdev, ioc->shost) {
3014 sas_device_priv_data = sdev->hostdata; 3539 sas_device_priv_data = sdev->hostdata;
@@ -3018,7 +3543,7 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3018 continue; 3543 continue;
3019 if (sas_device_priv_data->block) 3544 if (sas_device_priv_data->block)
3020 continue; 3545 continue;
3021 if (sas_device->pend_sas_rphy_add) 3546 if (sas_device && sas_device->pend_sas_rphy_add)
3022 continue; 3547 continue;
3023 if (sas_device_priv_data->ignore_delay_remove) { 3548 if (sas_device_priv_data->ignore_delay_remove) {
3024 sdev_printk(KERN_INFO, sdev, 3549 sdev_printk(KERN_INFO, sdev,
@@ -3029,7 +3554,8 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3029 _scsih_internal_device_block(sdev, sas_device_priv_data); 3554 _scsih_internal_device_block(sdev, sas_device_priv_data);
3030 } 3555 }
3031 3556
3032 sas_device_put(sas_device); 3557 if (sas_device)
3558 sas_device_put(sas_device);
3033} 3559}
3034 3560
3035/** 3561/**
@@ -3113,6 +3639,33 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3113} 3639}
3114 3640
3115/** 3641/**
3642 * _scsih_block_io_to_pcie_children_attached_directly
3643 * @ioc: per adapter object
3644 * @event_data: topology change event data
3645 *
3646 * This routine set sdev state to SDEV_BLOCK for all devices
3647 * direct attached during device pull/reconnect.
3648 */
3649static void
3650_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3651 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3652{
3653 int i;
3654 u16 handle;
3655 u16 reason_code;
3656
3657 for (i = 0; i < event_data->NumEntries; i++) {
3658 handle =
3659 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3660 if (!handle)
3661 continue;
3662 reason_code = event_data->PortEntry[i].PortStatus;
3663 if (reason_code ==
3664 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3665 _scsih_block_io_device(ioc, handle);
3666 }
3667}
3668/**
3116 * _scsih_tm_tr_send - send task management request 3669 * _scsih_tm_tr_send - send task management request
3117 * @ioc: per adapter object 3670 * @ioc: per adapter object
3118 * @handle: device handle 3671 * @handle: device handle
@@ -3133,18 +3686,14 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3133 Mpi2SCSITaskManagementRequest_t *mpi_request; 3686 Mpi2SCSITaskManagementRequest_t *mpi_request;
3134 u16 smid; 3687 u16 smid;
3135 struct _sas_device *sas_device = NULL; 3688 struct _sas_device *sas_device = NULL;
3689 struct _pcie_device *pcie_device = NULL;
3136 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 3690 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3137 u64 sas_address = 0; 3691 u64 sas_address = 0;
3138 unsigned long flags; 3692 unsigned long flags;
3139 struct _tr_list *delayed_tr; 3693 struct _tr_list *delayed_tr;
3140 u32 ioc_state; 3694 u32 ioc_state;
3141 3695
3142 if (ioc->remove_host) { 3696 if (ioc->pci_error_recovery) {
3143 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3144 "%s: host has been removed: handle(0x%04x)\n",
3145 __func__, ioc->name, handle));
3146 return;
3147 } else if (ioc->pci_error_recovery) {
3148 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3697 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3149 "%s: host in pci error recovery: handle(0x%04x)\n", 3698 "%s: host in pci error recovery: handle(0x%04x)\n",
3150 __func__, ioc->name, 3699 __func__, ioc->name,
@@ -3175,24 +3724,52 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3175 sas_address = sas_device->sas_address; 3724 sas_address = sas_device->sas_address;
3176 } 3725 }
3177 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 3726 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3178 3727 if (!sas_device) {
3728 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3729 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3730 if (pcie_device && pcie_device->starget &&
3731 pcie_device->starget->hostdata) {
3732 sas_target_priv_data = pcie_device->starget->hostdata;
3733 sas_target_priv_data->deleted = 1;
3734 sas_address = pcie_device->wwid;
3735 }
3736 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3737 }
3179 if (sas_target_priv_data) { 3738 if (sas_target_priv_data) {
3180 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3739 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3181 "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", 3740 "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3182 ioc->name, handle, 3741 ioc->name, handle,
3183 (unsigned long long)sas_address)); 3742 (unsigned long long)sas_address));
3184 if (sas_device->enclosure_handle != 0) 3743 if (sas_device) {
3185 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3744 if (sas_device->enclosure_handle != 0)
3186 "setting delete flag:enclosure logical id(0x%016llx)," 3745 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3187 " slot(%d)\n", ioc->name, (unsigned long long) 3746 "setting delete flag:enclosure logical "
3188 sas_device->enclosure_logical_id, 3747 "id(0x%016llx), slot(%d)\n", ioc->name,
3189 sas_device->slot)); 3748 (unsigned long long)
3190 if (sas_device->connector_name[0] != '\0') 3749 sas_device->enclosure_logical_id,
3191 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3750 sas_device->slot));
3192 "setting delete flag: enclosure level(0x%04x)," 3751 if (sas_device->connector_name[0] != '\0')
3193 " connector name( %s)\n", ioc->name, 3752 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3194 sas_device->enclosure_level, 3753 "setting delete flag: enclosure "
3195 sas_device->connector_name)); 3754 "level(0x%04x), connector name( %s)\n",
3755 ioc->name, sas_device->enclosure_level,
3756 sas_device->connector_name));
3757 } else if (pcie_device) {
3758 if (pcie_device->enclosure_handle != 0)
3759 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3760 "setting delete flag: logical "
3761 "id(0x%016llx), slot(%d)\n", ioc->name,
3762 (unsigned long long)
3763 pcie_device->enclosure_logical_id,
3764 pcie_device->slot));
3765 if (pcie_device->connector_name[0] != '\0')
3766 dewtprintk(ioc, pr_info(MPT3SAS_FMT
3767 "setting delete flag:, enclosure "
3768 "level(0x%04x), "
3769 "connector name( %s)\n", ioc->name,
3770 pcie_device->enclosure_level,
3771 pcie_device->connector_name));
3772 }
3196 _scsih_ublock_io_device(ioc, sas_address); 3773 _scsih_ublock_io_device(ioc, sas_address);
3197 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; 3774 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3198 } 3775 }
@@ -3227,6 +3804,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3227out: 3804out:
3228 if (sas_device) 3805 if (sas_device)
3229 sas_device_put(sas_device); 3806 sas_device_put(sas_device);
3807 if (pcie_device)
3808 pcie_device_put(pcie_device);
3230} 3809}
3231 3810
3232/** 3811/**
@@ -3731,6 +4310,81 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
3731} 4310}
3732 4311
3733/** 4312/**
4313 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4314 * events
4315 * @ioc: per adapter object
4316 * @event_data: the event data payload
4317 *
4318 * This handles the case where driver receives multiple switch
4319 * or device add and delete events in a single shot. When there
4320 * is a delete event the routine will void any pending add
4321 * events waiting in the event queue.
4322 *
4323 * Return nothing.
4324 */
4325static void
4326_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4327 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4328{
4329 struct fw_event_work *fw_event;
4330 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4331 unsigned long flags;
4332 int i, reason_code;
4333 u16 handle, switch_handle;
4334
4335 for (i = 0; i < event_data->NumEntries; i++) {
4336 handle =
4337 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4338 if (!handle)
4339 continue;
4340 reason_code = event_data->PortEntry[i].PortStatus;
4341 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4342 _scsih_tm_tr_send(ioc, handle);
4343 }
4344
4345 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4346 if (!switch_handle) {
4347 _scsih_block_io_to_pcie_children_attached_directly(
4348 ioc, event_data);
4349 return;
4350 }
4351 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4352 if ((event_data->SwitchStatus
4353 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4354 (event_data->SwitchStatus ==
4355 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4356 _scsih_block_io_to_pcie_children_attached_directly(
4357 ioc, event_data);
4358
4359 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4360 return;
4361
4362 /* mark ignore flag for pending events */
4363 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4364 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4365 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4366 fw_event->ignore)
4367 continue;
4368 local_event_data =
4369 (Mpi26EventDataPCIeTopologyChangeList_t *)
4370 fw_event->event_data;
4371 if (local_event_data->SwitchStatus ==
4372 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4373 local_event_data->SwitchStatus ==
4374 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4375 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4376 switch_handle) {
4377 dewtprintk(ioc, pr_info(MPT3SAS_FMT
4378 "setting ignoring flag for switch event\n",
4379 ioc->name));
4380 fw_event->ignore = 1;
4381 }
4382 }
4383 }
4384 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4385}
4386
4387/**
3734 * _scsih_set_volume_delete_flag - setting volume delete flag 4388 * _scsih_set_volume_delete_flag - setting volume delete flag
3735 * @ioc: per adapter object 4389 * @ioc: per adapter object
3736 * @handle: device handle 4390 * @handle: device handle
@@ -3979,7 +4633,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
3979 */ 4633 */
3980static void 4634static void
3981_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, 4635_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3982 Mpi2SCSIIORequest_t *mpi_request) 4636 Mpi25SCSIIORequest_t *mpi_request)
3983{ 4637{
3984 u16 eedp_flags; 4638 u16 eedp_flags;
3985 unsigned char prot_op = scsi_get_prot_op(scmd); 4639 unsigned char prot_op = scsi_get_prot_op(scmd);
@@ -4082,7 +4736,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4082 struct _raid_device *raid_device; 4736 struct _raid_device *raid_device;
4083 struct request *rq = scmd->request; 4737 struct request *rq = scmd->request;
4084 int class; 4738 int class;
4085 Mpi2SCSIIORequest_t *mpi_request; 4739 Mpi25SCSIIORequest_t *mpi_request;
4740 struct _pcie_device *pcie_device = NULL;
4086 u32 mpi_control; 4741 u32 mpi_control;
4087 u16 smid; 4742 u16 smid;
4088 u16 handle; 4743 u16 handle;
@@ -4159,8 +4814,9 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4159 /* Make sure Device is not raid volume. 4814 /* Make sure Device is not raid volume.
4160 * We do not expose raid functionality to upper layer for warpdrive. 4815 * We do not expose raid functionality to upper layer for warpdrive.
4161 */ 4816 */
4162 if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev) 4817 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
4163 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) 4818 && !scsih_is_nvme(&scmd->device->sdev_gendev))
4819 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
4164 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 4820 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
4165 4821
4166 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 4822 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
@@ -4170,7 +4826,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4170 goto out; 4826 goto out;
4171 } 4827 }
4172 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4828 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4173 memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); 4829 memset(mpi_request, 0, ioc->request_sz);
4174 _scsih_setup_eedp(ioc, scmd, mpi_request); 4830 _scsih_setup_eedp(ioc, scmd, mpi_request);
4175 4831
4176 if (scmd->cmd_len == 32) 4832 if (scmd->cmd_len == 32)
@@ -4189,13 +4845,14 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4189 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 4845 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
4190 mpi_request->SenseBufferLowAddress = 4846 mpi_request->SenseBufferLowAddress =
4191 mpt3sas_base_get_sense_buffer_dma(ioc, smid); 4847 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
4192 mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; 4848 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
4193 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) 4849 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
4194 mpi_request->LUN); 4850 mpi_request->LUN);
4195 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 4851 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4196 4852
4197 if (mpi_request->DataLength) { 4853 if (mpi_request->DataLength) {
4198 if (ioc->build_sg_scmd(ioc, scmd, smid)) { 4854 pcie_device = sas_target_priv_data->pcie_dev;
4855 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4199 mpt3sas_base_free_smid(ioc, smid); 4856 mpt3sas_base_free_smid(ioc, smid);
4200 goto out; 4857 goto out;
4201 } 4858 }
@@ -4204,8 +4861,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4204 4861
4205 raid_device = sas_target_priv_data->raid_device; 4862 raid_device = sas_target_priv_data->raid_device;
4206 if (raid_device && raid_device->direct_io_enabled) 4863 if (raid_device && raid_device->direct_io_enabled)
4207 mpt3sas_setup_direct_io(ioc, scmd, raid_device, mpi_request, 4864 mpt3sas_setup_direct_io(ioc, scmd,
4208 smid); 4865 raid_device, mpi_request, smid);
4209 4866
4210 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { 4867 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4211 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { 4868 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@ -4273,6 +4930,7 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4273 char *desc_scsi_state = ioc->tmp_string; 4930 char *desc_scsi_state = ioc->tmp_string;
4274 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); 4931 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4275 struct _sas_device *sas_device = NULL; 4932 struct _sas_device *sas_device = NULL;
4933 struct _pcie_device *pcie_device = NULL;
4276 struct scsi_target *starget = scmd->device->sdev_target; 4934 struct scsi_target *starget = scmd->device->sdev_target;
4277 struct MPT3SAS_TARGET *priv_target = starget->hostdata; 4935 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
4278 char *device_str = NULL; 4936 char *device_str = NULL;
@@ -4405,6 +5063,28 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4405 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { 5063 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
4406 pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name, 5064 pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
4407 device_str, (unsigned long long)priv_target->sas_address); 5065 device_str, (unsigned long long)priv_target->sas_address);
5066 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5067 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5068 if (pcie_device) {
5069 pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
5070 ioc->name,
5071 (unsigned long long)pcie_device->wwid,
5072 pcie_device->port_num);
5073 if (pcie_device->enclosure_handle != 0)
5074 pr_info(MPT3SAS_FMT
5075 "\tenclosure logical id(0x%016llx), "
5076 "slot(%d)\n", ioc->name,
5077 (unsigned long long)
5078 pcie_device->enclosure_logical_id,
5079 pcie_device->slot);
5080 if (pcie_device->connector_name[0])
5081 pr_info(MPT3SAS_FMT
5082 "\tenclosure level(0x%04x),"
5083 "connector name( %s)\n",
5084 ioc->name, pcie_device->enclosure_level,
5085 pcie_device->connector_name);
5086 pcie_device_put(pcie_device);
5087 }
4408 } else { 5088 } else {
4409 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target); 5089 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
4410 if (sas_device) { 5090 if (sas_device) {
@@ -4412,19 +5092,9 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4412 "\tsas_address(0x%016llx), phy(%d)\n", 5092 "\tsas_address(0x%016llx), phy(%d)\n",
4413 ioc->name, (unsigned long long) 5093 ioc->name, (unsigned long long)
4414 sas_device->sas_address, sas_device->phy); 5094 sas_device->sas_address, sas_device->phy);
4415 if (sas_device->enclosure_handle != 0) 5095
4416 pr_warn(MPT3SAS_FMT 5096 _scsih_display_enclosure_chassis_info(ioc, sas_device,
4417 "\tenclosure_logical_id(0x%016llx)," 5097 NULL, NULL);
4418 "slot(%d)\n", ioc->name,
4419 (unsigned long long)
4420 sas_device->enclosure_logical_id,
4421 sas_device->slot);
4422 if (sas_device->connector_name[0])
4423 pr_warn(MPT3SAS_FMT
4424 "\tenclosure level(0x%04x),"
4425 " connector name( %s)\n", ioc->name,
4426 sas_device->enclosure_level,
4427 sas_device->connector_name);
4428 5098
4429 sas_device_put(sas_device); 5099 sas_device_put(sas_device);
4430 } 5100 }
@@ -4451,11 +5121,10 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4451 struct sense_info data; 5121 struct sense_info data;
4452 _scsih_normalize_sense(scmd->sense_buffer, &data); 5122 _scsih_normalize_sense(scmd->sense_buffer, &data);
4453 pr_warn(MPT3SAS_FMT 5123 pr_warn(MPT3SAS_FMT
4454 "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", 5124 "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
4455 ioc->name, data.skey, 5125 ioc->name, data.skey,
4456 data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount)); 5126 data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
4457 } 5127 }
4458
4459 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { 5128 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
4460 response_info = le32_to_cpu(mpi_reply->ResponseInfo); 5129 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
4461 response_bytes = (u8 *)&response_info; 5130 response_bytes = (u8 *)&response_info;
@@ -4602,16 +5271,8 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4602 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) 5271 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
4603 goto out_unlock; 5272 goto out_unlock;
4604 5273
4605 if (sas_device->enclosure_handle != 0) 5274 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
4606 starget_printk(KERN_INFO, starget, "predicted fault, " 5275
4607 "enclosure logical id(0x%016llx), slot(%d)\n",
4608 (unsigned long long)sas_device->enclosure_logical_id,
4609 sas_device->slot);
4610 if (sas_device->connector_name[0] != '\0')
4611 starget_printk(KERN_WARNING, starget, "predicted fault, "
4612 "enclosure level(0x%04x), connector name( %s)\n",
4613 sas_device->enclosure_level,
4614 sas_device->connector_name);
4615 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5276 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4616 5277
4617 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) 5278 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
@@ -4666,7 +5327,7 @@ out_unlock:
4666static u8 5327static u8
4667_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) 5328_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4668{ 5329{
4669 Mpi2SCSIIORequest_t *mpi_request; 5330 Mpi25SCSIIORequest_t *mpi_request;
4670 Mpi2SCSIIOReply_t *mpi_reply; 5331 Mpi2SCSIIOReply_t *mpi_reply;
4671 struct scsi_cmnd *scmd; 5332 struct scsi_cmnd *scmd;
4672 u16 ioc_status; 5333 u16 ioc_status;
@@ -4731,9 +5392,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4731 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; 5392 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
4732 if (!sas_device_priv_data->tlr_snoop_check) { 5393 if (!sas_device_priv_data->tlr_snoop_check) {
4733 sas_device_priv_data->tlr_snoop_check++; 5394 sas_device_priv_data->tlr_snoop_check++;
4734 if (!ioc->is_warpdrive && 5395 if ((!ioc->is_warpdrive &&
4735 !scsih_is_raid(&scmd->device->sdev_gendev) && 5396 !scsih_is_raid(&scmd->device->sdev_gendev) &&
4736 sas_is_tlr_enabled(scmd->device) && 5397 !scsih_is_nvme(&scmd->device->sdev_gendev))
5398 && sas_is_tlr_enabled(scmd->device) &&
4737 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { 5399 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
4738 sas_disable_tlr(scmd->device); 5400 sas_disable_tlr(scmd->device);
4739 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n"); 5401 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
@@ -4804,6 +5466,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4804 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { 5466 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
4805 scmd->result = DID_RESET << 16; 5467 scmd->result = DID_RESET << 16;
4806 break; 5468 break;
5469 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5470 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5471 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5472 scmd->result = DID_RESET << 16;
5473 break;
4807 } 5474 }
4808 scmd->result = DID_SOFT_ERROR << 16; 5475 scmd->result = DID_SOFT_ERROR << 16;
4809 break; 5476 break;
@@ -5274,8 +5941,6 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
5274 spin_lock_irqsave(&ioc->sas_node_lock, flags); 5941 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5275 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 5942 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5276 sas_address); 5943 sas_address);
5277 if (sas_expander)
5278 list_del(&sas_expander->list);
5279 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 5944 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5280 if (sas_expander) 5945 if (sas_expander)
5281 _scsih_expander_node_remove(ioc, sas_expander); 5946 _scsih_expander_node_remove(ioc, sas_expander);
@@ -5386,6 +6051,52 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5386} 6051}
5387 6052
5388/** 6053/**
6054 * _scsih_get_enclosure_logicalid_chassis_slot - get device's
6055 * EnclosureLogicalID and ChassisSlot information.
6056 * @ioc: per adapter object
6057 * @sas_device_pg0: SAS device page0
6058 * @sas_device: per sas device object
6059 *
6060 * Returns nothing.
6061 */
6062static void
6063_scsih_get_enclosure_logicalid_chassis_slot(struct MPT3SAS_ADAPTER *ioc,
6064 Mpi2SasDevicePage0_t *sas_device_pg0, struct _sas_device *sas_device)
6065{
6066 Mpi2ConfigReply_t mpi_reply;
6067 Mpi2SasEnclosurePage0_t enclosure_pg0;
6068
6069 if (!sas_device_pg0 || !sas_device)
6070 return;
6071
6072 sas_device->enclosure_handle =
6073 le16_to_cpu(sas_device_pg0->EnclosureHandle);
6074 sas_device->is_chassis_slot_valid = 0;
6075
6076 if (!le16_to_cpu(sas_device_pg0->EnclosureHandle))
6077 return;
6078
6079 if (mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6080 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6081 le16_to_cpu(sas_device_pg0->EnclosureHandle))) {
6082 pr_err(MPT3SAS_FMT
6083 "Enclosure Pg0 read failed for handle(0x%04x)\n",
6084 ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
6085 return;
6086 }
6087
6088 sas_device->enclosure_logical_id =
6089 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6090
6091 if (le16_to_cpu(enclosure_pg0.Flags) &
6092 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6093 sas_device->is_chassis_slot_valid = 1;
6094 sas_device->chassis_slot = enclosure_pg0.ChassisSlot;
6095 }
6096}
6097
6098
6099/**
5389 * _scsih_check_device - checking device responsiveness 6100 * _scsih_check_device - checking device responsiveness
5390 * @ioc: per adapter object 6101 * @ioc: per adapter object
5391 * @parent_sas_address: sas address of parent expander or sas host 6102 * @parent_sas_address: sas address of parent expander or sas host
@@ -5409,7 +6120,6 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5409 struct MPT3SAS_TARGET *sas_target_priv_data; 6120 struct MPT3SAS_TARGET *sas_target_priv_data;
5410 u32 device_info; 6121 u32 device_info;
5411 6122
5412
5413 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 6123 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5414 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) 6124 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
5415 return; 6125 return;
@@ -5456,6 +6166,9 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5456 sas_device->enclosure_level = 0; 6166 sas_device->enclosure_level = 0;
5457 sas_device->connector_name[0] = '\0'; 6167 sas_device->connector_name[0] = '\0';
5458 } 6168 }
6169
6170 _scsih_get_enclosure_logicalid_chassis_slot(ioc,
6171 &sas_device_pg0, sas_device);
5459 } 6172 }
5460 6173
5461 /* check if device is present */ 6174 /* check if device is present */
@@ -5507,6 +6220,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
5507 u32 ioc_status; 6220 u32 ioc_status;
5508 u64 sas_address; 6221 u64 sas_address;
5509 u32 device_info; 6222 u32 device_info;
6223 int encl_pg0_rc = -1;
5510 6224
5511 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 6225 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5512 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 6226 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -5551,6 +6265,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
5551 return -1; 6265 return -1;
5552 } 6266 }
5553 6267
6268 if (sas_device_pg0.EnclosureHandle) {
6269 encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6270 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6271 sas_device_pg0.EnclosureHandle);
6272 if (encl_pg0_rc)
6273 pr_info(MPT3SAS_FMT
6274 "Enclosure Pg0 read failed for handle(0x%04x)\n",
6275 ioc->name, sas_device_pg0.EnclosureHandle);
6276 }
6277
5554 sas_device = kzalloc(sizeof(struct _sas_device), 6278 sas_device = kzalloc(sizeof(struct _sas_device),
5555 GFP_KERNEL); 6279 GFP_KERNEL);
5556 if (!sas_device) { 6280 if (!sas_device) {
@@ -5588,13 +6312,21 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
5588 sas_device->enclosure_level = 0; 6312 sas_device->enclosure_level = 0;
5589 sas_device->connector_name[0] = '\0'; 6313 sas_device->connector_name[0] = '\0';
5590 } 6314 }
5591 /* get enclosure_logical_id */ 6315
5592 if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0( 6316 /* get enclosure_logical_id & chassis_slot */
5593 ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 6317 sas_device->is_chassis_slot_valid = 0;
5594 sas_device->enclosure_handle))) 6318 if (encl_pg0_rc == 0) {
5595 sas_device->enclosure_logical_id = 6319 sas_device->enclosure_logical_id =
5596 le64_to_cpu(enclosure_pg0.EnclosureLogicalID); 6320 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5597 6321
6322 if (le16_to_cpu(enclosure_pg0.Flags) &
6323 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6324 sas_device->is_chassis_slot_valid = 1;
6325 sas_device->chassis_slot =
6326 enclosure_pg0.ChassisSlot;
6327 }
6328 }
6329
5598 /* get device name */ 6330 /* get device name */
5599 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); 6331 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
5600 6332
@@ -5625,23 +6357,15 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
5625 _scsih_turn_off_pfa_led(ioc, sas_device); 6357 _scsih_turn_off_pfa_led(ioc, sas_device);
5626 sas_device->pfa_led_on = 0; 6358 sas_device->pfa_led_on = 0;
5627 } 6359 }
6360
5628 dewtprintk(ioc, pr_info(MPT3SAS_FMT 6361 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5629 "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", 6362 "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
5630 ioc->name, __func__, 6363 ioc->name, __func__,
5631 sas_device->handle, (unsigned long long) 6364 sas_device->handle, (unsigned long long)
5632 sas_device->sas_address)); 6365 sas_device->sas_address));
5633 if (sas_device->enclosure_handle != 0) 6366
5634 dewtprintk(ioc, pr_info(MPT3SAS_FMT 6367 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
5635 "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n", 6368 NULL, NULL));
5636 ioc->name, __func__,
5637 (unsigned long long)sas_device->enclosure_logical_id,
5638 sas_device->slot));
5639 if (sas_device->connector_name[0] != '\0')
5640 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5641 "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
5642 ioc->name, __func__,
5643 sas_device->enclosure_level,
5644 sas_device->connector_name));
5645 6369
5646 if (sas_device->starget && sas_device->starget->hostdata) { 6370 if (sas_device->starget && sas_device->starget->hostdata) {
5647 sas_target_priv_data = sas_device->starget->hostdata; 6371 sas_target_priv_data = sas_device->starget->hostdata;
@@ -5660,34 +6384,16 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
5660 "removing handle(0x%04x), sas_addr(0x%016llx)\n", 6384 "removing handle(0x%04x), sas_addr(0x%016llx)\n",
5661 ioc->name, sas_device->handle, 6385 ioc->name, sas_device->handle,
5662 (unsigned long long) sas_device->sas_address); 6386 (unsigned long long) sas_device->sas_address);
5663 if (sas_device->enclosure_handle != 0) 6387
5664 pr_info(MPT3SAS_FMT 6388 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
5665 "removing : enclosure logical id(0x%016llx), slot(%d)\n",
5666 ioc->name,
5667 (unsigned long long)sas_device->enclosure_logical_id,
5668 sas_device->slot);
5669 if (sas_device->connector_name[0] != '\0')
5670 pr_info(MPT3SAS_FMT
5671 "removing enclosure level(0x%04x), connector name( %s)\n",
5672 ioc->name, sas_device->enclosure_level,
5673 sas_device->connector_name);
5674 6389
5675 dewtprintk(ioc, pr_info(MPT3SAS_FMT 6390 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5676 "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", 6391 "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
5677 ioc->name, __func__, 6392 ioc->name, __func__,
5678 sas_device->handle, (unsigned long long) 6393 sas_device->handle, (unsigned long long)
5679 sas_device->sas_address)); 6394 sas_device->sas_address));
5680 if (sas_device->enclosure_handle != 0) 6395 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
5681 dewtprintk(ioc, pr_info(MPT3SAS_FMT 6396 NULL, NULL));
5682 "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
5683 ioc->name, __func__,
5684 (unsigned long long)sas_device->enclosure_logical_id,
5685 sas_device->slot));
5686 if (sas_device->connector_name[0] != '\0')
5687 dewtprintk(ioc, pr_info(MPT3SAS_FMT
5688 "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
5689 ioc->name, __func__, sas_device->enclosure_level,
5690 sas_device->connector_name));
5691} 6397}
5692 6398
5693/** 6399/**
@@ -6028,7 +6734,705 @@ out:
6028 sas_device_put(sas_device); 6734 sas_device_put(sas_device);
6029 6735
6030 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 6736 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6737}
6738
6739
6740/**
6741 * _scsih_check_pcie_access_status - check access flags
6742 * @ioc: per adapter object
6743 * @wwid: wwid
6744 * @handle: sas device handle
6745 * @access_flags: errors returned during discovery of the device
6746 *
6747 * Return 0 for success, else failure
6748 */
6749static u8
6750_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6751 u16 handle, u8 access_status)
6752{
6753 u8 rc = 1;
6754 char *desc = NULL;
6755
6756 switch (access_status) {
6757 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6758 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6759 rc = 0;
6760 break;
6761 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6762 desc = "PCIe device capability failed";
6763 break;
6764 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6765 desc = "PCIe device blocked";
6766 break;
6767 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6768 desc = "PCIe device mem space access failed";
6769 break;
6770 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6771 desc = "PCIe device unsupported";
6772 break;
6773 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6774 desc = "PCIe device MSIx Required";
6775 break;
6776 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6777 desc = "PCIe device init fail max";
6778 break;
6779 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6780 desc = "PCIe device status unknown";
6781 break;
6782 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6783 desc = "nvme ready timeout";
6784 break;
6785 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6786 desc = "nvme device configuration unsupported";
6787 break;
6788 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6789 desc = "nvme identify failed";
6790 break;
6791 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6792 desc = "nvme qconfig failed";
6793 break;
6794 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6795 desc = "nvme qcreation failed";
6796 break;
6797 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6798 desc = "nvme eventcfg failed";
6799 break;
6800 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6801 desc = "nvme get feature stat failed";
6802 break;
6803 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6804 desc = "nvme idle timeout";
6805 break;
6806 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6807 desc = "nvme failure status";
6808 break;
6809 default:
6810 pr_err(MPT3SAS_FMT
6811 " NVMe discovery error(0x%02x): wwid(0x%016llx),"
6812 "handle(0x%04x)\n", ioc->name, access_status,
6813 (unsigned long long)wwid, handle);
6814 return rc;
6815 }
6816
6817 if (!rc)
6818 return rc;
6819
6820 pr_info(MPT3SAS_FMT
6821 "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6822 ioc->name, desc,
6823 (unsigned long long)wwid, handle);
6824 return rc;
6825}
6826
/**
 * _scsih_pcie_device_remove_from_sml - removing pcie device
 * from SML and free up associated memory
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 *
 * Marks the target deleted, unblocks outstanding I/O so it can fail fast,
 * detaches the scsi target from the midlayer and releases the cached
 * serial number.  The pcie_device object itself is reference counted and
 * presumably freed by the caller's pcie_device_put — confirm against callers.
 *
 * Return nothing.
 */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* entry trace; enclosure details printed only when known */
	dewtprintk(ioc, pr_info(MPT3SAS_FMT
	    "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
	    pcie_device->handle, (unsigned long long)
	    pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
		    ioc->name, __func__,
		    (unsigned long long)pcie_device->enclosure_logical_id,
		    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
		    ioc->name, __func__,
		    pcie_device->enclosure_level,
		    pcie_device->connector_name));

	/*
	 * Mark the target as deleted so no new I/O is started against it,
	 * unblock the device so queued commands complete, and invalidate
	 * the firmware handle cached in the target private data.
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	pr_info(MPT3SAS_FMT
	    "removing handle(0x%04x), wwid (0x%016llx)\n",
	    ioc->name, pcie_device->handle,
	    (unsigned long long) pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		pr_info(MPT3SAS_FMT
		    "removing : enclosure logical id(0x%016llx), slot(%d)\n",
		    ioc->name,
		    (unsigned long long)pcie_device->enclosure_logical_id,
		    pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		pr_info(MPT3SAS_FMT
		    "removing: enclosure level(0x%04x), connector name( %s)\n",
		    ioc->name, pcie_device->enclosure_level,
		    pcie_device->connector_name);

	/* detach from the SCSI midlayer (may be NULL if never attached) */
	if (pcie_device->starget)
		scsi_remove_target(&pcie_device->starget->dev);
	/* exit trace, mirroring the entry trace above */
	dewtprintk(ioc, pr_info(MPT3SAS_FMT
	    "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
	    pcie_device->handle, (unsigned long long)
	    pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
		    ioc->name, __func__,
		    (unsigned long long)pcie_device->enclosure_logical_id,
		    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
		    ioc->name, __func__, pcie_device->enclosure_level,
		    pcie_device->connector_name));

	/* free only the serial number string owned by this object */
	kfree(pcie_device->serial_number);
}
6901
6902
/**
 * _scsih_pcie_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @handle: attached device handle
 *
 * Re-reads PCIe Device Page 0 for @handle after an event, fixes up a
 * changed firmware handle on the cached pcie_device, and unblocks I/O to
 * the device if it is present and its discovery access status is clean.
 *
 * Returns nothing.
 */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	u32 ioc_status;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	/* silently bail if the config page cannot be read */
	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
	if (!(_scsih_is_nvme_device(device_info)))
		return;

	/* look up the cached object by WWID under the pcie_device lock */
	wwid = le64_to_cpu(pcie_device_pg0.WWID);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return;
	}

	/*
	 * Firmware may hand out a new device handle after reset; propagate
	 * it to both the cached object and the target private data.
	 * NOTE(review): pcie_device->starget is dereferenced without a NULL
	 * check here — confirm a handle-change cannot be observed before the
	 * scsi target has been attached.
	 */
	if (unlikely(pcie_device->handle != handle)) {
		starget = pcie_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
		    pcie_device->handle, handle);
		sas_target_priv_data->handle = handle;
		pcie_device->handle = handle;

		/* refresh enclosure level/connector from the fresh page 0 */
		if (le32_to_cpu(pcie_device_pg0.Flags) &
		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
			pcie_device->enclosure_level =
			    pcie_device_pg0.EnclosureLevel;
			memcpy(&pcie_device->connector_name[0],
			    &pcie_device_pg0.ConnectorName[0], 4);
		} else {
			pcie_device->enclosure_level = 0;
			pcie_device->connector_name[0] = '\0';
		}
	}

	/* check if device is present */
	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		pr_info(MPT3SAS_FMT
		    "device is not present handle(0x%04x), flags!!!\n",
		    ioc->name, handle);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
	    pcie_device_pg0.AccessStatus)) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	/* drop the lock and the lookup reference before unblocking I/O */
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	pcie_device_put(pcie_device);

	_scsih_ublock_io_device(ioc, wwid);

	return;
}
6992
6993/**
6994 * _scsih_pcie_add_device - creating pcie device object
6995 * @ioc: per adapter object
6996 * @handle: pcie device handle
6997 *
6998 * Creating end device object, stored in ioc->pcie_device_list.
6999 *
7000 * Return 1 means queue the event later, 0 means complete the event
7001 */
7002static int
7003_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7004{
7005 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7006 Mpi26PCIeDevicePage2_t pcie_device_pg2;
7007 Mpi2ConfigReply_t mpi_reply;
7008 Mpi2SasEnclosurePage0_t enclosure_pg0;
7009 struct _pcie_device *pcie_device;
7010 u32 pcie_device_type;
7011 u32 ioc_status;
7012 u64 wwid;
7013
7014 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7015 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
7016 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7017 ioc->name, __FILE__, __LINE__, __func__);
7018 return 0;
7019 }
7020 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7021 MPI2_IOCSTATUS_MASK;
7022 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7023 pr_err(MPT3SAS_FMT
7024 "failure at %s:%d/%s()!\n",
7025 ioc->name, __FILE__, __LINE__, __func__);
7026 return 0;
7027 }
7028
7029 set_bit(handle, ioc->pend_os_device_add);
7030 wwid = le64_to_cpu(pcie_device_pg0.WWID);
7031
7032 /* check if device is present */
7033 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7034 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7035 pr_err(MPT3SAS_FMT
7036 "device is not present handle(0x04%x)!!!\n",
7037 ioc->name, handle);
7038 return 0;
7039 }
7040
7041 /* check if there were any issues with discovery */
7042 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7043 pcie_device_pg0.AccessStatus))
7044 return 0;
7045
7046 if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
7047 return 0;
7048
7049 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
7050 if (pcie_device) {
7051 clear_bit(handle, ioc->pend_os_device_add);
7052 pcie_device_put(pcie_device);
7053 return 0;
7054 }
7055
7056 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
7057 if (!pcie_device) {
7058 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7059 ioc->name, __FILE__, __LINE__, __func__);
7060 return 0;
7061 }
7062
7063 kref_init(&pcie_device->refcount);
7064 pcie_device->id = ioc->pcie_target_id++;
7065 pcie_device->channel = PCIE_CHANNEL;
7066 pcie_device->handle = handle;
7067 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7068 pcie_device->wwid = wwid;
7069 pcie_device->port_num = pcie_device_pg0.PortNum;
7070 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
7071 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7072 pcie_device_type = pcie_device->device_info &
7073 MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE;
7074
7075 pcie_device->enclosure_handle =
7076 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
7077 if (pcie_device->enclosure_handle != 0)
7078 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
7079
7080 if (le16_to_cpu(pcie_device_pg0.Flags) &
7081 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7082 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
7083 memcpy(&pcie_device->connector_name[0],
7084 &pcie_device_pg0.ConnectorName[0], 4);
7085 } else {
7086 pcie_device->enclosure_level = 0;
7087 pcie_device->connector_name[0] = '\0';
7088 }
7089
7090 /* get enclosure_logical_id */
7091 if (pcie_device->enclosure_handle &&
7092 !(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7093 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7094 pcie_device->enclosure_handle)))
7095 pcie_device->enclosure_logical_id =
7096 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
7097
7098 /* TODO -- Add device name once FW supports it */
7099 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
7100 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
7101 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7102 ioc->name, __FILE__, __LINE__, __func__);
7103 kfree(pcie_device);
7104 return 0;
7105 }
7106
7107 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7108 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7109 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7110 ioc->name, __FILE__, __LINE__, __func__);
7111 kfree(pcie_device);
7112 return 0;
7113 }
7114 pcie_device->nvme_mdts =
7115 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
7116
7117 if (ioc->wait_for_discovery_to_complete)
7118 _scsih_pcie_device_init_add(ioc, pcie_device);
7119 else
7120 _scsih_pcie_device_add(ioc, pcie_device);
7121
7122 pcie_device_put(pcie_device);
7123 return 0;
7124}
7125
7126/**
7127 * _scsih_pcie_topology_change_event_debug - debug for topology
7128 * event
7129 * @ioc: per adapter object
7130 * @event_data: event data payload
7131 * Context: user.
7132 */
7133static void
7134_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7135 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
7136{
7137 int i;
7138 u16 handle;
7139 u16 reason_code;
7140 u8 port_number;
7141 char *status_str = NULL;
7142 u8 link_rate, prev_link_rate;
7143
7144 switch (event_data->SwitchStatus) {
7145 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
7146 status_str = "add";
7147 break;
7148 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
7149 status_str = "remove";
7150 break;
7151 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
7152 case 0:
7153 status_str = "responding";
7154 break;
7155 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
7156 status_str = "remove delay";
7157 break;
7158 default:
7159 status_str = "unknown status";
7160 break;
7161 }
7162 pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
7163 ioc->name, status_str);
7164 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
7165 "start_port(%02d), count(%d)\n",
7166 le16_to_cpu(event_data->SwitchDevHandle),
7167 le16_to_cpu(event_data->EnclosureHandle),
7168 event_data->StartPortNum, event_data->NumEntries);
7169 for (i = 0; i < event_data->NumEntries; i++) {
7170 handle =
7171 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7172 if (!handle)
7173 continue;
7174 port_number = event_data->StartPortNum + i;
7175 reason_code = event_data->PortEntry[i].PortStatus;
7176 switch (reason_code) {
7177 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7178 status_str = "target add";
7179 break;
7180 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7181 status_str = "target remove";
7182 break;
7183 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
7184 status_str = "delay target remove";
7185 break;
7186 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7187 status_str = "link rate change";
7188 break;
7189 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
7190 status_str = "target responding";
7191 break;
7192 default:
7193 status_str = "unknown";
7194 break;
7195 }
7196 link_rate = event_data->PortEntry[i].CurrentPortInfo &
7197 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7198 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
7199 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7200 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
7201 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
7202 handle, status_str, link_rate, prev_link_rate);
7203 }
7204}
7205
7206/**
7207 * _scsih_pcie_topology_change_event - handle PCIe topology
7208 * changes
7209 * @ioc: per adapter object
7210 * @fw_event: The fw_event_work object
7211 * Context: user.
7212 *
7213 */
7214static int
7215_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7216 struct fw_event_work *fw_event)
7217{
7218 int i;
7219 u16 handle;
7220 u16 reason_code;
7221 u8 link_rate, prev_link_rate;
7222 unsigned long flags;
7223 int rc;
7224 int requeue_event;
7225 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
7226 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
7227 struct _pcie_device *pcie_device;
7228
7229 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7230 _scsih_pcie_topology_change_event_debug(ioc, event_data);
7231
7232 if (ioc->shost_recovery || ioc->remove_host ||
7233 ioc->pci_error_recovery)
7234 return 0;
7235
7236 if (fw_event->ignore) {
7237 dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
7238 ioc->name));
7239 return 0;
7240 }
7241
7242 /* handle siblings events */
7243 for (i = 0; i < event_data->NumEntries; i++) {
7244 if (fw_event->ignore) {
7245 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7246 "ignoring switch event\n", ioc->name));
7247 return 0;
7248 }
7249 if (ioc->remove_host || ioc->pci_error_recovery)
7250 return 0;
7251 reason_code = event_data->PortEntry[i].PortStatus;
7252 handle =
7253 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7254 if (!handle)
7255 continue;
7256
7257 link_rate = event_data->PortEntry[i].CurrentPortInfo
7258 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7259 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7260 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7261
7262 switch (reason_code) {
7263 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7264 if (ioc->shost_recovery)
7265 break;
7266 if (link_rate == prev_link_rate)
7267 break;
7268 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7269 break;
7270
7271 _scsih_pcie_check_device(ioc, handle);
6031 7272
7273 /* This code after this point handles the test case
7274 * where a device has been added, however its returning
7275 * BUSY for sometime. Then before the Device Missing
7276 * Delay expires and the device becomes READY, the
7277 * device is removed and added back.
7278 */
7279 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7280 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7281 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7282
7283 if (pcie_device) {
7284 pcie_device_put(pcie_device);
7285 break;
7286 }
7287
7288 if (!test_bit(handle, ioc->pend_os_device_add))
7289 break;
7290
7291 dewtprintk(ioc, pr_info(MPT3SAS_FMT
7292 "handle(0x%04x) device not found: convert "
7293 "event to a device add\n", ioc->name, handle));
7294 event_data->PortEntry[i].PortStatus &= 0xF0;
7295 event_data->PortEntry[i].PortStatus |=
7296 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7297 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7298 if (ioc->shost_recovery)
7299 break;
7300 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7301 break;
7302
7303 rc = _scsih_pcie_add_device(ioc, handle);
7304 if (!rc) {
7305 /* mark entry vacant */
7306 /* TODO This needs to be reviewed and fixed,
7307 * we dont have an entry
7308 * to make an event void like vacant
7309 */
7310 event_data->PortEntry[i].PortStatus |=
7311 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7312 }
7313 break;
7314 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7315 _scsih_pcie_device_remove_by_handle(ioc, handle);
7316 break;
7317 }
7318 }
7319 return requeue_event;
7320}
7321
7322/**
7323 * _scsih_pcie_device_status_change_event_debug - debug for
7324 * device event
7325 * @event_data: event data payload
7326 * Context: user.
7327 *
7328 * Return nothing.
7329 */
7330static void
7331_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7332 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7333{
7334 char *reason_str = NULL;
7335
7336 switch (event_data->ReasonCode) {
7337 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7338 reason_str = "smart data";
7339 break;
7340 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7341 reason_str = "unsupported device discovered";
7342 break;
7343 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7344 reason_str = "internal device reset";
7345 break;
7346 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7347 reason_str = "internal task abort";
7348 break;
7349 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7350 reason_str = "internal task abort set";
7351 break;
7352 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7353 reason_str = "internal clear task set";
7354 break;
7355 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7356 reason_str = "internal query task";
7357 break;
7358 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7359 reason_str = "device init failure";
7360 break;
7361 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7362 reason_str = "internal device reset complete";
7363 break;
7364 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7365 reason_str = "internal task abort complete";
7366 break;
7367 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7368 reason_str = "internal async notification";
7369 break;
7370 default:
7371 reason_str = "unknown reason";
7372 break;
7373 }
7374
7375 pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
7376 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7377 ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
7378 (unsigned long long)le64_to_cpu(event_data->WWID),
7379 le16_to_cpu(event_data->TaskTag));
7380 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7381 pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
7382 event_data->ASC, event_data->ASCQ);
7383 pr_info("\n");
7384}
7385
7386/**
7387 * _scsih_pcie_device_status_change_event - handle device status
7388 * change
7389 * @ioc: per adapter object
7390 * @fw_event: The fw_event_work object
7391 * Context: user.
7392 *
7393 * Return nothing.
7394 */
7395static void
7396_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7397 struct fw_event_work *fw_event)
7398{
7399 struct MPT3SAS_TARGET *target_priv_data;
7400 struct _pcie_device *pcie_device;
7401 u64 wwid;
7402 unsigned long flags;
7403 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7404 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7405 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7406 _scsih_pcie_device_status_change_event_debug(ioc,
7407 event_data);
7408
7409 if (event_data->ReasonCode !=
7410 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7411 event_data->ReasonCode !=
7412 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7413 return;
7414
7415 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7416 wwid = le64_to_cpu(event_data->WWID);
7417 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7418
7419 if (!pcie_device || !pcie_device->starget)
7420 goto out;
7421
7422 target_priv_data = pcie_device->starget->hostdata;
7423 if (!target_priv_data)
7424 goto out;
7425
7426 if (event_data->ReasonCode ==
7427 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7428 target_priv_data->tm_busy = 1;
7429 else
7430 target_priv_data->tm_busy = 0;
7431out:
7432 if (pcie_device)
7433 pcie_device_put(pcie_device);
7434
7435 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6032} 7436}
6033 7437
6034/** 7438/**
@@ -6282,6 +7686,35 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
6282} 7686}
6283 7687
6284/** 7688/**
7689 * _scsih_pcie_enumeration_event - handle enumeration events
7690 * @ioc: per adapter object
7691 * @fw_event: The fw_event_work object
7692 * Context: user.
7693 *
7694 * Return nothing.
7695 */
7696static void
7697_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7698 struct fw_event_work *fw_event)
7699{
7700 Mpi26EventDataPCIeEnumeration_t *event_data =
7701 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7702
7703 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7704 return;
7705
7706 pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
7707 ioc->name,
7708 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7709 "started" : "completed",
7710 event_data->Flags);
7711 if (event_data->EnumerationStatus)
7712 pr_cont("enumeration_status(0x%08x)",
7713 le32_to_cpu(event_data->EnumerationStatus));
7714 pr_cont("\n");
7715}
7716
7717/**
6285 * _scsih_ir_fastpath - turn on fastpath for IR physdisk 7718 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
6286 * @ioc: per adapter object 7719 * @ioc: per adapter object
6287 * @handle: device handle for physical disk 7720 * @handle: device handle for physical disk
@@ -7085,7 +8518,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
7085{ 8518{
7086 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 8519 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
7087 struct scsi_target *starget; 8520 struct scsi_target *starget;
7088 struct _sas_device *sas_device; 8521 struct _sas_device *sas_device = NULL;
7089 unsigned long flags; 8522 unsigned long flags;
7090 8523
7091 spin_lock_irqsave(&ioc->sas_device_lock, flags); 8524 spin_lock_irqsave(&ioc->sas_device_lock, flags);
@@ -7126,6 +8559,9 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
7126 sas_device->connector_name[0] = '\0'; 8559 sas_device->connector_name[0] = '\0';
7127 } 8560 }
7128 8561
8562 _scsih_get_enclosure_logicalid_chassis_slot(ioc,
8563 sas_device_pg0, sas_device);
8564
7129 if (sas_device->handle == sas_device_pg0->DevHandle) 8565 if (sas_device->handle == sas_device_pg0->DevHandle)
7130 goto out; 8566 goto out;
7131 pr_info("\thandle changed from(0x%04x)!!!\n", 8567 pr_info("\thandle changed from(0x%04x)!!!\n",
@@ -7190,6 +8626,130 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
7190} 8626}
7191 8627
7192/** 8628/**
8629 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8630 * @ioc: per adapter object
8631 * @pcie_device_pg0: PCIe Device page 0
8632 *
8633 * After host reset, find out whether devices are still responding.
8634 * Used in _scsih_remove_unresponding_devices.
8635 *
8636 * Return nothing.
8637 */
8638static void
8639_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8640 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8641{
8642 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8643 struct scsi_target *starget;
8644 struct _pcie_device *pcie_device;
8645 unsigned long flags;
8646
8647 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8648 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8649 if ((pcie_device->wwid == pcie_device_pg0->WWID) &&
8650 (pcie_device->slot == pcie_device_pg0->Slot)) {
8651 pcie_device->responding = 1;
8652 starget = pcie_device->starget;
8653 if (starget && starget->hostdata) {
8654 sas_target_priv_data = starget->hostdata;
8655 sas_target_priv_data->tm_busy = 0;
8656 sas_target_priv_data->deleted = 0;
8657 } else
8658 sas_target_priv_data = NULL;
8659 if (starget) {
8660 starget_printk(KERN_INFO, starget,
8661 "handle(0x%04x), wwid(0x%016llx) ",
8662 pcie_device->handle,
8663 (unsigned long long)pcie_device->wwid);
8664 if (pcie_device->enclosure_handle != 0)
8665 starget_printk(KERN_INFO, starget,
8666 "enclosure logical id(0x%016llx), "
8667 "slot(%d)\n",
8668 (unsigned long long)
8669 pcie_device->enclosure_logical_id,
8670 pcie_device->slot);
8671 }
8672
8673 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8674 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8675 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8676 pcie_device->enclosure_level =
8677 pcie_device_pg0->EnclosureLevel;
8678 memcpy(&pcie_device->connector_name[0],
8679 &pcie_device_pg0->ConnectorName[0], 4);
8680 } else {
8681 pcie_device->enclosure_level = 0;
8682 pcie_device->connector_name[0] = '\0';
8683 }
8684
8685 if (pcie_device->handle == pcie_device_pg0->DevHandle)
8686 goto out;
8687 pr_info("\thandle changed from(0x%04x)!!!\n",
8688 pcie_device->handle);
8689 pcie_device->handle = pcie_device_pg0->DevHandle;
8690 if (sas_target_priv_data)
8691 sas_target_priv_data->handle =
8692 pcie_device_pg0->DevHandle;
8693 goto out;
8694 }
8695 }
8696
8697 out:
8698 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8699}
8700
8701/**
8702 * _scsih_search_responding_pcie_devices -
8703 * @ioc: per adapter object
8704 *
8705 * After host reset, find out whether devices are still responding.
8706 * If not remove.
8707 *
8708 * Return nothing.
8709 */
8710static void
8711_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8712{
8713 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8714 Mpi2ConfigReply_t mpi_reply;
8715 u16 ioc_status;
8716 u16 handle;
8717 u32 device_info;
8718
8719 pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
8720
8721 if (list_empty(&ioc->pcie_device_list))
8722 goto out;
8723
8724 handle = 0xFFFF;
8725 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8726 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8727 handle))) {
8728 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8729 MPI2_IOCSTATUS_MASK;
8730 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8731 pr_info(MPT3SAS_FMT "\tbreak from %s: "
8732 "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
8733 __func__, ioc_status,
8734 le32_to_cpu(mpi_reply.IOCLogInfo));
8735 break;
8736 }
8737 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8738 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8739 if (!(_scsih_is_nvme_device(device_info)))
8740 continue;
8741 pcie_device_pg0.WWID = le64_to_cpu(pcie_device_pg0.WWID),
8742 pcie_device_pg0.Slot = le16_to_cpu(pcie_device_pg0.Slot);
8743 pcie_device_pg0.Flags = le32_to_cpu(pcie_device_pg0.Flags);
8744 pcie_device_pg0.DevHandle = handle;
8745 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8746 }
8747out:
8748 pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
8749 ioc->name);
8750}
8751
8752/**
7193 * _scsih_mark_responding_raid_device - mark a raid_device as responding 8753 * _scsih_mark_responding_raid_device - mark a raid_device as responding
7194 * @ioc: per adapter object 8754 * @ioc: per adapter object
7195 * @wwid: world wide identifier for raid volume 8755 * @wwid: world wide identifier for raid volume
@@ -7322,8 +8882,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
7322/** 8882/**
7323 * _scsih_mark_responding_expander - mark a expander as responding 8883 * _scsih_mark_responding_expander - mark a expander as responding
7324 * @ioc: per adapter object 8884 * @ioc: per adapter object
7325 * @sas_address: sas address 8885 * @expander_pg0:SAS Expander Config Page0
7326 * @handle:
7327 * 8886 *
7328 * After host reset, find out whether devices are still responding. 8887 * After host reset, find out whether devices are still responding.
7329 * Used in _scsih_remove_unresponsive_expanders. 8888 * Used in _scsih_remove_unresponsive_expanders.
@@ -7331,18 +8890,41 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
7331 * Return nothing. 8890 * Return nothing.
7332 */ 8891 */
7333static void 8892static void
7334_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 8893_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
7335 u16 handle) 8894 Mpi2ExpanderPage0_t *expander_pg0)
7336{ 8895{
7337 struct _sas_node *sas_expander; 8896 struct _sas_node *sas_expander = NULL;
7338 unsigned long flags; 8897 unsigned long flags;
7339 int i; 8898 int i, encl_pg0_rc = -1;
8899 Mpi2ConfigReply_t mpi_reply;
8900 Mpi2SasEnclosurePage0_t enclosure_pg0;
8901 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8902 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8903
8904 if (le16_to_cpu(expander_pg0->EnclosureHandle)) {
8905 encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8906 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8907 le16_to_cpu(expander_pg0->EnclosureHandle));
8908 if (encl_pg0_rc)
8909 pr_info(MPT3SAS_FMT
8910 "Enclosure Pg0 read failed for handle(0x%04x)\n",
8911 ioc->name,
8912 le16_to_cpu(expander_pg0->EnclosureHandle));
8913 }
7340 8914
7341 spin_lock_irqsave(&ioc->sas_node_lock, flags); 8915 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7342 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 8916 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
7343 if (sas_expander->sas_address != sas_address) 8917 if (sas_expander->sas_address != sas_address)
7344 continue; 8918 continue;
7345 sas_expander->responding = 1; 8919 sas_expander->responding = 1;
8920
8921 if (!encl_pg0_rc)
8922 sas_expander->enclosure_logical_id =
8923 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
8924
8925 sas_expander->enclosure_handle =
8926 le16_to_cpu(expander_pg0->EnclosureHandle);
8927
7346 if (sas_expander->handle == handle) 8928 if (sas_expander->handle == handle)
7347 goto out; 8929 goto out;
7348 pr_info("\texpander(0x%016llx): handle changed" \ 8930 pr_info("\texpander(0x%016llx): handle changed" \
@@ -7395,7 +8977,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
7395 pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n", 8977 pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
7396 handle, 8978 handle,
7397 (unsigned long long)sas_address); 8979 (unsigned long long)sas_address);
7398 _scsih_mark_responding_expander(ioc, sas_address, handle); 8980 _scsih_mark_responding_expander(ioc, &expander_pg0);
7399 } 8981 }
7400 8982
7401 out: 8983 out:
@@ -7403,17 +8985,18 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
7403} 8985}
7404 8986
7405/** 8987/**
7406 * _scsih_remove_unresponding_sas_devices - removing unresponding devices 8988 * _scsih_remove_unresponding_devices - removing unresponding devices
7407 * @ioc: per adapter object 8989 * @ioc: per adapter object
7408 * 8990 *
7409 * Return nothing. 8991 * Return nothing.
7410 */ 8992 */
7411static void 8993static void
7412_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc) 8994_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
7413{ 8995{
7414 struct _sas_device *sas_device, *sas_device_next; 8996 struct _sas_device *sas_device, *sas_device_next;
7415 struct _sas_node *sas_expander, *sas_expander_next; 8997 struct _sas_node *sas_expander, *sas_expander_next;
7416 struct _raid_device *raid_device, *raid_device_next; 8998 struct _raid_device *raid_device, *raid_device_next;
8999 struct _pcie_device *pcie_device, *pcie_device_next;
7417 struct list_head tmp_list; 9000 struct list_head tmp_list;
7418 unsigned long flags; 9001 unsigned long flags;
7419 LIST_HEAD(head); 9002 LIST_HEAD(head);
@@ -7447,6 +9030,26 @@ _scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
7447 sas_device_put(sas_device); 9030 sas_device_put(sas_device);
7448 } 9031 }
7449 9032
9033 pr_info(MPT3SAS_FMT
9034 " Removing unresponding devices: pcie end-devices\n"
9035 , ioc->name);
9036 INIT_LIST_HEAD(&head);
9037 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9038 list_for_each_entry_safe(pcie_device, pcie_device_next,
9039 &ioc->pcie_device_list, list) {
9040 if (!pcie_device->responding)
9041 list_move_tail(&pcie_device->list, &head);
9042 else
9043 pcie_device->responding = 0;
9044 }
9045 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9046
9047 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
9048 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9049 list_del_init(&pcie_device->list);
9050 pcie_device_put(pcie_device);
9051 }
9052
7450 /* removing unresponding volumes */ 9053 /* removing unresponding volumes */
7451 if (ioc->ir_firmware) { 9054 if (ioc->ir_firmware) {
7452 pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n", 9055 pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
@@ -7476,7 +9079,6 @@ _scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
7476 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 9079 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7477 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list, 9080 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
7478 list) { 9081 list) {
7479 list_del(&sas_expander->list);
7480 _scsih_expander_node_remove(ioc, sas_expander); 9082 _scsih_expander_node_remove(ioc, sas_expander);
7481 } 9083 }
7482 9084
@@ -7520,6 +9122,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
7520{ 9122{
7521 Mpi2ExpanderPage0_t expander_pg0; 9123 Mpi2ExpanderPage0_t expander_pg0;
7522 Mpi2SasDevicePage0_t sas_device_pg0; 9124 Mpi2SasDevicePage0_t sas_device_pg0;
9125 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7523 Mpi2RaidVolPage1_t volume_pg1; 9126 Mpi2RaidVolPage1_t volume_pg1;
7524 Mpi2RaidVolPage0_t volume_pg0; 9127 Mpi2RaidVolPage0_t volume_pg0;
7525 Mpi2RaidPhysDiskPage0_t pd_pg0; 9128 Mpi2RaidPhysDiskPage0_t pd_pg0;
@@ -7530,6 +9133,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
7530 u16 handle, parent_handle; 9133 u16 handle, parent_handle;
7531 u64 sas_address; 9134 u64 sas_address;
7532 struct _sas_device *sas_device; 9135 struct _sas_device *sas_device;
9136 struct _pcie_device *pcie_device;
7533 struct _sas_node *expander_device; 9137 struct _sas_node *expander_device;
7534 static struct _raid_device *raid_device; 9138 static struct _raid_device *raid_device;
7535 u8 retry_count; 9139 u8 retry_count;
@@ -7755,7 +9359,44 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
7755 } 9359 }
7756 pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n", 9360 pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
7757 ioc->name); 9361 ioc->name);
9362 pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
9363 ioc->name);
7758 9364
9365 /* pcie devices */
9366 handle = 0xFFFF;
9367 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9368 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9369 handle))) {
9370 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9371 & MPI2_IOCSTATUS_MASK;
9372 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9373 pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
9374 " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9375 ioc->name, ioc_status,
9376 le32_to_cpu(mpi_reply.IOCLogInfo));
9377 break;
9378 }
9379 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9380 if (!(_scsih_is_nvme_device(
9381 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9382 continue;
9383 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9384 le64_to_cpu(pcie_device_pg0.WWID));
9385 if (pcie_device) {
9386 pcie_device_put(pcie_device);
9387 continue;
9388 }
9389 retry_count = 0;
9390 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9391 _scsih_pcie_add_device(ioc, handle);
9392
9393 pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
9394 "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
9395 handle,
9396 (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
9397 }
9398 pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n",
9399 ioc->name);
7759 pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name); 9400 pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
7760} 9401}
7761/** 9402/**
@@ -7805,6 +9446,7 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
7805 !ioc->sas_hba.num_phys)) { 9446 !ioc->sas_hba.num_phys)) {
7806 _scsih_prep_device_scan(ioc); 9447 _scsih_prep_device_scan(ioc);
7807 _scsih_search_responding_sas_devices(ioc); 9448 _scsih_search_responding_sas_devices(ioc);
9449 _scsih_search_responding_pcie_devices(ioc);
7808 _scsih_search_responding_raid_devices(ioc); 9450 _scsih_search_responding_raid_devices(ioc);
7809 _scsih_search_responding_expanders(ioc); 9451 _scsih_search_responding_expanders(ioc);
7810 _scsih_error_recovery_delete_devices(ioc); 9452 _scsih_error_recovery_delete_devices(ioc);
@@ -7849,7 +9491,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
7849 goto out; 9491 goto out;
7850 ssleep(1); 9492 ssleep(1);
7851 } 9493 }
7852 _scsih_remove_unresponding_sas_devices(ioc); 9494 _scsih_remove_unresponding_devices(ioc);
7853 _scsih_scan_for_devices_after_reset(ioc); 9495 _scsih_scan_for_devices_after_reset(ioc);
7854 break; 9496 break;
7855 case MPT3SAS_PORT_ENABLE_COMPLETE: 9497 case MPT3SAS_PORT_ENABLE_COMPLETE:
@@ -7892,6 +9534,16 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
7892 case MPI2_EVENT_IR_OPERATION_STATUS: 9534 case MPI2_EVENT_IR_OPERATION_STATUS:
7893 _scsih_sas_ir_operation_status_event(ioc, fw_event); 9535 _scsih_sas_ir_operation_status_event(ioc, fw_event);
7894 break; 9536 break;
9537 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9538 _scsih_pcie_device_status_change_event(ioc, fw_event);
9539 break;
9540 case MPI2_EVENT_PCIE_ENUMERATION:
9541 _scsih_pcie_enumeration_event(ioc, fw_event);
9542 break;
9543 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9544 _scsih_pcie_topology_change_event(ioc, fw_event);
9545 return;
9546 break;
7895 } 9547 }
7896out: 9548out:
7897 fw_event_work_put(fw_event); 9549 fw_event_work_put(fw_event);
@@ -7982,6 +9634,11 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
7982 (Mpi2EventDataSasTopologyChangeList_t *) 9634 (Mpi2EventDataSasTopologyChangeList_t *)
7983 mpi_reply->EventData); 9635 mpi_reply->EventData);
7984 break; 9636 break;
9637 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9638 _scsih_check_pcie_topo_remove_events(ioc,
9639 (Mpi26EventDataPCIeTopologyChangeList_t *)
9640 mpi_reply->EventData);
9641 break;
7985 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 9642 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7986 _scsih_check_ir_config_unhide_events(ioc, 9643 _scsih_check_ir_config_unhide_events(ioc,
7987 (Mpi2EventDataIrConfigChangeList_t *) 9644 (Mpi2EventDataIrConfigChangeList_t *)
@@ -8044,6 +9701,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
8044 case MPI2_EVENT_SAS_DISCOVERY: 9701 case MPI2_EVENT_SAS_DISCOVERY:
8045 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 9702 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
8046 case MPI2_EVENT_IR_PHYSICAL_DISK: 9703 case MPI2_EVENT_IR_PHYSICAL_DISK:
9704 case MPI2_EVENT_PCIE_ENUMERATION:
9705 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
8047 break; 9706 break;
8048 9707
8049 case MPI2_EVENT_TEMP_THRESHOLD: 9708 case MPI2_EVENT_TEMP_THRESHOLD:
@@ -8056,19 +9715,21 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
8056 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; 9715 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
8057 switch (ActiveCableEventData->ReasonCode) { 9716 switch (ActiveCableEventData->ReasonCode) {
8058 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER: 9717 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
8059 pr_notice(MPT3SAS_FMT "Receptacle ID %d: This active cable" 9718 pr_notice(MPT3SAS_FMT
8060 " requires %d mW of power\n", ioc->name, 9719 "Currently an active cable with ReceptacleID %d\n",
8061 ActiveCableEventData->ReceptacleID, 9720 ioc->name, ActiveCableEventData->ReceptacleID);
9721 pr_notice("cannot be powered and devices connected\n");
9722 pr_notice("to this active cable will not be seen\n");
9723 pr_notice("This active cable requires %d mW of power\n",
8062 ActiveCableEventData->ActiveCablePowerRequirement); 9724 ActiveCableEventData->ActiveCablePowerRequirement);
8063 pr_notice(MPT3SAS_FMT "Receptacle ID %d: Devices connected"
8064 " to this active cable will not be seen\n",
8065 ioc->name, ActiveCableEventData->ReceptacleID);
8066 break; 9725 break;
8067 9726
8068 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED: 9727 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
8069 pr_notice(MPT3SAS_FMT "ReceptacleID %d: This cable", 9728 pr_notice(MPT3SAS_FMT
8070 ioc->name, ActiveCableEventData->ReceptacleID); 9729 "Currently a cable with ReceptacleID %d\n",
8071 pr_notice(" is not running at an optimal speed(12 Gb/s)\n"); 9730 ioc->name, ActiveCableEventData->ReceptacleID);
9731 pr_notice(
9732 "is not running at optimal speed(12 Gb/s rate)\n");
8072 break; 9733 break;
8073 } 9734 }
8074 9735
@@ -8100,7 +9761,6 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
8100 * _scsih_expander_node_remove - removing expander device from list. 9761 * _scsih_expander_node_remove - removing expander device from list.
8101 * @ioc: per adapter object 9762 * @ioc: per adapter object
8102 * @sas_expander: the sas_device object 9763 * @sas_expander: the sas_device object
8103 * Context: Calling function should acquire ioc->sas_node_lock.
8104 * 9764 *
8105 * Removing object and freeing associated memory from the 9765 * Removing object and freeing associated memory from the
8106 * ioc->sas_expander_list. 9766 * ioc->sas_expander_list.
@@ -8112,6 +9772,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
8112 struct _sas_node *sas_expander) 9772 struct _sas_node *sas_expander)
8113{ 9773{
8114 struct _sas_port *mpt3sas_port, *next; 9774 struct _sas_port *mpt3sas_port, *next;
9775 unsigned long flags;
8115 9776
8116 /* remove sibling ports attached to this expander */ 9777 /* remove sibling ports attached to this expander */
8117 list_for_each_entry_safe(mpt3sas_port, next, 9778 list_for_each_entry_safe(mpt3sas_port, next,
@@ -8139,6 +9800,10 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
8139 sas_expander->handle, (unsigned long long) 9800 sas_expander->handle, (unsigned long long)
8140 sas_expander->sas_address); 9801 sas_expander->sas_address);
8141 9802
9803 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9804 list_del(&sas_expander->list);
9805 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9806
8142 kfree(sas_expander->phy); 9807 kfree(sas_expander->phy);
8143 kfree(sas_expander); 9808 kfree(sas_expander);
8144} 9809}
@@ -8231,6 +9896,7 @@ static void scsih_remove(struct pci_dev *pdev)
8231 struct _sas_port *mpt3sas_port, *next_port; 9896 struct _sas_port *mpt3sas_port, *next_port;
8232 struct _raid_device *raid_device, *next; 9897 struct _raid_device *raid_device, *next;
8233 struct MPT3SAS_TARGET *sas_target_priv_data; 9898 struct MPT3SAS_TARGET *sas_target_priv_data;
9899 struct _pcie_device *pcie_device, *pcienext;
8234 struct workqueue_struct *wq; 9900 struct workqueue_struct *wq;
8235 unsigned long flags; 9901 unsigned long flags;
8236 9902
@@ -8259,6 +9925,12 @@ static void scsih_remove(struct pci_dev *pdev)
8259 (unsigned long long) raid_device->wwid); 9925 (unsigned long long) raid_device->wwid);
8260 _scsih_raid_device_remove(ioc, raid_device); 9926 _scsih_raid_device_remove(ioc, raid_device);
8261 } 9927 }
9928 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
9929 list) {
9930 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9931 list_del_init(&pcie_device->list);
9932 pcie_device_put(pcie_device);
9933 }
8262 9934
8263 /* free ports attached to the sas_host */ 9935 /* free ports attached to the sas_host */
8264 list_for_each_entry_safe(mpt3sas_port, next_port, 9936 list_for_each_entry_safe(mpt3sas_port, next_port,
@@ -8330,42 +10002,52 @@ scsih_shutdown(struct pci_dev *pdev)
8330static void 10002static void
8331_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) 10003_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
8332{ 10004{
8333 u8 is_raid; 10005 u32 channel;
8334 void *device; 10006 void *device;
8335 struct _sas_device *sas_device; 10007 struct _sas_device *sas_device;
8336 struct _raid_device *raid_device; 10008 struct _raid_device *raid_device;
10009 struct _pcie_device *pcie_device;
8337 u16 handle; 10010 u16 handle;
8338 u64 sas_address_parent; 10011 u64 sas_address_parent;
8339 u64 sas_address; 10012 u64 sas_address;
8340 unsigned long flags; 10013 unsigned long flags;
8341 int rc; 10014 int rc;
10015 int tid;
8342 10016
8343 /* no Bios, return immediately */ 10017 /* no Bios, return immediately */
8344 if (!ioc->bios_pg3.BiosVersion) 10018 if (!ioc->bios_pg3.BiosVersion)
8345 return; 10019 return;
8346 10020
8347 device = NULL; 10021 device = NULL;
8348 is_raid = 0;
8349 if (ioc->req_boot_device.device) { 10022 if (ioc->req_boot_device.device) {
8350 device = ioc->req_boot_device.device; 10023 device = ioc->req_boot_device.device;
8351 is_raid = ioc->req_boot_device.is_raid; 10024 channel = ioc->req_boot_device.channel;
8352 } else if (ioc->req_alt_boot_device.device) { 10025 } else if (ioc->req_alt_boot_device.device) {
8353 device = ioc->req_alt_boot_device.device; 10026 device = ioc->req_alt_boot_device.device;
8354 is_raid = ioc->req_alt_boot_device.is_raid; 10027 channel = ioc->req_alt_boot_device.channel;
8355 } else if (ioc->current_boot_device.device) { 10028 } else if (ioc->current_boot_device.device) {
8356 device = ioc->current_boot_device.device; 10029 device = ioc->current_boot_device.device;
8357 is_raid = ioc->current_boot_device.is_raid; 10030 channel = ioc->current_boot_device.channel;
8358 } 10031 }
8359 10032
8360 if (!device) 10033 if (!device)
8361 return; 10034 return;
8362 10035
8363 if (is_raid) { 10036 if (channel == RAID_CHANNEL) {
8364 raid_device = device; 10037 raid_device = device;
8365 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 10038 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8366 raid_device->id, 0); 10039 raid_device->id, 0);
8367 if (rc) 10040 if (rc)
8368 _scsih_raid_device_remove(ioc, raid_device); 10041 _scsih_raid_device_remove(ioc, raid_device);
10042 } else if (channel == PCIE_CHANNEL) {
10043 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10044 pcie_device = device;
10045 tid = pcie_device->id;
10046 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
10047 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10048 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
10049 if (rc)
10050 _scsih_pcie_device_remove(ioc, pcie_device);
8369 } else { 10051 } else {
8370 spin_lock_irqsave(&ioc->sas_device_lock, flags); 10052 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8371 sas_device = device; 10053 sas_device = device;
@@ -8498,6 +10180,101 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
8498} 10180}
8499 10181
8500/** 10182/**
10183 * get_next_pcie_device - Get the next pcie device
10184 * @ioc: per adapter object
10185 *
10186 * Get the next pcie device from pcie_device_init_list list.
10187 *
10188 * Returns pcie device structure if pcie_device_init_list list is not empty
10189 * otherwise returns NULL
10190 */
10191static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
10192{
10193 struct _pcie_device *pcie_device = NULL;
10194 unsigned long flags;
10195
10196 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10197 if (!list_empty(&ioc->pcie_device_init_list)) {
10198 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
10199 struct _pcie_device, list);
10200 pcie_device_get(pcie_device);
10201 }
10202 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10203
10204 return pcie_device;
10205}
10206
10207/**
10208 * pcie_device_make_active - Add pcie device to pcie_device_list list
10209 * @ioc: per adapter object
10210 * @pcie_device: pcie device object
10211 *
10212 * Add the pcie device which has registered with SCSI Transport Later to
10213 * pcie_device_list list
10214 */
10215static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10216 struct _pcie_device *pcie_device)
10217{
10218 unsigned long flags;
10219
10220 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10221
10222 if (!list_empty(&pcie_device->list)) {
10223 list_del_init(&pcie_device->list);
10224 pcie_device_put(pcie_device);
10225 }
10226 pcie_device_get(pcie_device);
10227 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10228
10229 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10230}
10231
10232/**
10233 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10234 * @ioc: per adapter object
10235 *
10236 * Called during initial loading of the driver.
10237 */
10238static void
10239_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10240{
10241 struct _pcie_device *pcie_device;
10242 int rc;
10243
10244 /* PCIe Device List */
10245 while ((pcie_device = get_next_pcie_device(ioc))) {
10246 if (pcie_device->starget) {
10247 pcie_device_put(pcie_device);
10248 continue;
10249 }
10250 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10251 pcie_device->id, 0);
10252 if (rc) {
10253 _scsih_pcie_device_remove(ioc, pcie_device);
10254 pcie_device_put(pcie_device);
10255 continue;
10256 } else if (!pcie_device->starget) {
10257 /*
10258 * When async scanning is enabled, its not possible to
10259 * remove devices while scanning is turned on due to an
10260 * oops in scsi_sysfs_add_sdev()->add_device()->
10261 * sysfs_addrm_start()
10262 */
10263 if (!ioc->is_driver_loading) {
10264 /* TODO-- Need to find out whether this condition will
10265 * occur or not
10266 */
10267 _scsih_pcie_device_remove(ioc, pcie_device);
10268 pcie_device_put(pcie_device);
10269 continue;
10270 }
10271 }
10272 pcie_device_make_active(ioc, pcie_device);
10273 pcie_device_put(pcie_device);
10274 }
10275}
10276
10277/**
8501 * _scsih_probe_devices - probing for devices 10278 * _scsih_probe_devices - probing for devices
8502 * @ioc: per adapter object 10279 * @ioc: per adapter object
8503 * 10280 *
@@ -8525,8 +10302,10 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
8525 _scsih_probe_sas(ioc); 10302 _scsih_probe_sas(ioc);
8526 _scsih_probe_raid(ioc); 10303 _scsih_probe_raid(ioc);
8527 } 10304 }
8528 } else 10305 } else {
8529 _scsih_probe_sas(ioc); 10306 _scsih_probe_sas(ioc);
10307 _scsih_probe_pcie(ioc);
10308 }
8530} 10309}
8531 10310
8532/** 10311/**
@@ -8740,6 +10519,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
8740 case MPI26_MFGPAGE_DEVID_SAS3516: 10519 case MPI26_MFGPAGE_DEVID_SAS3516:
8741 case MPI26_MFGPAGE_DEVID_SAS3516_1: 10520 case MPI26_MFGPAGE_DEVID_SAS3516_1:
8742 case MPI26_MFGPAGE_DEVID_SAS3416: 10521 case MPI26_MFGPAGE_DEVID_SAS3416:
10522 case MPI26_MFGPAGE_DEVID_SAS3616:
8743 return MPI26_VERSION; 10523 return MPI26_VERSION;
8744 } 10524 }
8745 return 0; 10525 return 0;
@@ -8817,6 +10597,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8817 case MPI26_MFGPAGE_DEVID_SAS3516: 10597 case MPI26_MFGPAGE_DEVID_SAS3516:
8818 case MPI26_MFGPAGE_DEVID_SAS3516_1: 10598 case MPI26_MFGPAGE_DEVID_SAS3516_1:
8819 case MPI26_MFGPAGE_DEVID_SAS3416: 10599 case MPI26_MFGPAGE_DEVID_SAS3416:
10600 case MPI26_MFGPAGE_DEVID_SAS3616:
8820 ioc->is_gen35_ioc = 1; 10601 ioc->is_gen35_ioc = 1;
8821 break; 10602 break;
8822 default: 10603 default:
@@ -8867,11 +10648,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8867 spin_lock_init(&ioc->sas_node_lock); 10648 spin_lock_init(&ioc->sas_node_lock);
8868 spin_lock_init(&ioc->fw_event_lock); 10649 spin_lock_init(&ioc->fw_event_lock);
8869 spin_lock_init(&ioc->raid_device_lock); 10650 spin_lock_init(&ioc->raid_device_lock);
10651 spin_lock_init(&ioc->pcie_device_lock);
8870 spin_lock_init(&ioc->diag_trigger_lock); 10652 spin_lock_init(&ioc->diag_trigger_lock);
8871 10653
8872 INIT_LIST_HEAD(&ioc->sas_device_list); 10654 INIT_LIST_HEAD(&ioc->sas_device_list);
8873 INIT_LIST_HEAD(&ioc->sas_device_init_list); 10655 INIT_LIST_HEAD(&ioc->sas_device_init_list);
8874 INIT_LIST_HEAD(&ioc->sas_expander_list); 10656 INIT_LIST_HEAD(&ioc->sas_expander_list);
10657 INIT_LIST_HEAD(&ioc->pcie_device_list);
10658 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
8875 INIT_LIST_HEAD(&ioc->fw_event_list); 10659 INIT_LIST_HEAD(&ioc->fw_event_list);
8876 INIT_LIST_HEAD(&ioc->raid_device_list); 10660 INIT_LIST_HEAD(&ioc->raid_device_list);
8877 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 10661 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
@@ -9273,6 +11057,9 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
9273 PCI_ANY_ID, PCI_ANY_ID }, 11057 PCI_ANY_ID, PCI_ANY_ID },
9274 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416, 11058 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
9275 PCI_ANY_ID, PCI_ANY_ID }, 11059 PCI_ANY_ID, PCI_ANY_ID },
11060 /* Mercator ~ 3616*/
11061 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
11062 PCI_ANY_ID, PCI_ANY_ID },
9276 {0} /* Terminating entry */ 11063 {0} /* Terminating entry */
9277}; 11064};
9278MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table); 11065MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index 540bd5005149..ced7d9f6274c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -299,7 +299,7 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
299 */ 299 */
300void 300void
301mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, 301mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
302 struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request, 302 struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
303 u16 smid) 303 u16 smid)
304{ 304{
305 sector_t v_lba, p_lba, stripe_off, column, io_size; 305 sector_t v_lba, p_lba, stripe_off, column, io_size;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 107e191bf023..8620ac5d6e41 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -604,7 +604,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
604 * check bus line 604 * check bus line
605 */ 605 */
606 phase = nsp32_read1(base, SCSI_BUS_MONITOR); 606 phase = nsp32_read1(base, SCSI_BUS_MONITOR);
607 if(((phase & BUSMON_BSY) == 1) || (phase & BUSMON_SEL) == 1) { 607 if ((phase & BUSMON_BSY) || (phase & BUSMON_SEL)) {
608 nsp32_msg(KERN_WARNING, "bus busy"); 608 nsp32_msg(KERN_WARNING, "bus busy");
609 SCpnt->result = DID_BUS_BUSY << 16; 609 SCpnt->result = DID_BUS_BUSY << 16;
610 status = 1; 610 status = 1;
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index be8269c8d127..596f3ff965f5 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -98,6 +98,58 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
98 } 98 }
99} 99}
100static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); 100static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
101
102/**
103 * pm8001_ctl_ila_version_show - ila version
104 * @cdev: pointer to embedded class device
105 * @buf: the buffer returned
106 *
107 * A sysfs 'read-only' shost attribute.
108 */
109static ssize_t pm8001_ctl_ila_version_show(struct device *cdev,
110 struct device_attribute *attr, char *buf)
111{
112 struct Scsi_Host *shost = class_to_shost(cdev);
113 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
114 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
115
116 if (pm8001_ha->chip_id != chip_8001) {
117 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
118 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 24),
119 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 16),
120 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 8),
121 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version));
122 }
123 return 0;
124}
125static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL);
126
127/**
128 * pm8001_ctl_inactive_fw_version_show - Inacative firmware version number
129 * @cdev: pointer to embedded class device
130 * @buf: the buffer returned
131 *
132 * A sysfs 'read-only' shost attribute.
133 */
134static ssize_t pm8001_ctl_inactive_fw_version_show(struct device *cdev,
135 struct device_attribute *attr, char *buf)
136{
137 struct Scsi_Host *shost = class_to_shost(cdev);
138 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
139 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
140
141 if (pm8001_ha->chip_id != chip_8001) {
142 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
143 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 24),
144 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 16),
145 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 8),
146 (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
147 }
148 return 0;
149}
150static
151DEVICE_ATTR(inc_fw_ver, 0444, pm8001_ctl_inactive_fw_version_show, NULL);
152
101/** 153/**
102 * pm8001_ctl_max_out_io_show - max outstanding io supported 154 * pm8001_ctl_max_out_io_show - max outstanding io supported
103 * @cdev: pointer to embedded class device 155 * @cdev: pointer to embedded class device
@@ -748,6 +800,8 @@ struct device_attribute *pm8001_host_attrs[] = {
748 &dev_attr_bios_version, 800 &dev_attr_bios_version,
749 &dev_attr_ib_log, 801 &dev_attr_ib_log,
750 &dev_attr_ob_log, 802 &dev_attr_ob_log,
803 &dev_attr_ila_version,
804 &dev_attr_inc_fw_ver,
751 NULL, 805 NULL,
752}; 806};
753 807
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 10546faac58c..db88a8e7ee0e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3198,19 +3198,28 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3198 3198
3199int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) 3199int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
3200{ 3200{
3201 u32 tag;
3201 struct local_phy_ctl_resp *pPayload = 3202 struct local_phy_ctl_resp *pPayload =
3202 (struct local_phy_ctl_resp *)(piomb + 4); 3203 (struct local_phy_ctl_resp *)(piomb + 4);
3203 u32 status = le32_to_cpu(pPayload->status); 3204 u32 status = le32_to_cpu(pPayload->status);
3204 u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS; 3205 u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
3205 u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS; 3206 u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
3207 tag = le32_to_cpu(pPayload->tag);
3206 if (status != 0) { 3208 if (status != 0) {
3207 PM8001_MSG_DBG(pm8001_ha, 3209 PM8001_MSG_DBG(pm8001_ha,
3208 pm8001_printk("%x phy execute %x phy op failed!\n", 3210 pm8001_printk("%x phy execute %x phy op failed!\n",
3209 phy_id, phy_op)); 3211 phy_id, phy_op));
3210 } else 3212 } else {
3211 PM8001_MSG_DBG(pm8001_ha, 3213 PM8001_MSG_DBG(pm8001_ha,
3212 pm8001_printk("%x phy execute %x phy op success!\n", 3214 pm8001_printk("%x phy execute %x phy op success!\n",
3213 phy_id, phy_op)); 3215 phy_id, phy_op));
3216 pm8001_ha->phy[phy_id].reset_success = true;
3217 }
3218 if (pm8001_ha->phy[phy_id].enable_completion) {
3219 complete(pm8001_ha->phy[phy_id].enable_completion);
3220 pm8001_ha->phy[phy_id].enable_completion = NULL;
3221 }
3222 pm8001_tag_free(pm8001_ha, tag);
3214 return 0; 3223 return 0;
3215} 3224}
3216 3225
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 0e013f76b582..7a697ca68501 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -132,7 +132,7 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
132 sas_phy->oob_mode = OOB_NOT_CONNECTED; 132 sas_phy->oob_mode = OOB_NOT_CONNECTED;
133 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 133 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
134 sas_phy->id = phy_id; 134 sas_phy->id = phy_id;
135 sas_phy->sas_addr = &pm8001_ha->sas_addr[0]; 135 sas_phy->sas_addr = (u8 *)&phy->dev_sas_addr;
136 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 136 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
137 sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata; 137 sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
138 sas_phy->lldd_phy = phy; 138 sas_phy->lldd_phy = phy;
@@ -591,10 +591,12 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
591 for (i = 0; i < chip_info->n_phy; i++) { 591 for (i = 0; i < chip_info->n_phy; i++) {
592 sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy; 592 sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
593 sha->sas_port[i] = &pm8001_ha->port[i].sas_port; 593 sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
594 sha->sas_phy[i]->sas_addr =
595 (u8 *)&pm8001_ha->phy[i].dev_sas_addr;
594 } 596 }
595 sha->sas_ha_name = DRV_NAME; 597 sha->sas_ha_name = DRV_NAME;
596 sha->dev = pm8001_ha->dev; 598 sha->dev = pm8001_ha->dev;
597 599 sha->strict_wide_ports = 1;
598 sha->lldd_module = THIS_MODULE; 600 sha->lldd_module = THIS_MODULE;
599 sha->sas_addr = &pm8001_ha->sas_addr[0]; 601 sha->sas_addr = &pm8001_ha->sas_addr[0];
600 sha->num_phys = chip_info->n_phy; 602 sha->num_phys = chip_info->n_phy;
@@ -611,6 +613,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
611static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) 613static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
612{ 614{
613 u8 i, j; 615 u8 i, j;
616 u8 sas_add[8];
614#ifdef PM8001_READ_VPD 617#ifdef PM8001_READ_VPD
615 /* For new SPC controllers WWN is stored in flash vpd 618 /* For new SPC controllers WWN is stored in flash vpd
616 * For SPC/SPCve controllers WWN is stored in EEPROM 619 * For SPC/SPCve controllers WWN is stored in EEPROM
@@ -672,10 +675,12 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
672 pm8001_ha->sas_addr[j] = 675 pm8001_ha->sas_addr[j] =
673 payload.func_specific[0x804 + i]; 676 payload.func_specific[0x804 + i];
674 } 677 }
675 678 memcpy(sas_add, pm8001_ha->sas_addr, SAS_ADDR_SIZE);
676 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 679 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
680 if (i && ((i % 4) == 0))
681 sas_add[7] = sas_add[7] + 4;
677 memcpy(&pm8001_ha->phy[i].dev_sas_addr, 682 memcpy(&pm8001_ha->phy[i].dev_sas_addr,
678 pm8001_ha->sas_addr, SAS_ADDR_SIZE); 683 sas_add, SAS_ADDR_SIZE);
679 PM8001_INIT_DBG(pm8001_ha, 684 PM8001_INIT_DBG(pm8001_ha,
680 pm8001_printk("phy %d sas_addr = %016llx\n", i, 685 pm8001_printk("phy %d sas_addr = %016llx\n", i,
681 pm8001_ha->phy[i].dev_sas_addr)); 686 pm8001_ha->phy[i].dev_sas_addr));
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 7b2f92ae9866..0e294e80c169 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1158,40 +1158,42 @@ int pm8001_query_task(struct sas_task *task)
1158int pm8001_abort_task(struct sas_task *task) 1158int pm8001_abort_task(struct sas_task *task)
1159{ 1159{
1160 unsigned long flags; 1160 unsigned long flags;
1161 u32 tag = 0xdeadbeef; 1161 u32 tag;
1162 u32 device_id; 1162 u32 device_id;
1163 struct domain_device *dev ; 1163 struct domain_device *dev ;
1164 struct pm8001_hba_info *pm8001_ha = NULL; 1164 struct pm8001_hba_info *pm8001_ha;
1165 struct pm8001_ccb_info *ccb;
1166 struct scsi_lun lun; 1165 struct scsi_lun lun;
1167 struct pm8001_device *pm8001_dev; 1166 struct pm8001_device *pm8001_dev;
1168 struct pm8001_tmf_task tmf_task; 1167 struct pm8001_tmf_task tmf_task;
1169 int rc = TMF_RESP_FUNC_FAILED; 1168 int rc = TMF_RESP_FUNC_FAILED, ret;
1169 u32 phy_id;
1170 struct sas_task_slow slow_task;
1170 if (unlikely(!task || !task->lldd_task || !task->dev)) 1171 if (unlikely(!task || !task->lldd_task || !task->dev))
1171 return rc; 1172 return TMF_RESP_FUNC_FAILED;
1173 dev = task->dev;
1174 pm8001_dev = dev->lldd_dev;
1175 pm8001_ha = pm8001_find_ha_by_dev(dev);
1176 device_id = pm8001_dev->device_id;
1177 phy_id = pm8001_dev->attached_phy;
1178 rc = pm8001_find_tag(task, &tag);
1179 if (rc == 0) {
1180 pm8001_printk("no tag for task:%p\n", task);
1181 return TMF_RESP_FUNC_FAILED;
1182 }
1172 spin_lock_irqsave(&task->task_state_lock, flags); 1183 spin_lock_irqsave(&task->task_state_lock, flags);
1173 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1184 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1174 spin_unlock_irqrestore(&task->task_state_lock, flags); 1185 spin_unlock_irqrestore(&task->task_state_lock, flags);
1175 rc = TMF_RESP_FUNC_COMPLETE; 1186 return TMF_RESP_FUNC_COMPLETE;
1176 goto out; 1187 }
1188 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1189 if (task->slow_task == NULL) {
1190 init_completion(&slow_task.completion);
1191 task->slow_task = &slow_task;
1177 } 1192 }
1178 spin_unlock_irqrestore(&task->task_state_lock, flags); 1193 spin_unlock_irqrestore(&task->task_state_lock, flags);
1179 if (task->task_proto & SAS_PROTOCOL_SSP) { 1194 if (task->task_proto & SAS_PROTOCOL_SSP) {
1180 struct scsi_cmnd *cmnd = task->uldd_task; 1195 struct scsi_cmnd *cmnd = task->uldd_task;
1181 dev = task->dev;
1182 ccb = task->lldd_task;
1183 pm8001_dev = dev->lldd_dev;
1184 pm8001_ha = pm8001_find_ha_by_dev(dev);
1185 int_to_scsilun(cmnd->device->lun, &lun); 1196 int_to_scsilun(cmnd->device->lun, &lun);
1186 rc = pm8001_find_tag(task, &tag);
1187 if (rc == 0) {
1188 printk(KERN_INFO "No such tag in %s\n", __func__);
1189 rc = TMF_RESP_FUNC_FAILED;
1190 return rc;
1191 }
1192 device_id = pm8001_dev->device_id;
1193 PM8001_EH_DBG(pm8001_ha,
1194 pm8001_printk("abort io to deviceid= %d\n", device_id));
1195 tmf_task.tmf = TMF_ABORT_TASK; 1197 tmf_task.tmf = TMF_ABORT_TASK;
1196 tmf_task.tag_of_task_to_be_managed = tag; 1198 tmf_task.tag_of_task_to_be_managed = tag;
1197 rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); 1199 rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
@@ -1199,33 +1201,77 @@ int pm8001_abort_task(struct sas_task *task)
1199 pm8001_dev->sas_device, 0, tag); 1201 pm8001_dev->sas_device, 0, tag);
1200 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1202 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1201 task->task_proto & SAS_PROTOCOL_STP) { 1203 task->task_proto & SAS_PROTOCOL_STP) {
1202 dev = task->dev; 1204 if (pm8001_ha->chip_id == chip_8006) {
1203 pm8001_dev = dev->lldd_dev; 1205 DECLARE_COMPLETION_ONSTACK(completion_reset);
1204 pm8001_ha = pm8001_find_ha_by_dev(dev); 1206 DECLARE_COMPLETION_ONSTACK(completion);
1205 rc = pm8001_find_tag(task, &tag); 1207 struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
1206 if (rc == 0) { 1208
1207 printk(KERN_INFO "No such tag in %s\n", __func__); 1209 /* 1. Set Device state as Recovery */
1208 rc = TMF_RESP_FUNC_FAILED; 1210 pm8001_dev->setds_completion = &completion;
1209 return rc; 1211 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1212 pm8001_dev, 0x03);
1213 wait_for_completion(&completion);
1214
1215 /* 2. Send Phy Control Hard Reset */
1216 reinit_completion(&completion);
1217 phy->reset_success = false;
1218 phy->enable_completion = &completion;
1219 phy->reset_completion = &completion_reset;
1220 ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
1221 PHY_HARD_RESET);
1222 if (ret)
1223 goto out;
1224 PM8001_MSG_DBG(pm8001_ha,
1225 pm8001_printk("Waiting for local phy ctl\n"));
1226 wait_for_completion(&completion);
1227 if (!phy->reset_success)
1228 goto out;
1229
1230 /* 3. Wait for Port Reset complete / Port reset TMO */
1231 PM8001_MSG_DBG(pm8001_ha,
1232 pm8001_printk("Waiting for Port reset\n"));
1233 wait_for_completion(&completion_reset);
1234 if (phy->port_reset_status)
1235 goto out;
1236
1237 /*
1238 * 4. SATA Abort ALL
1239 * we wait for the task to be aborted so that the task
1240 * is removed from the ccb. on success the caller is
1241 * going to free the task.
1242 */
1243 ret = pm8001_exec_internal_task_abort(pm8001_ha,
1244 pm8001_dev, pm8001_dev->sas_device, 1, tag);
1245 if (ret)
1246 goto out;
1247 ret = wait_for_completion_timeout(
1248 &task->slow_task->completion,
1249 PM8001_TASK_TIMEOUT * HZ);
1250 if (!ret)
1251 goto out;
1252
1253 /* 5. Set Device State as Operational */
1254 reinit_completion(&completion);
1255 pm8001_dev->setds_completion = &completion;
1256 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1257 pm8001_dev, 0x01);
1258 wait_for_completion(&completion);
1259 } else {
1260 rc = pm8001_exec_internal_task_abort(pm8001_ha,
1261 pm8001_dev, pm8001_dev->sas_device, 0, tag);
1210 } 1262 }
1211 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, 1263 rc = TMF_RESP_FUNC_COMPLETE;
1212 pm8001_dev->sas_device, 0, tag);
1213 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 1264 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
1214 /* SMP */ 1265 /* SMP */
1215 dev = task->dev;
1216 pm8001_dev = dev->lldd_dev;
1217 pm8001_ha = pm8001_find_ha_by_dev(dev);
1218 rc = pm8001_find_tag(task, &tag);
1219 if (rc == 0) {
1220 printk(KERN_INFO "No such tag in %s\n", __func__);
1221 rc = TMF_RESP_FUNC_FAILED;
1222 return rc;
1223 }
1224 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, 1266 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1225 pm8001_dev->sas_device, 0, tag); 1267 pm8001_dev->sas_device, 0, tag);
1226 1268
1227 } 1269 }
1228out: 1270out:
1271 spin_lock_irqsave(&task->task_state_lock, flags);
1272 if (task->slow_task == &slow_task)
1273 task->slow_task = NULL;
1274 spin_unlock_irqrestore(&task->task_state_lock, flags);
1229 if (rc != TMF_RESP_FUNC_COMPLETE) 1275 if (rc != TMF_RESP_FUNC_COMPLETE)
1230 pm8001_printk("rc= %d\n", rc); 1276 pm8001_printk("rc= %d\n", rc);
1231 return rc; 1277 return rc;
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index e81a8fa7ef1a..80b4dd6df0c2 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -263,8 +263,15 @@ struct pm8001_phy {
263 u8 phy_state; 263 u8 phy_state;
264 enum sas_linkrate minimum_linkrate; 264 enum sas_linkrate minimum_linkrate;
265 enum sas_linkrate maximum_linkrate; 265 enum sas_linkrate maximum_linkrate;
266 struct completion *reset_completion;
267 bool port_reset_status;
268 bool reset_success;
266}; 269};
267 270
271/* port reset status */
272#define PORT_RESET_SUCCESS 0x00
273#define PORT_RESET_TMO 0x01
274
268struct pm8001_device { 275struct pm8001_device {
269 enum sas_device_type dev_type; 276 enum sas_device_type dev_type;
270 struct domain_device *sas_device; 277 struct domain_device *sas_device;
@@ -404,6 +411,8 @@ union main_cfg_table {
404 u32 port_recovery_timer; 411 u32 port_recovery_timer;
405 u32 interrupt_reassertion_delay; 412 u32 interrupt_reassertion_delay;
406 u32 fatal_n_non_fatal_dump; /* 0x28 */ 413 u32 fatal_n_non_fatal_dump; /* 0x28 */
414 u32 ila_version;
415 u32 inc_fw_version;
407 } pm80xx_tbl; 416 } pm80xx_tbl;
408}; 417};
409 418
@@ -531,6 +540,7 @@ struct pm8001_hba_info {
531 u32 smp_exp_mode; 540 u32 smp_exp_mode;
532 const struct firmware *fw_image; 541 const struct firmware *fw_image;
533 struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; 542 struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
543 u32 reset_in_progress;
534}; 544};
535 545
536struct pm8001_work { 546struct pm8001_work {
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index eb4fee61df72..42f0405601ad 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -312,6 +312,11 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
312 /* read port recover and reset timeout */ 312 /* read port recover and reset timeout */
313 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer = 313 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer =
314 pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER); 314 pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER);
315 /* read ILA and inactive firmware version */
316 pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version =
317 pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
318 pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
319 pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
315} 320}
316 321
317/** 322/**
@@ -592,6 +597,12 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
592 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000; 597 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000;
593 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |= 598 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
594 PORT_RECOVERY_TIMEOUT; 599 PORT_RECOVERY_TIMEOUT;
600 if (pm8001_ha->chip_id == chip_8006) {
601 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
602 0x0000ffff;
603 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
604 0x140000;
605 }
595 pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, 606 pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
596 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); 607 pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
597} 608}
@@ -1478,6 +1489,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
1478 ccb->device = pm8001_ha_dev; 1489 ccb->device = pm8001_ha_dev;
1479 ccb->ccb_tag = ccb_tag; 1490 ccb->ccb_tag = ccb_tag;
1480 ccb->task = task; 1491 ccb->task = task;
1492 ccb->n_elem = 0;
1481 pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; 1493 pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
1482 pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; 1494 pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
1483 1495
@@ -1770,6 +1782,8 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1770 "task 0x%p done with io_status 0x%x resp 0x%x " 1782 "task 0x%p done with io_status 0x%x resp 0x%x "
1771 "stat 0x%x but aborted by upper layer!\n", 1783 "stat 0x%x but aborted by upper layer!\n",
1772 t, status, ts->resp, ts->stat)); 1784 t, status, ts->resp, ts->stat));
1785 if (t->slow_task)
1786 complete(&t->slow_task->completion);
1773 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 1787 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
1774 } else { 1788 } else {
1775 spin_unlock_irqrestore(&t->task_state_lock, flags); 1789 spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -3033,10 +3047,10 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3033 3047
3034 struct pm8001_port *port = &pm8001_ha->port[port_id]; 3048 struct pm8001_port *port = &pm8001_ha->port[port_id];
3035 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 3049 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3050 u32 port_sata = (phy->phy_type & PORT_TYPE_SATA);
3036 port->port_state = portstate; 3051 port->port_state = portstate;
3037 phy->identify.device_type = 0; 3052 phy->identify.device_type = 0;
3038 phy->phy_attached = 0; 3053 phy->phy_attached = 0;
3039 memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
3040 switch (portstate) { 3054 switch (portstate) {
3041 case PORT_VALID: 3055 case PORT_VALID:
3042 break; 3056 break;
@@ -3045,7 +3059,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3045 pm8001_printk(" PortInvalid portID %d\n", port_id)); 3059 pm8001_printk(" PortInvalid portID %d\n", port_id));
3046 PM8001_MSG_DBG(pm8001_ha, 3060 PM8001_MSG_DBG(pm8001_ha,
3047 pm8001_printk(" Last phy Down and port invalid\n")); 3061 pm8001_printk(" Last phy Down and port invalid\n"));
3048 if (phy->phy_type & PORT_TYPE_SATA) { 3062 if (port_sata) {
3049 phy->phy_type = 0; 3063 phy->phy_type = 0;
3050 port->port_attached = 0; 3064 port->port_attached = 0;
3051 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3065 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
@@ -3067,7 +3081,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3067 pm8001_printk(" Phy Down and PORT_LOSTCOMM\n")); 3081 pm8001_printk(" Phy Down and PORT_LOSTCOMM\n"));
3068 PM8001_MSG_DBG(pm8001_ha, 3082 PM8001_MSG_DBG(pm8001_ha,
3069 pm8001_printk(" Last phy Down and port invalid\n")); 3083 pm8001_printk(" Last phy Down and port invalid\n"));
3070 if (phy->phy_type & PORT_TYPE_SATA) { 3084 if (port_sata) {
3071 port->port_attached = 0; 3085 port->port_attached = 0;
3072 phy->phy_type = 0; 3086 phy->phy_type = 0;
3073 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3087 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
@@ -3083,6 +3097,11 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3083 break; 3097 break;
3084 3098
3085 } 3099 }
3100 if (port_sata && (portstate != PORT_IN_RESET)) {
3101 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
3102
3103 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
3104 }
3086} 3105}
3087 3106
3088static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3107static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3185,12 +3204,14 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3185 case HW_EVENT_PHY_DOWN: 3204 case HW_EVENT_PHY_DOWN:
3186 PM8001_MSG_DBG(pm8001_ha, 3205 PM8001_MSG_DBG(pm8001_ha,
3187 pm8001_printk("HW_EVENT_PHY_DOWN\n")); 3206 pm8001_printk("HW_EVENT_PHY_DOWN\n"));
3188 if (phy->phy_type & PORT_TYPE_SATA) 3207 hw_event_phy_down(pm8001_ha, piomb);
3189 sas_ha->notify_phy_event(&phy->sas_phy, 3208 if (pm8001_ha->reset_in_progress) {
3190 PHYE_LOSS_OF_SIGNAL); 3209 PM8001_MSG_DBG(pm8001_ha,
3210 pm8001_printk("Reset in progress\n"));
3211 return 0;
3212 }
3191 phy->phy_attached = 0; 3213 phy->phy_attached = 0;
3192 phy->phy_state = 0; 3214 phy->phy_state = 0;
3193 hw_event_phy_down(pm8001_ha, piomb);
3194 break; 3215 break;
3195 case HW_EVENT_PORT_INVALID: 3216 case HW_EVENT_PORT_INVALID:
3196 PM8001_MSG_DBG(pm8001_ha, 3217 PM8001_MSG_DBG(pm8001_ha,
@@ -3297,9 +3318,17 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3297 case HW_EVENT_PORT_RESET_TIMER_TMO: 3318 case HW_EVENT_PORT_RESET_TIMER_TMO:
3298 PM8001_MSG_DBG(pm8001_ha, 3319 PM8001_MSG_DBG(pm8001_ha,
3299 pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n")); 3320 pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
3321 pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
3322 port_id, phy_id, 0, 0);
3300 sas_phy_disconnected(sas_phy); 3323 sas_phy_disconnected(sas_phy);
3301 phy->phy_attached = 0; 3324 phy->phy_attached = 0;
3302 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); 3325 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3326 if (pm8001_ha->phy[phy_id].reset_completion) {
3327 pm8001_ha->phy[phy_id].port_reset_status =
3328 PORT_RESET_TMO;
3329 complete(pm8001_ha->phy[phy_id].reset_completion);
3330 pm8001_ha->phy[phy_id].reset_completion = NULL;
3331 }
3303 break; 3332 break;
3304 case HW_EVENT_PORT_RECOVERY_TIMER_TMO: 3333 case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
3305 PM8001_MSG_DBG(pm8001_ha, 3334 PM8001_MSG_DBG(pm8001_ha,
@@ -3324,6 +3353,12 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
3324 case HW_EVENT_PORT_RESET_COMPLETE: 3353 case HW_EVENT_PORT_RESET_COMPLETE:
3325 PM8001_MSG_DBG(pm8001_ha, 3354 PM8001_MSG_DBG(pm8001_ha,
3326 pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n")); 3355 pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
3356 if (pm8001_ha->phy[phy_id].reset_completion) {
3357 pm8001_ha->phy[phy_id].port_reset_status =
3358 PORT_RESET_SUCCESS;
3359 complete(pm8001_ha->phy[phy_id].reset_completion);
3360 pm8001_ha->phy[phy_id].reset_completion = NULL;
3361 }
3327 break; 3362 break;
3328 case EVENT_BROADCAST_ASYNCH_EVENT: 3363 case EVENT_BROADCAST_ASYNCH_EVENT:
3329 PM8001_MSG_DBG(pm8001_ha, 3364 PM8001_MSG_DBG(pm8001_ha,
@@ -4389,7 +4424,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
4389 payload.sas_identify.dev_type = SAS_END_DEVICE; 4424 payload.sas_identify.dev_type = SAS_END_DEVICE;
4390 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; 4425 payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
4391 memcpy(payload.sas_identify.sas_addr, 4426 memcpy(payload.sas_identify.sas_addr,
4392 pm8001_ha->sas_addr, SAS_ADDR_SIZE); 4427 &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
4393 payload.sas_identify.phy_id = phy_id; 4428 payload.sas_identify.phy_id = phy_id;
4394 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); 4429 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
4395 return ret; 4430 return ret;
@@ -4496,17 +4531,20 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
4496static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, 4531static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4497 u32 phyId, u32 phy_op) 4532 u32 phyId, u32 phy_op)
4498{ 4533{
4534 u32 tag;
4535 int rc;
4499 struct local_phy_ctl_req payload; 4536 struct local_phy_ctl_req payload;
4500 struct inbound_queue_table *circularQ; 4537 struct inbound_queue_table *circularQ;
4501 int ret;
4502 u32 opc = OPC_INB_LOCAL_PHY_CONTROL; 4538 u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
4503 memset(&payload, 0, sizeof(payload)); 4539 memset(&payload, 0, sizeof(payload));
4540 rc = pm8001_tag_alloc(pm8001_ha, &tag);
4541 if (rc)
4542 return rc;
4504 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4543 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4505 payload.tag = cpu_to_le32(1); 4544 payload.tag = cpu_to_le32(tag);
4506 payload.phyop_phyid = 4545 payload.phyop_phyid =
4507 cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); 4546 cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
4508 ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); 4547 return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
4509 return ret;
4510} 4548}
4511 4549
4512static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) 4550static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 7a443bad6163..889e69ce3689 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -167,7 +167,7 @@
167#define LINKMODE_AUTO (0x03 << 12) 167#define LINKMODE_AUTO (0x03 << 12)
168#define LINKRATE_15 (0x01 << 8) 168#define LINKRATE_15 (0x01 << 8)
169#define LINKRATE_30 (0x02 << 8) 169#define LINKRATE_30 (0x02 << 8)
170#define LINKRATE_60 (0x06 << 8) 170#define LINKRATE_60 (0x04 << 8)
171#define LINKRATE_120 (0x08 << 8) 171#define LINKRATE_120 (0x08 << 8)
172 172
173/* phy_profile */ 173/* phy_profile */
@@ -229,6 +229,102 @@
229#define IT_NEXUS_TIMEOUT 0x7D0 229#define IT_NEXUS_TIMEOUT 0x7D0
230#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30) 230#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30)
231 231
232#ifdef __LITTLE_ENDIAN_BITFIELD
233struct sas_identify_frame_local {
234 /* Byte 0 */
235 u8 frame_type:4;
236 u8 dev_type:3;
237 u8 _un0:1;
238
239 /* Byte 1 */
240 u8 _un1;
241
242 /* Byte 2 */
243 union {
244 struct {
245 u8 _un20:1;
246 u8 smp_iport:1;
247 u8 stp_iport:1;
248 u8 ssp_iport:1;
249 u8 _un247:4;
250 };
251 u8 initiator_bits;
252 };
253
254 /* Byte 3 */
255 union {
256 struct {
257 u8 _un30:1;
258 u8 smp_tport:1;
259 u8 stp_tport:1;
260 u8 ssp_tport:1;
261 u8 _un347:4;
262 };
263 u8 target_bits;
264 };
265
266 /* Byte 4 - 11 */
267 u8 _un4_11[8];
268
269 /* Byte 12 - 19 */
270 u8 sas_addr[SAS_ADDR_SIZE];
271
272 /* Byte 20 */
273 u8 phy_id;
274
275 u8 _un21_27[7];
276
277} __packed;
278
279#elif defined(__BIG_ENDIAN_BITFIELD)
280struct sas_identify_frame_local {
281 /* Byte 0 */
282 u8 _un0:1;
283 u8 dev_type:3;
284 u8 frame_type:4;
285
286 /* Byte 1 */
287 u8 _un1;
288
289 /* Byte 2 */
290 union {
291 struct {
292 u8 _un247:4;
293 u8 ssp_iport:1;
294 u8 stp_iport:1;
295 u8 smp_iport:1;
296 u8 _un20:1;
297 };
298 u8 initiator_bits;
299 };
300
301 /* Byte 3 */
302 union {
303 struct {
304 u8 _un347:4;
305 u8 ssp_tport:1;
306 u8 stp_tport:1;
307 u8 smp_tport:1;
308 u8 _un30:1;
309 };
310 u8 target_bits;
311 };
312
313 /* Byte 4 - 11 */
314 u8 _un4_11[8];
315
316 /* Byte 12 - 19 */
317 u8 sas_addr[SAS_ADDR_SIZE];
318
319 /* Byte 20 */
320 u8 phy_id;
321
322 u8 _un21_27[7];
323} __packed;
324#else
325#error "Bitfield order not defined!"
326#endif
327
232struct mpi_msg_hdr { 328struct mpi_msg_hdr {
233 __le32 header; /* Bits [11:0] - Message operation code */ 329 __le32 header; /* Bits [11:0] - Message operation code */
234 /* Bits [15:12] - Message Category */ 330 /* Bits [15:12] - Message Category */
@@ -248,7 +344,7 @@ struct mpi_msg_hdr {
248struct phy_start_req { 344struct phy_start_req {
249 __le32 tag; 345 __le32 tag;
250 __le32 ase_sh_lm_slr_phyid; 346 __le32 ase_sh_lm_slr_phyid;
251 struct sas_identify_frame sas_identify; /* 28 Bytes */ 347 struct sas_identify_frame_local sas_identify; /* 28 Bytes */
252 __le32 spasti; 348 __le32 spasti;
253 u32 reserved[21]; 349 u32 reserved[21];
254} __attribute__((packed, aligned(4))); 350} __attribute__((packed, aligned(4)));
@@ -1349,6 +1445,8 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
1349#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */ 1445#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
1350#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */ 1446#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */
1351#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */ 1447#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */
1448#define MAIN_MPI_ILA_RELEASE_TYPE 0xA4 /* DWORD 0x29 */
1449#define MAIN_MPI_INACTIVE_FW_VERSION 0XB0 /* DWORD 0x2C */
1352 1450
1353/* Gereral Status Table offset - byte offset */ 1451/* Gereral Status Table offset - byte offset */
1354#define GST_GSTLEN_MPIS_OFFSET 0x00 1452#define GST_GSTLEN_MPIS_OFFSET 0x00
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 93d54acd4a22..bd302d3cb9af 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -92,7 +92,6 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
92 struct iscsi_text_response_hdr *cqe_text_response; 92 struct iscsi_text_response_hdr *cqe_text_response;
93 struct qedi_cmd *cmd; 93 struct qedi_cmd *cmd;
94 int pld_len; 94 int pld_len;
95 u32 *tmp;
96 95
97 cmd = (struct qedi_cmd *)task->dd_data; 96 cmd = (struct qedi_cmd *)task->dd_data;
98 task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id); 97 task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
@@ -108,7 +107,6 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
108 hton24(resp_hdr_ptr->dlength, 107 hton24(resp_hdr_ptr->dlength,
109 (cqe_text_response->hdr_second_dword & 108 (cqe_text_response->hdr_second_dword &
110 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK)); 109 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
111 tmp = (u32 *)resp_hdr_ptr->dlength;
112 110
113 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, 111 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
114 conn->session->age); 112 conn->session->age);
@@ -196,7 +194,6 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
196 struct iscsi_tm_rsp *resp_hdr_ptr; 194 struct iscsi_tm_rsp *resp_hdr_ptr;
197 struct iscsi_tm *tmf_hdr; 195 struct iscsi_tm *tmf_hdr;
198 struct qedi_cmd *qedi_cmd = NULL; 196 struct qedi_cmd *qedi_cmd = NULL;
199 u32 *tmp;
200 197
201 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; 198 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
202 199
@@ -222,7 +219,6 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
222 hton24(resp_hdr_ptr->dlength, 219 hton24(resp_hdr_ptr->dlength,
223 (cqe_tmp_response->hdr_second_dword & 220 (cqe_tmp_response->hdr_second_dword &
224 ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK)); 221 ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
225 tmp = (u32 *)resp_hdr_ptr->dlength;
226 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, 222 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
227 conn->session->age); 223 conn->session->age);
228 resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn); 224 resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
@@ -269,7 +265,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
269 struct iscsi_login_response_hdr *cqe_login_response; 265 struct iscsi_login_response_hdr *cqe_login_response;
270 struct qedi_cmd *cmd; 266 struct qedi_cmd *cmd;
271 int pld_len; 267 int pld_len;
272 u32 *tmp;
273 268
274 cmd = (struct qedi_cmd *)task->dd_data; 269 cmd = (struct qedi_cmd *)task->dd_data;
275 270
@@ -286,7 +281,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
286 hton24(resp_hdr_ptr->dlength, 281 hton24(resp_hdr_ptr->dlength,
287 (cqe_login_response->hdr_second_dword & 282 (cqe_login_response->hdr_second_dword &
288 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK)); 283 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
289 tmp = (u32 *)resp_hdr_ptr->dlength;
290 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, 284 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
291 conn->session->age); 285 conn->session->age);
292 resp_hdr_ptr->tsih = cqe_login_response->tsih; 286 resp_hdr_ptr->tsih = cqe_login_response->tsih;
@@ -590,7 +584,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
590 int datalen = 0; 584 int datalen = 0;
591 struct qedi_conn *qedi_conn; 585 struct qedi_conn *qedi_conn;
592 u32 iscsi_cid; 586 u32 iscsi_cid;
593 bool mark_cmd_node_deleted = false;
594 u8 cqe_err_bits = 0; 587 u8 cqe_err_bits = 0;
595 588
596 iscsi_cid = cqe->cqe_common.conn_id; 589 iscsi_cid = cqe->cqe_common.conn_id;
@@ -674,7 +667,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
674 cmd->io_cmd_in_list = false; 667 cmd->io_cmd_in_list = false;
675 list_del_init(&cmd->io_cmd); 668 list_del_init(&cmd->io_cmd);
676 qedi_conn->active_cmd_count--; 669 qedi_conn->active_cmd_count--;
677 mark_cmd_node_deleted = true;
678 } 670 }
679 spin_unlock(&qedi_conn->list_lock); 671 spin_unlock(&qedi_conn->list_lock);
680 672
@@ -763,7 +755,7 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
763 u32 rtid = 0; 755 u32 rtid = 0;
764 u32 iscsi_cid; 756 u32 iscsi_cid;
765 struct qedi_conn *qedi_conn; 757 struct qedi_conn *qedi_conn;
766 struct qedi_cmd *cmd_new, *dbg_cmd; 758 struct qedi_cmd *dbg_cmd;
767 struct iscsi_task *mtask; 759 struct iscsi_task *mtask;
768 struct iscsi_tm *tmf_hdr = NULL; 760 struct iscsi_tm *tmf_hdr = NULL;
769 761
@@ -856,7 +848,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
856 } 848 }
857 qedi_conn->cmd_cleanup_cmpl++; 849 qedi_conn->cmd_cleanup_cmpl++;
858 wake_up(&qedi_conn->wait_queue); 850 wake_up(&qedi_conn->wait_queue);
859 cmd_new = task->dd_data;
860 851
861 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, 852 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
862 "Freeing tid=0x%x for cid=0x%x\n", 853 "Freeing tid=0x%x for cid=0x%x\n",
@@ -1029,7 +1020,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
1029 struct iscsi_task_context *fw_task_ctx; 1020 struct iscsi_task_context *fw_task_ctx;
1030 struct qedi_ctx *qedi = qedi_conn->qedi; 1021 struct qedi_ctx *qedi = qedi_conn->qedi;
1031 struct iscsi_login_req *login_hdr; 1022 struct iscsi_login_req *login_hdr;
1032 struct scsi_sge *req_sge = NULL;
1033 struct scsi_sge *resp_sge = NULL; 1023 struct scsi_sge *resp_sge = NULL;
1034 struct qedi_cmd *qedi_cmd; 1024 struct qedi_cmd *qedi_cmd;
1035 struct qedi_endpoint *ep; 1025 struct qedi_endpoint *ep;
@@ -1037,7 +1027,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
1037 u16 sq_idx = 0; 1027 u16 sq_idx = 0;
1038 int rval = 0; 1028 int rval = 0;
1039 1029
1040 req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1041 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1030 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1042 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1031 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1043 ep = qedi_conn->ep; 1032 ep = qedi_conn->ep;
@@ -1718,7 +1707,6 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
1718 struct qedi_ctx *qedi = qedi_conn->qedi; 1707 struct qedi_ctx *qedi = qedi_conn->qedi;
1719 struct iscsi_task_context *fw_task_ctx; 1708 struct iscsi_task_context *fw_task_ctx;
1720 struct iscsi_nopout *nopout_hdr; 1709 struct iscsi_nopout *nopout_hdr;
1721 struct scsi_sge *req_sge = NULL;
1722 struct scsi_sge *resp_sge = NULL; 1710 struct scsi_sge *resp_sge = NULL;
1723 struct qedi_cmd *qedi_cmd; 1711 struct qedi_cmd *qedi_cmd;
1724 struct qedi_endpoint *ep; 1712 struct qedi_endpoint *ep;
@@ -1727,7 +1715,6 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
1727 u16 sq_idx = 0; 1715 u16 sq_idx = 0;
1728 int rval = 0; 1716 int rval = 0;
1729 1717
1730 req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1731 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1718 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1732 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1719 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1733 nopout_hdr = (struct iscsi_nopout *)task->hdr; 1720 nopout_hdr = (struct iscsi_nopout *)task->hdr;
@@ -1995,7 +1982,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
1995 struct qedi_conn *qedi_conn = conn->dd_data; 1982 struct qedi_conn *qedi_conn = conn->dd_data;
1996 struct scsi_cmnd *sc_cmd = task->sc; 1983 struct scsi_cmnd *sc_cmd = task->sc;
1997 unsigned long flags; 1984 unsigned long flags;
1998 u8 op;
1999 1985
2000 spin_lock_irqsave(&qedi->io_trace_lock, flags); 1986 spin_lock_irqsave(&qedi->io_trace_lock, flags);
2001 1987
@@ -2005,7 +1991,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
2005 io_log->cid = qedi_conn->iscsi_conn_id; 1991 io_log->cid = qedi_conn->iscsi_conn_id;
2006 io_log->lun = sc_cmd->device->lun; 1992 io_log->lun = sc_cmd->device->lun;
2007 io_log->op = sc_cmd->cmnd[0]; 1993 io_log->op = sc_cmd->cmnd[0];
2008 op = sc_cmd->cmnd[0];
2009 io_log->lba[0] = sc_cmd->cmnd[2]; 1994 io_log->lba[0] = sc_cmd->cmnd[2];
2010 io_log->lba[1] = sc_cmd->cmnd[3]; 1995 io_log->lba[1] = sc_cmd->cmnd[3];
2011 io_log->lba[2] = sc_cmd->cmnd[4]; 1996 io_log->lba[2] = sc_cmd->cmnd[4];
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2ea0ef93f5cb..e3ac7078d2aa 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -919,9 +919,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
919 919
920 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 920 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
921 sizeof(response) + sizeof(uint8_t); 921 sizeof(response) + sizeof(uint8_t);
922 fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) + 922 fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
923 sizeof(struct fc_bsg_reply); 923 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
924 memcpy(fw_sts_ptr, response, sizeof(response)); 924 sizeof(response));
925 fw_sts_ptr += sizeof(response); 925 fw_sts_ptr += sizeof(response);
926 *fw_sts_ptr = command_sent; 926 *fw_sts_ptr = command_sent;
927 927
@@ -1116,14 +1116,13 @@ qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
1116 return -EINVAL; 1116 return -EINVAL;
1117 } 1117 }
1118 1118
1119 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1119 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1120 if (!mn) { 1120 if (!mn) {
1121 ql_log(ql_log_warn, vha, 0x703c, 1121 ql_log(ql_log_warn, vha, 0x703c,
1122 "DMA alloc failed for fw buffer.\n"); 1122 "DMA alloc failed for fw buffer.\n");
1123 return -ENOMEM; 1123 return -ENOMEM;
1124 } 1124 }
1125 1125
1126 memset(mn, 0, sizeof(struct access_chip_84xx));
1127 mn->entry_type = ACCESS_CHIP_IOCB_TYPE; 1126 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1128 mn->entry_count = 1; 1127 mn->entry_count = 1;
1129 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request); 1128 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
@@ -2554,13 +2553,11 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2554 ql_log(ql_log_warn, vha, 0x7089, 2553 ql_log(ql_log_warn, vha, 0x7089,
2555 "mbx abort_command " 2554 "mbx abort_command "
2556 "failed.\n"); 2555 "failed.\n");
2557 scsi_req(bsg_job->req)->result =
2558 bsg_reply->result = -EIO; 2556 bsg_reply->result = -EIO;
2559 } else { 2557 } else {
2560 ql_dbg(ql_dbg_user, vha, 0x708a, 2558 ql_dbg(ql_dbg_user, vha, 0x708a,
2561 "mbx abort_command " 2559 "mbx abort_command "
2562 "success.\n"); 2560 "success.\n");
2563 scsi_req(bsg_job->req)->result =
2564 bsg_reply->result = 0; 2561 bsg_reply->result = 0;
2565 } 2562 }
2566 spin_lock_irqsave(&ha->hardware_lock, flags); 2563 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -2571,7 +2568,7 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2571 } 2568 }
2572 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2569 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2573 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); 2570 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2574 scsi_req(bsg_job->req)->result = bsg_reply->result = -ENXIO; 2571 bsg_reply->result = -ENXIO;
2575 return 0; 2572 return 0;
2576 2573
2577done: 2574done:
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 486c075998f6..01a9b8971e88 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -323,6 +323,12 @@ struct els_logo_payload {
323 uint8_t wwpn[WWN_SIZE]; 323 uint8_t wwpn[WWN_SIZE];
324}; 324};
325 325
326struct els_plogi_payload {
327 uint8_t opcode;
328 uint8_t rsvd[3];
329 uint8_t data[112];
330};
331
326struct ct_arg { 332struct ct_arg {
327 void *iocb; 333 void *iocb;
328 u16 nport_handle; 334 u16 nport_handle;
@@ -358,6 +364,19 @@ struct srb_iocb {
358 dma_addr_t els_logo_pyld_dma; 364 dma_addr_t els_logo_pyld_dma;
359 } els_logo; 365 } els_logo;
360 struct { 366 struct {
367#define ELS_DCMD_PLOGI 0x3
368 uint32_t flags;
369 uint32_t els_cmd;
370 struct completion comp;
371 struct els_plogi_payload *els_plogi_pyld;
372 struct els_plogi_payload *els_resp_pyld;
373 dma_addr_t els_plogi_pyld_dma;
374 dma_addr_t els_resp_pyld_dma;
375 uint32_t fw_status[3];
376 __le16 comp_status;
377 __le16 len;
378 } els_plogi;
379 struct {
361 /* 380 /*
362 * Values for flags field below are as 381 * Values for flags field below are as
363 * defined in tsk_mgmt_entry struct 382 * defined in tsk_mgmt_entry struct
@@ -922,6 +941,7 @@ struct mbx_cmd_32 {
922#define INTR_RSP_QUE_UPDATE_83XX 0x14 941#define INTR_RSP_QUE_UPDATE_83XX 0x14
923#define INTR_ATIO_QUE_UPDATE 0x1C 942#define INTR_ATIO_QUE_UPDATE 0x1C
924#define INTR_ATIO_RSP_QUE_UPDATE 0x1D 943#define INTR_ATIO_RSP_QUE_UPDATE 0x1D
944#define INTR_ATIO_QUE_UPDATE_27XX 0x1E
925 945
926/* ISP mailbox loopback echo diagnostic error code */ 946/* ISP mailbox loopback echo diagnostic error code */
927#define MBS_LB_RESET 0x17 947#define MBS_LB_RESET 0x17
@@ -2302,6 +2322,7 @@ typedef struct fc_port {
2302 unsigned int send_els_logo:1; 2322 unsigned int send_els_logo:1;
2303 unsigned int login_pause:1; 2323 unsigned int login_pause:1;
2304 unsigned int login_succ:1; 2324 unsigned int login_succ:1;
2325 unsigned int query:1;
2305 2326
2306 struct work_struct nvme_del_work; 2327 struct work_struct nvme_del_work;
2307 struct completion nvme_del_done; 2328 struct completion nvme_del_done;
@@ -2347,6 +2368,7 @@ typedef struct fc_port {
2347 uint8_t fc4_type; 2368 uint8_t fc4_type;
2348 uint8_t fc4f_nvme; 2369 uint8_t fc4f_nvme;
2349 uint8_t scan_state; 2370 uint8_t scan_state;
2371 uint8_t n2n_flag;
2350 2372
2351 unsigned long last_queue_full; 2373 unsigned long last_queue_full;
2352 unsigned long last_ramp_up; 2374 unsigned long last_ramp_up;
@@ -2368,6 +2390,9 @@ typedef struct fc_port {
2368 struct list_head gnl_entry; 2390 struct list_head gnl_entry;
2369 struct work_struct del_work; 2391 struct work_struct del_work;
2370 u8 iocb[IOCB_SIZE]; 2392 u8 iocb[IOCB_SIZE];
2393 u8 current_login_state;
2394 u8 last_login_state;
2395 struct completion n2n_done;
2371} fc_port_t; 2396} fc_port_t;
2372 2397
2373#define QLA_FCPORT_SCAN 1 2398#define QLA_FCPORT_SCAN 1
@@ -4113,6 +4138,7 @@ typedef struct scsi_qla_host {
4113#define QPAIR_ONLINE_CHECK_NEEDED 27 4138#define QPAIR_ONLINE_CHECK_NEEDED 27
4114#define SET_ZIO_THRESHOLD_NEEDED 28 4139#define SET_ZIO_THRESHOLD_NEEDED 28
4115#define DETECT_SFP_CHANGE 29 4140#define DETECT_SFP_CHANGE 29
4141#define N2N_LOGIN_NEEDED 30
4116 4142
4117 unsigned long pci_flags; 4143 unsigned long pci_flags;
4118#define PFLG_DISCONNECTED 0 /* PCI device removed */ 4144#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4223,6 +4249,9 @@ typedef struct scsi_qla_host {
4223 wait_queue_head_t fcport_waitQ; 4249 wait_queue_head_t fcport_waitQ;
4224 wait_queue_head_t vref_waitq; 4250 wait_queue_head_t vref_waitq;
4225 uint8_t min_link_speed_feat; 4251 uint8_t min_link_speed_feat;
4252 uint8_t n2n_node_name[WWN_SIZE];
4253 uint8_t n2n_port_name[WWN_SIZE];
4254 uint16_t n2n_id;
4226} scsi_qla_host_t; 4255} scsi_qla_host_t;
4227 4256
4228struct qla27xx_image_status { 4257struct qla27xx_image_status {
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index bec641aae7b3..d5cef0727e72 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -753,9 +753,7 @@ struct els_entry_24xx {
753 uint8_t reserved_2; 753 uint8_t reserved_2;
754 754
755 uint8_t port_id[3]; 755 uint8_t port_id[3];
756 uint8_t reserved_3; 756 uint8_t s_id[3];
757
758 uint16_t reserved_4;
759 757
760 uint16_t control_flags; /* Control flags. */ 758 uint16_t control_flags; /* Control flags. */
761#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13) 759#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13)
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3ad375f85b59..fa115c7433e5 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -45,6 +45,8 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
45extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); 45extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
46 46
47extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t); 47extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
48extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *,
49 port_id_t);
48 50
49extern void qla2x00_update_fcports(scsi_qla_host_t *); 51extern void qla2x00_update_fcports(scsi_qla_host_t *);
50 52
@@ -145,6 +147,7 @@ extern int ql2xmvasynctoatio;
145extern int ql2xuctrlirq; 147extern int ql2xuctrlirq;
146extern int ql2xnvmeenable; 148extern int ql2xnvmeenable;
147extern int ql2xautodetectsfp; 149extern int ql2xautodetectsfp;
150extern int ql2xenablemsix;
148 151
149extern int qla2x00_loop_reset(scsi_qla_host_t *); 152extern int qla2x00_loop_reset(scsi_qla_host_t *);
150extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 153extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -486,6 +489,8 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
486 uint16_t *); 489 uint16_t *);
487int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *, 490int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
488 struct port_database_24xx *); 491 struct port_database_24xx *);
492int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
493 void *, uint16_t);
489 494
490extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *); 495extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
491extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t); 496extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 44cf875a484a..1bafa043f9f1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -812,13 +812,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
812 sp->gen2 = fcport->login_gen; 812 sp->gen2 = fcport->login_gen;
813 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 813 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
814 814
815 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 815 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
816 if (pd == NULL) { 816 if (pd == NULL) {
817 ql_log(ql_log_warn, vha, 0xd043, 817 ql_log(ql_log_warn, vha, 0xd043,
818 "Failed to allocate port database structure.\n"); 818 "Failed to allocate port database structure.\n");
819 goto done_free_sp; 819 goto done_free_sp;
820 } 820 }
821 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
822 821
823 mb = sp->u.iocb_cmd.u.mbx.out_mb; 822 mb = sp->u.iocb_cmd.u.mbx.out_mb;
824 mb[0] = MBC_GET_PORT_DATABASE; 823 mb[0] = MBC_GET_PORT_DATABASE;
@@ -1434,6 +1433,14 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1434 qla24xx_post_gpdb_work(vha, ea->fcport, 0); 1433 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1435 break; 1434 break;
1436 default: 1435 default:
1436 if (ea->fcport->n2n_flag) {
1437 ql_dbg(ql_dbg_disc, vha, 0x2118,
1438 "%s %d %8phC post fc4 prli\n",
1439 __func__, __LINE__, ea->fcport->port_name);
1440 ea->fcport->fc4f_nvme = 0;
1441 ea->fcport->n2n_flag = 0;
1442 qla24xx_post_prli_work(vha, ea->fcport);
1443 }
1437 ql_dbg(ql_dbg_disc, vha, 0x2119, 1444 ql_dbg(ql_dbg_disc, vha, 0x2119,
1438 "%s %d %8phC unhandle event of %x\n", 1445 "%s %d %8phC unhandle event of %x\n",
1439 __func__, __LINE__, ea->fcport->port_name, ea->data[0]); 1446 __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
@@ -4367,7 +4374,109 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
4367 return (rval); 4374 return (rval);
4368} 4375}
4369 4376
4377/*
4378 * N2N Login
4379 * Updates Fibre Channel Device Database with local loop devices.
4380 *
4381 * Input:
4382 * ha = adapter block pointer.
4383 *
4384 * Returns:
4385 */
4386static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
4387 fc_port_t *fcport)
4388{
4389 struct qla_hw_data *ha = vha->hw;
4390 int res = QLA_SUCCESS, rval;
4391 int greater_wwpn = 0;
4392 int logged_in = 0;
4393
4394 if (ha->current_topology != ISP_CFG_N)
4395 return res;
4396
4397 if (wwn_to_u64(vha->port_name) >
4398 wwn_to_u64(vha->n2n_port_name)) {
4399 ql_dbg(ql_dbg_disc, vha, 0x2002,
4400 "HBA WWPN is greater %llx > target %llx\n",
4401 wwn_to_u64(vha->port_name),
4402 wwn_to_u64(vha->n2n_port_name));
4403 greater_wwpn = 1;
4404 fcport->d_id.b24 = vha->n2n_id;
4405 }
4406
4407 fcport->loop_id = vha->loop_id;
4408 fcport->fc4f_nvme = 0;
4409 fcport->query = 1;
4410
4411 ql_dbg(ql_dbg_disc, vha, 0x4001,
4412 "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
4413 fcport->d_id.b24, vha->loop_id);
4414
4415 /* Fill in member data. */
4416 if (!greater_wwpn) {
4417 rval = qla2x00_get_port_database(vha, fcport, 0);
4418 ql_dbg(ql_dbg_disc, vha, 0x1051,
4419 "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
4420 fcport->current_login_state, fcport->last_login_state,
4421 fcport->d_id.b24, fcport->loop_id, rval);
4422
4423 if (((fcport->current_login_state & 0xf) == 0x4) ||
4424 ((fcport->current_login_state & 0xf) == 0x6))
4425 logged_in = 1;
4426 }
4427
4428 if (logged_in || greater_wwpn) {
4429 if (!vha->nvme_local_port && vha->flags.nvme_enabled)
4430 qla_nvme_register_hba(vha);
4431
4432 /* Set connected N_Port d_id */
4433 if (vha->flags.nvme_enabled)
4434 fcport->fc4f_nvme = 1;
4435
4436 fcport->scan_state = QLA_FCPORT_FOUND;
4437 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4438 fcport->disc_state = DSC_GNL;
4439 fcport->n2n_flag = 1;
4440 fcport->flags = 3;
4441 vha->hw->flags.gpsc_supported = 0;
4442
4443 if (greater_wwpn) {
4444 ql_dbg(ql_dbg_disc, vha, 0x20e5,
4445 "%s %d PLOGI ELS %8phC\n",
4446 __func__, __LINE__, fcport->port_name);
4447
4448 res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
4449 fcport, fcport->d_id);
4450 }
4451
4452 if (res != QLA_SUCCESS) {
4453 ql_log(ql_log_info, vha, 0xd04d,
4454 "PLOGI Failed: portid=%06x - retrying\n",
4455 fcport->d_id.b24);
4456 res = QLA_SUCCESS;
4457 } else {
4458 /* State 0x6 means FCP PRLI complete */
4459 if ((fcport->current_login_state & 0xf) == 0x6) {
4460 ql_dbg(ql_dbg_disc, vha, 0x2118,
4461 "%s %d %8phC post GPDB work\n",
4462 __func__, __LINE__, fcport->port_name);
4463 fcport->chip_reset =
4464 vha->hw->base_qpair->chip_reset;
4465 qla24xx_post_gpdb_work(vha, fcport, 0);
4466 } else {
4467 ql_dbg(ql_dbg_disc, vha, 0x2118,
4468 "%s %d %8phC post NVMe PRLI\n",
4469 __func__, __LINE__, fcport->port_name);
4470 qla24xx_post_prli_work(vha, fcport);
4471 }
4472 }
4473 } else {
4474 /* Wait for next database change */
4475 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4476 }
4370 4477
4478 return res;
4479}
4371 4480
4372/* 4481/*
4373 * qla2x00_configure_local_loop 4482 * qla2x00_configure_local_loop
@@ -4438,6 +4547,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4438 } 4547 }
4439 } 4548 }
4440 4549
 4550 /* Initiate N2N login. */
4551 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
4552 rval = qla24xx_n2n_handle_login(vha, new_fcport);
4553 if (rval != QLA_SUCCESS)
4554 goto cleanup_allocation;
4555 return QLA_SUCCESS;
4556 }
4557
4441 /* Add devices to port list. */ 4558 /* Add devices to port list. */
4442 id_iter = (char *)ha->gid_list; 4559 id_iter = (char *)ha->gid_list;
4443 for (index = 0; index < entries; index++) { 4560 for (index = 0; index < entries; index++) {
@@ -4479,10 +4596,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4479 "Failed to retrieve fcport information " 4596 "Failed to retrieve fcport information "
4480 "-- get_port_database=%x, loop_id=0x%04x.\n", 4597 "-- get_port_database=%x, loop_id=0x%04x.\n",
4481 rval2, new_fcport->loop_id); 4598 rval2, new_fcport->loop_id);
4482 ql_dbg(ql_dbg_disc, vha, 0x2105, 4599 /* Skip retry if N2N */
4483 "Scheduling resync.\n"); 4600 if (ha->current_topology != ISP_CFG_N) {
4484 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4601 ql_dbg(ql_dbg_disc, vha, 0x2105,
4485 continue; 4602 "Scheduling resync.\n");
4603 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4604 continue;
4605 }
4486 } 4606 }
4487 4607
4488 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4608 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
@@ -7555,6 +7675,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
7555 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 7675 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7556 icb->firmware_options_3 |= BIT_0; 7676 icb->firmware_options_3 |= BIT_0;
7557 7677
7678 if (IS_QLA27XX(ha)) {
7679 icb->firmware_options_3 |= BIT_8;
7680 ql_dbg(ql_log_info, vha, 0x0075,
7681 "Enabling direct connection.\n");
7682 }
7683
7558 if (rval) { 7684 if (rval) {
7559 ql_log(ql_log_warn, vha, 0x0076, 7685 ql_log(ql_log_warn, vha, 0x0076,
7560 "NVRAM configuration failed.\n"); 7686 "NVRAM configuration failed.\n");
@@ -7910,7 +8036,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
7910 return NULL; 8036 return NULL;
7911 } 8037 }
7912 8038
7913 if (ql2xmqsupport) { 8039 if (ql2xmqsupport || ql2xnvmeenable) {
7914 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL); 8040 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
7915 if (qpair == NULL) { 8041 if (qpair == NULL) {
7916 ql_log(ql_log_warn, vha, 0x0182, 8042 ql_log(ql_log_warn, vha, 0x0182,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 2f94159186d7..d810a447cb4a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2518,6 +2518,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2518{ 2518{
2519 scsi_qla_host_t *vha = sp->vha; 2519 scsi_qla_host_t *vha = sp->vha;
2520 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2520 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2521 uint32_t dsd_len = 24;
2521 2522
2522 els_iocb->entry_type = ELS_IOCB_TYPE; 2523 els_iocb->entry_type = ELS_IOCB_TYPE;
2523 els_iocb->entry_count = 1; 2524 els_iocb->entry_count = 1;
@@ -2534,24 +2535,198 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2534 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2535 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2535 els_iocb->port_id[1] = sp->fcport->d_id.b.area; 2536 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2536 els_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2537 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2538 els_iocb->s_id[0] = vha->d_id.b.al_pa;
2539 els_iocb->s_id[1] = vha->d_id.b.area;
2540 els_iocb->s_id[2] = vha->d_id.b.domain;
2537 els_iocb->control_flags = 0; 2541 els_iocb->control_flags = 0;
2538 2542
2539 els_iocb->tx_byte_count = sizeof(struct els_logo_payload); 2543 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2540 els_iocb->tx_address[0] = 2544 els_iocb->tx_byte_count = sizeof(struct els_plogi_payload);
2541 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma)); 2545 els_iocb->tx_address[0] =
2542 els_iocb->tx_address[1] = 2546 cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2543 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma)); 2547 els_iocb->tx_address[1] =
2544 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); 2548 cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2549 els_iocb->tx_len = dsd_len;
2550
2551 els_iocb->rx_dsd_count = 1;
2552 els_iocb->rx_byte_count = sizeof(struct els_plogi_payload);
2553 els_iocb->rx_address[0] =
2554 cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2555 els_iocb->rx_address[1] =
2556 cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2557 els_iocb->rx_len = dsd_len;
2558 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2559 "PLOGI ELS IOCB:\n");
2560 ql_dump_buffer(ql_log_info, vha, 0x0109,
2561 (uint8_t *)els_iocb, 0x70);
2562 } else {
2563 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2564 els_iocb->tx_address[0] =
2565 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2566 els_iocb->tx_address[1] =
2567 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2568 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2545 2569
2546 els_iocb->rx_byte_count = 0; 2570 els_iocb->rx_byte_count = 0;
2547 els_iocb->rx_address[0] = 0; 2571 els_iocb->rx_address[0] = 0;
2548 els_iocb->rx_address[1] = 0; 2572 els_iocb->rx_address[1] = 0;
2549 els_iocb->rx_len = 0; 2573 els_iocb->rx_len = 0;
2574 }
2550 2575
2551 sp->vha->qla_stats.control_requests++; 2576 sp->vha->qla_stats.control_requests++;
2552} 2577}
2553 2578
2554static void 2579static void
2580qla2x00_els_dcmd2_sp_free(void *data)
2581{
2582 srb_t *sp = data;
2583 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2584
2585 if (elsio->u.els_plogi.els_plogi_pyld)
2586 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2587 elsio->u.els_plogi.els_plogi_pyld,
2588 elsio->u.els_plogi.els_plogi_pyld_dma);
2589
2590 if (elsio->u.els_plogi.els_resp_pyld)
2591 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2592 elsio->u.els_plogi.els_resp_pyld,
2593 elsio->u.els_plogi.els_resp_pyld_dma);
2594
2595 del_timer(&elsio->timer);
2596 qla2x00_rel_sp(sp);
2597}
2598
2599static void
2600qla2x00_els_dcmd2_iocb_timeout(void *data)
2601{
2602 srb_t *sp = data;
2603 fc_port_t *fcport = sp->fcport;
2604 struct scsi_qla_host *vha = sp->vha;
2605 struct qla_hw_data *ha = vha->hw;
2606 struct srb_iocb *lio = &sp->u.iocb_cmd;
2607 unsigned long flags = 0;
2608 int res;
2609
2610 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2611 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2612 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2613
2614 /* Abort the exchange */
2615 spin_lock_irqsave(&ha->hardware_lock, flags);
2616 res = ha->isp_ops->abort_command(sp);
2617 ql_dbg(ql_dbg_io, vha, 0x3070,
2618 "mbx abort_command %s\n",
2619 (res == QLA_SUCCESS) ? "successful" : "failed");
2620 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2621
2622 complete(&lio->u.els_plogi.comp);
2623}
2624
2625static void
2626qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2627{
2628 srb_t *sp = ptr;
2629 fc_port_t *fcport = sp->fcport;
2630 struct srb_iocb *lio = &sp->u.iocb_cmd;
2631 struct scsi_qla_host *vha = sp->vha;
2632
2633 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
2634 "%s ELS hdl=%x, portid=%06x done %8pC\n",
2635 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
2636
2637 complete(&lio->u.els_plogi.comp);
2638}
2639
2640int
2641qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2642 fc_port_t *fcport, port_id_t remote_did)
2643{
2644 srb_t *sp;
2645 struct srb_iocb *elsio = NULL;
2646 struct qla_hw_data *ha = vha->hw;
2647 int rval = QLA_SUCCESS;
2648 void *ptr, *resp_ptr;
2649 dma_addr_t ptr_dma;
2650
2651 /* Alloc SRB structure */
2652 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2653 if (!sp) {
2654 ql_log(ql_log_info, vha, 0x70e6,
2655 "SRB allocation failed\n");
2656 return -ENOMEM;
2657 }
2658
2659 elsio = &sp->u.iocb_cmd;
2660 fcport->d_id.b.domain = remote_did.b.domain;
2661 fcport->d_id.b.area = remote_did.b.area;
2662 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2663
2664 ql_dbg(ql_dbg_io, vha, 0x3073,
2665 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2666
2667 sp->type = SRB_ELS_DCMD;
2668 sp->name = "ELS_DCMD";
2669 sp->fcport = fcport;
2670 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2671 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2672 sp->done = qla2x00_els_dcmd2_sp_done;
2673 sp->free = qla2x00_els_dcmd2_sp_free;
2674
2675 ptr = elsio->u.els_plogi.els_plogi_pyld =
2676 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2677 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2678 ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
2679
2680 if (!elsio->u.els_plogi.els_plogi_pyld) {
2681 rval = QLA_FUNCTION_FAILED;
2682 goto out;
2683 }
2684
2685 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2686 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2687 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2688
2689 if (!elsio->u.els_plogi.els_resp_pyld) {
2690 rval = QLA_FUNCTION_FAILED;
2691 goto out;
2692 }
2693
2694 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2695
2696 memset(ptr, 0, sizeof(struct els_plogi_payload));
2697 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2698 elsio->u.els_plogi.els_cmd = els_opcode;
2699 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2700 qla24xx_get_port_login_templ(vha, ptr_dma + 4,
2701 &elsio->u.els_plogi.els_plogi_pyld->data[0],
2702 sizeof(struct els_plogi_payload));
2703
2704 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2705 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
2706 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2707
2708 init_completion(&elsio->u.els_plogi.comp);
2709 rval = qla2x00_start_sp(sp);
2710 if (rval != QLA_SUCCESS) {
2711 rval = QLA_FUNCTION_FAILED;
2712 goto out;
2713 }
2714
2715 ql_dbg(ql_dbg_io, vha, 0x3074,
2716 "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
2717 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
2718
2719 wait_for_completion(&elsio->u.els_plogi.comp);
2720
2721 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2722 rval = QLA_FUNCTION_FAILED;
2723
2724out:
2725 sp->free(sp);
2726 return rval;
2727}
2728
2729static void
2555qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2730qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2556{ 2731{
2557 struct bsg_job *bsg_job = sp->u.bsg_job; 2732 struct bsg_job *bsg_job = sp->u.bsg_job;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9d9668aac6f6..2fd79129bb2a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1041,6 +1041,7 @@ global_port_update:
1041 */ 1041 */
1042 atomic_set(&vha->loop_down_timer, 0); 1042 atomic_set(&vha->loop_down_timer, 0);
1043 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1043 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1044 !ha->flags.n2n_ae &&
1044 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1045 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1045 ql_dbg(ql_dbg_async, vha, 0x5011, 1046 ql_dbg(ql_dbg_async, vha, 0x5011,
1046 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1047 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
@@ -1543,8 +1544,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1543 struct fc_bsg_reply *bsg_reply; 1544 struct fc_bsg_reply *bsg_reply;
1544 uint16_t comp_status; 1545 uint16_t comp_status;
1545 uint32_t fw_status[3]; 1546 uint32_t fw_status[3];
1546 uint8_t* fw_sts_ptr;
1547 int res; 1547 int res;
1548 struct srb_iocb *els;
1548 1549
1549 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1550 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1550 if (!sp) 1551 if (!sp)
@@ -1561,10 +1562,14 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1561 break; 1562 break;
1562 case SRB_ELS_DCMD: 1563 case SRB_ELS_DCMD:
1563 type = "Driver ELS logo"; 1564 type = "Driver ELS logo";
1564 ql_dbg(ql_dbg_user, vha, 0x5047, 1565 if (iocb_type != ELS_IOCB_TYPE) {
1565 "Completing %s: (%p) type=%d.\n", type, sp, sp->type); 1566 ql_dbg(ql_dbg_user, vha, 0x5047,
1566 sp->done(sp, 0); 1567 "Completing %s: (%p) type=%d.\n",
1567 return; 1568 type, sp, sp->type);
1569 sp->done(sp, 0);
1570 return;
1571 }
1572 break;
1568 case SRB_CT_PTHRU_CMD: 1573 case SRB_CT_PTHRU_CMD:
1569 /* borrowing sts_entry_24xx.comp_status. 1574 /* borrowing sts_entry_24xx.comp_status.
1570 same location as ct_entry_24xx.comp_status 1575 same location as ct_entry_24xx.comp_status
@@ -1584,6 +1589,33 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1584 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); 1589 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1585 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); 1590 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1586 1591
1592 if (iocb_type == ELS_IOCB_TYPE) {
1593 els = &sp->u.iocb_cmd;
1594 els->u.els_plogi.fw_status[0] = fw_status[0];
1595 els->u.els_plogi.fw_status[1] = fw_status[1];
1596 els->u.els_plogi.fw_status[2] = fw_status[2];
1597 els->u.els_plogi.comp_status = fw_status[0];
1598 if (comp_status == CS_COMPLETE) {
1599 res = DID_OK << 16;
1600 } else {
1601 if (comp_status == CS_DATA_UNDERRUN) {
1602 res = DID_OK << 16;
1603 els->u.els_plogi.len =
1604 le16_to_cpu(((struct els_sts_entry_24xx *)
1605 pkt)->total_byte_count);
1606 } else {
1607 els->u.els_plogi.len = 0;
1608 res = DID_ERROR << 16;
1609 }
1610 }
1611 ql_log(ql_log_info, vha, 0x503f,
1612 "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
1613 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1614 le16_to_cpu(((struct els_sts_entry_24xx *)
1615 pkt)->total_byte_count));
1616 goto els_ct_done;
1617 }
1618
1587 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1619 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1588 * fc payload to the caller 1620 * fc payload to the caller
1589 */ 1621 */
@@ -1604,11 +1636,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1604 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1636 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1605 le16_to_cpu(((struct els_sts_entry_24xx *) 1637 le16_to_cpu(((struct els_sts_entry_24xx *)
1606 pkt)->total_byte_count)); 1638 pkt)->total_byte_count));
1607 fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) + 1639 } else {
1608 sizeof(struct fc_bsg_reply);
1609 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1610 }
1611 else {
1612 ql_dbg(ql_dbg_user, vha, 0x5040, 1640 ql_dbg(ql_dbg_user, vha, 0x5040,
1613 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1641 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1614 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1642 "error subcode 1=0x%x error subcode 2=0x%x.\n",
@@ -1619,10 +1647,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1619 pkt)->error_subcode_2)); 1647 pkt)->error_subcode_2));
1620 res = DID_ERROR << 16; 1648 res = DID_ERROR << 16;
1621 bsg_reply->reply_payload_rcv_len = 0; 1649 bsg_reply->reply_payload_rcv_len = 0;
1622 fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
1623 sizeof(struct fc_bsg_reply);
1624 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1625 } 1650 }
1651 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
1652 fw_status, sizeof(fw_status));
1626 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 1653 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1627 (uint8_t *)pkt, sizeof(*pkt)); 1654 (uint8_t *)pkt, sizeof(*pkt));
1628 } 1655 }
@@ -1631,6 +1658,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1631 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1658 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1632 bsg_job->reply_len = 0; 1659 bsg_job->reply_len = 0;
1633 } 1660 }
1661els_ct_done:
1634 1662
1635 sp->done(sp, res); 1663 sp->done(sp, res);
1636} 1664}
@@ -3129,6 +3157,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
3129 case INTR_RSP_QUE_UPDATE_83XX: 3157 case INTR_RSP_QUE_UPDATE_83XX:
3130 qla24xx_process_response_queue(vha, rsp); 3158 qla24xx_process_response_queue(vha, rsp);
3131 break; 3159 break;
3160 case INTR_ATIO_QUE_UPDATE_27XX:
3132 case INTR_ATIO_QUE_UPDATE:{ 3161 case INTR_ATIO_QUE_UPDATE:{
3133 unsigned long flags2; 3162 unsigned long flags2;
3134 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 3163 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
@@ -3259,6 +3288,7 @@ qla24xx_msix_default(int irq, void *dev_id)
3259 case INTR_RSP_QUE_UPDATE_83XX: 3288 case INTR_RSP_QUE_UPDATE_83XX:
3260 qla24xx_process_response_queue(vha, rsp); 3289 qla24xx_process_response_queue(vha, rsp);
3261 break; 3290 break;
3291 case INTR_ATIO_QUE_UPDATE_27XX:
3262 case INTR_ATIO_QUE_UPDATE:{ 3292 case INTR_ATIO_QUE_UPDATE:{
3263 unsigned long flags2; 3293 unsigned long flags2;
3264 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 3294 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
@@ -3347,7 +3377,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3347 .pre_vectors = QLA_BASE_VECTORS, 3377 .pre_vectors = QLA_BASE_VECTORS,
3348 }; 3378 };
3349 3379
3350 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 3380 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3381 IS_ATIO_MSIX_CAPABLE(ha)) {
3351 desc.pre_vectors++; 3382 desc.pre_vectors++;
3352 min_vecs++; 3383 min_vecs++;
3353 } 3384 }
@@ -3374,7 +3405,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3374 ha->msix_count, ret); 3405 ha->msix_count, ret);
3375 ha->msix_count = ret; 3406 ha->msix_count = ret;
3376 /* Recalculate queue values */ 3407 /* Recalculate queue values */
3377 if (ha->mqiobase && ql2xmqsupport) { 3408 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
3378 ha->max_req_queues = ha->msix_count - 1; 3409 ha->max_req_queues = ha->msix_count - 1;
3379 3410
3380 /* ATIOQ needs 1 vector. That's 1 less QPair */ 3411 /* ATIOQ needs 1 vector. That's 1 less QPair */
@@ -3432,7 +3463,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3432 * If target mode is enable, also request the vector for the ATIO 3463 * If target mode is enable, also request the vector for the ATIO
3433 * queue. 3464 * queue.
3434 */ 3465 */
3435 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 3466 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3467 IS_ATIO_MSIX_CAPABLE(ha)) {
3436 qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; 3468 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3437 rsp->msix = qentry; 3469 rsp->msix = qentry;
3438 qentry->handle = rsp; 3470 qentry->handle = rsp;
@@ -3486,11 +3518,14 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3486 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3518 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3487 3519
3488 /* If possible, enable MSI-X. */ 3520 /* If possible, enable MSI-X. */
3489 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3521 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
3490 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) && 3522 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
3491 !IS_QLA27XX(ha)) 3523 !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
3492 goto skip_msi; 3524 goto skip_msi;
3493 3525
3526 if (ql2xenablemsix == 2)
3527 goto skip_msix;
3528
3494 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 3529 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3495 (ha->pdev->subsystem_device == 0x7040 || 3530 (ha->pdev->subsystem_device == 0x7040 ||
3496 ha->pdev->subsystem_device == 0x7041 || 3531 ha->pdev->subsystem_device == 0x7041 ||
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 99502fa90810..cb717d47339f 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1782,13 +1782,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1782 "Entered %s.\n", __func__); 1782 "Entered %s.\n", __func__);
1783 1783
1784 pd24 = NULL; 1784 pd24 = NULL;
1785 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1785 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1786 if (pd == NULL) { 1786 if (pd == NULL) {
1787 ql_log(ql_log_warn, vha, 0x1050, 1787 ql_log(ql_log_warn, vha, 0x1050,
1788 "Failed to allocate port database structure.\n"); 1788 "Failed to allocate port database structure.\n");
1789 fcport->query = 0;
1789 return QLA_MEMORY_ALLOC_FAILED; 1790 return QLA_MEMORY_ALLOC_FAILED;
1790 } 1791 }
1791 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1792 1792
1793 mcp->mb[0] = MBC_GET_PORT_DATABASE; 1793 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1794 if (opt != 0 && !IS_FWI2_CAPABLE(ha)) 1794 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
@@ -1823,17 +1823,32 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1823 1823
1824 if (IS_FWI2_CAPABLE(ha)) { 1824 if (IS_FWI2_CAPABLE(ha)) {
1825 uint64_t zero = 0; 1825 uint64_t zero = 0;
1826 u8 current_login_state, last_login_state;
1827
1826 pd24 = (struct port_database_24xx *) pd; 1828 pd24 = (struct port_database_24xx *) pd;
1827 1829
1828 /* Check for logged in state. */ 1830 /* Check for logged in state. */
1829 if (pd24->current_login_state != PDS_PRLI_COMPLETE && 1831 if (fcport->fc4f_nvme) {
1830 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1832 current_login_state = pd24->current_login_state >> 4;
1831 ql_dbg(ql_dbg_mbx, vha, 0x1051, 1833 last_login_state = pd24->last_login_state >> 4;
1832 "Unable to verify login-state (%x/%x) for " 1834 } else {
1833 "loop_id %x.\n", pd24->current_login_state, 1835 current_login_state = pd24->current_login_state & 0xf;
1834 pd24->last_login_state, fcport->loop_id); 1836 last_login_state = pd24->last_login_state & 0xf;
1837 }
1838 fcport->current_login_state = pd24->current_login_state;
1839 fcport->last_login_state = pd24->last_login_state;
1840
1841 /* Check for logged in state. */
1842 if (current_login_state != PDS_PRLI_COMPLETE &&
1843 last_login_state != PDS_PRLI_COMPLETE) {
1844 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1845 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1846 current_login_state, last_login_state,
1847 fcport->loop_id);
1835 rval = QLA_FUNCTION_FAILED; 1848 rval = QLA_FUNCTION_FAILED;
1836 goto gpd_error_out; 1849
1850 if (!fcport->query)
1851 goto gpd_error_out;
1837 } 1852 }
1838 1853
1839 if (fcport->loop_id == FC_NO_LOOP_ID || 1854 if (fcport->loop_id == FC_NO_LOOP_ID ||
@@ -1912,6 +1927,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1912 1927
1913gpd_error_out: 1928gpd_error_out:
1914 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 1929 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1930 fcport->query = 0;
1915 1931
1916 if (rval != QLA_SUCCESS) { 1932 if (rval != QLA_SUCCESS) {
1917 ql_dbg(ql_dbg_mbx, vha, 0x1052, 1933 ql_dbg(ql_dbg_mbx, vha, 0x1052,
@@ -2255,13 +2271,12 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2255 else 2271 else
2256 req = ha->req_q_map[0]; 2272 req = ha->req_q_map[0];
2257 2273
2258 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2274 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2259 if (lg == NULL) { 2275 if (lg == NULL) {
2260 ql_log(ql_log_warn, vha, 0x1062, 2276 ql_log(ql_log_warn, vha, 0x1062,
2261 "Failed to allocate login IOCB.\n"); 2277 "Failed to allocate login IOCB.\n");
2262 return QLA_MEMORY_ALLOC_FAILED; 2278 return QLA_MEMORY_ALLOC_FAILED;
2263 } 2279 }
2264 memset(lg, 0, sizeof(struct logio_entry_24xx));
2265 2280
2266 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2281 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2267 lg->entry_count = 1; 2282 lg->entry_count = 1;
@@ -2525,13 +2540,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2525 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2540 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2526 "Entered %s.\n", __func__); 2541 "Entered %s.\n", __func__);
2527 2542
2528 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2543 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2529 if (lg == NULL) { 2544 if (lg == NULL) {
2530 ql_log(ql_log_warn, vha, 0x106e, 2545 ql_log(ql_log_warn, vha, 0x106e,
2531 "Failed to allocate logout IOCB.\n"); 2546 "Failed to allocate logout IOCB.\n");
2532 return QLA_MEMORY_ALLOC_FAILED; 2547 return QLA_MEMORY_ALLOC_FAILED;
2533 } 2548 }
2534 memset(lg, 0, sizeof(struct logio_entry_24xx));
2535 2549
2536 req = vha->req; 2550 req = vha->req;
2537 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2551 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
@@ -2820,13 +2834,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2820 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 2834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2821 "Entered %s.\n", __func__); 2835 "Entered %s.\n", __func__);
2822 2836
2823 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2837 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2824 if (pmap == NULL) { 2838 if (pmap == NULL) {
2825 ql_log(ql_log_warn, vha, 0x1080, 2839 ql_log(ql_log_warn, vha, 0x1080,
2826 "Memory alloc failed.\n"); 2840 "Memory alloc failed.\n");
2827 return QLA_MEMORY_ALLOC_FAILED; 2841 return QLA_MEMORY_ALLOC_FAILED;
2828 } 2842 }
2829 memset(pmap, 0, FCAL_MAP_SIZE);
2830 2843
2831 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 2844 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2832 mcp->mb[2] = MSW(pmap_dma); 2845 mcp->mb[2] = MSW(pmap_dma);
@@ -3014,13 +3027,12 @@ qla24xx_abort_command(srb_t *sp)
3014 return QLA_FUNCTION_FAILED; 3027 return QLA_FUNCTION_FAILED;
3015 } 3028 }
3016 3029
3017 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3030 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3018 if (abt == NULL) { 3031 if (abt == NULL) {
3019 ql_log(ql_log_warn, vha, 0x108d, 3032 ql_log(ql_log_warn, vha, 0x108d,
3020 "Failed to allocate abort IOCB.\n"); 3033 "Failed to allocate abort IOCB.\n");
3021 return QLA_MEMORY_ALLOC_FAILED; 3034 return QLA_MEMORY_ALLOC_FAILED;
3022 } 3035 }
3023 memset(abt, 0, sizeof(struct abort_entry_24xx));
3024 3036
3025 abt->entry_type = ABORT_IOCB_TYPE; 3037 abt->entry_type = ABORT_IOCB_TYPE;
3026 abt->entry_count = 1; 3038 abt->entry_count = 1;
@@ -3098,13 +3110,12 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3098 rsp = req->rsp; 3110 rsp = req->rsp;
3099 } 3111 }
3100 3112
3101 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3113 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3102 if (tsk == NULL) { 3114 if (tsk == NULL) {
3103 ql_log(ql_log_warn, vha, 0x1093, 3115 ql_log(ql_log_warn, vha, 0x1093,
3104 "Failed to allocate task management IOCB.\n"); 3116 "Failed to allocate task management IOCB.\n");
3105 return QLA_MEMORY_ALLOC_FAILED; 3117 return QLA_MEMORY_ALLOC_FAILED;
3106 } 3118 }
3107 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
3108 3119
3109 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3120 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3110 tsk->p.tsk.entry_count = 1; 3121 tsk->p.tsk.entry_count = 1;
@@ -3753,6 +3764,38 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3753 rptid_entry->vp_status, 3764 rptid_entry->vp_status,
3754 rptid_entry->port_id[2], rptid_entry->port_id[1], 3765 rptid_entry->port_id[2], rptid_entry->port_id[1],
3755 rptid_entry->port_id[0]); 3766 rptid_entry->port_id[0]);
3767 ql_dbg(ql_dbg_async, vha, 0x5075,
3768 "Format 1: Remote WWPN %8phC.\n",
3769 rptid_entry->u.f1.port_name);
3770
3771 ql_dbg(ql_dbg_async, vha, 0x5075,
3772 "Format 1: WWPN %8phC.\n",
3773 vha->port_name);
3774
3775 /* N2N. direct connect */
3776 if (IS_QLA27XX(ha) &&
3777 ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
3778 /* if our portname is higher then initiate N2N login */
3779 if (wwn_to_u64(vha->port_name) >
3780 wwn_to_u64(rptid_entry->u.f1.port_name)) {
3781 // ??? qlt_update_host_map(vha, id);
3782 vha->n2n_id = 0x1;
3783 ql_dbg(ql_dbg_async, vha, 0x5075,
3784 "Format 1: Setting n2n_update_needed for id %d\n",
3785 vha->n2n_id);
3786 } else {
3787 ql_dbg(ql_dbg_async, vha, 0x5075,
3788 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3789 rptid_entry->u.f1.port_name);
3790 }
3791
3792 memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
3793 WWN_SIZE);
3794 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3795 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3796 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3797 return;
3798 }
3756 3799
3757 /* buffer to buffer credit flag */ 3800 /* buffer to buffer credit flag */
3758 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 3801 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
@@ -3856,14 +3899,13 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb, 3899 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3857 "Entered %s.\n", __func__); 3900 "Entered %s.\n", __func__);
3858 3901
3859 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 3902 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3860 if (!vpmod) { 3903 if (!vpmod) {
3861 ql_log(ql_log_warn, vha, 0x10bc, 3904 ql_log(ql_log_warn, vha, 0x10bc,
3862 "Failed to allocate modify VP IOCB.\n"); 3905 "Failed to allocate modify VP IOCB.\n");
3863 return QLA_MEMORY_ALLOC_FAILED; 3906 return QLA_MEMORY_ALLOC_FAILED;
3864 } 3907 }
3865 3908
3866 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
3867 vpmod->entry_type = VP_CONFIG_IOCB_TYPE; 3909 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3868 vpmod->entry_count = 1; 3910 vpmod->entry_count = 1;
3869 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS; 3911 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
@@ -3934,13 +3976,12 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3934 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 3976 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3935 return QLA_PARAMETER_ERROR; 3977 return QLA_PARAMETER_ERROR;
3936 3978
3937 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma); 3979 vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3938 if (!vce) { 3980 if (!vce) {
3939 ql_log(ql_log_warn, vha, 0x10c2, 3981 ql_log(ql_log_warn, vha, 0x10c2,
3940 "Failed to allocate VP control IOCB.\n"); 3982 "Failed to allocate VP control IOCB.\n");
3941 return QLA_MEMORY_ALLOC_FAILED; 3983 return QLA_MEMORY_ALLOC_FAILED;
3942 } 3984 }
3943 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
3944 3985
3945 vce->entry_type = VP_CTRL_IOCB_TYPE; 3986 vce->entry_type = VP_CTRL_IOCB_TYPE;
3946 vce->entry_count = 1; 3987 vce->entry_count = 1;
@@ -4592,6 +4633,48 @@ qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4592 return rval; 4633 return rval;
4593} 4634}
4594 4635
4636int
4637qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4638 void *buf, uint16_t bufsiz)
4639{
4640 int rval, i;
4641 mbx_cmd_t mc;
4642 mbx_cmd_t *mcp = &mc;
4643 uint32_t *bp;
4644
4645 if (!IS_FWI2_CAPABLE(vha->hw))
4646 return QLA_FUNCTION_FAILED;
4647
4648 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4649 "Entered %s.\n", __func__);
4650
4651 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4652 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4653 mcp->mb[2] = MSW(buf_dma);
4654 mcp->mb[3] = LSW(buf_dma);
4655 mcp->mb[6] = MSW(MSD(buf_dma));
4656 mcp->mb[7] = LSW(MSD(buf_dma));
4657 mcp->mb[8] = bufsiz/4;
4658 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4659 mcp->in_mb = MBX_1|MBX_0;
4660 mcp->tov = MBX_TOV_SECONDS;
4661 mcp->flags = 0;
4662 rval = qla2x00_mailbox_command(vha, mcp);
4663
4664 if (rval != QLA_SUCCESS) {
4665 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4666 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4667 } else {
4668 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4669 "Done %s.\n", __func__);
4670 bp = (uint32_t *) buf;
4671 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4672 *bp = cpu_to_be32(*bp);
4673 }
4674
4675 return rval;
4676}
4677
4595static int 4678static int
4596qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4679qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4597{ 4680{
@@ -6025,13 +6108,12 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6025 if (!vha->hw->flags.fw_started) 6108 if (!vha->hw->flags.fw_started)
6026 goto done; 6109 goto done;
6027 6110
6028 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 6111 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6029 if (pd == NULL) { 6112 if (pd == NULL) {
6030 ql_log(ql_log_warn, vha, 0xd047, 6113 ql_log(ql_log_warn, vha, 0xd047,
6031 "Failed to allocate port database structure.\n"); 6114 "Failed to allocate port database structure.\n");
6032 goto done_free_sp; 6115 goto done_free_sp;
6033 } 6116 }
6034 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
6035 6117
6036 memset(&mc, 0, sizeof(mc)); 6118 memset(&mc, 0, sizeof(mc));
6037 mc.mb[0] = MBC_GET_PORT_DATABASE; 6119 mc.mb[0] = MBC_GET_PORT_DATABASE;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cbf544dbf883..bd9f14bf7ac2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -606,7 +606,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
606 struct qla_hw_data *ha = vha->hw; 606 struct qla_hw_data *ha = vha->hw;
607 struct qla_qpair *qpair, *tqpair; 607 struct qla_qpair *qpair, *tqpair;
608 608
609 if (ql2xmqsupport) { 609 if (ql2xmqsupport || ql2xnvmeenable) {
610 list_for_each_entry_safe(qpair, tqpair, &vha->qp_list, 610 list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
611 qp_list_elem) 611 qp_list_elem)
612 qla2xxx_delete_qpair(vha, qpair); 612 qla2xxx_delete_qpair(vha, qpair);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index e23a3d4c36f3..d5da3981cefe 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -2245,8 +2245,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2245 memcpy(fstatus.reserved_3, 2245 memcpy(fstatus.reserved_3,
2246 pkt->reserved_2, 20 * sizeof(uint8_t)); 2246 pkt->reserved_2, 20 * sizeof(uint8_t));
2247 2247
2248 fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) + 2248 fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
2249 sizeof(struct fc_bsg_reply);
2250 2249
2251 memcpy(fw_sts_ptr, (uint8_t *)&fstatus, 2250 memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
2252 sizeof(struct qla_mt_iocb_rsp_fx00)); 2251 sizeof(struct qla_mt_iocb_rsp_fx00));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 50286cf02eca..46f2d0cf7c0d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -268,6 +268,15 @@ MODULE_PARM_DESC(ql2xautodetectsfp,
268 "Detect SFP range and set appropriate distance.\n" 268 "Detect SFP range and set appropriate distance.\n"
269 "1 (Default): Enable\n"); 269 "1 (Default): Enable\n");
270 270
271int ql2xenablemsix = 1;
272module_param(ql2xenablemsix, int, 0444);
273MODULE_PARM_DESC(ql2xenablemsix,
274 "Set to enable MSI or MSI-X interrupt mechanism.\n"
275 " Default is 1, enable MSI-X interrupt mechanism.\n"
276 " 0 -- enable traditional pin-based mechanism.\n"
277 " 1 -- enable MSI-X interrupt mechanism.\n"
278 " 2 -- enable MSI interrupt mechanism.\n");
279
271/* 280/*
272 * SCSI host template entry points 281 * SCSI host template entry points
273 */ 282 */
@@ -386,7 +395,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
386 INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list); 395 INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
387 ha->base_qpair->enable_class_2 = ql2xenableclass2; 396 ha->base_qpair->enable_class_2 = ql2xenableclass2;
388 /* init qpair to this cpu. Will adjust at run time. */ 397 /* init qpair to this cpu. Will adjust at run time. */
389 qla_cpu_update(rsp->qpair, smp_processor_id()); 398 qla_cpu_update(rsp->qpair, raw_smp_processor_id());
390 ha->base_qpair->pdev = ha->pdev; 399 ha->base_qpair->pdev = ha->pdev;
391 400
392 if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) 401 if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
@@ -422,7 +431,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
422 431
423 qla_init_base_qpair(vha, req, rsp); 432 qla_init_base_qpair(vha, req, rsp);
424 433
425 if (ql2xmqsupport && ha->max_qpairs) { 434 if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
426 ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *), 435 ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
427 GFP_KERNEL); 436 GFP_KERNEL);
428 if (!ha->queue_pair_map) { 437 if (!ha->queue_pair_map) {
@@ -1965,7 +1974,8 @@ skip_pio:
1965 /* Determine queue resources */ 1974 /* Determine queue resources */
1966 ha->max_req_queues = ha->max_rsp_queues = 1; 1975 ha->max_req_queues = ha->max_rsp_queues = 1;
1967 ha->msix_count = QLA_BASE_VECTORS; 1976 ha->msix_count = QLA_BASE_VECTORS;
1968 if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1977 if (!ql2xmqsupport || !ql2xnvmeenable ||
1978 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1969 goto mqiobase_exit; 1979 goto mqiobase_exit;
1970 1980
1971 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1981 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -2062,7 +2072,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
2062 * By default, driver uses at least two msix vectors 2072 * By default, driver uses at least two msix vectors
2063 * (default & rspq) 2073 * (default & rspq)
2064 */ 2074 */
2065 if (ql2xmqsupport) { 2075 if (ql2xmqsupport || ql2xnvmeenable) {
2066 /* MB interrupt uses 1 vector */ 2076 /* MB interrupt uses 1 vector */
2067 ha->max_req_queues = ha->msix_count - 1; 2077 ha->max_req_queues = ha->msix_count - 1;
2068 2078
@@ -3080,9 +3090,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3080 3090
3081 ql_dbg(ql_dbg_init, base_vha, 0x0192, 3091 ql_dbg(ql_dbg_init, base_vha, 0x0192,
3082 "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); 3092 "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
3083 } else 3093 } else {
3084 ql_dbg(ql_dbg_init, base_vha, 0x0193, 3094 if (ql2xnvmeenable) {
3085 "blk/scsi-mq disabled.\n"); 3095 host->nr_hw_queues = ha->max_qpairs;
3096 ql_dbg(ql_dbg_init, base_vha, 0x0194,
3097 "FC-NVMe support is enabled, HW queues=%d\n",
3098 host->nr_hw_queues);
3099 } else {
3100 ql_dbg(ql_dbg_init, base_vha, 0x0193,
3101 "blk/scsi-mq disabled.\n");
3102 }
3103 }
3086 3104
3087 qlt_probe_one_stage1(base_vha, ha); 3105 qlt_probe_one_stage1(base_vha, ha);
3088 3106
@@ -4743,7 +4761,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4743 if (pla) 4761 if (pla)
4744 qlt_plogi_ack_unref(vha, pla); 4762 qlt_plogi_ack_unref(vha, pla);
4745 else 4763 else
4746 qla24xx_async_gnl(vha, fcport); 4764 qla24xx_async_gffid(vha, fcport);
4747 } 4765 }
4748 4766
4749 if (free_fcport) { 4767 if (free_fcport) {
@@ -6292,7 +6310,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6292 switch (state) { 6310 switch (state) {
6293 case pci_channel_io_normal: 6311 case pci_channel_io_normal:
6294 ha->flags.eeh_busy = 0; 6312 ha->flags.eeh_busy = 0;
6295 if (ql2xmqsupport) { 6313 if (ql2xmqsupport || ql2xnvmeenable) {
6296 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 6314 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
6297 qla2xxx_wake_dpc(vha); 6315 qla2xxx_wake_dpc(vha);
6298 } 6316 }
@@ -6309,7 +6327,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6309 pci_disable_device(pdev); 6327 pci_disable_device(pdev);
6310 /* Return back all IOs */ 6328 /* Return back all IOs */
6311 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 6329 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
6312 if (ql2xmqsupport) { 6330 if (ql2xmqsupport || ql2xnvmeenable) {
6313 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 6331 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
6314 qla2xxx_wake_dpc(vha); 6332 qla2xxx_wake_dpc(vha);
6315 } 6333 }
@@ -6317,7 +6335,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6317 case pci_channel_io_perm_failure: 6335 case pci_channel_io_perm_failure:
6318 ha->flags.pci_channel_io_perm_failure = 1; 6336 ha->flags.pci_channel_io_perm_failure = 1;
6319 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); 6337 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
6320 if (ql2xmqsupport) { 6338 if (ql2xmqsupport || ql2xnvmeenable) {
6321 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); 6339 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
6322 qla2xxx_wake_dpc(vha); 6340 qla2xxx_wake_dpc(vha);
6323 } 6341 }
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f946bf889015..18069edd4773 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -6546,6 +6546,7 @@ void
6546qlt_24xx_config_rings(struct scsi_qla_host *vha) 6546qlt_24xx_config_rings(struct scsi_qla_host *vha)
6547{ 6547{
6548 struct qla_hw_data *ha = vha->hw; 6548 struct qla_hw_data *ha = vha->hw;
6549 struct init_cb_24xx *icb;
6549 if (!QLA_TGT_MODE_ENABLED()) 6550 if (!QLA_TGT_MODE_ENABLED())
6550 return; 6551 return;
6551 6552
@@ -6553,14 +6554,19 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
6553 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); 6554 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
6554 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); 6555 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
6555 6556
6556 if (IS_ATIO_MSIX_CAPABLE(ha)) { 6557 icb = (struct init_cb_24xx *)ha->init_cb;
6558
6559 if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) {
6557 struct qla_msix_entry *msix = &ha->msix_entries[2]; 6560 struct qla_msix_entry *msix = &ha->msix_entries[2];
6558 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6559 6561
6560 icb->msix_atio = cpu_to_le16(msix->entry); 6562 icb->msix_atio = cpu_to_le16(msix->entry);
6561 ql_dbg(ql_dbg_init, vha, 0xf072, 6563 ql_dbg(ql_dbg_init, vha, 0xf072,
6562 "Registering ICB vector 0x%x for atio que.\n", 6564 "Registering ICB vector 0x%x for atio que.\n",
6563 msix->entry); 6565 msix->entry);
6566 } else if (ql2xenablemsix == 0) {
6567 icb->firmware_options_2 |= cpu_to_le32(BIT_26);
6568 ql_dbg(ql_dbg_init, vha, 0xf07f,
6569 "Registering INTx vector for ATIO.\n");
6564 } 6570 }
6565} 6571}
6566 6572
@@ -6805,7 +6811,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
6805 if (!QLA_TGT_MODE_ENABLED()) 6811 if (!QLA_TGT_MODE_ENABLED())
6806 return; 6812 return;
6807 6813
6808 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 6814 if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6809 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 6815 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
6810 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 6816 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
6811 } else { 6817 } else {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 8c4b505c9f66..b6ec02b96d3d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "10.00.00.01-k" 10#define QLA2XXX_VERSION "10.00.00.02-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 10 12#define QLA_DRIVER_MAJOR_VER 10
13#define QLA_DRIVER_MINOR_VER 0 13#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 09ba494f8896..e4f037f0f38b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -953,9 +953,9 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
953} 953}
954 954
955 955
956static const char * inq_vendor_id = "Linux "; 956static char sdebug_inq_vendor_id[9] = "Linux ";
957static const char * inq_product_id = "scsi_debug "; 957static char sdebug_inq_product_id[17] = "scsi_debug ";
958static const char *inq_product_rev = "0186"; /* version less '.' */ 958static char sdebug_inq_product_rev[5] = "0186"; /* version less '.' */
959/* Use some locally assigned NAAs for SAS addresses. */ 959/* Use some locally assigned NAAs for SAS addresses. */
960static const u64 naa3_comp_a = 0x3222222000000000ULL; 960static const u64 naa3_comp_a = 0x3222222000000000ULL;
961static const u64 naa3_comp_b = 0x3333333000000000ULL; 961static const u64 naa3_comp_b = 0x3333333000000000ULL;
@@ -975,8 +975,8 @@ static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
975 arr[0] = 0x2; /* ASCII */ 975 arr[0] = 0x2; /* ASCII */
976 arr[1] = 0x1; 976 arr[1] = 0x1;
977 arr[2] = 0x0; 977 arr[2] = 0x0;
978 memcpy(&arr[4], inq_vendor_id, 8); 978 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
979 memcpy(&arr[12], inq_product_id, 16); 979 memcpy(&arr[12], sdebug_inq_product_id, 16);
980 memcpy(&arr[28], dev_id_str, dev_id_str_len); 980 memcpy(&arr[28], dev_id_str, dev_id_str_len);
981 num = 8 + 16 + dev_id_str_len; 981 num = 8 + 16 + dev_id_str_len;
982 arr[3] = num; 982 arr[3] = num;
@@ -1408,9 +1408,9 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1408 arr[6] = 0x10; /* claim: MultiP */ 1408 arr[6] = 0x10; /* claim: MultiP */
1409 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ 1409 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1410 arr[7] = 0xa; /* claim: LINKED + CMDQUE */ 1410 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1411 memcpy(&arr[8], inq_vendor_id, 8); 1411 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1412 memcpy(&arr[16], inq_product_id, 16); 1412 memcpy(&arr[16], sdebug_inq_product_id, 16);
1413 memcpy(&arr[32], inq_product_rev, 4); 1413 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1414 /* version descriptors (2 bytes each) follow */ 1414 /* version descriptors (2 bytes each) follow */
1415 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */ 1415 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1416 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */ 1416 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
@@ -3001,11 +3001,11 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3001 if (-1 == ret) { 3001 if (-1 == ret) {
3002 write_unlock_irqrestore(&atomic_rw, iflags); 3002 write_unlock_irqrestore(&atomic_rw, iflags);
3003 return DID_ERROR << 16; 3003 return DID_ERROR << 16;
3004 } else if (sdebug_verbose && (ret < (num * sdebug_sector_size))) 3004 } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
3005 sdev_printk(KERN_INFO, scp->device, 3005 sdev_printk(KERN_INFO, scp->device,
3006 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", 3006 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3007 my_name, "write same", 3007 my_name, "write same",
3008 num * sdebug_sector_size, ret); 3008 sdebug_sector_size, ret);
3009 3009
3010 /* Copy first sector to remaining blocks */ 3010 /* Copy first sector to remaining blocks */
3011 for (i = 1 ; i < num ; i++) 3011 for (i = 1 ; i < num ; i++)
@@ -4151,6 +4151,12 @@ module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4151module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR); 4151module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4152module_param_named(guard, sdebug_guard, uint, S_IRUGO); 4152module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4153module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR); 4153module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4154module_param_string(inq_vendor, sdebug_inq_vendor_id,
4155 sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4156module_param_string(inq_product, sdebug_inq_product_id,
4157 sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4158module_param_string(inq_rev, sdebug_inq_product_rev,
4159 sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4154module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO); 4160module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4155module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO); 4161module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4156module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO); 4162module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
@@ -4202,6 +4208,9 @@ MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4202MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); 4208MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4203MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); 4209MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4204MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)"); 4210MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4211MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
4212MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
4213MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"0186\")");
4205MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); 4214MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4206MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); 4215MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4207MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); 4216MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 6bf43d94cdc0..fe5a9ea27b5e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -135,6 +135,7 @@ static struct {
135 {"3PARdata", "VV", NULL, BLIST_REPORTLUN2}, 135 {"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
136 {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN}, 136 {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
137 {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, 137 {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
138 {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
138 {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, 139 {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
139 {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, 140 {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
140 {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN}, 141 {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
@@ -161,7 +162,7 @@ static struct {
161 {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ 162 {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
162 {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ 163 {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
163 {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 164 {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
164 {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN}, 165 {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
165 {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, 166 {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
166 {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, 167 {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
167 {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, 168 {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
@@ -174,7 +175,7 @@ static struct {
174 {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, 175 {"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
175 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, 176 {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
176 {"HITACHI", "HUS1530", "*", BLIST_NO_DIF}, 177 {"HITACHI", "HUS1530", "*", BLIST_NO_DIF},
177 {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2}, 178 {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
178 {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 179 {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
179 {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 180 {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
180 {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 181 {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -305,8 +306,8 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
305 */ 306 */
306 to[from_length] = '\0'; 307 to[from_length] = '\0';
307 } else { 308 } else {
308 /* 309 /*
309 * space pad the string if it is short. 310 * space pad the string if it is short.
310 */ 311 */
311 strncpy(&to[from_length], spaces, 312 strncpy(&to[from_length], spaces,
312 to_length - from_length); 313 to_length - from_length);
@@ -326,10 +327,10 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
326 * @flags: if strflags NULL, use this flag value 327 * @flags: if strflags NULL, use this flag value
327 * 328 *
328 * Description: 329 * Description:
329 * Create and add one dev_info entry for @vendor, @model, @strflags or 330 * Create and add one dev_info entry for @vendor, @model, @strflags or
330 * @flag. If @compatible, add to the tail of the list, do not space 331 * @flag. If @compatible, add to the tail of the list, do not space
331 * pad, and set devinfo->compatible. The scsi_static_device_list entries 332 * pad, and set devinfo->compatible. The scsi_static_device_list entries
332 * are added with @compatible 1 and @clfags NULL. 333 * are added with @compatible 1 and @clfags NULL.
333 * 334 *
334 * Returns: 0 OK, -error on failure. 335 * Returns: 0 OK, -error on failure.
335 **/ 336 **/
@@ -351,11 +352,11 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
351 * @key: specify list to use 352 * @key: specify list to use
352 * 353 *
353 * Description: 354 * Description:
354 * Create and add one dev_info entry for @vendor, @model, 355 * Create and add one dev_info entry for @vendor, @model,
355 * @strflags or @flag in list specified by @key. If @compatible, 356 * @strflags or @flag in list specified by @key. If @compatible,
356 * add to the tail of the list, do not space pad, and set 357 * add to the tail of the list, do not space pad, and set
357 * devinfo->compatible. The scsi_static_device_list entries are 358 * devinfo->compatible. The scsi_static_device_list entries are
358 * added with @compatible 1 and @clfags NULL. 359 * added with @compatible 1 and @clfags NULL.
359 * 360 *
360 * Returns: 0 OK, -error on failure. 361 * Returns: 0 OK, -error on failure.
361 **/ 362 **/
@@ -400,13 +401,13 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
400 401
401/** 402/**
402 * scsi_dev_info_list_find - find a matching dev_info list entry. 403 * scsi_dev_info_list_find - find a matching dev_info list entry.
403 * @vendor: vendor string 404 * @vendor: full vendor string
404 * @model: model (product) string 405 * @model: full model (product) string
405 * @key: specify list to use 406 * @key: specify list to use
406 * 407 *
407 * Description: 408 * Description:
408 * Finds the first dev_info entry matching @vendor, @model 409 * Finds the first dev_info entry matching @vendor, @model
409 * in list specified by @key. 410 * in list specified by @key.
410 * 411 *
411 * Returns: pointer to matching entry, or ERR_PTR on failure. 412 * Returns: pointer to matching entry, or ERR_PTR on failure.
412 **/ 413 **/
@@ -416,7 +417,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
416 struct scsi_dev_info_list *devinfo; 417 struct scsi_dev_info_list *devinfo;
417 struct scsi_dev_info_list_table *devinfo_table = 418 struct scsi_dev_info_list_table *devinfo_table =
418 scsi_devinfo_lookup_by_key(key); 419 scsi_devinfo_lookup_by_key(key);
419 size_t vmax, mmax; 420 size_t vmax, mmax, mlen;
420 const char *vskip, *mskip; 421 const char *vskip, *mskip;
421 422
422 if (IS_ERR(devinfo_table)) 423 if (IS_ERR(devinfo_table))
@@ -455,22 +456,25 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
455 dev_info_list) { 456 dev_info_list) {
456 if (devinfo->compatible) { 457 if (devinfo->compatible) {
457 /* 458 /*
458 * Behave like the older version of get_device_flags. 459 * vendor strings must be an exact match
459 */ 460 */
460 if (memcmp(devinfo->vendor, vskip, vmax) || 461 if (vmax != strlen(devinfo->vendor) ||
461 (vmax < sizeof(devinfo->vendor) && 462 memcmp(devinfo->vendor, vskip, vmax))
462 devinfo->vendor[vmax]))
463 continue; 463 continue;
464 if (memcmp(devinfo->model, mskip, mmax) || 464
465 (mmax < sizeof(devinfo->model) && 465 /*
466 devinfo->model[mmax])) 466 * @model specifies the full string, and
467 * must be larger or equal to devinfo->model
468 */
469 mlen = strlen(devinfo->model);
470 if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
467 continue; 471 continue;
468 return devinfo; 472 return devinfo;
469 } else { 473 } else {
470 if (!memcmp(devinfo->vendor, vendor, 474 if (!memcmp(devinfo->vendor, vendor,
471 sizeof(devinfo->vendor)) && 475 sizeof(devinfo->vendor)) &&
472 !memcmp(devinfo->model, model, 476 !memcmp(devinfo->model, model,
473 sizeof(devinfo->model))) 477 sizeof(devinfo->model)))
474 return devinfo; 478 return devinfo;
475 } 479 }
476 } 480 }
@@ -509,10 +513,10 @@ EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
509 * @dev_list: string of device flags to add 513 * @dev_list: string of device flags to add
510 * 514 *
511 * Description: 515 * Description:
512 * Parse dev_list, and add entries to the scsi_dev_info_list. 516 * Parse dev_list, and add entries to the scsi_dev_info_list.
513 * dev_list is of the form "vendor:product:flag,vendor:product:flag". 517 * dev_list is of the form "vendor:product:flag,vendor:product:flag".
514 * dev_list is modified via strsep. Can be called for command line 518 * dev_list is modified via strsep. Can be called for command line
515 * addition, for proc or mabye a sysfs interface. 519 * addition, for proc or mabye a sysfs interface.
516 * 520 *
517 * Returns: 0 if OK, -error on failure. 521 * Returns: 0 if OK, -error on failure.
518 **/ 522 **/
@@ -702,7 +706,7 @@ static int proc_scsi_devinfo_open(struct inode *inode, struct file *file)
702 return seq_open(file, &scsi_devinfo_seq_ops); 706 return seq_open(file, &scsi_devinfo_seq_ops);
703} 707}
704 708
705/* 709/*
706 * proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc. 710 * proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc.
707 * 711 *
708 * Description: Adds a black/white list entry for vendor and model with an 712 * Description: Adds a black/white list entry for vendor and model with an
@@ -841,8 +845,8 @@ EXPORT_SYMBOL(scsi_dev_info_remove_list);
841 * scsi_init_devinfo - set up the dynamic device list. 845 * scsi_init_devinfo - set up the dynamic device list.
842 * 846 *
843 * Description: 847 * Description:
844 * Add command line entries from scsi_dev_flags, then add 848 * Add command line entries from scsi_dev_flags, then add
845 * scsi_static_device_list entries to the scsi device info list. 849 * scsi_static_device_list entries to the scsi device info list.
846 */ 850 */
847int __init scsi_init_devinfo(void) 851int __init scsi_init_devinfo(void)
848{ 852{
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 84addee05be6..2b785d09d5bd 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -126,20 +126,36 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
126static int scsi_dh_handler_attach(struct scsi_device *sdev, 126static int scsi_dh_handler_attach(struct scsi_device *sdev,
127 struct scsi_device_handler *scsi_dh) 127 struct scsi_device_handler *scsi_dh)
128{ 128{
129 int error; 129 int error, ret = 0;
130 130
131 if (!try_module_get(scsi_dh->module)) 131 if (!try_module_get(scsi_dh->module))
132 return -EINVAL; 132 return -EINVAL;
133 133
134 error = scsi_dh->attach(sdev); 134 error = scsi_dh->attach(sdev);
135 if (error) { 135 if (error != SCSI_DH_OK) {
136 sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n", 136 switch (error) {
137 scsi_dh->name, error); 137 case SCSI_DH_NOMEM:
138 ret = -ENOMEM;
139 break;
140 case SCSI_DH_RES_TEMP_UNAVAIL:
141 ret = -EAGAIN;
142 break;
143 case SCSI_DH_DEV_UNSUPP:
144 case SCSI_DH_NOSYS:
145 ret = -ENODEV;
146 break;
147 default:
148 ret = -EINVAL;
149 break;
150 }
151 if (ret != -ENODEV)
152 sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n",
153 scsi_dh->name, error);
138 module_put(scsi_dh->module); 154 module_put(scsi_dh->module);
139 } else 155 } else
140 sdev->handler = scsi_dh; 156 sdev->handler = scsi_dh;
141 157
142 return error; 158 return ret;
143} 159}
144 160
145/* 161/*
@@ -153,18 +169,20 @@ static void scsi_dh_handler_detach(struct scsi_device *sdev)
153 module_put(sdev->handler->module); 169 module_put(sdev->handler->module);
154} 170}
155 171
156int scsi_dh_add_device(struct scsi_device *sdev) 172void scsi_dh_add_device(struct scsi_device *sdev)
157{ 173{
158 struct scsi_device_handler *devinfo = NULL; 174 struct scsi_device_handler *devinfo = NULL;
159 const char *drv; 175 const char *drv;
160 int err = 0;
161 176
162 drv = scsi_dh_find_driver(sdev); 177 drv = scsi_dh_find_driver(sdev);
163 if (drv) 178 if (drv)
164 devinfo = __scsi_dh_lookup(drv); 179 devinfo = __scsi_dh_lookup(drv);
180 /*
181 * device_handler is optional, so ignore errors
182 * from scsi_dh_handler_attach()
183 */
165 if (devinfo) 184 if (devinfo)
166 err = scsi_dh_handler_attach(sdev, devinfo); 185 (void)scsi_dh_handler_attach(sdev, devinfo);
167 return err;
168} 186}
169 187
170void scsi_dh_release_device(struct scsi_device *sdev) 188void scsi_dh_release_device(struct scsi_device *sdev)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index dab876c65473..62b56de38ae8 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -403,6 +403,12 @@ static void scsi_report_sense(struct scsi_device *sdev,
403 "threshold.\n"); 403 "threshold.\n");
404 } 404 }
405 405
406 if (sshdr->asc == 0x29) {
407 evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
408 sdev_printk(KERN_WARNING, sdev,
409 "Power-on or device reset occurred\n");
410 }
411
406 if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) { 412 if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
407 evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED; 413 evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
408 sdev_printk(KERN_WARNING, sdev, 414 sdev_printk(KERN_WARNING, sdev,
@@ -579,6 +585,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
579 case ILLEGAL_REQUEST: 585 case ILLEGAL_REQUEST:
580 if (sshdr.asc == 0x20 || /* Invalid command operation code */ 586 if (sshdr.asc == 0x20 || /* Invalid command operation code */
581 sshdr.asc == 0x21 || /* Logical block address out of range */ 587 sshdr.asc == 0x21 || /* Logical block address out of range */
588 sshdr.asc == 0x22 || /* Invalid function */
582 sshdr.asc == 0x24 || /* Invalid field in cdb */ 589 sshdr.asc == 0x24 || /* Invalid field in cdb */
583 sshdr.asc == 0x26 || /* Parameter value invalid */ 590 sshdr.asc == 0x26 || /* Parameter value invalid */
584 sshdr.asc == 0x27) { /* Write protected */ 591 sshdr.asc == 0x27) { /* Write protected */
@@ -1747,16 +1754,12 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1747 * that it indicates SUCCESS. 1754 * that it indicates SUCCESS.
1748 */ 1755 */
1749 return SUCCESS; 1756 return SUCCESS;
1757 case DID_SOFT_ERROR:
1750 /* 1758 /*
1751 * when the low level driver returns did_soft_error, 1759 * when the low level driver returns did_soft_error,
1752 * it is responsible for keeping an internal retry counter 1760 * it is responsible for keeping an internal retry counter
1753 * in order to avoid endless loops (db) 1761 * in order to avoid endless loops (db)
1754 *
1755 * actually this is a bug in this function here. we should
1756 * be mindful of the maximum number of retries specified
1757 * and not get stuck in a loop.
1758 */ 1762 */
1759 case DID_SOFT_ERROR:
1760 goto maybe_retry; 1763 goto maybe_retry;
1761 case DID_IMM_RETRY: 1764 case DID_IMM_RETRY:
1762 return NEEDS_RETRY; 1765 return NEEDS_RETRY;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 54de24c785dd..1cbc497e00bd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1750,7 +1750,10 @@ static void scsi_done(struct scsi_cmnd *cmd)
1750 * 1750 *
1751 * Returns: Nothing 1751 * Returns: Nothing
1752 * 1752 *
1753 * Lock status: IO request lock assumed to be held when called. 1753 * Lock status: request queue lock assumed to be held when called.
1754 *
1755 * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
1756 * protection for ZBC disks.
1754 */ 1757 */
1755static void scsi_request_fn(struct request_queue *q) 1758static void scsi_request_fn(struct request_queue *q)
1756 __releases(q->queue_lock) 1759 __releases(q->queue_lock)
@@ -2754,6 +2757,9 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2754 case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: 2757 case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2755 envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED"; 2758 envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
2756 break; 2759 break;
2760 case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2761 envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
2762 break;
2757 default: 2763 default:
2758 /* do nothing */ 2764 /* do nothing */
2759 break; 2765 break;
@@ -2858,6 +2864,7 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2858 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2864 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2859 case SDEV_EVT_LUN_CHANGE_REPORTED: 2865 case SDEV_EVT_LUN_CHANGE_REPORTED:
2860 case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: 2866 case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
2867 case SDEV_EVT_POWER_ON_RESET_OCCURRED:
2861 default: 2868 default:
2862 /* do nothing */ 2869 /* do nothing */
2863 break; 2870 break;
diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h
index 6907c924df72..836185de28c4 100644
--- a/drivers/scsi/scsi_logging.h
+++ b/drivers/scsi/scsi_logging.h
@@ -4,10 +4,10 @@
4 4
5 5
6/* 6/*
7 * This defines the scsi logging feature. It is a means by which the user 7 * This defines the scsi logging feature. It is a means by which the user can
8 * can select how much information they get about various goings on, and it 8 * select how much information they get about various goings on, and it can be
9 * can be really useful for fault tracing. The logging word is divided into 9 * really useful for fault tracing. The logging word is divided into 10 3-bit
10 * 8 nibbles, each of which describes a loglevel. The division of things is 10 * bitfields, each of which describes a loglevel. The division of things is
11 * somewhat arbitrary, and the division of the word could be changed if it 11 * somewhat arbitrary, and the division of the word could be changed if it
12 * were really needed for any reason. The numbers below are the only place 12 * were really needed for any reason. The numbers below are the only place
13 * where these are specified. For a first go-around, 3 bits is more than 13 * where these are specified. For a first go-around, 3 bits is more than
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index d7669caa9893..df1368aea9a3 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -177,10 +177,10 @@ extern struct async_domain scsi_sd_probe_domain;
177 177
178/* scsi_dh.c */ 178/* scsi_dh.c */
179#ifdef CONFIG_SCSI_DH 179#ifdef CONFIG_SCSI_DH
180int scsi_dh_add_device(struct scsi_device *sdev); 180void scsi_dh_add_device(struct scsi_device *sdev);
181void scsi_dh_release_device(struct scsi_device *sdev); 181void scsi_dh_release_device(struct scsi_device *sdev);
182#else 182#else
183static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; } 183static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
184static inline void scsi_dh_release_device(struct scsi_device *sdev) { } 184static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
185#endif 185#endif
186static inline void scsi_dh_remove_device(struct scsi_device *sdev) { } 186static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 40124648a07b..a0f2a20ea9e9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -988,6 +988,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
988 scsi_attach_vpd(sdev); 988 scsi_attach_vpd(sdev);
989 989
990 sdev->max_queue_depth = sdev->queue_depth; 990 sdev->max_queue_depth = sdev->queue_depth;
991 sdev->sdev_bflags = *bflags;
991 992
992 /* 993 /*
993 * Ok, the device is now all set up, we can 994 * Ok, the device is now all set up, we can
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index f796bd61f3f0..50e7d7e4a861 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -20,6 +20,7 @@
20#include <scsi/scsi_dh.h> 20#include <scsi/scsi_dh.h>
21#include <scsi/scsi_transport.h> 21#include <scsi/scsi_transport.h>
22#include <scsi/scsi_driver.h> 22#include <scsi/scsi_driver.h>
23#include <scsi/scsi_devinfo.h>
23 24
24#include "scsi_priv.h" 25#include "scsi_priv.h"
25#include "scsi_logging.h" 26#include "scsi_logging.h"
@@ -966,6 +967,41 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
966} 967}
967static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL); 968static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
968 969
970#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
971static const char *const sdev_bflags_name[] = {
972#include "scsi_devinfo_tbl.c"
973};
974#undef BLIST_FLAG_NAME
975
976static ssize_t
977sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
978 char *buf)
979{
980 struct scsi_device *sdev = to_scsi_device(dev);
981 int i;
982 ssize_t len = 0;
983
984 for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
985 const char *name = NULL;
986
987 if (!(sdev->sdev_bflags & BIT(i)))
988 continue;
989 if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
990 name = sdev_bflags_name[i];
991
992 if (name)
993 len += snprintf(buf + len, PAGE_SIZE - len,
994 "%s%s", len ? " " : "", name);
995 else
996 len += snprintf(buf + len, PAGE_SIZE - len,
997 "%sINVALID_BIT(%d)", len ? " " : "", i);
998 }
999 if (len)
1000 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1001 return len;
1002}
1003static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL);
1004
969#ifdef CONFIG_SCSI_DH 1005#ifdef CONFIG_SCSI_DH
970static ssize_t 1006static ssize_t
971sdev_show_dh_state(struct device *dev, struct device_attribute *attr, 1007sdev_show_dh_state(struct device *dev, struct device_attribute *attr,
@@ -1151,6 +1187,7 @@ static struct attribute *scsi_sdev_attrs[] = {
1151 &dev_attr_queue_depth.attr, 1187 &dev_attr_queue_depth.attr,
1152 &dev_attr_queue_type.attr, 1188 &dev_attr_queue_type.attr,
1153 &dev_attr_wwid.attr, 1189 &dev_attr_wwid.attr,
1190 &dev_attr_blacklist.attr,
1154#ifdef CONFIG_SCSI_DH 1191#ifdef CONFIG_SCSI_DH
1155 &dev_attr_dh_state.attr, 1192 &dev_attr_dh_state.attr,
1156 &dev_attr_access_state.attr, 1193 &dev_attr_access_state.attr,
@@ -1234,13 +1271,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
1234 1271
1235 scsi_autopm_get_device(sdev); 1272 scsi_autopm_get_device(sdev);
1236 1273
1237 error = scsi_dh_add_device(sdev); 1274 scsi_dh_add_device(sdev);
1238 if (error)
1239 /*
1240 * device_handler is optional, so any error can be ignored
1241 */
1242 sdev_printk(KERN_INFO, sdev,
1243 "failed to add device handler: %d\n", error);
1244 1275
1245 error = device_add(&sdev->sdev_gendev); 1276 error = device_add(&sdev->sdev_gendev);
1246 if (error) { 1277 if (error) {
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 8c46a6d536af..4664024bd5d3 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -267,6 +267,8 @@ static const struct {
267 { FC_PORTSPEED_50GBIT, "50 Gbit" }, 267 { FC_PORTSPEED_50GBIT, "50 Gbit" },
268 { FC_PORTSPEED_100GBIT, "100 Gbit" }, 268 { FC_PORTSPEED_100GBIT, "100 Gbit" },
269 { FC_PORTSPEED_25GBIT, "25 Gbit" }, 269 { FC_PORTSPEED_25GBIT, "25 Gbit" },
270 { FC_PORTSPEED_64BIT, "64 Gbit" },
271 { FC_PORTSPEED_128BIT, "128 Gbit" },
270 { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, 272 { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
271}; 273};
272fc_bitfield_name_search(port_speed, fc_port_speed_names) 274fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 7404d26895f5..f4b52b44b966 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3420,7 +3420,7 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
3420 3420
3421 shost = scsi_host_lookup(ev->u.get_host_stats.host_no); 3421 shost = scsi_host_lookup(ev->u.get_host_stats.host_no);
3422 if (!shost) { 3422 if (!shost) {
3423 pr_err("%s: failed. Cound not find host no %u\n", 3423 pr_err("%s: failed. Could not find host no %u\n",
3424 __func__, ev->u.get_host_stats.host_no); 3424 __func__, ev->u.get_host_stats.host_no);
3425 return -ENODEV; 3425 return -ENODEV;
3426 } 3426 }
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 319dff970237..736a1f4f9676 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -177,7 +177,7 @@ static int sas_smp_dispatch(struct bsg_job *job)
177 if (!scsi_is_host_device(job->dev)) 177 if (!scsi_is_host_device(job->dev))
178 rphy = dev_to_rphy(job->dev); 178 rphy = dev_to_rphy(job->dev);
179 179
180 if (!job->req->next_rq) { 180 if (!job->reply_payload.payload_len) {
181 dev_warn(job->dev, "space for a smp response is missing\n"); 181 dev_warn(job->dev, "space for a smp response is missing\n");
182 bsg_job_done(job, -EINVAL, 0); 182 bsg_job_done(job, -EINVAL, 0);
183 return 0; 183 return 0;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d175c5c5ccf8..24fe68522716 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -231,11 +231,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
231{ 231{
232 struct scsi_disk *sdkp = to_scsi_disk(dev); 232 struct scsi_disk *sdkp = to_scsi_disk(dev);
233 struct scsi_device *sdp = sdkp->device; 233 struct scsi_device *sdp = sdkp->device;
234 bool v;
234 235
235 if (!capable(CAP_SYS_ADMIN)) 236 if (!capable(CAP_SYS_ADMIN))
236 return -EACCES; 237 return -EACCES;
237 238
238 sdp->manage_start_stop = simple_strtoul(buf, NULL, 10); 239 if (kstrtobool(buf, &v))
240 return -EINVAL;
241
242 sdp->manage_start_stop = v;
239 243
240 return count; 244 return count;
241} 245}
@@ -253,6 +257,7 @@ static ssize_t
253allow_restart_store(struct device *dev, struct device_attribute *attr, 257allow_restart_store(struct device *dev, struct device_attribute *attr,
254 const char *buf, size_t count) 258 const char *buf, size_t count)
255{ 259{
260 bool v;
256 struct scsi_disk *sdkp = to_scsi_disk(dev); 261 struct scsi_disk *sdkp = to_scsi_disk(dev);
257 struct scsi_device *sdp = sdkp->device; 262 struct scsi_device *sdp = sdkp->device;
258 263
@@ -262,7 +267,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
262 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) 267 if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
263 return -EINVAL; 268 return -EINVAL;
264 269
265 sdp->allow_restart = simple_strtoul(buf, NULL, 10); 270 if (kstrtobool(buf, &v))
271 return -EINVAL;
272
273 sdp->allow_restart = v;
266 274
267 return count; 275 return count;
268} 276}
@@ -906,6 +914,26 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
906 else 914 else
907 sdkp->zeroing_mode = SD_ZERO_WRITE; 915 sdkp->zeroing_mode = SD_ZERO_WRITE;
908 916
917 if (sdkp->max_ws_blocks &&
918 sdkp->physical_block_size > logical_block_size) {
919 /*
920 * Reporting a maximum number of blocks that is not aligned
921 * on the device physical size would cause a large write same
922 * request to be split into physically unaligned chunks by
923 * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
924 * even if the caller of these functions took care to align the
925 * large request. So make sure the maximum reported is aligned
926 * to the device physical block size. This is only an optional
927 * optimization for regular disks, but this is mandatory to
928 * avoid failure of large write same requests directed at
929 * sequential write required zones of host-managed ZBC disks.
930 */
931 sdkp->max_ws_blocks =
932 round_down(sdkp->max_ws_blocks,
933 bytes_to_logical(sdkp->device,
934 sdkp->physical_block_size));
935 }
936
909out: 937out:
910 blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks * 938 blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
911 (logical_block_size >> 9)); 939 (logical_block_size >> 9));
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 8aa54779aac1..27793b9f54c0 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -28,38 +28,18 @@
28 28
29#include <scsi/scsi.h> 29#include <scsi/scsi.h>
30#include <scsi/scsi_cmnd.h> 30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_dbg.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_driver.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_eh.h>
36 31
37#include "sd.h" 32#include "sd.h"
38#include "scsi_priv.h"
39
40enum zbc_zone_type {
41 ZBC_ZONE_TYPE_CONV = 0x1,
42 ZBC_ZONE_TYPE_SEQWRITE_REQ,
43 ZBC_ZONE_TYPE_SEQWRITE_PREF,
44 ZBC_ZONE_TYPE_RESERVED,
45};
46
47enum zbc_zone_cond {
48 ZBC_ZONE_COND_NO_WP,
49 ZBC_ZONE_COND_EMPTY,
50 ZBC_ZONE_COND_IMP_OPEN,
51 ZBC_ZONE_COND_EXP_OPEN,
52 ZBC_ZONE_COND_CLOSED,
53 ZBC_ZONE_COND_READONLY = 0xd,
54 ZBC_ZONE_COND_FULL,
55 ZBC_ZONE_COND_OFFLINE,
56};
57 33
58/** 34/**
59 * Convert a zone descriptor to a zone struct. 35 * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone,
36 * @sdkp: The disk the report originated from
37 * @buf: Address of the report zone descriptor
38 * @zone: the destination zone structure
39 *
40 * All LBA sized values are converted to 512B sectors unit.
60 */ 41 */
61static void sd_zbc_parse_report(struct scsi_disk *sdkp, 42static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
62 u8 *buf,
63 struct blk_zone *zone) 43 struct blk_zone *zone)
64{ 44{
65 struct scsi_device *sdp = sdkp->device; 45 struct scsi_device *sdp = sdkp->device;
@@ -82,7 +62,13 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp,
82} 62}
83 63
84/** 64/**
85 * Issue a REPORT ZONES scsi command. 65 * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
66 * @sdkp: The target disk
67 * @buf: Buffer to use for the reply
68 * @buflen: the buffer size
69 * @lba: Start LBA of the report
70 *
71 * For internal use during device validation.
86 */ 72 */
87static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf, 73static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
88 unsigned int buflen, sector_t lba) 74 unsigned int buflen, sector_t lba)
@@ -123,6 +109,12 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
123 return 0; 109 return 0;
124} 110}
125 111
112/**
113 * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
114 * @cmd: The command to setup
115 *
116 * Call in sd_init_command() for a REQ_OP_ZONE_REPORT request.
117 */
126int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd) 118int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
127{ 119{
128 struct request *rq = cmd->request; 120 struct request *rq = cmd->request;
@@ -165,6 +157,14 @@ int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
165 return BLKPREP_OK; 157 return BLKPREP_OK;
166} 158}
167 159
160/**
161 * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
162 * @scmd: The completed report zones command
163 * @good_bytes: reply size in bytes
164 *
165 * Convert all reported zone descriptors to struct blk_zone. The conversion
166 * is done in-place, directly in the request specified sg buffer.
167 */
168static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd, 168static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
169 unsigned int good_bytes) 169 unsigned int good_bytes)
170{ 170{
@@ -220,17 +220,32 @@ static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
220 local_irq_restore(flags); 220 local_irq_restore(flags);
221} 221}
222 222
223/**
224 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
225 * @sdkp: The target disk
226 */
223static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp) 227static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
224{ 228{
225 return logical_to_sectors(sdkp->device, sdkp->zone_blocks); 229 return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
226} 230}
227 231
232/**
233 * sd_zbc_zone_no - Get the number of the zone conataining a sector.
234 * @sdkp: The target disk
235 * @sector: 512B sector address contained in the zone
236 */
228static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp, 237static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp,
229 sector_t sector) 238 sector_t sector)
230{ 239{
231 return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift; 240 return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift;
232} 241}
233 242
243/**
244 * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
245 * @cmd: the command to setup
246 *
247 * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
248 */
234int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) 249int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
235{ 250{
236 struct request *rq = cmd->request; 251 struct request *rq = cmd->request;
@@ -263,6 +278,23 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
263 return BLKPREP_OK; 278 return BLKPREP_OK;
264} 279}
265 280
281/**
282 * sd_zbc_write_lock_zone - Write lock a sequential zone.
283 * @cmd: write command
284 *
285 * Called from sd_init_cmd() for write requests (standard write, write same or
286 * write zeroes operations). If the request target zone is not already locked,
287 * the zone is locked and BLKPREP_OK returned, allowing the request to proceed
288 * through dispatch in scsi_request_fn(). Otherwise, BLKPREP_DEFER is returned,
289 * forcing the request to wait for the zone to be unlocked, that is, for the
290 * previously issued write request targeting the same zone to complete.
291 *
292 * This is called from blk_peek_request() context with the queue lock held and
293 * before the request is removed from the scheduler. As a result, multiple
294 * contexts executing concurrently scsi_request_fn() cannot result in write
295 * sequence reordering as only a single write request per zone is allowed to
296 * proceed.
297 */
266int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd) 298int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
267{ 299{
268 struct request *rq = cmd->request; 300 struct request *rq = cmd->request;
@@ -285,10 +317,7 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
285 * Do not issue more than one write at a time per 317 * Do not issue more than one write at a time per
286 * zone. This solves write ordering problems due to 318 * zone. This solves write ordering problems due to
287 * the unlocking of the request queue in the dispatch 319 * the unlocking of the request queue in the dispatch
288 * path in the non scsi-mq case. For scsi-mq, this 320 * path in the non scsi-mq case.
289 * also avoids potential write reordering when multiple
290 * threads running on different CPUs write to the same
291 * zone (with a synchronized sequential pattern).
292 */ 321 */
293 if (sdkp->zones_wlock && 322 if (sdkp->zones_wlock &&
294 test_and_set_bit(zno, sdkp->zones_wlock)) 323 test_and_set_bit(zno, sdkp->zones_wlock))
@@ -300,6 +329,13 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
300 return BLKPREP_OK; 329 return BLKPREP_OK;
301} 330}
302 331
332/**
333 * sd_zbc_write_unlock_zone - Write unlock a sequential zone.
334 * @cmd: write command
335 *
336 * Called from sd_uninit_cmd(). Unlocking the request target zone will allow
337 * dispatching the next write request for the zone.
338 */
303void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd) 339void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
304{ 340{
305 struct request *rq = cmd->request; 341 struct request *rq = cmd->request;
@@ -314,8 +350,16 @@ void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
314 } 350 }
315} 351}
316 352
317void sd_zbc_complete(struct scsi_cmnd *cmd, 353/**
318 unsigned int good_bytes, 354 * sd_zbc_complete - ZBC command post processing.
355 * @cmd: Completed command
356 * @good_bytes: Command reply bytes
357 * @sshdr: command sense header
358 *
359 * Called from sd_done(). Process report zones reply and handle reset zone
360 * and write commands errors.
361 */
362void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
319 struct scsi_sense_hdr *sshdr) 363 struct scsi_sense_hdr *sshdr)
320{ 364{
321 int result = cmd->result; 365 int result = cmd->result;
@@ -360,7 +404,11 @@ void sd_zbc_complete(struct scsi_cmnd *cmd,
360} 404}
361 405
362/** 406/**
363 * Read zoned block device characteristics (VPD page B6). 407 * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
408 * @sdkp: Target disk
409 * @buf: Buffer where to store the VPD page data
410 *
411 * Read VPD page B6.
364 */ 412 */
365static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp, 413static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
366 unsigned char *buf) 414 unsigned char *buf)
@@ -375,25 +423,31 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
375 if (sdkp->device->type != TYPE_ZBC) { 423 if (sdkp->device->type != TYPE_ZBC) {
376 /* Host-aware */ 424 /* Host-aware */
377 sdkp->urswrz = 1; 425 sdkp->urswrz = 1;
378 sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]); 426 sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
379 sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]); 427 sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
380 sdkp->zones_max_open = 0; 428 sdkp->zones_max_open = 0;
381 } else { 429 } else {
382 /* Host-managed */ 430 /* Host-managed */
383 sdkp->urswrz = buf[4] & 1; 431 sdkp->urswrz = buf[4] & 1;
384 sdkp->zones_optimal_open = 0; 432 sdkp->zones_optimal_open = 0;
385 sdkp->zones_optimal_nonseq = 0; 433 sdkp->zones_optimal_nonseq = 0;
386 sdkp->zones_max_open = get_unaligned_be64(&buf[16]); 434 sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
387 } 435 }
388 436
389 return 0; 437 return 0;
390} 438}
391 439
392/** 440/**
393 * Check reported capacity. 441 * sd_zbc_check_capacity - Check reported capacity.
442 * @sdkp: Target disk
443 * @buf: Buffer to use for commands
444 *
445 * ZBC drive may report only the capacity of the first conventional zones at
446 * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
447 * Check this here. If the disk reported only its conventional zones capacity,
448 * get the total capacity by doing a report zones.
394 */ 449 */
395static int sd_zbc_check_capacity(struct scsi_disk *sdkp, 450static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
396 unsigned char *buf)
397{ 451{
398 sector_t lba; 452 sector_t lba;
399 int ret; 453 int ret;
@@ -421,8 +475,15 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
421 return 0; 475 return 0;
422} 476}
423 477
424#define SD_ZBC_BUF_SIZE 131072 478#define SD_ZBC_BUF_SIZE 131072U
425 479
480/**
481 * sd_zbc_check_zone_size - Check the device zone sizes
482 * @sdkp: Target disk
483 *
484 * Check that all zones of the device are equal. The last zone can however
485 * be smaller. The zone size must also be a power of two number of LBAs.
486 */
426static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) 487static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
427{ 488{
428 u64 zone_blocks; 489 u64 zone_blocks;
@@ -465,10 +526,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
465 /* Parse REPORT ZONES header */ 526 /* Parse REPORT ZONES header */
466 list_length = get_unaligned_be32(&buf[0]) + 64; 527 list_length = get_unaligned_be32(&buf[0]) + 64;
467 rec = buf + 64; 528 rec = buf + 64;
468 if (list_length < SD_ZBC_BUF_SIZE) 529 buf_len = min(list_length, SD_ZBC_BUF_SIZE);
469 buf_len = list_length;
470 else
471 buf_len = SD_ZBC_BUF_SIZE;
472 530
473 /* Parse zone descriptors */ 531 /* Parse zone descriptors */
474 while (rec < buf + buf_len) { 532 while (rec < buf + buf_len) {
@@ -523,6 +581,7 @@ out:
523 } 581 }
524 582
525 sdkp->zone_blocks = zone_blocks; 583 sdkp->zone_blocks = zone_blocks;
584 sdkp->zone_shift = ilog2(zone_blocks);
526 585
527 return 0; 586 return 0;
528} 587}
@@ -530,13 +589,15 @@ out:
530static int sd_zbc_setup(struct scsi_disk *sdkp) 589static int sd_zbc_setup(struct scsi_disk *sdkp)
531{ 590{
532 591
592 /* READ16/WRITE16 is mandatory for ZBC disks */
593 sdkp->device->use_16_for_rw = 1;
594 sdkp->device->use_10_for_rw = 0;
595
533 /* chunk_sectors indicates the zone size */ 596 /* chunk_sectors indicates the zone size */
534 blk_queue_chunk_sectors(sdkp->disk->queue, 597 blk_queue_chunk_sectors(sdkp->disk->queue,
535 logical_to_sectors(sdkp->device, sdkp->zone_blocks)); 598 logical_to_sectors(sdkp->device, sdkp->zone_blocks));
536 sdkp->zone_shift = ilog2(sdkp->zone_blocks); 599 sdkp->nr_zones =
537 sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift; 600 round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
538 if (sdkp->capacity & (sdkp->zone_blocks - 1))
539 sdkp->nr_zones++;
540 601
541 if (!sdkp->zones_wlock) { 602 if (!sdkp->zones_wlock) {
542 sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones), 603 sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
@@ -549,8 +610,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp)
549 return 0; 610 return 0;
550} 611}
551 612
552int sd_zbc_read_zones(struct scsi_disk *sdkp, 613int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
553 unsigned char *buf)
554{ 614{
555 int ret; 615 int ret;
556 616
@@ -561,7 +621,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp,
561 */ 621 */
562 return 0; 622 return 0;
563 623
564
565 /* Get zoned block device characteristics */ 624 /* Get zoned block device characteristics */
566 ret = sd_zbc_read_zoned_characteristics(sdkp, buf); 625 ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
567 if (ret) 626 if (ret)
@@ -598,10 +657,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp,
598 if (ret) 657 if (ret)
599 goto err; 658 goto err;
600 659
601 /* READ16/WRITE16 is mandatory for ZBC disks */
602 sdkp->device->use_16_for_rw = 1;
603 sdkp->device->use_10_for_rw = 0;
604
605 return 0; 660 return 0;
606 661
607err: 662err:
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 90f6effc32b4..b2880c7709e6 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
40#define BUILD_TIMESTAMP 40#define BUILD_TIMESTAMP
41#endif 41#endif
42 42
43#define DRIVER_VERSION "1.1.2-125" 43#define DRIVER_VERSION "1.1.2-126"
44#define DRIVER_MAJOR 1 44#define DRIVER_MAJOR 1
45#define DRIVER_MINOR 1 45#define DRIVER_MINOR 1
46#define DRIVER_RELEASE 2 46#define DRIVER_RELEASE 2
47#define DRIVER_REVISION 125 47#define DRIVER_REVISION 126
48 48
49#define DRIVER_NAME "Microsemi PQI Driver (v" \ 49#define DRIVER_NAME "Microsemi PQI Driver (v" \
50 DRIVER_VERSION BUILD_TIMESTAMP ")" 50 DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -1078,9 +1078,9 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1078 1078
1079bad_raid_map: 1079bad_raid_map:
1080 dev_warn(&ctrl_info->pci_dev->dev, 1080 dev_warn(&ctrl_info->pci_dev->dev,
1081 "scsi %d:%d:%d:%d %s\n", 1081 "logical device %08x%08x %s\n",
1082 ctrl_info->scsi_host->host_no, 1082 *((u32 *)&device->scsi3addr),
1083 device->bus, device->target, device->lun, err_msg); 1083 *((u32 *)&device->scsi3addr[4]), err_msg);
1084 1084
1085 return -EINVAL; 1085 return -EINVAL;
1086} 1086}
@@ -6925,6 +6925,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
6925 }, 6925 },
6926 { 6926 {
6927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6928 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
6929 },
6930 {
6931 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6932 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
6933 },
6934 {
6935 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6928 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 6936 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
6929 }, 6937 },
6930 { 6938 {
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 5e7200f05873..1b06cf0375dc 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -486,6 +486,9 @@ struct hv_host_device {
486 unsigned int port; 486 unsigned int port;
487 unsigned char path; 487 unsigned char path;
488 unsigned char target; 488 unsigned char target;
489 struct workqueue_struct *handle_error_wq;
490 struct work_struct host_scan_work;
491 struct Scsi_Host *host;
489}; 492};
490 493
491struct storvsc_scan_work { 494struct storvsc_scan_work {
@@ -514,13 +517,12 @@ done:
514 517
515static void storvsc_host_scan(struct work_struct *work) 518static void storvsc_host_scan(struct work_struct *work)
516{ 519{
517 struct storvsc_scan_work *wrk;
518 struct Scsi_Host *host; 520 struct Scsi_Host *host;
519 struct scsi_device *sdev; 521 struct scsi_device *sdev;
522 struct hv_host_device *host_device =
523 container_of(work, struct hv_host_device, host_scan_work);
520 524
521 wrk = container_of(work, struct storvsc_scan_work, work); 525 host = host_device->host;
522 host = wrk->host;
523
524 /* 526 /*
525 * Before scanning the host, first check to see if any of the 527 * Before scanning the host, first check to see if any of the
526 * currrently known devices have been hot removed. We issue a 528 * currrently known devices have been hot removed. We issue a
@@ -540,8 +542,6 @@ static void storvsc_host_scan(struct work_struct *work)
540 * Now scan the host to discover LUNs that may have been added. 542 * Now scan the host to discover LUNs that may have been added.
541 */ 543 */
542 scsi_scan_host(host); 544 scsi_scan_host(host);
543
544 kfree(wrk);
545} 545}
546 546
547static void storvsc_remove_lun(struct work_struct *work) 547static void storvsc_remove_lun(struct work_struct *work)
@@ -922,6 +922,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
922{ 922{
923 struct storvsc_scan_work *wrk; 923 struct storvsc_scan_work *wrk;
924 void (*process_err_fn)(struct work_struct *work); 924 void (*process_err_fn)(struct work_struct *work);
925 struct hv_host_device *host_dev = shost_priv(host);
925 bool do_work = false; 926 bool do_work = false;
926 927
927 switch (SRB_STATUS(vm_srb->srb_status)) { 928 switch (SRB_STATUS(vm_srb->srb_status)) {
@@ -988,7 +989,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
988 wrk->lun = vm_srb->lun; 989 wrk->lun = vm_srb->lun;
989 wrk->tgt_id = vm_srb->target_id; 990 wrk->tgt_id = vm_srb->target_id;
990 INIT_WORK(&wrk->work, process_err_fn); 991 INIT_WORK(&wrk->work, process_err_fn);
991 schedule_work(&wrk->work); 992 queue_work(host_dev->handle_error_wq, &wrk->work);
992} 993}
993 994
994 995
@@ -1116,8 +1117,7 @@ static void storvsc_on_receive(struct storvsc_device *stor_device,
1116 struct vstor_packet *vstor_packet, 1117 struct vstor_packet *vstor_packet,
1117 struct storvsc_cmd_request *request) 1118 struct storvsc_cmd_request *request)
1118{ 1119{
1119 struct storvsc_scan_work *work; 1120 struct hv_host_device *host_dev;
1120
1121 switch (vstor_packet->operation) { 1121 switch (vstor_packet->operation) {
1122 case VSTOR_OPERATION_COMPLETE_IO: 1122 case VSTOR_OPERATION_COMPLETE_IO:
1123 storvsc_on_io_completion(stor_device, vstor_packet, request); 1123 storvsc_on_io_completion(stor_device, vstor_packet, request);
@@ -1125,13 +1125,9 @@ static void storvsc_on_receive(struct storvsc_device *stor_device,
1125 1125
1126 case VSTOR_OPERATION_REMOVE_DEVICE: 1126 case VSTOR_OPERATION_REMOVE_DEVICE:
1127 case VSTOR_OPERATION_ENUMERATE_BUS: 1127 case VSTOR_OPERATION_ENUMERATE_BUS:
1128 work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC); 1128 host_dev = shost_priv(stor_device->host);
1129 if (!work) 1129 queue_work(
1130 return; 1130 host_dev->handle_error_wq, &host_dev->host_scan_work);
1131
1132 INIT_WORK(&work->work, storvsc_host_scan);
1133 work->host = stor_device->host;
1134 schedule_work(&work->work);
1135 break; 1131 break;
1136 1132
1137 case VSTOR_OPERATION_FCHBA_DATA: 1133 case VSTOR_OPERATION_FCHBA_DATA:
@@ -1744,6 +1740,7 @@ static int storvsc_probe(struct hv_device *device,
1744 1740
1745 host_dev->port = host->host_no; 1741 host_dev->port = host->host_no;
1746 host_dev->dev = device; 1742 host_dev->dev = device;
1743 host_dev->host = host;
1747 1744
1748 1745
1749 stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL); 1746 stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
@@ -1803,10 +1800,20 @@ static int storvsc_probe(struct hv_device *device,
1803 if (stor_device->num_sc != 0) 1800 if (stor_device->num_sc != 0)
1804 host->nr_hw_queues = stor_device->num_sc + 1; 1801 host->nr_hw_queues = stor_device->num_sc + 1;
1805 1802
1803 /*
1804 * Set the error handler work queue.
1805 */
1806 host_dev->handle_error_wq =
1807 alloc_ordered_workqueue("storvsc_error_wq_%d",
1808 WQ_MEM_RECLAIM,
1809 host->host_no);
1810 if (!host_dev->handle_error_wq)
1811 goto err_out2;
1812 INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
1806 /* Register the HBA and start the scsi bus scan */ 1813 /* Register the HBA and start the scsi bus scan */
1807 ret = scsi_add_host(host, &device->device); 1814 ret = scsi_add_host(host, &device->device);
1808 if (ret != 0) 1815 if (ret != 0)
1809 goto err_out2; 1816 goto err_out3;
1810 1817
1811 if (!dev_is_ide) { 1818 if (!dev_is_ide) {
1812 scsi_scan_host(host); 1819 scsi_scan_host(host);
@@ -1815,7 +1822,7 @@ static int storvsc_probe(struct hv_device *device,
1815 device->dev_instance.b[4]); 1822 device->dev_instance.b[4]);
1816 ret = scsi_add_device(host, 0, target, 0); 1823 ret = scsi_add_device(host, 0, target, 0);
1817 if (ret) 1824 if (ret)
1818 goto err_out3; 1825 goto err_out4;
1819 } 1826 }
1820#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 1827#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
1821 if (host->transportt == fc_transport_template) { 1828 if (host->transportt == fc_transport_template) {
@@ -1827,14 +1834,17 @@ static int storvsc_probe(struct hv_device *device,
1827 fc_host_port_name(host) = stor_device->port_name; 1834 fc_host_port_name(host) = stor_device->port_name;
1828 stor_device->rport = fc_remote_port_add(host, 0, &ids); 1835 stor_device->rport = fc_remote_port_add(host, 0, &ids);
1829 if (!stor_device->rport) 1836 if (!stor_device->rport)
1830 goto err_out3; 1837 goto err_out4;
1831 } 1838 }
1832#endif 1839#endif
1833 return 0; 1840 return 0;
1834 1841
1835err_out3: 1842err_out4:
1836 scsi_remove_host(host); 1843 scsi_remove_host(host);
1837 1844
1845err_out3:
1846 destroy_workqueue(host_dev->handle_error_wq);
1847
1838err_out2: 1848err_out2:
1839 /* 1849 /*
1840 * Once we have connected with the host, we would need to 1850 * Once we have connected with the host, we would need to
@@ -1858,6 +1868,7 @@ static int storvsc_remove(struct hv_device *dev)
1858{ 1868{
1859 struct storvsc_device *stor_device = hv_get_drvdata(dev); 1869 struct storvsc_device *stor_device = hv_get_drvdata(dev);
1860 struct Scsi_Host *host = stor_device->host; 1870 struct Scsi_Host *host = stor_device->host;
1871 struct hv_host_device *host_dev = shost_priv(host);
1861 1872
1862#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) 1873#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
1863 if (host->transportt == fc_transport_template) { 1874 if (host->transportt == fc_transport_template) {
@@ -1865,6 +1876,7 @@ static int storvsc_remove(struct hv_device *dev)
1865 fc_remove_host(host); 1876 fc_remove_host(host);
1866 } 1877 }
1867#endif 1878#endif
1879 destroy_workqueue(host_dev->handle_error_wq);
1868 scsi_remove_host(host); 1880 scsi_remove_host(host);
1869 storvsc_dev_remove(dev); 1881 storvsc_dev_remove(dev);
1870 scsi_host_put(host); 1882 scsi_host_put(host);
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
index dc03e47f7c58..3a8bc6d9cb5b 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ b/drivers/scsi/ufs/tc-dwc-g210.c
@@ -26,7 +26,7 @@
26 */ 26 */
27static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba) 27static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
28{ 28{
29 const struct ufshcd_dme_attr_val setup_attrs[] = { 29 static const struct ufshcd_dme_attr_val setup_attrs[] = {
30 { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL }, 30 { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
31 { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL }, 31 { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
32 { UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL }, 32 { UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
@@ -90,7 +90,7 @@ static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
90 */ 90 */
91static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba) 91static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
92{ 92{
93 const struct ufshcd_dme_attr_val setup_attrs[] = { 93 static const struct ufshcd_dme_attr_val setup_attrs[] = {
94 { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01, 94 { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
95 DME_LOCAL }, 95 DME_LOCAL },
96 { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19, 96 { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
@@ -147,7 +147,7 @@ static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
147 int connected_tx_lanes = 0; 147 int connected_tx_lanes = 0;
148 int ret = 0; 148 int ret = 0;
149 149
150 const struct ufshcd_dme_attr_val setup_tx_attrs[] = { 150 static const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
151 { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d, 151 { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
152 DME_LOCAL }, 152 DME_LOCAL },
153 { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19, 153 { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
@@ -158,7 +158,7 @@ static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
158 DME_LOCAL }, 158 DME_LOCAL },
159 }; 159 };
160 160
161 const struct ufshcd_dme_attr_val setup_rx_attrs[] = { 161 static const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
162 { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01, 162 { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
163 DME_LOCAL }, 163 DME_LOCAL },
164 { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19, 164 { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
@@ -222,7 +222,7 @@ static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
222{ 222{
223 int ret = 0; 223 int ret = 0;
224 224
225 const struct ufshcd_dme_attr_val setup_attrs[] = { 225 static const struct ufshcd_dme_attr_val setup_attrs[] = {
226 { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL }, 226 { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
227 { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL }, 227 { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
228 { UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL }, 228 { UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 890eafeb8ad4..2b38db2eeafa 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1453,7 +1453,7 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
1453 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv); 1453 print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
1454 1454
1455 reg = ufshcd_readl(hba, REG_UFS_CFG1); 1455 reg = ufshcd_readl(hba, REG_UFS_CFG1);
1456 reg |= UFS_BIT(17); 1456 reg |= UTP_DBG_RAMS_EN;
1457 ufshcd_writel(hba, reg, REG_UFS_CFG1); 1457 ufshcd_writel(hba, reg, REG_UFS_CFG1);
1458 1458
1459 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM); 1459 reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
@@ -1466,7 +1466,7 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
1466 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv); 1466 print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
1467 1467
1468 /* clear bit 17 - UTP_DBG_RAMS_EN */ 1468 /* clear bit 17 - UTP_DBG_RAMS_EN */
1469 ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1); 1469 ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
1470 1470
1471 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM); 1471 reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
1472 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv); 1472 print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 076f52813a4c..295f4bef6a0e 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -92,7 +92,8 @@ enum {
92#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x) 92#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
93 93
94/* bit definitions for REG_UFS_CFG1 register */ 94/* bit definitions for REG_UFS_CFG1 register */
95#define QUNIPRO_SEL UFS_BIT(0) 95#define QUNIPRO_SEL 0x1
96#define UTP_DBG_RAMS_EN 0x20000
96#define TEST_BUS_EN BIT(18) 97#define TEST_BUS_EN BIT(18)
97#define TEST_BUS_SEL GENMASK(22, 19) 98#define TEST_BUS_SEL GENMASK(22, 19)
98#define UFS_REG_TEST_BUS_EN BIT(30) 99#define UFS_REG_TEST_BUS_EN BIT(30)
@@ -213,13 +214,13 @@ struct ufs_qcom_host {
213 * Note: By default this capability will be kept enabled if host 214 * Note: By default this capability will be kept enabled if host
214 * controller supports the QUniPro mode. 215 * controller supports the QUniPro mode.
215 */ 216 */
216 #define UFS_QCOM_CAP_QUNIPRO UFS_BIT(0) 217 #define UFS_QCOM_CAP_QUNIPRO 0x1
217 218
218 /* 219 /*
219 * Set this capability if host controller can retain the secure 220 * Set this capability if host controller can retain the secure
220 * configuration even after UFS controller core power collapse. 221 * configuration even after UFS controller core power collapse.
221 */ 222 */
222 #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE UFS_BIT(1) 223 #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2
223 u32 caps; 224 u32 caps;
224 225
225 struct phy *generic_phy; 226 struct phy *generic_phy;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 794a4600e952..011c3369082c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -385,6 +385,8 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
385 385
386 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", 386 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
387 tag, ktime_to_us(lrbp->issue_time_stamp)); 387 tag, ktime_to_us(lrbp->issue_time_stamp));
388 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
389 tag, ktime_to_us(lrbp->compl_time_stamp));
388 dev_err(hba->dev, 390 dev_err(hba->dev,
389 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n", 391 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
390 tag, (u64)lrbp->utrd_dma_addr); 392 tag, (u64)lrbp->utrd_dma_addr);
@@ -1746,6 +1748,7 @@ static inline
1746void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) 1748void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1747{ 1749{
1748 hba->lrb[task_tag].issue_time_stamp = ktime_get(); 1750 hba->lrb[task_tag].issue_time_stamp = ktime_get();
1751 hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
1749 ufshcd_clk_scaling_start_busy(hba); 1752 ufshcd_clk_scaling_start_busy(hba);
1750 __set_bit(task_tag, &hba->outstanding_reqs); 1753 __set_bit(task_tag, &hba->outstanding_reqs);
1751 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); 1754 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -2195,10 +2198,11 @@ static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2195 u32 upiu_flags; 2198 u32 upiu_flags;
2196 int ret = 0; 2199 int ret = 0;
2197 2200
2198 if (hba->ufs_version == UFSHCI_VERSION_20) 2201 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2199 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 2202 (hba->ufs_version == UFSHCI_VERSION_11))
2200 else
2201 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE; 2203 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2204 else
2205 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2202 2206
2203 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE); 2207 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2204 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) 2208 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
@@ -2222,10 +2226,11 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2222 u32 upiu_flags; 2226 u32 upiu_flags;
2223 int ret = 0; 2227 int ret = 0;
2224 2228
2225 if (hba->ufs_version == UFSHCI_VERSION_20) 2229 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2226 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; 2230 (hba->ufs_version == UFSHCI_VERSION_11))
2227 else
2228 lrbp->command_type = UTP_CMD_TYPE_SCSI; 2231 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2232 else
2233 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2229 2234
2230 if (likely(lrbp->cmd)) { 2235 if (likely(lrbp->cmd)) {
2231 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, 2236 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
@@ -3586,7 +3591,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3586 status = ufshcd_get_upmcrs(hba); 3591 status = ufshcd_get_upmcrs(hba);
3587 if (status != PWR_LOCAL) { 3592 if (status != PWR_LOCAL) {
3588 dev_err(hba->dev, 3593 dev_err(hba->dev,
3589 "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n", 3594 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3590 cmd->command, status); 3595 cmd->command, status);
3591 ret = (status != PWR_OK) ? status : -1; 3596 ret = (status != PWR_OK) ? status : -1;
3592 } 3597 }
@@ -4627,6 +4632,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4627 } 4632 }
4628 if (ufshcd_is_clkscaling_supported(hba)) 4633 if (ufshcd_is_clkscaling_supported(hba))
4629 hba->clk_scaling.active_reqs--; 4634 hba->clk_scaling.active_reqs--;
4635
4636 lrbp->compl_time_stamp = ktime_get();
4630 } 4637 }
4631 4638
4632 /* clear corresponding bits of completed commands */ 4639 /* clear corresponding bits of completed commands */
@@ -5998,25 +6005,22 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
5998 } 6005 }
5999 scsi_device_put(hba->sdev_ufs_device); 6006 scsi_device_put(hba->sdev_ufs_device);
6000 6007
6001 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6002 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6003 if (IS_ERR(sdev_boot)) {
6004 ret = PTR_ERR(sdev_boot);
6005 goto remove_sdev_ufs_device;
6006 }
6007 scsi_device_put(sdev_boot);
6008
6009 sdev_rpmb = __scsi_add_device(hba->host, 0, 0, 6008 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6010 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); 6009 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6011 if (IS_ERR(sdev_rpmb)) { 6010 if (IS_ERR(sdev_rpmb)) {
6012 ret = PTR_ERR(sdev_rpmb); 6011 ret = PTR_ERR(sdev_rpmb);
6013 goto remove_sdev_boot; 6012 goto remove_sdev_ufs_device;
6014 } 6013 }
6015 scsi_device_put(sdev_rpmb); 6014 scsi_device_put(sdev_rpmb);
6015
6016 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6017 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6018 if (IS_ERR(sdev_boot))
6019 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6020 else
6021 scsi_device_put(sdev_boot);
6016 goto out; 6022 goto out;
6017 6023
6018remove_sdev_boot:
6019 scsi_remove_device(sdev_boot);
6020remove_sdev_ufs_device: 6024remove_sdev_ufs_device:
6021 scsi_remove_device(hba->sdev_ufs_device); 6025 scsi_remove_device(hba->sdev_ufs_device);
6022out: 6026out:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index cdc8bd05f7df..1332e544da92 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -166,6 +166,7 @@ struct ufs_pm_lvl_states {
166 * @lun: LUN of the command 166 * @lun: LUN of the command
167 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation) 167 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
168 * @issue_time_stamp: time stamp for debug purposes 168 * @issue_time_stamp: time stamp for debug purposes
169 * @compl_time_stamp: time stamp for statistics
169 * @req_abort_skip: skip request abort task flag 170 * @req_abort_skip: skip request abort task flag
170 */ 171 */
171struct ufshcd_lrb { 172struct ufshcd_lrb {
@@ -189,6 +190,7 @@ struct ufshcd_lrb {
189 u8 lun; /* UPIU LUN id field is only 8-bit wide */ 190 u8 lun; /* UPIU LUN id field is only 8-bit wide */
190 bool intr_cmd; 191 bool intr_cmd;
191 ktime_t issue_time_stamp; 192 ktime_t issue_time_stamp;
193 ktime_t compl_time_stamp;
192 194
193 bool req_abort_skip; 195 bool req_abort_skip;
194}; 196};
@@ -544,13 +546,13 @@ struct ufs_hba {
544 bool is_irq_enabled; 546 bool is_irq_enabled;
545 547
546 /* Interrupt aggregation support is broken */ 548 /* Interrupt aggregation support is broken */
547 #define UFSHCD_QUIRK_BROKEN_INTR_AGGR UFS_BIT(0) 549 #define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1
548 550
549 /* 551 /*
550 * delay before each dme command is required as the unipro 552 * delay before each dme command is required as the unipro
551 * layer has shown instabilities 553 * layer has shown instabilities
552 */ 554 */
553 #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(1) 555 #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS 0x2
554 556
555 /* 557 /*
556 * If UFS host controller is having issue in processing LCC (Line 558 * If UFS host controller is having issue in processing LCC (Line
@@ -559,21 +561,21 @@ struct ufs_hba {
559 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE 561 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
560 * attribute of device to 0). 562 * attribute of device to 0).
561 */ 563 */
562 #define UFSHCD_QUIRK_BROKEN_LCC UFS_BIT(2) 564 #define UFSHCD_QUIRK_BROKEN_LCC 0x4
563 565
564 /* 566 /*
565 * The attribute PA_RXHSUNTERMCAP specifies whether or not the 567 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
566 * inbound Link supports unterminated line in HS mode. Setting this 568 * inbound Link supports unterminated line in HS mode. Setting this
567 * attribute to 1 fixes moving to HS gear. 569 * attribute to 1 fixes moving to HS gear.
568 */ 570 */
569 #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP UFS_BIT(3) 571 #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP 0x8
570 572
571 /* 573 /*
572 * This quirk needs to be enabled if the host contoller only allows 574 * This quirk needs to be enabled if the host contoller only allows
573 * accessing the peer dme attributes in AUTO mode (FAST AUTO or 575 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
574 * SLOW AUTO). 576 * SLOW AUTO).
575 */ 577 */
576 #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE UFS_BIT(4) 578 #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE 0x10
577 579
578 /* 580 /*
579 * This quirk needs to be enabled if the host contoller doesn't 581 * This quirk needs to be enabled if the host contoller doesn't
@@ -581,13 +583,13 @@ struct ufs_hba {
581 * is enabled, standard UFS host driver will call the vendor specific 583 * is enabled, standard UFS host driver will call the vendor specific
582 * ops (get_ufs_hci_version) to get the correct version. 584 * ops (get_ufs_hci_version) to get the correct version.
583 */ 585 */
584 #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5) 586 #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION 0x20
585 587
586 /* 588 /*
587 * This quirk needs to be enabled if the host contoller regards 589 * This quirk needs to be enabled if the host contoller regards
588 * resolution of the values of PRDTO and PRDTL in UTRD as byte. 590 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
589 */ 591 */
590 #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7) 592 #define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80
591 593
592 unsigned int quirks; /* Deviations from standard UFSHCI spec. */ 594 unsigned int quirks; /* Deviations from standard UFSHCI spec. */
593 595
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index f60145d4a66e..277752b0fc6f 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -119,22 +119,23 @@ enum {
119#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0) 119#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
120#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16) 120#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
121 121
122#define UFS_BIT(x) (1L << (x)) 122/*
123 123 * IS - Interrupt Status - 20h
124#define UTP_TRANSFER_REQ_COMPL UFS_BIT(0) 124 */
125#define UIC_DME_END_PT_RESET UFS_BIT(1) 125#define UTP_TRANSFER_REQ_COMPL 0x1
126#define UIC_ERROR UFS_BIT(2) 126#define UIC_DME_END_PT_RESET 0x2
127#define UIC_TEST_MODE UFS_BIT(3) 127#define UIC_ERROR 0x4
128#define UIC_POWER_MODE UFS_BIT(4) 128#define UIC_TEST_MODE 0x8
129#define UIC_HIBERNATE_EXIT UFS_BIT(5) 129#define UIC_POWER_MODE 0x10
130#define UIC_HIBERNATE_ENTER UFS_BIT(6) 130#define UIC_HIBERNATE_EXIT 0x20
131#define UIC_LINK_LOST UFS_BIT(7) 131#define UIC_HIBERNATE_ENTER 0x40
132#define UIC_LINK_STARTUP UFS_BIT(8) 132#define UIC_LINK_LOST 0x80
133#define UTP_TASK_REQ_COMPL UFS_BIT(9) 133#define UIC_LINK_STARTUP 0x100
134#define UIC_COMMAND_COMPL UFS_BIT(10) 134#define UTP_TASK_REQ_COMPL 0x200
135#define DEVICE_FATAL_ERROR UFS_BIT(11) 135#define UIC_COMMAND_COMPL 0x400
136#define CONTROLLER_FATAL_ERROR UFS_BIT(16) 136#define DEVICE_FATAL_ERROR 0x800
137#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) 137#define CONTROLLER_FATAL_ERROR 0x10000
138#define SYSTEM_BUS_FATAL_ERROR 0x20000
138 139
139#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\ 140#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\
140 UIC_HIBERNATE_EXIT |\ 141 UIC_HIBERNATE_EXIT |\
@@ -152,12 +153,10 @@ enum {
152 SYSTEM_BUS_FATAL_ERROR) 153 SYSTEM_BUS_FATAL_ERROR)
153 154
154/* HCS - Host Controller Status 30h */ 155/* HCS - Host Controller Status 30h */
155#define DEVICE_PRESENT UFS_BIT(0) 156#define DEVICE_PRESENT 0x1
156#define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1) 157#define UTP_TRANSFER_REQ_LIST_READY 0x2
157#define UTP_TASK_REQ_LIST_READY UFS_BIT(2) 158#define UTP_TASK_REQ_LIST_READY 0x4
158#define UIC_COMMAND_READY UFS_BIT(3) 159#define UIC_COMMAND_READY 0x8
159#define HOST_ERROR_INDICATOR UFS_BIT(4)
160#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
161#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) 160#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
162 161
163#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\ 162#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
@@ -174,46 +173,47 @@ enum {
174}; 173};
175 174
176/* HCE - Host Controller Enable 34h */ 175/* HCE - Host Controller Enable 34h */
177#define CONTROLLER_ENABLE UFS_BIT(0) 176#define CONTROLLER_ENABLE 0x1
178#define CONTROLLER_DISABLE 0x0 177#define CONTROLLER_DISABLE 0x0
179#define CRYPTO_GENERAL_ENABLE UFS_BIT(1) 178#define CRYPTO_GENERAL_ENABLE 0x2
180 179
181/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */ 180/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
182#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31) 181#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
183#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F 182#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
184#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF 183#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
185 184
186/* UECDL - Host UIC Error Code Data Link Layer 3Ch */ 185/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
187#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) 186#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
188#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF 187#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
189#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 188#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
190#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001 189#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
191#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002 190#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
192 191
193/* UECN - Host UIC Error Code Network Layer 40h */ 192/* UECN - Host UIC Error Code Network Layer 40h */
194#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31) 193#define UIC_NETWORK_LAYER_ERROR 0x80000000
195#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7 194#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
196 195
197/* UECT - Host UIC Error Code Transport Layer 44h */ 196/* UECT - Host UIC Error Code Transport Layer 44h */
198#define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31) 197#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
199#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F 198#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
200 199
201/* UECDME - Host UIC Error Code DME 48h */ 200/* UECDME - Host UIC Error Code DME 48h */
202#define UIC_DME_ERROR UFS_BIT(31) 201#define UIC_DME_ERROR 0x80000000
203#define UIC_DME_ERROR_CODE_MASK 0x1 202#define UIC_DME_ERROR_CODE_MASK 0x1
204 203
204/* UTRIACR - Interrupt Aggregation control register - 0x4Ch */
205#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF 205#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
206#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8) 206#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
207#define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16) 207#define INT_AGGR_COUNTER_AND_TIMER_RESET 0x10000
208#define INT_AGGR_STATUS_BIT UFS_BIT(20) 208#define INT_AGGR_STATUS_BIT 0x100000
209#define INT_AGGR_PARAM_WRITE UFS_BIT(24) 209#define INT_AGGR_PARAM_WRITE 0x1000000
210#define INT_AGGR_ENABLE UFS_BIT(31) 210#define INT_AGGR_ENABLE 0x80000000
211 211
212/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */ 212/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
213#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) 213#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1
214 214
215/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */ 215/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
216#define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0) 216#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
217 217
218/* UICCMD - UIC Command */ 218/* UICCMD - UIC Command */
219#define COMMAND_OPCODE_MASK 0xFF 219#define COMMAND_OPCODE_MASK 0xFF
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 388aaf72b480..0f9cbf96c093 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -60,38 +60,32 @@ enum sas_phy_type {
60 * so when updating/adding events here, please also 60 * so when updating/adding events here, please also
61 * update the other file too. 61 * update the other file too.
62 */ 62 */
63enum ha_event {
64 HAE_RESET = 0U,
65 HA_NUM_EVENTS = 1,
66};
67
68enum port_event { 63enum port_event {
69 PORTE_BYTES_DMAED = 0U, 64 PORTE_BYTES_DMAED = 0U,
70 PORTE_BROADCAST_RCVD = 1, 65 PORTE_BROADCAST_RCVD,
71 PORTE_LINK_RESET_ERR = 2, 66 PORTE_LINK_RESET_ERR,
72 PORTE_TIMER_EVENT = 3, 67 PORTE_TIMER_EVENT,
73 PORTE_HARD_RESET = 4, 68 PORTE_HARD_RESET,
74 PORT_NUM_EVENTS = 5, 69 PORT_NUM_EVENTS,
75}; 70};
76 71
77enum phy_event { 72enum phy_event {
78 PHYE_LOSS_OF_SIGNAL = 0U, 73 PHYE_LOSS_OF_SIGNAL = 0U,
79 PHYE_OOB_DONE = 1, 74 PHYE_OOB_DONE,
80 PHYE_OOB_ERROR = 2, 75 PHYE_OOB_ERROR,
81 PHYE_SPINUP_HOLD = 3, /* hot plug SATA, no COMWAKE sent */ 76 PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */
82 PHYE_RESUME_TIMEOUT = 4, 77 PHYE_RESUME_TIMEOUT,
83 PHY_NUM_EVENTS = 5, 78 PHY_NUM_EVENTS,
84}; 79};
85 80
86enum discover_event { 81enum discover_event {
87 DISCE_DISCOVER_DOMAIN = 0U, 82 DISCE_DISCOVER_DOMAIN = 0U,
88 DISCE_REVALIDATE_DOMAIN = 1, 83 DISCE_REVALIDATE_DOMAIN,
89 DISCE_PORT_GONE = 2, 84 DISCE_PROBE,
90 DISCE_PROBE = 3, 85 DISCE_SUSPEND,
91 DISCE_SUSPEND = 4, 86 DISCE_RESUME,
92 DISCE_RESUME = 5, 87 DISCE_DESTRUCT,
93 DISCE_DESTRUCT = 6, 88 DISC_NUM_EVENTS,
94 DISC_NUM_EVENTS = 7,
95}; 89};
96 90
97/* ---------- Expander Devices ---------- */ 91/* ---------- Expander Devices ---------- */
@@ -261,8 +255,6 @@ struct sas_discovery {
261/* The port struct is Class:RW, driver:RO */ 255/* The port struct is Class:RW, driver:RO */
262struct asd_sas_port { 256struct asd_sas_port {
263/* private: */ 257/* private: */
264 struct completion port_gone_completion;
265
266 struct sas_discovery disc; 258 struct sas_discovery disc;
267 struct domain_device *port_dev; 259 struct domain_device *port_dev;
268 spinlock_t dev_list_lock; 260 spinlock_t dev_list_lock;
@@ -362,18 +354,6 @@ struct scsi_core {
362 354
363}; 355};
364 356
365struct sas_ha_event {
366 struct sas_work work;
367 struct sas_ha_struct *ha;
368};
369
370static inline struct sas_ha_event *to_sas_ha_event(struct work_struct *work)
371{
372 struct sas_ha_event *ev = container_of(work, typeof(*ev), work.work);
373
374 return ev;
375}
376
377enum sas_ha_state { 357enum sas_ha_state {
378 SAS_HA_REGISTERED, 358 SAS_HA_REGISTERED,
379 SAS_HA_DRAINING, 359 SAS_HA_DRAINING,
@@ -383,9 +363,6 @@ enum sas_ha_state {
383 363
384struct sas_ha_struct { 364struct sas_ha_struct {
385/* private: */ 365/* private: */
386 struct sas_ha_event ha_events[HA_NUM_EVENTS];
387 unsigned long pending;
388
389 struct list_head defer_q; /* work queued while draining */ 366 struct list_head defer_q; /* work queued while draining */
390 struct mutex drain_mutex; 367 struct mutex drain_mutex;
391 unsigned long state; 368 unsigned long state;
@@ -415,7 +392,6 @@ struct sas_ha_struct {
415 * their siblings when forming wide ports */ 392 * their siblings when forming wide ports */
416 393
417 /* LLDD calls these to notify the class of an event. */ 394 /* LLDD calls these to notify the class of an event. */
418 int (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
419 int (*notify_port_event)(struct asd_sas_phy *, enum port_event); 395 int (*notify_port_event)(struct asd_sas_phy *, enum port_event);
420 int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event); 396 int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
421 397
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 73af87dfbff8..1fb6ad3c5006 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -65,9 +65,10 @@ enum scsi_device_event {
65 SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */ 65 SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */
66 SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */ 66 SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */
67 SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, /* 2A 06 UA reported */ 67 SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, /* 2A 06 UA reported */
68 SDEV_EVT_POWER_ON_RESET_OCCURRED, /* 29 00 UA reported */
68 69
69 SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE, 70 SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE,
70 SDEV_EVT_LAST = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, 71 SDEV_EVT_LAST = SDEV_EVT_POWER_ON_RESET_OCCURRED,
71 72
72 SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1 73 SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1
73}; 74};
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 3575693bb628..3cf125b56c3a 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -4,32 +4,57 @@
4/* 4/*
5 * Flags for SCSI devices that need special treatment 5 * Flags for SCSI devices that need special treatment
6 */ 6 */
7#define BLIST_NOLUN 0x001 /* Only scan LUN 0 */ 7
8#define BLIST_FORCELUN 0x002 /* Known to have LUNs, force scanning, 8/* Only scan LUN 0 */
9 deprecated: Use max_luns=N */ 9#define BLIST_NOLUN ((__force __u32 __bitwise)(1 << 0))
10#define BLIST_BORKEN 0x004 /* Flag for broken handshaking */ 10/* Known to have LUNs, force scanning.
11#define BLIST_KEY 0x008 /* unlock by special command */ 11 * DEPRECATED: Use max_luns=N */
12#define BLIST_SINGLELUN 0x010 /* Do not use LUNs in parallel */ 12#define BLIST_FORCELUN ((__force __u32 __bitwise)(1 << 1))
13#define BLIST_NOTQ 0x020 /* Buggy Tagged Command Queuing */ 13/* Flag for broken handshaking */
14#define BLIST_SPARSELUN 0x040 /* Non consecutive LUN numbering */ 14#define BLIST_BORKEN ((__force __u32 __bitwise)(1 << 2))
15#define BLIST_MAX5LUN 0x080 /* Avoid LUNS >= 5 */ 15/* unlock by special command */
16#define BLIST_ISROM 0x100 /* Treat as (removable) CD-ROM */ 16#define BLIST_KEY ((__force __u32 __bitwise)(1 << 3))
17#define BLIST_LARGELUN 0x200 /* LUNs past 7 on a SCSI-2 device */ 17/* Do not use LUNs in parallel */
18#define BLIST_INQUIRY_36 0x400 /* override additional length field */ 18#define BLIST_SINGLELUN ((__force __u32 __bitwise)(1 << 4))
19#define BLIST_NOSTARTONADD 0x1000 /* do not do automatic start on add */ 19/* Buggy Tagged Command Queuing */
20#define BLIST_REPORTLUN2 0x20000 /* try REPORT_LUNS even for SCSI-2 devs 20#define BLIST_NOTQ ((__force __u32 __bitwise)(1 << 5))
21 (if HBA supports more than 8 LUNs) */ 21/* Non consecutive LUN numbering */
22#define BLIST_NOREPORTLUN 0x40000 /* don't try REPORT_LUNS scan (SCSI-3 devs) */ 22#define BLIST_SPARSELUN ((__force __u32 __bitwise)(1 << 6))
23#define BLIST_NOT_LOCKABLE 0x80000 /* don't use PREVENT-ALLOW commands */ 23/* Avoid LUNS >= 5 */
24#define BLIST_NO_ULD_ATTACH 0x100000 /* device is actually for RAID config */ 24#define BLIST_MAX5LUN ((__force __u32 __bitwise)(1 << 7))
25#define BLIST_SELECT_NO_ATN 0x200000 /* select without ATN */ 25/* Treat as (removable) CD-ROM */
26#define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */ 26#define BLIST_ISROM ((__force __u32 __bitwise)(1 << 8))
27#define BLIST_MAX_512 0x800000 /* maximum 512 sector cdb length */ 27/* LUNs past 7 on a SCSI-2 device */
28#define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */ 28#define BLIST_LARGELUN ((__force __u32 __bitwise)(1 << 9))
29#define BLIST_SKIP_VPD_PAGES 0x4000000 /* Ignore SBC-3 VPD pages */ 29/* override additional length field */
30#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ 30#define BLIST_INQUIRY_36 ((__force __u32 __bitwise)(1 << 10))
31#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ 31/* do not do automatic start on add */
32#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ 32#define BLIST_NOSTARTONADD ((__force __u32 __bitwise)(1 << 12))
33#define BLIST_UNMAP_LIMIT_WS 0x80000000 /* Use UNMAP limit for WRITE SAME */ 33/* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */
34#define BLIST_REPORTLUN2 ((__force __u32 __bitwise)(1 << 17))
35/* don't try REPORT_LUNS scan (SCSI-3 devs) */
36#define BLIST_NOREPORTLUN ((__force __u32 __bitwise)(1 << 18))
37/* don't use PREVENT-ALLOW commands */
38#define BLIST_NOT_LOCKABLE ((__force __u32 __bitwise)(1 << 19))
39/* device is actually for RAID config */
40#define BLIST_NO_ULD_ATTACH ((__force __u32 __bitwise)(1 << 20))
41/* select without ATN */
42#define BLIST_SELECT_NO_ATN ((__force __u32 __bitwise)(1 << 21))
43/* retry HARDWARE_ERROR */
44#define BLIST_RETRY_HWERROR ((__force __u32 __bitwise)(1 << 22))
45/* maximum 512 sector cdb length */
46#define BLIST_MAX_512 ((__force __u32 __bitwise)(1 << 23))
47/* Disable T10 PI (DIF) */
48#define BLIST_NO_DIF ((__force __u32 __bitwise)(1 << 25))
49/* Ignore SBC-3 VPD pages */
50#define BLIST_SKIP_VPD_PAGES ((__force __u32 __bitwise)(1 << 26))
51/* Attempt to read VPD pages */
52#define BLIST_TRY_VPD_PAGES ((__force __u32 __bitwise)(1 << 28))
53/* don't try to issue RSOC */
54#define BLIST_NO_RSOC ((__force __u32 __bitwise)(1 << 29))
55/* maximum 1024 sector cdb length */
56#define BLIST_MAX_1024 ((__force __u32 __bitwise)(1 << 30))
57/* Use UNMAP limit for WRITE SAME */
58#define BLIST_UNMAP_LIMIT_WS ((__force __u32 __bitwise)(1 << 31))
34 59
35#endif 60#endif
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 1c41dbcfcb35..1df8efb0ee01 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -302,19 +302,42 @@ struct scsi_lun {
302 302
303/* Reporting options for REPORT ZONES */ 303/* Reporting options for REPORT ZONES */
304enum zbc_zone_reporting_options { 304enum zbc_zone_reporting_options {
305 ZBC_ZONE_REPORTING_OPTION_ALL = 0, 305 ZBC_ZONE_REPORTING_OPTION_ALL = 0x00,
306 ZBC_ZONE_REPORTING_OPTION_EMPTY, 306 ZBC_ZONE_REPORTING_OPTION_EMPTY = 0x01,
307 ZBC_ZONE_REPORTING_OPTION_IMPLICIT_OPEN, 307 ZBC_ZONE_REPORTING_OPTION_IMPLICIT_OPEN = 0x02,
308 ZBC_ZONE_REPORTING_OPTION_EXPLICIT_OPEN, 308 ZBC_ZONE_REPORTING_OPTION_EXPLICIT_OPEN = 0x03,
309 ZBC_ZONE_REPORTING_OPTION_CLOSED, 309 ZBC_ZONE_REPORTING_OPTION_CLOSED = 0x04,
310 ZBC_ZONE_REPORTING_OPTION_FULL, 310 ZBC_ZONE_REPORTING_OPTION_FULL = 0x05,
311 ZBC_ZONE_REPORTING_OPTION_READONLY, 311 ZBC_ZONE_REPORTING_OPTION_READONLY = 0x06,
312 ZBC_ZONE_REPORTING_OPTION_OFFLINE, 312 ZBC_ZONE_REPORTING_OPTION_OFFLINE = 0x07,
313 ZBC_ZONE_REPORTING_OPTION_NEED_RESET_WP = 0x10, 313 /* 0x08 to 0x0f are reserved */
314 ZBC_ZONE_REPORTING_OPTION_NON_SEQWRITE, 314 ZBC_ZONE_REPORTING_OPTION_NEED_RESET_WP = 0x10,
315 ZBC_ZONE_REPORTING_OPTION_NON_WP = 0x3f, 315 ZBC_ZONE_REPORTING_OPTION_NON_SEQWRITE = 0x11,
316 /* 0x12 to 0x3e are reserved */
317 ZBC_ZONE_REPORTING_OPTION_NON_WP = 0x3f,
316}; 318};
317 319
318#define ZBC_REPORT_ZONE_PARTIAL 0x80 320#define ZBC_REPORT_ZONE_PARTIAL 0x80
319 321
322/* Zone types of REPORT ZONES zone descriptors */
323enum zbc_zone_type {
324 ZBC_ZONE_TYPE_CONV = 0x1,
325 ZBC_ZONE_TYPE_SEQWRITE_REQ = 0x2,
326 ZBC_ZONE_TYPE_SEQWRITE_PREF = 0x3,
327 /* 0x4 to 0xf are reserved */
328};
329
330/* Zone conditions of REPORT ZONES zone descriptors */
331enum zbc_zone_cond {
332 ZBC_ZONE_COND_NO_WP = 0x0,
333 ZBC_ZONE_COND_EMPTY = 0x1,
334 ZBC_ZONE_COND_IMP_OPEN = 0x2,
335 ZBC_ZONE_COND_EXP_OPEN = 0x3,
336 ZBC_ZONE_COND_CLOSED = 0x4,
337 /* 0x5 to 0xc are reserved */
338 ZBC_ZONE_COND_READONLY = 0xd,
339 ZBC_ZONE_COND_FULL = 0xe,
340 ZBC_ZONE_COND_OFFLINE = 0xf,
341};
342
320#endif /* _SCSI_PROTO_H_ */ 343#endif /* _SCSI_PROTO_H_ */
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index e8644eea9fe5..8cf30215c177 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -139,6 +139,8 @@ enum fc_vport_state {
139#define FC_PORTSPEED_50GBIT 0x200 139#define FC_PORTSPEED_50GBIT 0x200
140#define FC_PORTSPEED_100GBIT 0x400 140#define FC_PORTSPEED_100GBIT 0x400
141#define FC_PORTSPEED_25GBIT 0x800 141#define FC_PORTSPEED_25GBIT 0x800
142#define FC_PORTSPEED_64BIT 0x1000
143#define FC_PORTSPEED_128BIT 0x2000
142#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */ 144#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
143 145
144/* 146/*